/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
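
/*
 * Kprobes are hashed by their probed address into KPROBE_TABLE_SIZE
 * (1 << 6 == 64) buckets; get_kprobe() below indexes kprobe_table[]
 * with hash_ptr(addr, KPROBE_HASH_BITS).
 */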

/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this lookup must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}
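
/*
 * kretprobe instances are hashed by the probed task (see
 * kretprobe_hash_lock() below, which uses hash_ptr(tsk)), so each of the
 * KPROBE_TABLE_SIZE buckets gets its own spinlock; unrelated tasks
 * therefore do not contend on a single global lock.
 */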

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{"mcount",},	/* mcount can be called from everywhere */
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
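
/*
 * For example, on x86, where kprobe_opcode_t is one byte and
 * MAX_INSN_SIZE is 16, a 4KB page holds 4096 / 16 = 256 instruction
 * slots.
 */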

struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	list_add(&kip->list, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;
	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);
	return ret;
}

/*
 * Return 1 if the page holding this slot became fully unused (and was
 * possibly freed), otherwise 0.
 */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kprobe_insn_pages)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is still single-stepping on a garbage slot */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	mutex_lock(&kprobe_insn_mutex);
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else
				collect_one_slot(kip, i);
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/* Arm a kprobe with text_mutex held */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex held */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments the nmissed count for the multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	/* empty_rp must be initialized before recycle_rp_inst() adds to it */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* Don't take over the handlers of a kprobe that has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (old_p->pre_handler != aggr_pre_handler) {
		/* If old_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location as an earlier probe in a module vaddr area
		 * that has since been freed, so the instruction slot
		 * has already been released. We need a new slot for
		 * the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe. It will be used
			 * next time, or freed by unregister_kprobe().
			 */
			return ret;

		/*
		 * Clear the gone flag to prevent allocating a new slot
		 * again, and set the disabled flag because the probe is
		 * not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable an aggr_kprobe, and return 1 if it succeeded. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
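
/*
 * For example (illustrative only), a caller may specify a probe point as
 * a symbol plus an offset instead of a raw address:
 *
 *	struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.offset		= 0x10,
 *	};
 *
 * kprobe_addr() then resolves "do_fork" via kprobe_lookup_name() and
 * returns that address plus 0x10 (the offset must land on an instruction
 * boundary). Specifying both .addr and .symbol_name is rejected above.
 */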

/* Check the passed kprobe is valid and return the one in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/* Return an error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;

	mutex_lock(&kprobe_mutex);
	old_p = __get_valid_kprobe(p);
	if (old_p)
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr) ||
	    ftrace_text_reserved(p->addr, p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/* Check whether we are probing a module. */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module has freed .init.text, we can't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);
	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out_unlock_text;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arch_arm_kprobe(p);

out_unlock_text:
	mutex_unlock(&text_mutex);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
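
/*
 * Minimal usage sketch (handler and variable names are illustrative;
 * see Documentation/kprobes.txt for the full API):
 *
 *	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "kprobe hit at %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre_handler,
 *	};
 *
 *	ret = register_kprobe(&my_kp);
 *	...
 *	unregister_kprobe(&my_kp);
 */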

/*
 * Unregister a kprobe without scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
				disarm_kprobe(old_p);
		}
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		kfree(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* TODO: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);
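
/*
 * Usage sketch (illustrative; see Documentation/kprobes.txt): a jprobe
 * handler must declare the same signature as the probed function, may
 * inspect the arguments, and must end with jprobe_return():
 *
 *	static long my_jdo_fork(unsigned long clone_flags, ...)
 *	{
 *		printk(KERN_INFO "clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();
 *		return 0;	never reached
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry	= my_jdo_fork,
 *		.kp	= { .symbol_name = "do_fork" },
 *	};
 *
 *	register_jprobe(&my_jp);
 */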

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
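
/*
 * Usage sketch (illustrative; see Documentation/kprobes.txt): report the
 * return value of a probed function.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "do_fork returned %lu\n",
 *		       regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler	= my_ret_handler,
 *		.maxactive	= 20,
 *		.kp		= { .symbol_name = "do_fork" },
 *	};
 *
 *	register_kretprobe(&my_rp);
 */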

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (p->pre_handler == aggr_pre_handler) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier callback, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * kill kprobes which have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but is not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Look up and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* look up the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s %s%s\n",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "),
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
	else
		seq_printf(pi, "%p  %s  %p %s%s\n",
			p->addr, kprobe_type, p->addr,
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
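
/*
 * Each line of /sys/kernel/debug/kprobes/list, produced by report_probe()
 * above, looks like this (example values):
 *
 *	c015d71a  k  vfs_read+0x0
 *	c011a316  j  do_fork+0x0
 *	c03dedc5  r  tcp_v4_rcv+0x0
 *
 * where the second column is "k" (kprobe), "j" (jprobe) or "r"
 * (kretprobe), optionally followed by [GONE] or [DISABLED].
 */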

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		arm_kprobe(p);

	p->flags &= ~KPROBE_FLAG_DISABLED;
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);

static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				arch_arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};
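
/*
 * These file operations back /sys/kernel/debug/kprobes/enabled; e.g.
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled
 *
 * disarms every registered kprobe, and writing 1 arms them again.
 */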

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);