xref: /linux/kernel/kprobes.c (revision 81c29435073355b8194986a2193d3e7b9d449225)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Kernel Probes (KProbes)
4  *
5  * Copyright (C) IBM Corporation, 2002, 2004
6  *
7  * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
8  *		Probes initial implementation (includes suggestions from
9  *		Rusty Russell).
10  * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
11  *		hlists and exceptions notifier as suggested by Andi Kleen.
12  * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
13  *		interface to access function arguments.
14  * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
15  *		exceptions notifier to be first on the priority list.
16  * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
17  *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
18  *		<prasanna@in.ibm.com> added function-return probes.
19  */
20 
21 #define pr_fmt(fmt) "kprobes: " fmt
22 
23 #include <linux/kprobes.h>
24 #include <linux/hash.h>
25 #include <linux/init.h>
26 #include <linux/slab.h>
27 #include <linux/stddef.h>
28 #include <linux/export.h>
29 #include <linux/moduleloader.h>
30 #include <linux/kallsyms.h>
31 #include <linux/freezer.h>
32 #include <linux/seq_file.h>
33 #include <linux/debugfs.h>
34 #include <linux/sysctl.h>
35 #include <linux/kdebug.h>
36 #include <linux/memory.h>
37 #include <linux/ftrace.h>
38 #include <linux/cpu.h>
39 #include <linux/jump_label.h>
40 #include <linux/static_call.h>
41 #include <linux/perf_event.h>
42 
43 #include <asm/sections.h>
44 #include <asm/cacheflush.h>
45 #include <asm/errno.h>
46 #include <linux/uaccess.h>
47 
48 #define KPROBE_HASH_BITS 6
49 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
50 
51 #if !defined(CONFIG_OPTPROBES) || !defined(CONFIG_SYSCTL)
52 #define kprobe_sysctls_init() do { } while (0)
53 #endif
54 
55 static int kprobes_initialized;
56 /* kprobe_table can be accessed by
57  * - Normal hlist traversal and RCU add/del under 'kprobe_mutex' is held.
58  * Or
59  * - RCU hlist traversal under disabling preempt (breakpoint handlers)
60  */
61 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
62 
63 /* NOTE: change this value only with 'kprobe_mutex' held */
64 static bool kprobes_all_disarmed;
65 
66 /* This protects 'kprobe_table' and 'optimizing_list' */
67 static DEFINE_MUTEX(kprobe_mutex);
68 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);
69 
70 kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
71 					unsigned int __unused)
72 {
73 	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
74 }
75 
76 /*
77  * Blacklist -- list of 'struct kprobe_blacklist_entry' to store info where
78  * kprobes can not probe.
79  */
80 static LIST_HEAD(kprobe_blacklist);
81 
82 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
83 /*
84  * 'kprobe::ainsn.insn' points to the copy of the instruction to be
85  * single-stepped. x86_64, POWER4 and above have no-exec support and
86  * stepping on the instruction on a vmalloced/kmalloced/data page
87  * is a recipe for disaster
88  */
89 struct kprobe_insn_page {
90 	struct list_head list;
91 	kprobe_opcode_t *insns;		/* Page of instruction slots */
92 	struct kprobe_insn_cache *cache;
93 	int nused;
94 	int ngarbage;
95 	char slot_used[];
96 };
97 
98 #define KPROBE_INSN_PAGE_SIZE(slots)			\
99 	(offsetof(struct kprobe_insn_page, slot_used) +	\
100 	 (sizeof(char) * (slots)))
101 
102 static int slots_per_page(struct kprobe_insn_cache *c)
103 {
104 	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
105 }
106 
107 enum kprobe_slot_state {
108 	SLOT_CLEAN = 0,
109 	SLOT_DIRTY = 1,
110 	SLOT_USED = 2,
111 };
112 
113 void __weak *alloc_insn_page(void)
114 {
115 	/*
116 	 * Use module_alloc() so this page is within +/- 2GB of where the
117 	 * kernel image and loaded module images reside. This is required
118 	 * for most of the architectures.
119 	 * (e.g. x86-64 needs this to handle the %rip-relative fixups.)
120 	 */
121 	return module_alloc(PAGE_SIZE);
122 }
123 
124 static void free_insn_page(void *page)
125 {
126 	module_memfree(page);
127 }
128 
129 struct kprobe_insn_cache kprobe_insn_slots = {
130 	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
131 	.alloc = alloc_insn_page,
132 	.free = free_insn_page,
133 	.sym = KPROBE_INSN_PAGE_SYM,
134 	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
135 	.insn_size = MAX_INSN_SIZE,
136 	.nr_garbage = 0,
137 };
138 static int collect_garbage_slots(struct kprobe_insn_cache *c);
139 
140 /**
141  * __get_insn_slot() - Find a slot on an executable page for an instruction.
142  * We allocate an executable page if there's no room on existing ones.
143  */
144 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
145 {
146 	struct kprobe_insn_page *kip;
147 	kprobe_opcode_t *slot = NULL;
148 
149 	/* Since the slot array is not protected by rcu, we need a mutex */
150 	mutex_lock(&c->mutex);
151  retry:
152 	rcu_read_lock();
153 	list_for_each_entry_rcu(kip, &c->pages, list) {
154 		if (kip->nused < slots_per_page(c)) {
155 			int i;
156 
157 			for (i = 0; i < slots_per_page(c); i++) {
158 				if (kip->slot_used[i] == SLOT_CLEAN) {
159 					kip->slot_used[i] = SLOT_USED;
160 					kip->nused++;
161 					slot = kip->insns + (i * c->insn_size);
162 					rcu_read_unlock();
163 					goto out;
164 				}
165 			}
166 			/* kip->nused is broken. Fix it. */
167 			kip->nused = slots_per_page(c);
168 			WARN_ON(1);
169 		}
170 	}
171 	rcu_read_unlock();
172 
173 	/* If there are any garbage slots, collect it and try again. */
174 	if (c->nr_garbage && collect_garbage_slots(c) == 0)
175 		goto retry;
176 
177 	/* All out of space.  Need to allocate a new page. */
178 	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
179 	if (!kip)
180 		goto out;
181 
182 	kip->insns = c->alloc();
183 	if (!kip->insns) {
184 		kfree(kip);
185 		goto out;
186 	}
187 	INIT_LIST_HEAD(&kip->list);
188 	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
189 	kip->slot_used[0] = SLOT_USED;
190 	kip->nused = 1;
191 	kip->ngarbage = 0;
192 	kip->cache = c;
193 	list_add_rcu(&kip->list, &c->pages);
194 	slot = kip->insns;
195 
196 	/* Record the perf ksymbol register event after adding the page */
197 	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
198 			   PAGE_SIZE, false, c->sym);
199 out:
200 	mutex_unlock(&c->mutex);
201 	return slot;
202 }
203 
204 /* Return true if all garbages are collected, otherwise false. */
205 static bool collect_one_slot(struct kprobe_insn_page *kip, int idx)
206 {
207 	kip->slot_used[idx] = SLOT_CLEAN;
208 	kip->nused--;
209 	if (kip->nused == 0) {
210 		/*
211 		 * Page is no longer in use.  Free it unless
212 		 * it's the last one.  We keep the last one
213 		 * so as not to have to set it up again the
214 		 * next time somebody inserts a probe.
215 		 */
216 		if (!list_is_singular(&kip->list)) {
217 			/*
218 			 * Record perf ksymbol unregister event before removing
219 			 * the page.
220 			 */
221 			perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
222 					   (unsigned long)kip->insns, PAGE_SIZE, true,
223 					   kip->cache->sym);
224 			list_del_rcu(&kip->list);
225 			synchronize_rcu();
226 			kip->cache->free(kip->insns);
227 			kfree(kip);
228 		}
229 		return true;
230 	}
231 	return false;
232 }
233 
234 static int collect_garbage_slots(struct kprobe_insn_cache *c)
235 {
236 	struct kprobe_insn_page *kip, *next;
237 
238 	/* Ensure no-one is interrupted on the garbages */
239 	synchronize_rcu();
240 
241 	list_for_each_entry_safe(kip, next, &c->pages, list) {
242 		int i;
243 
244 		if (kip->ngarbage == 0)
245 			continue;
246 		kip->ngarbage = 0;	/* we will collect all garbages */
247 		for (i = 0; i < slots_per_page(c); i++) {
248 			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
249 				break;
250 		}
251 	}
252 	c->nr_garbage = 0;
253 	return 0;
254 }
255 
256 void __free_insn_slot(struct kprobe_insn_cache *c,
257 		      kprobe_opcode_t *slot, int dirty)
258 {
259 	struct kprobe_insn_page *kip;
260 	long idx;
261 
262 	mutex_lock(&c->mutex);
263 	rcu_read_lock();
264 	list_for_each_entry_rcu(kip, &c->pages, list) {
265 		idx = ((long)slot - (long)kip->insns) /
266 			(c->insn_size * sizeof(kprobe_opcode_t));
267 		if (idx >= 0 && idx < slots_per_page(c))
268 			goto out;
269 	}
270 	/* Could not find this slot. */
271 	WARN_ON(1);
272 	kip = NULL;
273 out:
274 	rcu_read_unlock();
275 	/* Mark and sweep: this may sleep */
276 	if (kip) {
277 		/* Check double free */
278 		WARN_ON(kip->slot_used[idx] != SLOT_USED);
279 		if (dirty) {
280 			kip->slot_used[idx] = SLOT_DIRTY;
281 			kip->ngarbage++;
282 			if (++c->nr_garbage > slots_per_page(c))
283 				collect_garbage_slots(c);
284 		} else {
285 			collect_one_slot(kip, idx);
286 		}
287 	}
288 	mutex_unlock(&c->mutex);
289 }
290 
291 /*
292  * Check given address is on the page of kprobe instruction slots.
293  * This will be used for checking whether the address on a stack
294  * is on a text area or not.
295  */
296 bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
297 {
298 	struct kprobe_insn_page *kip;
299 	bool ret = false;
300 
301 	rcu_read_lock();
302 	list_for_each_entry_rcu(kip, &c->pages, list) {
303 		if (addr >= (unsigned long)kip->insns &&
304 		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
305 			ret = true;
306 			break;
307 		}
308 	}
309 	rcu_read_unlock();
310 
311 	return ret;
312 }
313 
314 int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
315 			     unsigned long *value, char *type, char *sym)
316 {
317 	struct kprobe_insn_page *kip;
318 	int ret = -ERANGE;
319 
320 	rcu_read_lock();
321 	list_for_each_entry_rcu(kip, &c->pages, list) {
322 		if ((*symnum)--)
323 			continue;
324 		strscpy(sym, c->sym, KSYM_NAME_LEN);
325 		*type = 't';
326 		*value = (unsigned long)kip->insns;
327 		ret = 0;
328 		break;
329 	}
330 	rcu_read_unlock();
331 
332 	return ret;
333 }
334 
335 #ifdef CONFIG_OPTPROBES
336 void __weak *alloc_optinsn_page(void)
337 {
338 	return alloc_insn_page();
339 }
340 
341 void __weak free_optinsn_page(void *page)
342 {
343 	free_insn_page(page);
344 }
345 
346 /* For optimized_kprobe buffer */
347 struct kprobe_insn_cache kprobe_optinsn_slots = {
348 	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
349 	.alloc = alloc_optinsn_page,
350 	.free = free_optinsn_page,
351 	.sym = KPROBE_OPTINSN_PAGE_SYM,
352 	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
353 	/* .insn_size is initialized later */
354 	.nr_garbage = 0,
355 };
356 #endif
357 #endif
358 
359 /* We have preemption disabled.. so it is safe to use __ versions */
360 static inline void set_kprobe_instance(struct kprobe *kp)
361 {
362 	__this_cpu_write(kprobe_instance, kp);
363 }
364 
365 static inline void reset_kprobe_instance(void)
366 {
367 	__this_cpu_write(kprobe_instance, NULL);
368 }
369 
370 /*
371  * This routine is called either:
372  *	- under the 'kprobe_mutex' - during kprobe_[un]register().
373  *				OR
374  *	- with preemption disabled - from architecture specific code.
375  */
376 struct kprobe *get_kprobe(void *addr)
377 {
378 	struct hlist_head *head;
379 	struct kprobe *p;
380 
381 	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
382 	hlist_for_each_entry_rcu(p, head, hlist,
383 				 lockdep_is_held(&kprobe_mutex)) {
384 		if (p->addr == addr)
385 			return p;
386 	}
387 
388 	return NULL;
389 }
390 NOKPROBE_SYMBOL(get_kprobe);
391 
392 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
393 
394 /* Return true if 'p' is an aggregator */
395 static inline bool kprobe_aggrprobe(struct kprobe *p)
396 {
397 	return p->pre_handler == aggr_pre_handler;
398 }
399 
400 /* Return true if 'p' is unused */
401 static inline bool kprobe_unused(struct kprobe *p)
402 {
403 	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
404 	       list_empty(&p->list);
405 }
406 
407 /* Keep all fields in the kprobe consistent. */
408 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
409 {
410 	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
411 	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
412 }
413 
414 #ifdef CONFIG_OPTPROBES
415 /* NOTE: This is protected by 'kprobe_mutex'. */
416 static bool kprobes_allow_optimization;
417 
418 /*
419  * Call all 'kprobe::pre_handler' on the list, but ignores its return value.
420  * This must be called from arch-dep optimized caller.
421  */
422 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
423 {
424 	struct kprobe *kp;
425 
426 	list_for_each_entry_rcu(kp, &p->list, list) {
427 		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
428 			set_kprobe_instance(kp);
429 			kp->pre_handler(kp, regs);
430 		}
431 		reset_kprobe_instance();
432 	}
433 }
434 NOKPROBE_SYMBOL(opt_pre_handler);
435 
436 /* Free optimized instructions and optimized_kprobe */
437 static void free_aggr_kprobe(struct kprobe *p)
438 {
439 	struct optimized_kprobe *op;
440 
441 	op = container_of(p, struct optimized_kprobe, kp);
442 	arch_remove_optimized_kprobe(op);
443 	arch_remove_kprobe(p);
444 	kfree(op);
445 }
446 
447 /* Return true if the kprobe is ready for optimization. */
448 static inline int kprobe_optready(struct kprobe *p)
449 {
450 	struct optimized_kprobe *op;
451 
452 	if (kprobe_aggrprobe(p)) {
453 		op = container_of(p, struct optimized_kprobe, kp);
454 		return arch_prepared_optinsn(&op->optinsn);
455 	}
456 
457 	return 0;
458 }
459 
460 /* Return true if the kprobe is disarmed. Note: p must be on hash list */
461 bool kprobe_disarmed(struct kprobe *p)
462 {
463 	struct optimized_kprobe *op;
464 
465 	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
466 	if (!kprobe_aggrprobe(p))
467 		return kprobe_disabled(p);
468 
469 	op = container_of(p, struct optimized_kprobe, kp);
470 
471 	return kprobe_disabled(p) && list_empty(&op->list);
472 }
473 
474 /* Return true if the probe is queued on (un)optimizing lists */
475 static bool kprobe_queued(struct kprobe *p)
476 {
477 	struct optimized_kprobe *op;
478 
479 	if (kprobe_aggrprobe(p)) {
480 		op = container_of(p, struct optimized_kprobe, kp);
481 		if (!list_empty(&op->list))
482 			return true;
483 	}
484 	return false;
485 }
486 
487 /*
488  * Return an optimized kprobe whose optimizing code replaces
489  * instructions including 'addr' (exclude breakpoint).
490  */
491 static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr)
492 {
493 	int i;
494 	struct kprobe *p = NULL;
495 	struct optimized_kprobe *op;
496 
497 	/* Don't check i == 0, since that is a breakpoint case. */
498 	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++)
499 		p = get_kprobe(addr - i);
500 
501 	if (p && kprobe_optready(p)) {
502 		op = container_of(p, struct optimized_kprobe, kp);
503 		if (arch_within_optimized_kprobe(op, addr))
504 			return p;
505 	}
506 
507 	return NULL;
508 }
509 
510 /* Optimization staging list, protected by 'kprobe_mutex' */
511 static LIST_HEAD(optimizing_list);
512 static LIST_HEAD(unoptimizing_list);
513 static LIST_HEAD(freeing_list);
514 
515 static void kprobe_optimizer(struct work_struct *work);
516 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
517 #define OPTIMIZE_DELAY 5
518 
519 /*
520  * Optimize (replace a breakpoint with a jump) kprobes listed on
521  * 'optimizing_list'.
522  */
523 static void do_optimize_kprobes(void)
524 {
525 	lockdep_assert_held(&text_mutex);
526 	/*
527 	 * The optimization/unoptimization refers 'online_cpus' via
528 	 * stop_machine() and cpu-hotplug modifies the 'online_cpus'.
529 	 * And same time, 'text_mutex' will be held in cpu-hotplug and here.
530 	 * This combination can cause a deadlock (cpu-hotplug tries to lock
531 	 * 'text_mutex' but stop_machine() can not be done because
532 	 * the 'online_cpus' has been changed)
533 	 * To avoid this deadlock, caller must have locked cpu-hotplug
534 	 * for preventing cpu-hotplug outside of 'text_mutex' locking.
535 	 */
536 	lockdep_assert_cpus_held();
537 
538 	/* Optimization never be done when disarmed */
539 	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
540 	    list_empty(&optimizing_list))
541 		return;
542 
543 	arch_optimize_kprobes(&optimizing_list);
544 }
545 
546 /*
547  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
548  * if need) kprobes listed on 'unoptimizing_list'.
549  */
550 static void do_unoptimize_kprobes(void)
551 {
552 	struct optimized_kprobe *op, *tmp;
553 
554 	lockdep_assert_held(&text_mutex);
555 	/* See comment in do_optimize_kprobes() */
556 	lockdep_assert_cpus_held();
557 
558 	if (!list_empty(&unoptimizing_list))
559 		arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
560 
561 	/* Loop on 'freeing_list' for disarming and removing from kprobe hash list */
562 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
563 		/* Switching from detour code to origin */
564 		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
565 		/* Disarm probes if marked disabled and not gone */
566 		if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp))
567 			arch_disarm_kprobe(&op->kp);
568 		if (kprobe_unused(&op->kp)) {
569 			/*
570 			 * Remove unused probes from hash list. After waiting
571 			 * for synchronization, these probes are reclaimed.
572 			 * (reclaiming is done by do_free_cleaned_kprobes().)
573 			 */
574 			hlist_del_rcu(&op->kp.hlist);
575 		} else
576 			list_del_init(&op->list);
577 	}
578 }
579 
580 /* Reclaim all kprobes on the 'freeing_list' */
581 static void do_free_cleaned_kprobes(void)
582 {
583 	struct optimized_kprobe *op, *tmp;
584 
585 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
586 		list_del_init(&op->list);
587 		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
588 			/*
589 			 * This must not happen, but if there is a kprobe
590 			 * still in use, keep it on kprobes hash list.
591 			 */
592 			continue;
593 		}
594 		free_aggr_kprobe(&op->kp);
595 	}
596 }
597 
598 /* Start optimizer after OPTIMIZE_DELAY passed */
599 static void kick_kprobe_optimizer(void)
600 {
601 	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
602 }
603 
604 /* Kprobe jump optimizer */
605 static void kprobe_optimizer(struct work_struct *work)
606 {
607 	mutex_lock(&kprobe_mutex);
608 	cpus_read_lock();
609 	mutex_lock(&text_mutex);
610 
611 	/*
612 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
613 	 * kprobes before waiting for quiesence period.
614 	 */
615 	do_unoptimize_kprobes();
616 
617 	/*
618 	 * Step 2: Wait for quiesence period to ensure all potentially
619 	 * preempted tasks to have normally scheduled. Because optprobe
620 	 * may modify multiple instructions, there is a chance that Nth
621 	 * instruction is preempted. In that case, such tasks can return
622 	 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
623 	 * Note that on non-preemptive kernel, this is transparently converted
624 	 * to synchronoze_sched() to wait for all interrupts to have completed.
625 	 */
626 	synchronize_rcu_tasks();
627 
628 	/* Step 3: Optimize kprobes after quiesence period */
629 	do_optimize_kprobes();
630 
631 	/* Step 4: Free cleaned kprobes after quiesence period */
632 	do_free_cleaned_kprobes();
633 
634 	mutex_unlock(&text_mutex);
635 	cpus_read_unlock();
636 
637 	/* Step 5: Kick optimizer again if needed */
638 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
639 		kick_kprobe_optimizer();
640 
641 	mutex_unlock(&kprobe_mutex);
642 }
643 
644 /* Wait for completing optimization and unoptimization */
645 void wait_for_kprobe_optimizer(void)
646 {
647 	mutex_lock(&kprobe_mutex);
648 
649 	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
650 		mutex_unlock(&kprobe_mutex);
651 
652 		/* This will also make 'optimizing_work' execute immmediately */
653 		flush_delayed_work(&optimizing_work);
654 		/* 'optimizing_work' might not have been queued yet, relax */
655 		cpu_relax();
656 
657 		mutex_lock(&kprobe_mutex);
658 	}
659 
660 	mutex_unlock(&kprobe_mutex);
661 }
662 
663 bool optprobe_queued_unopt(struct optimized_kprobe *op)
664 {
665 	struct optimized_kprobe *_op;
666 
667 	list_for_each_entry(_op, &unoptimizing_list, list) {
668 		if (op == _op)
669 			return true;
670 	}
671 
672 	return false;
673 }
674 
675 /* Optimize kprobe if p is ready to be optimized */
676 static void optimize_kprobe(struct kprobe *p)
677 {
678 	struct optimized_kprobe *op;
679 
680 	/* Check if the kprobe is disabled or not ready for optimization. */
681 	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
682 	    (kprobe_disabled(p) || kprobes_all_disarmed))
683 		return;
684 
685 	/* kprobes with 'post_handler' can not be optimized */
686 	if (p->post_handler)
687 		return;
688 
689 	op = container_of(p, struct optimized_kprobe, kp);
690 
691 	/* Check there is no other kprobes at the optimized instructions */
692 	if (arch_check_optimized_kprobe(op) < 0)
693 		return;
694 
695 	/* Check if it is already optimized. */
696 	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
697 		if (optprobe_queued_unopt(op)) {
698 			/* This is under unoptimizing. Just dequeue the probe */
699 			list_del_init(&op->list);
700 		}
701 		return;
702 	}
703 	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
704 
705 	/*
706 	 * On the 'unoptimizing_list' and 'optimizing_list',
707 	 * 'op' must have OPTIMIZED flag
708 	 */
709 	if (WARN_ON_ONCE(!list_empty(&op->list)))
710 		return;
711 
712 	list_add(&op->list, &optimizing_list);
713 	kick_kprobe_optimizer();
714 }
715 
716 /* Short cut to direct unoptimizing */
717 static void force_unoptimize_kprobe(struct optimized_kprobe *op)
718 {
719 	lockdep_assert_cpus_held();
720 	arch_unoptimize_kprobe(op);
721 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
722 }
723 
724 /* Unoptimize a kprobe if p is optimized */
725 static void unoptimize_kprobe(struct kprobe *p, bool force)
726 {
727 	struct optimized_kprobe *op;
728 
729 	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
730 		return; /* This is not an optprobe nor optimized */
731 
732 	op = container_of(p, struct optimized_kprobe, kp);
733 	if (!kprobe_optimized(p))
734 		return;
735 
736 	if (!list_empty(&op->list)) {
737 		if (optprobe_queued_unopt(op)) {
738 			/* Queued in unoptimizing queue */
739 			if (force) {
740 				/*
741 				 * Forcibly unoptimize the kprobe here, and queue it
742 				 * in the freeing list for release afterwards.
743 				 */
744 				force_unoptimize_kprobe(op);
745 				list_move(&op->list, &freeing_list);
746 			}
747 		} else {
748 			/* Dequeue from the optimizing queue */
749 			list_del_init(&op->list);
750 			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
751 		}
752 		return;
753 	}
754 
755 	/* Optimized kprobe case */
756 	if (force) {
757 		/* Forcibly update the code: this is a special case */
758 		force_unoptimize_kprobe(op);
759 	} else {
760 		list_add(&op->list, &unoptimizing_list);
761 		kick_kprobe_optimizer();
762 	}
763 }
764 
765 /* Cancel unoptimizing for reusing */
766 static int reuse_unused_kprobe(struct kprobe *ap)
767 {
768 	struct optimized_kprobe *op;
769 
770 	/*
771 	 * Unused kprobe MUST be on the way of delayed unoptimizing (means
772 	 * there is still a relative jump) and disabled.
773 	 */
774 	op = container_of(ap, struct optimized_kprobe, kp);
775 	WARN_ON_ONCE(list_empty(&op->list));
776 	/* Enable the probe again */
777 	ap->flags &= ~KPROBE_FLAG_DISABLED;
778 	/* Optimize it again. (remove from 'op->list') */
779 	if (!kprobe_optready(ap))
780 		return -EINVAL;
781 
782 	optimize_kprobe(ap);
783 	return 0;
784 }
785 
786 /* Remove optimized instructions */
787 static void kill_optimized_kprobe(struct kprobe *p)
788 {
789 	struct optimized_kprobe *op;
790 
791 	op = container_of(p, struct optimized_kprobe, kp);
792 	if (!list_empty(&op->list))
793 		/* Dequeue from the (un)optimization queue */
794 		list_del_init(&op->list);
795 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
796 
797 	if (kprobe_unused(p)) {
798 		/*
799 		 * Unused kprobe is on unoptimizing or freeing list. We move it
800 		 * to freeing_list and let the kprobe_optimizer() remove it from
801 		 * the kprobe hash list and free it.
802 		 */
803 		if (optprobe_queued_unopt(op))
804 			list_move(&op->list, &freeing_list);
805 	}
806 
807 	/* Don't touch the code, because it is already freed. */
808 	arch_remove_optimized_kprobe(op);
809 }
810 
811 static inline
812 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
813 {
814 	if (!kprobe_ftrace(p))
815 		arch_prepare_optimized_kprobe(op, p);
816 }
817 
818 /* Try to prepare optimized instructions */
819 static void prepare_optimized_kprobe(struct kprobe *p)
820 {
821 	struct optimized_kprobe *op;
822 
823 	op = container_of(p, struct optimized_kprobe, kp);
824 	__prepare_optimized_kprobe(op, p);
825 }
826 
827 /* Allocate new optimized_kprobe and try to prepare optimized instructions. */
828 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
829 {
830 	struct optimized_kprobe *op;
831 
832 	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
833 	if (!op)
834 		return NULL;
835 
836 	INIT_LIST_HEAD(&op->list);
837 	op->kp.addr = p->addr;
838 	__prepare_optimized_kprobe(op, p);
839 
840 	return &op->kp;
841 }
842 
843 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
844 
845 /*
846  * Prepare an optimized_kprobe and optimize it.
847  * NOTE: 'p' must be a normal registered kprobe.
848  */
849 static void try_to_optimize_kprobe(struct kprobe *p)
850 {
851 	struct kprobe *ap;
852 	struct optimized_kprobe *op;
853 
854 	/* Impossible to optimize ftrace-based kprobe. */
855 	if (kprobe_ftrace(p))
856 		return;
857 
858 	/* For preparing optimization, jump_label_text_reserved() is called. */
859 	cpus_read_lock();
860 	jump_label_lock();
861 	mutex_lock(&text_mutex);
862 
863 	ap = alloc_aggr_kprobe(p);
864 	if (!ap)
865 		goto out;
866 
867 	op = container_of(ap, struct optimized_kprobe, kp);
868 	if (!arch_prepared_optinsn(&op->optinsn)) {
869 		/* If failed to setup optimizing, fallback to kprobe. */
870 		arch_remove_optimized_kprobe(op);
871 		kfree(op);
872 		goto out;
873 	}
874 
875 	init_aggr_kprobe(ap, p);
876 	optimize_kprobe(ap);	/* This just kicks optimizer thread. */
877 
878 out:
879 	mutex_unlock(&text_mutex);
880 	jump_label_unlock();
881 	cpus_read_unlock();
882 }
883 
884 static void optimize_all_kprobes(void)
885 {
886 	struct hlist_head *head;
887 	struct kprobe *p;
888 	unsigned int i;
889 
890 	mutex_lock(&kprobe_mutex);
891 	/* If optimization is already allowed, just return. */
892 	if (kprobes_allow_optimization)
893 		goto out;
894 
895 	cpus_read_lock();
896 	kprobes_allow_optimization = true;
897 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
898 		head = &kprobe_table[i];
899 		hlist_for_each_entry(p, head, hlist)
900 			if (!kprobe_disabled(p))
901 				optimize_kprobe(p);
902 	}
903 	cpus_read_unlock();
904 	pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
905 out:
906 	mutex_unlock(&kprobe_mutex);
907 }
908 
909 #ifdef CONFIG_SYSCTL
910 static void unoptimize_all_kprobes(void)
911 {
912 	struct hlist_head *head;
913 	struct kprobe *p;
914 	unsigned int i;
915 
916 	mutex_lock(&kprobe_mutex);
917 	/* If optimization is already prohibited, just return. */
918 	if (!kprobes_allow_optimization) {
919 		mutex_unlock(&kprobe_mutex);
920 		return;
921 	}
922 
923 	cpus_read_lock();
924 	kprobes_allow_optimization = false;
925 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
926 		head = &kprobe_table[i];
927 		hlist_for_each_entry(p, head, hlist) {
928 			if (!kprobe_disabled(p))
929 				unoptimize_kprobe(p, false);
930 		}
931 	}
932 	cpus_read_unlock();
933 	mutex_unlock(&kprobe_mutex);
934 
935 	/* Wait for unoptimizing completion. */
936 	wait_for_kprobe_optimizer();
937 	pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
938 }
939 
940 static DEFINE_MUTEX(kprobe_sysctl_mutex);
941 static int sysctl_kprobes_optimization;
942 static int proc_kprobes_optimization_handler(struct ctl_table *table,
943 					     int write, void *buffer,
944 					     size_t *length, loff_t *ppos)
945 {
946 	int ret;
947 
948 	mutex_lock(&kprobe_sysctl_mutex);
949 	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
950 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
951 
952 	if (sysctl_kprobes_optimization)
953 		optimize_all_kprobes();
954 	else
955 		unoptimize_all_kprobes();
956 	mutex_unlock(&kprobe_sysctl_mutex);
957 
958 	return ret;
959 }
960 
961 static struct ctl_table kprobe_sysctls[] = {
962 	{
963 		.procname	= "kprobes-optimization",
964 		.data		= &sysctl_kprobes_optimization,
965 		.maxlen		= sizeof(int),
966 		.mode		= 0644,
967 		.proc_handler	= proc_kprobes_optimization_handler,
968 		.extra1		= SYSCTL_ZERO,
969 		.extra2		= SYSCTL_ONE,
970 	},
971 	{}
972 };
973 
974 static void __init kprobe_sysctls_init(void)
975 {
976 	register_sysctl_init("debug", kprobe_sysctls);
977 }
978 #endif /* CONFIG_SYSCTL */
979 
980 /* Put a breakpoint for a probe. */
981 static void __arm_kprobe(struct kprobe *p)
982 {
983 	struct kprobe *_p;
984 
985 	lockdep_assert_held(&text_mutex);
986 
987 	/* Find the overlapping optimized kprobes. */
988 	_p = get_optimized_kprobe(p->addr);
989 	if (unlikely(_p))
990 		/* Fallback to unoptimized kprobe */
991 		unoptimize_kprobe(_p, true);
992 
993 	arch_arm_kprobe(p);
994 	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
995 }
996 
997 /* Remove the breakpoint of a probe. */
998 static void __disarm_kprobe(struct kprobe *p, bool reopt)
999 {
1000 	struct kprobe *_p;
1001 
1002 	lockdep_assert_held(&text_mutex);
1003 
1004 	/* Try to unoptimize */
1005 	unoptimize_kprobe(p, kprobes_all_disarmed);
1006 
1007 	if (!kprobe_queued(p)) {
1008 		arch_disarm_kprobe(p);
1009 		/* If another kprobe was blocked, re-optimize it. */
1010 		_p = get_optimized_kprobe(p->addr);
1011 		if (unlikely(_p) && reopt)
1012 			optimize_kprobe(_p);
1013 	}
1014 	/*
1015 	 * TODO: Since unoptimization and real disarming will be done by
1016 	 * the worker thread, we can not check whether another probe are
1017 	 * unoptimized because of this probe here. It should be re-optimized
1018 	 * by the worker thread.
1019 	 */
1020 }
1021 
1022 #else /* !CONFIG_OPTPROBES */
1023 
1024 #define optimize_kprobe(p)			do {} while (0)
1025 #define unoptimize_kprobe(p, f)			do {} while (0)
1026 #define kill_optimized_kprobe(p)		do {} while (0)
1027 #define prepare_optimized_kprobe(p)		do {} while (0)
1028 #define try_to_optimize_kprobe(p)		do {} while (0)
1029 #define __arm_kprobe(p)				arch_arm_kprobe(p)
1030 #define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
1031 #define kprobe_disarmed(p)			kprobe_disabled(p)
1032 #define wait_for_kprobe_optimizer()		do {} while (0)
1033 
1034 static int reuse_unused_kprobe(struct kprobe *ap)
1035 {
1036 	/*
1037 	 * If the optimized kprobe is NOT supported, the aggr kprobe is
1038 	 * released at the same time that the last aggregated kprobe is
1039 	 * unregistered.
1040 	 * Thus there should be no chance to reuse unused kprobe.
1041 	 */
1042 	WARN_ON_ONCE(1);
1043 	return -EINVAL;
1044 }
1045 
1046 static void free_aggr_kprobe(struct kprobe *p)
1047 {
1048 	arch_remove_kprobe(p);
1049 	kfree(p);
1050 }
1051 
1052 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
1053 {
1054 	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
1055 }
1056 #endif /* CONFIG_OPTPROBES */
1057 
1058 #ifdef CONFIG_KPROBES_ON_FTRACE
1059 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
1060 	.func = kprobe_ftrace_handler,
1061 	.flags = FTRACE_OPS_FL_SAVE_REGS,
1062 };
1063 
1064 static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
1065 	.func = kprobe_ftrace_handler,
1066 	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
1067 };
1068 
1069 static int kprobe_ipmodify_enabled;
1070 static int kprobe_ftrace_enabled;
1071 
1072 static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1073 			       int *cnt)
1074 {
1075 	int ret = 0;
1076 
1077 	lockdep_assert_held(&kprobe_mutex);
1078 
1079 	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
1080 	if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret))
1081 		return ret;
1082 
1083 	if (*cnt == 0) {
1084 		ret = register_ftrace_function(ops);
1085 		if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
1086 			goto err_ftrace;
1087 	}
1088 
1089 	(*cnt)++;
1090 	return ret;
1091 
1092 err_ftrace:
1093 	/*
1094 	 * At this point, sinec ops is not registered, we should be sefe from
1095 	 * registering empty filter.
1096 	 */
1097 	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
1098 	return ret;
1099 }
1100 
1101 static int arm_kprobe_ftrace(struct kprobe *p)
1102 {
1103 	bool ipmodify = (p->post_handler != NULL);
1104 
1105 	return __arm_kprobe_ftrace(p,
1106 		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1107 		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1108 }
1109 
1110 static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1111 				  int *cnt)
1112 {
1113 	int ret = 0;
1114 
1115 	lockdep_assert_held(&kprobe_mutex);
1116 
1117 	if (*cnt == 1) {
1118 		ret = unregister_ftrace_function(ops);
1119 		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret))
1120 			return ret;
1121 	}
1122 
1123 	(*cnt)--;
1124 
1125 	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
1126 	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n",
1127 		  p->addr, ret);
1128 	return ret;
1129 }
1130 
1131 static int disarm_kprobe_ftrace(struct kprobe *p)
1132 {
1133 	bool ipmodify = (p->post_handler != NULL);
1134 
1135 	return __disarm_kprobe_ftrace(p,
1136 		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1137 		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1138 }
1139 #else	/* !CONFIG_KPROBES_ON_FTRACE */
1140 static inline int arm_kprobe_ftrace(struct kprobe *p)
1141 {
1142 	return -ENODEV;
1143 }
1144 
1145 static inline int disarm_kprobe_ftrace(struct kprobe *p)
1146 {
1147 	return -ENODEV;
1148 }
1149 #endif
1150 
1151 static int prepare_kprobe(struct kprobe *p)
1152 {
1153 	/* Must ensure p->addr is really on ftrace */
1154 	if (kprobe_ftrace(p))
1155 		return arch_prepare_kprobe_ftrace(p);
1156 
1157 	return arch_prepare_kprobe(p);
1158 }
1159 
1160 static int arm_kprobe(struct kprobe *kp)
1161 {
1162 	if (unlikely(kprobe_ftrace(kp)))
1163 		return arm_kprobe_ftrace(kp);
1164 
1165 	cpus_read_lock();
1166 	mutex_lock(&text_mutex);
1167 	__arm_kprobe(kp);
1168 	mutex_unlock(&text_mutex);
1169 	cpus_read_unlock();
1170 
1171 	return 0;
1172 }
1173 
1174 static int disarm_kprobe(struct kprobe *kp, bool reopt)
1175 {
1176 	if (unlikely(kprobe_ftrace(kp)))
1177 		return disarm_kprobe_ftrace(kp);
1178 
1179 	cpus_read_lock();
1180 	mutex_lock(&text_mutex);
1181 	__disarm_kprobe(kp, reopt);
1182 	mutex_unlock(&text_mutex);
1183 	cpus_read_unlock();
1184 
1185 	return 0;
1186 }
1187 
1188 /*
1189  * Aggregate handlers for multiple kprobes support - these handlers
1190  * take care of invoking the individual kprobe handlers on p->list
1191  */
1192 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1193 {
1194 	struct kprobe *kp;
1195 
1196 	list_for_each_entry_rcu(kp, &p->list, list) {
1197 		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1198 			set_kprobe_instance(kp);
1199 			if (kp->pre_handler(kp, regs))
1200 				return 1;
1201 		}
1202 		reset_kprobe_instance();
1203 	}
1204 	return 0;
1205 }
1206 NOKPROBE_SYMBOL(aggr_pre_handler);
1207 
1208 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1209 			      unsigned long flags)
1210 {
1211 	struct kprobe *kp;
1212 
1213 	list_for_each_entry_rcu(kp, &p->list, list) {
1214 		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1215 			set_kprobe_instance(kp);
1216 			kp->post_handler(kp, regs, flags);
1217 			reset_kprobe_instance();
1218 		}
1219 	}
1220 }
1221 NOKPROBE_SYMBOL(aggr_post_handler);
1222 
1223 /* Walks the list and increments 'nmissed' if 'p' has child probes. */
1224 void kprobes_inc_nmissed_count(struct kprobe *p)
1225 {
1226 	struct kprobe *kp;
1227 
1228 	if (!kprobe_aggrprobe(p)) {
1229 		p->nmissed++;
1230 	} else {
1231 		list_for_each_entry_rcu(kp, &p->list, list)
1232 			kp->nmissed++;
1233 	}
1234 }
1235 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
1236 
1237 static struct kprobe kprobe_busy = {
1238 	.addr = (void *) get_kprobe,
1239 };
1240 
1241 void kprobe_busy_begin(void)
1242 {
1243 	struct kprobe_ctlblk *kcb;
1244 
1245 	preempt_disable();
1246 	__this_cpu_write(current_kprobe, &kprobe_busy);
1247 	kcb = get_kprobe_ctlblk();
1248 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1249 }
1250 
1251 void kprobe_busy_end(void)
1252 {
1253 	__this_cpu_write(current_kprobe, NULL);
1254 	preempt_enable();
1255 }
1256 
1257 /* Add the new probe to 'ap->list'. */
1258 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1259 {
1260 	if (p->post_handler)
1261 		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */
1262 
1263 	list_add_rcu(&p->list, &ap->list);
1264 	if (p->post_handler && !ap->post_handler)
1265 		ap->post_handler = aggr_post_handler;
1266 
1267 	return 0;
1268 }
1269 
1270 /*
1271  * Fill in the required fields of the aggregator kprobe. Replace the
1272  * earlier kprobe in the hlist with the aggregator kprobe.
1273  */
1274 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1275 {
1276 	/* Copy the insn slot of 'p' to 'ap'. */
1277 	copy_kprobe(p, ap);
1278 	flush_insn_slot(ap);
1279 	ap->addr = p->addr;
1280 	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1281 	ap->pre_handler = aggr_pre_handler;
1282 	/* We don't care the kprobe which has gone. */
1283 	if (p->post_handler && !kprobe_gone(p))
1284 		ap->post_handler = aggr_post_handler;
1285 
1286 	INIT_LIST_HEAD(&ap->list);
1287 	INIT_HLIST_NODE(&ap->hlist);
1288 
1289 	list_add_rcu(&p->list, &ap->list);
1290 	hlist_replace_rcu(&p->hlist, &ap->hlist);
1291 }
1292 
1293 /*
1294  * This registers the second or subsequent kprobe at the same address.
1295  */
1296 static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1297 {
1298 	int ret = 0;
1299 	struct kprobe *ap = orig_p;
1300 
1301 	cpus_read_lock();
1302 
1303 	/* For preparing optimization, jump_label_text_reserved() is called */
1304 	jump_label_lock();
1305 	mutex_lock(&text_mutex);
1306 
1307 	if (!kprobe_aggrprobe(orig_p)) {
1308 		/* If 'orig_p' is not an 'aggr_kprobe', create new one. */
1309 		ap = alloc_aggr_kprobe(orig_p);
1310 		if (!ap) {
1311 			ret = -ENOMEM;
1312 			goto out;
1313 		}
1314 		init_aggr_kprobe(ap, orig_p);
1315 	} else if (kprobe_unused(ap)) {
1316 		/* This probe is going to die. Rescue it */
1317 		ret = reuse_unused_kprobe(ap);
1318 		if (ret)
1319 			goto out;
1320 	}
1321 
1322 	if (kprobe_gone(ap)) {
1323 		/*
1324 		 * Attempting to insert new probe at the same location that
1325 		 * had a probe in the module vaddr area which already
1326 		 * freed. So, the instruction slot has already been
1327 		 * released. We need a new slot for the new probe.
1328 		 */
1329 		ret = arch_prepare_kprobe(ap);
1330 		if (ret)
1331 			/*
1332 			 * Even if fail to allocate new slot, don't need to
1333 			 * free the 'ap'. It will be used next time, or
1334 			 * freed by unregister_kprobe().
1335 			 */
1336 			goto out;
1337 
1338 		/* Prepare optimized instructions if possible. */
1339 		prepare_optimized_kprobe(ap);
1340 
1341 		/*
1342 		 * Clear gone flag to prevent allocating new slot again, and
1343 		 * set disabled flag because it is not armed yet.
1344 		 */
1345 		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1346 			    | KPROBE_FLAG_DISABLED;
1347 	}
1348 
1349 	/* Copy the insn slot of 'p' to 'ap'. */
1350 	copy_kprobe(ap, p);
1351 	ret = add_new_kprobe(ap, p);
1352 
1353 out:
1354 	mutex_unlock(&text_mutex);
1355 	jump_label_unlock();
1356 	cpus_read_unlock();
1357 
1358 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1359 		ap->flags &= ~KPROBE_FLAG_DISABLED;
1360 		if (!kprobes_all_disarmed) {
1361 			/* Arm the breakpoint again. */
1362 			ret = arm_kprobe(ap);
1363 			if (ret) {
1364 				ap->flags |= KPROBE_FLAG_DISABLED;
1365 				list_del_rcu(&p->list);
1366 				synchronize_rcu();
1367 			}
1368 		}
1369 	}
1370 	return ret;
1371 }
1372 
1373 bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1374 {
1375 	/* The '__kprobes' functions and entry code must not be probed. */
1376 	return addr >= (unsigned long)__kprobes_text_start &&
1377 	       addr < (unsigned long)__kprobes_text_end;
1378 }
1379 
1380 static bool __within_kprobe_blacklist(unsigned long addr)
1381 {
1382 	struct kprobe_blacklist_entry *ent;
1383 
1384 	if (arch_within_kprobe_blacklist(addr))
1385 		return true;
1386 	/*
1387 	 * If 'kprobe_blacklist' is defined, check the address and
1388 	 * reject any probe registration in the prohibited area.
1389 	 */
1390 	list_for_each_entry(ent, &kprobe_blacklist, list) {
1391 		if (addr >= ent->start_addr && addr < ent->end_addr)
1392 			return true;
1393 	}
1394 	return false;
1395 }
1396 
1397 bool within_kprobe_blacklist(unsigned long addr)
1398 {
1399 	char symname[KSYM_NAME_LEN], *p;
1400 
1401 	if (__within_kprobe_blacklist(addr))
1402 		return true;
1403 
1404 	/* Check if the address is on a suffixed-symbol */
1405 	if (!lookup_symbol_name(addr, symname)) {
1406 		p = strchr(symname, '.');
1407 		if (!p)
1408 			return false;
1409 		*p = '\0';
1410 		addr = (unsigned long)kprobe_lookup_name(symname, 0);
1411 		if (addr)
1412 			return __within_kprobe_blacklist(addr);
1413 	}
1414 	return false;
1415 }
1416 
1417 /*
1418  * arch_adjust_kprobe_addr - adjust the address
1419  * @addr: symbol base address
1420  * @offset: offset within the symbol
1421  * @on_func_entry: was this @addr+@offset on the function entry
1422  *
1423  * Typically returns @addr + @offset, except for special cases where the
1424  * function might be prefixed by a CFI landing pad, in that case any offset
1425  * inside the landing pad is mapped to the first 'real' instruction of the
1426  * symbol.
1427  *
1428  * Specifically, for things like IBT/BTI, skip the resp. ENDBR/BTI.C
1429  * instruction at +0.
1430  */
1431 kprobe_opcode_t *__weak arch_adjust_kprobe_addr(unsigned long addr,
1432 						unsigned long offset,
1433 						bool *on_func_entry)
1434 {
1435 	*on_func_entry = !offset;
1436 	return (kprobe_opcode_t *)(addr + offset);
1437 }
1438 
1439 /*
1440  * If 'symbol_name' is specified, look it up and add the 'offset'
1441  * to it. This way, we can specify a relative address to a symbol.
1442  * This returns encoded errors if it fails to look up symbol or invalid
1443  * combination of parameters.
1444  */
1445 static kprobe_opcode_t *
1446 _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
1447 	     unsigned long offset, bool *on_func_entry)
1448 {
1449 	if ((symbol_name && addr) || (!symbol_name && !addr))
1450 		goto invalid;
1451 
1452 	if (symbol_name) {
1453 		/*
1454 		 * Input: @sym + @offset
1455 		 * Output: @addr + @offset
1456 		 *
1457 		 * NOTE: kprobe_lookup_name() does *NOT* fold the offset
1458 		 *       argument into it's output!
1459 		 */
1460 		addr = kprobe_lookup_name(symbol_name, offset);
1461 		if (!addr)
1462 			return ERR_PTR(-ENOENT);
1463 	}
1464 
1465 	/*
1466 	 * So here we have @addr + @offset, displace it into a new
1467 	 * @addr' + @offset' where @addr' is the symbol start address.
1468 	 */
1469 	addr = (void *)addr + offset;
1470 	if (!kallsyms_lookup_size_offset((unsigned long)addr, NULL, &offset))
1471 		return ERR_PTR(-ENOENT);
1472 	addr = (void *)addr - offset;
1473 
1474 	/*
1475 	 * Then ask the architecture to re-combine them, taking care of
1476 	 * magical function entry details while telling us if this was indeed
1477 	 * at the start of the function.
1478 	 */
1479 	addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry);
1480 	if (addr)
1481 		return addr;
1482 
1483 invalid:
1484 	return ERR_PTR(-EINVAL);
1485 }
1486 
1487 static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1488 {
1489 	bool on_func_entry;
1490 	return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
1491 }
1492 
1493 /*
1494  * Check the 'p' is valid and return the aggregator kprobe
1495  * at the same address.
1496  */
1497 static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1498 {
1499 	struct kprobe *ap, *list_p;
1500 
1501 	lockdep_assert_held(&kprobe_mutex);
1502 
1503 	ap = get_kprobe(p->addr);
1504 	if (unlikely(!ap))
1505 		return NULL;
1506 
1507 	if (p != ap) {
1508 		list_for_each_entry(list_p, &ap->list, list)
1509 			if (list_p == p)
1510 			/* kprobe p is a valid probe */
1511 				goto valid;
1512 		return NULL;
1513 	}
1514 valid:
1515 	return ap;
1516 }
1517 
1518 /*
1519  * Warn and return error if the kprobe is being re-registered since
1520  * there must be a software bug.
1521  */
1522 static inline int warn_kprobe_rereg(struct kprobe *p)
1523 {
1524 	int ret = 0;
1525 
1526 	mutex_lock(&kprobe_mutex);
1527 	if (WARN_ON_ONCE(__get_valid_kprobe(p)))
1528 		ret = -EINVAL;
1529 	mutex_unlock(&kprobe_mutex);
1530 
1531 	return ret;
1532 }
1533 
1534 static int check_ftrace_location(struct kprobe *p)
1535 {
1536 	unsigned long addr = (unsigned long)p->addr;
1537 
1538 	if (ftrace_location(addr) == addr) {
1539 #ifdef CONFIG_KPROBES_ON_FTRACE
1540 		p->flags |= KPROBE_FLAG_FTRACE;
1541 #else	/* !CONFIG_KPROBES_ON_FTRACE */
1542 		return -EINVAL;
1543 #endif
1544 	}
1545 	return 0;
1546 }
1547 
1548 static int check_kprobe_address_safe(struct kprobe *p,
1549 				     struct module **probed_mod)
1550 {
1551 	int ret;
1552 
1553 	ret = check_ftrace_location(p);
1554 	if (ret)
1555 		return ret;
1556 	jump_label_lock();
1557 	preempt_disable();
1558 
1559 	/* Ensure it is not in reserved area nor out of text */
1560 	if (!(core_kernel_text((unsigned long) p->addr) ||
1561 	    is_module_text_address((unsigned long) p->addr)) ||
1562 	    in_gate_area_no_mm((unsigned long) p->addr) ||
1563 	    within_kprobe_blacklist((unsigned long) p->addr) ||
1564 	    jump_label_text_reserved(p->addr, p->addr) ||
1565 	    static_call_text_reserved(p->addr, p->addr) ||
1566 	    find_bug((unsigned long)p->addr)) {
1567 		ret = -EINVAL;
1568 		goto out;
1569 	}
1570 
1571 	/* Check if 'p' is probing a module. */
1572 	*probed_mod = __module_text_address((unsigned long) p->addr);
1573 	if (*probed_mod) {
1574 		/*
1575 		 * We must hold a refcount of the probed module while updating
1576 		 * its code to prohibit unexpected unloading.
1577 		 */
1578 		if (unlikely(!try_module_get(*probed_mod))) {
1579 			ret = -ENOENT;
1580 			goto out;
1581 		}
1582 
1583 		/*
1584 		 * If the module freed '.init.text', we couldn't insert
1585 		 * kprobes in there.
1586 		 */
1587 		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1588 		    (*probed_mod)->state != MODULE_STATE_COMING) {
1589 			module_put(*probed_mod);
1590 			*probed_mod = NULL;
1591 			ret = -ENOENT;
1592 		}
1593 	}
1594 out:
1595 	preempt_enable();
1596 	jump_label_unlock();
1597 
1598 	return ret;
1599 }
1600 
1601 int register_kprobe(struct kprobe *p)
1602 {
1603 	int ret;
1604 	struct kprobe *old_p;
1605 	struct module *probed_mod;
1606 	kprobe_opcode_t *addr;
1607 	bool on_func_entry;
1608 
1609 	/* Adjust probe address from symbol */
1610 	addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
1611 	if (IS_ERR(addr))
1612 		return PTR_ERR(addr);
1613 	p->addr = addr;
1614 
1615 	ret = warn_kprobe_rereg(p);
1616 	if (ret)
1617 		return ret;
1618 
1619 	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1620 	p->flags &= KPROBE_FLAG_DISABLED;
1621 	p->nmissed = 0;
1622 	INIT_LIST_HEAD(&p->list);
1623 
1624 	ret = check_kprobe_address_safe(p, &probed_mod);
1625 	if (ret)
1626 		return ret;
1627 
1628 	mutex_lock(&kprobe_mutex);
1629 
1630 	if (on_func_entry)
1631 		p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
1632 
1633 	old_p = get_kprobe(p->addr);
1634 	if (old_p) {
1635 		/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
1636 		ret = register_aggr_kprobe(old_p, p);
1637 		goto out;
1638 	}
1639 
1640 	cpus_read_lock();
1641 	/* Prevent text modification */
1642 	mutex_lock(&text_mutex);
1643 	ret = prepare_kprobe(p);
1644 	mutex_unlock(&text_mutex);
1645 	cpus_read_unlock();
1646 	if (ret)
1647 		goto out;
1648 
1649 	INIT_HLIST_NODE(&p->hlist);
1650 	hlist_add_head_rcu(&p->hlist,
1651 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1652 
1653 	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
1654 		ret = arm_kprobe(p);
1655 		if (ret) {
1656 			hlist_del_rcu(&p->hlist);
1657 			synchronize_rcu();
1658 			goto out;
1659 		}
1660 	}
1661 
1662 	/* Try to optimize kprobe */
1663 	try_to_optimize_kprobe(p);
1664 out:
1665 	mutex_unlock(&kprobe_mutex);
1666 
1667 	if (probed_mod)
1668 		module_put(probed_mod);
1669 
1670 	return ret;
1671 }
1672 EXPORT_SYMBOL_GPL(register_kprobe);
1673 
1674 /* Check if all probes on the 'ap' are disabled. */
1675 static bool aggr_kprobe_disabled(struct kprobe *ap)
1676 {
1677 	struct kprobe *kp;
1678 
1679 	lockdep_assert_held(&kprobe_mutex);
1680 
1681 	list_for_each_entry(kp, &ap->list, list)
1682 		if (!kprobe_disabled(kp))
1683 			/*
1684 			 * Since there is an active probe on the list,
1685 			 * we can't disable this 'ap'.
1686 			 */
1687 			return false;
1688 
1689 	return true;
1690 }
1691 
1692 static struct kprobe *__disable_kprobe(struct kprobe *p)
1693 {
1694 	struct kprobe *orig_p;
1695 	int ret;
1696 
1697 	lockdep_assert_held(&kprobe_mutex);
1698 
1699 	/* Get an original kprobe for return */
1700 	orig_p = __get_valid_kprobe(p);
1701 	if (unlikely(orig_p == NULL))
1702 		return ERR_PTR(-EINVAL);
1703 
1704 	if (!kprobe_disabled(p)) {
1705 		/* Disable probe if it is a child probe */
1706 		if (p != orig_p)
1707 			p->flags |= KPROBE_FLAG_DISABLED;
1708 
1709 		/* Try to disarm and disable this/parent probe */
1710 		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1711 			/*
1712 			 * Don't be lazy here.  Even if 'kprobes_all_disarmed'
1713 			 * is false, 'orig_p' might not have been armed yet.
1714 			 * Note arm_all_kprobes() __tries__ to arm all kprobes
1715 			 * on the best effort basis.
1716 			 */
1717 			if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
1718 				ret = disarm_kprobe(orig_p, true);
1719 				if (ret) {
1720 					p->flags &= ~KPROBE_FLAG_DISABLED;
1721 					return ERR_PTR(ret);
1722 				}
1723 			}
1724 			orig_p->flags |= KPROBE_FLAG_DISABLED;
1725 		}
1726 	}
1727 
1728 	return orig_p;
1729 }
1730 
1731 /*
1732  * Unregister a kprobe without a scheduler synchronization.
1733  */
1734 static int __unregister_kprobe_top(struct kprobe *p)
1735 {
1736 	struct kprobe *ap, *list_p;
1737 
1738 	/* Disable kprobe. This will disarm it if needed. */
1739 	ap = __disable_kprobe(p);
1740 	if (IS_ERR(ap))
1741 		return PTR_ERR(ap);
1742 
1743 	if (ap == p)
1744 		/*
1745 		 * This probe is an independent(and non-optimized) kprobe
1746 		 * (not an aggrprobe). Remove from the hash list.
1747 		 */
1748 		goto disarmed;
1749 
1750 	/* Following process expects this probe is an aggrprobe */
1751 	WARN_ON(!kprobe_aggrprobe(ap));
1752 
1753 	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1754 		/*
1755 		 * !disarmed could be happen if the probe is under delayed
1756 		 * unoptimizing.
1757 		 */
1758 		goto disarmed;
1759 	else {
1760 		/* If disabling probe has special handlers, update aggrprobe */
1761 		if (p->post_handler && !kprobe_gone(p)) {
1762 			list_for_each_entry(list_p, &ap->list, list) {
1763 				if ((list_p != p) && (list_p->post_handler))
1764 					goto noclean;
1765 			}
1766 			/*
1767 			 * For the kprobe-on-ftrace case, we keep the
1768 			 * post_handler setting to identify this aggrprobe
1769 			 * armed with kprobe_ipmodify_ops.
1770 			 */
1771 			if (!kprobe_ftrace(ap))
1772 				ap->post_handler = NULL;
1773 		}
1774 noclean:
1775 		/*
1776 		 * Remove from the aggrprobe: this path will do nothing in
1777 		 * __unregister_kprobe_bottom().
1778 		 */
1779 		list_del_rcu(&p->list);
1780 		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1781 			/*
1782 			 * Try to optimize this probe again, because post
1783 			 * handler may have been changed.
1784 			 */
1785 			optimize_kprobe(ap);
1786 	}
1787 	return 0;
1788 
1789 disarmed:
1790 	hlist_del_rcu(&ap->hlist);
1791 	return 0;
1792 }
1793 
1794 static void __unregister_kprobe_bottom(struct kprobe *p)
1795 {
1796 	struct kprobe *ap;
1797 
1798 	if (list_empty(&p->list))
1799 		/* This is an independent kprobe */
1800 		arch_remove_kprobe(p);
1801 	else if (list_is_singular(&p->list)) {
1802 		/* This is the last child of an aggrprobe */
1803 		ap = list_entry(p->list.next, struct kprobe, list);
1804 		list_del(&p->list);
1805 		free_aggr_kprobe(ap);
1806 	}
1807 	/* Otherwise, do nothing. */
1808 }
1809 
1810 int register_kprobes(struct kprobe **kps, int num)
1811 {
1812 	int i, ret = 0;
1813 
1814 	if (num <= 0)
1815 		return -EINVAL;
1816 	for (i = 0; i < num; i++) {
1817 		ret = register_kprobe(kps[i]);
1818 		if (ret < 0) {
1819 			if (i > 0)
1820 				unregister_kprobes(kps, i);
1821 			break;
1822 		}
1823 	}
1824 	return ret;
1825 }
1826 EXPORT_SYMBOL_GPL(register_kprobes);
1827 
1828 void unregister_kprobe(struct kprobe *p)
1829 {
1830 	unregister_kprobes(&p, 1);
1831 }
1832 EXPORT_SYMBOL_GPL(unregister_kprobe);
1833 
1834 void unregister_kprobes(struct kprobe **kps, int num)
1835 {
1836 	int i;
1837 
1838 	if (num <= 0)
1839 		return;
1840 	mutex_lock(&kprobe_mutex);
1841 	for (i = 0; i < num; i++)
1842 		if (__unregister_kprobe_top(kps[i]) < 0)
1843 			kps[i]->addr = NULL;
1844 	mutex_unlock(&kprobe_mutex);
1845 
1846 	synchronize_rcu();
1847 	for (i = 0; i < num; i++)
1848 		if (kps[i]->addr)
1849 			__unregister_kprobe_bottom(kps[i]);
1850 }
1851 EXPORT_SYMBOL_GPL(unregister_kprobes);
1852 
1853 int __weak kprobe_exceptions_notify(struct notifier_block *self,
1854 					unsigned long val, void *data)
1855 {
1856 	return NOTIFY_DONE;
1857 }
1858 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1859 
1860 static struct notifier_block kprobe_exceptions_nb = {
1861 	.notifier_call = kprobe_exceptions_notify,
1862 	.priority = 0x7fffffff /* we need to be notified first */
1863 };
1864 
1865 #ifdef CONFIG_KRETPROBES
1866 
1867 #if !defined(CONFIG_KRETPROBE_ON_RETHOOK)
1868 static void free_rp_inst_rcu(struct rcu_head *head)
1869 {
1870 	struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
1871 
1872 	if (refcount_dec_and_test(&ri->rph->ref))
1873 		kfree(ri->rph);
1874 	kfree(ri);
1875 }
1876 NOKPROBE_SYMBOL(free_rp_inst_rcu);
1877 
1878 static void recycle_rp_inst(struct kretprobe_instance *ri)
1879 {
1880 	struct kretprobe *rp = get_kretprobe(ri);
1881 
1882 	if (likely(rp))
1883 		freelist_add(&ri->freelist, &rp->freelist);
1884 	else
1885 		call_rcu(&ri->rcu, free_rp_inst_rcu);
1886 }
1887 NOKPROBE_SYMBOL(recycle_rp_inst);
1888 
1889 /*
1890  * This function is called from delayed_put_task_struct() when a task is
1891  * dead and cleaned up to recycle any kretprobe instances associated with
1892  * this task. These left over instances represent probed functions that
1893  * have been called but will never return.
1894  */
1895 void kprobe_flush_task(struct task_struct *tk)
1896 {
1897 	struct kretprobe_instance *ri;
1898 	struct llist_node *node;
1899 
1900 	/* Early boot, not yet initialized. */
1901 	if (unlikely(!kprobes_initialized))
1902 		return;
1903 
1904 	kprobe_busy_begin();
1905 
1906 	node = __llist_del_all(&tk->kretprobe_instances);
1907 	while (node) {
1908 		ri = container_of(node, struct kretprobe_instance, llist);
1909 		node = node->next;
1910 
1911 		recycle_rp_inst(ri);
1912 	}
1913 
1914 	kprobe_busy_end();
1915 }
1916 NOKPROBE_SYMBOL(kprobe_flush_task);
1917 
1918 static inline void free_rp_inst(struct kretprobe *rp)
1919 {
1920 	struct kretprobe_instance *ri;
1921 	struct freelist_node *node;
1922 	int count = 0;
1923 
1924 	node = rp->freelist.head;
1925 	while (node) {
1926 		ri = container_of(node, struct kretprobe_instance, freelist);
1927 		node = node->next;
1928 
1929 		kfree(ri);
1930 		count++;
1931 	}
1932 
1933 	if (refcount_sub_and_test(count, &rp->rph->ref)) {
1934 		kfree(rp->rph);
1935 		rp->rph = NULL;
1936 	}
1937 }
1938 
1939 /* This assumes the 'tsk' is the current task or the is not running. */
1940 static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk,
1941 						  struct llist_node **cur)
1942 {
1943 	struct kretprobe_instance *ri = NULL;
1944 	struct llist_node *node = *cur;
1945 
1946 	if (!node)
1947 		node = tsk->kretprobe_instances.first;
1948 	else
1949 		node = node->next;
1950 
1951 	while (node) {
1952 		ri = container_of(node, struct kretprobe_instance, llist);
1953 		if (ri->ret_addr != kretprobe_trampoline_addr()) {
1954 			*cur = node;
1955 			return ri->ret_addr;
1956 		}
1957 		node = node->next;
1958 	}
1959 	return NULL;
1960 }
1961 NOKPROBE_SYMBOL(__kretprobe_find_ret_addr);
1962 
1963 /**
1964  * kretprobe_find_ret_addr -- Find correct return address modified by kretprobe
1965  * @tsk: Target task
1966  * @fp: A frame pointer
1967  * @cur: a storage of the loop cursor llist_node pointer for next call
1968  *
1969  * Find the correct return address modified by a kretprobe on @tsk in unsigned
1970  * long type. If it finds the return address, this returns that address value,
1971  * or this returns 0.
1972  * The @tsk must be 'current' or a task which is not running. @fp is a hint
1973  * to get the currect return address - which is compared with the
1974  * kretprobe_instance::fp field. The @cur is a loop cursor for searching the
1975  * kretprobe return addresses on the @tsk. The '*@cur' should be NULL at the
1976  * first call, but '@cur' itself must NOT NULL.
1977  */
1978 unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
1979 				      struct llist_node **cur)
1980 {
1981 	struct kretprobe_instance *ri = NULL;
1982 	kprobe_opcode_t *ret;
1983 
1984 	if (WARN_ON_ONCE(!cur))
1985 		return 0;
1986 
1987 	do {
1988 		ret = __kretprobe_find_ret_addr(tsk, cur);
1989 		if (!ret)
1990 			break;
1991 		ri = container_of(*cur, struct kretprobe_instance, llist);
1992 	} while (ri->fp != fp);
1993 
1994 	return (unsigned long)ret;
1995 }
1996 NOKPROBE_SYMBOL(kretprobe_find_ret_addr);
1997 
1998 void __weak arch_kretprobe_fixup_return(struct pt_regs *regs,
1999 					kprobe_opcode_t *correct_ret_addr)
2000 {
2001 	/*
2002 	 * Do nothing by default. Please fill this to update the fake return
2003 	 * address on the stack with the correct one on each arch if possible.
2004 	 */
2005 }
2006 
2007 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
2008 					     void *frame_pointer)
2009 {
2010 	kprobe_opcode_t *correct_ret_addr = NULL;
2011 	struct kretprobe_instance *ri = NULL;
2012 	struct llist_node *first, *node = NULL;
2013 	struct kretprobe *rp;
2014 
2015 	/* Find correct address and all nodes for this frame. */
2016 	correct_ret_addr = __kretprobe_find_ret_addr(current, &node);
2017 	if (!correct_ret_addr) {
2018 		pr_err("kretprobe: Return address not found, not execute handler. Maybe there is a bug in the kernel.\n");
2019 		BUG_ON(1);
2020 	}
2021 
2022 	/*
2023 	 * Set the return address as the instruction pointer, because if the
2024 	 * user handler calls stack_trace_save_regs() with this 'regs',
2025 	 * the stack trace will start from the instruction pointer.
2026 	 */
2027 	instruction_pointer_set(regs, (unsigned long)correct_ret_addr);
2028 
2029 	/* Run the user handler of the nodes. */
2030 	first = current->kretprobe_instances.first;
2031 	while (first) {
2032 		ri = container_of(first, struct kretprobe_instance, llist);
2033 
2034 		if (WARN_ON_ONCE(ri->fp != frame_pointer))
2035 			break;
2036 
2037 		rp = get_kretprobe(ri);
2038 		if (rp && rp->handler) {
2039 			struct kprobe *prev = kprobe_running();
2040 
2041 			__this_cpu_write(current_kprobe, &rp->kp);
2042 			ri->ret_addr = correct_ret_addr;
2043 			rp->handler(ri, regs);
2044 			__this_cpu_write(current_kprobe, prev);
2045 		}
2046 		if (first == node)
2047 			break;
2048 
2049 		first = first->next;
2050 	}
2051 
2052 	arch_kretprobe_fixup_return(regs, correct_ret_addr);
2053 
2054 	/* Unlink all nodes for this frame. */
2055 	first = current->kretprobe_instances.first;
2056 	current->kretprobe_instances.first = node->next;
2057 	node->next = NULL;
2058 
2059 	/* Recycle free instances. */
2060 	while (first) {
2061 		ri = container_of(first, struct kretprobe_instance, llist);
2062 		first = first->next;
2063 
2064 		recycle_rp_inst(ri);
2065 	}
2066 
2067 	return (unsigned long)correct_ret_addr;
2068 }
2069 NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
2070 
2071 /*
2072  * This kprobe pre_handler is registered with every kretprobe. When probe
2073  * hits it will set up the return probe.
2074  */
2075 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2076 {
2077 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
2078 	struct kretprobe_instance *ri;
2079 	struct freelist_node *fn;
2080 
2081 	fn = freelist_try_get(&rp->freelist);
2082 	if (!fn) {
2083 		rp->nmissed++;
2084 		return 0;
2085 	}
2086 
2087 	ri = container_of(fn, struct kretprobe_instance, freelist);
2088 
2089 	if (rp->entry_handler && rp->entry_handler(ri, regs)) {
2090 		freelist_add(&ri->freelist, &rp->freelist);
2091 		return 0;
2092 	}
2093 
2094 	arch_prepare_kretprobe(ri, regs);
2095 
2096 	__llist_add(&ri->llist, &current->kretprobe_instances);
2097 
2098 	return 0;
2099 }
2100 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2101 #else /* CONFIG_KRETPROBE_ON_RETHOOK */
2102 /*
2103  * This kprobe pre_handler is registered with every kretprobe. When probe
2104  * hits it will set up the return probe.
2105  */
2106 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2107 {
2108 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
2109 	struct kretprobe_instance *ri;
2110 	struct rethook_node *rhn;
2111 
2112 	rhn = rethook_try_get(rp->rh);
2113 	if (!rhn) {
2114 		rp->nmissed++;
2115 		return 0;
2116 	}
2117 
2118 	ri = container_of(rhn, struct kretprobe_instance, node);
2119 
2120 	if (rp->entry_handler && rp->entry_handler(ri, regs))
2121 		rethook_recycle(rhn);
2122 	else
2123 		rethook_hook(rhn, regs, kprobe_ftrace(p));
2124 
2125 	return 0;
2126 }
2127 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2128 
2129 static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
2130 				      struct pt_regs *regs)
2131 {
2132 	struct kretprobe *rp = (struct kretprobe *)data;
2133 	struct kretprobe_instance *ri;
2134 	struct kprobe_ctlblk *kcb;
2135 
2136 	/* The data must NOT be null. This means rethook data structure is broken. */
2137 	if (WARN_ON_ONCE(!data) || !rp->handler)
2138 		return;
2139 
2140 	__this_cpu_write(current_kprobe, &rp->kp);
2141 	kcb = get_kprobe_ctlblk();
2142 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
2143 
2144 	ri = container_of(rh, struct kretprobe_instance, node);
2145 	rp->handler(ri, regs);
2146 
2147 	__this_cpu_write(current_kprobe, NULL);
2148 }
2149 NOKPROBE_SYMBOL(kretprobe_rethook_handler);
2150 
2151 #endif /* !CONFIG_KRETPROBE_ON_RETHOOK */
2152 
2153 /**
2154  * kprobe_on_func_entry() -- check whether given address is function entry
2155  * @addr: Target address
2156  * @sym:  Target symbol name
2157  * @offset: The offset from the symbol or the address
2158  *
2159  * This checks whether the given @addr+@offset or @sym+@offset is on the
2160  * function entry address or not.
2161  * This returns 0 if it is the function entry, or -EINVAL if it is not.
2162  * And also it returns -ENOENT if it fails the symbol or address lookup.
2163  * Caller must pass @addr or @sym (either one must be NULL), or this
2164  * returns -EINVAL.
2165  */
2166 int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
2167 {
2168 	bool on_func_entry;
2169 	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset, &on_func_entry);
2170 
2171 	if (IS_ERR(kp_addr))
2172 		return PTR_ERR(kp_addr);
2173 
2174 	if (!on_func_entry)
2175 		return -EINVAL;
2176 
2177 	return 0;
2178 }
2179 
2180 int register_kretprobe(struct kretprobe *rp)
2181 {
2182 	int ret;
2183 	struct kretprobe_instance *inst;
2184 	int i;
2185 	void *addr;
2186 
2187 	ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
2188 	if (ret)
2189 		return ret;
2190 
2191 	/* If only 'rp->kp.addr' is specified, check reregistering kprobes */
2192 	if (rp->kp.addr && warn_kprobe_rereg(&rp->kp))
2193 		return -EINVAL;
2194 
2195 	if (kretprobe_blacklist_size) {
2196 		addr = kprobe_addr(&rp->kp);
2197 		if (IS_ERR(addr))
2198 			return PTR_ERR(addr);
2199 
2200 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2201 			if (kretprobe_blacklist[i].addr == addr)
2202 				return -EINVAL;
2203 		}
2204 	}
2205 
2206 	if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
2207 		return -E2BIG;
2208 
2209 	rp->kp.pre_handler = pre_handler_kretprobe;
2210 	rp->kp.post_handler = NULL;
2211 
2212 	/* Pre-allocate memory for max kretprobe instances */
2213 	if (rp->maxactive <= 0)
2214 		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
2215 
2216 #ifdef CONFIG_KRETPROBE_ON_RETHOOK
2217 	rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler);
2218 	if (!rp->rh)
2219 		return -ENOMEM;
2220 
2221 	for (i = 0; i < rp->maxactive; i++) {
2222 		inst = kzalloc(sizeof(struct kretprobe_instance) +
2223 			       rp->data_size, GFP_KERNEL);
2224 		if (inst == NULL) {
2225 			rethook_free(rp->rh);
2226 			rp->rh = NULL;
2227 			return -ENOMEM;
2228 		}
2229 		rethook_add_node(rp->rh, &inst->node);
2230 	}
2231 	rp->nmissed = 0;
2232 	/* Establish function entry probe point */
2233 	ret = register_kprobe(&rp->kp);
2234 	if (ret != 0) {
2235 		rethook_free(rp->rh);
2236 		rp->rh = NULL;
2237 	}
2238 #else	/* !CONFIG_KRETPROBE_ON_RETHOOK */
2239 	rp->freelist.head = NULL;
2240 	rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
2241 	if (!rp->rph)
2242 		return -ENOMEM;
2243 
2244 	rp->rph->rp = rp;
2245 	for (i = 0; i < rp->maxactive; i++) {
2246 		inst = kzalloc(sizeof(struct kretprobe_instance) +
2247 			       rp->data_size, GFP_KERNEL);
2248 		if (inst == NULL) {
2249 			refcount_set(&rp->rph->ref, i);
2250 			free_rp_inst(rp);
2251 			return -ENOMEM;
2252 		}
2253 		inst->rph = rp->rph;
2254 		freelist_add(&inst->freelist, &rp->freelist);
2255 	}
2256 	refcount_set(&rp->rph->ref, i);
2257 
2258 	rp->nmissed = 0;
2259 	/* Establish function entry probe point */
2260 	ret = register_kprobe(&rp->kp);
2261 	if (ret != 0)
2262 		free_rp_inst(rp);
2263 #endif
2264 	return ret;
2265 }
2266 EXPORT_SYMBOL_GPL(register_kretprobe);
2267 
2268 int register_kretprobes(struct kretprobe **rps, int num)
2269 {
2270 	int ret = 0, i;
2271 
2272 	if (num <= 0)
2273 		return -EINVAL;
2274 	for (i = 0; i < num; i++) {
2275 		ret = register_kretprobe(rps[i]);
2276 		if (ret < 0) {
2277 			if (i > 0)
2278 				unregister_kretprobes(rps, i);
2279 			break;
2280 		}
2281 	}
2282 	return ret;
2283 }
2284 EXPORT_SYMBOL_GPL(register_kretprobes);
2285 
2286 void unregister_kretprobe(struct kretprobe *rp)
2287 {
2288 	unregister_kretprobes(&rp, 1);
2289 }
2290 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2291 
2292 void unregister_kretprobes(struct kretprobe **rps, int num)
2293 {
2294 	int i;
2295 
2296 	if (num <= 0)
2297 		return;
2298 	mutex_lock(&kprobe_mutex);
2299 	for (i = 0; i < num; i++) {
2300 		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2301 			rps[i]->kp.addr = NULL;
2302 #ifdef CONFIG_KRETPROBE_ON_RETHOOK
2303 		rethook_free(rps[i]->rh);
2304 #else
2305 		rps[i]->rph->rp = NULL;
2306 #endif
2307 	}
2308 	mutex_unlock(&kprobe_mutex);
2309 
2310 	synchronize_rcu();
2311 	for (i = 0; i < num; i++) {
2312 		if (rps[i]->kp.addr) {
2313 			__unregister_kprobe_bottom(&rps[i]->kp);
2314 #ifndef CONFIG_KRETPROBE_ON_RETHOOK
2315 			free_rp_inst(rps[i]);
2316 #endif
2317 		}
2318 	}
2319 }
2320 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2321 
2322 #else /* CONFIG_KRETPROBES */
2323 int register_kretprobe(struct kretprobe *rp)
2324 {
2325 	return -EOPNOTSUPP;
2326 }
2327 EXPORT_SYMBOL_GPL(register_kretprobe);
2328 
2329 int register_kretprobes(struct kretprobe **rps, int num)
2330 {
2331 	return -EOPNOTSUPP;
2332 }
2333 EXPORT_SYMBOL_GPL(register_kretprobes);
2334 
2335 void unregister_kretprobe(struct kretprobe *rp)
2336 {
2337 }
2338 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2339 
2340 void unregister_kretprobes(struct kretprobe **rps, int num)
2341 {
2342 }
2343 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2344 
2345 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2346 {
2347 	return 0;
2348 }
2349 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2350 
2351 #endif /* CONFIG_KRETPROBES */
2352 
2353 /* Set the kprobe gone and remove its instruction buffer. */
2354 static void kill_kprobe(struct kprobe *p)
2355 {
2356 	struct kprobe *kp;
2357 
2358 	lockdep_assert_held(&kprobe_mutex);
2359 
2360 	/*
2361 	 * The module is going away. We should disarm the kprobe which
2362 	 * is using ftrace, because ftrace framework is still available at
2363 	 * 'MODULE_STATE_GOING' notification.
2364 	 */
2365 	if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
2366 		disarm_kprobe_ftrace(p);
2367 
2368 	p->flags |= KPROBE_FLAG_GONE;
2369 	if (kprobe_aggrprobe(p)) {
2370 		/*
2371 		 * If this is an aggr_kprobe, we have to list all the
2372 		 * chained probes and mark them GONE.
2373 		 */
2374 		list_for_each_entry(kp, &p->list, list)
2375 			kp->flags |= KPROBE_FLAG_GONE;
2376 		p->post_handler = NULL;
2377 		kill_optimized_kprobe(p);
2378 	}
2379 	/*
2380 	 * Here, we can remove insn_slot safely, because no thread calls
2381 	 * the original probed function (which will be freed soon) any more.
2382 	 */
2383 	arch_remove_kprobe(p);
2384 }
2385 
2386 /* Disable one kprobe */
2387 int disable_kprobe(struct kprobe *kp)
2388 {
2389 	int ret = 0;
2390 	struct kprobe *p;
2391 
2392 	mutex_lock(&kprobe_mutex);
2393 
2394 	/* Disable this kprobe */
2395 	p = __disable_kprobe(kp);
2396 	if (IS_ERR(p))
2397 		ret = PTR_ERR(p);
2398 
2399 	mutex_unlock(&kprobe_mutex);
2400 	return ret;
2401 }
2402 EXPORT_SYMBOL_GPL(disable_kprobe);
2403 
2404 /* Enable one kprobe */
2405 int enable_kprobe(struct kprobe *kp)
2406 {
2407 	int ret = 0;
2408 	struct kprobe *p;
2409 
2410 	mutex_lock(&kprobe_mutex);
2411 
2412 	/* Check whether specified probe is valid. */
2413 	p = __get_valid_kprobe(kp);
2414 	if (unlikely(p == NULL)) {
2415 		ret = -EINVAL;
2416 		goto out;
2417 	}
2418 
2419 	if (kprobe_gone(kp)) {
2420 		/* This kprobe has gone, we couldn't enable it. */
2421 		ret = -EINVAL;
2422 		goto out;
2423 	}
2424 
2425 	if (p != kp)
2426 		kp->flags &= ~KPROBE_FLAG_DISABLED;
2427 
2428 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2429 		p->flags &= ~KPROBE_FLAG_DISABLED;
2430 		ret = arm_kprobe(p);
2431 		if (ret) {
2432 			p->flags |= KPROBE_FLAG_DISABLED;
2433 			if (p != kp)
2434 				kp->flags |= KPROBE_FLAG_DISABLED;
2435 		}
2436 	}
2437 out:
2438 	mutex_unlock(&kprobe_mutex);
2439 	return ret;
2440 }
2441 EXPORT_SYMBOL_GPL(enable_kprobe);
2442 
2443 /* Caller must NOT call this in usual path. This is only for critical case */
2444 void dump_kprobe(struct kprobe *kp)
2445 {
2446 	pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n",
2447 	       kp->symbol_name, kp->offset, kp->addr);
2448 }
2449 NOKPROBE_SYMBOL(dump_kprobe);
2450 
2451 int kprobe_add_ksym_blacklist(unsigned long entry)
2452 {
2453 	struct kprobe_blacklist_entry *ent;
2454 	unsigned long offset = 0, size = 0;
2455 
2456 	if (!kernel_text_address(entry) ||
2457 	    !kallsyms_lookup_size_offset(entry, &size, &offset))
2458 		return -EINVAL;
2459 
2460 	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2461 	if (!ent)
2462 		return -ENOMEM;
2463 	ent->start_addr = entry;
2464 	ent->end_addr = entry + size;
2465 	INIT_LIST_HEAD(&ent->list);
2466 	list_add_tail(&ent->list, &kprobe_blacklist);
2467 
2468 	return (int)size;
2469 }
2470 
2471 /* Add all symbols in given area into kprobe blacklist */
2472 int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
2473 {
2474 	unsigned long entry;
2475 	int ret = 0;
2476 
2477 	for (entry = start; entry < end; entry += ret) {
2478 		ret = kprobe_add_ksym_blacklist(entry);
2479 		if (ret < 0)
2480 			return ret;
2481 		if (ret == 0)	/* In case of alias symbol */
2482 			ret = 1;
2483 	}
2484 	return 0;
2485 }
2486 
2487 /* Remove all symbols in given area from kprobe blacklist */
2488 static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
2489 {
2490 	struct kprobe_blacklist_entry *ent, *n;
2491 
2492 	list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
2493 		if (ent->start_addr < start || ent->start_addr >= end)
2494 			continue;
2495 		list_del(&ent->list);
2496 		kfree(ent);
2497 	}
2498 }
2499 
2500 static void kprobe_remove_ksym_blacklist(unsigned long entry)
2501 {
2502 	kprobe_remove_area_blacklist(entry, entry + 1);
2503 }
2504 
2505 int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
2506 				   char *type, char *sym)
2507 {
2508 	return -ERANGE;
2509 }
2510 
2511 int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2512 		       char *sym)
2513 {
2514 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
2515 	if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
2516 		return 0;
2517 #ifdef CONFIG_OPTPROBES
2518 	if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
2519 		return 0;
2520 #endif
2521 #endif
2522 	if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
2523 		return 0;
2524 	return -ERANGE;
2525 }
2526 
2527 int __init __weak arch_populate_kprobe_blacklist(void)
2528 {
2529 	return 0;
2530 }
2531 
2532 /*
2533  * Lookup and populate the kprobe_blacklist.
2534  *
2535  * Unlike the kretprobe blacklist, we'll need to determine
2536  * the range of addresses that belong to the said functions,
2537  * since a kprobe need not necessarily be at the beginning
2538  * of a function.
2539  */
2540 static int __init populate_kprobe_blacklist(unsigned long *start,
2541 					     unsigned long *end)
2542 {
2543 	unsigned long entry;
2544 	unsigned long *iter;
2545 	int ret;
2546 
2547 	for (iter = start; iter < end; iter++) {
2548 		entry = (unsigned long)dereference_symbol_descriptor((void *)*iter);
2549 		ret = kprobe_add_ksym_blacklist(entry);
2550 		if (ret == -EINVAL)
2551 			continue;
2552 		if (ret < 0)
2553 			return ret;
2554 	}
2555 
2556 	/* Symbols in '__kprobes_text' are blacklisted */
2557 	ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
2558 					(unsigned long)__kprobes_text_end);
2559 	if (ret)
2560 		return ret;
2561 
2562 	/* Symbols in 'noinstr' section are blacklisted */
2563 	ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
2564 					(unsigned long)__noinstr_text_end);
2565 
2566 	return ret ? : arch_populate_kprobe_blacklist();
2567 }
2568 
2569 static void add_module_kprobe_blacklist(struct module *mod)
2570 {
2571 	unsigned long start, end;
2572 	int i;
2573 
2574 	if (mod->kprobe_blacklist) {
2575 		for (i = 0; i < mod->num_kprobe_blacklist; i++)
2576 			kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
2577 	}
2578 
2579 	start = (unsigned long)mod->kprobes_text_start;
2580 	if (start) {
2581 		end = start + mod->kprobes_text_size;
2582 		kprobe_add_area_blacklist(start, end);
2583 	}
2584 
2585 	start = (unsigned long)mod->noinstr_text_start;
2586 	if (start) {
2587 		end = start + mod->noinstr_text_size;
2588 		kprobe_add_area_blacklist(start, end);
2589 	}
2590 }
2591 
2592 static void remove_module_kprobe_blacklist(struct module *mod)
2593 {
2594 	unsigned long start, end;
2595 	int i;
2596 
2597 	if (mod->kprobe_blacklist) {
2598 		for (i = 0; i < mod->num_kprobe_blacklist; i++)
2599 			kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
2600 	}
2601 
2602 	start = (unsigned long)mod->kprobes_text_start;
2603 	if (start) {
2604 		end = start + mod->kprobes_text_size;
2605 		kprobe_remove_area_blacklist(start, end);
2606 	}
2607 
2608 	start = (unsigned long)mod->noinstr_text_start;
2609 	if (start) {
2610 		end = start + mod->noinstr_text_size;
2611 		kprobe_remove_area_blacklist(start, end);
2612 	}
2613 }
2614 
2615 /* Module notifier call back, checking kprobes on the module */
2616 static int kprobes_module_callback(struct notifier_block *nb,
2617 				   unsigned long val, void *data)
2618 {
2619 	struct module *mod = data;
2620 	struct hlist_head *head;
2621 	struct kprobe *p;
2622 	unsigned int i;
2623 	int checkcore = (val == MODULE_STATE_GOING);
2624 
2625 	if (val == MODULE_STATE_COMING) {
2626 		mutex_lock(&kprobe_mutex);
2627 		add_module_kprobe_blacklist(mod);
2628 		mutex_unlock(&kprobe_mutex);
2629 	}
2630 	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2631 		return NOTIFY_DONE;
2632 
2633 	/*
2634 	 * When 'MODULE_STATE_GOING' was notified, both of module '.text' and
2635 	 * '.init.text' sections would be freed. When 'MODULE_STATE_LIVE' was
2636 	 * notified, only '.init.text' section would be freed. We need to
2637 	 * disable kprobes which have been inserted in the sections.
2638 	 */
2639 	mutex_lock(&kprobe_mutex);
2640 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2641 		head = &kprobe_table[i];
2642 		hlist_for_each_entry(p, head, hlist)
2643 			if (within_module_init((unsigned long)p->addr, mod) ||
2644 			    (checkcore &&
2645 			     within_module_core((unsigned long)p->addr, mod))) {
2646 				/*
2647 				 * The vaddr this probe is installed will soon
2648 				 * be vfreed buy not synced to disk. Hence,
2649 				 * disarming the breakpoint isn't needed.
2650 				 *
2651 				 * Note, this will also move any optimized probes
2652 				 * that are pending to be removed from their
2653 				 * corresponding lists to the 'freeing_list' and
2654 				 * will not be touched by the delayed
2655 				 * kprobe_optimizer() work handler.
2656 				 */
2657 				kill_kprobe(p);
2658 			}
2659 	}
2660 	if (val == MODULE_STATE_GOING)
2661 		remove_module_kprobe_blacklist(mod);
2662 	mutex_unlock(&kprobe_mutex);
2663 	return NOTIFY_DONE;
2664 }
2665 
2666 static struct notifier_block kprobe_module_nb = {
2667 	.notifier_call = kprobes_module_callback,
2668 	.priority = 0
2669 };
2670 
2671 void kprobe_free_init_mem(void)
2672 {
2673 	void *start = (void *)(&__init_begin);
2674 	void *end = (void *)(&__init_end);
2675 	struct hlist_head *head;
2676 	struct kprobe *p;
2677 	int i;
2678 
2679 	mutex_lock(&kprobe_mutex);
2680 
2681 	/* Kill all kprobes on initmem because the target code has been freed. */
2682 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2683 		head = &kprobe_table[i];
2684 		hlist_for_each_entry(p, head, hlist) {
2685 			if (start <= (void *)p->addr && (void *)p->addr < end)
2686 				kill_kprobe(p);
2687 		}
2688 	}
2689 
2690 	mutex_unlock(&kprobe_mutex);
2691 }
2692 
2693 static int __init init_kprobes(void)
2694 {
2695 	int i, err = 0;
2696 
2697 	/* FIXME allocate the probe table, currently defined statically */
2698 	/* initialize all list heads */
2699 	for (i = 0; i < KPROBE_TABLE_SIZE; i++)
2700 		INIT_HLIST_HEAD(&kprobe_table[i]);
2701 
2702 	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2703 					__stop_kprobe_blacklist);
2704 	if (err)
2705 		pr_err("Failed to populate blacklist (error %d), kprobes not restricted, be careful using them!\n", err);
2706 
2707 	if (kretprobe_blacklist_size) {
2708 		/* lookup the function address from its name */
2709 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2710 			kretprobe_blacklist[i].addr =
2711 				kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2712 			if (!kretprobe_blacklist[i].addr)
2713 				pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. Maybe the target function is removed or renamed.\n",
2714 				       kretprobe_blacklist[i].name);
2715 		}
2716 	}
2717 
2718 	/* By default, kprobes are armed */
2719 	kprobes_all_disarmed = false;
2720 
2721 #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2722 	/* Init 'kprobe_optinsn_slots' for allocation */
2723 	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2724 #endif
2725 
2726 	err = arch_init_kprobes();
2727 	if (!err)
2728 		err = register_die_notifier(&kprobe_exceptions_nb);
2729 	if (!err)
2730 		err = register_module_notifier(&kprobe_module_nb);
2731 
2732 	kprobes_initialized = (err == 0);
2733 	kprobe_sysctls_init();
2734 	return err;
2735 }
2736 early_initcall(init_kprobes);
2737 
2738 #if defined(CONFIG_OPTPROBES)
2739 static int __init init_optprobes(void)
2740 {
2741 	/*
2742 	 * Enable kprobe optimization - this kicks the optimizer which
2743 	 * depends on synchronize_rcu_tasks() and ksoftirqd, that is
2744 	 * not spawned in early initcall. So delay the optimization.
2745 	 */
2746 	optimize_all_kprobes();
2747 
2748 	return 0;
2749 }
2750 subsys_initcall(init_optprobes);
2751 #endif
2752 
2753 #ifdef CONFIG_DEBUG_FS
2754 static void report_probe(struct seq_file *pi, struct kprobe *p,
2755 		const char *sym, int offset, char *modname, struct kprobe *pp)
2756 {
2757 	char *kprobe_type;
2758 	void *addr = p->addr;
2759 
2760 	if (p->pre_handler == pre_handler_kretprobe)
2761 		kprobe_type = "r";
2762 	else
2763 		kprobe_type = "k";
2764 
2765 	if (!kallsyms_show_value(pi->file->f_cred))
2766 		addr = NULL;
2767 
2768 	if (sym)
2769 		seq_printf(pi, "%px  %s  %s+0x%x  %s ",
2770 			addr, kprobe_type, sym, offset,
2771 			(modname ? modname : " "));
2772 	else	/* try to use %pS */
2773 		seq_printf(pi, "%px  %s  %pS ",
2774 			addr, kprobe_type, p->addr);
2775 
2776 	if (!pp)
2777 		pp = p;
2778 	seq_printf(pi, "%s%s%s%s\n",
2779 		(kprobe_gone(p) ? "[GONE]" : ""),
2780 		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2781 		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2782 		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2783 }
2784 
2785 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2786 {
2787 	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2788 }
2789 
2790 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2791 {
2792 	(*pos)++;
2793 	if (*pos >= KPROBE_TABLE_SIZE)
2794 		return NULL;
2795 	return pos;
2796 }
2797 
2798 static void kprobe_seq_stop(struct seq_file *f, void *v)
2799 {
2800 	/* Nothing to do */
2801 }
2802 
2803 static int show_kprobe_addr(struct seq_file *pi, void *v)
2804 {
2805 	struct hlist_head *head;
2806 	struct kprobe *p, *kp;
2807 	const char *sym = NULL;
2808 	unsigned int i = *(loff_t *) v;
2809 	unsigned long offset = 0;
2810 	char *modname, namebuf[KSYM_NAME_LEN];
2811 
2812 	head = &kprobe_table[i];
2813 	preempt_disable();
2814 	hlist_for_each_entry_rcu(p, head, hlist) {
2815 		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2816 					&offset, &modname, namebuf);
2817 		if (kprobe_aggrprobe(p)) {
2818 			list_for_each_entry_rcu(kp, &p->list, list)
2819 				report_probe(pi, kp, sym, offset, modname, p);
2820 		} else
2821 			report_probe(pi, p, sym, offset, modname, NULL);
2822 	}
2823 	preempt_enable();
2824 	return 0;
2825 }
2826 
2827 static const struct seq_operations kprobes_sops = {
2828 	.start = kprobe_seq_start,
2829 	.next  = kprobe_seq_next,
2830 	.stop  = kprobe_seq_stop,
2831 	.show  = show_kprobe_addr
2832 };
2833 
2834 DEFINE_SEQ_ATTRIBUTE(kprobes);
2835 
2836 /* kprobes/blacklist -- shows which functions can not be probed */
2837 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2838 {
2839 	mutex_lock(&kprobe_mutex);
2840 	return seq_list_start(&kprobe_blacklist, *pos);
2841 }
2842 
2843 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2844 {
2845 	return seq_list_next(v, &kprobe_blacklist, pos);
2846 }
2847 
2848 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2849 {
2850 	struct kprobe_blacklist_entry *ent =
2851 		list_entry(v, struct kprobe_blacklist_entry, list);
2852 
2853 	/*
2854 	 * If '/proc/kallsyms' is not showing kernel address, we won't
2855 	 * show them here either.
2856 	 */
2857 	if (!kallsyms_show_value(m->file->f_cred))
2858 		seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
2859 			   (void *)ent->start_addr);
2860 	else
2861 		seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2862 			   (void *)ent->end_addr, (void *)ent->start_addr);
2863 	return 0;
2864 }
2865 
2866 static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
2867 {
2868 	mutex_unlock(&kprobe_mutex);
2869 }
2870 
2871 static const struct seq_operations kprobe_blacklist_sops = {
2872 	.start = kprobe_blacklist_seq_start,
2873 	.next  = kprobe_blacklist_seq_next,
2874 	.stop  = kprobe_blacklist_seq_stop,
2875 	.show  = kprobe_blacklist_seq_show,
2876 };
2877 DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
2878 
2879 static int arm_all_kprobes(void)
2880 {
2881 	struct hlist_head *head;
2882 	struct kprobe *p;
2883 	unsigned int i, total = 0, errors = 0;
2884 	int err, ret = 0;
2885 
2886 	mutex_lock(&kprobe_mutex);
2887 
2888 	/* If kprobes are armed, just return */
2889 	if (!kprobes_all_disarmed)
2890 		goto already_enabled;
2891 
2892 	/*
2893 	 * optimize_kprobe() called by arm_kprobe() checks
2894 	 * kprobes_all_disarmed, so set kprobes_all_disarmed before
2895 	 * arm_kprobe.
2896 	 */
2897 	kprobes_all_disarmed = false;
2898 	/* Arming kprobes doesn't optimize kprobe itself */
2899 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2900 		head = &kprobe_table[i];
2901 		/* Arm all kprobes on a best-effort basis */
2902 		hlist_for_each_entry(p, head, hlist) {
2903 			if (!kprobe_disabled(p)) {
2904 				err = arm_kprobe(p);
2905 				if (err)  {
2906 					errors++;
2907 					ret = err;
2908 				}
2909 				total++;
2910 			}
2911 		}
2912 	}
2913 
2914 	if (errors)
2915 		pr_warn("Kprobes globally enabled, but failed to enable %d out of %d probes. Please check which kprobes are kept disabled via debugfs.\n",
2916 			errors, total);
2917 	else
2918 		pr_info("Kprobes globally enabled\n");
2919 
2920 already_enabled:
2921 	mutex_unlock(&kprobe_mutex);
2922 	return ret;
2923 }
2924 
2925 static int disarm_all_kprobes(void)
2926 {
2927 	struct hlist_head *head;
2928 	struct kprobe *p;
2929 	unsigned int i, total = 0, errors = 0;
2930 	int err, ret = 0;
2931 
2932 	mutex_lock(&kprobe_mutex);
2933 
2934 	/* If kprobes are already disarmed, just return */
2935 	if (kprobes_all_disarmed) {
2936 		mutex_unlock(&kprobe_mutex);
2937 		return 0;
2938 	}
2939 
2940 	kprobes_all_disarmed = true;
2941 
2942 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2943 		head = &kprobe_table[i];
2944 		/* Disarm all kprobes on a best-effort basis */
2945 		hlist_for_each_entry(p, head, hlist) {
2946 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2947 				err = disarm_kprobe(p, false);
2948 				if (err) {
2949 					errors++;
2950 					ret = err;
2951 				}
2952 				total++;
2953 			}
2954 		}
2955 	}
2956 
2957 	if (errors)
2958 		pr_warn("Kprobes globally disabled, but failed to disable %d out of %d probes. Please check which kprobes are kept enabled via debugfs.\n",
2959 			errors, total);
2960 	else
2961 		pr_info("Kprobes globally disabled\n");
2962 
2963 	mutex_unlock(&kprobe_mutex);
2964 
2965 	/* Wait for disarming all kprobes by optimizer */
2966 	wait_for_kprobe_optimizer();
2967 
2968 	return ret;
2969 }
2970 
2971 /*
2972  * XXX: The debugfs bool file interface doesn't allow for callbacks
2973  * when the bool state is switched. We can reuse that facility when
2974  * available
2975  */
2976 static ssize_t read_enabled_file_bool(struct file *file,
2977 	       char __user *user_buf, size_t count, loff_t *ppos)
2978 {
2979 	char buf[3];
2980 
2981 	if (!kprobes_all_disarmed)
2982 		buf[0] = '1';
2983 	else
2984 		buf[0] = '0';
2985 	buf[1] = '\n';
2986 	buf[2] = 0x00;
2987 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2988 }
2989 
2990 static ssize_t write_enabled_file_bool(struct file *file,
2991 	       const char __user *user_buf, size_t count, loff_t *ppos)
2992 {
2993 	bool enable;
2994 	int ret;
2995 
2996 	ret = kstrtobool_from_user(user_buf, count, &enable);
2997 	if (ret)
2998 		return ret;
2999 
3000 	ret = enable ? arm_all_kprobes() : disarm_all_kprobes();
3001 	if (ret)
3002 		return ret;
3003 
3004 	return count;
3005 }
3006 
3007 static const struct file_operations fops_kp = {
3008 	.read =         read_enabled_file_bool,
3009 	.write =        write_enabled_file_bool,
3010 	.llseek =	default_llseek,
3011 };
3012 
3013 static int __init debugfs_kprobe_init(void)
3014 {
3015 	struct dentry *dir;
3016 
3017 	dir = debugfs_create_dir("kprobes", NULL);
3018 
3019 	debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);
3020 
3021 	debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);
3022 
3023 	debugfs_create_file("blacklist", 0400, dir, NULL,
3024 			    &kprobe_blacklist_fops);
3025 
3026 	return 0;
3027 }
3028 
3029 late_initcall(debugfs_kprobe_init);
3030 #endif /* CONFIG_DEBUG_FS */
3031