// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>
#include <linux/static_call.h>
#include <linux/perf_event.h>
#include <linux/execmem.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

#if !defined(CONFIG_OPTPROBES) || !defined(CONFIG_SYSCTL)
#define kprobe_sysctls_init() do { } while (0)
#endif

static int kprobes_initialized;
/*
 * 'kprobe_table' can be accessed by
 * - normal hlist traversal and RCU add/del while 'kprobe_mutex' is held,
 * or
 * - RCU hlist traversal with preemption disabled (breakpoint handlers).
 */
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with 'kprobe_mutex' held */
static bool kprobes_all_disarmed;

/* This protects 'kprobe_table' and 'optimizing_list' */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);

kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
					    unsigned int __unused)
{
	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}

/*
 * Blacklist -- list of 'struct kprobe_blacklist_entry' to store info about
 * addresses where kprobes can not probe.
 */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * 'kprobe::ainsn.insn' points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache;
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}
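
/*
 * Worked example (an illustrative sketch; the numbers are
 * architecture-dependent): with 4 KiB pages and x86's MAX_INSN_SIZE of
 * 16 byte-sized kprobe_opcode_t units, slots_per_page() yields
 * 4096 / (16 * 1) == 256 instruction slots per page.
 */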

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

void __weak *alloc_insn_page(void)
{
	/*
	 * Use execmem_alloc() so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * for most of the architectures.
	 * (e.g. x86-64 needs this to handle the %rip-relative fixups.)
	 */
	return execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
}

static void free_insn_page(void *page)
{
	execmem_free(page);
}

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.sym = KPROBE_INSN_PAGE_SYM,
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	/* Since the slot array is not protected by RCU, we need a mutex */
	mutex_lock(&c->mutex);
retry:
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;

			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					rcu_read_unlock();
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}
	rcu_read_unlock();

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space. Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	list_add_rcu(&kip->list, &c->pages);
	slot = kip->insns;

	/* Record the perf ksymbol register event after adding the page */
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
			   PAGE_SIZE, false, c->sym);
out:
	mutex_unlock(&c->mutex);
	return slot;
}
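
/*
 * A minimal usage sketch, assuming the get_insn_slot()/free_insn_slot()
 * wrappers that <linux/kprobes.h> generates for 'kprobe_insn_slots'; this
 * is how arch code typically pairs allocation and release of a slot:
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	...
 *	free_insn_slot(p->ainsn.insn, 0);  // dirty == 0: reclaim right away
 */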

/* Return true if all garbage slots on the page are collected, otherwise false. */
static bool collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use. Free it unless
		 * it's the last one. We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			/*
			 * Record perf ksymbol unregister event before removing
			 * the page.
			 */
			perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
					   (unsigned long)kip->insns, PAGE_SIZE, true,
					   kip->cache->sym);
			list_del_rcu(&kip->list);
			synchronize_rcu();
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return true;
	}
	return false;
}

static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is interrupted while on the garbage slots */
	synchronize_rcu();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;

		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	long idx;

	mutex_lock(&c->mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		idx = ((long)slot - (long)kip->insns) /
			(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c))
			goto out;
	}
	/* Could not find this slot. */
	WARN_ON(1);
	kip = NULL;
out:
	rcu_read_unlock();
	/* Mark and sweep: this may sleep */
	if (kip) {
		/* Check double free */
		WARN_ON(kip->slot_used[idx] != SLOT_USED);
		if (dirty) {
			kip->slot_used[idx] = SLOT_DIRTY;
			kip->ngarbage++;
			if (++c->nr_garbage > slots_per_page(c))
				collect_garbage_slots(c);
		} else {
			collect_one_slot(kip, idx);
		}
	}
	mutex_unlock(&c->mutex);
}

/*
 * Check whether the given address is on a page of kprobe instruction slots.
 * This is used to check whether an address found on a stack
 * is in a text area or not.
 */
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{
	struct kprobe_insn_page *kip;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (addr >= (unsigned long)kip->insns &&
		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
			     unsigned long *value, char *type, char *sym)
{
	struct kprobe_insn_page *kip;
	int ret = -ERANGE;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if ((*symnum)--)
			continue;
		strscpy(sym, c->sym, KSYM_NAME_LEN);
		*type = 't';
		*value = (unsigned long)kip->insns;
		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

#ifdef CONFIG_OPTPROBES
void __weak *alloc_optinsn_page(void)
{
	return alloc_insn_page();
}

void __weak free_optinsn_page(void *page)
{
	free_insn_page(page);
}

/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.alloc = alloc_optinsn_page,
	.free = free_optinsn_page,
	.sym = KPROBE_OPTINSN_PAGE_SYM,
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif /* CONFIG_OPTPROBES */
#endif /* __ARCH_WANT_KPROBES_INSN_SLOT */

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 * - under the 'kprobe_mutex' - during kprobe_[un]register().
 * OR
 * - with preemption disabled - from architecture specific code.
 */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist,
				 lockdep_is_held(&kprobe_mutex)) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);

static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if 'p' is an aggregator */
static inline bool kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true if 'p' is unused */
static inline bool kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/* Keep all fields in the kprobe consistent. */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: This is protected by 'kprobe_mutex'. */
static bool kprobes_allow_optimization;

/*
 * Call all 'kprobe::pre_handler' handlers on the list, but ignore their
 * return values. This must be called from the arch-dependent optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}
NOKPROBE_SYMBOL(opt_pre_handler);

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true if the kprobe is disarmed. Note: p must be on the hash list */
bool kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If the kprobe is not an aggr/opt probe, just return whether it is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true if the probe is queued on (un)optimizing lists */
static bool kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return true;
	}
	return false;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including 'addr' (excluding the breakpoint itself).
 */
static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++)
		p = get_kprobe(addr - i);

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by 'kprobe_mutex' */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * 'optimizing_list'.
 */
static void do_optimize_kprobes(void)
{
	lockdep_assert_held(&text_mutex);
	/*
	 * Optimization/unoptimization refers to 'online_cpus' via
	 * stop_machine(), while CPU hotplug modifies 'online_cpus'.
	 * At the same time, 'text_mutex' is held both here and during
	 * CPU hotplug. This combination can cause a deadlock (CPU hotplug
	 * tries to lock 'text_mutex' but stop_machine() can not proceed
	 * because 'online_cpus' has changed).
	 * To avoid this deadlock, the caller must hold the CPU-hotplug
	 * lock, preventing CPU hotplug while 'text_mutex' is locked.
	 */
	lockdep_assert_cpus_held();

	/* Optimization is never done while kprobes are all disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	arch_optimize_kprobes(&optimizing_list);
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on 'unoptimizing_list'.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	lockdep_assert_held(&text_mutex);
	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	if (!list_empty(&unoptimizing_list))
		arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);

	/* Loop over 'freeing_list' for disarming and removing from the kprobe hash list */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Switching from detour code to the original */
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		/* Disarm probes if marked disabled and not gone */
		if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from the hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes().)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
}

/* Reclaim all kprobes on the 'freeing_list' */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		list_del_init(&op->list);
		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
			/*
			 * This must not happen, but if there is a kprobe
			 * still in use, keep it on the kprobes hash list.
			 */
			continue;
		}
		free_aggr_kprobe(&op->kp);
	}
}

/* Start the optimizer after OPTIMIZE_DELAY has passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	mutex_lock(&text_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for a quiescence period to ensure that all potentially
	 * preempted tasks have been scheduled normally. Because an optprobe
	 * may modify multiple instructions, there is a chance that the Nth
	 * instruction is preempted. In that case, such tasks can return
	 * into the 2nd-Nth byte of the jump instruction. This wait avoids that.
	 * Note that on a non-preemptive kernel, this is transparently converted
	 * to synchronize_sched() to wait for all interrupts to have completed.
	 */
	synchronize_rcu_tasks();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	/* Step 5: Kick the optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();

	mutex_unlock(&kprobe_mutex);
}

/* Wait for optimization and unoptimization to complete */
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* This will also make 'optimizing_work' execute immediately */
		flush_delayed_work(&optimizing_work);
		/* 'optimizing_work' might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

bool optprobe_queued_unopt(struct optimized_kprobe *op)
{
	struct optimized_kprobe *_op;

	list_for_each_entry(_op, &unoptimizing_list, list) {
		if (op == _op)
			return true;
	}

	return false;
}

/* Optimize the kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* kprobes with a 'post_handler' can not be optimized */
	if (p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes within the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
		if (optprobe_queued_unopt(op)) {
			/* This is being unoptimized. Just dequeue the probe */
			list_del_init(&op->list);
		}
		return;
	}
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	/*
	 * On the 'unoptimizing_list' and 'optimizing_list',
	 * 'op' must have the OPTIMIZED flag set
	 */
	if (WARN_ON_ONCE(!list_empty(&op->list)))
		return;

	list_add(&op->list, &optimizing_list);
	kick_kprobe_optimizer();
}

/* Shortcut to unoptimizing the kprobe directly */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	lockdep_assert_cpus_held();
	arch_unoptimize_kprobe(op);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe, nor is it optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p))
		return;

	if (!list_empty(&op->list)) {
		if (optprobe_queued_unopt(op)) {
			/* Queued on the unoptimizing queue */
			if (force) {
				/*
				 * Forcibly unoptimize the kprobe here, and queue it
				 * in the freeing list for release afterwards.
				 */
				force_unoptimize_kprobe(op);
				list_move(&op->list, &freeing_list);
			}
		} else {
			/* Dequeue from the optimizing queue */
			list_del_init(&op->list);
			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		}
		return;
	}

	/* Optimized kprobe case */
	if (force) {
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	} else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Cancel unoptimizing for reuse */
static int reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	/*
	 * An unused kprobe MUST be in the middle of delayed unoptimizing
	 * (meaning there is still a relative jump in place) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	WARN_ON_ONCE(list_empty(&op->list));
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again. (remove from 'op->list') */
	if (!kprobe_optready(ap))
		return -EINVAL;

	optimize_kprobe(ap);
	return 0;
}

/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/*
		 * Unused kprobe is on unoptimizing or freeing list. We move it
		 * to freeing_list and let the kprobe_optimizer() remove it from
		 * the kprobe hash list and free it.
		 */
		if (optprobe_queued_unopt(op))
			list_move(&op->list, &freeing_list);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		arch_prepare_optimized_kprobe(op, p);
}

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	__prepare_optimized_kprobe(op, p);
}

/* Allocate a new optimized_kprobe and try to prepare optimized instructions. */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	__prepare_optimized_kprobe(op, p);

	return &op->kp;
}

static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it.
 * NOTE: 'p' must be a normal registered kprobe.
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* It is impossible to optimize an ftrace-based kprobe. */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called. */
	cpus_read_lock();
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If setting up the optimization failed, fall back to a plain kprobe. */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks the optimizer thread. */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();
}

static void optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return. */
	if (kprobes_allow_optimization)
		goto out;

	cpus_read_lock();
	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	cpus_read_unlock();
	pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
out:
	mutex_unlock(&kprobe_mutex);
}

#ifdef CONFIG_SYSCTL
static void unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return. */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	cpus_read_lock();
	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion. */
	wait_for_kprobe_optimizer();
	pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
static int sysctl_kprobes_optimization;
static int proc_kprobes_optimization_handler(const struct ctl_table *table,
					     int write, void *buffer,
					     size_t *length, loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}

static struct ctl_table kprobe_sysctls[] = {
	{
		.procname = "kprobes-optimization",
		.data = &sysctl_kprobes_optimization,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_kprobes_optimization_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
};

static void __init kprobe_sysctls_init(void)
{
	register_sysctl_init("debug", kprobe_sysctls);
}
#endif /* CONFIG_SYSCTL */
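
/*
 * With CONFIG_SYSCTL, jump optimization can be toggled at run time via the
 * knob registered above, e.g. (shell sketch; the knob is also visible as
 * /proc/sys/debug/kprobes-optimization):
 *
 *	# sysctl debug.kprobes-optimization=0	(fall back to breakpoints)
 *	# sysctl debug.kprobes-optimization=1	(re-enable jump optimization)
 */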

/* Put a breakpoint for a probe. */
static void __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	lockdep_assert_held(&text_mutex);

	/* Find the overlapping optimized kprobes. */
	_p = get_optimized_kprobe(p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	lockdep_assert_held(&text_mutex);

	/* Try to unoptimize */
	unoptimize_kprobe(p, kprobes_all_disarmed);

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, re-optimize it. */
		_p = get_optimized_kprobe(p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/*
	 * TODO: Since unoptimization and real disarming will be done by
	 * the worker thread, we can not check here whether other probes were
	 * unoptimized because of this probe. They should be re-optimized
	 * by the worker thread.
	 */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

static int reuse_unused_kprobe(struct kprobe *ap)
{
	/*
	 * If optimized kprobes are NOT supported, the aggr kprobe is
	 * released at the same time that the last aggregated kprobe is
	 * unregistered.
	 * Thus there should be no chance to reuse an unused kprobe.
	 */
	WARN_ON_ONCE(1);
	return -EINVAL;
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};

static int kprobe_ipmodify_enabled;
static int kprobe_ftrace_enabled;
bool kprobe_ftrace_disabled;

static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
			       int *cnt)
{
	int ret;

	lockdep_assert_held(&kprobe_mutex);

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
	if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret))
		return ret;

	if (*cnt == 0) {
		ret = register_ftrace_function(ops);
		if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
			goto err_ftrace;
	}

	(*cnt)++;
	return ret;

err_ftrace:
	/*
	 * At this point, since ops is not registered, we should be safe from
	 * registering an empty filter.
	 */
	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	return ret;
}

static int arm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __arm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}

static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
				  int *cnt)
{
	int ret;

	lockdep_assert_held(&kprobe_mutex);

	if (*cnt == 1) {
		ret = unregister_ftrace_function(ops);
		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret))
			return ret;
	}

	(*cnt)--;

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n",
		  p->addr, ret);
	return ret;
}

static int disarm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __disarm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}

void kprobe_ftrace_kill(void)
{
	kprobe_ftrace_disabled = true;
}
#else /* !CONFIG_KPROBES_ON_FTRACE */
static inline int arm_kprobe_ftrace(struct kprobe *p)
{
	return -ENODEV;
}

static inline int disarm_kprobe_ftrace(struct kprobe *p)
{
	return -ENODEV;
}
#endif /* CONFIG_KPROBES_ON_FTRACE */

static int prepare_kprobe(struct kprobe *p)
{
	/* Must ensure p->addr is really on ftrace */
	if (kprobe_ftrace(p))
		return arch_prepare_kprobe_ftrace(p);

	return arch_prepare_kprobe(p);
}

static int arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp)))
		return arm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}

static int disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp)))
		return disarm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_pre_handler);

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
NOKPROBE_SYMBOL(aggr_post_handler);

/* Increments the 'nmissed' count of 'p', or of its child probes if 'p' is an aggregator. */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);

static struct kprobe kprobe_busy = {
	.addr = (void *) get_kprobe,
};

void kprobe_busy_begin(void)
{
	struct kprobe_ctlblk *kcb;

	preempt_disable();
	__this_cpu_write(current_kprobe, &kprobe_busy);
	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
}

void kprobe_busy_end(void)
{
	__this_cpu_write(current_kprobe, NULL);
	preempt_enable();
}
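
/*
 * Usage sketch: kprobe_busy_begin()/kprobe_busy_end() bracket code that
 * touches data shared with kprobe handlers, so a kprobe firing inside the
 * section is treated as a recursive hit and rejected, e.g.:
 *
 *	kprobe_busy_begin();
 *	... walk data also used by kprobe handlers ...
 *	kprobe_busy_end();
 *
 * (kprobe_flush_task() below is an in-tree user of this pattern.)
 */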

/* Add the new probe to 'ap->list'. */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	if (p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}

/*
 * Fill in the required fields of the aggregator kprobe. Replace the
 * earlier kprobe in the hlist with the aggregator kprobe.
 */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy the insn slot of 'p' to 'ap'. */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This registers the second or subsequent kprobe at the same address.
 */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	cpus_read_lock();

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If 'orig_p' is not an 'aggr_kprobe', create a new one. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap)) {
		/* This probe is going to die. Rescue it */
		ret = reuse_unused_kprobe(ap);
		if (ret)
			goto out;
	}

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert a new probe at the same location as
		 * a probe in a module vaddr area which has already been
		 * freed. So, the instruction slot has already been
		 * released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free 'ap'. It will be used next time, or
			 * freed by unregister_kprobe().
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy the insn slot of 'p' to 'ap'. */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed) {
			/* Arm the breakpoint again. */
			ret = arm_kprobe(ap);
			if (ret) {
				ap->flags |= KPROBE_FLAG_DISABLED;
				list_del_rcu(&p->list);
				synchronize_rcu();
			}
		}
	}
	return ret;
}

bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
	/* The '__kprobes' functions and entry code must not be probed. */
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}

static bool __within_kprobe_blacklist(unsigned long addr)
{
	struct kprobe_blacklist_entry *ent;

	if (arch_within_kprobe_blacklist(addr))
		return true;
	/*
	 * If 'kprobe_blacklist' is defined, check the address and
	 * reject any probe registration in the prohibited area.
	 */
	list_for_each_entry(ent, &kprobe_blacklist, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return true;
	}
	return false;
}

bool within_kprobe_blacklist(unsigned long addr)
{
	char symname[KSYM_NAME_LEN], *p;

	if (__within_kprobe_blacklist(addr))
		return true;

	/* Check if the address is on a suffixed symbol */
	if (!lookup_symbol_name(addr, symname)) {
		p = strchr(symname, '.');
		if (!p)
			return false;
		*p = '\0';
		addr = (unsigned long)kprobe_lookup_name(symname, 0);
		if (addr)
			return __within_kprobe_blacklist(addr);
	}
	return false;
}

/*
 * arch_adjust_kprobe_addr - adjust the address
 * @addr: symbol base address
 * @offset: offset within the symbol
 * @on_func_entry: was this @addr+@offset on the function entry
 *
 * Typically returns @addr + @offset, except for special cases where the
 * function might be prefixed by a CFI landing pad, in that case any offset
 * inside the landing pad is mapped to the first 'real' instruction of the
 * symbol.
 *
 * Specifically, for things like IBT/BTI, skip the resp. ENDBR/BTI.C
 * instruction at +0.
 */
kprobe_opcode_t *__weak arch_adjust_kprobe_addr(unsigned long addr,
						unsigned long offset,
						bool *on_func_entry)
{
	*on_func_entry = !offset;
	return (kprobe_opcode_t *)(addr + offset);
}

/*
 * If 'symbol_name' is specified, look it up and add the 'offset'
 * to it. This way, we can specify a relative address to a symbol.
 * This returns an encoded error if it fails to look up the symbol or if an
 * invalid combination of parameters is given.
 */
static kprobe_opcode_t *
_kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
	     unsigned long offset, bool *on_func_entry)
{
	if ((symbol_name && addr) || (!symbol_name && !addr))
		goto invalid;

	if (symbol_name) {
		/*
		 * Input: @sym + @offset
		 * Output: @addr + @offset
		 *
		 * NOTE: kprobe_lookup_name() does *NOT* fold the offset
		 * argument into its output!
		 */
		addr = kprobe_lookup_name(symbol_name, offset);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	/*
	 * So here we have @addr + @offset, displace it into a new
	 * @addr' + @offset' where @addr' is the symbol start address.
	 */
	addr = (void *)addr + offset;
	if (!kallsyms_lookup_size_offset((unsigned long)addr, NULL, &offset))
		return ERR_PTR(-ENOENT);
	addr = (void *)addr - offset;

	/*
	 * Then ask the architecture to re-combine them, taking care of
	 * magical function entry details while telling us if this was indeed
	 * at the start of the function.
	 */
	addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}
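
/*
 * Worked example (an illustrative sketch; actual addresses vary): for
 * symbol_name == "vfs_read" and offset == 0x10, kprobe_lookup_name()
 * returns the symbol start address, 0x10 is added, and
 * kallsyms_lookup_size_offset() re-derives the containing symbol and
 * offset so that arch_adjust_kprobe_addr() can fold in any entry padding
 * (e.g. skipping the ENDBR instruction on an x86 IBT kernel).
 */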

static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
	bool on_func_entry;
	return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
}

/*
 * Check that 'p' is valid and return the aggregator kprobe
 * at the same address.
 */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	lockdep_assert_held(&kprobe_mutex);

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/*
 * Warn and return an error if the kprobe is being re-registered, since
 * that indicates a software bug.
 */
static inline int warn_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (WARN_ON_ONCE(__get_valid_kprobe(p)))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}

static int check_ftrace_location(struct kprobe *p)
{
	unsigned long addr = (unsigned long)p->addr;

	if (ftrace_location(addr) == addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		p->flags |= KPROBE_FLAG_FTRACE;
#else /* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}
	return 0;
}

static bool is_cfi_preamble_symbol(unsigned long addr)
{
	char symbuf[KSYM_NAME_LEN];

	if (lookup_symbol_name(addr, symbuf))
		return false;

	return str_has_prefix(symbuf, "__cfi_") ||
	       str_has_prefix(symbuf, "__pfx_");
}

static int check_kprobe_address_safe(struct kprobe *p,
				     struct module **probed_mod)
{
	int ret;

	ret = check_ftrace_location(p);
	if (ret)
		return ret;
	jump_label_lock();
	preempt_disable();

	/* Ensure the address is in a text area, and find the module if it exists. */
	*probed_mod = NULL;
	if (!core_kernel_text((unsigned long) p->addr)) {
		*probed_mod = __module_text_address((unsigned long) p->addr);
		if (!(*probed_mod)) {
			ret = -EINVAL;
			goto out;
		}
	}
	/* Ensure it is not in a reserved area. */
	if (in_gate_area_no_mm((unsigned long) p->addr) ||
	    within_kprobe_blacklist((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr) ||
	    static_call_text_reserved(p->addr, p->addr) ||
	    find_bug((unsigned long)p->addr) ||
	    is_cfi_preamble_symbol((unsigned long)p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Get the module refcount and reject __init functions for loaded modules. */
	if (IS_ENABLED(CONFIG_MODULES) && *probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module freed '.init.text', we can't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    !module_is_coming(*probed_mod)) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}

out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}

int register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;
	bool on_func_entry;

	/* Adjust the probe address from the symbol */
	addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = warn_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	if (on_func_entry)
		p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize 'old_p', 'text_mutex' is locked inside. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	cpus_read_lock();
	/* Prevent text modification */
	mutex_lock(&text_mutex);
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
		ret = arm_kprobe(p);
		if (ret) {
			hlist_del_rcu(&p->hlist);
			synchronize_rcu();
			goto out;
		}
	}

	/* Try to optimize the kprobe */
	try_to_optimize_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
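
/*
 * Minimal usage sketch (cf. samples/kprobes/kprobe_example.c; the handler
 * name and probed symbol here are illustrative):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre-handler hit at %pS\n",
 *			(void *)instruction_pointer(regs));
 *		return 0;	// 0: continue with single-stepping
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "kernel_clone",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	ret = register_kprobe(&kp);	// negative errno on failure
 */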

/* Check if all probes on the 'ap' are disabled. */
static bool aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	lockdep_assert_held(&kprobe_mutex);

	list_for_each_entry(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * Since there is an active probe on the list,
			 * we can't disable this 'ap'.
			 */
			return false;

	return true;
}

static struct kprobe *__disable_kprobe(struct kprobe *p)
{
	struct kprobe *orig_p;
	int ret;

	lockdep_assert_held(&kprobe_mutex);

	/* Get an original kprobe for return */
	orig_p = __get_valid_kprobe(p);
	if (unlikely(orig_p == NULL))
		return ERR_PTR(-EINVAL);

	if (!kprobe_disabled(p)) {
		/* Disable the probe if it is a child probe */
		if (p != orig_p)
			p->flags |= KPROBE_FLAG_DISABLED;

		/* Try to disarm and disable this/parent probe */
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			/*
			 * Don't be lazy here. Even if 'kprobes_all_disarmed'
			 * is false, 'orig_p' might not have been armed yet.
			 * Note arm_all_kprobes() __tries__ to arm all kprobes
			 * on a best-effort basis.
			 */
			if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
				ret = disarm_kprobe(orig_p, true);
				if (ret) {
					p->flags &= ~KPROBE_FLAG_DISABLED;
					return ERR_PTR(ret);
				}
			}
			orig_p->flags |= KPROBE_FLAG_DISABLED;
		}
	}

	return orig_p;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Disable the kprobe. This will disarm it if needed. */
	ap = __disable_kprobe(p);
	if (IS_ERR(ap))
		return PTR_ERR(ap);

	if (ap == p)
		/*
		 * This probe is an independent (and non-optimized) kprobe
		 * (not an aggrprobe). Remove it from the hash list.
		 */
		goto disarmed;

	/* The following process expects this probe to be an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed can happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If the probe being disabled has special handlers, update the aggrprobe */
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			/*
			 * For the kprobe-on-ftrace case, we keep the
			 * post_handler setting to identify this aggrprobe
			 * armed with kprobe_ipmodify_ops.
			 */
			if (!kprobe_ftrace(ap))
				ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because the post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}
	return 0;

disarmed:
	hlist_del_rcu(&ap->hlist);
	return 0;
}

static void __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *ap;

	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This is the last child of an aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
	}
	/* Otherwise, do nothing. */
}

int register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_rcu();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);
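
/*
 * Batch usage sketch (illustrative; 'kp1' and 'kp2' are hypothetical
 * kprobes set up as in the register_kprobe() example above). Note that
 * register_kprobes() itself rolls back already-registered probes on the
 * first failure:
 *
 *	static struct kprobe *kps[] = { &kp1, &kp2 };
 *
 *	ret = register_kprobes(kps, ARRAY_SIZE(kps));
 *	...
 *	unregister_kprobes(kps, ARRAY_SIZE(kps));
 */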

int __weak kprobe_exceptions_notify(struct notifier_block *self,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};
1888
1889 #ifdef CONFIG_KRETPROBES
1890
1891 #if !defined(CONFIG_KRETPROBE_ON_RETHOOK)
1892
1893 /* callbacks for objpool of kretprobe instances */
1894 static int kretprobe_init_inst(void *nod, void *context)
1895 {
1896 struct kretprobe_instance *ri = nod;
1897
1898 ri->rph = context;
1899 return 0;
1900 }
1901 static int kretprobe_fini_pool(struct objpool_head *head, void *context)
1902 {
1903 kfree(context);
1904 return 0;
1905 }
1906
1907 static void free_rp_inst_rcu(struct rcu_head *head)
1908 {
1909 struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
1910 struct kretprobe_holder *rph = ri->rph;
1911
1912 objpool_drop(ri, &rph->pool);
1913 }
1914 NOKPROBE_SYMBOL(free_rp_inst_rcu);
1915
1916 static void recycle_rp_inst(struct kretprobe_instance *ri)
1917 {
1918 struct kretprobe *rp = get_kretprobe(ri);
1919
1920 if (likely(rp))
1921 objpool_push(ri, &rp->rph->pool);
1922 else
1923 call_rcu(&ri->rcu, free_rp_inst_rcu);
1924 }
1925 NOKPROBE_SYMBOL(recycle_rp_inst);
1926
1927 /*
1928 * This function is called from delayed_put_task_struct() when a task is
1929 * dead and cleaned up to recycle any kretprobe instances associated with
1930 * this task. These left-over instances represent probed functions that
1931 * have been called but will never return.
1932 */
1933 void kprobe_flush_task(struct task_struct *tk)
1934 {
1935 struct kretprobe_instance *ri;
1936 struct llist_node *node;
1937
1938 /* Early boot, not yet initialized. */
1939 if (unlikely(!kprobes_initialized))
1940 return;
1941
1942 kprobe_busy_begin();
1943
1944 node = __llist_del_all(&tk->kretprobe_instances);
1945 while (node) {
1946 ri = container_of(node, struct kretprobe_instance, llist);
1947 node = node->next;
1948
1949 recycle_rp_inst(ri);
1950 }
1951
1952 kprobe_busy_end();
1953 }
1954 NOKPROBE_SYMBOL(kprobe_flush_task);
1955
1956 static inline void free_rp_inst(struct kretprobe *rp)
1957 {
1958 struct kretprobe_holder *rph = rp->rph;
1959
1960 if (!rph)
1961 return;
1962 rp->rph = NULL;
1963 objpool_fini(&rph->pool);
1964 }
1965
1966 /* This assumes that 'tsk' is the current task or a task that is not running. */
1967 static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk,
1968 struct llist_node **cur)
1969 {
1970 struct kretprobe_instance *ri = NULL;
1971 struct llist_node *node = *cur;
1972
1973 if (!node)
1974 node = tsk->kretprobe_instances.first;
1975 else
1976 node = node->next;
1977
1978 while (node) {
1979 ri = container_of(node, struct kretprobe_instance, llist);
1980 if (ri->ret_addr != kretprobe_trampoline_addr()) {
1981 *cur = node;
1982 return ri->ret_addr;
1983 }
1984 node = node->next;
1985 }
1986 return NULL;
1987 }
1988 NOKPROBE_SYMBOL(__kretprobe_find_ret_addr);
1989
1990 /**
1991 * kretprobe_find_ret_addr -- Find correct return address modified by kretprobe
1992 * @tsk: Target task
1993 * @fp: A frame pointer
1994 * @cur: a storage of the loop cursor llist_node pointer for next call
1995 *
1996 * Find the correct return address modified by a kretprobe on @tsk and
1997 * return it as an unsigned long. If the return address is found, this
1998 * returns that address value; otherwise it returns 0.
1999 * The @tsk must be 'current' or a task which is not running. @fp is a hint
2000 * to get the correct return address - which is compared with the
2001 * kretprobe_instance::fp field. The @cur is a loop cursor for searching the
2002 * kretprobe return addresses on the @tsk. The '*@cur' should be NULL at the
2003 * first call, but '@cur' itself must NOT be NULL.
2004 */
2005 unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
2006 struct llist_node **cur)
2007 {
2008 struct kretprobe_instance *ri;
2009 kprobe_opcode_t *ret;
2010
2011 if (WARN_ON_ONCE(!cur))
2012 return 0;
2013
2014 do {
2015 ret = __kretprobe_find_ret_addr(tsk, cur);
2016 if (!ret)
2017 break;
2018 ri = container_of(*cur, struct kretprobe_instance, llist);
2019 } while (ri->fp != fp);
2020
2021 return (unsigned long)ret;
2022 }
2023 NOKPROBE_SYMBOL(kretprobe_find_ret_addr);
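/*
 * Usage sketch (hedged; the unwinder context and the 'frame_fp' variable
 * are hypothetical): a stack walker can use kretprobe_find_ret_addr() to
 * translate trampoline addresses it meets on a task's stack back into the
 * real return addresses:
 *
 *	struct llist_node *kr_cur = NULL;
 *	unsigned long addr;
 *
 *	(for each return address 'addr' found while unwinding 'task':)
 *	if (is_kretprobe_trampoline(addr))
 *		addr = kretprobe_find_ret_addr(task, (void *)frame_fp,
 *					       &kr_cur);
 */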
2024
2025 void __weak arch_kretprobe_fixup_return(struct pt_regs *regs,
2026 kprobe_opcode_t *correct_ret_addr)
2027 {
2028 /*
2029 * Do nothing by default. Each architecture should override this to update
2030 * the fake return address on the stack with the correct one, if possible.
2031 */
2032 }
2033
2034 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
2035 void *frame_pointer)
2036 {
2037 struct kretprobe_instance *ri = NULL;
2038 struct llist_node *first, *node = NULL;
2039 kprobe_opcode_t *correct_ret_addr;
2040 struct kretprobe *rp;
2041
2042 /* Find correct address and all nodes for this frame. */
2043 correct_ret_addr = __kretprobe_find_ret_addr(current, &node);
2044 if (!correct_ret_addr) {
2045 pr_err("kretprobe: Return address not found, not execute handler. Maybe there is a bug in the kernel.\n");
2046 BUG_ON(1);
2047 }
2048
2049 /*
2050 * Set the return address as the instruction pointer, because if the
2051 * user handler calls stack_trace_save_regs() with this 'regs',
2052 * the stack trace will start from the instruction pointer.
2053 */
2054 instruction_pointer_set(regs, (unsigned long)correct_ret_addr);
2055
2056 /* Run the user handler of the nodes. */
2057 first = current->kretprobe_instances.first;
2058 while (first) {
2059 ri = container_of(first, struct kretprobe_instance, llist);
2060
2061 if (WARN_ON_ONCE(ri->fp != frame_pointer))
2062 break;
2063
2064 rp = get_kretprobe(ri);
2065 if (rp && rp->handler) {
2066 struct kprobe *prev = kprobe_running();
2067
2068 __this_cpu_write(current_kprobe, &rp->kp);
2069 ri->ret_addr = correct_ret_addr;
2070 rp->handler(ri, regs);
2071 __this_cpu_write(current_kprobe, prev);
2072 }
2073 if (first == node)
2074 break;
2075
2076 first = first->next;
2077 }
2078
2079 arch_kretprobe_fixup_return(regs, correct_ret_addr);
2080
2081 /* Unlink all nodes for this frame. */
2082 first = current->kretprobe_instances.first;
2083 current->kretprobe_instances.first = node->next;
2084 node->next = NULL;
2085
2086 /* Recycle free instances. */
2087 while (first) {
2088 ri = container_of(first, struct kretprobe_instance, llist);
2089 first = first->next;
2090
2091 recycle_rp_inst(ri);
2092 }
2093
2094 return (unsigned long)correct_ret_addr;
2095 }
2096 NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
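/*
 * Sketch of the arch-side glue (illustrative; real architectures differ):
 * the assembly trampoline saves a 'struct pt_regs' and calls a helper
 * like this, which returns the real return address to resume at. The
 * frame-pointer argument must match what arch_prepare_kretprobe() stored
 * in kretprobe_instance::fp for this frame.
 *
 *	__used static unsigned long
 *	example_trampoline_handler(struct pt_regs *regs)
 *	{
 *		return __kretprobe_trampoline_handler(regs, (void *)regs->sp);
 *	}
 */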
2097
2098 /*
2099 * This kprobe pre_handler is registered with every kretprobe. When the
2100 * probe hits, it will set up the return probe.
2101 */
2102 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2103 {
2104 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
2105 struct kretprobe_holder *rph = rp->rph;
2106 struct kretprobe_instance *ri;
2107
2108 ri = objpool_pop(&rph->pool);
2109 if (!ri) {
2110 rp->nmissed++;
2111 return 0;
2112 }
2113
2114 if (rp->entry_handler && rp->entry_handler(ri, regs)) {
2115 objpool_push(ri, &rph->pool);
2116 return 0;
2117 }
2118
2119 arch_prepare_kretprobe(ri, regs);
2120
2121 __llist_add(&ri->llist, &current->kretprobe_instances);
2122
2123 return 0;
2124 }
2125 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2126 #else /* CONFIG_KRETPROBE_ON_RETHOOK */
2127 /*
2128 * This kprobe pre_handler is registered with every kretprobe. When the
2129 * probe hits, it will set up the return probe.
2130 */
2131 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2132 {
2133 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
2134 struct kretprobe_instance *ri;
2135 struct rethook_node *rhn;
2136
2137 rhn = rethook_try_get(rp->rh);
2138 if (!rhn) {
2139 rp->nmissed++;
2140 return 0;
2141 }
2142
2143 ri = container_of(rhn, struct kretprobe_instance, node);
2144
2145 if (rp->entry_handler && rp->entry_handler(ri, regs))
2146 rethook_recycle(rhn);
2147 else
2148 rethook_hook(rhn, regs, kprobe_ftrace(p));
2149
2150 return 0;
2151 }
2152 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2153
2154 static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
2155 unsigned long ret_addr,
2156 struct pt_regs *regs)
2157 {
2158 struct kretprobe *rp = (struct kretprobe *)data;
2159 struct kretprobe_instance *ri;
2160 struct kprobe_ctlblk *kcb;
2161
2162 /* The data must NOT be NULL; otherwise the rethook data structure is broken. */
2163 if (WARN_ON_ONCE(!data) || !rp->handler)
2164 return;
2165
2166 __this_cpu_write(current_kprobe, &rp->kp);
2167 kcb = get_kprobe_ctlblk();
2168 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
2169
2170 ri = container_of(rh, struct kretprobe_instance, node);
2171 rp->handler(ri, regs);
2172
2173 __this_cpu_write(current_kprobe, NULL);
2174 }
2175 NOKPROBE_SYMBOL(kretprobe_rethook_handler);
2176
2177 #endif /* !CONFIG_KRETPROBE_ON_RETHOOK */
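/*
 * Usage sketch (names hypothetical): combining an entry_handler with
 * per-instance data. 'data_size' bytes are reserved in each
 * kretprobe_instance and can carry state from function entry to return.
 * In both pre_handler variants above, a non-zero return from the
 * entry_handler recycles the instance immediately, so the return is not
 * hooked.
 *
 *	struct example_data {
 *		ktime_t entry_stamp;
 *	};
 *
 *	static int example_entry(struct kretprobe_instance *ri,
 *				 struct pt_regs *regs)
 *	{
 *		struct example_data *data = (struct example_data *)ri->data;
 *
 *		data->entry_stamp = ktime_get();
 *		return 0;
 *	}
 */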
2178
2179 /**
2180 * kprobe_on_func_entry() -- check whether given address is function entry
2181 * @addr: Target address
2182 * @sym: Target symbol name
2183 * @offset: The offset from the symbol or the address
2184 *
2185 * This checks whether the given @addr+@offset or @sym+@offset is on a
2186 * function entry address or not.
2187 * It returns 0 if it is the function entry, or -EINVAL if it is not.
2188 * It also returns -ENOENT if the symbol or address lookup fails.
2189 * The caller must pass either @addr or @sym (the other one must be NULL),
2190 * otherwise this returns -EINVAL.
2191 */
2192 int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
2193 {
2194 bool on_func_entry;
2195 kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset, &on_func_entry);
2196
2197 if (IS_ERR(kp_addr))
2198 return PTR_ERR(kp_addr);
2199
2200 if (!on_func_entry)
2201 return -EINVAL;
2202
2203 return 0;
2204 }
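/*
 * Usage sketch: validating a location before registering a probe that
 * only makes sense on function entry (exactly what register_kretprobe()
 * does below). The symbol name is hypothetical.
 *
 *	if (kprobe_on_func_entry(NULL, "vfs_read", 0))
 *		return -EINVAL;
 */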
2205
2206 int register_kretprobe(struct kretprobe *rp)
2207 {
2208 int ret;
2209 int i;
2210 void *addr;
2211
2212 ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
2213 if (ret)
2214 return ret;
2215
2216 /* If only 'rp->kp.addr' is specified, check for re-registration of the kprobe. */
2217 if (rp->kp.addr && warn_kprobe_rereg(&rp->kp))
2218 return -EINVAL;
2219
2220 if (kretprobe_blacklist_size) {
2221 addr = kprobe_addr(&rp->kp);
2222 if (IS_ERR(addr))
2223 return PTR_ERR(addr);
2224
2225 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2226 if (kretprobe_blacklist[i].addr == addr)
2227 return -EINVAL;
2228 }
2229 }
2230
2231 if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
2232 return -E2BIG;
2233
2234 rp->kp.pre_handler = pre_handler_kretprobe;
2235 rp->kp.post_handler = NULL;
2236
2237 /* Pre-allocate memory for max kretprobe instances */
2238 if (rp->maxactive <= 0)
2239 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
2240
2241 #ifdef CONFIG_KRETPROBE_ON_RETHOOK
2242 rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler,
2243 sizeof(struct kretprobe_instance) +
2244 rp->data_size, rp->maxactive);
2245 if (IS_ERR(rp->rh))
2246 return PTR_ERR(rp->rh);
2247
2248 rp->nmissed = 0;
2249 /* Establish function entry probe point */
2250 ret = register_kprobe(&rp->kp);
2251 if (ret != 0) {
2252 rethook_free(rp->rh);
2253 rp->rh = NULL;
2254 }
2255 #else /* !CONFIG_KRETPROBE_ON_RETHOOK */
2256 rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
2257 if (!rp->rph)
2258 return -ENOMEM;
2259
2260 if (objpool_init(&rp->rph->pool, rp->maxactive, rp->data_size +
2261 sizeof(struct kretprobe_instance), GFP_KERNEL,
2262 rp->rph, kretprobe_init_inst, kretprobe_fini_pool)) {
2263 kfree(rp->rph);
2264 rp->rph = NULL;
2265 return -ENOMEM;
2266 }
2267 rcu_assign_pointer(rp->rph->rp, rp);
2268 rp->nmissed = 0;
2269 /* Establish function entry probe point */
2270 ret = register_kprobe(&rp->kp);
2271 if (ret != 0)
2272 free_rp_inst(rp);
2273 #endif
2274 return ret;
2275 }
2276 EXPORT_SYMBOL_GPL(register_kretprobe);
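/*
 * Usage sketch (modeled on samples/kprobes/kretprobe_example.c; names
 * hypothetical, reusing 'example_entry' and 'struct example_data' from
 * the sketch above): a complete kretprobe. regs_return_value() extracts
 * the probed function's return value in an arch-independent way.
 *
 *	static int example_ret(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		pr_info("returned %lu\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe example_krp = {
 *		.handler	= example_ret,
 *		.entry_handler	= example_entry,
 *		.data_size	= sizeof(struct example_data),
 *		.maxactive	= 20,
 *		.kp.symbol_name	= "kernel_clone",
 *	};
 *
 *	(in module init:)
 *	ret = register_kretprobe(&example_krp);
 */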
2277
2278 int register_kretprobes(struct kretprobe **rps, int num)
2279 {
2280 int ret = 0, i;
2281
2282 if (num <= 0)
2283 return -EINVAL;
2284 for (i = 0; i < num; i++) {
2285 ret = register_kretprobe(rps[i]);
2286 if (ret < 0) {
2287 if (i > 0)
2288 unregister_kretprobes(rps, i);
2289 break;
2290 }
2291 }
2292 return ret;
2293 }
2294 EXPORT_SYMBOL_GPL(register_kretprobes);
2295
2296 void unregister_kretprobe(struct kretprobe *rp)
2297 {
2298 unregister_kretprobes(&rp, 1);
2299 }
2300 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2301
2302 void unregister_kretprobes(struct kretprobe **rps, int num)
2303 {
2304 int i;
2305
2306 if (num <= 0)
2307 return;
2308 mutex_lock(&kprobe_mutex);
2309 for (i = 0; i < num; i++) {
2310 if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2311 rps[i]->kp.addr = NULL;
2312 #ifdef CONFIG_KRETPROBE_ON_RETHOOK
2313 rethook_free(rps[i]->rh);
2314 #else
2315 rcu_assign_pointer(rps[i]->rph->rp, NULL);
2316 #endif
2317 }
2318 mutex_unlock(&kprobe_mutex);
2319
2320 synchronize_rcu();
2321 for (i = 0; i < num; i++) {
2322 if (rps[i]->kp.addr) {
2323 __unregister_kprobe_bottom(&rps[i]->kp);
2324 #ifndef CONFIG_KRETPROBE_ON_RETHOOK
2325 free_rp_inst(rps[i]);
2326 #endif
2327 }
2328 }
2329 }
2330 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2331
2332 #else /* CONFIG_KRETPROBES */
2333 int register_kretprobe(struct kretprobe *rp)
2334 {
2335 return -EOPNOTSUPP;
2336 }
2337 EXPORT_SYMBOL_GPL(register_kretprobe);
2338
2339 int register_kretprobes(struct kretprobe **rps, int num)
2340 {
2341 return -EOPNOTSUPP;
2342 }
2343 EXPORT_SYMBOL_GPL(register_kretprobes);
2344
2345 void unregister_kretprobe(struct kretprobe *rp)
2346 {
2347 }
2348 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2349
2350 void unregister_kretprobes(struct kretprobe **rps, int num)
2351 {
2352 }
2353 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2354
2355 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2356 {
2357 return 0;
2358 }
2359 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2360
2361 #endif /* CONFIG_KRETPROBES */
2362
2363 /* Set the kprobe gone and remove its instruction buffer. */
2364 static void kill_kprobe(struct kprobe *p)
2365 {
2366 struct kprobe *kp;
2367
2368 lockdep_assert_held(&kprobe_mutex);
2369
2370 /*
2371 * The module is going away. We should disarm any kprobe which
2372 * is using ftrace, because the ftrace framework is still available at
2373 * the 'MODULE_STATE_GOING' notification.
2374 */
2375 if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
2376 disarm_kprobe_ftrace(p);
2377
2378 p->flags |= KPROBE_FLAG_GONE;
2379 if (kprobe_aggrprobe(p)) {
2380 /*
2381 * If this is an aggr_kprobe, we have to list all the
2382 * chained probes and mark them GONE.
2383 */
2384 list_for_each_entry(kp, &p->list, list)
2385 kp->flags |= KPROBE_FLAG_GONE;
2386 p->post_handler = NULL;
2387 kill_optimized_kprobe(p);
2388 }
2389 /*
2390 * Here, we can remove insn_slot safely, because no thread calls
2391 * the original probed function (which will be freed soon) any more.
2392 */
2393 arch_remove_kprobe(p);
2394 }
2395
2396 /* Disable one kprobe */
2397 int disable_kprobe(struct kprobe *kp)
2398 {
2399 int ret = 0;
2400 struct kprobe *p;
2401
2402 mutex_lock(&kprobe_mutex);
2403
2404 /* Disable this kprobe */
2405 p = __disable_kprobe(kp);
2406 if (IS_ERR(p))
2407 ret = PTR_ERR(p);
2408
2409 mutex_unlock(&kprobe_mutex);
2410 return ret;
2411 }
2412 EXPORT_SYMBOL_GPL(disable_kprobe);
2413
2414 /* Enable one kprobe */
2415 int enable_kprobe(struct kprobe *kp)
2416 {
2417 int ret = 0;
2418 struct kprobe *p;
2419
2420 mutex_lock(&kprobe_mutex);
2421
2422 /* Check whether specified probe is valid. */
2423 p = __get_valid_kprobe(kp);
2424 if (unlikely(p == NULL)) {
2425 ret = -EINVAL;
2426 goto out;
2427 }
2428
2429 if (kprobe_gone(kp)) {
2430 /* This kprobe has gone; we cannot enable it. */
2431 ret = -EINVAL;
2432 goto out;
2433 }
2434
2435 if (p != kp)
2436 kp->flags &= ~KPROBE_FLAG_DISABLED;
2437
2438 if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2439 p->flags &= ~KPROBE_FLAG_DISABLED;
2440 ret = arm_kprobe(p);
2441 if (ret) {
2442 p->flags |= KPROBE_FLAG_DISABLED;
2443 if (p != kp)
2444 kp->flags |= KPROBE_FLAG_DISABLED;
2445 }
2446 }
2447 out:
2448 mutex_unlock(&kprobe_mutex);
2449 return ret;
2450 }
2451 EXPORT_SYMBOL_GPL(enable_kprobe);
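/*
 * Usage sketch (reusing the hypothetical 'example_kp1' from above):
 * pausing and resuming a registered probe without unregistering it. Only
 * the DISABLED flag and the arming state change; enable_kprobe() fails
 * with -EINVAL once the probed code is gone (e.g. after the target
 * module was unloaded).
 *
 *	disable_kprobe(&example_kp1);	(handler stops firing)
 *	...
 *	ret = enable_kprobe(&example_kp1);
 */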
2452
2453 /* Callers must NOT call this on the usual path. This is only for critical cases. */
2454 void dump_kprobe(struct kprobe *kp)
2455 {
2456 pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n",
2457 kp->symbol_name, kp->offset, kp->addr);
2458 }
2459 NOKPROBE_SYMBOL(dump_kprobe);
2460
2461 int kprobe_add_ksym_blacklist(unsigned long entry)
2462 {
2463 struct kprobe_blacklist_entry *ent;
2464 unsigned long offset = 0, size = 0;
2465
2466 if (!kernel_text_address(entry) ||
2467 !kallsyms_lookup_size_offset(entry, &size, &offset))
2468 return -EINVAL;
2469
2470 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2471 if (!ent)
2472 return -ENOMEM;
2473 ent->start_addr = entry;
2474 ent->end_addr = entry + size;
2475 INIT_LIST_HEAD(&ent->list);
2476 list_add_tail(&ent->list, &kprobe_blacklist);
2477
2478 return (int)size;
2479 }
2480
2481 /* Add all symbols in given area into kprobe blacklist */
2482 int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
2483 {
2484 unsigned long entry;
2485 int ret = 0;
2486
2487 for (entry = start; entry < end; entry += ret) {
2488 ret = kprobe_add_ksym_blacklist(entry);
2489 if (ret < 0)
2490 return ret;
2491 if (ret == 0) /* In case of alias symbol */
2492 ret = 1;
2493 }
2494 return 0;
2495 }
2496
2497 int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
2498 char *type, char *sym)
2499 {
2500 return -ERANGE;
2501 }
2502
2503 int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2504 char *sym)
2505 {
2506 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
2507 if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
2508 return 0;
2509 #ifdef CONFIG_OPTPROBES
2510 if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
2511 return 0;
2512 #endif
2513 #endif
2514 if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
2515 return 0;
2516 return -ERANGE;
2517 }
2518
2519 int __init __weak arch_populate_kprobe_blacklist(void)
2520 {
2521 return 0;
2522 }
2523
2524 /*
2525 * Lookup and populate the kprobe_blacklist.
2526 *
2527 * Unlike the kretprobe blacklist, we'll need to determine
2528 * the range of addresses that belong to said functions,
2529 * since a kprobe need not necessarily be at the beginning
2530 * of a function.
2531 */
2532 static int __init populate_kprobe_blacklist(unsigned long *start,
2533 unsigned long *end)
2534 {
2535 unsigned long entry;
2536 unsigned long *iter;
2537 int ret;
2538
2539 for (iter = start; iter < end; iter++) {
2540 entry = (unsigned long)dereference_symbol_descriptor((void *)*iter);
2541 ret = kprobe_add_ksym_blacklist(entry);
2542 if (ret == -EINVAL)
2543 continue;
2544 if (ret < 0)
2545 return ret;
2546 }
2547
2548 /* Symbols in '__kprobes_text' are blacklisted */
2549 ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
2550 (unsigned long)__kprobes_text_end);
2551 if (ret)
2552 return ret;
2553
2554 /* Symbols in 'noinstr' section are blacklisted */
2555 ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
2556 (unsigned long)__noinstr_text_end);
2557
2558 return ret ? : arch_populate_kprobe_blacklist();
2559 }
2560
2561 #ifdef CONFIG_MODULES
2562 /* Remove all symbols in given area from kprobe blacklist */
2563 static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
2564 {
2565 struct kprobe_blacklist_entry *ent, *n;
2566
2567 list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
2568 if (ent->start_addr < start || ent->start_addr >= end)
2569 continue;
2570 list_del(&ent->list);
2571 kfree(ent);
2572 }
2573 }
2574
2575 static void kprobe_remove_ksym_blacklist(unsigned long entry)
2576 {
2577 kprobe_remove_area_blacklist(entry, entry + 1);
2578 }
2579
2580 static void add_module_kprobe_blacklist(struct module *mod)
2581 {
2582 unsigned long start, end;
2583 int i;
2584
2585 if (mod->kprobe_blacklist) {
2586 for (i = 0; i < mod->num_kprobe_blacklist; i++)
2587 kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
2588 }
2589
2590 start = (unsigned long)mod->kprobes_text_start;
2591 if (start) {
2592 end = start + mod->kprobes_text_size;
2593 kprobe_add_area_blacklist(start, end);
2594 }
2595
2596 start = (unsigned long)mod->noinstr_text_start;
2597 if (start) {
2598 end = start + mod->noinstr_text_size;
2599 kprobe_add_area_blacklist(start, end);
2600 }
2601 }
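/*
 * Sketch (hypothetical module code): a module can keep fragile functions
 * off-limits with NOKPROBE_SYMBOL(); the loader collects those entries
 * into mod->kprobe_blacklist, which add_module_kprobe_blacklist() above
 * consumes at MODULE_STATE_COMING.
 *
 *	static int example_fragile(void *arg)
 *	{
 *		...
 *	}
 *	NOKPROBE_SYMBOL(example_fragile);
 */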
2602
2603 static void remove_module_kprobe_blacklist(struct module *mod)
2604 {
2605 unsigned long start, end;
2606 int i;
2607
2608 if (mod->kprobe_blacklist) {
2609 for (i = 0; i < mod->num_kprobe_blacklist; i++)
2610 kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
2611 }
2612
2613 start = (unsigned long)mod->kprobes_text_start;
2614 if (start) {
2615 end = start + mod->kprobes_text_size;
2616 kprobe_remove_area_blacklist(start, end);
2617 }
2618
2619 start = (unsigned long)mod->noinstr_text_start;
2620 if (start) {
2621 end = start + mod->noinstr_text_size;
2622 kprobe_remove_area_blacklist(start, end);
2623 }
2624 }
2625
2626 /* Module notifier callback, checking kprobes on the module */
2627 static int kprobes_module_callback(struct notifier_block *nb,
2628 unsigned long val, void *data)
2629 {
2630 struct module *mod = data;
2631 struct hlist_head *head;
2632 struct kprobe *p;
2633 unsigned int i;
2634 int checkcore = (val == MODULE_STATE_GOING);
2635
2636 if (val == MODULE_STATE_COMING) {
2637 mutex_lock(&kprobe_mutex);
2638 add_module_kprobe_blacklist(mod);
2639 mutex_unlock(&kprobe_mutex);
2640 }
2641 if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2642 return NOTIFY_DONE;
2643
2644 /*
2645 * When 'MODULE_STATE_GOING' is notified, both the module's '.text' and
2646 * '.init.text' sections will be freed. When 'MODULE_STATE_LIVE' is
2647 * notified, only the '.init.text' section will be freed. We need to
2648 * disable kprobes which have been inserted into those sections.
2649 */
2650 mutex_lock(&kprobe_mutex);
2651 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2652 head = &kprobe_table[i];
2653 hlist_for_each_entry(p, head, hlist)
2654 if (within_module_init((unsigned long)p->addr, mod) ||
2655 (checkcore &&
2656 within_module_core((unsigned long)p->addr, mod))) {
2657 /*
2658 * The vaddr this probe is installed at will soon
2659 * be vfreed but not synced to disk. Hence,
2660 * disarming the breakpoint isn't needed.
2661 *
2662 * Note, this will also move any optimized probes
2663 * that are pending to be removed from their
2664 * corresponding lists to the 'freeing_list' and
2665 * will not be touched by the delayed
2666 * kprobe_optimizer() work handler.
2667 */
2668 kill_kprobe(p);
2669 }
2670 }
2671 if (val == MODULE_STATE_GOING)
2672 remove_module_kprobe_blacklist(mod);
2673 mutex_unlock(&kprobe_mutex);
2674 return NOTIFY_DONE;
2675 }
2676
2677 static struct notifier_block kprobe_module_nb = {
2678 .notifier_call = kprobes_module_callback,
2679 .priority = 0
2680 };
2681
2682 static int kprobe_register_module_notifier(void)
2683 {
2684 return register_module_notifier(&kprobe_module_nb);
2685 }
2686 #else
2687 static int kprobe_register_module_notifier(void)
2688 {
2689 return 0;
2690 }
2691 #endif /* CONFIG_MODULES */
2692
2693 void kprobe_free_init_mem(void)
2694 {
2695 void *start = (void *)(&__init_begin);
2696 void *end = (void *)(&__init_end);
2697 struct hlist_head *head;
2698 struct kprobe *p;
2699 int i;
2700
2701 mutex_lock(&kprobe_mutex);
2702
2703 /* Kill all kprobes on initmem because the target code has been freed. */
2704 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2705 head = &kprobe_table[i];
2706 hlist_for_each_entry(p, head, hlist) {
2707 if (start <= (void *)p->addr && (void *)p->addr < end)
2708 kill_kprobe(p);
2709 }
2710 }
2711
2712 mutex_unlock(&kprobe_mutex);
2713 }
2714
2715 static int __init init_kprobes(void)
2716 {
2717 int i, err;
2718
2719 /* FIXME allocate the probe table, currently defined statically */
2720 /* initialize all list heads */
2721 for (i = 0; i < KPROBE_TABLE_SIZE; i++)
2722 INIT_HLIST_HEAD(&kprobe_table[i]);
2723
2724 err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2725 __stop_kprobe_blacklist);
2726 if (err)
2727 pr_err("Failed to populate blacklist (error %d), kprobes not restricted, be careful using them!\n", err);
2728
2729 if (kretprobe_blacklist_size) {
2730 /* lookup the function address from its name */
2731 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2732 kretprobe_blacklist[i].addr =
2733 kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2734 if (!kretprobe_blacklist[i].addr)
2735 pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. Maybe the target function is removed or renamed.\n",
2736 kretprobe_blacklist[i].name);
2737 }
2738 }
2739
2740 /* By default, kprobes are armed */
2741 kprobes_all_disarmed = false;
2742
2743 #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2744 /* Init 'kprobe_optinsn_slots' for allocation */
2745 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2746 #endif
2747
2748 err = arch_init_kprobes();
2749 if (!err)
2750 err = register_die_notifier(&kprobe_exceptions_nb);
2751 if (!err)
2752 err = kprobe_register_module_notifier();
2753
2754 kprobes_initialized = (err == 0);
2755 kprobe_sysctls_init();
2756 return err;
2757 }
2758 early_initcall(init_kprobes);
2759
2760 #if defined(CONFIG_OPTPROBES)
2761 static int __init init_optprobes(void)
2762 {
2763 /*
2764 * Enable kprobe optimization - this kicks the optimizer, which
2765 * depends on synchronize_rcu_tasks() and ksoftirqd; those are not
2766 * available at early-initcall time. So delay the optimization.
2767 */
2768 optimize_all_kprobes();
2769
2770 return 0;
2771 }
2772 subsys_initcall(init_optprobes);
2773 #endif
2774
2775 #ifdef CONFIG_DEBUG_FS
2776 static void report_probe(struct seq_file *pi, struct kprobe *p,
2777 const char *sym, int offset, char *modname, struct kprobe *pp)
2778 {
2779 char *kprobe_type;
2780 void *addr = p->addr;
2781
2782 if (p->pre_handler == pre_handler_kretprobe)
2783 kprobe_type = "r";
2784 else
2785 kprobe_type = "k";
2786
2787 if (!kallsyms_show_value(pi->file->f_cred))
2788 addr = NULL;
2789
2790 if (sym)
2791 seq_printf(pi, "%px %s %s+0x%x %s ",
2792 addr, kprobe_type, sym, offset,
2793 (modname ? modname : " "));
2794 else /* try to use %pS */
2795 seq_printf(pi, "%px %s %pS ",
2796 addr, kprobe_type, p->addr);
2797
2798 if (!pp)
2799 pp = p;
2800 seq_printf(pi, "%s%s%s%s\n",
2801 (kprobe_gone(p) ? "[GONE]" : ""),
2802 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
2803 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2804 (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2805 }
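/*
 * Illustrative output of /sys/kernel/debug/kprobes/list as produced
 * above (the addresses and symbols below are made up; readers without
 * kallsyms visibility see zeroed addresses instead):
 *
 *	ffffffff81234560  r  kernel_clone+0x0  [FTRACE]
 *	ffffffff81567890  k  vfs_read+0x0  [DISABLED]
 */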
2806
2807 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2808 {
2809 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2810 }
2811
2812 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2813 {
2814 (*pos)++;
2815 if (*pos >= KPROBE_TABLE_SIZE)
2816 return NULL;
2817 return pos;
2818 }
2819
2820 static void kprobe_seq_stop(struct seq_file *f, void *v)
2821 {
2822 /* Nothing to do */
2823 }
2824
2825 static int show_kprobe_addr(struct seq_file *pi, void *v)
2826 {
2827 struct hlist_head *head;
2828 struct kprobe *p, *kp;
2829 const char *sym;
2830 unsigned int i = *(loff_t *) v;
2831 unsigned long offset = 0;
2832 char *modname, namebuf[KSYM_NAME_LEN];
2833
2834 head = &kprobe_table[i];
2835 preempt_disable();
2836 hlist_for_each_entry_rcu(p, head, hlist) {
2837 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2838 &offset, &modname, namebuf);
2839 if (kprobe_aggrprobe(p)) {
2840 list_for_each_entry_rcu(kp, &p->list, list)
2841 report_probe(pi, kp, sym, offset, modname, p);
2842 } else
2843 report_probe(pi, p, sym, offset, modname, NULL);
2844 }
2845 preempt_enable();
2846 return 0;
2847 }
2848
2849 static const struct seq_operations kprobes_sops = {
2850 .start = kprobe_seq_start,
2851 .next = kprobe_seq_next,
2852 .stop = kprobe_seq_stop,
2853 .show = show_kprobe_addr
2854 };
2855
2856 DEFINE_SEQ_ATTRIBUTE(kprobes);
2857
2858 /* kprobes/blacklist -- shows which functions can not be probed */
2859 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2860 {
2861 mutex_lock(&kprobe_mutex);
2862 return seq_list_start(&kprobe_blacklist, *pos);
2863 }
2864
2865 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2866 {
2867 return seq_list_next(v, &kprobe_blacklist, pos);
2868 }
2869
2870 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2871 {
2872 struct kprobe_blacklist_entry *ent =
2873 list_entry(v, struct kprobe_blacklist_entry, list);
2874
2875 /*
2876 * If '/proc/kallsyms' is not showing kernel addresses, we won't
2877 * show them here either.
2878 */
2879 if (!kallsyms_show_value(m->file->f_cred))
2880 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
2881 (void *)ent->start_addr);
2882 else
2883 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2884 (void *)ent->end_addr, (void *)ent->start_addr);
2885 return 0;
2886 }
2887
2888 static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
2889 {
2890 mutex_unlock(&kprobe_mutex);
2891 }
2892
2893 static const struct seq_operations kprobe_blacklist_sops = {
2894 .start = kprobe_blacklist_seq_start,
2895 .next = kprobe_blacklist_seq_next,
2896 .stop = kprobe_blacklist_seq_stop,
2897 .show = kprobe_blacklist_seq_show,
2898 };
2899 DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
2900
2901 static int arm_all_kprobes(void)
2902 {
2903 struct hlist_head *head;
2904 struct kprobe *p;
2905 unsigned int i, total = 0, errors = 0;
2906 int err, ret = 0;
2907
2908 mutex_lock(&kprobe_mutex);
2909
2910 /* If kprobes are armed, just return */
2911 if (!kprobes_all_disarmed)
2912 goto already_enabled;
2913
2914 /*
2915 * optimize_kprobe() called by arm_kprobe() checks
2916 * kprobes_all_disarmed, so set kprobes_all_disarmed before
2917 * arm_kprobe.
2918 */
2919 kprobes_all_disarmed = false;
2920 /* Arming a kprobe doesn't optimize the kprobe itself. */
2921 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2922 head = &kprobe_table[i];
2923 /* Arm all kprobes on a best-effort basis */
2924 hlist_for_each_entry(p, head, hlist) {
2925 if (!kprobe_disabled(p)) {
2926 err = arm_kprobe(p);
2927 if (err) {
2928 errors++;
2929 ret = err;
2930 }
2931 total++;
2932 }
2933 }
2934 }
2935
2936 if (errors)
2937 pr_warn("Kprobes globally enabled, but failed to enable %d out of %d probes. Please check which kprobes are kept disabled via debugfs.\n",
2938 errors, total);
2939 else
2940 pr_info("Kprobes globally enabled\n");
2941
2942 already_enabled:
2943 mutex_unlock(&kprobe_mutex);
2944 return ret;
2945 }
2946
2947 static int disarm_all_kprobes(void)
2948 {
2949 struct hlist_head *head;
2950 struct kprobe *p;
2951 unsigned int i, total = 0, errors = 0;
2952 int err, ret = 0;
2953
2954 mutex_lock(&kprobe_mutex);
2955
2956 /* If kprobes are already disarmed, just return */
2957 if (kprobes_all_disarmed) {
2958 mutex_unlock(&kprobe_mutex);
2959 return 0;
2960 }
2961
2962 kprobes_all_disarmed = true;
2963
2964 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2965 head = &kprobe_table[i];
2966 /* Disarm all kprobes on a best-effort basis */
2967 hlist_for_each_entry(p, head, hlist) {
2968 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2969 err = disarm_kprobe(p, false);
2970 if (err) {
2971 errors++;
2972 ret = err;
2973 }
2974 total++;
2975 }
2976 }
2977 }
2978
2979 if (errors)
2980 pr_warn("Kprobes globally disabled, but failed to disable %d out of %d probes. Please check which kprobes are kept enabled via debugfs.\n",
2981 errors, total);
2982 else
2983 pr_info("Kprobes globally disabled\n");
2984
2985 mutex_unlock(&kprobe_mutex);
2986
2987 /* Wait until the optimizer has finished disarming all kprobes. */
2988 wait_for_kprobe_optimizer();
2989
2990 return ret;
2991 }
2992
2993 /*
2994 * XXX: The debugfs bool file interface doesn't allow for callbacks
2995 * when the bool state is switched. We can reuse that facility when it
2996 * becomes available.
2997 */
2998 static ssize_t read_enabled_file_bool(struct file *file,
2999 char __user *user_buf, size_t count, loff_t *ppos)
3000 {
3001 char buf[3];
3002
3003 if (!kprobes_all_disarmed)
3004 buf[0] = '1';
3005 else
3006 buf[0] = '0';
3007 buf[1] = '\n';
3008 buf[2] = 0x00;
3009 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
3010 }
3011
3012 static ssize_t write_enabled_file_bool(struct file *file,
3013 const char __user *user_buf, size_t count, loff_t *ppos)
3014 {
3015 bool enable;
3016 int ret;
3017
3018 ret = kstrtobool_from_user(user_buf, count, &enable);
3019 if (ret)
3020 return ret;
3021
3022 ret = enable ? arm_all_kprobes() : disarm_all_kprobes();
3023 if (ret)
3024 return ret;
3025
3026 return count;
3027 }
3028
3029 static const struct file_operations fops_kp = {
3030 .read = read_enabled_file_bool,
3031 .write = write_enabled_file_bool,
3032 .llseek = default_llseek,
3033 };
3034
3035 static int __init debugfs_kprobe_init(void)
3036 {
3037 struct dentry *dir;
3038
3039 dir = debugfs_create_dir("kprobes", NULL);
3040
3041 debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);
3042
3043 debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);
3044
3045 debugfs_create_file("blacklist", 0400, dir, NULL,
3046 &kprobe_blacklist_fops);
3047
3048 return 0;
3049 }
3050
3051 late_initcall(debugfs_kprobe_init);
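/*
 * Usage sketch: from userspace, the 'enabled' file created above toggles
 * global (dis)arming; any string accepted by kstrtobool() works:
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all kprobes)
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	(rearm them)
 */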
3052 #endif /* CONFIG_DEBUG_FS */
3053