// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>
#include <linux/static_call.h>
#include <linux/perf_event.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

#if !defined(CONFIG_OPTPROBES) || !defined(CONFIG_SYSCTL)
#define kprobe_sysctls_init() do { } while (0)
#endif

static int kprobes_initialized;
/* kprobe_table can be accessed by
 * - Normal hlist traversal and RCU add/del while 'kprobe_mutex' is held.
 * Or
 * - RCU hlist traversal under disabling preempt (breakpoint handlers)
 */
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with 'kprobe_mutex' held */
static bool kprobes_all_disarmed;

/* This protects 'kprobe_table' and 'optimizing_list' */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);

kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
					unsigned int __unused)
{
	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}

/*
 * Blacklist -- list of 'struct kprobe_blacklist_entry' to store info where
 * kprobes can not probe.
 */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * 'kprobe::ainsn.insn' points to the copy of the instruction to be
 * single-stepped.
x86_64, POWER4 and above have no-exec support and 86 * stepping on the instruction on a vmalloced/kmalloced/data page 87 * is a recipe for disaster 88 */ 89 struct kprobe_insn_page { 90 struct list_head list; 91 kprobe_opcode_t *insns; /* Page of instruction slots */ 92 struct kprobe_insn_cache *cache; 93 int nused; 94 int ngarbage; 95 char slot_used[]; 96 }; 97 98 #define KPROBE_INSN_PAGE_SIZE(slots) \ 99 (offsetof(struct kprobe_insn_page, slot_used) + \ 100 (sizeof(char) * (slots))) 101 102 static int slots_per_page(struct kprobe_insn_cache *c) 103 { 104 return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t)); 105 } 106 107 enum kprobe_slot_state { 108 SLOT_CLEAN = 0, 109 SLOT_DIRTY = 1, 110 SLOT_USED = 2, 111 }; 112 113 void __weak *alloc_insn_page(void) 114 { 115 /* 116 * Use module_alloc() so this page is within +/- 2GB of where the 117 * kernel image and loaded module images reside. This is required 118 * for most of the architectures. 119 * (e.g. x86-64 needs this to handle the %rip-relative fixups.) 120 */ 121 return module_alloc(PAGE_SIZE); 122 } 123 124 static void free_insn_page(void *page) 125 { 126 module_memfree(page); 127 } 128 129 struct kprobe_insn_cache kprobe_insn_slots = { 130 .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex), 131 .alloc = alloc_insn_page, 132 .free = free_insn_page, 133 .sym = KPROBE_INSN_PAGE_SYM, 134 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages), 135 .insn_size = MAX_INSN_SIZE, 136 .nr_garbage = 0, 137 }; 138 static int collect_garbage_slots(struct kprobe_insn_cache *c); 139 140 /** 141 * __get_insn_slot() - Find a slot on an executable page for an instruction. 142 * We allocate an executable page if there's no room on existing ones. 143 */ 144 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) 145 { 146 struct kprobe_insn_page *kip; 147 kprobe_opcode_t *slot = NULL; 148 149 /* Since the slot array is not protected by rcu, we need a mutex */ 150 mutex_lock(&c->mutex); 151 retry: 152 rcu_read_lock(); 153 list_for_each_entry_rcu(kip, &c->pages, list) { 154 if (kip->nused < slots_per_page(c)) { 155 int i; 156 157 for (i = 0; i < slots_per_page(c); i++) { 158 if (kip->slot_used[i] == SLOT_CLEAN) { 159 kip->slot_used[i] = SLOT_USED; 160 kip->nused++; 161 slot = kip->insns + (i * c->insn_size); 162 rcu_read_unlock(); 163 goto out; 164 } 165 } 166 /* kip->nused is broken. Fix it. */ 167 kip->nused = slots_per_page(c); 168 WARN_ON(1); 169 } 170 } 171 rcu_read_unlock(); 172 173 /* If there are any garbage slots, collect it and try again. */ 174 if (c->nr_garbage && collect_garbage_slots(c) == 0) 175 goto retry; 176 177 /* All out of space. Need to allocate a new page. */ 178 kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL); 179 if (!kip) 180 goto out; 181 182 kip->insns = c->alloc(); 183 if (!kip->insns) { 184 kfree(kip); 185 goto out; 186 } 187 INIT_LIST_HEAD(&kip->list); 188 memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c)); 189 kip->slot_used[0] = SLOT_USED; 190 kip->nused = 1; 191 kip->ngarbage = 0; 192 kip->cache = c; 193 list_add_rcu(&kip->list, &c->pages); 194 slot = kip->insns; 195 196 /* Record the perf ksymbol register event after adding the page */ 197 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns, 198 PAGE_SIZE, false, c->sym); 199 out: 200 mutex_unlock(&c->mutex); 201 return slot; 202 } 203 204 /* Return true if all garbages are collected, otherwise false. 
*/ 205 static bool collect_one_slot(struct kprobe_insn_page *kip, int idx) 206 { 207 kip->slot_used[idx] = SLOT_CLEAN; 208 kip->nused--; 209 if (kip->nused == 0) { 210 /* 211 * Page is no longer in use. Free it unless 212 * it's the last one. We keep the last one 213 * so as not to have to set it up again the 214 * next time somebody inserts a probe. 215 */ 216 if (!list_is_singular(&kip->list)) { 217 /* 218 * Record perf ksymbol unregister event before removing 219 * the page. 220 */ 221 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, 222 (unsigned long)kip->insns, PAGE_SIZE, true, 223 kip->cache->sym); 224 list_del_rcu(&kip->list); 225 synchronize_rcu(); 226 kip->cache->free(kip->insns); 227 kfree(kip); 228 } 229 return true; 230 } 231 return false; 232 } 233 234 static int collect_garbage_slots(struct kprobe_insn_cache *c) 235 { 236 struct kprobe_insn_page *kip, *next; 237 238 /* Ensure no-one is interrupted on the garbages */ 239 synchronize_rcu(); 240 241 list_for_each_entry_safe(kip, next, &c->pages, list) { 242 int i; 243 244 if (kip->ngarbage == 0) 245 continue; 246 kip->ngarbage = 0; /* we will collect all garbages */ 247 for (i = 0; i < slots_per_page(c); i++) { 248 if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i)) 249 break; 250 } 251 } 252 c->nr_garbage = 0; 253 return 0; 254 } 255 256 void __free_insn_slot(struct kprobe_insn_cache *c, 257 kprobe_opcode_t *slot, int dirty) 258 { 259 struct kprobe_insn_page *kip; 260 long idx; 261 262 mutex_lock(&c->mutex); 263 rcu_read_lock(); 264 list_for_each_entry_rcu(kip, &c->pages, list) { 265 idx = ((long)slot - (long)kip->insns) / 266 (c->insn_size * sizeof(kprobe_opcode_t)); 267 if (idx >= 0 && idx < slots_per_page(c)) 268 goto out; 269 } 270 /* Could not find this slot. */ 271 WARN_ON(1); 272 kip = NULL; 273 out: 274 rcu_read_unlock(); 275 /* Mark and sweep: this may sleep */ 276 if (kip) { 277 /* Check double free */ 278 WARN_ON(kip->slot_used[idx] != SLOT_USED); 279 if (dirty) { 280 kip->slot_used[idx] = SLOT_DIRTY; 281 kip->ngarbage++; 282 if (++c->nr_garbage > slots_per_page(c)) 283 collect_garbage_slots(c); 284 } else { 285 collect_one_slot(kip, idx); 286 } 287 } 288 mutex_unlock(&c->mutex); 289 } 290 291 /* 292 * Check given address is on the page of kprobe instruction slots. 293 * This will be used for checking whether the address on a stack 294 * is on a text area or not. 
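 *
 * Illustrative sketch (an addition for clarity, not part of the original
 * file): a caller that wants to treat these slots as executable text could
 * check an address roughly like this; the wrapper function name below is
 * made up for the example.
 *
 *	static bool ex_is_exec_text(unsigned long addr)
 *	{
 *		if (core_kernel_text(addr))
 *			return true;
 *		// out-of-line instruction slots hold executable copies too
 *		return __is_insn_slot_addr(&kprobe_insn_slots, addr);
 *	}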
295 */ 296 bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr) 297 { 298 struct kprobe_insn_page *kip; 299 bool ret = false; 300 301 rcu_read_lock(); 302 list_for_each_entry_rcu(kip, &c->pages, list) { 303 if (addr >= (unsigned long)kip->insns && 304 addr < (unsigned long)kip->insns + PAGE_SIZE) { 305 ret = true; 306 break; 307 } 308 } 309 rcu_read_unlock(); 310 311 return ret; 312 } 313 314 int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum, 315 unsigned long *value, char *type, char *sym) 316 { 317 struct kprobe_insn_page *kip; 318 int ret = -ERANGE; 319 320 rcu_read_lock(); 321 list_for_each_entry_rcu(kip, &c->pages, list) { 322 if ((*symnum)--) 323 continue; 324 strscpy(sym, c->sym, KSYM_NAME_LEN); 325 *type = 't'; 326 *value = (unsigned long)kip->insns; 327 ret = 0; 328 break; 329 } 330 rcu_read_unlock(); 331 332 return ret; 333 } 334 335 #ifdef CONFIG_OPTPROBES 336 void __weak *alloc_optinsn_page(void) 337 { 338 return alloc_insn_page(); 339 } 340 341 void __weak free_optinsn_page(void *page) 342 { 343 free_insn_page(page); 344 } 345 346 /* For optimized_kprobe buffer */ 347 struct kprobe_insn_cache kprobe_optinsn_slots = { 348 .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex), 349 .alloc = alloc_optinsn_page, 350 .free = free_optinsn_page, 351 .sym = KPROBE_OPTINSN_PAGE_SYM, 352 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages), 353 /* .insn_size is initialized later */ 354 .nr_garbage = 0, 355 }; 356 #endif 357 #endif 358 359 /* We have preemption disabled.. so it is safe to use __ versions */ 360 static inline void set_kprobe_instance(struct kprobe *kp) 361 { 362 __this_cpu_write(kprobe_instance, kp); 363 } 364 365 static inline void reset_kprobe_instance(void) 366 { 367 __this_cpu_write(kprobe_instance, NULL); 368 } 369 370 /* 371 * This routine is called either: 372 * - under the 'kprobe_mutex' - during kprobe_[un]register(). 373 * OR 374 * - with preemption disabled - from architecture specific code. 375 */ 376 struct kprobe *get_kprobe(void *addr) 377 { 378 struct hlist_head *head; 379 struct kprobe *p; 380 381 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; 382 hlist_for_each_entry_rcu(p, head, hlist, 383 lockdep_is_held(&kprobe_mutex)) { 384 if (p->addr == addr) 385 return p; 386 } 387 388 return NULL; 389 } 390 NOKPROBE_SYMBOL(get_kprobe); 391 392 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs); 393 394 /* Return true if 'p' is an aggregator */ 395 static inline bool kprobe_aggrprobe(struct kprobe *p) 396 { 397 return p->pre_handler == aggr_pre_handler; 398 } 399 400 /* Return true if 'p' is unused */ 401 static inline bool kprobe_unused(struct kprobe *p) 402 { 403 return kprobe_aggrprobe(p) && kprobe_disabled(p) && 404 list_empty(&p->list); 405 } 406 407 /* Keep all fields in the kprobe consistent. */ 408 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p) 409 { 410 memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t)); 411 memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn)); 412 } 413 414 #ifdef CONFIG_OPTPROBES 415 /* NOTE: This is protected by 'kprobe_mutex'. */ 416 static bool kprobes_allow_optimization; 417 418 /* 419 * Call all 'kprobe::pre_handler' on the list, but ignores its return value. 420 * This must be called from arch-dep optimized caller. 
421 */ 422 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs) 423 { 424 struct kprobe *kp; 425 426 list_for_each_entry_rcu(kp, &p->list, list) { 427 if (kp->pre_handler && likely(!kprobe_disabled(kp))) { 428 set_kprobe_instance(kp); 429 kp->pre_handler(kp, regs); 430 } 431 reset_kprobe_instance(); 432 } 433 } 434 NOKPROBE_SYMBOL(opt_pre_handler); 435 436 /* Free optimized instructions and optimized_kprobe */ 437 static void free_aggr_kprobe(struct kprobe *p) 438 { 439 struct optimized_kprobe *op; 440 441 op = container_of(p, struct optimized_kprobe, kp); 442 arch_remove_optimized_kprobe(op); 443 arch_remove_kprobe(p); 444 kfree(op); 445 } 446 447 /* Return true if the kprobe is ready for optimization. */ 448 static inline int kprobe_optready(struct kprobe *p) 449 { 450 struct optimized_kprobe *op; 451 452 if (kprobe_aggrprobe(p)) { 453 op = container_of(p, struct optimized_kprobe, kp); 454 return arch_prepared_optinsn(&op->optinsn); 455 } 456 457 return 0; 458 } 459 460 /* Return true if the kprobe is disarmed. Note: p must be on hash list */ 461 static inline bool kprobe_disarmed(struct kprobe *p) 462 { 463 struct optimized_kprobe *op; 464 465 /* If kprobe is not aggr/opt probe, just return kprobe is disabled */ 466 if (!kprobe_aggrprobe(p)) 467 return kprobe_disabled(p); 468 469 op = container_of(p, struct optimized_kprobe, kp); 470 471 return kprobe_disabled(p) && list_empty(&op->list); 472 } 473 474 /* Return true if the probe is queued on (un)optimizing lists */ 475 static bool kprobe_queued(struct kprobe *p) 476 { 477 struct optimized_kprobe *op; 478 479 if (kprobe_aggrprobe(p)) { 480 op = container_of(p, struct optimized_kprobe, kp); 481 if (!list_empty(&op->list)) 482 return true; 483 } 484 return false; 485 } 486 487 /* 488 * Return an optimized kprobe whose optimizing code replaces 489 * instructions including 'addr' (exclude breakpoint). 490 */ 491 static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr) 492 { 493 int i; 494 struct kprobe *p = NULL; 495 struct optimized_kprobe *op; 496 497 /* Don't check i == 0, since that is a breakpoint case. */ 498 for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++) 499 p = get_kprobe(addr - i); 500 501 if (p && kprobe_optready(p)) { 502 op = container_of(p, struct optimized_kprobe, kp); 503 if (arch_within_optimized_kprobe(op, addr)) 504 return p; 505 } 506 507 return NULL; 508 } 509 510 /* Optimization staging list, protected by 'kprobe_mutex' */ 511 static LIST_HEAD(optimizing_list); 512 static LIST_HEAD(unoptimizing_list); 513 static LIST_HEAD(freeing_list); 514 515 static void kprobe_optimizer(struct work_struct *work); 516 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); 517 #define OPTIMIZE_DELAY 5 518 519 /* 520 * Optimize (replace a breakpoint with a jump) kprobes listed on 521 * 'optimizing_list'. 522 */ 523 static void do_optimize_kprobes(void) 524 { 525 lockdep_assert_held(&text_mutex); 526 /* 527 * The optimization/unoptimization refers 'online_cpus' via 528 * stop_machine() and cpu-hotplug modifies the 'online_cpus'. 529 * And same time, 'text_mutex' will be held in cpu-hotplug and here. 530 * This combination can cause a deadlock (cpu-hotplug tries to lock 531 * 'text_mutex' but stop_machine() can not be done because 532 * the 'online_cpus' has been changed) 533 * To avoid this deadlock, caller must have locked cpu-hotplug 534 * for preventing cpu-hotplug outside of 'text_mutex' locking. 
	 */
	lockdep_assert_cpus_held();

	/* Optimization is never done while kprobes are disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	arch_optimize_kprobes(&optimizing_list);
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on 'unoptimizing_list'.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	lockdep_assert_held(&text_mutex);
	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	/* Unoptimization must always be done */
	if (list_empty(&unoptimizing_list))
		return;

	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop on 'freeing_list' for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Switching from detour code to origin */
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes().)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
}

/* Reclaim all kprobes on the 'freeing_list' */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		list_del_init(&op->list);
		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
			/*
			 * This must not happen, but if there is a kprobe
			 * still in use, keep it on kprobes hash list.
			 */
			continue;
		}
		free_aggr_kprobe(&op->kp);
	}
}

/* Start the optimizer after OPTIMIZE_DELAY has passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	mutex_lock(&text_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for the quiescence period to ensure all potentially
	 * preempted tasks have normally scheduled. Because an optprobe
	 * may modify multiple instructions, there is a chance that the Nth
	 * instruction is preempted. In that case, such tasks can return
	 * to the 2nd-Nth byte of the jump instruction. This wait is for
	 * avoiding it. Note that on a non-preemptive kernel, this is
	 * transparently converted to synchronize_sched() to wait for all
	 * interrupts to have completed.
	 */
	synchronize_rcu_tasks();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	/* Step 5: Kick optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();

	mutex_unlock(&kprobe_mutex);
}
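
/*
 * Example (an illustrative sketch added for clarity, not part of the original
 * file): whether a probe can be jump-optimized by the optimizer above also
 * depends on how its user sets it up.  A kprobe with only a pre_handler may
 * be turned into a jump, while one that supplies a post_handler stays on (or
 * falls back to) the breakpoint-based path, see optimize_kprobe() and
 * add_new_kprobe().  The symbol and handler names below are made up, and the
 * two probes show alternative set-ups rather than code meant to coexist.
 *
 *	static int ex_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static void ex_post(struct kprobe *p, struct pt_regs *regs,
 *			    unsigned long flags)
 *	{
 *	}
 *
 *	// may be optimized: pre_handler only
 *	static struct kprobe ex_fast = {
 *		.symbol_name	= "vfs_read",
 *		.pre_handler	= ex_pre,
 *	};
 *
 *	// never optimized: a post_handler forces the breakpoint path
 *	static struct kprobe ex_slow = {
 *		.symbol_name	= "vfs_read",
 *		.pre_handler	= ex_pre,
 *		.post_handler	= ex_post,
 *	};
 */
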
/* Wait for completing optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* This will also make 'optimizing_work' execute immediately */
		flush_delayed_work(&optimizing_work);
		/* 'optimizing_work' might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

static bool optprobe_queued_unopt(struct optimized_kprobe *op)
{
	struct optimized_kprobe *_op;

	list_for_each_entry(_op, &unoptimizing_list, list) {
		if (op == _op)
			return true;
	}

	return false;
}

/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* kprobes with 'post_handler' can not be optimized */
	if (p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check there are no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
		if (optprobe_queued_unopt(op)) {
			/* This is under unoptimizing. Just dequeue the probe */
			list_del_init(&op->list);
		}
		return;
	}
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	/*
	 * On the 'unoptimizing_list' and 'optimizing_list',
	 * 'op' must have the OPTIMIZED flag
	 */
	if (WARN_ON_ONCE(!list_empty(&op->list)))
		return;

	list_add(&op->list, &optimizing_list);
	kick_kprobe_optimizer();
}

/* Short cut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	lockdep_assert_cpus_held();
	arch_unoptimize_kprobe(op);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return;	/* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p))
		return;

	if (!list_empty(&op->list)) {
		if (optprobe_queued_unopt(op)) {
			/* Queued in unoptimizing queue */
			if (force) {
				/*
				 * Forcibly unoptimize the kprobe here, and queue it
				 * in the freeing list for release afterwards.
745 */ 746 force_unoptimize_kprobe(op); 747 list_move(&op->list, &freeing_list); 748 } 749 } else { 750 /* Dequeue from the optimizing queue */ 751 list_del_init(&op->list); 752 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 753 } 754 return; 755 } 756 757 /* Optimized kprobe case */ 758 if (force) { 759 /* Forcibly update the code: this is a special case */ 760 force_unoptimize_kprobe(op); 761 } else { 762 list_add(&op->list, &unoptimizing_list); 763 kick_kprobe_optimizer(); 764 } 765 } 766 767 /* Cancel unoptimizing for reusing */ 768 static int reuse_unused_kprobe(struct kprobe *ap) 769 { 770 struct optimized_kprobe *op; 771 772 /* 773 * Unused kprobe MUST be on the way of delayed unoptimizing (means 774 * there is still a relative jump) and disabled. 775 */ 776 op = container_of(ap, struct optimized_kprobe, kp); 777 WARN_ON_ONCE(list_empty(&op->list)); 778 /* Enable the probe again */ 779 ap->flags &= ~KPROBE_FLAG_DISABLED; 780 /* Optimize it again. (remove from 'op->list') */ 781 if (!kprobe_optready(ap)) 782 return -EINVAL; 783 784 optimize_kprobe(ap); 785 return 0; 786 } 787 788 /* Remove optimized instructions */ 789 static void kill_optimized_kprobe(struct kprobe *p) 790 { 791 struct optimized_kprobe *op; 792 793 op = container_of(p, struct optimized_kprobe, kp); 794 if (!list_empty(&op->list)) 795 /* Dequeue from the (un)optimization queue */ 796 list_del_init(&op->list); 797 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 798 799 if (kprobe_unused(p)) { 800 /* Enqueue if it is unused */ 801 list_add(&op->list, &freeing_list); 802 /* 803 * Remove unused probes from the hash list. After waiting 804 * for synchronization, this probe is reclaimed. 805 * (reclaiming is done by do_free_cleaned_kprobes().) 806 */ 807 hlist_del_rcu(&op->kp.hlist); 808 } 809 810 /* Don't touch the code, because it is already freed. */ 811 arch_remove_optimized_kprobe(op); 812 } 813 814 static inline 815 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) 816 { 817 if (!kprobe_ftrace(p)) 818 arch_prepare_optimized_kprobe(op, p); 819 } 820 821 /* Try to prepare optimized instructions */ 822 static void prepare_optimized_kprobe(struct kprobe *p) 823 { 824 struct optimized_kprobe *op; 825 826 op = container_of(p, struct optimized_kprobe, kp); 827 __prepare_optimized_kprobe(op, p); 828 } 829 830 /* Allocate new optimized_kprobe and try to prepare optimized instructions. */ 831 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 832 { 833 struct optimized_kprobe *op; 834 835 op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL); 836 if (!op) 837 return NULL; 838 839 INIT_LIST_HEAD(&op->list); 840 op->kp.addr = p->addr; 841 __prepare_optimized_kprobe(op, p); 842 843 return &op->kp; 844 } 845 846 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); 847 848 /* 849 * Prepare an optimized_kprobe and optimize it. 850 * NOTE: 'p' must be a normal registered kprobe. 851 */ 852 static void try_to_optimize_kprobe(struct kprobe *p) 853 { 854 struct kprobe *ap; 855 struct optimized_kprobe *op; 856 857 /* Impossible to optimize ftrace-based kprobe. */ 858 if (kprobe_ftrace(p)) 859 return; 860 861 /* For preparing optimization, jump_label_text_reserved() is called. */ 862 cpus_read_lock(); 863 jump_label_lock(); 864 mutex_lock(&text_mutex); 865 866 ap = alloc_aggr_kprobe(p); 867 if (!ap) 868 goto out; 869 870 op = container_of(ap, struct optimized_kprobe, kp); 871 if (!arch_prepared_optinsn(&op->optinsn)) { 872 /* If failed to setup optimizing, fallback to kprobe. 
*/ 873 arch_remove_optimized_kprobe(op); 874 kfree(op); 875 goto out; 876 } 877 878 init_aggr_kprobe(ap, p); 879 optimize_kprobe(ap); /* This just kicks optimizer thread. */ 880 881 out: 882 mutex_unlock(&text_mutex); 883 jump_label_unlock(); 884 cpus_read_unlock(); 885 } 886 887 static void optimize_all_kprobes(void) 888 { 889 struct hlist_head *head; 890 struct kprobe *p; 891 unsigned int i; 892 893 mutex_lock(&kprobe_mutex); 894 /* If optimization is already allowed, just return. */ 895 if (kprobes_allow_optimization) 896 goto out; 897 898 cpus_read_lock(); 899 kprobes_allow_optimization = true; 900 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 901 head = &kprobe_table[i]; 902 hlist_for_each_entry(p, head, hlist) 903 if (!kprobe_disabled(p)) 904 optimize_kprobe(p); 905 } 906 cpus_read_unlock(); 907 pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n"); 908 out: 909 mutex_unlock(&kprobe_mutex); 910 } 911 912 #ifdef CONFIG_SYSCTL 913 static void unoptimize_all_kprobes(void) 914 { 915 struct hlist_head *head; 916 struct kprobe *p; 917 unsigned int i; 918 919 mutex_lock(&kprobe_mutex); 920 /* If optimization is already prohibited, just return. */ 921 if (!kprobes_allow_optimization) { 922 mutex_unlock(&kprobe_mutex); 923 return; 924 } 925 926 cpus_read_lock(); 927 kprobes_allow_optimization = false; 928 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 929 head = &kprobe_table[i]; 930 hlist_for_each_entry(p, head, hlist) { 931 if (!kprobe_disabled(p)) 932 unoptimize_kprobe(p, false); 933 } 934 } 935 cpus_read_unlock(); 936 mutex_unlock(&kprobe_mutex); 937 938 /* Wait for unoptimizing completion. */ 939 wait_for_kprobe_optimizer(); 940 pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n"); 941 } 942 943 static DEFINE_MUTEX(kprobe_sysctl_mutex); 944 static int sysctl_kprobes_optimization; 945 static int proc_kprobes_optimization_handler(struct ctl_table *table, 946 int write, void *buffer, 947 size_t *length, loff_t *ppos) 948 { 949 int ret; 950 951 mutex_lock(&kprobe_sysctl_mutex); 952 sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0; 953 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 954 955 if (sysctl_kprobes_optimization) 956 optimize_all_kprobes(); 957 else 958 unoptimize_all_kprobes(); 959 mutex_unlock(&kprobe_sysctl_mutex); 960 961 return ret; 962 } 963 964 static struct ctl_table kprobe_sysctls[] = { 965 { 966 .procname = "kprobes-optimization", 967 .data = &sysctl_kprobes_optimization, 968 .maxlen = sizeof(int), 969 .mode = 0644, 970 .proc_handler = proc_kprobes_optimization_handler, 971 .extra1 = SYSCTL_ZERO, 972 .extra2 = SYSCTL_ONE, 973 }, 974 {} 975 }; 976 977 static void __init kprobe_sysctls_init(void) 978 { 979 register_sysctl_init("debug", kprobe_sysctls); 980 } 981 #endif /* CONFIG_SYSCTL */ 982 983 /* Put a breakpoint for a probe. */ 984 static void __arm_kprobe(struct kprobe *p) 985 { 986 struct kprobe *_p; 987 988 lockdep_assert_held(&text_mutex); 989 990 /* Find the overlapping optimized kprobes. */ 991 _p = get_optimized_kprobe(p->addr); 992 if (unlikely(_p)) 993 /* Fallback to unoptimized kprobe */ 994 unoptimize_kprobe(_p, true); 995 996 arch_arm_kprobe(p); 997 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */ 998 } 999 1000 /* Remove the breakpoint of a probe. 
 */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	lockdep_assert_held(&text_mutex);

	/* Try to unoptimize */
	unoptimize_kprobe(p, kprobes_all_disarmed);

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, re-optimize it. */
		_p = get_optimized_kprobe(p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/*
	 * TODO: Since unoptimization and real disarming will be done by
	 * the worker thread, we can not check here whether other probes are
	 * unoptimized because of this probe. They should be re-optimized
	 * by the worker thread.
	 */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

static int reuse_unused_kprobe(struct kprobe *ap)
{
	/*
	 * If the optimized kprobe is NOT supported, the aggr kprobe is
	 * released at the same time that the last aggregated kprobe is
	 * unregistered.
	 * Thus there should be no chance to reuse an unused kprobe.
	 */
	WARN_ON_ONCE(1);
	return -EINVAL;
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};

static int kprobe_ipmodify_enabled;
static int kprobe_ftrace_enabled;

static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
			       int *cnt)
{
	int ret = 0;

	lockdep_assert_held(&kprobe_mutex);

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
	if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret))
		return ret;

	if (*cnt == 0) {
		ret = register_ftrace_function(ops);
		if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
			goto err_ftrace;
	}

	(*cnt)++;
	return ret;

err_ftrace:
	/*
	 * At this point, since ops is not registered, we should be safe from
	 * registering an empty filter.
	 */
	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	return ret;
}

static int arm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __arm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ?
&kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); 1111 } 1112 1113 static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops, 1114 int *cnt) 1115 { 1116 int ret = 0; 1117 1118 lockdep_assert_held(&kprobe_mutex); 1119 1120 if (*cnt == 1) { 1121 ret = unregister_ftrace_function(ops); 1122 if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret)) 1123 return ret; 1124 } 1125 1126 (*cnt)--; 1127 1128 ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0); 1129 WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n", 1130 p->addr, ret); 1131 return ret; 1132 } 1133 1134 static int disarm_kprobe_ftrace(struct kprobe *p) 1135 { 1136 bool ipmodify = (p->post_handler != NULL); 1137 1138 return __disarm_kprobe_ftrace(p, 1139 ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops, 1140 ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); 1141 } 1142 #else /* !CONFIG_KPROBES_ON_FTRACE */ 1143 static inline int arm_kprobe_ftrace(struct kprobe *p) 1144 { 1145 return -ENODEV; 1146 } 1147 1148 static inline int disarm_kprobe_ftrace(struct kprobe *p) 1149 { 1150 return -ENODEV; 1151 } 1152 #endif 1153 1154 static int prepare_kprobe(struct kprobe *p) 1155 { 1156 /* Must ensure p->addr is really on ftrace */ 1157 if (kprobe_ftrace(p)) 1158 return arch_prepare_kprobe_ftrace(p); 1159 1160 return arch_prepare_kprobe(p); 1161 } 1162 1163 static int arm_kprobe(struct kprobe *kp) 1164 { 1165 if (unlikely(kprobe_ftrace(kp))) 1166 return arm_kprobe_ftrace(kp); 1167 1168 cpus_read_lock(); 1169 mutex_lock(&text_mutex); 1170 __arm_kprobe(kp); 1171 mutex_unlock(&text_mutex); 1172 cpus_read_unlock(); 1173 1174 return 0; 1175 } 1176 1177 static int disarm_kprobe(struct kprobe *kp, bool reopt) 1178 { 1179 if (unlikely(kprobe_ftrace(kp))) 1180 return disarm_kprobe_ftrace(kp); 1181 1182 cpus_read_lock(); 1183 mutex_lock(&text_mutex); 1184 __disarm_kprobe(kp, reopt); 1185 mutex_unlock(&text_mutex); 1186 cpus_read_unlock(); 1187 1188 return 0; 1189 } 1190 1191 /* 1192 * Aggregate handlers for multiple kprobes support - these handlers 1193 * take care of invoking the individual kprobe handlers on p->list 1194 */ 1195 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) 1196 { 1197 struct kprobe *kp; 1198 1199 list_for_each_entry_rcu(kp, &p->list, list) { 1200 if (kp->pre_handler && likely(!kprobe_disabled(kp))) { 1201 set_kprobe_instance(kp); 1202 if (kp->pre_handler(kp, regs)) 1203 return 1; 1204 } 1205 reset_kprobe_instance(); 1206 } 1207 return 0; 1208 } 1209 NOKPROBE_SYMBOL(aggr_pre_handler); 1210 1211 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, 1212 unsigned long flags) 1213 { 1214 struct kprobe *kp; 1215 1216 list_for_each_entry_rcu(kp, &p->list, list) { 1217 if (kp->post_handler && likely(!kprobe_disabled(kp))) { 1218 set_kprobe_instance(kp); 1219 kp->post_handler(kp, regs, flags); 1220 reset_kprobe_instance(); 1221 } 1222 } 1223 } 1224 NOKPROBE_SYMBOL(aggr_post_handler); 1225 1226 /* Walks the list and increments 'nmissed' if 'p' has child probes. 
*/ 1227 void kprobes_inc_nmissed_count(struct kprobe *p) 1228 { 1229 struct kprobe *kp; 1230 1231 if (!kprobe_aggrprobe(p)) { 1232 p->nmissed++; 1233 } else { 1234 list_for_each_entry_rcu(kp, &p->list, list) 1235 kp->nmissed++; 1236 } 1237 } 1238 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count); 1239 1240 static struct kprobe kprobe_busy = { 1241 .addr = (void *) get_kprobe, 1242 }; 1243 1244 void kprobe_busy_begin(void) 1245 { 1246 struct kprobe_ctlblk *kcb; 1247 1248 preempt_disable(); 1249 __this_cpu_write(current_kprobe, &kprobe_busy); 1250 kcb = get_kprobe_ctlblk(); 1251 kcb->kprobe_status = KPROBE_HIT_ACTIVE; 1252 } 1253 1254 void kprobe_busy_end(void) 1255 { 1256 __this_cpu_write(current_kprobe, NULL); 1257 preempt_enable(); 1258 } 1259 1260 #if !defined(CONFIG_KRETPROBE_ON_RETHOOK) 1261 static void free_rp_inst_rcu(struct rcu_head *head) 1262 { 1263 struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu); 1264 1265 if (refcount_dec_and_test(&ri->rph->ref)) 1266 kfree(ri->rph); 1267 kfree(ri); 1268 } 1269 NOKPROBE_SYMBOL(free_rp_inst_rcu); 1270 1271 static void recycle_rp_inst(struct kretprobe_instance *ri) 1272 { 1273 struct kretprobe *rp = get_kretprobe(ri); 1274 1275 if (likely(rp)) 1276 freelist_add(&ri->freelist, &rp->freelist); 1277 else 1278 call_rcu(&ri->rcu, free_rp_inst_rcu); 1279 } 1280 NOKPROBE_SYMBOL(recycle_rp_inst); 1281 1282 /* 1283 * This function is called from delayed_put_task_struct() when a task is 1284 * dead and cleaned up to recycle any kretprobe instances associated with 1285 * this task. These left over instances represent probed functions that 1286 * have been called but will never return. 1287 */ 1288 void kprobe_flush_task(struct task_struct *tk) 1289 { 1290 struct kretprobe_instance *ri; 1291 struct llist_node *node; 1292 1293 /* Early boot, not yet initialized. */ 1294 if (unlikely(!kprobes_initialized)) 1295 return; 1296 1297 kprobe_busy_begin(); 1298 1299 node = __llist_del_all(&tk->kretprobe_instances); 1300 while (node) { 1301 ri = container_of(node, struct kretprobe_instance, llist); 1302 node = node->next; 1303 1304 recycle_rp_inst(ri); 1305 } 1306 1307 kprobe_busy_end(); 1308 } 1309 NOKPROBE_SYMBOL(kprobe_flush_task); 1310 1311 static inline void free_rp_inst(struct kretprobe *rp) 1312 { 1313 struct kretprobe_instance *ri; 1314 struct freelist_node *node; 1315 int count = 0; 1316 1317 node = rp->freelist.head; 1318 while (node) { 1319 ri = container_of(node, struct kretprobe_instance, freelist); 1320 node = node->next; 1321 1322 kfree(ri); 1323 count++; 1324 } 1325 1326 if (refcount_sub_and_test(count, &rp->rph->ref)) { 1327 kfree(rp->rph); 1328 rp->rph = NULL; 1329 } 1330 } 1331 #endif /* !CONFIG_KRETPROBE_ON_RETHOOK */ 1332 1333 /* Add the new probe to 'ap->list'. */ 1334 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) 1335 { 1336 if (p->post_handler) 1337 unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */ 1338 1339 list_add_rcu(&p->list, &ap->list); 1340 if (p->post_handler && !ap->post_handler) 1341 ap->post_handler = aggr_post_handler; 1342 1343 return 0; 1344 } 1345 1346 /* 1347 * Fill in the required fields of the aggregator kprobe. Replace the 1348 * earlier kprobe in the hlist with the aggregator kprobe. 1349 */ 1350 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) 1351 { 1352 /* Copy the insn slot of 'p' to 'ap'. 
*/ 1353 copy_kprobe(p, ap); 1354 flush_insn_slot(ap); 1355 ap->addr = p->addr; 1356 ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED; 1357 ap->pre_handler = aggr_pre_handler; 1358 /* We don't care the kprobe which has gone. */ 1359 if (p->post_handler && !kprobe_gone(p)) 1360 ap->post_handler = aggr_post_handler; 1361 1362 INIT_LIST_HEAD(&ap->list); 1363 INIT_HLIST_NODE(&ap->hlist); 1364 1365 list_add_rcu(&p->list, &ap->list); 1366 hlist_replace_rcu(&p->hlist, &ap->hlist); 1367 } 1368 1369 /* 1370 * This registers the second or subsequent kprobe at the same address. 1371 */ 1372 static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) 1373 { 1374 int ret = 0; 1375 struct kprobe *ap = orig_p; 1376 1377 cpus_read_lock(); 1378 1379 /* For preparing optimization, jump_label_text_reserved() is called */ 1380 jump_label_lock(); 1381 mutex_lock(&text_mutex); 1382 1383 if (!kprobe_aggrprobe(orig_p)) { 1384 /* If 'orig_p' is not an 'aggr_kprobe', create new one. */ 1385 ap = alloc_aggr_kprobe(orig_p); 1386 if (!ap) { 1387 ret = -ENOMEM; 1388 goto out; 1389 } 1390 init_aggr_kprobe(ap, orig_p); 1391 } else if (kprobe_unused(ap)) { 1392 /* This probe is going to die. Rescue it */ 1393 ret = reuse_unused_kprobe(ap); 1394 if (ret) 1395 goto out; 1396 } 1397 1398 if (kprobe_gone(ap)) { 1399 /* 1400 * Attempting to insert new probe at the same location that 1401 * had a probe in the module vaddr area which already 1402 * freed. So, the instruction slot has already been 1403 * released. We need a new slot for the new probe. 1404 */ 1405 ret = arch_prepare_kprobe(ap); 1406 if (ret) 1407 /* 1408 * Even if fail to allocate new slot, don't need to 1409 * free the 'ap'. It will be used next time, or 1410 * freed by unregister_kprobe(). 1411 */ 1412 goto out; 1413 1414 /* Prepare optimized instructions if possible. */ 1415 prepare_optimized_kprobe(ap); 1416 1417 /* 1418 * Clear gone flag to prevent allocating new slot again, and 1419 * set disabled flag because it is not armed yet. 1420 */ 1421 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE) 1422 | KPROBE_FLAG_DISABLED; 1423 } 1424 1425 /* Copy the insn slot of 'p' to 'ap'. */ 1426 copy_kprobe(ap, p); 1427 ret = add_new_kprobe(ap, p); 1428 1429 out: 1430 mutex_unlock(&text_mutex); 1431 jump_label_unlock(); 1432 cpus_read_unlock(); 1433 1434 if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { 1435 ap->flags &= ~KPROBE_FLAG_DISABLED; 1436 if (!kprobes_all_disarmed) { 1437 /* Arm the breakpoint again. */ 1438 ret = arm_kprobe(ap); 1439 if (ret) { 1440 ap->flags |= KPROBE_FLAG_DISABLED; 1441 list_del_rcu(&p->list); 1442 synchronize_rcu(); 1443 } 1444 } 1445 } 1446 return ret; 1447 } 1448 1449 bool __weak arch_within_kprobe_blacklist(unsigned long addr) 1450 { 1451 /* The '__kprobes' functions and entry code must not be probed. */ 1452 return addr >= (unsigned long)__kprobes_text_start && 1453 addr < (unsigned long)__kprobes_text_end; 1454 } 1455 1456 static bool __within_kprobe_blacklist(unsigned long addr) 1457 { 1458 struct kprobe_blacklist_entry *ent; 1459 1460 if (arch_within_kprobe_blacklist(addr)) 1461 return true; 1462 /* 1463 * If 'kprobe_blacklist' is defined, check the address and 1464 * reject any probe registration in the prohibited area. 
1465 */ 1466 list_for_each_entry(ent, &kprobe_blacklist, list) { 1467 if (addr >= ent->start_addr && addr < ent->end_addr) 1468 return true; 1469 } 1470 return false; 1471 } 1472 1473 bool within_kprobe_blacklist(unsigned long addr) 1474 { 1475 char symname[KSYM_NAME_LEN], *p; 1476 1477 if (__within_kprobe_blacklist(addr)) 1478 return true; 1479 1480 /* Check if the address is on a suffixed-symbol */ 1481 if (!lookup_symbol_name(addr, symname)) { 1482 p = strchr(symname, '.'); 1483 if (!p) 1484 return false; 1485 *p = '\0'; 1486 addr = (unsigned long)kprobe_lookup_name(symname, 0); 1487 if (addr) 1488 return __within_kprobe_blacklist(addr); 1489 } 1490 return false; 1491 } 1492 1493 /* 1494 * arch_adjust_kprobe_addr - adjust the address 1495 * @addr: symbol base address 1496 * @offset: offset within the symbol 1497 * @on_func_entry: was this @addr+@offset on the function entry 1498 * 1499 * Typically returns @addr + @offset, except for special cases where the 1500 * function might be prefixed by a CFI landing pad, in that case any offset 1501 * inside the landing pad is mapped to the first 'real' instruction of the 1502 * symbol. 1503 * 1504 * Specifically, for things like IBT/BTI, skip the resp. ENDBR/BTI.C 1505 * instruction at +0. 1506 */ 1507 kprobe_opcode_t *__weak arch_adjust_kprobe_addr(unsigned long addr, 1508 unsigned long offset, 1509 bool *on_func_entry) 1510 { 1511 *on_func_entry = !offset; 1512 return (kprobe_opcode_t *)(addr + offset); 1513 } 1514 1515 /* 1516 * If 'symbol_name' is specified, look it up and add the 'offset' 1517 * to it. This way, we can specify a relative address to a symbol. 1518 * This returns encoded errors if it fails to look up symbol or invalid 1519 * combination of parameters. 1520 */ 1521 static kprobe_opcode_t * 1522 _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name, 1523 unsigned long offset, bool *on_func_entry) 1524 { 1525 if ((symbol_name && addr) || (!symbol_name && !addr)) 1526 goto invalid; 1527 1528 if (symbol_name) { 1529 /* 1530 * Input: @sym + @offset 1531 * Output: @addr + @offset 1532 * 1533 * NOTE: kprobe_lookup_name() does *NOT* fold the offset 1534 * argument into it's output! 1535 */ 1536 addr = kprobe_lookup_name(symbol_name, offset); 1537 if (!addr) 1538 return ERR_PTR(-ENOENT); 1539 } 1540 1541 /* 1542 * So here we have @addr + @offset, displace it into a new 1543 * @addr' + @offset' where @addr' is the symbol start address. 1544 */ 1545 addr = (void *)addr + offset; 1546 if (!kallsyms_lookup_size_offset((unsigned long)addr, NULL, &offset)) 1547 return ERR_PTR(-ENOENT); 1548 addr = (void *)addr - offset; 1549 1550 /* 1551 * Then ask the architecture to re-combine them, taking care of 1552 * magical function entry details while telling us if this was indeed 1553 * at the start of the function. 1554 */ 1555 addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry); 1556 if (addr) 1557 return addr; 1558 1559 invalid: 1560 return ERR_PTR(-EINVAL); 1561 } 1562 1563 static kprobe_opcode_t *kprobe_addr(struct kprobe *p) 1564 { 1565 bool on_func_entry; 1566 return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry); 1567 } 1568 1569 /* 1570 * Check the 'p' is valid and return the aggregator kprobe 1571 * at the same address. 
1572 */ 1573 static struct kprobe *__get_valid_kprobe(struct kprobe *p) 1574 { 1575 struct kprobe *ap, *list_p; 1576 1577 lockdep_assert_held(&kprobe_mutex); 1578 1579 ap = get_kprobe(p->addr); 1580 if (unlikely(!ap)) 1581 return NULL; 1582 1583 if (p != ap) { 1584 list_for_each_entry(list_p, &ap->list, list) 1585 if (list_p == p) 1586 /* kprobe p is a valid probe */ 1587 goto valid; 1588 return NULL; 1589 } 1590 valid: 1591 return ap; 1592 } 1593 1594 /* 1595 * Warn and return error if the kprobe is being re-registered since 1596 * there must be a software bug. 1597 */ 1598 static inline int warn_kprobe_rereg(struct kprobe *p) 1599 { 1600 int ret = 0; 1601 1602 mutex_lock(&kprobe_mutex); 1603 if (WARN_ON_ONCE(__get_valid_kprobe(p))) 1604 ret = -EINVAL; 1605 mutex_unlock(&kprobe_mutex); 1606 1607 return ret; 1608 } 1609 1610 static int check_ftrace_location(struct kprobe *p) 1611 { 1612 unsigned long addr = (unsigned long)p->addr; 1613 1614 if (ftrace_location(addr) == addr) { 1615 #ifdef CONFIG_KPROBES_ON_FTRACE 1616 p->flags |= KPROBE_FLAG_FTRACE; 1617 #else /* !CONFIG_KPROBES_ON_FTRACE */ 1618 return -EINVAL; 1619 #endif 1620 } 1621 return 0; 1622 } 1623 1624 static int check_kprobe_address_safe(struct kprobe *p, 1625 struct module **probed_mod) 1626 { 1627 int ret; 1628 1629 ret = check_ftrace_location(p); 1630 if (ret) 1631 return ret; 1632 jump_label_lock(); 1633 preempt_disable(); 1634 1635 /* Ensure it is not in reserved area nor out of text */ 1636 if (!kernel_text_address((unsigned long) p->addr) || 1637 within_kprobe_blacklist((unsigned long) p->addr) || 1638 jump_label_text_reserved(p->addr, p->addr) || 1639 static_call_text_reserved(p->addr, p->addr) || 1640 find_bug((unsigned long)p->addr)) { 1641 ret = -EINVAL; 1642 goto out; 1643 } 1644 1645 /* Check if 'p' is probing a module. */ 1646 *probed_mod = __module_text_address((unsigned long) p->addr); 1647 if (*probed_mod) { 1648 /* 1649 * We must hold a refcount of the probed module while updating 1650 * its code to prohibit unexpected unloading. 1651 */ 1652 if (unlikely(!try_module_get(*probed_mod))) { 1653 ret = -ENOENT; 1654 goto out; 1655 } 1656 1657 /* 1658 * If the module freed '.init.text', we couldn't insert 1659 * kprobes in there. 1660 */ 1661 if (within_module_init((unsigned long)p->addr, *probed_mod) && 1662 (*probed_mod)->state != MODULE_STATE_COMING) { 1663 module_put(*probed_mod); 1664 *probed_mod = NULL; 1665 ret = -ENOENT; 1666 } 1667 } 1668 out: 1669 preempt_enable(); 1670 jump_label_unlock(); 1671 1672 return ret; 1673 } 1674 1675 int register_kprobe(struct kprobe *p) 1676 { 1677 int ret; 1678 struct kprobe *old_p; 1679 struct module *probed_mod; 1680 kprobe_opcode_t *addr; 1681 1682 /* Adjust probe address from symbol */ 1683 addr = kprobe_addr(p); 1684 if (IS_ERR(addr)) 1685 return PTR_ERR(addr); 1686 p->addr = addr; 1687 1688 ret = warn_kprobe_rereg(p); 1689 if (ret) 1690 return ret; 1691 1692 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ 1693 p->flags &= KPROBE_FLAG_DISABLED; 1694 p->nmissed = 0; 1695 INIT_LIST_HEAD(&p->list); 1696 1697 ret = check_kprobe_address_safe(p, &probed_mod); 1698 if (ret) 1699 return ret; 1700 1701 mutex_lock(&kprobe_mutex); 1702 1703 old_p = get_kprobe(p->addr); 1704 if (old_p) { 1705 /* Since this may unoptimize 'old_p', locking 'text_mutex'. 
*/ 1706 ret = register_aggr_kprobe(old_p, p); 1707 goto out; 1708 } 1709 1710 cpus_read_lock(); 1711 /* Prevent text modification */ 1712 mutex_lock(&text_mutex); 1713 ret = prepare_kprobe(p); 1714 mutex_unlock(&text_mutex); 1715 cpus_read_unlock(); 1716 if (ret) 1717 goto out; 1718 1719 INIT_HLIST_NODE(&p->hlist); 1720 hlist_add_head_rcu(&p->hlist, 1721 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 1722 1723 if (!kprobes_all_disarmed && !kprobe_disabled(p)) { 1724 ret = arm_kprobe(p); 1725 if (ret) { 1726 hlist_del_rcu(&p->hlist); 1727 synchronize_rcu(); 1728 goto out; 1729 } 1730 } 1731 1732 /* Try to optimize kprobe */ 1733 try_to_optimize_kprobe(p); 1734 out: 1735 mutex_unlock(&kprobe_mutex); 1736 1737 if (probed_mod) 1738 module_put(probed_mod); 1739 1740 return ret; 1741 } 1742 EXPORT_SYMBOL_GPL(register_kprobe); 1743 1744 /* Check if all probes on the 'ap' are disabled. */ 1745 static bool aggr_kprobe_disabled(struct kprobe *ap) 1746 { 1747 struct kprobe *kp; 1748 1749 lockdep_assert_held(&kprobe_mutex); 1750 1751 list_for_each_entry(kp, &ap->list, list) 1752 if (!kprobe_disabled(kp)) 1753 /* 1754 * Since there is an active probe on the list, 1755 * we can't disable this 'ap'. 1756 */ 1757 return false; 1758 1759 return true; 1760 } 1761 1762 static struct kprobe *__disable_kprobe(struct kprobe *p) 1763 { 1764 struct kprobe *orig_p; 1765 int ret; 1766 1767 lockdep_assert_held(&kprobe_mutex); 1768 1769 /* Get an original kprobe for return */ 1770 orig_p = __get_valid_kprobe(p); 1771 if (unlikely(orig_p == NULL)) 1772 return ERR_PTR(-EINVAL); 1773 1774 if (!kprobe_disabled(p)) { 1775 /* Disable probe if it is a child probe */ 1776 if (p != orig_p) 1777 p->flags |= KPROBE_FLAG_DISABLED; 1778 1779 /* Try to disarm and disable this/parent probe */ 1780 if (p == orig_p || aggr_kprobe_disabled(orig_p)) { 1781 /* 1782 * If 'kprobes_all_disarmed' is set, 'orig_p' 1783 * should have already been disarmed, so 1784 * skip unneed disarming process. 1785 */ 1786 if (!kprobes_all_disarmed) { 1787 ret = disarm_kprobe(orig_p, true); 1788 if (ret) { 1789 p->flags &= ~KPROBE_FLAG_DISABLED; 1790 return ERR_PTR(ret); 1791 } 1792 } 1793 orig_p->flags |= KPROBE_FLAG_DISABLED; 1794 } 1795 } 1796 1797 return orig_p; 1798 } 1799 1800 /* 1801 * Unregister a kprobe without a scheduler synchronization. 1802 */ 1803 static int __unregister_kprobe_top(struct kprobe *p) 1804 { 1805 struct kprobe *ap, *list_p; 1806 1807 /* Disable kprobe. This will disarm it if needed. */ 1808 ap = __disable_kprobe(p); 1809 if (IS_ERR(ap)) 1810 return PTR_ERR(ap); 1811 1812 if (ap == p) 1813 /* 1814 * This probe is an independent(and non-optimized) kprobe 1815 * (not an aggrprobe). Remove from the hash list. 1816 */ 1817 goto disarmed; 1818 1819 /* Following process expects this probe is an aggrprobe */ 1820 WARN_ON(!kprobe_aggrprobe(ap)); 1821 1822 if (list_is_singular(&ap->list) && kprobe_disarmed(ap)) 1823 /* 1824 * !disarmed could be happen if the probe is under delayed 1825 * unoptimizing. 1826 */ 1827 goto disarmed; 1828 else { 1829 /* If disabling probe has special handlers, update aggrprobe */ 1830 if (p->post_handler && !kprobe_gone(p)) { 1831 list_for_each_entry(list_p, &ap->list, list) { 1832 if ((list_p != p) && (list_p->post_handler)) 1833 goto noclean; 1834 } 1835 ap->post_handler = NULL; 1836 } 1837 noclean: 1838 /* 1839 * Remove from the aggrprobe: this path will do nothing in 1840 * __unregister_kprobe_bottom(). 
1841 */ 1842 list_del_rcu(&p->list); 1843 if (!kprobe_disabled(ap) && !kprobes_all_disarmed) 1844 /* 1845 * Try to optimize this probe again, because post 1846 * handler may have been changed. 1847 */ 1848 optimize_kprobe(ap); 1849 } 1850 return 0; 1851 1852 disarmed: 1853 hlist_del_rcu(&ap->hlist); 1854 return 0; 1855 } 1856 1857 static void __unregister_kprobe_bottom(struct kprobe *p) 1858 { 1859 struct kprobe *ap; 1860 1861 if (list_empty(&p->list)) 1862 /* This is an independent kprobe */ 1863 arch_remove_kprobe(p); 1864 else if (list_is_singular(&p->list)) { 1865 /* This is the last child of an aggrprobe */ 1866 ap = list_entry(p->list.next, struct kprobe, list); 1867 list_del(&p->list); 1868 free_aggr_kprobe(ap); 1869 } 1870 /* Otherwise, do nothing. */ 1871 } 1872 1873 int register_kprobes(struct kprobe **kps, int num) 1874 { 1875 int i, ret = 0; 1876 1877 if (num <= 0) 1878 return -EINVAL; 1879 for (i = 0; i < num; i++) { 1880 ret = register_kprobe(kps[i]); 1881 if (ret < 0) { 1882 if (i > 0) 1883 unregister_kprobes(kps, i); 1884 break; 1885 } 1886 } 1887 return ret; 1888 } 1889 EXPORT_SYMBOL_GPL(register_kprobes); 1890 1891 void unregister_kprobe(struct kprobe *p) 1892 { 1893 unregister_kprobes(&p, 1); 1894 } 1895 EXPORT_SYMBOL_GPL(unregister_kprobe); 1896 1897 void unregister_kprobes(struct kprobe **kps, int num) 1898 { 1899 int i; 1900 1901 if (num <= 0) 1902 return; 1903 mutex_lock(&kprobe_mutex); 1904 for (i = 0; i < num; i++) 1905 if (__unregister_kprobe_top(kps[i]) < 0) 1906 kps[i]->addr = NULL; 1907 mutex_unlock(&kprobe_mutex); 1908 1909 synchronize_rcu(); 1910 for (i = 0; i < num; i++) 1911 if (kps[i]->addr) 1912 __unregister_kprobe_bottom(kps[i]); 1913 } 1914 EXPORT_SYMBOL_GPL(unregister_kprobes); 1915 1916 int __weak kprobe_exceptions_notify(struct notifier_block *self, 1917 unsigned long val, void *data) 1918 { 1919 return NOTIFY_DONE; 1920 } 1921 NOKPROBE_SYMBOL(kprobe_exceptions_notify); 1922 1923 static struct notifier_block kprobe_exceptions_nb = { 1924 .notifier_call = kprobe_exceptions_notify, 1925 .priority = 0x7fffffff /* we need to be notified first */ 1926 }; 1927 1928 #ifdef CONFIG_KRETPROBES 1929 1930 #if !defined(CONFIG_KRETPROBE_ON_RETHOOK) 1931 /* This assumes the 'tsk' is the current task or the is not running. */ 1932 static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk, 1933 struct llist_node **cur) 1934 { 1935 struct kretprobe_instance *ri = NULL; 1936 struct llist_node *node = *cur; 1937 1938 if (!node) 1939 node = tsk->kretprobe_instances.first; 1940 else 1941 node = node->next; 1942 1943 while (node) { 1944 ri = container_of(node, struct kretprobe_instance, llist); 1945 if (ri->ret_addr != kretprobe_trampoline_addr()) { 1946 *cur = node; 1947 return ri->ret_addr; 1948 } 1949 node = node->next; 1950 } 1951 return NULL; 1952 } 1953 NOKPROBE_SYMBOL(__kretprobe_find_ret_addr); 1954 1955 /** 1956 * kretprobe_find_ret_addr -- Find correct return address modified by kretprobe 1957 * @tsk: Target task 1958 * @fp: A frame pointer 1959 * @cur: a storage of the loop cursor llist_node pointer for next call 1960 * 1961 * Find the correct return address modified by a kretprobe on @tsk in unsigned 1962 * long type. If it finds the return address, this returns that address value, 1963 * or this returns 0. 1964 * The @tsk must be 'current' or a task which is not running. @fp is a hint 1965 * to get the currect return address - which is compared with the 1966 * kretprobe_instance::fp field. 
The @cur is a loop cursor for searching the 1967 * kretprobe return addresses on the @tsk. The '*@cur' should be NULL at the 1968 * first call, but '@cur' itself must NOT NULL. 1969 */ 1970 unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp, 1971 struct llist_node **cur) 1972 { 1973 struct kretprobe_instance *ri = NULL; 1974 kprobe_opcode_t *ret; 1975 1976 if (WARN_ON_ONCE(!cur)) 1977 return 0; 1978 1979 do { 1980 ret = __kretprobe_find_ret_addr(tsk, cur); 1981 if (!ret) 1982 break; 1983 ri = container_of(*cur, struct kretprobe_instance, llist); 1984 } while (ri->fp != fp); 1985 1986 return (unsigned long)ret; 1987 } 1988 NOKPROBE_SYMBOL(kretprobe_find_ret_addr); 1989 1990 void __weak arch_kretprobe_fixup_return(struct pt_regs *regs, 1991 kprobe_opcode_t *correct_ret_addr) 1992 { 1993 /* 1994 * Do nothing by default. Please fill this to update the fake return 1995 * address on the stack with the correct one on each arch if possible. 1996 */ 1997 } 1998 1999 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, 2000 void *frame_pointer) 2001 { 2002 kprobe_opcode_t *correct_ret_addr = NULL; 2003 struct kretprobe_instance *ri = NULL; 2004 struct llist_node *first, *node = NULL; 2005 struct kretprobe *rp; 2006 2007 /* Find correct address and all nodes for this frame. */ 2008 correct_ret_addr = __kretprobe_find_ret_addr(current, &node); 2009 if (!correct_ret_addr) { 2010 pr_err("kretprobe: Return address not found, not execute handler. Maybe there is a bug in the kernel.\n"); 2011 BUG_ON(1); 2012 } 2013 2014 /* 2015 * Set the return address as the instruction pointer, because if the 2016 * user handler calls stack_trace_save_regs() with this 'regs', 2017 * the stack trace will start from the instruction pointer. 2018 */ 2019 instruction_pointer_set(regs, (unsigned long)correct_ret_addr); 2020 2021 /* Run the user handler of the nodes. */ 2022 first = current->kretprobe_instances.first; 2023 while (first) { 2024 ri = container_of(first, struct kretprobe_instance, llist); 2025 2026 if (WARN_ON_ONCE(ri->fp != frame_pointer)) 2027 break; 2028 2029 rp = get_kretprobe(ri); 2030 if (rp && rp->handler) { 2031 struct kprobe *prev = kprobe_running(); 2032 2033 __this_cpu_write(current_kprobe, &rp->kp); 2034 ri->ret_addr = correct_ret_addr; 2035 rp->handler(ri, regs); 2036 __this_cpu_write(current_kprobe, prev); 2037 } 2038 if (first == node) 2039 break; 2040 2041 first = first->next; 2042 } 2043 2044 arch_kretprobe_fixup_return(regs, correct_ret_addr); 2045 2046 /* Unlink all nodes for this frame. */ 2047 first = current->kretprobe_instances.first; 2048 current->kretprobe_instances.first = node->next; 2049 node->next = NULL; 2050 2051 /* Recycle free instances. */ 2052 while (first) { 2053 ri = container_of(first, struct kretprobe_instance, llist); 2054 first = first->next; 2055 2056 recycle_rp_inst(ri); 2057 } 2058 2059 return (unsigned long)correct_ret_addr; 2060 } 2061 NOKPROBE_SYMBOL(__kretprobe_trampoline_handler) 2062 2063 /* 2064 * This kprobe pre_handler is registered with every kretprobe. When probe 2065 * hits it will set up the return probe. 
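 *
 * Illustrative sketch (added for clarity, not part of the original file): a
 * minimal kretprobe as its user would set it up; the probed symbol and the
 * handler below are examples only.
 *
 *	static int ex_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("probed function returned %lu\n",
 *			regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe ex_krp = {
 *		.handler	= ex_ret_handler,
 *		.maxactive	= 20,
 *		.kp.symbol_name	= "kernel_clone",
 *	};
 *
 *	// register_kretprobe(&ex_krp) hooks the function entry with the
 *	// pre_handler below; unregister_kretprobe(&ex_krp) removes it.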
2066 */ 2067 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) 2068 { 2069 struct kretprobe *rp = container_of(p, struct kretprobe, kp); 2070 struct kretprobe_instance *ri; 2071 struct freelist_node *fn; 2072 2073 fn = freelist_try_get(&rp->freelist); 2074 if (!fn) { 2075 rp->nmissed++; 2076 return 0; 2077 } 2078 2079 ri = container_of(fn, struct kretprobe_instance, freelist); 2080 2081 if (rp->entry_handler && rp->entry_handler(ri, regs)) { 2082 freelist_add(&ri->freelist, &rp->freelist); 2083 return 0; 2084 } 2085 2086 arch_prepare_kretprobe(ri, regs); 2087 2088 __llist_add(&ri->llist, &current->kretprobe_instances); 2089 2090 return 0; 2091 } 2092 NOKPROBE_SYMBOL(pre_handler_kretprobe); 2093 #else /* CONFIG_KRETPROBE_ON_RETHOOK */ 2094 /* 2095 * This kprobe pre_handler is registered with every kretprobe. When the probe 2096 * hits, it will set up the return probe. 2097 */ 2098 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) 2099 { 2100 struct kretprobe *rp = container_of(p, struct kretprobe, kp); 2101 struct kretprobe_instance *ri; 2102 struct rethook_node *rhn; 2103 2104 rhn = rethook_try_get(rp->rh); 2105 if (!rhn) { 2106 rp->nmissed++; 2107 return 0; 2108 } 2109 2110 ri = container_of(rhn, struct kretprobe_instance, node); 2111 2112 if (rp->entry_handler && rp->entry_handler(ri, regs)) 2113 rethook_recycle(rhn); 2114 else 2115 rethook_hook(rhn, regs, kprobe_ftrace(p)); 2116 2117 return 0; 2118 } 2119 NOKPROBE_SYMBOL(pre_handler_kretprobe); 2120 2121 static void kretprobe_rethook_handler(struct rethook_node *rh, void *data, 2122 struct pt_regs *regs) 2123 { 2124 struct kretprobe *rp = (struct kretprobe *)data; 2125 struct kretprobe_instance *ri; 2126 struct kprobe_ctlblk *kcb; 2127 2128 /* The data must NOT be NULL. This means the rethook data structure is broken. */ 2129 if (WARN_ON_ONCE(!data)) 2130 return; 2131 2132 __this_cpu_write(current_kprobe, &rp->kp); 2133 kcb = get_kprobe_ctlblk(); 2134 kcb->kprobe_status = KPROBE_HIT_ACTIVE; 2135 2136 ri = container_of(rh, struct kretprobe_instance, node); 2137 rp->handler(ri, regs); 2138 2139 __this_cpu_write(current_kprobe, NULL); 2140 } 2141 NOKPROBE_SYMBOL(kretprobe_rethook_handler); 2142 2143 #endif /* !CONFIG_KRETPROBE_ON_RETHOOK */ 2144 2145 /** 2146 * kprobe_on_func_entry() -- check whether given address is function entry 2147 * @addr: Target address 2148 * @sym: Target symbol name 2149 * @offset: The offset from the symbol or the address 2150 * 2151 * This checks whether the given @addr+@offset or @sym+@offset is on the 2152 * function entry address or not. 2153 * This returns 0 if it is the function entry, or -EINVAL if it is not. 2154 * It also returns -ENOENT if the symbol or address lookup fails. 2155 * The caller must pass either @addr or @sym (the other must be NULL), or this 2156 * returns -EINVAL.
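 *
 * For example (illustrative only; 'vfs_read' is just an assumed symbol name):
 * kprobe_on_func_entry(NULL, "vfs_read", 0) is expected to return 0 because
 * offset 0 from the symbol is the function entry, while a nonzero @offset
 * pointing into the function body typically returns -EINVAL, and an unknown
 * @sym returns -ENOENT.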
2157 */ 2158 int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) 2159 { 2160 bool on_func_entry; 2161 kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset, &on_func_entry); 2162 2163 if (IS_ERR(kp_addr)) 2164 return PTR_ERR(kp_addr); 2165 2166 if (!on_func_entry) 2167 return -EINVAL; 2168 2169 return 0; 2170 } 2171 2172 int register_kretprobe(struct kretprobe *rp) 2173 { 2174 int ret; 2175 struct kretprobe_instance *inst; 2176 int i; 2177 void *addr; 2178 2179 ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset); 2180 if (ret) 2181 return ret; 2182 2183 /* If only 'rp->kp.addr' is specified, check reregistering kprobes */ 2184 if (rp->kp.addr && warn_kprobe_rereg(&rp->kp)) 2185 return -EINVAL; 2186 2187 if (kretprobe_blacklist_size) { 2188 addr = kprobe_addr(&rp->kp); 2189 if (IS_ERR(addr)) 2190 return PTR_ERR(addr); 2191 2192 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 2193 if (kretprobe_blacklist[i].addr == addr) 2194 return -EINVAL; 2195 } 2196 } 2197 2198 if (rp->data_size > KRETPROBE_MAX_DATA_SIZE) 2199 return -E2BIG; 2200 2201 rp->kp.pre_handler = pre_handler_kretprobe; 2202 rp->kp.post_handler = NULL; 2203 2204 /* Pre-allocate memory for max kretprobe instances */ 2205 if (rp->maxactive <= 0) { 2206 #ifdef CONFIG_PREEMPTION 2207 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus()); 2208 #else 2209 rp->maxactive = num_possible_cpus(); 2210 #endif 2211 } 2212 #ifdef CONFIG_KRETPROBE_ON_RETHOOK 2213 rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler); 2214 if (!rp->rh) 2215 return -ENOMEM; 2216 2217 for (i = 0; i < rp->maxactive; i++) { 2218 inst = kzalloc(sizeof(struct kretprobe_instance) + 2219 rp->data_size, GFP_KERNEL); 2220 if (inst == NULL) { 2221 rethook_free(rp->rh); 2222 rp->rh = NULL; 2223 return -ENOMEM; 2224 } 2225 rethook_add_node(rp->rh, &inst->node); 2226 } 2227 rp->nmissed = 0; 2228 /* Establish function entry probe point */ 2229 ret = register_kprobe(&rp->kp); 2230 if (ret != 0) { 2231 rethook_free(rp->rh); 2232 rp->rh = NULL; 2233 } 2234 #else /* !CONFIG_KRETPROBE_ON_RETHOOK */ 2235 rp->freelist.head = NULL; 2236 rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL); 2237 if (!rp->rph) 2238 return -ENOMEM; 2239 2240 rp->rph->rp = rp; 2241 for (i = 0; i < rp->maxactive; i++) { 2242 inst = kzalloc(sizeof(struct kretprobe_instance) + 2243 rp->data_size, GFP_KERNEL); 2244 if (inst == NULL) { 2245 refcount_set(&rp->rph->ref, i); 2246 free_rp_inst(rp); 2247 return -ENOMEM; 2248 } 2249 inst->rph = rp->rph; 2250 freelist_add(&inst->freelist, &rp->freelist); 2251 } 2252 refcount_set(&rp->rph->ref, i); 2253 2254 rp->nmissed = 0; 2255 /* Establish function entry probe point */ 2256 ret = register_kprobe(&rp->kp); 2257 if (ret != 0) 2258 free_rp_inst(rp); 2259 #endif 2260 return ret; 2261 } 2262 EXPORT_SYMBOL_GPL(register_kretprobe); 2263 2264 int register_kretprobes(struct kretprobe **rps, int num) 2265 { 2266 int ret = 0, i; 2267 2268 if (num <= 0) 2269 return -EINVAL; 2270 for (i = 0; i < num; i++) { 2271 ret = register_kretprobe(rps[i]); 2272 if (ret < 0) { 2273 if (i > 0) 2274 unregister_kretprobes(rps, i); 2275 break; 2276 } 2277 } 2278 return ret; 2279 } 2280 EXPORT_SYMBOL_GPL(register_kretprobes); 2281 2282 void unregister_kretprobe(struct kretprobe *rp) 2283 { 2284 unregister_kretprobes(&rp, 1); 2285 } 2286 EXPORT_SYMBOL_GPL(unregister_kretprobe); 2287 2288 void unregister_kretprobes(struct kretprobe **rps, int num) 2289 { 2290 int i; 2291 2292 if (num <= 0) 2293 
return; 2294 mutex_lock(&kprobe_mutex); 2295 for (i = 0; i < num; i++) { 2296 if (__unregister_kprobe_top(&rps[i]->kp) < 0) 2297 rps[i]->kp.addr = NULL; 2298 #ifdef CONFIG_KRETPROBE_ON_RETHOOK 2299 rethook_free(rps[i]->rh); 2300 #else 2301 rps[i]->rph->rp = NULL; 2302 #endif 2303 } 2304 mutex_unlock(&kprobe_mutex); 2305 2306 synchronize_rcu(); 2307 for (i = 0; i < num; i++) { 2308 if (rps[i]->kp.addr) { 2309 __unregister_kprobe_bottom(&rps[i]->kp); 2310 #ifndef CONFIG_KRETPROBE_ON_RETHOOK 2311 free_rp_inst(rps[i]); 2312 #endif 2313 } 2314 } 2315 } 2316 EXPORT_SYMBOL_GPL(unregister_kretprobes); 2317 2318 #else /* CONFIG_KRETPROBES */ 2319 int register_kretprobe(struct kretprobe *rp) 2320 { 2321 return -EOPNOTSUPP; 2322 } 2323 EXPORT_SYMBOL_GPL(register_kretprobe); 2324 2325 int register_kretprobes(struct kretprobe **rps, int num) 2326 { 2327 return -EOPNOTSUPP; 2328 } 2329 EXPORT_SYMBOL_GPL(register_kretprobes); 2330 2331 void unregister_kretprobe(struct kretprobe *rp) 2332 { 2333 } 2334 EXPORT_SYMBOL_GPL(unregister_kretprobe); 2335 2336 void unregister_kretprobes(struct kretprobe **rps, int num) 2337 { 2338 } 2339 EXPORT_SYMBOL_GPL(unregister_kretprobes); 2340 2341 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) 2342 { 2343 return 0; 2344 } 2345 NOKPROBE_SYMBOL(pre_handler_kretprobe); 2346 2347 #endif /* CONFIG_KRETPROBES */ 2348 2349 /* Set the kprobe gone and remove its instruction buffer. */ 2350 static void kill_kprobe(struct kprobe *p) 2351 { 2352 struct kprobe *kp; 2353 2354 lockdep_assert_held(&kprobe_mutex); 2355 2356 p->flags |= KPROBE_FLAG_GONE; 2357 if (kprobe_aggrprobe(p)) { 2358 /* 2359 * If this is an aggr_kprobe, we have to list all the 2360 * chained probes and mark them GONE. 2361 */ 2362 list_for_each_entry(kp, &p->list, list) 2363 kp->flags |= KPROBE_FLAG_GONE; 2364 p->post_handler = NULL; 2365 kill_optimized_kprobe(p); 2366 } 2367 /* 2368 * Here, we can remove insn_slot safely, because no thread calls 2369 * the original probed function (which will be freed soon) any more. 2370 */ 2371 arch_remove_kprobe(p); 2372 2373 /* 2374 * The module is going away. We should disarm the kprobe which 2375 * is using ftrace, because ftrace framework is still available at 2376 * 'MODULE_STATE_GOING' notification. 2377 */ 2378 if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed) 2379 disarm_kprobe_ftrace(p); 2380 } 2381 2382 /* Disable one kprobe */ 2383 int disable_kprobe(struct kprobe *kp) 2384 { 2385 int ret = 0; 2386 struct kprobe *p; 2387 2388 mutex_lock(&kprobe_mutex); 2389 2390 /* Disable this kprobe */ 2391 p = __disable_kprobe(kp); 2392 if (IS_ERR(p)) 2393 ret = PTR_ERR(p); 2394 2395 mutex_unlock(&kprobe_mutex); 2396 return ret; 2397 } 2398 EXPORT_SYMBOL_GPL(disable_kprobe); 2399 2400 /* Enable one kprobe */ 2401 int enable_kprobe(struct kprobe *kp) 2402 { 2403 int ret = 0; 2404 struct kprobe *p; 2405 2406 mutex_lock(&kprobe_mutex); 2407 2408 /* Check whether specified probe is valid. */ 2409 p = __get_valid_kprobe(kp); 2410 if (unlikely(p == NULL)) { 2411 ret = -EINVAL; 2412 goto out; 2413 } 2414 2415 if (kprobe_gone(kp)) { 2416 /* This kprobe has gone, we couldn't enable it. 
*/ 2417 ret = -EINVAL; 2418 goto out; 2419 } 2420 2421 if (p != kp) 2422 kp->flags &= ~KPROBE_FLAG_DISABLED; 2423 2424 if (!kprobes_all_disarmed && kprobe_disabled(p)) { 2425 p->flags &= ~KPROBE_FLAG_DISABLED; 2426 ret = arm_kprobe(p); 2427 if (ret) 2428 p->flags |= KPROBE_FLAG_DISABLED; 2429 } 2430 out: 2431 mutex_unlock(&kprobe_mutex); 2432 return ret; 2433 } 2434 EXPORT_SYMBOL_GPL(enable_kprobe); 2435 2436 /* Caller must NOT call this in usual path. This is only for critical case */ 2437 void dump_kprobe(struct kprobe *kp) 2438 { 2439 pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n", 2440 kp->symbol_name, kp->offset, kp->addr); 2441 } 2442 NOKPROBE_SYMBOL(dump_kprobe); 2443 2444 int kprobe_add_ksym_blacklist(unsigned long entry) 2445 { 2446 struct kprobe_blacklist_entry *ent; 2447 unsigned long offset = 0, size = 0; 2448 2449 if (!kernel_text_address(entry) || 2450 !kallsyms_lookup_size_offset(entry, &size, &offset)) 2451 return -EINVAL; 2452 2453 ent = kmalloc(sizeof(*ent), GFP_KERNEL); 2454 if (!ent) 2455 return -ENOMEM; 2456 ent->start_addr = entry; 2457 ent->end_addr = entry + size; 2458 INIT_LIST_HEAD(&ent->list); 2459 list_add_tail(&ent->list, &kprobe_blacklist); 2460 2461 return (int)size; 2462 } 2463 2464 /* Add all symbols in given area into kprobe blacklist */ 2465 int kprobe_add_area_blacklist(unsigned long start, unsigned long end) 2466 { 2467 unsigned long entry; 2468 int ret = 0; 2469 2470 for (entry = start; entry < end; entry += ret) { 2471 ret = kprobe_add_ksym_blacklist(entry); 2472 if (ret < 0) 2473 return ret; 2474 if (ret == 0) /* In case of alias symbol */ 2475 ret = 1; 2476 } 2477 return 0; 2478 } 2479 2480 /* Remove all symbols in given area from kprobe blacklist */ 2481 static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end) 2482 { 2483 struct kprobe_blacklist_entry *ent, *n; 2484 2485 list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) { 2486 if (ent->start_addr < start || ent->start_addr >= end) 2487 continue; 2488 list_del(&ent->list); 2489 kfree(ent); 2490 } 2491 } 2492 2493 static void kprobe_remove_ksym_blacklist(unsigned long entry) 2494 { 2495 kprobe_remove_area_blacklist(entry, entry + 1); 2496 } 2497 2498 int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value, 2499 char *type, char *sym) 2500 { 2501 return -ERANGE; 2502 } 2503 2504 int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type, 2505 char *sym) 2506 { 2507 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT 2508 if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym)) 2509 return 0; 2510 #ifdef CONFIG_OPTPROBES 2511 if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym)) 2512 return 0; 2513 #endif 2514 #endif 2515 if (!arch_kprobe_get_kallsym(&symnum, value, type, sym)) 2516 return 0; 2517 return -ERANGE; 2518 } 2519 2520 int __init __weak arch_populate_kprobe_blacklist(void) 2521 { 2522 return 0; 2523 } 2524 2525 /* 2526 * Lookup and populate the kprobe_blacklist. 2527 * 2528 * Unlike the kretprobe blacklist, we'll need to determine 2529 * the range of addresses that belong to the said functions, 2530 * since a kprobe need not necessarily be at the beginning 2531 * of a function. 
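 *
 * For example (illustrative), if a blacklisted function is 0x40 bytes long,
 * its entry covers the whole [start_addr, start_addr + 0x40) range, so a
 * probe placed at 'that_function+0x10' is rejected as well, not only one
 * placed at offset 0.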
2532 */ 2533 static int __init populate_kprobe_blacklist(unsigned long *start, 2534 unsigned long *end) 2535 { 2536 unsigned long entry; 2537 unsigned long *iter; 2538 int ret; 2539 2540 for (iter = start; iter < end; iter++) { 2541 entry = (unsigned long)dereference_symbol_descriptor((void *)*iter); 2542 ret = kprobe_add_ksym_blacklist(entry); 2543 if (ret == -EINVAL) 2544 continue; 2545 if (ret < 0) 2546 return ret; 2547 } 2548 2549 /* Symbols in '__kprobes_text' are blacklisted */ 2550 ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start, 2551 (unsigned long)__kprobes_text_end); 2552 if (ret) 2553 return ret; 2554 2555 /* Symbols in 'noinstr' section are blacklisted */ 2556 ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start, 2557 (unsigned long)__noinstr_text_end); 2558 2559 return ret ? : arch_populate_kprobe_blacklist(); 2560 } 2561 2562 static void add_module_kprobe_blacklist(struct module *mod) 2563 { 2564 unsigned long start, end; 2565 int i; 2566 2567 if (mod->kprobe_blacklist) { 2568 for (i = 0; i < mod->num_kprobe_blacklist; i++) 2569 kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]); 2570 } 2571 2572 start = (unsigned long)mod->kprobes_text_start; 2573 if (start) { 2574 end = start + mod->kprobes_text_size; 2575 kprobe_add_area_blacklist(start, end); 2576 } 2577 2578 start = (unsigned long)mod->noinstr_text_start; 2579 if (start) { 2580 end = start + mod->noinstr_text_size; 2581 kprobe_add_area_blacklist(start, end); 2582 } 2583 } 2584 2585 static void remove_module_kprobe_blacklist(struct module *mod) 2586 { 2587 unsigned long start, end; 2588 int i; 2589 2590 if (mod->kprobe_blacklist) { 2591 for (i = 0; i < mod->num_kprobe_blacklist; i++) 2592 kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]); 2593 } 2594 2595 start = (unsigned long)mod->kprobes_text_start; 2596 if (start) { 2597 end = start + mod->kprobes_text_size; 2598 kprobe_remove_area_blacklist(start, end); 2599 } 2600 2601 start = (unsigned long)mod->noinstr_text_start; 2602 if (start) { 2603 end = start + mod->noinstr_text_size; 2604 kprobe_remove_area_blacklist(start, end); 2605 } 2606 } 2607 2608 /* Module notifier callback, checking kprobes on the module */ 2609 static int kprobes_module_callback(struct notifier_block *nb, 2610 unsigned long val, void *data) 2611 { 2612 struct module *mod = data; 2613 struct hlist_head *head; 2614 struct kprobe *p; 2615 unsigned int i; 2616 int checkcore = (val == MODULE_STATE_GOING); 2617 2618 if (val == MODULE_STATE_COMING) { 2619 mutex_lock(&kprobe_mutex); 2620 add_module_kprobe_blacklist(mod); 2621 mutex_unlock(&kprobe_mutex); 2622 } 2623 if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE) 2624 return NOTIFY_DONE; 2625 2626 /* 2627 * When 'MODULE_STATE_GOING' is notified, both the module '.text' and 2628 * '.init.text' sections will be freed. When 'MODULE_STATE_LIVE' is 2629 * notified, only the '.init.text' section will be freed. We need to 2630 * disable kprobes which have been inserted in those sections. 2631 */ 2632 mutex_lock(&kprobe_mutex); 2633 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2634 head = &kprobe_table[i]; 2635 hlist_for_each_entry(p, head, hlist) 2636 if (within_module_init((unsigned long)p->addr, mod) || 2637 (checkcore && 2638 within_module_core((unsigned long)p->addr, mod))) { 2639 /* 2640 * The vaddr this probe is installed at will soon 2641 * be vfreed but not synced to disk. Hence, 2642 * disarming the breakpoint isn't needed.
2643 * 2644 * Note, this will also move any optimized probes 2645 * that are pending to be removed from their 2646 * corresponding lists to the 'freeing_list' and 2647 * will not be touched by the delayed 2648 * kprobe_optimizer() work handler. 2649 */ 2650 kill_kprobe(p); 2651 } 2652 } 2653 if (val == MODULE_STATE_GOING) 2654 remove_module_kprobe_blacklist(mod); 2655 mutex_unlock(&kprobe_mutex); 2656 return NOTIFY_DONE; 2657 } 2658 2659 static struct notifier_block kprobe_module_nb = { 2660 .notifier_call = kprobes_module_callback, 2661 .priority = 0 2662 }; 2663 2664 void kprobe_free_init_mem(void) 2665 { 2666 void *start = (void *)(&__init_begin); 2667 void *end = (void *)(&__init_end); 2668 struct hlist_head *head; 2669 struct kprobe *p; 2670 int i; 2671 2672 mutex_lock(&kprobe_mutex); 2673 2674 /* Kill all kprobes on initmem because the target code has been freed. */ 2675 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2676 head = &kprobe_table[i]; 2677 hlist_for_each_entry(p, head, hlist) { 2678 if (start <= (void *)p->addr && (void *)p->addr < end) 2679 kill_kprobe(p); 2680 } 2681 } 2682 2683 mutex_unlock(&kprobe_mutex); 2684 } 2685 2686 static int __init init_kprobes(void) 2687 { 2688 int i, err = 0; 2689 2690 /* FIXME allocate the probe table, currently defined statically */ 2691 /* initialize all list heads */ 2692 for (i = 0; i < KPROBE_TABLE_SIZE; i++) 2693 INIT_HLIST_HEAD(&kprobe_table[i]); 2694 2695 err = populate_kprobe_blacklist(__start_kprobe_blacklist, 2696 __stop_kprobe_blacklist); 2697 if (err) 2698 pr_err("Failed to populate blacklist (error %d), kprobes not restricted, be careful using them!\n", err); 2699 2700 if (kretprobe_blacklist_size) { 2701 /* lookup the function address from its name */ 2702 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 2703 kretprobe_blacklist[i].addr = 2704 kprobe_lookup_name(kretprobe_blacklist[i].name, 0); 2705 if (!kretprobe_blacklist[i].addr) 2706 pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. Maybe the target function is removed or renamed.\n", 2707 kretprobe_blacklist[i].name); 2708 } 2709 } 2710 2711 /* By default, kprobes are armed */ 2712 kprobes_all_disarmed = false; 2713 2714 #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT) 2715 /* Init 'kprobe_optinsn_slots' for allocation */ 2716 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE; 2717 #endif 2718 2719 err = arch_init_kprobes(); 2720 if (!err) 2721 err = register_die_notifier(&kprobe_exceptions_nb); 2722 if (!err) 2723 err = register_module_notifier(&kprobe_module_nb); 2724 2725 kprobes_initialized = (err == 0); 2726 kprobe_sysctls_init(); 2727 return err; 2728 } 2729 early_initcall(init_kprobes); 2730 2731 #if defined(CONFIG_OPTPROBES) 2732 static int __init init_optprobes(void) 2733 { 2734 /* 2735 * Enable kprobe optimization - this kicks the optimizer which 2736 * depends on synchronize_rcu_tasks() and ksoftirqd, that is 2737 * not spawned in early initcall. So delay the optimization. 
2738 */ 2739 optimize_all_kprobes(); 2740 2741 return 0; 2742 } 2743 subsys_initcall(init_optprobes); 2744 #endif 2745 2746 #ifdef CONFIG_DEBUG_FS 2747 static void report_probe(struct seq_file *pi, struct kprobe *p, 2748 const char *sym, int offset, char *modname, struct kprobe *pp) 2749 { 2750 char *kprobe_type; 2751 void *addr = p->addr; 2752 2753 if (p->pre_handler == pre_handler_kretprobe) 2754 kprobe_type = "r"; 2755 else 2756 kprobe_type = "k"; 2757 2758 if (!kallsyms_show_value(pi->file->f_cred)) 2759 addr = NULL; 2760 2761 if (sym) 2762 seq_printf(pi, "%px %s %s+0x%x %s ", 2763 addr, kprobe_type, sym, offset, 2764 (modname ? modname : " ")); 2765 else /* try to use %pS */ 2766 seq_printf(pi, "%px %s %pS ", 2767 addr, kprobe_type, p->addr); 2768 2769 if (!pp) 2770 pp = p; 2771 seq_printf(pi, "%s%s%s%s\n", 2772 (kprobe_gone(p) ? "[GONE]" : ""), 2773 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""), 2774 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""), 2775 (kprobe_ftrace(pp) ? "[FTRACE]" : "")); 2776 } 2777 2778 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos) 2779 { 2780 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL; 2781 } 2782 2783 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) 2784 { 2785 (*pos)++; 2786 if (*pos >= KPROBE_TABLE_SIZE) 2787 return NULL; 2788 return pos; 2789 } 2790 2791 static void kprobe_seq_stop(struct seq_file *f, void *v) 2792 { 2793 /* Nothing to do */ 2794 } 2795 2796 static int show_kprobe_addr(struct seq_file *pi, void *v) 2797 { 2798 struct hlist_head *head; 2799 struct kprobe *p, *kp; 2800 const char *sym = NULL; 2801 unsigned int i = *(loff_t *) v; 2802 unsigned long offset = 0; 2803 char *modname, namebuf[KSYM_NAME_LEN]; 2804 2805 head = &kprobe_table[i]; 2806 preempt_disable(); 2807 hlist_for_each_entry_rcu(p, head, hlist) { 2808 sym = kallsyms_lookup((unsigned long)p->addr, NULL, 2809 &offset, &modname, namebuf); 2810 if (kprobe_aggrprobe(p)) { 2811 list_for_each_entry_rcu(kp, &p->list, list) 2812 report_probe(pi, kp, sym, offset, modname, p); 2813 } else 2814 report_probe(pi, p, sym, offset, modname, NULL); 2815 } 2816 preempt_enable(); 2817 return 0; 2818 } 2819 2820 static const struct seq_operations kprobes_sops = { 2821 .start = kprobe_seq_start, 2822 .next = kprobe_seq_next, 2823 .stop = kprobe_seq_stop, 2824 .show = show_kprobe_addr 2825 }; 2826 2827 DEFINE_SEQ_ATTRIBUTE(kprobes); 2828 2829 /* kprobes/blacklist -- shows which functions can not be probed */ 2830 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos) 2831 { 2832 mutex_lock(&kprobe_mutex); 2833 return seq_list_start(&kprobe_blacklist, *pos); 2834 } 2835 2836 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos) 2837 { 2838 return seq_list_next(v, &kprobe_blacklist, pos); 2839 } 2840 2841 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) 2842 { 2843 struct kprobe_blacklist_entry *ent = 2844 list_entry(v, struct kprobe_blacklist_entry, list); 2845 2846 /* 2847 * If '/proc/kallsyms' is not showing kernel address, we won't 2848 * show them here either. 
2849 */ 2850 if (!kallsyms_show_value(m->file->f_cred)) 2851 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL, 2852 (void *)ent->start_addr); 2853 else 2854 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr, 2855 (void *)ent->end_addr, (void *)ent->start_addr); 2856 return 0; 2857 } 2858 2859 static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v) 2860 { 2861 mutex_unlock(&kprobe_mutex); 2862 } 2863 2864 static const struct seq_operations kprobe_blacklist_sops = { 2865 .start = kprobe_blacklist_seq_start, 2866 .next = kprobe_blacklist_seq_next, 2867 .stop = kprobe_blacklist_seq_stop, 2868 .show = kprobe_blacklist_seq_show, 2869 }; 2870 DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist); 2871 2872 static int arm_all_kprobes(void) 2873 { 2874 struct hlist_head *head; 2875 struct kprobe *p; 2876 unsigned int i, total = 0, errors = 0; 2877 int err, ret = 0; 2878 2879 mutex_lock(&kprobe_mutex); 2880 2881 /* If kprobes are armed, just return */ 2882 if (!kprobes_all_disarmed) 2883 goto already_enabled; 2884 2885 /* 2886 * optimize_kprobe() called by arm_kprobe() checks 2887 * kprobes_all_disarmed, so set kprobes_all_disarmed before 2888 * arm_kprobe. 2889 */ 2890 kprobes_all_disarmed = false; 2891 /* Arming kprobes doesn't optimize kprobe itself */ 2892 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2893 head = &kprobe_table[i]; 2894 /* Arm all kprobes on a best-effort basis */ 2895 hlist_for_each_entry(p, head, hlist) { 2896 if (!kprobe_disabled(p)) { 2897 err = arm_kprobe(p); 2898 if (err) { 2899 errors++; 2900 ret = err; 2901 } 2902 total++; 2903 } 2904 } 2905 } 2906 2907 if (errors) 2908 pr_warn("Kprobes globally enabled, but failed to enable %d out of %d probes. Please check which kprobes are kept disabled via debugfs.\n", 2909 errors, total); 2910 else 2911 pr_info("Kprobes globally enabled\n"); 2912 2913 already_enabled: 2914 mutex_unlock(&kprobe_mutex); 2915 return ret; 2916 } 2917 2918 static int disarm_all_kprobes(void) 2919 { 2920 struct hlist_head *head; 2921 struct kprobe *p; 2922 unsigned int i, total = 0, errors = 0; 2923 int err, ret = 0; 2924 2925 mutex_lock(&kprobe_mutex); 2926 2927 /* If kprobes are already disarmed, just return */ 2928 if (kprobes_all_disarmed) { 2929 mutex_unlock(&kprobe_mutex); 2930 return 0; 2931 } 2932 2933 kprobes_all_disarmed = true; 2934 2935 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2936 head = &kprobe_table[i]; 2937 /* Disarm all kprobes on a best-effort basis */ 2938 hlist_for_each_entry(p, head, hlist) { 2939 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) { 2940 err = disarm_kprobe(p, false); 2941 if (err) { 2942 errors++; 2943 ret = err; 2944 } 2945 total++; 2946 } 2947 } 2948 } 2949 2950 if (errors) 2951 pr_warn("Kprobes globally disabled, but failed to disable %d out of %d probes. Please check which kprobes are kept enabled via debugfs.\n", 2952 errors, total); 2953 else 2954 pr_info("Kprobes globally disabled\n"); 2955 2956 mutex_unlock(&kprobe_mutex); 2957 2958 /* Wait for disarming all kprobes by optimizer */ 2959 wait_for_kprobe_optimizer(); 2960 2961 return ret; 2962 } 2963 2964 /* 2965 * XXX: The debugfs bool file interface doesn't allow for callbacks 2966 * when the bool state is switched. 
We can reuse that facility when 2967 * available 2968 */ 2969 static ssize_t read_enabled_file_bool(struct file *file, 2970 char __user *user_buf, size_t count, loff_t *ppos) 2971 { 2972 char buf[3]; 2973 2974 if (!kprobes_all_disarmed) 2975 buf[0] = '1'; 2976 else 2977 buf[0] = '0'; 2978 buf[1] = '\n'; 2979 buf[2] = 0x00; 2980 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 2981 } 2982 2983 static ssize_t write_enabled_file_bool(struct file *file, 2984 const char __user *user_buf, size_t count, loff_t *ppos) 2985 { 2986 bool enable; 2987 int ret; 2988 2989 ret = kstrtobool_from_user(user_buf, count, &enable); 2990 if (ret) 2991 return ret; 2992 2993 ret = enable ? arm_all_kprobes() : disarm_all_kprobes(); 2994 if (ret) 2995 return ret; 2996 2997 return count; 2998 } 2999 3000 static const struct file_operations fops_kp = { 3001 .read = read_enabled_file_bool, 3002 .write = write_enabled_file_bool, 3003 .llseek = default_llseek, 3004 }; 3005 3006 static int __init debugfs_kprobe_init(void) 3007 { 3008 struct dentry *dir; 3009 3010 dir = debugfs_create_dir("kprobes", NULL); 3011 3012 debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops); 3013 3014 debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp); 3015 3016 debugfs_create_file("blacklist", 0400, dir, NULL, 3017 &kprobe_blacklist_fops); 3018 3019 return 0; 3020 } 3021 3022 late_initcall(debugfs_kprobe_init); 3023 #endif /* CONFIG_DEBUG_FS */ 3024
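
/*
 * Example usage (an illustrative sketch only; the probed symbol, the handler
 * and the module boilerplate below are assumptions and would live in a
 * separate module such as one under samples/kprobes/, not in this file):
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		// Log the probed function's return value.
 *		pr_info("return value: %lx\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.kp.symbol_name	= "kernel_clone",
 *		.handler	= my_ret_handler,
 *		.maxactive	= 20,	// upper bound on concurrent instances
 *	};
 *
 *	// module_init handler:	ret = register_kretprobe(&my_kretprobe);
 *	// module_exit handler:	unregister_kretprobe(&my_kretprobe);
 */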