1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Kernel Probes (KProbes) 4 * kernel/kprobes.c 5 * 6 * Copyright (C) IBM Corporation, 2002, 2004 7 * 8 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel 9 * Probes initial implementation (includes suggestions from 10 * Rusty Russell). 11 * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with 12 * hlists and exceptions notifier as suggested by Andi Kleen. 13 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes 14 * interface to access function arguments. 15 * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes 16 * exceptions notifier to be first on the priority list. 17 * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston 18 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi 19 * <prasanna@in.ibm.com> added function-return probes. 20 */ 21 #include <linux/kprobes.h> 22 #include <linux/hash.h> 23 #include <linux/init.h> 24 #include <linux/slab.h> 25 #include <linux/stddef.h> 26 #include <linux/export.h> 27 #include <linux/moduleloader.h> 28 #include <linux/kallsyms.h> 29 #include <linux/freezer.h> 30 #include <linux/seq_file.h> 31 #include <linux/debugfs.h> 32 #include <linux/sysctl.h> 33 #include <linux/kdebug.h> 34 #include <linux/memory.h> 35 #include <linux/ftrace.h> 36 #include <linux/cpu.h> 37 #include <linux/jump_label.h> 38 #include <linux/perf_event.h> 39 40 #include <asm/sections.h> 41 #include <asm/cacheflush.h> 42 #include <asm/errno.h> 43 #include <linux/uaccess.h> 44 45 #define KPROBE_HASH_BITS 6 46 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) 47 48 49 static int kprobes_initialized; 50 /* kprobe_table can be accessed by 51 * - Normal hlist traversal and RCU add/del under kprobe_mutex is held. 52 * Or 53 * - RCU hlist traversal under disabling preempt (breakpoint handlers) 54 */ 55 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; 56 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; 57 58 /* NOTE: change this value only with kprobe_mutex held */ 59 static bool kprobes_all_disarmed; 60 61 /* This protects kprobe_table and optimizing_list */ 62 static DEFINE_MUTEX(kprobe_mutex); 63 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; 64 static struct { 65 raw_spinlock_t lock ____cacheline_aligned_in_smp; 66 } kretprobe_table_locks[KPROBE_TABLE_SIZE]; 67 68 kprobe_opcode_t * __weak kprobe_lookup_name(const char *name, 69 unsigned int __unused) 70 { 71 return ((kprobe_opcode_t *)(kallsyms_lookup_name(name))); 72 } 73 74 static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) 75 { 76 return &(kretprobe_table_locks[hash].lock); 77 } 78 79 /* Blacklist -- list of struct kprobe_blacklist_entry */ 80 static LIST_HEAD(kprobe_blacklist); 81 82 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT 83 /* 84 * kprobe->ainsn.insn points to the copy of the instruction to be 85 * single-stepped. x86_64, POWER4 and above have no-exec support and 86 * stepping on the instruction on a vmalloced/kmalloced/data page 87 * is a recipe for disaster 88 */ 89 struct kprobe_insn_page { 90 struct list_head list; 91 kprobe_opcode_t *insns; /* Page of instruction slots */ 92 struct kprobe_insn_cache *cache; 93 int nused; 94 int ngarbage; 95 char slot_used[]; 96 }; 97 98 #define KPROBE_INSN_PAGE_SIZE(slots) \ 99 (offsetof(struct kprobe_insn_page, slot_used) + \ 100 (sizeof(char) * (slots))) 101 102 static int slots_per_page(struct kprobe_insn_cache *c) 103 { 104 return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t)); 105 } 106 107 enum kprobe_slot_state { 108 SLOT_CLEAN = 0, 109 SLOT_DIRTY = 1, 110 SLOT_USED = 2, 111 }; 112 113 void __weak *alloc_insn_page(void) 114 { 115 return module_alloc(PAGE_SIZE); 116 } 117 118 void __weak free_insn_page(void *page) 119 { 120 module_memfree(page); 121 } 122 123 struct kprobe_insn_cache kprobe_insn_slots = { 124 .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex), 125 .alloc = alloc_insn_page, 126 .free = free_insn_page, 127 .sym = KPROBE_INSN_PAGE_SYM, 128 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages), 129 .insn_size = MAX_INSN_SIZE, 130 .nr_garbage = 0, 131 }; 132 static int collect_garbage_slots(struct kprobe_insn_cache *c); 133 134 /** 135 * __get_insn_slot() - Find a slot on an executable page for an instruction. 136 * We allocate an executable page if there's no room on existing ones. 137 */ 138 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) 139 { 140 struct kprobe_insn_page *kip; 141 kprobe_opcode_t *slot = NULL; 142 143 /* Since the slot array is not protected by rcu, we need a mutex */ 144 mutex_lock(&c->mutex); 145 retry: 146 rcu_read_lock(); 147 list_for_each_entry_rcu(kip, &c->pages, list) { 148 if (kip->nused < slots_per_page(c)) { 149 int i; 150 for (i = 0; i < slots_per_page(c); i++) { 151 if (kip->slot_used[i] == SLOT_CLEAN) { 152 kip->slot_used[i] = SLOT_USED; 153 kip->nused++; 154 slot = kip->insns + (i * c->insn_size); 155 rcu_read_unlock(); 156 goto out; 157 } 158 } 159 /* kip->nused is broken. Fix it. */ 160 kip->nused = slots_per_page(c); 161 WARN_ON(1); 162 } 163 } 164 rcu_read_unlock(); 165 166 /* If there are any garbage slots, collect it and try again. */ 167 if (c->nr_garbage && collect_garbage_slots(c) == 0) 168 goto retry; 169 170 /* All out of space. Need to allocate a new page. */ 171 kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL); 172 if (!kip) 173 goto out; 174 175 /* 176 * Use module_alloc so this page is within +/- 2GB of where the 177 * kernel image and loaded module images reside. This is required 178 * so x86_64 can correctly handle the %rip-relative fixups. 179 */ 180 kip->insns = c->alloc(); 181 if (!kip->insns) { 182 kfree(kip); 183 goto out; 184 } 185 INIT_LIST_HEAD(&kip->list); 186 memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c)); 187 kip->slot_used[0] = SLOT_USED; 188 kip->nused = 1; 189 kip->ngarbage = 0; 190 kip->cache = c; 191 list_add_rcu(&kip->list, &c->pages); 192 slot = kip->insns; 193 194 /* Record the perf ksymbol register event after adding the page */ 195 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns, 196 PAGE_SIZE, false, c->sym); 197 out: 198 mutex_unlock(&c->mutex); 199 return slot; 200 } 201 202 /* Return 1 if all garbages are collected, otherwise 0. */ 203 static int collect_one_slot(struct kprobe_insn_page *kip, int idx) 204 { 205 kip->slot_used[idx] = SLOT_CLEAN; 206 kip->nused--; 207 if (kip->nused == 0) { 208 /* 209 * Page is no longer in use. Free it unless 210 * it's the last one. We keep the last one 211 * so as not to have to set it up again the 212 * next time somebody inserts a probe. 213 */ 214 if (!list_is_singular(&kip->list)) { 215 /* 216 * Record perf ksymbol unregister event before removing 217 * the page. 218 */ 219 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, 220 (unsigned long)kip->insns, PAGE_SIZE, true, 221 kip->cache->sym); 222 list_del_rcu(&kip->list); 223 synchronize_rcu(); 224 kip->cache->free(kip->insns); 225 kfree(kip); 226 } 227 return 1; 228 } 229 return 0; 230 } 231 232 static int collect_garbage_slots(struct kprobe_insn_cache *c) 233 { 234 struct kprobe_insn_page *kip, *next; 235 236 /* Ensure no-one is interrupted on the garbages */ 237 synchronize_rcu(); 238 239 list_for_each_entry_safe(kip, next, &c->pages, list) { 240 int i; 241 if (kip->ngarbage == 0) 242 continue; 243 kip->ngarbage = 0; /* we will collect all garbages */ 244 for (i = 0; i < slots_per_page(c); i++) { 245 if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i)) 246 break; 247 } 248 } 249 c->nr_garbage = 0; 250 return 0; 251 } 252 253 void __free_insn_slot(struct kprobe_insn_cache *c, 254 kprobe_opcode_t *slot, int dirty) 255 { 256 struct kprobe_insn_page *kip; 257 long idx; 258 259 mutex_lock(&c->mutex); 260 rcu_read_lock(); 261 list_for_each_entry_rcu(kip, &c->pages, list) { 262 idx = ((long)slot - (long)kip->insns) / 263 (c->insn_size * sizeof(kprobe_opcode_t)); 264 if (idx >= 0 && idx < slots_per_page(c)) 265 goto out; 266 } 267 /* Could not find this slot. */ 268 WARN_ON(1); 269 kip = NULL; 270 out: 271 rcu_read_unlock(); 272 /* Mark and sweep: this may sleep */ 273 if (kip) { 274 /* Check double free */ 275 WARN_ON(kip->slot_used[idx] != SLOT_USED); 276 if (dirty) { 277 kip->slot_used[idx] = SLOT_DIRTY; 278 kip->ngarbage++; 279 if (++c->nr_garbage > slots_per_page(c)) 280 collect_garbage_slots(c); 281 } else { 282 collect_one_slot(kip, idx); 283 } 284 } 285 mutex_unlock(&c->mutex); 286 } 287 288 /* 289 * Check given address is on the page of kprobe instruction slots. 290 * This will be used for checking whether the address on a stack 291 * is on a text area or not. 292 */ 293 bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr) 294 { 295 struct kprobe_insn_page *kip; 296 bool ret = false; 297 298 rcu_read_lock(); 299 list_for_each_entry_rcu(kip, &c->pages, list) { 300 if (addr >= (unsigned long)kip->insns && 301 addr < (unsigned long)kip->insns + PAGE_SIZE) { 302 ret = true; 303 break; 304 } 305 } 306 rcu_read_unlock(); 307 308 return ret; 309 } 310 311 int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum, 312 unsigned long *value, char *type, char *sym) 313 { 314 struct kprobe_insn_page *kip; 315 int ret = -ERANGE; 316 317 rcu_read_lock(); 318 list_for_each_entry_rcu(kip, &c->pages, list) { 319 if ((*symnum)--) 320 continue; 321 strlcpy(sym, c->sym, KSYM_NAME_LEN); 322 *type = 't'; 323 *value = (unsigned long)kip->insns; 324 ret = 0; 325 break; 326 } 327 rcu_read_unlock(); 328 329 return ret; 330 } 331 332 #ifdef CONFIG_OPTPROBES 333 /* For optimized_kprobe buffer */ 334 struct kprobe_insn_cache kprobe_optinsn_slots = { 335 .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex), 336 .alloc = alloc_insn_page, 337 .free = free_insn_page, 338 .sym = KPROBE_OPTINSN_PAGE_SYM, 339 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages), 340 /* .insn_size is initialized later */ 341 .nr_garbage = 0, 342 }; 343 #endif 344 #endif 345 346 /* We have preemption disabled.. so it is safe to use __ versions */ 347 static inline void set_kprobe_instance(struct kprobe *kp) 348 { 349 __this_cpu_write(kprobe_instance, kp); 350 } 351 352 static inline void reset_kprobe_instance(void) 353 { 354 __this_cpu_write(kprobe_instance, NULL); 355 } 356 357 /* 358 * This routine is called either: 359 * - under the kprobe_mutex - during kprobe_[un]register() 360 * OR 361 * - with preemption disabled - from arch/xxx/kernel/kprobes.c 362 */ 363 struct kprobe *get_kprobe(void *addr) 364 { 365 struct hlist_head *head; 366 struct kprobe *p; 367 368 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; 369 hlist_for_each_entry_rcu(p, head, hlist, 370 lockdep_is_held(&kprobe_mutex)) { 371 if (p->addr == addr) 372 return p; 373 } 374 375 return NULL; 376 } 377 NOKPROBE_SYMBOL(get_kprobe); 378 379 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs); 380 381 /* Return true if the kprobe is an aggregator */ 382 static inline int kprobe_aggrprobe(struct kprobe *p) 383 { 384 return p->pre_handler == aggr_pre_handler; 385 } 386 387 /* Return true(!0) if the kprobe is unused */ 388 static inline int kprobe_unused(struct kprobe *p) 389 { 390 return kprobe_aggrprobe(p) && kprobe_disabled(p) && 391 list_empty(&p->list); 392 } 393 394 /* 395 * Keep all fields in the kprobe consistent 396 */ 397 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p) 398 { 399 memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t)); 400 memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn)); 401 } 402 403 #ifdef CONFIG_OPTPROBES 404 /* NOTE: change this value only with kprobe_mutex held */ 405 static bool kprobes_allow_optimization; 406 407 /* 408 * Call all pre_handler on the list, but ignores its return value. 409 * This must be called from arch-dep optimized caller. 410 */ 411 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs) 412 { 413 struct kprobe *kp; 414 415 list_for_each_entry_rcu(kp, &p->list, list) { 416 if (kp->pre_handler && likely(!kprobe_disabled(kp))) { 417 set_kprobe_instance(kp); 418 kp->pre_handler(kp, regs); 419 } 420 reset_kprobe_instance(); 421 } 422 } 423 NOKPROBE_SYMBOL(opt_pre_handler); 424 425 /* Free optimized instructions and optimized_kprobe */ 426 static void free_aggr_kprobe(struct kprobe *p) 427 { 428 struct optimized_kprobe *op; 429 430 op = container_of(p, struct optimized_kprobe, kp); 431 arch_remove_optimized_kprobe(op); 432 arch_remove_kprobe(p); 433 kfree(op); 434 } 435 436 /* Return true(!0) if the kprobe is ready for optimization. */ 437 static inline int kprobe_optready(struct kprobe *p) 438 { 439 struct optimized_kprobe *op; 440 441 if (kprobe_aggrprobe(p)) { 442 op = container_of(p, struct optimized_kprobe, kp); 443 return arch_prepared_optinsn(&op->optinsn); 444 } 445 446 return 0; 447 } 448 449 /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */ 450 static inline int kprobe_disarmed(struct kprobe *p) 451 { 452 struct optimized_kprobe *op; 453 454 /* If kprobe is not aggr/opt probe, just return kprobe is disabled */ 455 if (!kprobe_aggrprobe(p)) 456 return kprobe_disabled(p); 457 458 op = container_of(p, struct optimized_kprobe, kp); 459 460 return kprobe_disabled(p) && list_empty(&op->list); 461 } 462 463 /* Return true(!0) if the probe is queued on (un)optimizing lists */ 464 static int kprobe_queued(struct kprobe *p) 465 { 466 struct optimized_kprobe *op; 467 468 if (kprobe_aggrprobe(p)) { 469 op = container_of(p, struct optimized_kprobe, kp); 470 if (!list_empty(&op->list)) 471 return 1; 472 } 473 return 0; 474 } 475 476 /* 477 * Return an optimized kprobe whose optimizing code replaces 478 * instructions including addr (exclude breakpoint). 479 */ 480 static struct kprobe *get_optimized_kprobe(unsigned long addr) 481 { 482 int i; 483 struct kprobe *p = NULL; 484 struct optimized_kprobe *op; 485 486 /* Don't check i == 0, since that is a breakpoint case. */ 487 for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++) 488 p = get_kprobe((void *)(addr - i)); 489 490 if (p && kprobe_optready(p)) { 491 op = container_of(p, struct optimized_kprobe, kp); 492 if (arch_within_optimized_kprobe(op, addr)) 493 return p; 494 } 495 496 return NULL; 497 } 498 499 /* Optimization staging list, protected by kprobe_mutex */ 500 static LIST_HEAD(optimizing_list); 501 static LIST_HEAD(unoptimizing_list); 502 static LIST_HEAD(freeing_list); 503 504 static void kprobe_optimizer(struct work_struct *work); 505 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); 506 #define OPTIMIZE_DELAY 5 507 508 /* 509 * Optimize (replace a breakpoint with a jump) kprobes listed on 510 * optimizing_list. 511 */ 512 static void do_optimize_kprobes(void) 513 { 514 lockdep_assert_held(&text_mutex); 515 /* 516 * The optimization/unoptimization refers online_cpus via 517 * stop_machine() and cpu-hotplug modifies online_cpus. 518 * And same time, text_mutex will be held in cpu-hotplug and here. 519 * This combination can cause a deadlock (cpu-hotplug try to lock 520 * text_mutex but stop_machine can not be done because online_cpus 521 * has been changed) 522 * To avoid this deadlock, caller must have locked cpu hotplug 523 * for preventing cpu-hotplug outside of text_mutex locking. 524 */ 525 lockdep_assert_cpus_held(); 526 527 /* Optimization never be done when disarmed */ 528 if (kprobes_all_disarmed || !kprobes_allow_optimization || 529 list_empty(&optimizing_list)) 530 return; 531 532 arch_optimize_kprobes(&optimizing_list); 533 } 534 535 /* 536 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint 537 * if need) kprobes listed on unoptimizing_list. 538 */ 539 static void do_unoptimize_kprobes(void) 540 { 541 struct optimized_kprobe *op, *tmp; 542 543 lockdep_assert_held(&text_mutex); 544 /* See comment in do_optimize_kprobes() */ 545 lockdep_assert_cpus_held(); 546 547 /* Unoptimization must be done anytime */ 548 if (list_empty(&unoptimizing_list)) 549 return; 550 551 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); 552 /* Loop free_list for disarming */ 553 list_for_each_entry_safe(op, tmp, &freeing_list, list) { 554 /* Switching from detour code to origin */ 555 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 556 /* Disarm probes if marked disabled */ 557 if (kprobe_disabled(&op->kp)) 558 arch_disarm_kprobe(&op->kp); 559 if (kprobe_unused(&op->kp)) { 560 /* 561 * Remove unused probes from hash list. After waiting 562 * for synchronization, these probes are reclaimed. 563 * (reclaiming is done by do_free_cleaned_kprobes.) 564 */ 565 hlist_del_rcu(&op->kp.hlist); 566 } else 567 list_del_init(&op->list); 568 } 569 } 570 571 /* Reclaim all kprobes on the free_list */ 572 static void do_free_cleaned_kprobes(void) 573 { 574 struct optimized_kprobe *op, *tmp; 575 576 list_for_each_entry_safe(op, tmp, &freeing_list, list) { 577 list_del_init(&op->list); 578 if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) { 579 /* 580 * This must not happen, but if there is a kprobe 581 * still in use, keep it on kprobes hash list. 582 */ 583 continue; 584 } 585 free_aggr_kprobe(&op->kp); 586 } 587 } 588 589 /* Start optimizer after OPTIMIZE_DELAY passed */ 590 static void kick_kprobe_optimizer(void) 591 { 592 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); 593 } 594 595 /* Kprobe jump optimizer */ 596 static void kprobe_optimizer(struct work_struct *work) 597 { 598 mutex_lock(&kprobe_mutex); 599 cpus_read_lock(); 600 mutex_lock(&text_mutex); 601 602 /* 603 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) 604 * kprobes before waiting for quiesence period. 605 */ 606 do_unoptimize_kprobes(); 607 608 /* 609 * Step 2: Wait for quiesence period to ensure all potentially 610 * preempted tasks to have normally scheduled. Because optprobe 611 * may modify multiple instructions, there is a chance that Nth 612 * instruction is preempted. In that case, such tasks can return 613 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it. 614 * Note that on non-preemptive kernel, this is transparently converted 615 * to synchronoze_sched() to wait for all interrupts to have completed. 616 */ 617 synchronize_rcu_tasks(); 618 619 /* Step 3: Optimize kprobes after quiesence period */ 620 do_optimize_kprobes(); 621 622 /* Step 4: Free cleaned kprobes after quiesence period */ 623 do_free_cleaned_kprobes(); 624 625 mutex_unlock(&text_mutex); 626 cpus_read_unlock(); 627 628 /* Step 5: Kick optimizer again if needed */ 629 if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) 630 kick_kprobe_optimizer(); 631 632 mutex_unlock(&kprobe_mutex); 633 } 634 635 /* Wait for completing optimization and unoptimization */ 636 void wait_for_kprobe_optimizer(void) 637 { 638 mutex_lock(&kprobe_mutex); 639 640 while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) { 641 mutex_unlock(&kprobe_mutex); 642 643 /* this will also make optimizing_work execute immmediately */ 644 flush_delayed_work(&optimizing_work); 645 /* @optimizing_work might not have been queued yet, relax */ 646 cpu_relax(); 647 648 mutex_lock(&kprobe_mutex); 649 } 650 651 mutex_unlock(&kprobe_mutex); 652 } 653 654 static bool optprobe_queued_unopt(struct optimized_kprobe *op) 655 { 656 struct optimized_kprobe *_op; 657 658 list_for_each_entry(_op, &unoptimizing_list, list) { 659 if (op == _op) 660 return true; 661 } 662 663 return false; 664 } 665 666 /* Optimize kprobe if p is ready to be optimized */ 667 static void optimize_kprobe(struct kprobe *p) 668 { 669 struct optimized_kprobe *op; 670 671 /* Check if the kprobe is disabled or not ready for optimization. */ 672 if (!kprobe_optready(p) || !kprobes_allow_optimization || 673 (kprobe_disabled(p) || kprobes_all_disarmed)) 674 return; 675 676 /* kprobes with post_handler can not be optimized */ 677 if (p->post_handler) 678 return; 679 680 op = container_of(p, struct optimized_kprobe, kp); 681 682 /* Check there is no other kprobes at the optimized instructions */ 683 if (arch_check_optimized_kprobe(op) < 0) 684 return; 685 686 /* Check if it is already optimized. */ 687 if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) { 688 if (optprobe_queued_unopt(op)) { 689 /* This is under unoptimizing. Just dequeue the probe */ 690 list_del_init(&op->list); 691 } 692 return; 693 } 694 op->kp.flags |= KPROBE_FLAG_OPTIMIZED; 695 696 /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */ 697 if (WARN_ON_ONCE(!list_empty(&op->list))) 698 return; 699 700 list_add(&op->list, &optimizing_list); 701 kick_kprobe_optimizer(); 702 } 703 704 /* Short cut to direct unoptimizing */ 705 static void force_unoptimize_kprobe(struct optimized_kprobe *op) 706 { 707 lockdep_assert_cpus_held(); 708 arch_unoptimize_kprobe(op); 709 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 710 } 711 712 /* Unoptimize a kprobe if p is optimized */ 713 static void unoptimize_kprobe(struct kprobe *p, bool force) 714 { 715 struct optimized_kprobe *op; 716 717 if (!kprobe_aggrprobe(p) || kprobe_disarmed(p)) 718 return; /* This is not an optprobe nor optimized */ 719 720 op = container_of(p, struct optimized_kprobe, kp); 721 if (!kprobe_optimized(p)) 722 return; 723 724 if (!list_empty(&op->list)) { 725 if (optprobe_queued_unopt(op)) { 726 /* Queued in unoptimizing queue */ 727 if (force) { 728 /* 729 * Forcibly unoptimize the kprobe here, and queue it 730 * in the freeing list for release afterwards. 731 */ 732 force_unoptimize_kprobe(op); 733 list_move(&op->list, &freeing_list); 734 } 735 } else { 736 /* Dequeue from the optimizing queue */ 737 list_del_init(&op->list); 738 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 739 } 740 return; 741 } 742 743 /* Optimized kprobe case */ 744 if (force) { 745 /* Forcibly update the code: this is a special case */ 746 force_unoptimize_kprobe(op); 747 } else { 748 list_add(&op->list, &unoptimizing_list); 749 kick_kprobe_optimizer(); 750 } 751 } 752 753 /* Cancel unoptimizing for reusing */ 754 static int reuse_unused_kprobe(struct kprobe *ap) 755 { 756 struct optimized_kprobe *op; 757 758 /* 759 * Unused kprobe MUST be on the way of delayed unoptimizing (means 760 * there is still a relative jump) and disabled. 761 */ 762 op = container_of(ap, struct optimized_kprobe, kp); 763 WARN_ON_ONCE(list_empty(&op->list)); 764 /* Enable the probe again */ 765 ap->flags &= ~KPROBE_FLAG_DISABLED; 766 /* Optimize it again (remove from op->list) */ 767 if (!kprobe_optready(ap)) 768 return -EINVAL; 769 770 optimize_kprobe(ap); 771 return 0; 772 } 773 774 /* Remove optimized instructions */ 775 static void kill_optimized_kprobe(struct kprobe *p) 776 { 777 struct optimized_kprobe *op; 778 779 op = container_of(p, struct optimized_kprobe, kp); 780 if (!list_empty(&op->list)) 781 /* Dequeue from the (un)optimization queue */ 782 list_del_init(&op->list); 783 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 784 785 if (kprobe_unused(p)) { 786 /* Enqueue if it is unused */ 787 list_add(&op->list, &freeing_list); 788 /* 789 * Remove unused probes from the hash list. After waiting 790 * for synchronization, this probe is reclaimed. 791 * (reclaiming is done by do_free_cleaned_kprobes().) 792 */ 793 hlist_del_rcu(&op->kp.hlist); 794 } 795 796 /* Don't touch the code, because it is already freed. */ 797 arch_remove_optimized_kprobe(op); 798 } 799 800 static inline 801 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) 802 { 803 if (!kprobe_ftrace(p)) 804 arch_prepare_optimized_kprobe(op, p); 805 } 806 807 /* Try to prepare optimized instructions */ 808 static void prepare_optimized_kprobe(struct kprobe *p) 809 { 810 struct optimized_kprobe *op; 811 812 op = container_of(p, struct optimized_kprobe, kp); 813 __prepare_optimized_kprobe(op, p); 814 } 815 816 /* Allocate new optimized_kprobe and try to prepare optimized instructions */ 817 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 818 { 819 struct optimized_kprobe *op; 820 821 op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL); 822 if (!op) 823 return NULL; 824 825 INIT_LIST_HEAD(&op->list); 826 op->kp.addr = p->addr; 827 __prepare_optimized_kprobe(op, p); 828 829 return &op->kp; 830 } 831 832 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); 833 834 /* 835 * Prepare an optimized_kprobe and optimize it 836 * NOTE: p must be a normal registered kprobe 837 */ 838 static void try_to_optimize_kprobe(struct kprobe *p) 839 { 840 struct kprobe *ap; 841 struct optimized_kprobe *op; 842 843 /* Impossible to optimize ftrace-based kprobe */ 844 if (kprobe_ftrace(p)) 845 return; 846 847 /* For preparing optimization, jump_label_text_reserved() is called */ 848 cpus_read_lock(); 849 jump_label_lock(); 850 mutex_lock(&text_mutex); 851 852 ap = alloc_aggr_kprobe(p); 853 if (!ap) 854 goto out; 855 856 op = container_of(ap, struct optimized_kprobe, kp); 857 if (!arch_prepared_optinsn(&op->optinsn)) { 858 /* If failed to setup optimizing, fallback to kprobe */ 859 arch_remove_optimized_kprobe(op); 860 kfree(op); 861 goto out; 862 } 863 864 init_aggr_kprobe(ap, p); 865 optimize_kprobe(ap); /* This just kicks optimizer thread */ 866 867 out: 868 mutex_unlock(&text_mutex); 869 jump_label_unlock(); 870 cpus_read_unlock(); 871 } 872 873 #ifdef CONFIG_SYSCTL 874 static void optimize_all_kprobes(void) 875 { 876 struct hlist_head *head; 877 struct kprobe *p; 878 unsigned int i; 879 880 mutex_lock(&kprobe_mutex); 881 /* If optimization is already allowed, just return */ 882 if (kprobes_allow_optimization) 883 goto out; 884 885 cpus_read_lock(); 886 kprobes_allow_optimization = true; 887 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 888 head = &kprobe_table[i]; 889 hlist_for_each_entry(p, head, hlist) 890 if (!kprobe_disabled(p)) 891 optimize_kprobe(p); 892 } 893 cpus_read_unlock(); 894 printk(KERN_INFO "Kprobes globally optimized\n"); 895 out: 896 mutex_unlock(&kprobe_mutex); 897 } 898 899 static void unoptimize_all_kprobes(void) 900 { 901 struct hlist_head *head; 902 struct kprobe *p; 903 unsigned int i; 904 905 mutex_lock(&kprobe_mutex); 906 /* If optimization is already prohibited, just return */ 907 if (!kprobes_allow_optimization) { 908 mutex_unlock(&kprobe_mutex); 909 return; 910 } 911 912 cpus_read_lock(); 913 kprobes_allow_optimization = false; 914 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 915 head = &kprobe_table[i]; 916 hlist_for_each_entry(p, head, hlist) { 917 if (!kprobe_disabled(p)) 918 unoptimize_kprobe(p, false); 919 } 920 } 921 cpus_read_unlock(); 922 mutex_unlock(&kprobe_mutex); 923 924 /* Wait for unoptimizing completion */ 925 wait_for_kprobe_optimizer(); 926 printk(KERN_INFO "Kprobes globally unoptimized\n"); 927 } 928 929 static DEFINE_MUTEX(kprobe_sysctl_mutex); 930 int sysctl_kprobes_optimization; 931 int proc_kprobes_optimization_handler(struct ctl_table *table, int write, 932 void *buffer, size_t *length, 933 loff_t *ppos) 934 { 935 int ret; 936 937 mutex_lock(&kprobe_sysctl_mutex); 938 sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0; 939 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 940 941 if (sysctl_kprobes_optimization) 942 optimize_all_kprobes(); 943 else 944 unoptimize_all_kprobes(); 945 mutex_unlock(&kprobe_sysctl_mutex); 946 947 return ret; 948 } 949 #endif /* CONFIG_SYSCTL */ 950 951 /* Put a breakpoint for a probe. Must be called with text_mutex locked */ 952 static void __arm_kprobe(struct kprobe *p) 953 { 954 struct kprobe *_p; 955 956 /* Check collision with other optimized kprobes */ 957 _p = get_optimized_kprobe((unsigned long)p->addr); 958 if (unlikely(_p)) 959 /* Fallback to unoptimized kprobe */ 960 unoptimize_kprobe(_p, true); 961 962 arch_arm_kprobe(p); 963 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */ 964 } 965 966 /* Remove the breakpoint of a probe. Must be called with text_mutex locked */ 967 static void __disarm_kprobe(struct kprobe *p, bool reopt) 968 { 969 struct kprobe *_p; 970 971 /* Try to unoptimize */ 972 unoptimize_kprobe(p, kprobes_all_disarmed); 973 974 if (!kprobe_queued(p)) { 975 arch_disarm_kprobe(p); 976 /* If another kprobe was blocked, optimize it. */ 977 _p = get_optimized_kprobe((unsigned long)p->addr); 978 if (unlikely(_p) && reopt) 979 optimize_kprobe(_p); 980 } 981 /* TODO: reoptimize others after unoptimized this probe */ 982 } 983 984 #else /* !CONFIG_OPTPROBES */ 985 986 #define optimize_kprobe(p) do {} while (0) 987 #define unoptimize_kprobe(p, f) do {} while (0) 988 #define kill_optimized_kprobe(p) do {} while (0) 989 #define prepare_optimized_kprobe(p) do {} while (0) 990 #define try_to_optimize_kprobe(p) do {} while (0) 991 #define __arm_kprobe(p) arch_arm_kprobe(p) 992 #define __disarm_kprobe(p, o) arch_disarm_kprobe(p) 993 #define kprobe_disarmed(p) kprobe_disabled(p) 994 #define wait_for_kprobe_optimizer() do {} while (0) 995 996 static int reuse_unused_kprobe(struct kprobe *ap) 997 { 998 /* 999 * If the optimized kprobe is NOT supported, the aggr kprobe is 1000 * released at the same time that the last aggregated kprobe is 1001 * unregistered. 1002 * Thus there should be no chance to reuse unused kprobe. 1003 */ 1004 printk(KERN_ERR "Error: There should be no unused kprobe here.\n"); 1005 return -EINVAL; 1006 } 1007 1008 static void free_aggr_kprobe(struct kprobe *p) 1009 { 1010 arch_remove_kprobe(p); 1011 kfree(p); 1012 } 1013 1014 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 1015 { 1016 return kzalloc(sizeof(struct kprobe), GFP_KERNEL); 1017 } 1018 #endif /* CONFIG_OPTPROBES */ 1019 1020 #ifdef CONFIG_KPROBES_ON_FTRACE 1021 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = { 1022 .func = kprobe_ftrace_handler, 1023 .flags = FTRACE_OPS_FL_SAVE_REGS, 1024 }; 1025 1026 static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = { 1027 .func = kprobe_ftrace_handler, 1028 .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY, 1029 }; 1030 1031 static int kprobe_ipmodify_enabled; 1032 static int kprobe_ftrace_enabled; 1033 1034 /* Must ensure p->addr is really on ftrace */ 1035 static int prepare_kprobe(struct kprobe *p) 1036 { 1037 if (!kprobe_ftrace(p)) 1038 return arch_prepare_kprobe(p); 1039 1040 return arch_prepare_kprobe_ftrace(p); 1041 } 1042 1043 /* Caller must lock kprobe_mutex */ 1044 static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops, 1045 int *cnt) 1046 { 1047 int ret = 0; 1048 1049 ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0); 1050 if (ret) { 1051 pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n", 1052 p->addr, ret); 1053 return ret; 1054 } 1055 1056 if (*cnt == 0) { 1057 ret = register_ftrace_function(ops); 1058 if (ret) { 1059 pr_debug("Failed to init kprobe-ftrace (%d)\n", ret); 1060 goto err_ftrace; 1061 } 1062 } 1063 1064 (*cnt)++; 1065 return ret; 1066 1067 err_ftrace: 1068 /* 1069 * At this point, sinec ops is not registered, we should be sefe from 1070 * registering empty filter. 1071 */ 1072 ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0); 1073 return ret; 1074 } 1075 1076 static int arm_kprobe_ftrace(struct kprobe *p) 1077 { 1078 bool ipmodify = (p->post_handler != NULL); 1079 1080 return __arm_kprobe_ftrace(p, 1081 ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops, 1082 ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); 1083 } 1084 1085 /* Caller must lock kprobe_mutex */ 1086 static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops, 1087 int *cnt) 1088 { 1089 int ret = 0; 1090 1091 if (*cnt == 1) { 1092 ret = unregister_ftrace_function(ops); 1093 if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret)) 1094 return ret; 1095 } 1096 1097 (*cnt)--; 1098 1099 ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0); 1100 WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n", 1101 p->addr, ret); 1102 return ret; 1103 } 1104 1105 static int disarm_kprobe_ftrace(struct kprobe *p) 1106 { 1107 bool ipmodify = (p->post_handler != NULL); 1108 1109 return __disarm_kprobe_ftrace(p, 1110 ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops, 1111 ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); 1112 } 1113 #else /* !CONFIG_KPROBES_ON_FTRACE */ 1114 #define prepare_kprobe(p) arch_prepare_kprobe(p) 1115 #define arm_kprobe_ftrace(p) (-ENODEV) 1116 #define disarm_kprobe_ftrace(p) (-ENODEV) 1117 #endif 1118 1119 /* Arm a kprobe with text_mutex */ 1120 static int arm_kprobe(struct kprobe *kp) 1121 { 1122 if (unlikely(kprobe_ftrace(kp))) 1123 return arm_kprobe_ftrace(kp); 1124 1125 cpus_read_lock(); 1126 mutex_lock(&text_mutex); 1127 __arm_kprobe(kp); 1128 mutex_unlock(&text_mutex); 1129 cpus_read_unlock(); 1130 1131 return 0; 1132 } 1133 1134 /* Disarm a kprobe with text_mutex */ 1135 static int disarm_kprobe(struct kprobe *kp, bool reopt) 1136 { 1137 if (unlikely(kprobe_ftrace(kp))) 1138 return disarm_kprobe_ftrace(kp); 1139 1140 cpus_read_lock(); 1141 mutex_lock(&text_mutex); 1142 __disarm_kprobe(kp, reopt); 1143 mutex_unlock(&text_mutex); 1144 cpus_read_unlock(); 1145 1146 return 0; 1147 } 1148 1149 /* 1150 * Aggregate handlers for multiple kprobes support - these handlers 1151 * take care of invoking the individual kprobe handlers on p->list 1152 */ 1153 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) 1154 { 1155 struct kprobe *kp; 1156 1157 list_for_each_entry_rcu(kp, &p->list, list) { 1158 if (kp->pre_handler && likely(!kprobe_disabled(kp))) { 1159 set_kprobe_instance(kp); 1160 if (kp->pre_handler(kp, regs)) 1161 return 1; 1162 } 1163 reset_kprobe_instance(); 1164 } 1165 return 0; 1166 } 1167 NOKPROBE_SYMBOL(aggr_pre_handler); 1168 1169 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, 1170 unsigned long flags) 1171 { 1172 struct kprobe *kp; 1173 1174 list_for_each_entry_rcu(kp, &p->list, list) { 1175 if (kp->post_handler && likely(!kprobe_disabled(kp))) { 1176 set_kprobe_instance(kp); 1177 kp->post_handler(kp, regs, flags); 1178 reset_kprobe_instance(); 1179 } 1180 } 1181 } 1182 NOKPROBE_SYMBOL(aggr_post_handler); 1183 1184 static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, 1185 int trapnr) 1186 { 1187 struct kprobe *cur = __this_cpu_read(kprobe_instance); 1188 1189 /* 1190 * if we faulted "during" the execution of a user specified 1191 * probe handler, invoke just that probe's fault handler 1192 */ 1193 if (cur && cur->fault_handler) { 1194 if (cur->fault_handler(cur, regs, trapnr)) 1195 return 1; 1196 } 1197 return 0; 1198 } 1199 NOKPROBE_SYMBOL(aggr_fault_handler); 1200 1201 /* Walks the list and increments nmissed count for multiprobe case */ 1202 void kprobes_inc_nmissed_count(struct kprobe *p) 1203 { 1204 struct kprobe *kp; 1205 if (!kprobe_aggrprobe(p)) { 1206 p->nmissed++; 1207 } else { 1208 list_for_each_entry_rcu(kp, &p->list, list) 1209 kp->nmissed++; 1210 } 1211 return; 1212 } 1213 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count); 1214 1215 void recycle_rp_inst(struct kretprobe_instance *ri, 1216 struct hlist_head *head) 1217 { 1218 struct kretprobe *rp = ri->rp; 1219 1220 /* remove rp inst off the rprobe_inst_table */ 1221 hlist_del(&ri->hlist); 1222 INIT_HLIST_NODE(&ri->hlist); 1223 if (likely(rp)) { 1224 raw_spin_lock(&rp->lock); 1225 hlist_add_head(&ri->hlist, &rp->free_instances); 1226 raw_spin_unlock(&rp->lock); 1227 } else 1228 /* Unregistering */ 1229 hlist_add_head(&ri->hlist, head); 1230 } 1231 NOKPROBE_SYMBOL(recycle_rp_inst); 1232 1233 void kretprobe_hash_lock(struct task_struct *tsk, 1234 struct hlist_head **head, unsigned long *flags) 1235 __acquires(hlist_lock) 1236 { 1237 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); 1238 raw_spinlock_t *hlist_lock; 1239 1240 *head = &kretprobe_inst_table[hash]; 1241 hlist_lock = kretprobe_table_lock_ptr(hash); 1242 raw_spin_lock_irqsave(hlist_lock, *flags); 1243 } 1244 NOKPROBE_SYMBOL(kretprobe_hash_lock); 1245 1246 static void kretprobe_table_lock(unsigned long hash, 1247 unsigned long *flags) 1248 __acquires(hlist_lock) 1249 { 1250 raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 1251 raw_spin_lock_irqsave(hlist_lock, *flags); 1252 } 1253 NOKPROBE_SYMBOL(kretprobe_table_lock); 1254 1255 void kretprobe_hash_unlock(struct task_struct *tsk, 1256 unsigned long *flags) 1257 __releases(hlist_lock) 1258 { 1259 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); 1260 raw_spinlock_t *hlist_lock; 1261 1262 hlist_lock = kretprobe_table_lock_ptr(hash); 1263 raw_spin_unlock_irqrestore(hlist_lock, *flags); 1264 } 1265 NOKPROBE_SYMBOL(kretprobe_hash_unlock); 1266 1267 static void kretprobe_table_unlock(unsigned long hash, 1268 unsigned long *flags) 1269 __releases(hlist_lock) 1270 { 1271 raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 1272 raw_spin_unlock_irqrestore(hlist_lock, *flags); 1273 } 1274 NOKPROBE_SYMBOL(kretprobe_table_unlock); 1275 1276 struct kprobe kprobe_busy = { 1277 .addr = (void *) get_kprobe, 1278 }; 1279 1280 void kprobe_busy_begin(void) 1281 { 1282 struct kprobe_ctlblk *kcb; 1283 1284 preempt_disable(); 1285 __this_cpu_write(current_kprobe, &kprobe_busy); 1286 kcb = get_kprobe_ctlblk(); 1287 kcb->kprobe_status = KPROBE_HIT_ACTIVE; 1288 } 1289 1290 void kprobe_busy_end(void) 1291 { 1292 __this_cpu_write(current_kprobe, NULL); 1293 preempt_enable(); 1294 } 1295 1296 /* 1297 * This function is called from finish_task_switch when task tk becomes dead, 1298 * so that we can recycle any function-return probe instances associated 1299 * with this task. These left over instances represent probed functions 1300 * that have been called but will never return. 1301 */ 1302 void kprobe_flush_task(struct task_struct *tk) 1303 { 1304 struct kretprobe_instance *ri; 1305 struct hlist_head *head, empty_rp; 1306 struct hlist_node *tmp; 1307 unsigned long hash, flags = 0; 1308 1309 if (unlikely(!kprobes_initialized)) 1310 /* Early boot. kretprobe_table_locks not yet initialized. */ 1311 return; 1312 1313 kprobe_busy_begin(); 1314 1315 INIT_HLIST_HEAD(&empty_rp); 1316 hash = hash_ptr(tk, KPROBE_HASH_BITS); 1317 head = &kretprobe_inst_table[hash]; 1318 kretprobe_table_lock(hash, &flags); 1319 hlist_for_each_entry_safe(ri, tmp, head, hlist) { 1320 if (ri->task == tk) 1321 recycle_rp_inst(ri, &empty_rp); 1322 } 1323 kretprobe_table_unlock(hash, &flags); 1324 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { 1325 hlist_del(&ri->hlist); 1326 kfree(ri); 1327 } 1328 1329 kprobe_busy_end(); 1330 } 1331 NOKPROBE_SYMBOL(kprobe_flush_task); 1332 1333 static inline void free_rp_inst(struct kretprobe *rp) 1334 { 1335 struct kretprobe_instance *ri; 1336 struct hlist_node *next; 1337 1338 hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) { 1339 hlist_del(&ri->hlist); 1340 kfree(ri); 1341 } 1342 } 1343 1344 static void cleanup_rp_inst(struct kretprobe *rp) 1345 { 1346 unsigned long flags, hash; 1347 struct kretprobe_instance *ri; 1348 struct hlist_node *next; 1349 struct hlist_head *head; 1350 1351 /* No race here */ 1352 for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) { 1353 kretprobe_table_lock(hash, &flags); 1354 head = &kretprobe_inst_table[hash]; 1355 hlist_for_each_entry_safe(ri, next, head, hlist) { 1356 if (ri->rp == rp) 1357 ri->rp = NULL; 1358 } 1359 kretprobe_table_unlock(hash, &flags); 1360 } 1361 free_rp_inst(rp); 1362 } 1363 NOKPROBE_SYMBOL(cleanup_rp_inst); 1364 1365 /* Add the new probe to ap->list */ 1366 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) 1367 { 1368 if (p->post_handler) 1369 unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */ 1370 1371 list_add_rcu(&p->list, &ap->list); 1372 if (p->post_handler && !ap->post_handler) 1373 ap->post_handler = aggr_post_handler; 1374 1375 return 0; 1376 } 1377 1378 /* 1379 * Fill in the required fields of the "manager kprobe". Replace the 1380 * earlier kprobe in the hlist with the manager kprobe 1381 */ 1382 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) 1383 { 1384 /* Copy p's insn slot to ap */ 1385 copy_kprobe(p, ap); 1386 flush_insn_slot(ap); 1387 ap->addr = p->addr; 1388 ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED; 1389 ap->pre_handler = aggr_pre_handler; 1390 ap->fault_handler = aggr_fault_handler; 1391 /* We don't care the kprobe which has gone. */ 1392 if (p->post_handler && !kprobe_gone(p)) 1393 ap->post_handler = aggr_post_handler; 1394 1395 INIT_LIST_HEAD(&ap->list); 1396 INIT_HLIST_NODE(&ap->hlist); 1397 1398 list_add_rcu(&p->list, &ap->list); 1399 hlist_replace_rcu(&p->hlist, &ap->hlist); 1400 } 1401 1402 /* 1403 * This is the second or subsequent kprobe at the address - handle 1404 * the intricacies 1405 */ 1406 static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) 1407 { 1408 int ret = 0; 1409 struct kprobe *ap = orig_p; 1410 1411 cpus_read_lock(); 1412 1413 /* For preparing optimization, jump_label_text_reserved() is called */ 1414 jump_label_lock(); 1415 mutex_lock(&text_mutex); 1416 1417 if (!kprobe_aggrprobe(orig_p)) { 1418 /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */ 1419 ap = alloc_aggr_kprobe(orig_p); 1420 if (!ap) { 1421 ret = -ENOMEM; 1422 goto out; 1423 } 1424 init_aggr_kprobe(ap, orig_p); 1425 } else if (kprobe_unused(ap)) { 1426 /* This probe is going to die. Rescue it */ 1427 ret = reuse_unused_kprobe(ap); 1428 if (ret) 1429 goto out; 1430 } 1431 1432 if (kprobe_gone(ap)) { 1433 /* 1434 * Attempting to insert new probe at the same location that 1435 * had a probe in the module vaddr area which already 1436 * freed. So, the instruction slot has already been 1437 * released. We need a new slot for the new probe. 1438 */ 1439 ret = arch_prepare_kprobe(ap); 1440 if (ret) 1441 /* 1442 * Even if fail to allocate new slot, don't need to 1443 * free aggr_probe. It will be used next time, or 1444 * freed by unregister_kprobe. 1445 */ 1446 goto out; 1447 1448 /* Prepare optimized instructions if possible. */ 1449 prepare_optimized_kprobe(ap); 1450 1451 /* 1452 * Clear gone flag to prevent allocating new slot again, and 1453 * set disabled flag because it is not armed yet. 1454 */ 1455 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE) 1456 | KPROBE_FLAG_DISABLED; 1457 } 1458 1459 /* Copy ap's insn slot to p */ 1460 copy_kprobe(ap, p); 1461 ret = add_new_kprobe(ap, p); 1462 1463 out: 1464 mutex_unlock(&text_mutex); 1465 jump_label_unlock(); 1466 cpus_read_unlock(); 1467 1468 if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { 1469 ap->flags &= ~KPROBE_FLAG_DISABLED; 1470 if (!kprobes_all_disarmed) { 1471 /* Arm the breakpoint again. */ 1472 ret = arm_kprobe(ap); 1473 if (ret) { 1474 ap->flags |= KPROBE_FLAG_DISABLED; 1475 list_del_rcu(&p->list); 1476 synchronize_rcu(); 1477 } 1478 } 1479 } 1480 return ret; 1481 } 1482 1483 bool __weak arch_within_kprobe_blacklist(unsigned long addr) 1484 { 1485 /* The __kprobes marked functions and entry code must not be probed */ 1486 return addr >= (unsigned long)__kprobes_text_start && 1487 addr < (unsigned long)__kprobes_text_end; 1488 } 1489 1490 static bool __within_kprobe_blacklist(unsigned long addr) 1491 { 1492 struct kprobe_blacklist_entry *ent; 1493 1494 if (arch_within_kprobe_blacklist(addr)) 1495 return true; 1496 /* 1497 * If there exists a kprobe_blacklist, verify and 1498 * fail any probe registration in the prohibited area 1499 */ 1500 list_for_each_entry(ent, &kprobe_blacklist, list) { 1501 if (addr >= ent->start_addr && addr < ent->end_addr) 1502 return true; 1503 } 1504 return false; 1505 } 1506 1507 bool within_kprobe_blacklist(unsigned long addr) 1508 { 1509 char symname[KSYM_NAME_LEN], *p; 1510 1511 if (__within_kprobe_blacklist(addr)) 1512 return true; 1513 1514 /* Check if the address is on a suffixed-symbol */ 1515 if (!lookup_symbol_name(addr, symname)) { 1516 p = strchr(symname, '.'); 1517 if (!p) 1518 return false; 1519 *p = '\0'; 1520 addr = (unsigned long)kprobe_lookup_name(symname, 0); 1521 if (addr) 1522 return __within_kprobe_blacklist(addr); 1523 } 1524 return false; 1525 } 1526 1527 /* 1528 * If we have a symbol_name argument, look it up and add the offset field 1529 * to it. This way, we can specify a relative address to a symbol. 1530 * This returns encoded errors if it fails to look up symbol or invalid 1531 * combination of parameters. 1532 */ 1533 static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr, 1534 const char *symbol_name, unsigned int offset) 1535 { 1536 if ((symbol_name && addr) || (!symbol_name && !addr)) 1537 goto invalid; 1538 1539 if (symbol_name) { 1540 addr = kprobe_lookup_name(symbol_name, offset); 1541 if (!addr) 1542 return ERR_PTR(-ENOENT); 1543 } 1544 1545 addr = (kprobe_opcode_t *)(((char *)addr) + offset); 1546 if (addr) 1547 return addr; 1548 1549 invalid: 1550 return ERR_PTR(-EINVAL); 1551 } 1552 1553 static kprobe_opcode_t *kprobe_addr(struct kprobe *p) 1554 { 1555 return _kprobe_addr(p->addr, p->symbol_name, p->offset); 1556 } 1557 1558 /* Check passed kprobe is valid and return kprobe in kprobe_table. */ 1559 static struct kprobe *__get_valid_kprobe(struct kprobe *p) 1560 { 1561 struct kprobe *ap, *list_p; 1562 1563 lockdep_assert_held(&kprobe_mutex); 1564 1565 ap = get_kprobe(p->addr); 1566 if (unlikely(!ap)) 1567 return NULL; 1568 1569 if (p != ap) { 1570 list_for_each_entry(list_p, &ap->list, list) 1571 if (list_p == p) 1572 /* kprobe p is a valid probe */ 1573 goto valid; 1574 return NULL; 1575 } 1576 valid: 1577 return ap; 1578 } 1579 1580 /* Return error if the kprobe is being re-registered */ 1581 static inline int check_kprobe_rereg(struct kprobe *p) 1582 { 1583 int ret = 0; 1584 1585 mutex_lock(&kprobe_mutex); 1586 if (__get_valid_kprobe(p)) 1587 ret = -EINVAL; 1588 mutex_unlock(&kprobe_mutex); 1589 1590 return ret; 1591 } 1592 1593 int __weak arch_check_ftrace_location(struct kprobe *p) 1594 { 1595 unsigned long ftrace_addr; 1596 1597 ftrace_addr = ftrace_location((unsigned long)p->addr); 1598 if (ftrace_addr) { 1599 #ifdef CONFIG_KPROBES_ON_FTRACE 1600 /* Given address is not on the instruction boundary */ 1601 if ((unsigned long)p->addr != ftrace_addr) 1602 return -EILSEQ; 1603 p->flags |= KPROBE_FLAG_FTRACE; 1604 #else /* !CONFIG_KPROBES_ON_FTRACE */ 1605 return -EINVAL; 1606 #endif 1607 } 1608 return 0; 1609 } 1610 1611 static int check_kprobe_address_safe(struct kprobe *p, 1612 struct module **probed_mod) 1613 { 1614 int ret; 1615 1616 ret = arch_check_ftrace_location(p); 1617 if (ret) 1618 return ret; 1619 jump_label_lock(); 1620 preempt_disable(); 1621 1622 /* Ensure it is not in reserved area nor out of text */ 1623 if (!kernel_text_address((unsigned long) p->addr) || 1624 within_kprobe_blacklist((unsigned long) p->addr) || 1625 jump_label_text_reserved(p->addr, p->addr) || 1626 find_bug((unsigned long)p->addr)) { 1627 ret = -EINVAL; 1628 goto out; 1629 } 1630 1631 /* Check if are we probing a module */ 1632 *probed_mod = __module_text_address((unsigned long) p->addr); 1633 if (*probed_mod) { 1634 /* 1635 * We must hold a refcount of the probed module while updating 1636 * its code to prohibit unexpected unloading. 1637 */ 1638 if (unlikely(!try_module_get(*probed_mod))) { 1639 ret = -ENOENT; 1640 goto out; 1641 } 1642 1643 /* 1644 * If the module freed .init.text, we couldn't insert 1645 * kprobes in there. 1646 */ 1647 if (within_module_init((unsigned long)p->addr, *probed_mod) && 1648 (*probed_mod)->state != MODULE_STATE_COMING) { 1649 module_put(*probed_mod); 1650 *probed_mod = NULL; 1651 ret = -ENOENT; 1652 } 1653 } 1654 out: 1655 preempt_enable(); 1656 jump_label_unlock(); 1657 1658 return ret; 1659 } 1660 1661 int register_kprobe(struct kprobe *p) 1662 { 1663 int ret; 1664 struct kprobe *old_p; 1665 struct module *probed_mod; 1666 kprobe_opcode_t *addr; 1667 1668 /* Adjust probe address from symbol */ 1669 addr = kprobe_addr(p); 1670 if (IS_ERR(addr)) 1671 return PTR_ERR(addr); 1672 p->addr = addr; 1673 1674 ret = check_kprobe_rereg(p); 1675 if (ret) 1676 return ret; 1677 1678 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ 1679 p->flags &= KPROBE_FLAG_DISABLED; 1680 p->nmissed = 0; 1681 INIT_LIST_HEAD(&p->list); 1682 1683 ret = check_kprobe_address_safe(p, &probed_mod); 1684 if (ret) 1685 return ret; 1686 1687 mutex_lock(&kprobe_mutex); 1688 1689 old_p = get_kprobe(p->addr); 1690 if (old_p) { 1691 /* Since this may unoptimize old_p, locking text_mutex. */ 1692 ret = register_aggr_kprobe(old_p, p); 1693 goto out; 1694 } 1695 1696 cpus_read_lock(); 1697 /* Prevent text modification */ 1698 mutex_lock(&text_mutex); 1699 ret = prepare_kprobe(p); 1700 mutex_unlock(&text_mutex); 1701 cpus_read_unlock(); 1702 if (ret) 1703 goto out; 1704 1705 INIT_HLIST_NODE(&p->hlist); 1706 hlist_add_head_rcu(&p->hlist, 1707 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 1708 1709 if (!kprobes_all_disarmed && !kprobe_disabled(p)) { 1710 ret = arm_kprobe(p); 1711 if (ret) { 1712 hlist_del_rcu(&p->hlist); 1713 synchronize_rcu(); 1714 goto out; 1715 } 1716 } 1717 1718 /* Try to optimize kprobe */ 1719 try_to_optimize_kprobe(p); 1720 out: 1721 mutex_unlock(&kprobe_mutex); 1722 1723 if (probed_mod) 1724 module_put(probed_mod); 1725 1726 return ret; 1727 } 1728 EXPORT_SYMBOL_GPL(register_kprobe); 1729 1730 /* Check if all probes on the aggrprobe are disabled */ 1731 static int aggr_kprobe_disabled(struct kprobe *ap) 1732 { 1733 struct kprobe *kp; 1734 1735 lockdep_assert_held(&kprobe_mutex); 1736 1737 list_for_each_entry(kp, &ap->list, list) 1738 if (!kprobe_disabled(kp)) 1739 /* 1740 * There is an active probe on the list. 1741 * We can't disable this ap. 1742 */ 1743 return 0; 1744 1745 return 1; 1746 } 1747 1748 /* Disable one kprobe: Make sure called under kprobe_mutex is locked */ 1749 static struct kprobe *__disable_kprobe(struct kprobe *p) 1750 { 1751 struct kprobe *orig_p; 1752 int ret; 1753 1754 /* Get an original kprobe for return */ 1755 orig_p = __get_valid_kprobe(p); 1756 if (unlikely(orig_p == NULL)) 1757 return ERR_PTR(-EINVAL); 1758 1759 if (!kprobe_disabled(p)) { 1760 /* Disable probe if it is a child probe */ 1761 if (p != orig_p) 1762 p->flags |= KPROBE_FLAG_DISABLED; 1763 1764 /* Try to disarm and disable this/parent probe */ 1765 if (p == orig_p || aggr_kprobe_disabled(orig_p)) { 1766 /* 1767 * If kprobes_all_disarmed is set, orig_p 1768 * should have already been disarmed, so 1769 * skip unneed disarming process. 1770 */ 1771 if (!kprobes_all_disarmed) { 1772 ret = disarm_kprobe(orig_p, true); 1773 if (ret) { 1774 p->flags &= ~KPROBE_FLAG_DISABLED; 1775 return ERR_PTR(ret); 1776 } 1777 } 1778 orig_p->flags |= KPROBE_FLAG_DISABLED; 1779 } 1780 } 1781 1782 return orig_p; 1783 } 1784 1785 /* 1786 * Unregister a kprobe without a scheduler synchronization. 1787 */ 1788 static int __unregister_kprobe_top(struct kprobe *p) 1789 { 1790 struct kprobe *ap, *list_p; 1791 1792 /* Disable kprobe. This will disarm it if needed. */ 1793 ap = __disable_kprobe(p); 1794 if (IS_ERR(ap)) 1795 return PTR_ERR(ap); 1796 1797 if (ap == p) 1798 /* 1799 * This probe is an independent(and non-optimized) kprobe 1800 * (not an aggrprobe). Remove from the hash list. 1801 */ 1802 goto disarmed; 1803 1804 /* Following process expects this probe is an aggrprobe */ 1805 WARN_ON(!kprobe_aggrprobe(ap)); 1806 1807 if (list_is_singular(&ap->list) && kprobe_disarmed(ap)) 1808 /* 1809 * !disarmed could be happen if the probe is under delayed 1810 * unoptimizing. 1811 */ 1812 goto disarmed; 1813 else { 1814 /* If disabling probe has special handlers, update aggrprobe */ 1815 if (p->post_handler && !kprobe_gone(p)) { 1816 list_for_each_entry(list_p, &ap->list, list) { 1817 if ((list_p != p) && (list_p->post_handler)) 1818 goto noclean; 1819 } 1820 ap->post_handler = NULL; 1821 } 1822 noclean: 1823 /* 1824 * Remove from the aggrprobe: this path will do nothing in 1825 * __unregister_kprobe_bottom(). 1826 */ 1827 list_del_rcu(&p->list); 1828 if (!kprobe_disabled(ap) && !kprobes_all_disarmed) 1829 /* 1830 * Try to optimize this probe again, because post 1831 * handler may have been changed. 1832 */ 1833 optimize_kprobe(ap); 1834 } 1835 return 0; 1836 1837 disarmed: 1838 hlist_del_rcu(&ap->hlist); 1839 return 0; 1840 } 1841 1842 static void __unregister_kprobe_bottom(struct kprobe *p) 1843 { 1844 struct kprobe *ap; 1845 1846 if (list_empty(&p->list)) 1847 /* This is an independent kprobe */ 1848 arch_remove_kprobe(p); 1849 else if (list_is_singular(&p->list)) { 1850 /* This is the last child of an aggrprobe */ 1851 ap = list_entry(p->list.next, struct kprobe, list); 1852 list_del(&p->list); 1853 free_aggr_kprobe(ap); 1854 } 1855 /* Otherwise, do nothing. */ 1856 } 1857 1858 int register_kprobes(struct kprobe **kps, int num) 1859 { 1860 int i, ret = 0; 1861 1862 if (num <= 0) 1863 return -EINVAL; 1864 for (i = 0; i < num; i++) { 1865 ret = register_kprobe(kps[i]); 1866 if (ret < 0) { 1867 if (i > 0) 1868 unregister_kprobes(kps, i); 1869 break; 1870 } 1871 } 1872 return ret; 1873 } 1874 EXPORT_SYMBOL_GPL(register_kprobes); 1875 1876 void unregister_kprobe(struct kprobe *p) 1877 { 1878 unregister_kprobes(&p, 1); 1879 } 1880 EXPORT_SYMBOL_GPL(unregister_kprobe); 1881 1882 void unregister_kprobes(struct kprobe **kps, int num) 1883 { 1884 int i; 1885 1886 if (num <= 0) 1887 return; 1888 mutex_lock(&kprobe_mutex); 1889 for (i = 0; i < num; i++) 1890 if (__unregister_kprobe_top(kps[i]) < 0) 1891 kps[i]->addr = NULL; 1892 mutex_unlock(&kprobe_mutex); 1893 1894 synchronize_rcu(); 1895 for (i = 0; i < num; i++) 1896 if (kps[i]->addr) 1897 __unregister_kprobe_bottom(kps[i]); 1898 } 1899 EXPORT_SYMBOL_GPL(unregister_kprobes); 1900 1901 int __weak kprobe_exceptions_notify(struct notifier_block *self, 1902 unsigned long val, void *data) 1903 { 1904 return NOTIFY_DONE; 1905 } 1906 NOKPROBE_SYMBOL(kprobe_exceptions_notify); 1907 1908 static struct notifier_block kprobe_exceptions_nb = { 1909 .notifier_call = kprobe_exceptions_notify, 1910 .priority = 0x7fffffff /* we need to be notified first */ 1911 }; 1912 1913 unsigned long __weak arch_deref_entry_point(void *entry) 1914 { 1915 return (unsigned long)entry; 1916 } 1917 1918 #ifdef CONFIG_KRETPROBES 1919 /* 1920 * This kprobe pre_handler is registered with every kretprobe. When probe 1921 * hits it will set up the return probe. 1922 */ 1923 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) 1924 { 1925 struct kretprobe *rp = container_of(p, struct kretprobe, kp); 1926 unsigned long hash, flags = 0; 1927 struct kretprobe_instance *ri; 1928 1929 /* 1930 * To avoid deadlocks, prohibit return probing in NMI contexts, 1931 * just skip the probe and increase the (inexact) 'nmissed' 1932 * statistical counter, so that the user is informed that 1933 * something happened: 1934 */ 1935 if (unlikely(in_nmi())) { 1936 rp->nmissed++; 1937 return 0; 1938 } 1939 1940 /* TODO: consider to only swap the RA after the last pre_handler fired */ 1941 hash = hash_ptr(current, KPROBE_HASH_BITS); 1942 raw_spin_lock_irqsave(&rp->lock, flags); 1943 if (!hlist_empty(&rp->free_instances)) { 1944 ri = hlist_entry(rp->free_instances.first, 1945 struct kretprobe_instance, hlist); 1946 hlist_del(&ri->hlist); 1947 raw_spin_unlock_irqrestore(&rp->lock, flags); 1948 1949 ri->rp = rp; 1950 ri->task = current; 1951 1952 if (rp->entry_handler && rp->entry_handler(ri, regs)) { 1953 raw_spin_lock_irqsave(&rp->lock, flags); 1954 hlist_add_head(&ri->hlist, &rp->free_instances); 1955 raw_spin_unlock_irqrestore(&rp->lock, flags); 1956 return 0; 1957 } 1958 1959 arch_prepare_kretprobe(ri, regs); 1960 1961 /* XXX(hch): why is there no hlist_move_head? */ 1962 INIT_HLIST_NODE(&ri->hlist); 1963 kretprobe_table_lock(hash, &flags); 1964 hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]); 1965 kretprobe_table_unlock(hash, &flags); 1966 } else { 1967 rp->nmissed++; 1968 raw_spin_unlock_irqrestore(&rp->lock, flags); 1969 } 1970 return 0; 1971 } 1972 NOKPROBE_SYMBOL(pre_handler_kretprobe); 1973 1974 bool __weak arch_kprobe_on_func_entry(unsigned long offset) 1975 { 1976 return !offset; 1977 } 1978 1979 bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) 1980 { 1981 kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset); 1982 1983 if (IS_ERR(kp_addr)) 1984 return false; 1985 1986 if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) || 1987 !arch_kprobe_on_func_entry(offset)) 1988 return false; 1989 1990 return true; 1991 } 1992 1993 int register_kretprobe(struct kretprobe *rp) 1994 { 1995 int ret = 0; 1996 struct kretprobe_instance *inst; 1997 int i; 1998 void *addr; 1999 2000 if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset)) 2001 return -EINVAL; 2002 2003 if (kretprobe_blacklist_size) { 2004 addr = kprobe_addr(&rp->kp); 2005 if (IS_ERR(addr)) 2006 return PTR_ERR(addr); 2007 2008 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 2009 if (kretprobe_blacklist[i].addr == addr) 2010 return -EINVAL; 2011 } 2012 } 2013 2014 rp->kp.pre_handler = pre_handler_kretprobe; 2015 rp->kp.post_handler = NULL; 2016 rp->kp.fault_handler = NULL; 2017 2018 /* Pre-allocate memory for max kretprobe instances */ 2019 if (rp->maxactive <= 0) { 2020 #ifdef CONFIG_PREEMPTION 2021 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus()); 2022 #else 2023 rp->maxactive = num_possible_cpus(); 2024 #endif 2025 } 2026 raw_spin_lock_init(&rp->lock); 2027 INIT_HLIST_HEAD(&rp->free_instances); 2028 for (i = 0; i < rp->maxactive; i++) { 2029 inst = kmalloc(sizeof(struct kretprobe_instance) + 2030 rp->data_size, GFP_KERNEL); 2031 if (inst == NULL) { 2032 free_rp_inst(rp); 2033 return -ENOMEM; 2034 } 2035 INIT_HLIST_NODE(&inst->hlist); 2036 hlist_add_head(&inst->hlist, &rp->free_instances); 2037 } 2038 2039 rp->nmissed = 0; 2040 /* Establish function entry probe point */ 2041 ret = register_kprobe(&rp->kp); 2042 if (ret != 0) 2043 free_rp_inst(rp); 2044 return ret; 2045 } 2046 EXPORT_SYMBOL_GPL(register_kretprobe); 2047 2048 int register_kretprobes(struct kretprobe **rps, int num) 2049 { 2050 int ret = 0, i; 2051 2052 if (num <= 0) 2053 return -EINVAL; 2054 for (i = 0; i < num; i++) { 2055 ret = register_kretprobe(rps[i]); 2056 if (ret < 0) { 2057 if (i > 0) 2058 unregister_kretprobes(rps, i); 2059 break; 2060 } 2061 } 2062 return ret; 2063 } 2064 EXPORT_SYMBOL_GPL(register_kretprobes); 2065 2066 void unregister_kretprobe(struct kretprobe *rp) 2067 { 2068 unregister_kretprobes(&rp, 1); 2069 } 2070 EXPORT_SYMBOL_GPL(unregister_kretprobe); 2071 2072 void unregister_kretprobes(struct kretprobe **rps, int num) 2073 { 2074 int i; 2075 2076 if (num <= 0) 2077 return; 2078 mutex_lock(&kprobe_mutex); 2079 for (i = 0; i < num; i++) 2080 if (__unregister_kprobe_top(&rps[i]->kp) < 0) 2081 rps[i]->kp.addr = NULL; 2082 mutex_unlock(&kprobe_mutex); 2083 2084 synchronize_rcu(); 2085 for (i = 0; i < num; i++) { 2086 if (rps[i]->kp.addr) { 2087 __unregister_kprobe_bottom(&rps[i]->kp); 2088 cleanup_rp_inst(rps[i]); 2089 } 2090 } 2091 } 2092 EXPORT_SYMBOL_GPL(unregister_kretprobes); 2093 2094 #else /* CONFIG_KRETPROBES */ 2095 int register_kretprobe(struct kretprobe *rp) 2096 { 2097 return -ENOSYS; 2098 } 2099 EXPORT_SYMBOL_GPL(register_kretprobe); 2100 2101 int register_kretprobes(struct kretprobe **rps, int num) 2102 { 2103 return -ENOSYS; 2104 } 2105 EXPORT_SYMBOL_GPL(register_kretprobes); 2106 2107 void unregister_kretprobe(struct kretprobe *rp) 2108 { 2109 } 2110 EXPORT_SYMBOL_GPL(unregister_kretprobe); 2111 2112 void unregister_kretprobes(struct kretprobe **rps, int num) 2113 { 2114 } 2115 EXPORT_SYMBOL_GPL(unregister_kretprobes); 2116 2117 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) 2118 { 2119 return 0; 2120 } 2121 NOKPROBE_SYMBOL(pre_handler_kretprobe); 2122 2123 #endif /* CONFIG_KRETPROBES */ 2124 2125 /* Set the kprobe gone and remove its instruction buffer. */ 2126 static void kill_kprobe(struct kprobe *p) 2127 { 2128 struct kprobe *kp; 2129 2130 lockdep_assert_held(&kprobe_mutex); 2131 2132 p->flags |= KPROBE_FLAG_GONE; 2133 if (kprobe_aggrprobe(p)) { 2134 /* 2135 * If this is an aggr_kprobe, we have to list all the 2136 * chained probes and mark them GONE. 2137 */ 2138 list_for_each_entry(kp, &p->list, list) 2139 kp->flags |= KPROBE_FLAG_GONE; 2140 p->post_handler = NULL; 2141 kill_optimized_kprobe(p); 2142 } 2143 /* 2144 * Here, we can remove insn_slot safely, because no thread calls 2145 * the original probed function (which will be freed soon) any more. 2146 */ 2147 arch_remove_kprobe(p); 2148 } 2149 2150 /* Disable one kprobe */ 2151 int disable_kprobe(struct kprobe *kp) 2152 { 2153 int ret = 0; 2154 struct kprobe *p; 2155 2156 mutex_lock(&kprobe_mutex); 2157 2158 /* Disable this kprobe */ 2159 p = __disable_kprobe(kp); 2160 if (IS_ERR(p)) 2161 ret = PTR_ERR(p); 2162 2163 mutex_unlock(&kprobe_mutex); 2164 return ret; 2165 } 2166 EXPORT_SYMBOL_GPL(disable_kprobe); 2167 2168 /* Enable one kprobe */ 2169 int enable_kprobe(struct kprobe *kp) 2170 { 2171 int ret = 0; 2172 struct kprobe *p; 2173 2174 mutex_lock(&kprobe_mutex); 2175 2176 /* Check whether specified probe is valid. */ 2177 p = __get_valid_kprobe(kp); 2178 if (unlikely(p == NULL)) { 2179 ret = -EINVAL; 2180 goto out; 2181 } 2182 2183 if (kprobe_gone(kp)) { 2184 /* This kprobe has gone, we couldn't enable it. */ 2185 ret = -EINVAL; 2186 goto out; 2187 } 2188 2189 if (p != kp) 2190 kp->flags &= ~KPROBE_FLAG_DISABLED; 2191 2192 if (!kprobes_all_disarmed && kprobe_disabled(p)) { 2193 p->flags &= ~KPROBE_FLAG_DISABLED; 2194 ret = arm_kprobe(p); 2195 if (ret) 2196 p->flags |= KPROBE_FLAG_DISABLED; 2197 } 2198 out: 2199 mutex_unlock(&kprobe_mutex); 2200 return ret; 2201 } 2202 EXPORT_SYMBOL_GPL(enable_kprobe); 2203 2204 /* Caller must NOT call this in usual path. This is only for critical case */ 2205 void dump_kprobe(struct kprobe *kp) 2206 { 2207 pr_err("Dumping kprobe:\n"); 2208 pr_err("Name: %s\nOffset: %x\nAddress: %pS\n", 2209 kp->symbol_name, kp->offset, kp->addr); 2210 } 2211 NOKPROBE_SYMBOL(dump_kprobe); 2212 2213 int kprobe_add_ksym_blacklist(unsigned long entry) 2214 { 2215 struct kprobe_blacklist_entry *ent; 2216 unsigned long offset = 0, size = 0; 2217 2218 if (!kernel_text_address(entry) || 2219 !kallsyms_lookup_size_offset(entry, &size, &offset)) 2220 return -EINVAL; 2221 2222 ent = kmalloc(sizeof(*ent), GFP_KERNEL); 2223 if (!ent) 2224 return -ENOMEM; 2225 ent->start_addr = entry; 2226 ent->end_addr = entry + size; 2227 INIT_LIST_HEAD(&ent->list); 2228 list_add_tail(&ent->list, &kprobe_blacklist); 2229 2230 return (int)size; 2231 } 2232 2233 /* Add all symbols in given area into kprobe blacklist */ 2234 int kprobe_add_area_blacklist(unsigned long start, unsigned long end) 2235 { 2236 unsigned long entry; 2237 int ret = 0; 2238 2239 for (entry = start; entry < end; entry += ret) { 2240 ret = kprobe_add_ksym_blacklist(entry); 2241 if (ret < 0) 2242 return ret; 2243 if (ret == 0) /* In case of alias symbol */ 2244 ret = 1; 2245 } 2246 return 0; 2247 } 2248 2249 /* Remove all symbols in given area from kprobe blacklist */ 2250 static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end) 2251 { 2252 struct kprobe_blacklist_entry *ent, *n; 2253 2254 list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) { 2255 if (ent->start_addr < start || ent->start_addr >= end) 2256 continue; 2257 list_del(&ent->list); 2258 kfree(ent); 2259 } 2260 } 2261 2262 static void kprobe_remove_ksym_blacklist(unsigned long entry) 2263 { 2264 kprobe_remove_area_blacklist(entry, entry + 1); 2265 } 2266 2267 int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value, 2268 char *type, char *sym) 2269 { 2270 return -ERANGE; 2271 } 2272 2273 int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type, 2274 char *sym) 2275 { 2276 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT 2277 if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym)) 2278 return 0; 2279 #ifdef CONFIG_OPTPROBES 2280 if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym)) 2281 return 0; 2282 #endif 2283 #endif 2284 if (!arch_kprobe_get_kallsym(&symnum, value, type, sym)) 2285 return 0; 2286 return -ERANGE; 2287 } 2288 2289 int __init __weak arch_populate_kprobe_blacklist(void) 2290 { 2291 return 0; 2292 } 2293 2294 /* 2295 * Lookup and populate the kprobe_blacklist. 2296 * 2297 * Unlike the kretprobe blacklist, we'll need to determine 2298 * the range of addresses that belong to the said functions, 2299 * since a kprobe need not necessarily be at the beginning 2300 * of a function. 2301 */ 2302 static int __init populate_kprobe_blacklist(unsigned long *start, 2303 unsigned long *end) 2304 { 2305 unsigned long entry; 2306 unsigned long *iter; 2307 int ret; 2308 2309 for (iter = start; iter < end; iter++) { 2310 entry = arch_deref_entry_point((void *)*iter); 2311 ret = kprobe_add_ksym_blacklist(entry); 2312 if (ret == -EINVAL) 2313 continue; 2314 if (ret < 0) 2315 return ret; 2316 } 2317 2318 /* Symbols in __kprobes_text are blacklisted */ 2319 ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start, 2320 (unsigned long)__kprobes_text_end); 2321 if (ret) 2322 return ret; 2323 2324 /* Symbols in noinstr section are blacklisted */ 2325 ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start, 2326 (unsigned long)__noinstr_text_end); 2327 2328 return ret ? : arch_populate_kprobe_blacklist(); 2329 } 2330 2331 static void add_module_kprobe_blacklist(struct module *mod) 2332 { 2333 unsigned long start, end; 2334 int i; 2335 2336 if (mod->kprobe_blacklist) { 2337 for (i = 0; i < mod->num_kprobe_blacklist; i++) 2338 kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]); 2339 } 2340 2341 start = (unsigned long)mod->kprobes_text_start; 2342 if (start) { 2343 end = start + mod->kprobes_text_size; 2344 kprobe_add_area_blacklist(start, end); 2345 } 2346 2347 start = (unsigned long)mod->noinstr_text_start; 2348 if (start) { 2349 end = start + mod->noinstr_text_size; 2350 kprobe_add_area_blacklist(start, end); 2351 } 2352 } 2353 2354 static void remove_module_kprobe_blacklist(struct module *mod) 2355 { 2356 unsigned long start, end; 2357 int i; 2358 2359 if (mod->kprobe_blacklist) { 2360 for (i = 0; i < mod->num_kprobe_blacklist; i++) 2361 kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]); 2362 } 2363 2364 start = (unsigned long)mod->kprobes_text_start; 2365 if (start) { 2366 end = start + mod->kprobes_text_size; 2367 kprobe_remove_area_blacklist(start, end); 2368 } 2369 2370 start = (unsigned long)mod->noinstr_text_start; 2371 if (start) { 2372 end = start + mod->noinstr_text_size; 2373 kprobe_remove_area_blacklist(start, end); 2374 } 2375 } 2376 2377 /* Module notifier call back, checking kprobes on the module */ 2378 static int kprobes_module_callback(struct notifier_block *nb, 2379 unsigned long val, void *data) 2380 { 2381 struct module *mod = data; 2382 struct hlist_head *head; 2383 struct kprobe *p; 2384 unsigned int i; 2385 int checkcore = (val == MODULE_STATE_GOING); 2386 2387 if (val == MODULE_STATE_COMING) { 2388 mutex_lock(&kprobe_mutex); 2389 add_module_kprobe_blacklist(mod); 2390 mutex_unlock(&kprobe_mutex); 2391 } 2392 if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE) 2393 return NOTIFY_DONE; 2394 2395 /* 2396 * When MODULE_STATE_GOING was notified, both of module .text and 2397 * .init.text sections would be freed. When MODULE_STATE_LIVE was 2398 * notified, only .init.text section would be freed. We need to 2399 * disable kprobes which have been inserted in the sections. 2400 */ 2401 mutex_lock(&kprobe_mutex); 2402 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2403 head = &kprobe_table[i]; 2404 hlist_for_each_entry(p, head, hlist) 2405 if (within_module_init((unsigned long)p->addr, mod) || 2406 (checkcore && 2407 within_module_core((unsigned long)p->addr, mod))) { 2408 /* 2409 * The vaddr this probe is installed will soon 2410 * be vfreed buy not synced to disk. Hence, 2411 * disarming the breakpoint isn't needed. 2412 * 2413 * Note, this will also move any optimized probes 2414 * that are pending to be removed from their 2415 * corresponding lists to the freeing_list and 2416 * will not be touched by the delayed 2417 * kprobe_optimizer work handler. 2418 */ 2419 kill_kprobe(p); 2420 } 2421 } 2422 if (val == MODULE_STATE_GOING) 2423 remove_module_kprobe_blacklist(mod); 2424 mutex_unlock(&kprobe_mutex); 2425 return NOTIFY_DONE; 2426 } 2427 2428 static struct notifier_block kprobe_module_nb = { 2429 .notifier_call = kprobes_module_callback, 2430 .priority = 0 2431 }; 2432 2433 /* Markers of _kprobe_blacklist section */ 2434 extern unsigned long __start_kprobe_blacklist[]; 2435 extern unsigned long __stop_kprobe_blacklist[]; 2436 2437 static int __init init_kprobes(void) 2438 { 2439 int i, err = 0; 2440 2441 /* FIXME allocate the probe table, currently defined statically */ 2442 /* initialize all list heads */ 2443 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2444 INIT_HLIST_HEAD(&kprobe_table[i]); 2445 INIT_HLIST_HEAD(&kretprobe_inst_table[i]); 2446 raw_spin_lock_init(&(kretprobe_table_locks[i].lock)); 2447 } 2448 2449 err = populate_kprobe_blacklist(__start_kprobe_blacklist, 2450 __stop_kprobe_blacklist); 2451 if (err) { 2452 pr_err("kprobes: failed to populate blacklist: %d\n", err); 2453 pr_err("Please take care of using kprobes.\n"); 2454 } 2455 2456 if (kretprobe_blacklist_size) { 2457 /* lookup the function address from its name */ 2458 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 2459 kretprobe_blacklist[i].addr = 2460 kprobe_lookup_name(kretprobe_blacklist[i].name, 0); 2461 if (!kretprobe_blacklist[i].addr) 2462 printk("kretprobe: lookup failed: %s\n", 2463 kretprobe_blacklist[i].name); 2464 } 2465 } 2466 2467 #if defined(CONFIG_OPTPROBES) 2468 #if defined(__ARCH_WANT_KPROBES_INSN_SLOT) 2469 /* Init kprobe_optinsn_slots */ 2470 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE; 2471 #endif 2472 /* By default, kprobes can be optimized */ 2473 kprobes_allow_optimization = true; 2474 #endif 2475 2476 /* By default, kprobes are armed */ 2477 kprobes_all_disarmed = false; 2478 2479 err = arch_init_kprobes(); 2480 if (!err) 2481 err = register_die_notifier(&kprobe_exceptions_nb); 2482 if (!err) 2483 err = register_module_notifier(&kprobe_module_nb); 2484 2485 kprobes_initialized = (err == 0); 2486 2487 if (!err) 2488 init_test_probes(); 2489 return err; 2490 } 2491 subsys_initcall(init_kprobes); 2492 2493 #ifdef CONFIG_DEBUG_FS 2494 static void report_probe(struct seq_file *pi, struct kprobe *p, 2495 const char *sym, int offset, char *modname, struct kprobe *pp) 2496 { 2497 char *kprobe_type; 2498 void *addr = p->addr; 2499 2500 if (p->pre_handler == pre_handler_kretprobe) 2501 kprobe_type = "r"; 2502 else 2503 kprobe_type = "k"; 2504 2505 if (!kallsyms_show_value(pi->file->f_cred)) 2506 addr = NULL; 2507 2508 if (sym) 2509 seq_printf(pi, "%px %s %s+0x%x %s ", 2510 addr, kprobe_type, sym, offset, 2511 (modname ? modname : " ")); 2512 else /* try to use %pS */ 2513 seq_printf(pi, "%px %s %pS ", 2514 addr, kprobe_type, p->addr); 2515 2516 if (!pp) 2517 pp = p; 2518 seq_printf(pi, "%s%s%s%s\n", 2519 (kprobe_gone(p) ? "[GONE]" : ""), 2520 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""), 2521 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""), 2522 (kprobe_ftrace(pp) ? "[FTRACE]" : "")); 2523 } 2524 2525 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos) 2526 { 2527 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL; 2528 } 2529 2530 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) 2531 { 2532 (*pos)++; 2533 if (*pos >= KPROBE_TABLE_SIZE) 2534 return NULL; 2535 return pos; 2536 } 2537 2538 static void kprobe_seq_stop(struct seq_file *f, void *v) 2539 { 2540 /* Nothing to do */ 2541 } 2542 2543 static int show_kprobe_addr(struct seq_file *pi, void *v) 2544 { 2545 struct hlist_head *head; 2546 struct kprobe *p, *kp; 2547 const char *sym = NULL; 2548 unsigned int i = *(loff_t *) v; 2549 unsigned long offset = 0; 2550 char *modname, namebuf[KSYM_NAME_LEN]; 2551 2552 head = &kprobe_table[i]; 2553 preempt_disable(); 2554 hlist_for_each_entry_rcu(p, head, hlist) { 2555 sym = kallsyms_lookup((unsigned long)p->addr, NULL, 2556 &offset, &modname, namebuf); 2557 if (kprobe_aggrprobe(p)) { 2558 list_for_each_entry_rcu(kp, &p->list, list) 2559 report_probe(pi, kp, sym, offset, modname, p); 2560 } else 2561 report_probe(pi, p, sym, offset, modname, NULL); 2562 } 2563 preempt_enable(); 2564 return 0; 2565 } 2566 2567 static const struct seq_operations kprobes_sops = { 2568 .start = kprobe_seq_start, 2569 .next = kprobe_seq_next, 2570 .stop = kprobe_seq_stop, 2571 .show = show_kprobe_addr 2572 }; 2573 2574 DEFINE_SEQ_ATTRIBUTE(kprobes); 2575 2576 /* kprobes/blacklist -- shows which functions can not be probed */ 2577 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos) 2578 { 2579 mutex_lock(&kprobe_mutex); 2580 return seq_list_start(&kprobe_blacklist, *pos); 2581 } 2582 2583 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos) 2584 { 2585 return seq_list_next(v, &kprobe_blacklist, pos); 2586 } 2587 2588 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) 2589 { 2590 struct kprobe_blacklist_entry *ent = 2591 list_entry(v, struct kprobe_blacklist_entry, list); 2592 2593 /* 2594 * If /proc/kallsyms is not showing kernel address, we won't 2595 * show them here either. 2596 */ 2597 if (!kallsyms_show_value(m->file->f_cred)) 2598 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL, 2599 (void *)ent->start_addr); 2600 else 2601 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr, 2602 (void *)ent->end_addr, (void *)ent->start_addr); 2603 return 0; 2604 } 2605 2606 static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v) 2607 { 2608 mutex_unlock(&kprobe_mutex); 2609 } 2610 2611 static const struct seq_operations kprobe_blacklist_sops = { 2612 .start = kprobe_blacklist_seq_start, 2613 .next = kprobe_blacklist_seq_next, 2614 .stop = kprobe_blacklist_seq_stop, 2615 .show = kprobe_blacklist_seq_show, 2616 }; 2617 DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist); 2618 2619 static int arm_all_kprobes(void) 2620 { 2621 struct hlist_head *head; 2622 struct kprobe *p; 2623 unsigned int i, total = 0, errors = 0; 2624 int err, ret = 0; 2625 2626 mutex_lock(&kprobe_mutex); 2627 2628 /* If kprobes are armed, just return */ 2629 if (!kprobes_all_disarmed) 2630 goto already_enabled; 2631 2632 /* 2633 * optimize_kprobe() called by arm_kprobe() checks 2634 * kprobes_all_disarmed, so set kprobes_all_disarmed before 2635 * arm_kprobe. 2636 */ 2637 kprobes_all_disarmed = false; 2638 /* Arming kprobes doesn't optimize kprobe itself */ 2639 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2640 head = &kprobe_table[i]; 2641 /* Arm all kprobes on a best-effort basis */ 2642 hlist_for_each_entry(p, head, hlist) { 2643 if (!kprobe_disabled(p)) { 2644 err = arm_kprobe(p); 2645 if (err) { 2646 errors++; 2647 ret = err; 2648 } 2649 total++; 2650 } 2651 } 2652 } 2653 2654 if (errors) 2655 pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n", 2656 errors, total); 2657 else 2658 pr_info("Kprobes globally enabled\n"); 2659 2660 already_enabled: 2661 mutex_unlock(&kprobe_mutex); 2662 return ret; 2663 } 2664 2665 static int disarm_all_kprobes(void) 2666 { 2667 struct hlist_head *head; 2668 struct kprobe *p; 2669 unsigned int i, total = 0, errors = 0; 2670 int err, ret = 0; 2671 2672 mutex_lock(&kprobe_mutex); 2673 2674 /* If kprobes are already disarmed, just return */ 2675 if (kprobes_all_disarmed) { 2676 mutex_unlock(&kprobe_mutex); 2677 return 0; 2678 } 2679 2680 kprobes_all_disarmed = true; 2681 2682 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2683 head = &kprobe_table[i]; 2684 /* Disarm all kprobes on a best-effort basis */ 2685 hlist_for_each_entry(p, head, hlist) { 2686 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) { 2687 err = disarm_kprobe(p, false); 2688 if (err) { 2689 errors++; 2690 ret = err; 2691 } 2692 total++; 2693 } 2694 } 2695 } 2696 2697 if (errors) 2698 pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n", 2699 errors, total); 2700 else 2701 pr_info("Kprobes globally disabled\n"); 2702 2703 mutex_unlock(&kprobe_mutex); 2704 2705 /* Wait for disarming all kprobes by optimizer */ 2706 wait_for_kprobe_optimizer(); 2707 2708 return ret; 2709 } 2710 2711 /* 2712 * XXX: The debugfs bool file interface doesn't allow for callbacks 2713 * when the bool state is switched. We can reuse that facility when 2714 * available 2715 */ 2716 static ssize_t read_enabled_file_bool(struct file *file, 2717 char __user *user_buf, size_t count, loff_t *ppos) 2718 { 2719 char buf[3]; 2720 2721 if (!kprobes_all_disarmed) 2722 buf[0] = '1'; 2723 else 2724 buf[0] = '0'; 2725 buf[1] = '\n'; 2726 buf[2] = 0x00; 2727 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 2728 } 2729 2730 static ssize_t write_enabled_file_bool(struct file *file, 2731 const char __user *user_buf, size_t count, loff_t *ppos) 2732 { 2733 char buf[32]; 2734 size_t buf_size; 2735 int ret = 0; 2736 2737 buf_size = min(count, (sizeof(buf)-1)); 2738 if (copy_from_user(buf, user_buf, buf_size)) 2739 return -EFAULT; 2740 2741 buf[buf_size] = '\0'; 2742 switch (buf[0]) { 2743 case 'y': 2744 case 'Y': 2745 case '1': 2746 ret = arm_all_kprobes(); 2747 break; 2748 case 'n': 2749 case 'N': 2750 case '0': 2751 ret = disarm_all_kprobes(); 2752 break; 2753 default: 2754 return -EINVAL; 2755 } 2756 2757 if (ret) 2758 return ret; 2759 2760 return count; 2761 } 2762 2763 static const struct file_operations fops_kp = { 2764 .read = read_enabled_file_bool, 2765 .write = write_enabled_file_bool, 2766 .llseek = default_llseek, 2767 }; 2768 2769 static int __init debugfs_kprobe_init(void) 2770 { 2771 struct dentry *dir; 2772 unsigned int value = 1; 2773 2774 dir = debugfs_create_dir("kprobes", NULL); 2775 2776 debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops); 2777 2778 debugfs_create_file("enabled", 0600, dir, &value, &fops_kp); 2779 2780 debugfs_create_file("blacklist", 0400, dir, NULL, 2781 &kprobe_blacklist_fops); 2782 2783 return 0; 2784 } 2785 2786 late_initcall(debugfs_kprobe_init); 2787 #endif /* CONFIG_DEBUG_FS */ 2788