/*
 * Kernel Probes (KProbes)
 * kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
					    unsigned int __unused)
{
	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}

static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
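/*
 * Worked example (illustrative, assuming typical x86_64 values where
 * PAGE_SIZE == 4096, MAX_INSN_SIZE == 16 and kprobe_opcode_t is one
 * byte): slots_per_page() below yields 4096 / (16 * 1) == 256 slots,
 * so a single executable page can hold the copied instructions of up
 * to 256 probes.
 */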
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache;
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

void __weak *alloc_insn_page(void)
{
	return module_alloc(PAGE_SIZE);
}

void __weak free_insn_page(void *page)
{
	module_memfree(page);
}

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	/* Since the slot array is not protected by rcu, we need a mutex */
	mutex_lock(&c->mutex);
retry:
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					rcu_read_unlock();
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}
	rcu_read_unlock();

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space. Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	list_add_rcu(&kip->list, &c->pages);
	slot = kip->insns;
out:
	mutex_unlock(&c->mutex);
	return slot;
}
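/*
 * Usage sketch (illustrative): arch code normally reaches this cache
 * through the get_insn_slot()/free_insn_slot() wrappers generated by
 * DEFINE_INSN_CACHE_OPS(insn) in <linux/kprobes.h>, roughly:
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *	...
 *	free_insn_slot(p->ainsn.insn, 0);
 *	(the final call returns the slot to SLOT_CLEAN)
 */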
/* Return 1 if all garbage slots are collected, otherwise 0. */
static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del_rcu(&kip->list);
			synchronize_rcu();
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is interrupted on the garbage slots */
	synchronize_rcu();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	long idx;

	mutex_lock(&c->mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		idx = ((long)slot - (long)kip->insns) /
			(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c))
			goto out;
	}
	/* Could not find this slot. */
	WARN_ON(1);
	kip = NULL;
out:
	rcu_read_unlock();
	/* Mark and sweep: this may sleep */
	if (kip) {
		/* Check double free */
		WARN_ON(kip->slot_used[idx] != SLOT_USED);
		if (dirty) {
			kip->slot_used[idx] = SLOT_DIRTY;
			kip->ngarbage++;
			if (++c->nr_garbage > slots_per_page(c))
				collect_garbage_slots(c);
		} else {
			collect_one_slot(kip, idx);
		}
	}
	mutex_unlock(&c->mutex);
}

/*
 * Check whether the given address is on a page of kprobe instruction
 * slots. This is used to check whether an address on the stack is in
 * a text area or not.
 */
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{
	struct kprobe_insn_page *kip;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (addr >= (unsigned long)kip->insns &&
		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif
#endif
/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);

static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dependent optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}
NOKPROBE_SYMBOL(opt_pre_handler);

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}
/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not an aggr/opt probe, just return whether it is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (excluding the breakpoint).
 */
static struct kprobe *get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static void do_optimize_kprobes(void)
{
	/*
	 * The optimization/unoptimization refers to online_cpus via
	 * stop_machine(), and cpu-hotplug modifies online_cpus. At the
	 * same time, text_mutex is held both in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to
	 * lock text_mutex, but stop_machine() can not be done because
	 * online_cpus has been changed).
	 * To avoid this deadlock, the caller must have locked cpu-hotplug,
	 * preventing cpu-hotplug from running outside of the text_mutex
	 * critical section.
	 */
	lockdep_assert_cpus_held();

	/* Optimization is never done while kprobes are all disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);
	mutex_unlock(&text_mutex);
}
/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on unoptimizing_list.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	/* Unoptimization must be done anytime, even while kprobes are disarmed */
	if (list_empty(&unoptimizing_list))
		return;

	mutex_lock(&text_mutex);
	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop over freeing_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes().)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
}

/* Reclaim all kprobes on the freeing_list */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		list_del_init(&op->list);
		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
			/*
			 * This must not happen, but if there is a kprobe
			 * still in use, keep it on the kprobes hash list.
			 */
			continue;
		}
		free_aggr_kprobe(&op->kp);
	}
}

/* Start the optimizer after OPTIMIZE_DELAY has passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for the quiescence period to ensure all potentially
	 * preempted tasks have been scheduled normally. Because an optprobe
	 * may modify multiple instructions, there is a chance that a task
	 * was preempted in the middle of them. In that case, it could return
	 * to the 2nd-Nth byte of the jump instruction. This wait avoids that.
	 * Note that on a non-preemptive kernel, this is transparently
	 * converted to synchronize_sched() to wait for all interrupts to
	 * have completed.
	 */
	synchronize_rcu_tasks();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Step 5: Kick the optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
}

/* Wait for completing optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* this will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}
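/*
 * Lifecycle sketch (illustrative summary, no extra machinery): under
 * kprobe_mutex, a probe moves between the staging lists above roughly
 * like this, with kprobe_optimizer() draining the lists:
 *
 *	registered --optimize_kprobe()--> optimizing_list --> OPTIMIZED
 *	OPTIMIZED --unoptimize_kprobe()--> unoptimizing_list --> disarmed
 *	unused and disarmed --> freeing_list --> freed
 */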
/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* kprobes with a post_handler cannot be optimized */
	if (p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check there are no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* This is under unoptimizing. Just dequeue the probe */
		list_del_init(&op->list);
	else {
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Shortcut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	lockdep_assert_cpus_held();
	arch_unoptimize_kprobe(op);
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * Only if this is an unoptimizing kprobe and forced,
			 * forcibly unoptimize it. (No need to unoptimize an
			 * unoptimized kprobe again :)
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}
	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Cancel unoptimizing for reuse */
static int reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	/*
	 * An unused kprobe MUST be in the middle of delayed unoptimizing
	 * (meaning there is still a relative jump in place) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	WARN_ON_ONCE(list_empty(&op->list));
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (remove from op->list) */
	if (!kprobe_optready(ap))
		return -EINVAL;

	optimize_kprobe(ap);
	return 0;
}
/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list. After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		arch_prepare_optimized_kprobe(op, p);
}

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	__prepare_optimized_kprobe(op, p);
}

/* Allocate a new optimized_kprobe and try to prepare optimized instructions */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	__prepare_optimized_kprobe(op, p);

	return &op->kp;
}

static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* It is impossible to optimize an ftrace-based kprobe */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called */
	cpus_read_lock();
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If we failed to set up optimizing, fall back to kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks the optimizer */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_SYSCTL
static void optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		goto out;

	cpus_read_lock();
	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	cpus_read_unlock();
	printk(KERN_INFO "Kprobes globally optimized\n");
out:
	mutex_unlock(&kprobe_mutex);
}

static void unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	cpus_read_lock();
	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;
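/*
 * Usage sketch (illustrative): this knob is typically exposed as
 * /proc/sys/debug/kprobes-optimization, so an administrator can turn
 * jump optimization off or on globally with, e.g.:
 *
 *	sysctl debug.kprobes-optimization=0
 */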
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */

/* Put a breakpoint for a probe. Must be called with text_mutex locked */
static void __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	/* Try to unoptimize */
	unoptimize_kprobe(p, kprobes_all_disarmed);

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize others after unoptimizing this probe */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)
static int reuse_unused_kprobe(struct kprobe *ap)
{
	/*
	 * If optimized kprobes are NOT supported, the aggr kprobe is
	 * released at the same time that the last aggregated kprobe is
	 * unregistered.
	 * Thus there should be no chance to reuse an unused kprobe.
	 */
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	return -EINVAL;
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};
static int kprobe_ftrace_enabled;

/* Must ensure p->addr is really on ftrace */
static int prepare_kprobe(struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		return arch_prepare_kprobe(p);

	return arch_prepare_kprobe_ftrace(p);
}

/* Caller must lock kprobe_mutex */
static int arm_kprobe_ftrace(struct kprobe *p)
{
	int ret = 0;

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	if (ret) {
		pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
			 p->addr, ret);
		return ret;
	}

	if (kprobe_ftrace_enabled == 0) {
		ret = register_ftrace_function(&kprobe_ftrace_ops);
		if (ret) {
			pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
			goto err_ftrace;
		}
	}

	kprobe_ftrace_enabled++;
	return ret;

err_ftrace:
	/*
	 * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
	 * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
	 * empty filter_hash which would undesirably trace all functions.
	 */
	ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
	return ret;
}

/* Caller must lock kprobe_mutex */
static int disarm_kprobe_ftrace(struct kprobe *p)
{
	int ret = 0;

	if (kprobe_ftrace_enabled == 1) {
		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
			return ret;
	}

	kprobe_ftrace_enabled--;

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 1, 0);
	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
		  p->addr, ret);
	return ret;
}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p)	arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p)	(-ENODEV)
#define disarm_kprobe_ftrace(p)	(-ENODEV)
#endif

/* Arm a kprobe with text_mutex */
static int arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp)))
		return arm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}

/* Disarm a kprobe with text_mutex */
static int disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp)))
		return disarm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_pre_handler);

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
NOKPROBE_SYMBOL(aggr_post_handler);

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
			      int trapnr)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_fault_handler);

/* Walks the list and increments the nmissed count for the multiprobe case */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);

void recycle_rp_inst(struct kretprobe_instance *ri,
		     struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		raw_spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		raw_spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
NOKPROBE_SYMBOL(recycle_rp_inst);

void kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_lock);

static void kretprobe_table_lock(unsigned long hash,
				 unsigned long *flags)
__acquires(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_lock);

void kretprobe_hash_unlock(struct task_struct *tsk,
			   unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_unlock);

static void kretprobe_table_unlock(unsigned long hash,
				   unsigned long *flags)
__releases(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_unlock);
/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot. kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
NOKPROBE_SYMBOL(kprobe_flush_task);

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *next;

	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
NOKPROBE_SYMBOL(cleanup_rp_inst);

/* Add the new probe to ap->list */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	if (p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}
/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about the kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}
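/*
 * Layout sketch (illustrative): after a second kprobe is registered at
 * the same address, the hash table entry points to one manager kprobe
 * whose ->list chains the user-supplied probes:
 *
 *	kprobe_table[hash] --> manager kprobe (pre_handler == aggr_pre_handler)
 *	                           ->list: user kprobe A <-> user kprobe B
 */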
/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	cpus_read_lock();

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap)) {
		/* This probe is going to die. Rescue it */
		ret = reuse_unused_kprobe(ap);
		if (ret)
			goto out;
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location that had a probe in a module vaddr area which
		 * has already been freed. So, the instruction slot has
		 * already been released. We need a new slot for the new
		 * probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe. It will be used next
			 * time, or freed by unregister_kprobe().
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed) {
			/* Arm the breakpoint again. */
			ret = arm_kprobe(ap);
			if (ret) {
				ap->flags |= KPROBE_FLAG_DISABLED;
				list_del_rcu(&p->list);
				synchronize_rcu();
			}
		}
	}
	return ret;
}

bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
	/* The __kprobes marked functions and entry code must not be probed */
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}

static bool __within_kprobe_blacklist(unsigned long addr)
{
	struct kprobe_blacklist_entry *ent;

	if (arch_within_kprobe_blacklist(addr))
		return true;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	list_for_each_entry(ent, &kprobe_blacklist, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return true;
	}
	return false;
}

bool within_kprobe_blacklist(unsigned long addr)
{
	char symname[KSYM_NAME_LEN], *p;

	if (__within_kprobe_blacklist(addr))
		return true;

	/* Check if the address belongs to a suffixed symbol */
	if (!lookup_symbol_name(addr, symname)) {
		p = strchr(symname, '.');
		if (!p)
			return false;
		*p = '\0';
		addr = (unsigned long)kprobe_lookup_name(symname, 0);
		if (addr)
			return __within_kprobe_blacklist(addr);
	}
	return false;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify an address relative to a symbol.
 * This returns encoded errors if it fails to look up the symbol or if an
 * invalid combination of parameters is given.
 */
static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
				     const char *symbol_name,
				     unsigned int offset)
{
	if ((symbol_name && addr) || (!symbol_name && !addr))
		goto invalid;

	if (symbol_name) {
		addr = kprobe_lookup_name(symbol_name, offset);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	addr = (kprobe_opcode_t *)(((char *)addr) + offset);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}

static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
}
/* Check that the passed kprobe is valid and return the kprobe from kprobe_table. */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry_rcu(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/* Return an error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (__get_valid_kprobe(p))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}

int __weak arch_check_ftrace_location(struct kprobe *p)
{
	unsigned long ftrace_addr;

	ftrace_addr = ftrace_location((unsigned long)p->addr);
	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* The given address is not on an instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}
	return 0;
}

static int check_kprobe_address_safe(struct kprobe *p,
				     struct module **probed_mod)
{
	int ret;

	ret = arch_check_ftrace_location(p);
	if (ret)
		return ret;
	jump_label_lock();
	preempt_disable();

	/* Ensure it is not in a reserved area nor out of text */
	if (!kernel_text_address((unsigned long) p->addr) ||
	    within_kprobe_blacklist((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if we are probing a module */
	*probed_mod = __module_text_address((unsigned long) p->addr);
	if (*probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module freed its .init.text, we can't insert
		 * kprobes there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    (*probed_mod)->state != MODULE_STATE_COMING) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}
out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}
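/*
 * Usage sketch (illustrative; mirrors samples/kprobes/kprobe_example.c,
 * the handler and symbol names here are hypothetical): a module places
 * a probe on a symbol and has its pre_handler invoked whenever the
 * probed instruction is hit:
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %pS\n", p->addr);
 *		return 0;
 *	}
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_sys_open",
 *		.pre_handler	= my_pre,
 *	};
 *	...
 *	ret = register_kprobe(&my_kp);
 *	(and later unregister_kprobe(&my_kp))
 */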
int register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	/* Adjust probe address from symbol */
	addr = kprobe_addr(p);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, register_aggr_kprobe() takes text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	cpus_read_lock();
	/* Prevent text modification */
	mutex_lock(&text_mutex);
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
		ret = arm_kprobe(p);
		if (ret) {
			hlist_del_rcu(&p->hlist);
			synchronize_rcu();
			goto out;
		}
	}

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);

/* Check if all probes on the aggrprobe are disabled */
static int aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this ap.
			 */
			return 0;

	return 1;
}

/* Disable one kprobe: the caller must hold kprobe_mutex */
static struct kprobe *__disable_kprobe(struct kprobe *p)
{
	struct kprobe *orig_p;
	int ret;

	/* Get the original kprobe to return */
	orig_p = __get_valid_kprobe(p);
	if (unlikely(orig_p == NULL))
		return ERR_PTR(-EINVAL);

	if (!kprobe_disabled(p)) {
		/* Disable the probe if it is a child probe */
		if (p != orig_p)
			p->flags |= KPROBE_FLAG_DISABLED;

		/* Try to disarm and disable this/parent probe */
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			/*
			 * If kprobes_all_disarmed is set, orig_p
			 * should have already been disarmed, so
			 * skip the unneeded disarming process.
			 */
			if (!kprobes_all_disarmed) {
				ret = disarm_kprobe(orig_p, true);
				if (ret) {
					p->flags &= ~KPROBE_FLAG_DISABLED;
					return ERR_PTR(ret);
				}
			}
			orig_p->flags |= KPROBE_FLAG_DISABLED;
		}
	}

	return orig_p;
}
/*
 * Unregister a kprobe without scheduler synchronization.
 */
static int __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Disable kprobe. This will disarm it if needed. */
	ap = __disable_kprobe(p);
	if (IS_ERR(ap))
		return PTR_ERR(ap);

	if (ap == p)
		/*
		 * This probe is an independent (and non-optimized) kprobe
		 * (not an aggrprobe). Remove it from the hash list.
		 */
		goto disarmed;

	/* The following process expects this probe to be an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed can happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If the disabling probe has special handlers, update the aggrprobe */
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because the post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}
	return 0;

disarmed:
	hlist_del_rcu(&ap->hlist);
	return 0;
}

static void __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *ap;

	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This is the last child of an aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
	}
	/* Otherwise, do nothing. */
}

int register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_rcu();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

int __weak kprobe_exceptions_notify(struct notifier_block *self,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*
	 * To avoid deadlocks, prohibit return probing in NMI contexts,
	 * just skip the probe and increase the (inexact) 'nmissed'
	 * statistical counter, so that the user is informed that
	 * something happened:
	 */
	if (unlikely(in_nmi())) {
		rp->nmissed++;
		return 0;
	}

	/* TODO: consider only swapping the RA after the last pre_handler has fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	raw_spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		raw_spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			raw_spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			raw_spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		raw_spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

bool __weak arch_kprobe_on_func_entry(unsigned long offset)
{
	return !offset;
}

bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
{
	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);

	if (IS_ERR(kp_addr))
		return false;

	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
	    !arch_kprobe_on_func_entry(offset))
		return false;

	return true;
}
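/*
 * Usage sketch (illustrative; mirrors samples/kprobes/kretprobe_example.c,
 * the handler and symbol names here are hypothetical): a return probe
 * reports the probed function's return value from its handler:
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("returned %lu\n", regs_return_value(regs));
 *		return 0;
 *	}
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "do_sys_open",
 *		.handler	= my_ret_handler,
 *		.maxactive	= 20,
 *	};
 *	...
 *	ret = register_kretprobe(&my_rp);
 */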
int register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
		return -EINVAL;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (IS_ERR(addr))
			return PTR_ERR(addr);

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	raw_spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_rcu();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove the insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) anymore.
	 */
	arch_remove_kprobe(p);
}

/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	p = __disable_kprobe(kp);
	if (IS_ERR(p))
		ret = PTR_ERR(p);

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);
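/*
 * Sketch (illustrative): disable_kprobe()/enable_kprobe() let a user
 * temporarily mute a registered probe without unregistering it:
 *
 *	disable_kprobe(&my_kp);		handlers stop firing
 *	...
 *	enable_kprobe(&my_kp);		re-armed, unless the probe is gone
 */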
/* Callers must NOT use this on the usual path; it is only for critical cases. */
void dump_kprobe(struct kprobe *kp)
{
	pr_err("Dumping kprobe:\n");
	pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
	       kp->symbol_name, kp->offset, kp->addr);
}
NOKPROBE_SYMBOL(dump_kprobe);

int kprobe_add_ksym_blacklist(unsigned long entry)
{
	struct kprobe_blacklist_entry *ent;
	unsigned long offset = 0, size = 0;

	if (!kernel_text_address(entry) ||
	    !kallsyms_lookup_size_offset(entry, &size, &offset))
		return -EINVAL;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->start_addr = entry;
	ent->end_addr = entry + size;
	INIT_LIST_HEAD(&ent->list);
	list_add_tail(&ent->list, &kprobe_blacklist);

	return (int)size;
}

/* Add all symbols in the given area to the kprobe blacklist */
int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
{
	unsigned long entry;
	int ret = 0;

	for (entry = start; entry < end; entry += ret) {
		ret = kprobe_add_ksym_blacklist(entry);
		if (ret < 0)
			return ret;
		if (ret == 0)	/* In case of alias symbol */
			ret = 1;
	}
	return 0;
}

int __init __weak arch_populate_kprobe_blacklist(void)
{
	return 0;
}

/*
 * Look up and populate the kprobe_blacklist.
 *
 * Unlike the kretprobe blacklist, we'll need to determine
 * the range of addresses that belong to the said functions,
 * since a kprobe need not necessarily be at the beginning
 * of a function.
 */
static int __init populate_kprobe_blacklist(unsigned long *start,
					    unsigned long *end)
{
	unsigned long entry;
	unsigned long *iter;
	int ret;

	for (iter = start; iter < end; iter++) {
		entry = arch_deref_entry_point((void *)*iter);
		ret = kprobe_add_ksym_blacklist(entry);
		if (ret == -EINVAL)
			continue;
		if (ret < 0)
			return ret;
	}

	/* Symbols in __kprobes_text are blacklisted */
	ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
					(unsigned long)__kprobes_text_end);

	return ret ? : arch_populate_kprobe_blacklist();
}
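/*
 * Example (hedged sketch, not taken from any particular architecture):
 * an arch can override the __weak arch_populate_kprobe_blacklist()
 * above to blacklist ranges that generic code cannot discover, using
 * the helpers just defined. The section markers here are hypothetical:
 *
 *	int __init arch_populate_kprobe_blacklist(void)
 *	{
 *		return kprobe_add_area_blacklist(
 *			(unsigned long)__example_entry_text_start,
 *			(unsigned long)__example_entry_text_end);
 *	}
 */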
/* Module notifier callback, checking kprobes on the module */
static int kprobes_module_callback(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * disable the kprobes that have been inserted into those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr at which this probe is installed
				 * will soon be vfreed but is not synced to
				 * disk. Hence, disarming the breakpoint isn't
				 * needed.
				 *
				 * Note, this will also move any optimized
				 * probes that are pending removal from their
				 * corresponding lists to the freeing_list, so
				 * they will not be touched by the delayed
				 * kprobe_optimizer work handler.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

/* Markers of the _kprobe_blacklist section */
extern unsigned long __start_kprobe_blacklist[];
extern unsigned long __stop_kprobe_blacklist[];

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME: allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
					__stop_kprobe_blacklist);
	if (err) {
		pr_err("kprobes: failed to populate blacklist: %d\n", err);
		pr_err("Please take care when using kprobes.\n");
	}

	if (kretprobe_blacklist_size) {
		/* look up the function addresses from their names */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kretprobe_blacklist[i].addr =
				kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
			if (!kretprobe_blacklist[i].addr)
				pr_err("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}
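/*
 * With CONFIG_DEBUG_FS, the code below exposes three files under
 * <debugfs>/kprobes/ (typically /sys/kernel/debug/kprobes/):
 *
 *	list		every registered probe, one per line
 *	enabled		global arm/disarm switch
 *	blacklist	address ranges that must not be probed
 *
 * Typical usage from a shell (illustrative):
 *
 *	cat /sys/kernel/debug/kprobes/list
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all)
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	(re-arm all)
 */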
"[GONE]" : ""), 2320 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""), 2321 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""), 2322 (kprobe_ftrace(pp) ? "[FTRACE]" : "")); 2323 } 2324 2325 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos) 2326 { 2327 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL; 2328 } 2329 2330 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) 2331 { 2332 (*pos)++; 2333 if (*pos >= KPROBE_TABLE_SIZE) 2334 return NULL; 2335 return pos; 2336 } 2337 2338 static void kprobe_seq_stop(struct seq_file *f, void *v) 2339 { 2340 /* Nothing to do */ 2341 } 2342 2343 static int show_kprobe_addr(struct seq_file *pi, void *v) 2344 { 2345 struct hlist_head *head; 2346 struct kprobe *p, *kp; 2347 const char *sym = NULL; 2348 unsigned int i = *(loff_t *) v; 2349 unsigned long offset = 0; 2350 char *modname, namebuf[KSYM_NAME_LEN]; 2351 2352 head = &kprobe_table[i]; 2353 preempt_disable(); 2354 hlist_for_each_entry_rcu(p, head, hlist) { 2355 sym = kallsyms_lookup((unsigned long)p->addr, NULL, 2356 &offset, &modname, namebuf); 2357 if (kprobe_aggrprobe(p)) { 2358 list_for_each_entry_rcu(kp, &p->list, list) 2359 report_probe(pi, kp, sym, offset, modname, p); 2360 } else 2361 report_probe(pi, p, sym, offset, modname, NULL); 2362 } 2363 preempt_enable(); 2364 return 0; 2365 } 2366 2367 static const struct seq_operations kprobes_seq_ops = { 2368 .start = kprobe_seq_start, 2369 .next = kprobe_seq_next, 2370 .stop = kprobe_seq_stop, 2371 .show = show_kprobe_addr 2372 }; 2373 2374 static int kprobes_open(struct inode *inode, struct file *filp) 2375 { 2376 return seq_open(filp, &kprobes_seq_ops); 2377 } 2378 2379 static const struct file_operations debugfs_kprobes_operations = { 2380 .open = kprobes_open, 2381 .read = seq_read, 2382 .llseek = seq_lseek, 2383 .release = seq_release, 2384 }; 2385 2386 /* kprobes/blacklist -- shows which functions can not be probed */ 2387 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos) 2388 { 2389 return seq_list_start(&kprobe_blacklist, *pos); 2390 } 2391 2392 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos) 2393 { 2394 return seq_list_next(v, &kprobe_blacklist, pos); 2395 } 2396 2397 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) 2398 { 2399 struct kprobe_blacklist_entry *ent = 2400 list_entry(v, struct kprobe_blacklist_entry, list); 2401 2402 /* 2403 * If /proc/kallsyms is not showing kernel address, we won't 2404 * show them here either. 
/* kprobes/blacklist -- shows which functions cannot be probed */
static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
{
	return seq_list_start(&kprobe_blacklist, *pos);
}

static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &kprobe_blacklist, pos);
}

static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
{
	struct kprobe_blacklist_entry *ent =
		list_entry(v, struct kprobe_blacklist_entry, list);

	/*
	 * If /proc/kallsyms is not showing kernel addresses, we won't
	 * show them here either.
	 */
	if (!kallsyms_show_value())
		seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
			   (void *)ent->start_addr);
	else
		seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
			   (void *)ent->end_addr, (void *)ent->start_addr);
	return 0;
}

static const struct seq_operations kprobe_blacklist_seq_ops = {
	.start = kprobe_blacklist_seq_start,
	.next  = kprobe_blacklist_seq_next,
	.stop  = kprobe_seq_stop,	/* Reuse void function */
	.show  = kprobe_blacklist_seq_show,
};

static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobe_blacklist_seq_ops);
}

static const struct file_operations debugfs_kprobe_blacklist_ops = {
	.open           = kprobe_blacklist_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
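/*
 * Each blacklist entry above is shown as "0x<start>-0x<end>\t<symbol>",
 * with both addresses zeroed out when kallsyms_show_value() denies
 * them. For example (illustrative addresses and symbol):
 *
 *	0xffffffff81025c80-0xffffffff81025cf0	do_int3
 *	0x0000000000000000-0x0000000000000000	do_int3
 */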
static int arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i, total = 0, errors = 0;
	int err, ret = 0;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/*
	 * optimize_kprobe() called by arm_kprobe() checks
	 * kprobes_all_disarmed, so set kprobes_all_disarmed before
	 * arm_kprobe.
	 */
	kprobes_all_disarmed = false;
	/* Arming kprobes doesn't optimize the kprobe itself */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		/* Arm all kprobes on a best-effort basis */
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!kprobe_disabled(p)) {
				err = arm_kprobe(p);
				if (err) {
					errors++;
					ret = err;
				}
				total++;
			}
		}
	}

	if (errors)
		pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
			errors, total);
	else
		pr_info("Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return ret;
}

static int disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i, total = 0, errors = 0;
	int err, ret = 0;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
		return 0;
	}

	kprobes_all_disarmed = true;

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		/* Disarm all kprobes on a best-effort basis */
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
				err = disarm_kprobe(p, false);
				if (err) {
					errors++;
					ret = err;
				}
				total++;
			}
		}
	}

	if (errors)
		pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
			errors, total);
	else
		pr_info("Kprobes globally disabled\n");

	mutex_unlock(&kprobe_mutex);

	/* Wait for the optimizer to finish disarming all kprobes */
	wait_for_kprobe_optimizer();

	return ret;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility once
 * it does.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	int ret = 0;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		ret = arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		ret = disarm_all_kprobes();
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	return count;
}

static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
	.llseek =	default_llseek,
};

static int __init debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0400, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file)
		goto error;

	file = debugfs_create_file("enabled", 0600, dir,
				   &value, &fops_kp);
	if (!file)
		goto error;

	file = debugfs_create_file("blacklist", 0400, dir, NULL,
				   &debugfs_kprobe_blacklist_ops);
	if (!file)
		goto error;

	return 0;

error:
	debugfs_remove(dir);
	return -ENOMEM;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);