// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008-2014 Mathieu Desnoyers
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/static_key.h>

extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

DEFINE_SRCU(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Tracepoint module list mutex protects the local module list.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Local list of struct tp_module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * tracepoints_mutex protects the builtin and module tracepoints.
 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
 */
static DEFINE_MUTEX(tracepoints_mutex);

static struct rcu_head *early_probes;
static bool ok_to_free_tracepoints;

/*
 * Note about RCU:
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 */
struct tp_probes {
	struct rcu_head rcu;
	struct tracepoint_func probes[];
};

/* Called when removing a probe but allocation of a new tp_funcs array failed */
static void tp_stub_func(void)
{
	return;
}

static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc(struct_size(p, probes, count),
				      GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}

static void srcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, rcu));
}

static void rcu_free_old_probes(struct rcu_head *head)
{
	call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
}

static __init int release_early_probes(void)
{
	struct rcu_head *tmp;

	ok_to_free_tracepoints = true;

	while (early_probes) {
		tmp = early_probes;
		early_probes = tmp->next;
		call_rcu(tmp, rcu_free_old_probes);
	}

	return 0;
}

/* SRCU is initialized at core_initcall */
postcore_initcall(release_early_probes);

static inline void release_probes(struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);

		/*
		 * We can't free probes if SRCU is not initialized yet.
		 * Postpone the freeing till after SRCU is initialized.
		 */
		if (unlikely(!ok_to_free_tracepoints)) {
			tp_probes->rcu.next = early_probes;
			early_probes = &tp_probes->rcu;
			return;
		}

		/*
		 * Tracepoint probes are protected by both sched RCU and SRCU,
		 * by calling the SRCU callback in the sched RCU callback we
		 * cover both cases. So let us chain the SRCU and sched RCU
		 * callbacks to wait for both grace periods.
		 */
		call_rcu(&tp_probes->rcu, rcu_free_old_probes);
	}
}
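/*
 * Illustrative sketch of the chained grace periods above (informational
 * comment, not executable code): release_probes() first queues the old
 * array on sched RCU; once that grace period elapses,
 * rcu_free_old_probes() re-queues it on tracepoint_srcu, and only
 * srcu_free_old_probes() finally kfree()s it:
 *
 *	release_probes(old)
 *	  call_rcu(&tp_probes->rcu, rcu_free_old_probes)
 *		... sched RCU grace period ...
 *	  call_srcu(&tracepoint_srcu, &tp_probes->rcu, srcu_free_old_probes)
 *		... SRCU grace period ...
 *	  kfree(tp_probes)
 */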
static void debug_print_probes(struct tracepoint_func *funcs)
{
	int i;

	if (!tracepoint_debug || !funcs)
		return;

	for (i = 0; funcs[i].func; i++)
		printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
}

static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int nr_probes = 0;
	int stub_funcs = 0;
	int pos = -1;

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			/* Insert before probes of lower priority */
			if (pos < 0 && old[nr_probes].prio < prio)
				pos = nr_probes;
			if (old[nr_probes].func == tp_func->func &&
			    old[nr_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			if (old[nr_probes].func == tp_stub_func)
				stub_funcs++;
		}
	}
	/* + 2 : one for new probe, one for NULL func - stub functions */
	new = allocate_probes(nr_probes + 2 - stub_funcs);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		if (stub_funcs) {
			/* Need to copy one at a time to remove stubs */
			int probes = 0;

			pos = -1;
			for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
				if (old[nr_probes].func == tp_stub_func)
					continue;
				if (pos < 0 && old[nr_probes].prio < prio)
					pos = probes++;
				new[probes++] = old[nr_probes];
			}
			nr_probes = probes;
			if (pos < 0)
				pos = probes;
			else
				nr_probes--; /* Account for insertion */

		} else if (pos < 0) {
			pos = nr_probes;
			memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
		} else {
			/* Copy higher priority probes ahead of the new probe */
			memcpy(new, old, pos * sizeof(struct tracepoint_func));
			/* Copy the rest after it. */
			memcpy(new + pos + 1, old + pos,
			       (nr_probes - pos) * sizeof(struct tracepoint_func));
		}
	} else
		pos = 0;
	new[pos] = *tp_func;
	new[nr_probes + 1].func = NULL;
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}
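/*
 * Worked example of func_add() ordering (hypothetical probes A-D, for
 * illustration only): the array is kept sorted by descending prio, and a
 * new probe is inserted before the first entry of strictly lower
 * priority, so equal-priority probes run in registration order:
 *
 *	old: { A(prio=10), B(prio=10), C(prio=5), NULL }
 *	func_add(&old, D, prio=10) yields:
 *	new: { A(prio=10), B(prio=10), D(prio=10), C(prio=5), NULL }
 */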
static void *func_remove(struct tracepoint_func **funcs,
			 struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			for (i = 0; old[i].func; i++)
				if ((old[i].func != tp_func->func ||
				     old[i].data != tp_func->data) &&
				    old[i].func != tp_stub_func)
					new[j++] = old[i];
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Failed to allocate, replace the old function
			 * with calls to tp_stub_func.
			 */
			for (i = 0; old[i].func; i++)
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data) {
					old[i].func = tp_stub_func;
					/* Set the prio to the next event. */
					if (old[i + 1].func)
						old[i].prio = old[i + 1].prio;
					else
						old[i].prio = -1;
				}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}

static void tracepoint_update_call(struct tracepoint *tp,
				   struct tracepoint_func *tp_funcs, bool sync)
{
	void *func = tp->iterator;

	/* Synthetic events do not have static call sites */
	if (!tp->static_call_key)
		return;

	if (!tp_funcs[1].func) {
		func = tp_funcs[0].func;
		/*
		 * If going from the iterator back to a single caller,
		 * we need to synchronize with __DO_TRACE to make sure
		 * that the data passed to the callback is the one that
		 * belongs to that callback.
		 */
		if (sync)
			tracepoint_synchronize_unregister();
	}

	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}

/*
 * Add the probe function to a tracepoint.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	if (tp->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer has an implied smp_store_release() which makes
	 * sure that the new probe callbacks array is consistent before
	 * setting a pointer to it. This array is referenced by __DO_TRACE
	 * from include/linux/tracepoint.h using rcu_dereference_sched().
	 */
	rcu_assign_pointer(tp->funcs, tp_funcs);
	tracepoint_update_call(tp, tp_funcs, false);
	static_key_enable(&tp->key);

	release_probes(old);
	return 0;
}
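/*
 * State sketch for the static call handling above (informational
 * comment): for a tracepoint with a static call site, the call target
 * depends on how many probes are registered:
 *
 *	0 probes: tp->key disabled, call site not taken
 *	1 probe : static call -> the probe itself (single-caller fast path)
 *	N probes: static call -> tp->iterator, which walks tp->funcs
 */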
/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting an RCU period after setting elem->call to the empty
 * function ensures that the original callback is not used anymore. This
 * is guaranteed by preempt_disable around the call site.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
				  struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Failed allocating new tp_funcs, replaced func with stub */
		return 0;

	if (!tp_funcs) {
		/* Removed last function */
		if (tp->unregfunc && static_key_enabled(&tp->key))
			tp->unregfunc();

		static_key_disable(&tp->key);
		rcu_assign_pointer(tp->funcs, tp_funcs);
	} else {
		rcu_assign_pointer(tp->funcs, tp_funcs);
		tracepoint_update_call(tp, tp_funcs,
				       tp_funcs[0].func != old[0].func);
	}
	release_probes(old);
	return 0;
}

/**
 * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
				   void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	return tracepoint_probe_register_prio(tp, probe, data,
					      TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
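/*
 * Usage sketch (hypothetical names, not part of this file): a probe
 * must match the tracepoint's TP_PROTO with a leading "void *data"
 * argument. For a tracepoint declared with
 * TRACE_EVENT(sample_event, TP_PROTO(unsigned long arg), ...):
 *
 *	static void sample_probe(void *data, unsigned long arg)
 *	{
 *		...
 *	}
 *
 *	ret = tracepoint_probe_register(&__tracepoint_sample_event,
 *					sample_probe, NULL);
 *	...
 *	tracepoint_probe_unregister(&__tracepoint_sample_event,
 *				    sample_probe, NULL);
 */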
/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @tp: tracepoint
 * @probe: probe function pointer
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 */
int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	ret = tracepoint_remove_func(tp, &tp_func);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

static void for_each_tracepoint_range(
		tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
		void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	tracepoint_ptr_t *iter;

	if (!begin)
		return;
	for (iter = begin; iter < end; iter++)
		fct(tracepoint_ptr_deref(iter), priv);
}

#ifdef CONFIG_MODULES
bool trace_module_has_bad_taint(struct module *mod)
{
	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
			       (1 << TAINT_UNSIGNED_MODULE));
}

static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);

/**
 * register_tracepoint_module_notifier - register tracepoint coming/going notifier
 * @nb: notifier block
 *
 * Notifiers registered with this function are called on module
 * coming/going with the tracepoint_module_list_mutex held.
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);

/**
 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
 * @nb: notifier block
 *
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);
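/*
 * Notifier usage sketch (hypothetical callback, for illustration only):
 *
 *	static int my_tp_module_cb(struct notifier_block *nb,
 *				   unsigned long val, void *data)
 *	{
 *		struct tp_module *tp_mod = data;
 *
 *		if (val == MODULE_STATE_COMING)
 *			...	use tp_mod->mod's tracepoints
 *		else if (val == MODULE_STATE_GOING)
 *			...	drop references before unload
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_tp_nb = {
 *		.notifier_call = my_tp_module_cb,
 *	};
 *
 *	register_tracepoint_module_notifier(&my_tp_nb);
 */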
/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	WARN_ON_ONCE(tp->funcs);
}

static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod;
	int ret = 0;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging, out-of-tree, and unsigned GPL modules are fine.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;
	mutex_lock(&tracepoint_module_list_mutex);
	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod) {
		ret = -ENOMEM;
		goto end;
	}
	tp_mod->mod = mod;
	list_add_tail(&tp_mod->list, &tracepoint_module_list);
	blocking_notifier_call_chain(&tracepoint_notify_list,
				     MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}

static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
						     MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}

static int tracepoint_module_notify(struct notifier_block *self,
				    unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = tracepoint_module_coming(mod);
		break;
	case MODULE_STATE_LIVE:
		break;
	case MODULE_STATE_GOING:
		tracepoint_module_going(mod);
		break;
	case MODULE_STATE_UNFORMED:
		break;
	}
	return notifier_from_errno(ret);
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

static __init int init_tracepoints(void)
{
	int ret;

	ret = register_module_notifier(&tracepoint_module_nb);
	if (ret)
		pr_warn("Failed to register tracepoint module enter notifier\n");

	return ret;
}
__initcall(init_tracepoints);
#endif /* CONFIG_MODULES */

/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
				void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
				  __stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;

int syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;

	return 0;
}
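/*
 * Refcount sketch (informational comment): the first register sets
 * SYSCALL_TRACEPOINT on every thread, the last unregister clears it;
 * intermediate transitions leave the per-task flags untouched:
 *
 *	register   #1: refcount 0 -> 1, flag set on all threads
 *	register   #2: refcount 1 -> 2, no change
 *	unregister #1: refcount 2 -> 1, no change
 *	unregister #2: refcount 1 -> 0, flag cleared on all threads
 */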
void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}
#endif