// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008-2014 Mathieu Desnoyers
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/static_key.h>

/*
 * Number of probe functions attached to a tracepoint, as far as the
 * static-call/transition logic cares: 0, 1, 2, or "3 or more".
 */
enum tp_func_state {
	TP_FUNC_0,
	TP_FUNC_1,
	TP_FUNC_2,
	TP_FUNC_N,
};

/* Section boundaries of the built-in tracepoint pointer array. */
extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

/*
 * Transition sequences that require grace-period synchronization before
 * the static call may be switched to calling a single probe directly:
 * - 1->0->1: the single-probe slot is reused with possibly new data,
 * - N->..->2->1: the remaining probe's data may differ from what current
 *   readers loaded.
 */
enum tp_transition_sync {
	TP_TRANSITION_SYNC_1_0_1,
	TP_TRANSITION_SYNC_N_2_1,

	_NR_TP_TRANSITION_SYNC,
};

/*
 * Cookies from get_state_synchronize_rcu()/start_poll_synchronize_srcu()
 * taken at the start of a transition; @ongoing tells whether a snapshot
 * is pending consumption by tp_rcu_cond_sync().
 */
struct tp_transition_snapshot {
	unsigned long rcu;
	unsigned long srcu_gp;
	bool ongoing;
};

DEFINE_SRCU_FAST(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);

/* Protected by tracepoints_mutex */
static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];

/*
 * Record the current RCU and SRCU grace-period state for transition
 * @sync, so a later tp_rcu_cond_sync() can wait only if needed.
 */
static void tp_rcu_get_state(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	/* Keep the latest get_state snapshot. */
	snapshot->rcu = get_state_synchronize_rcu();
	snapshot->srcu_gp = start_poll_synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = true;
}

/*
 * If a snapshot for @sync is pending, wait until both the RCU and SRCU
 * grace periods recorded by tp_rcu_get_state() have elapsed, then clear
 * the pending state. No-op (and no waiting) otherwise.
 */
static void tp_rcu_cond_sync(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	if (!snapshot->ongoing)
		return;
	cond_synchronize_rcu(snapshot->rcu);
	if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu_gp))
		synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = false;
}

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Tracepoint module list mutex protects the local module list.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Local list of struct tp_module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * tracepoints_mutex protects the builtin and module tracepoints.
 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
 */
static DEFINE_MUTEX(tracepoints_mutex);

/*
 * Note about RCU:
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 */
struct tp_probes {
	struct rcu_head rcu;
	struct tracepoint_func probes[];	/* NULL-func terminated array */
};

/*
 * Placeholder installed in place of a removed probe when allocating the
 * replacement array failed (see func_remove()); skipped by iteration.
 */
static void tp_stub_func(void)
{
	return;
}

/*
 * Allocate a tp_probes with room for @count tracepoint_func entries and
 * return a pointer to the embedded probes array (or NULL on failure).
 * release_probes() recovers the enclosing tp_probes via container_of().
 */
static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc_flex(*p, probes, count, GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}

static void rcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, rcu));
}

/*
 * Queue the old probes array (if any) for freeing once no reader can
 * still reference it. Faultable tracepoints are protected by RCU-tasks-
 * trace, the rest by tracepoint_srcu.
 */
static inline void release_probes(struct tracepoint *tp, struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);

		if (tracepoint_is_faultable(tp)) {
			call_rcu_tasks_trace(&tp_probes->rcu,
					     rcu_free_old_probes);
		} else {
			call_srcu(&tracepoint_srcu, &tp_probes->rcu,
				  rcu_free_old_probes);
		}
	}
}

/* Dump a probes array, only when tracepoint_debug is set. */
static void debug_print_probes(struct tracepoint_func *funcs)
{
	int i;

	if (!tracepoint_debug || !funcs)
		return;

	for (i = 0; funcs[i].func; i++)
		printk(KERN_DEBUG "Probe %d : %pSb\n", i, funcs[i].func);
}

/*
 * Build a new probes array containing *@funcs plus @tp_func, inserted
 * before entries of lower priority (stable order among equal @prio).
 * Stub entries left over from failed removals are dropped. On success,
 * *@funcs points to the new array and the old array is returned for the
 * caller to release. Returns ERR_PTR(-EEXIST) if the (func, data) pair
 * is already present, ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int iter_probes;	/* Iterate over old probe array. */
	int nr_probes = 0;	/* Counter for probes */
	int pos = -1;		/* Insertion position into new array */

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;	/* Skip stub functions. */
			if (old[iter_probes].func == tp_func->func &&
			    old[iter_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			nr_probes++;
		}
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		nr_probes = 0;
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;
			/* Insert before probes of lower priority */
			if (pos < 0 && old[iter_probes].prio < prio)
				pos = nr_probes++;
			new[nr_probes++] = old[iter_probes];
		}
		if (pos < 0)
			pos = nr_probes++;
		/* nr_probes now points to the end of the new array */
	} else {
		pos = 0;
		nr_probes = 1; /* must point at end of array */
	}
	new[pos] = *tp_func;
	new[nr_probes].func = NULL;
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}

/*
 * Build a new probes array with @tp_func (and any stub entries) removed
 * from *@funcs. A NULL @tp_func->func removes all entries. If nothing
 * would remain, *@funcs is set to NULL. If allocating the smaller array
 * fails, the matching entries in the old array are overwritten with
 * tp_stub_func instead and the old array is kept (*@funcs == old, which
 * the caller uses to detect this case). Returns the old array, or
 * ERR_PTR(-ENOENT) if *@funcs was NULL.
 */
static void *func_remove(struct tracepoint_func **funcs,
			 struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			for (i = 0; old[i].func; i++) {
				if ((old[i].func != tp_func->func ||
				     old[i].data != tp_func->data) &&
				    old[i].func != tp_stub_func)
					new[j++] = old[i];
			}
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Failed to allocate, replace the old function
			 * with calls to tp_stub_func.
			 */
			for (i = 0; old[i].func; i++) {
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data)
					WRITE_ONCE(old[i].func, tp_stub_func);
			}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}

/*
 * Count the number of functions (enum tp_func_state) in a tp_funcs array.
 */
static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
{
	if (!tp_funcs)
		return TP_FUNC_0;
	if (!tp_funcs[1].func)
		return TP_FUNC_1;
	if (!tp_funcs[2].func)
		return TP_FUNC_2;
	return TP_FUNC_N;	/* 3 or more */
}

/*
 * Point the tracepoint's static call either directly at the single probe
 * (TP_FUNC_1) or at the generic iterator (every other state).
 */
static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
{
	void *func = tp->iterator;

	/* Synthetic events do not have static call sites */
	if (!tp->static_call_key)
		return;
	if (nr_func_state(tp_funcs) == TP_FUNC_1)
		func = tp_funcs[0].func;
	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}

/*
 * Add the probe function to a tracepoint.
 * Caller holds tracepoints_mutex. @warn controls whether unexpected
 * func_add() errors (anything but -ENOMEM) trigger a WARN.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio,
			       bool warn)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	/* Run the registration callback only on the 0->enabled edge. */
	if (tp->ext && tp->ext->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->ext->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer has a smp_store_release() which makes sure
	 * that the new probe callbacks array is consistent before setting
	 * a pointer to it. This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h using rcu_dereference_sched().
	 */
	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_1:		/* 0->1 */
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		static_branch_enable(&tp->key);
		break;
	case TP_FUNC_2:		/* 1->2 */
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/*
		 * Iterator callback installed before updating tp->funcs.
		 * Requires ordering between RCU assign/dereference and
		 * static call update/call.
		 */
		fallthrough;
	case TP_FUNC_N:		/* N->N+1 (N>1) */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>1) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	release_probes(tp, old);
	return 0;
}

/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting an RCU period after setting elem->call to the empty
 * function ensures that the original callback is not used anymore. This
 * is ensured by preempt_disable around the call site.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
				  struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Failed allocating new tp_funcs, replaced func with stub */
		return 0;

	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_0:		/* 1->0 */
		/* Removed last function */
		if (tp->ext && tp->ext->unregfunc && static_key_enabled(&tp->key))
			tp->ext->unregfunc();
		static_branch_disable(&tp->key);
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, NULL);
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
		break;
	case TP_FUNC_1:		/* 2->1 */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence. If the first
		 * element's data has changed, then force the synchronization
		 * to prevent current readers that have loaded the old data
		 * from calling the new function.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		break;
	case TP_FUNC_2:		/* N->N-1 (N>2) */
		fallthrough;
	case TP_FUNC_N:
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	release_probes(tp, old);
	return 0;
}

/**
 * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Same as tracepoint_probe_register_prio() except that it will not warn
 * if the tracepoint is already registered.
 */
int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
					     void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, false);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);

/**
 * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
				   void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, true);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @tp: tracepoint
 * @probe: probe function pointer
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 */
int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	ret = tracepoint_remove_func(tp, &tp_func);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

/* Invoke @fct on every tracepoint in the [begin, end) pointer section. */
static void for_each_tracepoint_range(
		tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
		void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	tracepoint_ptr_t *iter;

	if (!begin)
		return;
	for (iter = begin; iter < end; iter++)
		fct(tracepoint_ptr_deref(iter), priv);
}

#ifdef CONFIG_MODULES
/*
 * Taints other than out-of-tree, staging, unsigned, test, and livepatch
 * disqualify a module from tracepoint handling (see
 * tracepoint_module_coming()).
 */
bool trace_module_has_bad_taint(struct module *mod)
{
	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
			       (1 << TAINT_UNSIGNED_MODULE) | (1 << TAINT_TEST) |
			       (1 << TAINT_LIVEPATCH));
}

static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);

/**
 * register_tracepoint_module_notifier - register tracepoint coming/going notifier
 * @nb: notifier block
 *
 * Notifiers registered with this function are called on module
 * coming/going with the tracepoint_module_list_mutex held.
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	/* Replay a COMING event for every module already on the list. */
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);

/**
 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
 * @nb: notifier block
 *
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	/* Synthesize a GOING event for every module still on the list. */
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;

}
EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);

/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	WARN_ON_ONCE(tp->funcs);
}

/*
 * MODULE_STATE_COMING handler: track the module's tracepoints (unless it
 * carries a disqualifying taint) and notify registered listeners.
 */
static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging, out-of-tree, unsigned GPL, and test modules are fine.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;

	tp_mod = kmalloc_obj(struct tp_module, GFP_KERNEL);
	if (!tp_mod)
		return -ENOMEM;
	tp_mod->mod = mod;

	mutex_lock(&tracepoint_module_list_mutex);
	list_add_tail(&tp_mod->list, &tracepoint_module_list);
	blocking_notifier_call_chain(&tracepoint_notify_list,
			MODULE_STATE_COMING, tp_mod);
	mutex_unlock(&tracepoint_module_list_mutex);
	return 0;
}

/*
 * MODULE_STATE_GOING handler: notify listeners, drop the module from the
 * local list, and warn if any of its tracepoints still has probes.
 */
static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}

/* Dispatch module notifier events to the coming/going handlers above. */
static int tracepoint_module_notify(struct notifier_block *self,
		unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = tracepoint_module_coming(mod);
		break;
	case MODULE_STATE_LIVE:
		break;
	case MODULE_STATE_GOING:
		tracepoint_module_going(mod);
		break;
	case MODULE_STATE_UNFORMED:
		break;
	}
	return notifier_from_errno(ret);
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

static __init int init_tracepoints(void)
{
	int ret;

	ret = register_module_notifier(&tracepoint_module_nb);
	if (ret)
		pr_warn("Failed to register tracepoint module enter notifier\n");

	return ret;
}
__initcall(init_tracepoints);

/**
 * for_each_tracepoint_in_module - iteration on all tracepoints in a module
 * @mod: module
 * @fct: callback
 * @priv: private data
 */
void for_each_tracepoint_in_module(struct module *mod,
				   void (*fct)(struct tracepoint *tp,
					struct module *mod, void *priv),
				   void *priv)
{
	tracepoint_ptr_t *begin, *end, *iter;

	lockdep_assert_held(&tracepoint_module_list_mutex);

	if (!mod)
		return;

	begin = mod->tracepoints_ptrs;
	end = mod->tracepoints_ptrs + mod->num_tracepoints;

	for (iter = begin; iter < end; iter++)
		fct(tracepoint_ptr_deref(iter), mod, priv);
}

/**
 * for_each_module_tracepoint - iteration on all tracepoints in all modules
 * @fct: callback
 * @priv: private data
 */
void for_each_module_tracepoint(void (*fct)(struct tracepoint *tp,
				 struct module *mod, void *priv),
				void *priv)
{
	struct tp_module *tp_mod;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		for_each_tracepoint_in_module(tp_mod->mod, fct, priv);
	mutex_unlock(&tracepoint_module_list_mutex);
}
#endif /* CONFIG_MODULES */

/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;

/*
 * Shared regfunc for syscall tracepoints: on the first registration, set
 * SYSCALL_TRACEPOINT work on every existing thread; later registrations
 * only bump the refcount.
 */
int syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;

	return 0;
}

/*
 * Shared unregfunc for syscall tracepoints: when the last reference is
 * dropped, clear SYSCALL_TRACEPOINT work from every existing thread.
 */
void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}
#endif