// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008-2014 Mathieu Desnoyers
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/static_key.h>

/*
 * Number of probe functions currently attached to a tracepoint,
 * saturating at TP_FUNC_N for 3 or more. Used to drive the static-call
 * and RCU transition state machines in tracepoint_{add,remove}_func().
 */
enum tp_func_state {
	TP_FUNC_0,
	TP_FUNC_1,
	TP_FUNC_2,
	TP_FUNC_N,
};

extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

/*
 * Identifies which probe-count transition a grace-period snapshot was
 * taken for (1->0->1 and N->...->2->1 sequences need synchronization).
 */
enum tp_transition_sync {
	TP_TRANSITION_SYNC_1_0_1,
	TP_TRANSITION_SYNC_N_2_1,

	_NR_TP_TRANSITION_SYNC,
};

/* Cookies for polling RCU and SRCU grace-period completion. */
struct tp_transition_snapshot {
	unsigned long rcu;	/* from get_state_synchronize_rcu() */
	unsigned long srcu_gp;	/* from start_poll_synchronize_srcu() */
	bool ongoing;		/* snapshot taken but not yet consumed */
};

DEFINE_SRCU_FAST(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);

/* Protected by tracepoints_mutex */
static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];

/*
 * Record the current RCU and SRCU grace-period state for transition
 * @sync, so a later tp_rcu_cond_sync() can wait only if needed.
 */
static void tp_rcu_get_state(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	/* Keep the latest get_state snapshot. */
	snapshot->rcu = get_state_synchronize_rcu();
	snapshot->srcu_gp = start_poll_synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = true;
}

/*
 * If a snapshot for @sync is pending, wait until both the RCU and SRCU
 * grace periods recorded in it have elapsed, then mark it consumed.
 * Blocks only for grace periods that have not already completed.
 */
static void tp_rcu_cond_sync(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	if (!snapshot->ongoing)
		return;
	cond_synchronize_rcu(snapshot->rcu);
	if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu_gp))
		synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = false;
}

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Tracepoint module list mutex protects the local module list.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Local list of struct tp_module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * tracepoints_mutex protects the builtin and module tracepoints.
 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
 */
static DEFINE_MUTEX(tracepoints_mutex);

/*
 * Note about RCU :
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 */
struct tp_probes {
	struct rcu_head rcu;
	struct tracepoint_func probes[];
};

/*
 * Placeholder installed in place of a removed probe when allocating the
 * replacement array failed (see func_remove()). Iterators skip it.
 */
static void tp_stub_func(void)
{
	return;
}

/*
 * Allocate a struct tp_probes with room for @count entries in its
 * flexible probes[] array. Returns a pointer to the probes[] array
 * itself (callers recover the container via container_of()), or NULL
 * on allocation failure.
 */
static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc_flex(*p, probes, count);

	return p == NULL ? NULL : p->probes;
}

/* RCU callback: free an old probes array once readers are done with it. */
static void rcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, rcu));
}

/*
 * Queue the old probes array @old of @tp for deferred freeing.
 * Faultable tracepoints are protected by RCU tasks trace; all others by
 * tracepoint_srcu — the matching flavor's grace period must elapse
 * before the array memory is reclaimed.
 */
static inline void release_probes(struct tracepoint *tp, struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);

		if (tracepoint_is_faultable(tp)) {
			call_rcu_tasks_trace(&tp_probes->rcu,
					     rcu_free_old_probes);
		} else {
			call_srcu(&tracepoint_srcu, &tp_probes->rcu,
				  rcu_free_old_probes);
		}
	}
}

/* Dump a NULL-terminated probes array when tracepoint_debug is set. */
static void debug_print_probes(struct tracepoint_func *funcs)
{
	int i;

	if (!tracepoint_debug || !funcs)
		return;

	for (i = 0; funcs[i].func; i++)
		printk(KERN_DEBUG "Probe %d : %pSb\n", i, funcs[i].func);
}

/*
 * Build a new probes array containing all non-stub entries of *@funcs
 * plus @tp_func, inserted in descending @prio order (before entries of
 * strictly lower priority). On success *@funcs points at the new array
 * and the old array is returned for the caller to release. Returns
 * ERR_PTR(-EINVAL) for a NULL probe, ERR_PTR(-EEXIST) if the same
 * func/data pair is already registered, ERR_PTR(-ENOMEM) on allocation
 * failure.
 */
static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int iter_probes;	/* Iterate over old probe array. */
	int nr_probes = 0;	/* Counter for probes */
	int pos = -1;		/* Insertion position into new array */

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;	/* Skip stub functions. */
			if (old[iter_probes].func == tp_func->func &&
			    old[iter_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			nr_probes++;
		}
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		nr_probes = 0;
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;
			/* Insert before probes of lower priority */
			if (pos < 0 && old[iter_probes].prio < prio)
				pos = nr_probes++;
			new[nr_probes++] = old[iter_probes];
		}
		if (pos < 0)
			pos = nr_probes++;
		/* nr_probes now points to the end of the new array */
	} else {
		pos = 0;
		nr_probes = 1; /* must point at end of array */
	}
	new[pos] = *tp_func;
	new[nr_probes].func = NULL;
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}

/*
 * Build a new probes array with @tp_func (and any stub entries) removed
 * from *@funcs. On success *@funcs points at the new array (or NULL if
 * no probes remain) and the old array is returned for release. If the
 * replacement array cannot be allocated, the removed entries are
 * overwritten in place with tp_stub_func and *@funcs is left pointing
 * at the old array (caller detects this by *funcs == return value).
 * Returns ERR_PTR(-ENOENT) if there was no probes array at all.
 */
static void *func_remove(struct tracepoint_func **funcs,
			 struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			for (i = 0; old[i].func; i++) {
				if ((old[i].func != tp_func->func ||
				     old[i].data != tp_func->data) &&
				    old[i].func != tp_stub_func)
					new[j++] = old[i];
			}
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Failed to allocate, replace the old function
			 * with calls to tp_stub_func.
			 */
			for (i = 0; old[i].func; i++) {
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data)
					WRITE_ONCE(old[i].func, tp_stub_func);
			}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}

/*
 * Count the number of functions (enum tp_func_state) in a tp_funcs array.
 */
static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
{
	if (!tp_funcs)
		return TP_FUNC_0;
	if (!tp_funcs[1].func)
		return TP_FUNC_1;
	if (!tp_funcs[2].func)
		return TP_FUNC_2;
	return TP_FUNC_N;	/* 3 or more */
}

/*
 * Point the tracepoint's static call at the single probe when exactly
 * one is attached (bypassing the iterator), or at the generic iterator
 * otherwise. No-op for tracepoints without a static call site (e.g.
 * synthetic events).
 */
static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
{
	void *func = tp->iterator;

	/* Synthetic events do not have static call sites */
	if (!tp->static_call_key)
		return;
	if (nr_func_state(tp_funcs) == TP_FUNC_1)
		func = tp_funcs[0].func;
	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}

/*
 * Add the probe function to a tracepoint.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio,
			       bool warn)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	/* First probe being attached: run the registration callback, if any. */
	if (tp->ext && tp->ext->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->ext->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		/* Undo the registration callback on failure. */
		if (tp->ext && tp->ext->unregfunc && !static_key_enabled(&tp->key))
			tp->ext->unregfunc();
		/* Only -EEXIST is expected when @warn is clear. */
		WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer has an smp_store_release() which makes sure
	 * that the new probe callbacks array is consistent before setting
	 * a pointer to it. This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h using rcu_dereference_sched().
	 */
	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_1:		/* 0->1 */
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		static_branch_enable(&tp->key);
		break;
	case TP_FUNC_2:		/* 1->2 */
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/*
		 * Iterator callback installed before updating tp->funcs.
		 * Requires ordering between RCU assign/dereference and
		 * static call update/call.
		 */
		fallthrough;
	case TP_FUNC_N:		/* N->N+1 (N>1) */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>1) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	release_probes(tp, old);
	return 0;
}

/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting an RCU period after setting elem->call to the empty
 * function ensures that the original callback is not used anymore. This
 * is ensured by preempt_disable around the call site.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
				  struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Failed allocating new tp_funcs, replaced func with stub */
		return 0;

	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_0:		/* 1->0 */
		/* Removed last function */
		if (tp->ext && tp->ext->unregfunc && static_key_enabled(&tp->key))
			tp->ext->unregfunc();
		static_branch_disable(&tp->key);
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, NULL);
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
		break;
	case TP_FUNC_1:		/* 2->1 */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence. If the first
		 * element's data has changed, then force the synchronization
		 * to prevent current readers that have loaded the old data
		 * from calling the new function.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		break;
	case TP_FUNC_2:		/* N->N-1 (N>2) */
		fallthrough;
	case TP_FUNC_N:
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	release_probes(tp, old);
	return 0;
}

/**
 * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Same as tracepoint_probe_register_prio() except that it will not warn
 * if the tracepoint is already registered.
 */
int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
					     void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, false);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);

/**
 * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone.
This can be 462 * performed either with a tracepoint module going notifier, or from 463 * within module exit functions. 464 */ 465 int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, 466 void *data, int prio) 467 { 468 struct tracepoint_func tp_func; 469 int ret; 470 471 mutex_lock(&tracepoints_mutex); 472 tp_func.func = probe; 473 tp_func.data = data; 474 tp_func.prio = prio; 475 ret = tracepoint_add_func(tp, &tp_func, prio, true); 476 mutex_unlock(&tracepoints_mutex); 477 return ret; 478 } 479 EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio); 480 481 /** 482 * tracepoint_probe_register - Connect a probe to a tracepoint 483 * @tp: tracepoint 484 * @probe: probe handler 485 * @data: tracepoint data 486 * 487 * Returns 0 if ok, error value on error. 488 * Note: if @tp is within a module, the caller is responsible for 489 * unregistering the probe before the module is gone. This can be 490 * performed either with a tracepoint module going notifier, or from 491 * within module exit functions. 492 */ 493 int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data) 494 { 495 return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO); 496 } 497 EXPORT_SYMBOL_GPL(tracepoint_probe_register); 498 499 /** 500 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint 501 * @tp: tracepoint 502 * @probe: probe function pointer 503 * @data: tracepoint data 504 * 505 * Returns 0 if ok, error value on error. 
506 */ 507 int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data) 508 { 509 struct tracepoint_func tp_func; 510 int ret; 511 512 mutex_lock(&tracepoints_mutex); 513 tp_func.func = probe; 514 tp_func.data = data; 515 ret = tracepoint_remove_func(tp, &tp_func); 516 mutex_unlock(&tracepoints_mutex); 517 return ret; 518 } 519 EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); 520 521 static void for_each_tracepoint_range( 522 tracepoint_ptr_t *begin, tracepoint_ptr_t *end, 523 void (*fct)(struct tracepoint *tp, void *priv), 524 void *priv) 525 { 526 tracepoint_ptr_t *iter; 527 528 if (!begin) 529 return; 530 for (iter = begin; iter < end; iter++) 531 fct(tracepoint_ptr_deref(iter), priv); 532 } 533 534 #ifdef CONFIG_MODULES 535 bool trace_module_has_bad_taint(struct module *mod) 536 { 537 return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) | 538 (1 << TAINT_UNSIGNED_MODULE) | (1 << TAINT_TEST) | 539 (1 << TAINT_LIVEPATCH)); 540 } 541 542 static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list); 543 544 /** 545 * register_tracepoint_module_notifier - register tracepoint coming/going notifier 546 * @nb: notifier block 547 * 548 * Notifiers registered with this function are called on module 549 * coming/going with the tracepoint_module_list_mutex held. 550 * The notifier block callback should expect a "struct tp_module" data 551 * pointer. 
 */
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	/* Replay a COMING event for every module already on the list. */
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);

/**
 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
 * @nb: notifier block
 *
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	/* Send a GOING event for every module still on the list. */
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);

/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	WARN_ON_ONCE(tp->funcs);
}

/*
 * MODULE_STATE_COMING handler: track the module on tracepoint_module_list
 * and notify registered listeners. Modules with no tracepoints or with a
 * "bad" taint are ignored.
 */
static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging, out-of-tree, unsigned GPL, and test modules are fine.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;

	tp_mod = kmalloc_obj(struct tp_module);
	if (!tp_mod)
		return -ENOMEM;
	tp_mod->mod = mod;

	mutex_lock(&tracepoint_module_list_mutex);
	list_add_tail(&tp_mod->list, &tracepoint_module_list);
	blocking_notifier_call_chain(&tracepoint_notify_list,
			MODULE_STATE_COMING, tp_mod);
	mutex_unlock(&tracepoint_module_list_mutex);
	return 0;
}

/*
 * MODULE_STATE_GOING handler: notify listeners, drop the module from
 * tracepoint_module_list, and verify all its tracepoints are quiescent.
 */
static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}

/* Module notifier callback: dispatch on the module state transition. */
static int tracepoint_module_notify(struct notifier_block *self,
				    unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = tracepoint_module_coming(mod);
		break;
	case MODULE_STATE_LIVE:
		break;
	case MODULE_STATE_GOING:
		tracepoint_module_going(mod);
		break;
	case MODULE_STATE_UNFORMED:
		break;
	}
	return notifier_from_errno(ret);
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

/* Hook the tracepoint module notifier into the module loader at boot. */
static __init int init_tracepoints(void)
{
	int ret;

	ret = register_module_notifier(&tracepoint_module_nb);
	if (ret)
		pr_warn("Failed to register tracepoint module enter notifier\n");

	return ret;
}
__initcall(init_tracepoints);

/**
 * for_each_tracepoint_in_module - iteration on all tracepoints in a module
 * @mod: module
 * @fct: callback
 * @priv: private data
 */
void for_each_tracepoint_in_module(struct module *mod,
				   void (*fct)(struct tracepoint *tp,
					       struct module *mod, void *priv),
				   void *priv)
{
	tracepoint_ptr_t *begin, *end, *iter;

	lockdep_assert_held(&tracepoint_module_list_mutex);

	if (!mod)
		return;

	begin = mod->tracepoints_ptrs;
	end = mod->tracepoints_ptrs + mod->num_tracepoints;

	for (iter = begin; iter < end; iter++)
		fct(tracepoint_ptr_deref(iter), mod, priv);
}

/**
 * for_each_module_tracepoint - iteration on all tracepoints in all modules
 * @fct: callback
 * @priv: private data
 */
void for_each_module_tracepoint(void (*fct)(struct tracepoint *tp,
					    struct module *mod, void *priv),
				void *priv)
{
	struct tp_module *tp_mod;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		for_each_tracepoint_in_module(tp_mod->mod, fct, priv);
	mutex_unlock(&tracepoint_module_list_mutex);
}
#endif /* CONFIG_MODULES */

/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
				void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;

/*
 * Registration callback for syscall tracepoints: on the first user, set
 * SYSCALL_TRACEPOINT work on every thread so syscall entry/exit takes
 * the slow path.
 */
int syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;

	return 0;
}

/*
 * Unregistration callback for syscall tracepoints: on the last user,
 * clear SYSCALL_TRACEPOINT work from every thread.
 */
void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}
#endif