// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008-2014 Mathieu Desnoyers
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/static_key.h>

enum tp_func_state {
	TP_FUNC_0,
	TP_FUNC_1,
	TP_FUNC_2,
	TP_FUNC_N,
};

extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

enum tp_transition_sync {
	TP_TRANSITION_SYNC_1_0_1,
	TP_TRANSITION_SYNC_N_2_1,

	_NR_TP_TRANSITION_SYNC,
};

struct tp_transition_snapshot {
	unsigned long rcu;
	bool ongoing;
};

/* Protected by tracepoints_mutex */
static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];

static void tp_rcu_get_state(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	/* Keep the latest get_state snapshot. */
	snapshot->rcu = get_state_synchronize_rcu();
	snapshot->ongoing = true;
}

static void tp_rcu_cond_sync(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	if (!snapshot->ongoing)
		return;
	cond_synchronize_rcu(snapshot->rcu);
	snapshot->ongoing = false;
}
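
/*
 * The two helpers above wrap the RCU "grace period cookie" pattern: take a
 * snapshot with get_state_synchronize_rcu() when a transition begins, and
 * later pass that cookie to cond_synchronize_rcu(), which only blocks if a
 * full grace period has not already elapsed since the snapshot.  A minimal
 * stand-alone sketch of the same pattern (the names my_cookie and
 * my_update_* are hypothetical, not part of this file):
 *
 *	static unsigned long my_cookie;
 *
 *	static void my_update_begin(void)
 *	{
 *		// Remember where RCU was when the old data was unpublished.
 *		my_cookie = get_state_synchronize_rcu();
 *	}
 *
 *	static void my_update_commit(void)
 *	{
 *		// Wait only if readers could still hold the old data.
 *		cond_synchronize_rcu(my_cookie);
 *	}
 */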

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Tracepoint module list mutex protects the local module list.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Local list of struct tp_module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * tracepoints_mutex protects the builtin and module tracepoints.
 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
 */
static DEFINE_MUTEX(tracepoints_mutex);

/*
 * Note about RCU:
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 */
struct tp_probes {
	struct rcu_head rcu;
	struct tracepoint_func probes[];
};

/* Called when a func is removed, but allocating a new tp_funcs array failed */
static void tp_stub_func(void)
{
	return;
}

static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc(struct_size(p, probes, count),
				      GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}

static void rcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, rcu));
}

static inline void release_probes(struct tracepoint *tp, struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);

		if (tracepoint_is_faultable(tp))
			call_rcu_tasks_trace(&tp_probes->rcu, rcu_free_old_probes);
		else
			call_rcu(&tp_probes->rcu, rcu_free_old_probes);
	}
}

static void debug_print_probes(struct tracepoint_func *funcs)
{
	int i;

	if (!tracepoint_debug || !funcs)
		return;

	for (i = 0; funcs[i].func; i++)
		printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
}

static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int iter_probes;	/* Iterate over old probe array. */
	int nr_probes = 0;	/* Counter for probes */
	int pos = -1;		/* Insertion position into new array */

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;	/* Skip stub functions. */
			if (old[iter_probes].func == tp_func->func &&
			    old[iter_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			nr_probes++;
		}
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		nr_probes = 0;
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;
			/* Insert before probes of lower priority */
			if (pos < 0 && old[iter_probes].prio < prio)
				pos = nr_probes++;
			new[nr_probes++] = old[iter_probes];
		}
		if (pos < 0)
			pos = nr_probes++;
		/* nr_probes now points to the end of the new array */
	} else {
		pos = 0;
		nr_probes = 1; /* must point at end of array */
	}
	new[pos] = *tp_func;
	new[nr_probes].func = NULL;
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}
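
/*
 * Worked example of the insertion above, assuming an existing array of three
 * probes with priorities { 10, 10, 1 } and a new probe registered with
 * prio == 5 (the probe names are hypothetical):
 *
 *	old:  { fA/prio 10, fB/prio 10, fC/prio 1, NULL }
 *	new:  { fA/prio 10, fB/prio 10, fnew/prio 5, fC/prio 1, NULL }
 *
 * The new probe is inserted before the first entry of lower priority, so
 * equal-priority probes keep their registration order and the array stays
 * NULL-terminated for the iterator.
 */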

static void *func_remove(struct tracepoint_func **funcs,
			 struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			for (i = 0; old[i].func; i++) {
				if ((old[i].func != tp_func->func ||
				     old[i].data != tp_func->data) &&
				    old[i].func != tp_stub_func)
					new[j++] = old[i];
			}
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Failed to allocate, replace the old function
			 * with calls to tp_stub_func.
			 */
			for (i = 0; old[i].func; i++) {
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data)
					WRITE_ONCE(old[i].func, tp_stub_func);
			}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}
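
/*
 * Worked example of the allocation-failure fallback above, removing fB from
 * a three-probe array when allocate_probes() returns NULL (the probe names
 * are hypothetical):
 *
 *	before: { fA, fB, fC, NULL }
 *	after:  { fA, tp_stub_func, fC, NULL }
 *
 * The array is left in place and the matching entry is redirected to
 * tp_stub_func, so concurrent callers see a harmless no-op instead of a
 * dangling probe.  A later successful add or remove skips the stub entries
 * when it rebuilds the array.
 */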

/*
 * Count the number of functions (enum tp_func_state) in a tp_funcs array.
 */
static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
{
	if (!tp_funcs)
		return TP_FUNC_0;
	if (!tp_funcs[1].func)
		return TP_FUNC_1;
	if (!tp_funcs[2].func)
		return TP_FUNC_2;
	return TP_FUNC_N;	/* 3 or more */
}

static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
{
	void *func = tp->iterator;

	/* Synthetic events do not have static call sites */
	if (!tp->static_call_key)
		return;
	if (nr_func_state(tp_funcs) == TP_FUNC_1)
		func = tp_funcs[0].func;
	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}

/*
 * Add the probe function to a tracepoint.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio,
			       bool warn)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	if (tp->ext && tp->ext->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->ext->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer has an implied smp_store_release() which makes
	 * sure that the new probe callbacks array is consistent before
	 * setting a pointer to it. This array is referenced by __DO_TRACE
	 * from include/linux/tracepoint.h using rcu_dereference_sched().
	 */
	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_1:		/* 0->1 */
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		static_branch_enable(&tp->key);
		break;
	case TP_FUNC_2:		/* 1->2 */
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/*
		 * Iterator callback installed before updating tp->funcs.
		 * Requires ordering between RCU assign/dereference and
		 * static call update/call.
		 */
		fallthrough;
	case TP_FUNC_N:		/* N->N+1 (N>1) */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>1) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	release_probes(tp, old);
	return 0;
}

/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting an RCU period after setting elem->call to the empty
 * function ensures that the original callback is not used anymore. This
 * is ensured by the preempt_disable around the call site.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
				  struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Failed allocating new tp_funcs, replaced func with stub */
		return 0;

	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_0:		/* 1->0 */
		/* Removed last function */
		if (tp->ext && tp->ext->unregfunc && static_key_enabled(&tp->key))
			tp->ext->unregfunc();
		static_branch_disable(&tp->key);
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, NULL);
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
		break;
	case TP_FUNC_1:		/* 2->1 */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence. If the first
		 * element's data has changed, then force the synchronization
		 * to prevent current readers that have loaded the old data
		 * from calling the new function.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		break;
	case TP_FUNC_2:		/* N->N-1 (N>2) */
		fallthrough;
	case TP_FUNC_N:
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	release_probes(tp, old);
	return 0;
}

/**
 * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Same as tracepoint_probe_register_prio() except that it will not warn
 * if the tracepoint is already registered.
 */
int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
					     void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, false);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);

/**
 * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
				   void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, true);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
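
/*
 * Registration usage sketch (not part of this file): the probe receives the
 * registered @data pointer as its first argument, followed by the
 * tracepoint's own arguments.  The tracepoint name "sample_event", its
 * single int argument, and the lookup callback below are hypothetical; a
 * real caller could also take the struct tracepoint pointer directly from
 * the subsystem that defines it.
 *
 *	static void my_probe(void *data, int value)
 *	{
 *		// called every time the tracepoint is hit
 *	}
 *
 *	static void find_tp(struct tracepoint *tp, void *priv)
 *	{
 *		struct tracepoint **res = priv;
 *
 *		if (!strcmp(tp->name, "sample_event"))
 *			*res = tp;
 *	}
 *
 *	struct tracepoint *tp = NULL;
 *
 *	for_each_kernel_tracepoint(find_tp, &tp);
 *	if (tp)
 *		tracepoint_probe_register(tp, my_probe, NULL);
 *	// ... later, e.g. in module exit:
 *	if (tp)
 *		tracepoint_probe_unregister(tp, my_probe, NULL);
 */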

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @tp: tracepoint
 * @probe: probe function pointer
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 */
int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	ret = tracepoint_remove_func(tp, &tp_func);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

static void for_each_tracepoint_range(
		tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
		void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	tracepoint_ptr_t *iter;

	if (!begin)
		return;
	for (iter = begin; iter < end; iter++)
		fct(tracepoint_ptr_deref(iter), priv);
}

#ifdef CONFIG_MODULES
bool trace_module_has_bad_taint(struct module *mod)
{
	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
			       (1 << TAINT_UNSIGNED_MODULE) | (1 << TAINT_TEST) |
			       (1 << TAINT_LIVEPATCH));
}

static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);

/**
 * register_tracepoint_module_notifier - register tracepoint coming/going notifier
 * @nb: notifier block
 *
 * Notifiers registered with this function are called on module
 * coming/going with the tracepoint_module_list_mutex held.
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);

/**
 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
 * @nb: notifier block
 *
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);
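
/*
 * Notifier usage sketch (not part of this file): the callback is invoked
 * with MODULE_STATE_COMING/MODULE_STATE_GOING and a struct tp_module pointer
 * as @data, while tracepoint_module_list_mutex is held.  The names
 * my_tp_module_notify and my_tp_module_nb are hypothetical.
 *
 *	static int my_tp_module_notify(struct notifier_block *nb,
 *				       unsigned long action, void *data)
 *	{
 *		struct tp_module *tp_mod = data;
 *
 *		switch (action) {
 *		case MODULE_STATE_COMING:
 *			// tracepoints of tp_mod->mod are now available
 *			break;
 *		case MODULE_STATE_GOING:
 *			// unregister any probes attached to tp_mod->mod here
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_tp_module_nb = {
 *		.notifier_call = my_tp_module_notify,
 *	};
 *
 *	register_tracepoint_module_notifier(&my_tp_module_nb);
 */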

/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	WARN_ON_ONCE(tp->funcs);
}

static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging, out-of-tree, unsigned GPL, and test modules are fine.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;

	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod)
		return -ENOMEM;
	tp_mod->mod = mod;

	mutex_lock(&tracepoint_module_list_mutex);
	list_add_tail(&tp_mod->list, &tracepoint_module_list);
	blocking_notifier_call_chain(&tracepoint_notify_list,
			MODULE_STATE_COMING, tp_mod);
	mutex_unlock(&tracepoint_module_list_mutex);
	return 0;
}

static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}

static int tracepoint_module_notify(struct notifier_block *self,
		unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = tracepoint_module_coming(mod);
		break;
	case MODULE_STATE_LIVE:
		break;
	case MODULE_STATE_GOING:
		tracepoint_module_going(mod);
		break;
	case MODULE_STATE_UNFORMED:
		break;
	}
	return notifier_from_errno(ret);
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

static __init int init_tracepoints(void)
{
	int ret;

	ret = register_module_notifier(&tracepoint_module_nb);
	if (ret)
		pr_warn("Failed to register tracepoint module enter notifier\n");

	return ret;
}
__initcall(init_tracepoints);

/**
 * for_each_tracepoint_in_module - iteration on all tracepoints in a module
 * @mod: module
 * @fct: callback
 * @priv: private data
 */
void for_each_tracepoint_in_module(struct module *mod,
				   void (*fct)(struct tracepoint *tp,
					       struct module *mod, void *priv),
				   void *priv)
{
	tracepoint_ptr_t *begin, *end, *iter;

	lockdep_assert_held(&tracepoint_module_list_mutex);

	if (!mod)
		return;

	begin = mod->tracepoints_ptrs;
	end = mod->tracepoints_ptrs + mod->num_tracepoints;

	for (iter = begin; iter < end; iter++)
		fct(tracepoint_ptr_deref(iter), mod, priv);
}

/**
 * for_each_module_tracepoint - iteration on all tracepoints in all modules
 * @fct: callback
 * @priv: private data
 */
void for_each_module_tracepoint(void (*fct)(struct tracepoint *tp,
					    struct module *mod, void *priv),
				void *priv)
{
	struct tp_module *tp_mod;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		for_each_tracepoint_in_module(tp_mod->mod, fct, priv);
	mutex_unlock(&tracepoint_module_list_mutex);
}
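
/*
 * Iteration usage sketch (not part of this file): a callback suitable for
 * for_each_module_tracepoint() or for_each_tracepoint_in_module().  The
 * names count_module_tps and nr are hypothetical.
 *
 *	static void count_module_tps(struct tracepoint *tp, struct module *mod,
 *				     void *priv)
 *	{
 *		unsigned int *nr = priv;
 *
 *		pr_info("%s: tracepoint %s\n", mod->name, tp->name);
 *		(*nr)++;
 *	}
 *
 *	unsigned int nr = 0;
 *
 *	for_each_module_tracepoint(count_module_tps, &nr);
 */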

#endif /* CONFIG_MODULES */

/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;

int syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;

	return 0;
}

void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}
#endif