/*
 * Copyright (C) 2008 Mathieu Desnoyers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/static_key.h>

extern struct tracepoint * const __start___tracepoints_ptrs[];
extern struct tracepoint * const __stop___tracepoints_ptrs[];

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

/*
 * Tracepoints mutex protects the builtin and module tracepoints and the hash
 * table, as well as the local module list.
 */
static DEFINE_MUTEX(tracepoints_mutex);

#ifdef CONFIG_MODULES
/* Local list of struct module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * Tracepoint hash table, containing the active tracepoints.
 * Protected by tracepoints_mutex.
 */
#define TRACEPOINT_HASH_BITS 6
#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];

/*
 * Note about RCU:
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 * Tracepoint entries modifications are protected by the tracepoints_mutex.
 */
struct tracepoint_entry {
	struct hlist_node hlist;
	struct tracepoint_func *funcs;
	int refcount;	/* Number of times armed. 0 if disarmed. */
	char name[0];
};

struct tp_probes {
	union {
		struct rcu_head rcu;
		struct list_head list;
	} u;
	struct tracepoint_func probes[0];
};

static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func)
			+ sizeof(struct tp_probes), GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}
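
/*
 * Layout note (illustrative, not from the original file): allocate_probes()
 * over-allocates so the struct tp_probes bookkeeping header sits immediately
 * before the array callers see. For count == 3 the allocation is roughly:
 *
 *	| u (rcu/list) | probes[0] | probes[1] | probes[2] |
 *	^ tp_probes    ^ pointer returned to the caller (p->probes)
 *
 * release_probes() below recovers the hidden header from the array pointer
 * with container_of(), so every probes array carries its own rcu_head.
 */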

static void rcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, u.rcu));
}

static inline void release_probes(struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);
		call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
	}
}

static void debug_print_probes(struct tracepoint_entry *entry)
{
	int i;

	if (!tracepoint_debug || !entry->funcs)
		return;

	for (i = 0; entry->funcs[i].func; i++)
		printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func);
}

static struct tracepoint_func *
tracepoint_entry_add_probe(struct tracepoint_entry *entry,
			   void *probe, void *data)
{
	int nr_probes = 0;
	struct tracepoint_func *old, *new;

	if (WARN_ON(!probe))
		return ERR_PTR(-EINVAL);

	debug_print_probes(entry);
	old = entry->funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++)
			if (old[nr_probes].func == probe &&
			    old[nr_probes].data == data)
				return ERR_PTR(-EEXIST);
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old)
		memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
	new[nr_probes].func = probe;
	new[nr_probes].data = data;
	new[nr_probes + 1].func = NULL;
	entry->refcount = nr_probes + 1;
	entry->funcs = new;
	debug_print_probes(entry);
	return old;
}

static void *
tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
			      void *probe, void *data)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = entry->funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(entry);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (probe) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if (old[nr_probes].func == probe &&
			    old[nr_probes].data == data)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		entry->funcs = NULL;
		entry->refcount = 0;
		debug_print_probes(entry);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new == NULL)
			return ERR_PTR(-ENOMEM);
		for (i = 0; old[i].func; i++)
			if (old[i].func != probe || old[i].data != data)
				new[j++] = old[i];
		new[nr_probes - nr_del].func = NULL;
		entry->refcount = nr_probes - nr_del;
		entry->funcs = new;
	}
	debug_print_probes(entry);
	return old;
}

/*
 * Get tracepoint if the tracepoint is present in the tracepoint hash table.
 * Must be called with tracepoints_mutex held.
 * Returns NULL if not present.
 */
static struct tracepoint_entry *get_tracepoint(const char *name)
{
	struct hlist_head *head;
	struct tracepoint_entry *e;
	u32 hash = jhash(name, strlen(name), 0);

	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
	hlist_for_each_entry(e, head, hlist) {
		if (!strcmp(name, e->name))
			return e;
	}
	return NULL;
}
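
/*
 * Lookup sketch (the "sched_switch" name is an example, not taken from this
 * file): get_tracepoint() picks a bucket by hashing the tracepoint name,
 *
 *	hash = jhash("sched_switch", strlen("sched_switch"), 0);
 *	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
 *
 * i.e. the low TRACEPOINT_HASH_BITS (6) bits select one of 64 buckets, and
 * the hlist chain is then scanned with strcmp() to resolve collisions.
 */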

/*
 * Add the tracepoint to the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static struct tracepoint_entry *add_tracepoint(const char *name)
{
	struct hlist_head *head;
	struct tracepoint_entry *e;
	size_t name_len = strlen(name) + 1;
	u32 hash = jhash(name, name_len - 1, 0);

	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
	hlist_for_each_entry(e, head, hlist) {
		if (!strcmp(name, e->name)) {
			printk(KERN_NOTICE
				"tracepoint %s busy\n", name);
			return ERR_PTR(-EEXIST);	/* Already there */
		}
	}
	/*
	 * Using kmalloc here to allocate a variable length element. Could
	 * cause some memory fragmentation if overused.
	 */
	e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
	if (!e)
		return ERR_PTR(-ENOMEM);
	memcpy(&e->name[0], name, name_len);
	e->funcs = NULL;
	e->refcount = 0;
	hlist_add_head(&e->hlist, head);
	return e;
}

/*
 * Remove the tracepoint from the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static inline void remove_tracepoint(struct tracepoint_entry *e)
{
	hlist_del(&e->hlist);
	kfree(e);
}

/*
 * Sets the probe callback corresponding to one tracepoint.
 */
static void set_tracepoint(struct tracepoint_entry **entry,
	struct tracepoint *elem, int active)
{
	WARN_ON(strcmp((*entry)->name, elem->name) != 0);

	if (elem->regfunc && !static_key_enabled(&elem->key) && active)
		elem->regfunc();
	else if (elem->unregfunc && static_key_enabled(&elem->key) && !active)
		elem->unregfunc();

	/*
	 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
	 * probe callbacks array is consistent before setting a pointer to it.
	 * This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h. A matching smp_read_barrier_depends()
	 * is used.
	 */
	rcu_assign_pointer(elem->funcs, (*entry)->funcs);
	if (active && !static_key_enabled(&elem->key))
		static_key_slow_inc(&elem->key);
	else if (!active && static_key_enabled(&elem->key))
		static_key_slow_dec(&elem->key);
}

/*
 * Disable a tracepoint and its probe callback.
 * Note: only waiting for an RCU grace period after setting elem->funcs to
 * NULL ensures that the original callback is not used anymore. This is
 * guaranteed by the preempt_disable around the call site.
 */
static void disable_tracepoint(struct tracepoint *elem)
{
	if (elem->unregfunc && static_key_enabled(&elem->key))
		elem->unregfunc();

	if (static_key_enabled(&elem->key))
		static_key_slow_dec(&elem->key);
	rcu_assign_pointer(elem->funcs, NULL);
}
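
/*
 * Reader-side sketch (simplified from __DO_TRACE in
 * include/linux/tracepoint.h; not a verbatim copy): the probe array
 * published by rcu_assign_pointer() above is consumed roughly as
 *
 *	rcu_read_lock_sched_notrace();
 *	it_func_ptr = rcu_dereference_sched((tp)->funcs);
 *	if (it_func_ptr) {
 *		do {
 *			it_func = (it_func_ptr)->func;
 *			__data = (it_func_ptr)->data;
 *			((void (*)(proto))(it_func))(args);
 *		} while ((++it_func_ptr)->func);
 *	}
 *	rcu_read_unlock_sched_notrace();
 *
 * which is why replaced probe arrays may only be freed after an RCU-sched
 * grace period (see release_probes() above).
 */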

/**
 * tracepoint_update_probe_range - Update a probe range
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Updates the probe callback corresponding to a range of tracepoints.
 * Called with tracepoints_mutex held.
 */
static void tracepoint_update_probe_range(struct tracepoint * const *begin,
					  struct tracepoint * const *end)
{
	struct tracepoint * const *iter;
	struct tracepoint_entry *mark_entry;

	if (!begin)
		return;

	for (iter = begin; iter < end; iter++) {
		mark_entry = get_tracepoint((*iter)->name);
		if (mark_entry) {
			set_tracepoint(&mark_entry, *iter,
					!!mark_entry->refcount);
		} else {
			disable_tracepoint(*iter);
		}
	}
}

#ifdef CONFIG_MODULES
void module_update_tracepoints(void)
{
	struct tp_module *tp_mod;

	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		tracepoint_update_probe_range(tp_mod->tracepoints_ptrs,
			tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints);
}
#else /* CONFIG_MODULES */
void module_update_tracepoints(void)
{
}
#endif /* CONFIG_MODULES */

/*
 * Update probes, removing the faulty probes.
 * Called with tracepoints_mutex held.
 */
static void tracepoint_update_probes(void)
{
	/* Core kernel tracepoints */
	tracepoint_update_probe_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs);
	/* tracepoints in modules. */
	module_update_tracepoints();
}

static struct tracepoint_func *
tracepoint_add_probe(const char *name, void *probe, void *data)
{
	struct tracepoint_entry *entry;
	struct tracepoint_func *old;

	entry = get_tracepoint(name);
	if (!entry) {
		entry = add_tracepoint(name);
		if (IS_ERR(entry))
			return (struct tracepoint_func *)entry;
	}
	old = tracepoint_entry_add_probe(entry, probe, data);
	if (IS_ERR(old) && !entry->refcount)
		remove_tracepoint(entry);
	return old;
}

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @name: tracepoint name
 * @probe: probe handler
 * @data: probe private data passed back to the handler
 *
 * Returns 0 if ok, error value on error.
 * The probe address must at least be aligned on the architecture pointer size.
 */
int tracepoint_probe_register(const char *name, void *probe, void *data)
{
	struct tracepoint_func *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_add_probe(name, probe, data);
	if (IS_ERR(old)) {
		mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_update_probes();		/* may update entry */
	mutex_unlock(&tracepoints_mutex);
	release_probes(old);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);

static struct tracepoint_func *
tracepoint_remove_probe(const char *name, void *probe, void *data)
{
	struct tracepoint_entry *entry;
	struct tracepoint_func *old;

	entry = get_tracepoint(name);
	if (!entry)
		return ERR_PTR(-ENOENT);
	old = tracepoint_entry_remove_probe(entry, probe, data);
	if (IS_ERR(old))
		return old;
	if (!entry->refcount)
		remove_tracepoint(entry);
	return old;
}
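
/*
 * Usage sketch for the register (above) / unregister (below) pair. The probe,
 * its signature and the "irq_entry" tracepoint name are hypothetical, for
 * illustration only:
 *
 *	static void my_probe(void *data, int irq)
 *	{
 *		... probe body, runs with preemption disabled ...
 *	}
 *
 *	ret = tracepoint_probe_register("irq_entry", my_probe, NULL);
 *	...
 *	ret = tracepoint_probe_unregister("irq_entry", my_probe, NULL);
 *
 * The same (probe, data) pair must be passed to unregister as was passed to
 * register, since both values together identify the entry in the funcs array.
 */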

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @name: tracepoint name
 * @probe: probe function pointer
 * @data: probe private data passed back to the handler
 *
 * We do not need to call a synchronize_sched to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which ensures that every preempt disabled
 * section has finished.
 */
int tracepoint_probe_unregister(const char *name, void *probe, void *data)
{
	struct tracepoint_func *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_remove_probe(name, probe, data);
	if (IS_ERR(old)) {
		mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_update_probes();		/* may update entry */
	mutex_unlock(&tracepoints_mutex);
	release_probes(old);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

static LIST_HEAD(old_probes);
static int need_update;

static void tracepoint_add_old_probes(void *old)
{
	need_update = 1;
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);
		list_add(&tp_probes->u.list, &old_probes);
	}
}

/**
 * tracepoint_probe_register_noupdate - register a probe but not connect
 * @name: tracepoint name
 * @probe: probe handler
 * @data: probe private data passed back to the handler
 *
 * Caller must call tracepoint_probe_update_all() afterwards.
 */
int tracepoint_probe_register_noupdate(const char *name, void *probe,
				       void *data)
{
	struct tracepoint_func *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_add_probe(name, probe, data);
	if (IS_ERR(old)) {
		mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_add_old_probes(old);
	mutex_unlock(&tracepoints_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);

/**
 * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect
 * @name: tracepoint name
 * @probe: probe function pointer
 * @data: probe private data passed back to the handler
 *
 * Caller must call tracepoint_probe_update_all() afterwards.
 */
int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
					 void *data)
{
	struct tracepoint_func *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_remove_probe(name, probe, data);
	if (IS_ERR(old)) {
		mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_add_old_probes(old);
	mutex_unlock(&tracepoints_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);

/**
 * tracepoint_probe_update_all - update tracepoints
 */
void tracepoint_probe_update_all(void)
{
	LIST_HEAD(release_probes);
	struct tp_probes *pos, *next;

	mutex_lock(&tracepoints_mutex);
	if (!need_update) {
		mutex_unlock(&tracepoints_mutex);
		return;
	}
	if (!list_empty(&old_probes))
		list_replace_init(&old_probes, &release_probes);
	need_update = 0;
	tracepoint_update_probes();
	mutex_unlock(&tracepoints_mutex);
	list_for_each_entry_safe(pos, next, &release_probes, u.list) {
		list_del(&pos->u.list);
		call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
	}
}
EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
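
/*
 * Batching sketch (the "tp_a"/"tp_b" names and probes are hypothetical): the
 * _noupdate variants above queue changes and defer the probe-array update,
 * so several changes cost one tracepoint_update_probes() pass:
 *
 *	tracepoint_probe_register_noupdate("tp_a", probe_a, NULL);
 *	tracepoint_probe_register_noupdate("tp_b", probe_b, NULL);
 *	tracepoint_probe_update_all();
 *
 * tracepoint_probe_update_all() also drains old_probes, RCU-freeing the
 * replaced probe arrays in one batch.
 */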

/**
 * tracepoint_get_iter_range - Get the next tracepoint in a given range
 * @tracepoint: current tracepoint (in), next tracepoint (out)
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Returns whether a next tracepoint has been found (1) or not (0).
 * Will return the first tracepoint in the range if the input tracepoint is
 * NULL.
 */
static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
	struct tracepoint * const *begin, struct tracepoint * const *end)
{
	if (!*tracepoint && begin != end) {
		*tracepoint = begin;
		return 1;
	}
	if (*tracepoint >= begin && *tracepoint < end)
		return 1;
	return 0;
}

#ifdef CONFIG_MODULES
static void tracepoint_get_iter(struct tracepoint_iter *iter)
{
	int found = 0;
	struct tp_module *iter_mod;

	/* Core kernel tracepoints */
	if (!iter->module) {
		found = tracepoint_get_iter_range(&iter->tracepoint,
				__start___tracepoints_ptrs,
				__stop___tracepoints_ptrs);
		if (found)
			goto end;
	}
	/* Tracepoints in modules */
	mutex_lock(&tracepoints_mutex);
	list_for_each_entry(iter_mod, &tracepoint_module_list, list) {
		/*
		 * Sorted module list
		 */
		if (iter_mod < iter->module)
			continue;
		else if (iter_mod > iter->module)
			iter->tracepoint = NULL;
		found = tracepoint_get_iter_range(&iter->tracepoint,
			iter_mod->tracepoints_ptrs,
			iter_mod->tracepoints_ptrs
				+ iter_mod->num_tracepoints);
		if (found) {
			iter->module = iter_mod;
			break;
		}
	}
	mutex_unlock(&tracepoints_mutex);
end:
	if (!found)
		tracepoint_iter_reset(iter);
}
#else /* CONFIG_MODULES */
static void tracepoint_get_iter(struct tracepoint_iter *iter)
{
	int found = 0;

	/* Core kernel tracepoints */
	found = tracepoint_get_iter_range(&iter->tracepoint,
			__start___tracepoints_ptrs,
			__stop___tracepoints_ptrs);
	if (!found)
		tracepoint_iter_reset(iter);
}
#endif /* CONFIG_MODULES */

void tracepoint_iter_start(struct tracepoint_iter *iter)
{
	tracepoint_get_iter(iter);
}
EXPORT_SYMBOL_GPL(tracepoint_iter_start);

void tracepoint_iter_next(struct tracepoint_iter *iter)
{
	iter->tracepoint++;
	/*
	 * iter->tracepoint may be invalid because we blindly incremented it.
	 * Make sure it is valid by checking it against the tracepoint ranges,
	 * moving on to the following modules' tracepoints if necessary.
	 */
	tracepoint_get_iter(iter);
}
EXPORT_SYMBOL_GPL(tracepoint_iter_next);

void tracepoint_iter_stop(struct tracepoint_iter *iter)
{
}
EXPORT_SYMBOL_GPL(tracepoint_iter_stop);

void tracepoint_iter_reset(struct tracepoint_iter *iter)
{
#ifdef CONFIG_MODULES
	iter->module = NULL;
#endif /* CONFIG_MODULES */
	iter->tracepoint = NULL;
}
EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
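
/*
 * Iterator usage sketch (illustrative; pr_info() is only a stand-in
 * consumer): the iterator walks core kernel tracepoints first, then each
 * module's, in module-pointer order. When the ranges are exhausted,
 * tracepoint_get_iter() resets the iterator, terminating the loop:
 *
 *	struct tracepoint_iter iter;
 *
 *	tracepoint_iter_reset(&iter);
 *	for (tracepoint_iter_start(&iter); iter.tracepoint;
 *	     tracepoint_iter_next(&iter))
 *		pr_info("%s\n", (*iter.tracepoint)->name);
 *	tracepoint_iter_stop(&iter);
 */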

#ifdef CONFIG_MODULES
bool trace_module_has_bad_taint(struct module *mod)
{
	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
}

static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod, *iter;
	int ret = 0;

	/*
	 * We skip modules that taint the kernel, especially those with
	 * different module headers (for forced load), to make sure we don't
	 * cause a crash. Staging and out-of-tree GPL modules are fine.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;
	mutex_lock(&tracepoints_mutex);
	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod) {
		ret = -ENOMEM;
		goto end;
	}
	tp_mod->num_tracepoints = mod->num_tracepoints;
	tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;

	/*
	 * tracepoint_module_list is kept sorted by struct module pointer
	 * address for iteration on tracepoints from a seq_file that can
	 * release the mutex between calls.
	 */
	list_for_each_entry_reverse(iter, &tracepoint_module_list, list) {
		BUG_ON(iter == tp_mod);	/* Should never be in the list twice */
		if (iter < tp_mod) {
			/* We belong to the location right after iter. */
			list_add(&tp_mod->list, &iter->list);
			goto module_added;
		}
	}
	/* We belong to the beginning of the list */
	list_add(&tp_mod->list, &tracepoint_module_list);
module_added:
	tracepoint_update_probe_range(mod->tracepoints_ptrs,
		mod->tracepoints_ptrs + mod->num_tracepoints);
end:
	mutex_unlock(&tracepoints_mutex);
	return ret;
}

static int tracepoint_module_going(struct module *mod)
{
	struct tp_module *pos;

	mutex_lock(&tracepoints_mutex);
	tracepoint_update_probe_range(mod->tracepoints_ptrs,
		mod->tracepoints_ptrs + mod->num_tracepoints);
	list_for_each_entry(pos, &tracepoint_module_list, list) {
		if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) {
			list_del(&pos->list);
			kfree(pos);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the
	 * "tainted" flag on "going", in case a module taints the kernel only
	 * after being loaded.
	 */
	mutex_unlock(&tracepoints_mutex);
	return 0;
}

int tracepoint_module_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = tracepoint_module_coming(mod);
		break;
	case MODULE_STATE_LIVE:
		break;
	case MODULE_STATE_GOING:
		ret = tracepoint_module_going(mod);
		break;
	}
	return ret;
}

struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

static int init_tracepoints(void)
{
	return register_module_notifier(&tracepoint_module_nb);
}
__initcall(init_tracepoints);
#endif /* CONFIG_MODULES */

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;
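
/*
 * sys_tracepoint_refcount counts how many syscall tracepoints are armed.
 * Only the 0 -> 1 transition below flags all user threads (kernel threads,
 * which have no mm, are skipped) with TIF_SYSCALL_TRACEPOINT, and only the
 * 1 -> 0 transition clears it; intermediate register/unregister calls just
 * adjust the count.
 */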

void syscall_regfunc(void)
{
	unsigned long flags;
	struct task_struct *g, *t;

	if (!sys_tracepoint_refcount) {
		read_lock_irqsave(&tasklist_lock, flags);
		do_each_thread(g, t) {
			/* Skip kernel threads. */
			if (t->mm)
				set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		} while_each_thread(g, t);
		read_unlock_irqrestore(&tasklist_lock, flags);
	}
	sys_tracepoint_refcount++;
}

void syscall_unregfunc(void)
{
	unsigned long flags;
	struct task_struct *g, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock_irqsave(&tasklist_lock, flags);
		do_each_thread(g, t) {
			clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		} while_each_thread(g, t);
		read_unlock_irqrestore(&tasklist_lock, flags);
	}
}
#endif