/*
 * trace_events_trigger - trace event triggers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

static void
trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	synchronize_sched(); /* make sure current triggers exit before free */
	kfree(data);
}
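
/*
 * Illustrative usage from user space (a sketch, not part of this file;
 * the tracefs mount point and the sched_switch event are assumptions
 * chosen for the example):
 *
 *	# echo 'traceoff:5 if prev_pid == 1' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 *	# echo '!traceoff' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * The first write installs a traceoff trigger limited to 5 hits and
 * gated by an 'if' filter; the second, with the '!' prefix, removes it
 * again.  The string is parsed by trigger_process_regex() and
 * event_trigger_callback() below.
 */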

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and if the record matches the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (!rec) {
			data->ops->func(data);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (data->cmd_ops->post_trigger) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);

/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
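
/*
 * Sketch of how the two calls above pair up at an event submission
 * site (a simplified pseudo-call-site; the real callers live in the
 * event commit path, not in this file):
 *
 *	enum event_trigger_type tt;
 *
 *	tt = event_triggers_call(file, entry);
 *	... write the event to the ring buffer ...
 *	if (tt)
 *		event_triggers_post_call(file, tt);
 */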

#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)

static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS)
		return NULL;

	return seq_list_next(t, &event_file->triggers, pos);
}

static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};
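
/*
 * The seq_file operations above back reads of the per-event 'trigger'
 * file.  Example output (illustrative):
 *
 *	# cat events/sched/sched_switch/trigger
 *	# Available triggers:
 *	# traceon traceoff snapshot stacktrace enable_event disable_event
 *
 * Once a trigger has been set, the registered triggers are listed
 * instead, one per line, e.g. "traceoff:count=5 if prev_pid == 1".
 */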

static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}

static int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next = buff;
	struct event_command *p;
	int ret = -EINVAL;

	command = strsep(&next, ": \t");
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}

static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};

/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
static __init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
static __init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}
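
/*
 * A hypothetical additional command would be wired up the same way as
 * the built-in ones defined later in this file (a sketch only; 'mycmd',
 * ETT_MYCMD and mycmd_get_trigger_ops() do not exist):
 *
 *	static struct event_command trigger_mycmd_cmd = {
 *		.name = "mycmd",
 *		.trigger_type = ETT_MYCMD,
 *		.func = event_trigger_callback,
 *		.reg = register_trigger,
 *		.unreg = unregister_trigger,
 *		.get_trigger_ops = mycmd_get_trigger_ops,
 *		.set_filter = set_trigger_filter,
 *	};
 *
 *	ret = register_event_command(&trigger_mycmd_cmd);
 *
 * The generic helpers below (event_trigger_print(), event_trigger_init()
 * and event_trigger_free()) are meant to be reused by such commands.
 */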

/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_init(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

static int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}

/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data;
		list_for_each_entry_rcu(data, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}

/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
static void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->filter || data->cmd_ops->post_trigger) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}

/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}

/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero.  Consider
	 * registering no triggers a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	ret = 0;
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
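
/*
 * Worked example of the parsing above, for the illustrative input
 * 'traceoff:5 if prev_pid == 1' written to a trigger file:
 * trigger_process_regex() splits off cmd == "traceoff" and passes the
 * remainder as param == "5 if prev_pid == 1".  Since param starts with
 * a digit, trigger becomes "5" and param advances to "if prev_pid == 1";
 * kstrtoul() then sets trigger_data->count = 5 and the remaining 'if'
 * clause is handed to cmd_ops->set_filter() before registration.
 */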

/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
static int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	if (ret)
		goto out;
 assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		synchronize_sched();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}

static void
traceon_trigger(struct event_trigger_data *data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data)
{
	if (tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_on();
}

static void
traceoff_trigger(struct event_trigger_data *data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data)
{
	if (!tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_off();
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}

static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data)
{
	tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data);
}

static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot() != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */
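
/*
 * Illustrative snapshot trigger usage (the event chosen is an
 * assumption for the example):
 *
 *	# echo 'snapshot:1' > events/block/block_rq_complete/trigger
 *
 * When the event fires, tracing_snapshot() captures the current ring
 * buffer contents into the 'snapshot' file; register_snapshot_trigger()
 * above makes sure the snapshot buffer is allocated at registration
 * time rather than at trigger time.
 */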

#ifdef CONFIG_STACKTRACE
/*
 * Skip 3:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 3

static void
stacktrace_trigger(struct event_trigger_data *data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.post_trigger = true,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */

static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"

struct enable_trigger_data {
	struct trace_event_file *file;
	bool enable;
};
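
/*
 * Illustrative enable_event/disable_event usage (the event names are
 * assumptions for the example):
 *
 *	# echo 'enable_event:kmem:kmalloc:3' > \
 *		events/sched/sched_switch/trigger
 *
 * i.e. when sched_switch fires, soft-enable the kmem:kmalloc event for
 * the next 3 hits.  The target event is put into soft mode at
 * registration time (see trace_event_enable_disable() in
 * event_enable_trigger_func() below), so firing the trigger only has
 * to flip the SOFT_DISABLED bit, as event_enable_trigger() does.
 */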

static void
event_enable_trigger(struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

static void
event_enable_count_trigger(struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data);
}

static int
event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			   struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

static void
event_enable_trigger_free(struct event_trigger_ops *ops,
			  struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

static struct event_trigger_ops event_enable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static int
event_enable_trigger_func(struct event_command *cmd_ops,
			  struct trace_event_file *file,
			  char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero.  Consider
	 * registering no triggers a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	kfree(enable_data);
	goto out;
}

static int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}

static void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();

	return 0;
}