// SPDX-License-Identifier: GPL-2.0
/*
 * event probes
 *
 * Part of this code was copied from kernel/trace/trace_kprobe.c written by
 * Masami Hiramatsu <mhiramat@kernel.org>
 *
 * Copyright (C) 2021, VMware Inc, Steven Rostedt <rostedt@goodmis.org>
 * Copyright (C) 2021, VMware Inc, Tzvetomir Stoyanov <tz.stoyanov@gmail.com>
 *
 */
#include <linux/cleanup.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/mutex.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_kernel.h"
#include "trace_probe_tmpl.h"

#define EPROBE_EVENT_SYSTEM "eprobes"

/* One event probe: a dynamic event attached on top of an existing event */
struct trace_eprobe {
	/* tracepoint system */
	const char *event_system;

	/* tracepoint event */
	const char *event_name;

	/* filter string for the tracepoint */
	char *filter_str;

	/* the event this probe is attached to (holds a reference) */
	struct trace_event_call *event;

	struct dyn_event devent;
	struct trace_probe tp;
};

/* Links an eprobe to the trace_event_file it records into */
struct eprobe_data {
	struct trace_event_file *file;
	struct trace_eprobe *ep;
};


#define for_each_trace_eprobe_tp(ep, _tp) \
	list_for_each_entry(ep, trace_probe_probe_list(_tp), tp.list)

static int __trace_eprobe_create(int argc, const char *argv[]);

/* Free an eprobe and everything it owns, including the attached event ref */
static void trace_event_probe_cleanup(struct trace_eprobe *ep)
{
	if (!ep)
		return;
	trace_probe_cleanup(&ep->tp);
	kfree(ep->event_name);
	kfree(ep->event_system);
	if (ep->event)
		trace_event_put_ref(ep->event);
	kfree(ep->filter_str);
	kfree(ep);
}

DEFINE_FREE(trace_event_probe_cleanup, struct trace_eprobe *,
	if (!IS_ERR_OR_NULL(_T)) trace_event_probe_cleanup(_T))

static struct trace_eprobe *to_trace_eprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_eprobe, devent);
}

static int eprobe_dyn_event_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_eprobe_create);
}

/* Print one eprobe definition: "e:GRP/NAME SYSTEM.EVENT arg=... " */
static int eprobe_dyn_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_eprobe *ep = to_trace_eprobe(ev);
	int i;

	seq_printf(m, "e:%s/%s", trace_probe_group_name(&ep->tp),
				trace_probe_name(&ep->tp));
	seq_printf(m, " %s.%s", ep->event_system, ep->event_name);

	for (i = 0; i < ep->tp.nr_args; i++)
		seq_printf(m, " %s=%s", ep->tp.args[i].name, ep->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

static int unregister_trace_eprobe(struct trace_eprobe *ep)
{
	/* If other probes are on the event, just unregister eprobe */
	if (trace_probe_has_sibling(&ep->tp))
		goto unreg;

	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&ep->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (trace_probe_unregister_event_call(&ep->tp))
		return -EBUSY;

unreg:
	dyn_event_remove(&ep->devent);
	trace_probe_unlink(&ep->tp);

	return 0;
}

/* dyn_event 'free' callback: unregister, and only free on success */
static int eprobe_dyn_event_release(struct dyn_event *ev)
{
	struct trace_eprobe *ep = to_trace_eprobe(ev);
	int ret = unregister_trace_eprobe(ep);

	if (!ret)
		trace_event_probe_cleanup(ep);
	return ret;
}

static bool eprobe_dyn_event_is_busy(struct dyn_event *ev)
{
	struct trace_eprobe *ep = to_trace_eprobe(ev);

	return trace_probe_is_enabled(&ep->tp);
}

static bool eprobe_dyn_event_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_eprobe *ep = to_trace_eprobe(ev);
	const char *slash;

	/*
	 * We match the following:
	 *  event only			- match all eprobes with event name
	 *  system and event only	- match all system/event probes
	 *  system only			- match all system probes
	 *
	 * The below has the above satisfied with more arguments:
	 *
	 *  attached system/event	- If the arg has the system and event
	 *				  the probe is attached to, match
	 *				  probes with the attachment.
	 *
	 *  If any more args are given, then it requires a full match.
	 */

	/*
	 * If system exists, but this probe is not part of that system
	 * do not match.
	 */
	if (system && strcmp(trace_probe_group_name(&ep->tp), system) != 0)
		return false;

	/* Must match the event name */
	if (event[0] != '\0' && strcmp(trace_probe_name(&ep->tp), event) != 0)
		return false;

	/* No arguments match all */
	if (argc < 1)
		return true;

	/* First argument is the system/event the probe is attached to */

	/* Accept either SYSTEM/EVENT or SYSTEM.EVENT as separator */
	slash = strchr(argv[0], '/');
	if (!slash)
		slash = strchr(argv[0], '.');
	if (!slash)
		return false;

	if (strncmp(ep->event_system, argv[0], slash - argv[0]))
		return false;
	if (strcmp(ep->event_name, slash + 1))
		return false;

	argc--;
	argv++;

	/* If there are no other args, then match */
	if (argc < 1)
		return true;

	return trace_probe_match_command_args(&ep->tp, argc, argv);
}

static struct dyn_event_operations eprobe_dyn_event_ops = {
	.create = eprobe_dyn_event_create,
	.show = eprobe_dyn_event_show,
	.is_busy = eprobe_dyn_event_is_busy,
	.free = eprobe_dyn_event_release,
	.match = eprobe_dyn_event_match,
};

/*
 * Allocate a new eprobe attached to @event. Takes ownership of the
 * reference held on @event: it is put on any failure path (either
 * directly when allocation fails, or by the __free() cleanup via
 * ep->event), and kept on success.
 */
static struct trace_eprobe *alloc_event_probe(const char *group,
					      const char *this_event,
					      struct trace_event_call *event,
					      int nargs)
{
	struct trace_eprobe *ep __free(trace_event_probe_cleanup) = NULL;
	const char *event_name;
	const char *sys_name;
	int ret;

	if (!event)
		return ERR_PTR(-ENODEV);

	sys_name = event->class->system;
	event_name = trace_event_name(event);

	ep = kzalloc(struct_size(ep, tp.args, nargs), GFP_KERNEL);
	if (!ep) {
		trace_event_put_ref(event);
		return ERR_PTR(-ENOMEM);
	}
	ep->event = event;
	ep->event_name = kstrdup(event_name, GFP_KERNEL);
	if (!ep->event_name)
		return ERR_PTR(-ENOMEM);
	ep->event_system = kstrdup(sys_name, GFP_KERNEL);
	if (!ep->event_system)
		return ERR_PTR(-ENOMEM);

	ret = trace_probe_init(&ep->tp, this_event, group, false, nargs);
	if (ret < 0)
		return ERR_PTR(ret);

	dyn_event_init(&ep->devent, &eprobe_dyn_event_ops);
	return_ptr(ep);
}

static int eprobe_event_define_fields(struct trace_event_call *event_call)
{
	struct eprobe_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static struct trace_event_fields eprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = eprobe_event_define_fields },
	{}
};

/* Event entry printers */
static enum print_line_t
print_eprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct eprobe_trace_entry_head *field;
	struct trace_event_call *pevent;
	struct trace_event *probed_event;
	struct trace_seq *s = &iter->seq;
	struct trace_eprobe *ep;
	struct trace_probe *tp;
	unsigned int type;

	field = (struct eprobe_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	ep = container_of(tp, struct trace_eprobe, tp);
	type = ep->event->event.type;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	/* Show "system.event" of the attached event if it can be resolved */
	probed_event = ftrace_find_event(type);
	if (probed_event) {
		pevent = container_of(probed_event, struct trace_event_call, event);
		trace_seq_printf(s, "%s.%s", pevent->class->system,
				 trace_event_name(pevent));
	} else {
		trace_seq_printf(s, "%u", type);
	}

	trace_seq_putc(s, ')');

	if (trace_probe_print_args(s, tp->args, tp->nr_args,
				   (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

/*
 * Read one field value out of the attached event's record @rec.
 * String-type fields return an address suitable for string fetching;
 * numeric fields are sign/zero extended to unsigned long by size.
 */
static nokprobe_inline unsigned long
get_event_field(struct fetch_insn *code, void *rec)
{
	struct ftrace_event_field *field = code->data;
	unsigned long val;
	void *addr;

	addr = rec + field->offset;

	if (is_string_field(field)) {
		switch (field->filter_type) {
		case FILTER_DYN_STRING:
			/* low 16 bits hold the offset from the record start */
			val = (unsigned long)(rec + (*(unsigned int *)addr & 0xffff));
			break;
		case FILTER_RDYN_STRING:
			/* relative dynamic string: offset is from the field itself */
			val = (unsigned long)(addr + (*(unsigned int *)addr & 0xffff));
			break;
		case FILTER_STATIC_STRING:
			val = (unsigned long)addr;
			break;
		case FILTER_PTR_STRING:
			val = (unsigned long)(*(char *)addr);
			break;
		default:
			WARN_ON_ONCE(1);
			return 0;
		}
		return val;
	}

	switch (field->size) {
	case 1:
		if (field->is_signed)
			val = *(char *)addr;
		else
			val = *(unsigned char *)addr;
		break;
	case 2:
		if (field->is_signed)
			val = *(short *)addr;
		else
			val = *(unsigned short *)addr;
		break;
	case 4:
		if (field->is_signed)
			val = *(int *)addr;
		else
			val = *(unsigned int *)addr;
		break;
	default:
		if (field->size == sizeof(long)) {
			if (field->is_signed)
				val = *(long *)addr;
			else
				val = *(unsigned long *)addr;
			break;
		}
		/* This is an array, point to the addr itself */
		val = (unsigned long)addr;
		break;
	}
	return val;
}

/* Compute the extra buffer size needed for this probe's dynamic args */
static int get_eprobe_size(struct trace_probe *tp, void *rec)
{
	struct fetch_insn *code;
	struct probe_arg *arg;
	int i, len, ret = 0;

	for (i = 0; i < tp->nr_args; i++) {
		arg = tp->args + i;
		if (arg->dynamic) {
			unsigned long val;

			code = arg->code;
 retry:
			switch (code->op) {
			case FETCH_OP_TP_ARG:
				val = get_event_field(code, rec);
				break;
			case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
				code++;
				goto retry;
			default:
				if (process_common_fetch_insn(code, &val) < 0)
					continue;
			}
			code++;
			len = process_fetch_insn_bottom(code, val, NULL, NULL);
			if (len > 0)
				ret += len;
		}
	}

	return ret;
}

/* Kprobe specific fetch functions */

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
		   void *dest, void *base)
{
	unsigned long val;
	int ret;

 retry:
	switch (code->op) {
	case FETCH_OP_TP_ARG:
		val = get_event_field(code, rec);
		break;
	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
		code++;
		goto retry;
	default:
		ret = process_common_fetch_insn(code, &val);
		if (ret < 0)
			return ret;
	}
	code++;
	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

/* eprobe handler: record an eprobe event from the attached event's data */
static inline void
__eprobe_trace_func(struct eprobe_data *edata, void *rec)
{
	struct eprobe_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&edata->ep->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	if (WARN_ON_ONCE(call != edata->file->event_call))
		return;

	if (trace_trigger_soft_disabled(edata->file))
		return;

	dsize = get_eprobe_size(&edata->ep->tp, rec);

	entry = trace_event_buffer_reserve(&fbuffer, edata->file,
					   sizeof(*entry) + edata->ep->tp.size + dsize);

	if (!entry)
		return;

	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	store_trace_args(&entry[1], &edata->ep->tp, rec, NULL, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

/*
 * The event probe implementation uses event triggers to get access to
 * the event it is attached to, but is not an actual trigger. The below
 * functions are just stubs to fulfill what is needed to use the trigger
 * infrastructure.
 */
static int eprobe_trigger_init(struct event_trigger_data *data)
{
	return 0;
}

static void eprobe_trigger_free(struct event_trigger_data *data)
{

}

static int eprobe_trigger_print(struct seq_file *m,
				struct event_trigger_data *data)
{
	/* Do not print eprobe event triggers */
	return 0;
}

/* Trigger callback invoked when the attached event fires */
static void eprobe_trigger_func(struct event_trigger_data *data,
				struct trace_buffer *buffer, void *rec,
				struct ring_buffer_event *rbe)
{
	struct eprobe_data *edata = data->private_data;

	if (unlikely(!rec))
		return;

	__eprobe_trace_func(edata, rec);
}

static const struct event_trigger_ops eprobe_trigger_ops = {
	.trigger		= eprobe_trigger_func,
	.print			= eprobe_trigger_print,
	.init			= eprobe_trigger_init,
	.free			= eprobe_trigger_free,
};

/* eprobes can not be created via the trigger file; always refuse */
static int eprobe_trigger_cmd_parse(struct event_command *cmd_ops,
				    struct trace_event_file *file,
				    char *glob, char *cmd,
				    char *param_and_filter)
{
	return -1;
}

static int eprobe_trigger_reg_func(char *glob,
				   struct event_trigger_data *data,
				   struct trace_event_file *file)
{
	return -1;
}

static void eprobe_trigger_unreg_func(char *glob,
				      struct event_trigger_data *data,
				      struct trace_event_file *file)
{

}

static const struct event_trigger_ops *eprobe_trigger_get_ops(char *cmd,
							      char *param)
{
	return &eprobe_trigger_ops;
}

static struct event_command event_trigger_cmd = {
	.name			= "eprobe",
	.trigger_type		= ETT_EVENT_EPROBE,
	.flags			= EVENT_CMD_FL_NEEDS_REC,
	.parse			= eprobe_trigger_cmd_parse,
	.reg			= eprobe_trigger_reg_func,
	.unreg			= eprobe_trigger_unreg_func,
	.unreg_all		= NULL,
	.get_trigger_ops	= eprobe_trigger_get_ops,
	.set_filter		= NULL,
};

/*
 * Build the trigger data (and optional event filter) that hooks this
 * eprobe into the attached event's trigger list.
 */
static struct event_trigger_data *
new_eprobe_trigger(struct trace_eprobe *ep, struct trace_event_file *file)
{
	struct event_trigger_data *trigger;
	struct event_filter *filter = NULL;
	struct eprobe_data *edata;
	int ret;

	edata = kzalloc(sizeof(*edata), GFP_KERNEL);
	trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
	if (!trigger || !edata) {
		ret = -ENOMEM;
		goto error;
	}

	trigger->flags = EVENT_TRIGGER_FL_PROBE;
	trigger->count = -1;
	trigger->ops = &eprobe_trigger_ops;

	/*
	 * EVENT PROBE triggers are not registered as commands with
	 * register_event_command(), as they are not controlled by the user
	 * from the trigger file
	 */
	trigger->cmd_ops = &event_trigger_cmd;

	INIT_LIST_HEAD(&trigger->list);

	if (ep->filter_str) {
		/* The filter is applied against the attached event */
		ret = create_event_filter(file->tr, ep->event,
					  ep->filter_str, false, &filter);
		if (ret)
			goto error;
	}
	RCU_INIT_POINTER(trigger->filter, filter);

	edata->file = file;
	edata->ep = ep;
	trigger->private_data = edata;

	return trigger;
error:
	free_event_filter(filter);
	kfree(edata);
	kfree(trigger);
	return ERR_PTR(ret);
}

/* Attach the eprobe's trigger to the event it probes and enable it */
static int enable_eprobe(struct trace_eprobe *ep,
			 struct trace_event_file *eprobe_file)
{
	struct event_trigger_data *trigger;
	struct trace_event_file *file;
	struct trace_array *tr = eprobe_file->tr;

	file = find_event_file(tr, ep->event_system, ep->event_name);
	if (!file)
		return -ENOENT;
	trigger = new_eprobe_trigger(ep, eprobe_file);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	list_add_tail_rcu(&trigger->list, &file->triggers);

	trace_event_trigger_enable_disable(file, 1);
	update_cond_flag(file);

	return 0;
}

static struct trace_event_functions eprobe_funcs = {
	.trace		= print_eprobe_event
};

/* Remove the eprobe's trigger from the attached event and free it */
static int disable_eprobe(struct trace_eprobe *ep,
			  struct trace_array *tr)
{
	struct event_trigger_data *trigger = NULL, *iter;
	struct trace_event_file *file;
	struct event_filter *filter;
	struct eprobe_data *edata;

	file = find_event_file(tr, ep->event_system, ep->event_name);
	if (!file)
		return -ENOENT;

	/* Find the trigger that belongs to this particular eprobe */
	list_for_each_entry(iter, &file->triggers, list) {
		if (!(iter->flags & EVENT_TRIGGER_FL_PROBE))
			continue;
		edata = iter->private_data;
		if (edata->ep == ep) {
			trigger = iter;
			break;
		}
	}
	if (!trigger)
		return -ENODEV;

	list_del_rcu(&trigger->list);

	trace_event_trigger_enable_disable(file, 0);
	update_cond_flag(file);

	/* Make sure nothing is using the edata or trigger */
	tracepoint_synchronize_unregister();

	filter = rcu_access_pointer(trigger->filter);

	if (filter)
		free_event_filter(filter);
	kfree(edata);
	kfree(trigger);

	return 0;
}

/*
 * Enable all eprobes sharing this trace_probe. @file is NULL for the
 * perf path. On partial failure, every eprobe that was enabled is
 * rolled back.
 */
static int enable_trace_eprobe(struct trace_event_call *call,
			       struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_eprobe *ep;
	bool enabled;
	int ret = 0;
	int cnt = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This also changes "enabled" state */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);

	if (enabled)
		return 0;

	for_each_trace_eprobe_tp(ep, tp) {
		ret = enable_eprobe(ep, file);
		if (ret)
			break;
		enabled = true;
		cnt++;
	}

	if (ret) {
		/* Failed to enable one of them. Roll back all */
		if (enabled) {
			/*
			 * It's a bug if one failed for something other than memory
			 * not being available but another eprobe succeeded.
			 */
			WARN_ON_ONCE(ret != -ENOMEM);

			for_each_trace_eprobe_tp(ep, tp) {
				disable_eprobe(ep, file->tr);
				if (!--cnt)
					break;
			}
		}
		if (file)
			trace_probe_remove_file(tp, file);
		else
			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
	}

	return ret;
}

/* Disable eprobes on this trace_probe; @file is NULL for the perf path */
static int disable_trace_eprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_eprobe *ep;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	if (!trace_probe_is_enabled(tp)) {
		for_each_trace_eprobe_tp(ep, tp)
			disable_eprobe(ep, file->tr);
	}

 out:
	if (file)
		/*
		 * Synchronization is done in below function. For perf event,
		 * file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to ensure synchronize
		 * event. We don't need to care about it.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}

/* trace_event_call 'reg' callback: route enable/disable requests */
static int eprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_eprobe(event, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_eprobe(event, file);
#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static inline void init_trace_eprobe_call(struct trace_eprobe *ep)
{
	struct trace_event_call *call = trace_probe_event_call(&ep->tp);

	call->flags = TRACE_EVENT_FL_EPROBE;
	call->event.funcs = &eprobe_funcs;
	call->class->fields_array = eprobe_fields_array;
	call->class->reg = eprobe_register;
}

/*
 * Look up the event to attach to by system/name, skipping events that
 * are themselves probes. On success a reference is taken on the event;
 * the caller owns it.
 */
static struct trace_event_call *
find_and_get_event(const char *system, const char *event_name)
{
	struct trace_event_call *tp_event;
	const char *name;

	list_for_each_entry(tp_event, &ftrace_events, list) {
		/* Skip other probes and ftrace events */
		if (tp_event->flags &
		    (TRACE_EVENT_FL_IGNORE_ENABLE |
		     TRACE_EVENT_FL_KPROBE |
		     TRACE_EVENT_FL_UPROBE |
		     TRACE_EVENT_FL_EPROBE))
			continue;
		if (!tp_event->class->system ||
		    strcmp(system, tp_event->class->system))
			continue;
		name = trace_event_name(tp_event);
		if (!name || strcmp(event_name, name))
			continue;
		if (!trace_event_try_get_ref(tp_event))
			return NULL;
		return tp_event;
	}
	return NULL;
}

/*
 * Rebuild the "if ..." filter string from @argv and verify it parses
 * against the attached event. On failure ep->filter_str is left NULL.
 */
static int trace_eprobe_parse_filter(struct trace_eprobe *ep, int argc, const char *argv[])
{
	struct event_filter *dummy = NULL;
	int i, ret, len = 0;
	char *p;

	if (argc == 0) {
		trace_probe_log_err(0, NO_EP_FILTER);
		return -EINVAL;
	}

	/* Recover the filter string */
	for (i = 0; i < argc; i++)
		len += strlen(argv[i]) + 1;

	ep->filter_str = kzalloc(len, GFP_KERNEL);
	if (!ep->filter_str)
		return -ENOMEM;

	p = ep->filter_str;
	for (i = 0; i < argc; i++) {
		if (i)
			ret = snprintf(p, len, " %s", argv[i]);
		else
			ret = snprintf(p, len, "%s", argv[i]);
		p += ret;
		len -= ret;
	}

	/*
	 * Ensure the filter string can be parsed correctly. Note, this
	 * filter string is for the original event, not for the eprobe.
	 */
	ret = create_event_filter(top_trace_array(), ep->event, ep->filter_str,
				  true, &dummy);
	free_event_filter(dummy);
	if (ret) {
		kfree(ep->filter_str);
		ep->filter_str = NULL;
	}
	return ret;
}

static int __trace_eprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *	e[:[GRP/][ENAME]] SYSTEM.EVENT [FETCHARGS] [if FILTER]
	 * Fetch args (no space):
	 *	<name>=$<field>[:TYPE]
	 */
	struct traceprobe_parse_context *ctx __free(traceprobe_parse_context) = NULL;
	struct trace_eprobe *ep __free(trace_event_probe_cleanup) = NULL;
	const char *trlog __free(trace_probe_log_clear) = NULL;
	const char *event = NULL, *group = EPROBE_EVENT_SYSTEM;
	const char *sys_event = NULL, *sys_name = NULL;
	struct trace_event_call *event_call;
	char *buf1 __free(kfree) = NULL;
	char *buf2 __free(kfree) = NULL;
	char *gbuf __free(kfree) = NULL;
	int ret = 0, filter_idx = 0;
	int i, filter_cnt;

	if (argc < 2 || argv[0][0] != 'e')
		return -ECANCELED;

	trlog = trace_probe_log_init("event_probe", argc, argv);

	event = strchr(&argv[0][1], ':');
	if (event) {
		gbuf = kmalloc(MAX_EVENT_NAME_LEN, GFP_KERNEL);
		if (!gbuf)
			return -ENOMEM;
		event++;
		ret = traceprobe_parse_event_name(&event, &group, gbuf,
						  event - argv[0]);
		if (ret)
			return -EINVAL;
	}

	trace_probe_log_set_index(1);
	sys_event = argv[1];

	buf2 = kmalloc(MAX_EVENT_NAME_LEN, GFP_KERNEL);
	if (!buf2)
		return -ENOMEM;

	/* Split argv[1] into the attached event's system and name */
	ret = traceprobe_parse_event_name(&sys_event, &sys_name, buf2, 0);
	if (ret || !sys_event || !sys_name) {
		trace_probe_log_err(0, NO_EVENT_INFO);
		return -EINVAL;
	}

	/* No explicit eprobe name: default to the attached event's name */
	if (!event) {
		buf1 = kstrdup(sys_event, GFP_KERNEL);
		if (!buf1)
			return -ENOMEM;
		event = buf1;
	}

	/* Split off any trailing "if FILTER" clause */
	for (i = 2; i < argc; i++) {
		if (!strcmp(argv[i], "if")) {
			filter_idx = i + 1;
			filter_cnt = argc - filter_idx;
			argc = i;
			break;
		}
	}

	if (argc - 2 > MAX_TRACE_ARGS) {
		trace_probe_log_set_index(2);
		trace_probe_log_err(0, TOO_MANY_ARGS);
		return -E2BIG;
	}

	scoped_guard(mutex, &event_mutex) {
		event_call = find_and_get_event(sys_name, sys_event);
		ep = alloc_event_probe(group, event, event_call, argc - 2);
	}

	if (IS_ERR(ep)) {
		ret = PTR_ERR(ep);
		if (ret == -ENODEV)
			trace_probe_log_err(0, BAD_ATTACH_EVENT);
		/* This must return -ENOMEM or missing event, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM && ret != -ENODEV);
		return ret;
	}

	if (filter_idx) {
		trace_probe_log_set_index(filter_idx);
		ret = trace_eprobe_parse_filter(ep, filter_cnt, argv + filter_idx);
		if (ret)
			return -EINVAL;
	} else
		ep->filter_str = NULL;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->event = ep->event;
	ctx->flags = TPARG_FL_KERNEL | TPARG_FL_TEVENT;

	argc -= 2; argv += 2;
	/* parse arguments */
	for (i = 0; i < argc; i++) {
		trace_probe_log_set_index(i + 2);

		ret = traceprobe_parse_probe_arg(&ep->tp, i, argv[i], ctx);
		/* Handle symbols "@" */
		if (!ret)
			ret = traceprobe_update_arg(&ep->tp.args[i]);
		if (ret)
			return ret;
	}
	ret = traceprobe_set_print_fmt(&ep->tp, PROBE_PRINT_EVENT);
	if (ret < 0)
		return ret;

	init_trace_eprobe_call(ep);
	scoped_guard(mutex, &event_mutex) {
		ret = trace_probe_register_event_call(&ep->tp);
		if (ret) {
			if (ret == -EEXIST) {
				trace_probe_log_set_index(0);
				trace_probe_log_err(0, EVENT_EXIST);
			}
			return ret;
		}
		ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
		if (ret < 0) {
			trace_probe_unregister_event_call(&ep->tp);
			return ret;
		}
		/* To avoid freeing registered eprobe event, clear ep. */
		ep = NULL;
	}
	return ret;
}

/*
 * Register dynevent at core_initcall. This allows kernel to setup eprobe
 * events in postcore_initcall without tracefs.
 */
static __init int trace_events_eprobe_init_early(void)
{
	int err = 0;

	err = dyn_event_register(&eprobe_dyn_event_ops);
	if (err)
		pr_warn("Could not register eprobe_dyn_event_ops\n");

	return err;
}
core_initcall(trace_events_eprobe_init_early);