// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_synth - synthetic trace events
 *
 * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/rculist.h>
#include <linux/tracefs.h>

/* for gfp flag names */
#include <linux/trace_events.h>
#include <trace/events/mmflags.h>
#include "trace_probe.h"
#include "trace_probe_kernel.h"

#include "trace_synth.h"

/*
 * Parser error table. ERRORS is expanded twice below with different
 * definitions of C(): once to build the SYNTH_ERR_* enum, once to build
 * the matching err_text[] strings, keeping the two in sync automatically.
 */
#undef ERRORS
#define ERRORS	\
	C(BAD_NAME,		"Illegal name"),		\
	C(INVALID_CMD,		"Command must be of the form: <name> field[;field] ..."),\
	C(INVALID_DYN_CMD,	"Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
	C(EVENT_EXISTS,		"Event already exists"),	\
	C(TOO_MANY_FIELDS,	"Too many fields"),		\
	C(INCOMPLETE_TYPE,	"Incomplete type"),		\
	C(INVALID_TYPE,		"Invalid type"),		\
	C(INVALID_FIELD,	"Invalid field"),		\
	C(INVALID_ARRAY_SPEC,	"Invalid array specification"),

/* First expansion: the SYNTH_ERR_* error codes. */
#undef C
#define C(a, b)		SYNTH_ERR_##a

enum { ERRORS };

/* Second expansion: the human-readable message for each code. */
#undef C
#define C(a, b)		b

static const char *err_text[] = { ERRORS };

/* Protects last_cmd, the saved copy of the most recent command string. */
static DEFINE_MUTEX(lastcmd_mutex);
static char *last_cmd;

/*
 * Return the position of @str within the saved command, used as the
 * error caret offset, or 0 if there is no saved command or no @str.
 */
static int errpos(const char *str)
{
	guard(mutex)(&lastcmd_mutex);
	if (!str || !last_cmd)
		return 0;

	return err_pos(last_cmd, str);
}

/* Remember @str as the last command so later errors can reference it. */
static void last_cmd_set(const char *str)
{
	if (!str)
		return;

	mutex_lock(&lastcmd_mutex);
	kfree(last_cmd);
	last_cmd = kstrdup(str, GFP_KERNEL);
	mutex_unlock(&lastcmd_mutex);
}

/* Log a parse error (@err_type indexes err_text[]) at offset @err_pos. */
static void synth_err(u8 err_type, u16 err_pos)
{
	guard(mutex)(&lastcmd_mutex);
	if (!last_cmd)
		return;

	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
			err_type, err_pos);
}

static int create_synth_event(const char *raw_command);
static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
static int synth_event_release(struct dyn_event *ev);
static bool synth_event_is_busy(struct dyn_event *ev);
static bool synth_event_match(const char *system, const char *event,
			      int argc, const char **argv, struct dyn_event *ev);

/* Hooks registering synthetic events with the dyn_event framework. */
static struct dyn_event_operations synth_event_ops = {
	.create = create_synth_event,
	.show = synth_event_show,
	.is_busy = synth_event_is_busy,
	.free = synth_event_release,
	.match = synth_event_match,
};

/* True if @ev is a synthetic event, identified by its ops table. */
static bool is_synth_event(struct dyn_event *ev)
{
	return ev->ops == &synth_event_ops;
}

static struct synth_event *to_synth_event(struct dyn_event *ev)
{
	return container_of(ev, struct synth_event, devent);
}

/* An event with a nonzero reference count must not be released. */
static bool synth_event_is_busy(struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	return event->ref != 0;
}

/*
 * dyn_event match callback: the event name must match and, if a system
 * was given, it must be the synthetic-event system. @argc/@argv are
 * not examined here.
 */
static bool synth_event_match(const char *system, const char *event,
			      int argc, const char **argv, struct dyn_event *ev)
{
	struct synth_event *sev = to_synth_event(ev);

	return strcmp(sev->name, event) == 0 &&
		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
}

/* Ring buffer record layout: common header plus per-field u64 slots. */
struct synth_trace_event {
	struct trace_entry	ent;
	union trace_synth_field	fields[];
};

/*
 * Register each field with the trace core, assigning its byte offset in
 * the record and its u64-slot offset in fields[]. Non-dynamic strings
 * occupy STR_VAR_LEN_MAX bytes inline; every other field (including the
 * offset/len slot of dynamic strings and stacks) takes one u64.
 */
static int synth_event_define_fields(struct trace_event_call *call)
{
	struct synth_trace_event trace;
	int offset = offsetof(typeof(trace), fields);
	struct synth_event *event = call->data;
	unsigned int i, size, n_u64;
	char *name, *type;
	bool is_signed;
	int ret = 0;

	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
		size = event->fields[i]->size;
		is_signed = event->fields[i]->is_signed;
		type = event->fields[i]->type;
		name = event->fields[i]->name;
		ret = trace_define_field(call, type, name, offset, size,
					 is_signed, FILTER_OTHER);
		if (ret)
			break;

		event->fields[i]->offset = n_u64;

		if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
			offset += STR_VAR_LEN_MAX;
			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
		} else {
			offset += sizeof(u64);
			n_u64++;
		}
	}

	event->n_u64 = n_u64;

	return ret;
}

/* "u"-prefixed types and gfp_t are unsigned; everything else is signed. */
static bool synth_field_signed(char *type)
{
	if (str_has_prefix(type, "u"))
		return false;
	if (strcmp(type, "gfp_t") == 0)
		return false;

	return true;
}

/* A string field is declared as a char array, e.g. "char[16]". */
static int synth_field_is_string(char *type)
{
	if (strstr(type, "char[") != NULL)
		return true;

	return false;
}

/* A stacktrace field is declared as a long array, e.g. "long[]". */
static int synth_field_is_stack(char *type)
{
	if (strstr(type, "long[") != NULL)
		return true;

	return false;
}

/*
 * Parse the size out of a "char[N]" type string. Returns the array
 * size, 0 for "char[]" (dynamically sized), or a negative errno for a
 * malformed spec or a size exceeding STR_VAR_LEN_MAX.
 */
static int synth_field_string_size(char *type)
{
	char buf[4], *end, *start;
	unsigned int len;
	int size, err;

	start = strstr(type, "char[");
	if (start == NULL)
		return -EINVAL;
	start += sizeof("char[") - 1;

	end = strchr(type, ']');
	/* ']' must follow the digits and terminate the type string */
	if (!end || end < start || type + strlen(type) > end + 1)
		return -EINVAL;

	len = end - start;
	if (len > 3)
		return -EINVAL;

	if (len == 0)
		return 0; /* variable-length string */

	memcpy(buf, start, len);
	buf[len] = '\0';

	err = kstrtouint(buf, 0, &size);
	if (err)
		return err;

	if (size > STR_VAR_LEN_MAX)
		return -EINVAL;

	return size;
}

/*
 * Size in bytes of a supported field type; the parsed string size for
 * char arrays; 0 for stacktraces (dynamically sized) and for any
 * unrecognized type.
 */
static int synth_field_size(char *type)
{
	int size = 0;

	if (strcmp(type, "s64") == 0)
		size = sizeof(s64);
	else if (strcmp(type, "u64") == 0)
		size = sizeof(u64);
	else if (strcmp(type, "s32") == 0)
		size = sizeof(s32);
	else if (strcmp(type, "u32") == 0)
		size = sizeof(u32);
	else if (strcmp(type, "s16") == 0)
		size = sizeof(s16);
	else if (strcmp(type, "u16") == 0)
		size = sizeof(u16);
	else if (strcmp(type, "s8") == 0)
		size = sizeof(s8);
	else if (strcmp(type, "u8") == 0)
		size = sizeof(u8);
	else if (strcmp(type, "char") == 0)
		size = sizeof(char);
	else if (strcmp(type, "unsigned char") == 0)
		size = sizeof(unsigned char);
	else if (strcmp(type, "int") == 0)
		size = sizeof(int);
	else if (strcmp(type, "unsigned int") == 0)
		size = sizeof(unsigned int);
	else if (strcmp(type, "long") == 0)
		size = sizeof(long);
	else if (strcmp(type, "unsigned long") == 0)
		size = sizeof(unsigned long);
	else if (strcmp(type, "bool") == 0)
		size = sizeof(bool);
	else if (strcmp(type, "pid_t") == 0)
		size = sizeof(pid_t);
	else if (strcmp(type, "gfp_t") == 0)
		size = sizeof(gfp_t);
	else if (synth_field_is_string(type))
		size = synth_field_string_size(type);
	else if (synth_field_is_stack(type))
		size = 0;

	return size;
}

/* printf format used to print one field value of the given type. */
static const char *synth_field_fmt(char *type)
{
	const char *fmt = "%llu";

	if (strcmp(type, "s64") == 0)
		fmt = "%lld";
	else if (strcmp(type, "u64") == 0)
		fmt = "%llu";
	else if (strcmp(type, "s32") == 0)
		fmt = "%d";
	else if (strcmp(type, "u32") == 0)
		fmt = "%u";
	else if (strcmp(type, "s16") == 0)
		fmt = "%d";
	else if (strcmp(type, "u16") == 0)
		fmt = "%u";
	else if (strcmp(type, "s8") == 0)
		fmt = "%d";
	else if (strcmp(type, "u8") == 0)
		fmt = "%u";
	else if (strcmp(type, "char") == 0)
		fmt = "%d";
	else if (strcmp(type, "unsigned char") == 0)
		fmt = "%u";
	else if (strcmp(type, "int") == 0)
		fmt = "%d";
	else if (strcmp(type, "unsigned int") == 0)
		fmt = "%u";
	else if (strcmp(type, "long") == 0)
		fmt = "%ld";
	else if (strcmp(type, "unsigned long") == 0)
		fmt = "%lu";
	else if (strcmp(type, "bool") == 0)
		fmt = "%d";
	else if (strcmp(type, "pid_t") == 0)
		fmt = "%d";
	else if (strcmp(type, "gfp_t") == 0)
		fmt = "%x";
	else if (synth_field_is_string(type))
		fmt = "%s";
	else if
(synth_field_is_stack(type))
		fmt = "%s";

	return fmt;
}

/*
 * Print a numeric field at the width it was stored with; @print_fmt
 * expects (name, value, trailing-space) arguments.
 */
static void print_synth_event_num_val(struct trace_seq *s,
				      char *print_fmt, char *name,
				      int size, union trace_synth_field *val, char *space)
{
	switch (size) {
	case 1:
		trace_seq_printf(s, print_fmt, name, val->as_u8, space);
		break;

	case 2:
		trace_seq_printf(s, print_fmt, name, val->as_u16, space);
		break;

	case 4:
		trace_seq_printf(s, print_fmt, name, val->as_u32, space);
		break;

	default:
		trace_seq_printf(s, print_fmt, name, val->as_u64, space);
		break;
	}
}

/*
 * Output one synthetic event record as "name: field=value ...",
 * walking the u64 slots laid out by synth_event_define_fields().
 */
static enum print_line_t print_synth_event(struct trace_iterator *iter,
					   int flags,
					   struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct synth_trace_event *entry;
	struct synth_event *se;
	unsigned int i, j, n_u64;
	char print_fmt[32];
	const char *fmt;

	entry = (struct synth_trace_event *)iter->ent;
	se = container_of(event, struct synth_event, call.event);

	trace_seq_printf(s, "%s: ", se->name);

	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
		if (trace_seq_has_overflowed(s))
			goto end;

		fmt = synth_field_fmt(se->fields[i]->type);

		/* parameter types */
		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", fmt);

		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);

		/* parameter values */
		if (se->fields[i]->is_string) {
			if (se->fields[i]->is_dynamic) {
				/* slot holds an offset/len pair into the record */
				union trace_synth_field *data = &entry->fields[n_u64];

				trace_seq_printf(s, print_fmt, se->fields[i]->name,
						 STR_VAR_LEN_MAX,
						 (char *)entry + data->as_dynamic.offset,
						 i == se->n_fields - 1 ? "" : " ");
				n_u64++;
			} else {
				/* static string stored inline in the u64 slots */
				trace_seq_printf(s, print_fmt, se->fields[i]->name,
						 STR_VAR_LEN_MAX,
						 (char *)&entry->fields[n_u64].as_u64,
						 i == se->n_fields - 1 ?
"" : " ");
				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
			}
		} else if (se->fields[i]->is_stack) {
			union trace_synth_field *data = &entry->fields[n_u64];
			unsigned long *p = (void *)entry + data->as_dynamic.offset;

			trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name);
			/*
			 * Start at 1: the first element is reserved for the
			 * size (see the "reserve one extra element for size"
			 * comment in trace_event_raw_event_synth()), not a
			 * return address.
			 */
			for (j = 1; j < data->as_dynamic.len / sizeof(long); j++)
				trace_seq_printf(s, "=> %pS\n", (void *)p[j]);
			n_u64++;
		} else {
			struct trace_print_flags __flags[] = {
				__def_gfpflag_names, {-1, NULL} };
			char *space = (i == se->n_fields - 1 ? "" : " ");

			print_synth_event_num_val(s, print_fmt,
						  se->fields[i]->name,
						  se->fields[i]->size,
						  &entry->fields[n_u64],
						  space);

			/* gfp_t additionally gets a symbolic flag decode */
			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
				trace_seq_puts(s, " (");
				trace_print_flags_seq(s, "|",
						      entry->fields[n_u64].as_u64,
						      __flags);
				trace_seq_putc(s, ')');
			}
			n_u64++;
		}
	}
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static struct trace_event_functions synth_event_funcs = {
	.trace		= print_synth_event
};

/*
 * Copy a string value into the record. Dynamic strings are appended
 * after the fixed u64 slots, with an offset/len descriptor left in the
 * field's slot; static strings are copied inline into STR_VAR_LEN_MAX
 * bytes. Returns the number of bytes consumed in the dynamic data area
 * (0 for static strings).
 */
static unsigned int trace_string(struct synth_trace_event *entry,
				 struct synth_event *event,
				 char *str_val,
				 bool is_dynamic,
				 unsigned int data_size,
				 unsigned int *n_u64)
{
	unsigned int len = 0;
	char *str_field;
	int ret;

	if (is_dynamic) {
		union trace_synth_field *data = &entry->fields[*n_u64];

		len = fetch_store_strlen((unsigned long)str_val);
		data->as_dynamic.offset = struct_size(entry, fields, event->n_u64) + data_size;
		data->as_dynamic.len = len;

		/* NOTE(review): ret is not checked on this path */
		ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);

		(*n_u64)++;
	} else {
		str_field = (char *)&entry->fields[*n_u64].as_u64;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		/* the source may be a userspace pointer; copy accordingly */
		if ((unsigned long)str_val < TASK_SIZE)
			ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val,
STR_VAR_LEN_MAX);
		else
#endif
			ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);

		if (ret < 0)
			strcpy(str_field, FAULT_STRING);

		(*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
	}

	return len;
}

/*
 * Copy a stacktrace into the dynamic data area of the record and fill
 * the field's u64 slot with an offset/len descriptor. Returns the
 * number of bytes copied.
 */
static unsigned int trace_stack(struct synth_trace_event *entry,
				struct synth_event *event,
				long *stack,
				unsigned int data_size,
				unsigned int *n_u64)
{
	union trace_synth_field *data = &entry->fields[*n_u64];
	unsigned int len;
	u32 data_offset;
	void *data_loc;

	data_offset = struct_size(entry, fields, event->n_u64);
	data_offset += data_size;

	/* A zero entry terminates the stack (at most HIST_STACKTRACE_DEPTH). */
	for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) {
		if (!stack[len])
			break;
	}

	len *= sizeof(long);

	/* Find the dynamic section to copy the stack into. */
	data_loc = (void *)entry + data_offset;
	memcpy(data_loc, stack, len);

	/* Fill in the field that holds the offset/len combo */

	data->as_dynamic.offset = data_offset;
	data->as_dynamic.len = len;

	(*n_u64)++;

	return len;
}

/*
 * Probe attached to a synthetic event's tracepoint: size the record
 * (fixed u64 slots plus any dynamic string/stack data), reserve ring
 * buffer space, then fill each field from the variable values.
 */
static notrace void trace_event_raw_event_synth(void *__data,
						u64 *var_ref_vals,
						unsigned int *var_ref_idx)
{
	unsigned int i, n_u64, val_idx, len, data_size = 0;
	struct trace_event_file *trace_file = __data;
	struct synth_trace_event *entry;
	struct trace_event_buffer fbuffer;
	struct trace_buffer *buffer;
	struct synth_event *event;
	int fields_size = 0;

	event = trace_file->event_call->data;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	fields_size = event->n_u64 * sizeof(u64);

	/* Add the space needed by each dynamically sized field. */
	for (i = 0; i < event->n_dynamic_fields; i++) {
		unsigned int field_pos = event->dynamic_fields[i]->field_pos;
		char *str_val;

		val_idx = var_ref_idx[field_pos];
		str_val = (char *)(long)var_ref_vals[val_idx];

		if (event->dynamic_fields[i]->is_stack) {
			/* reserve one extra element for size */
			len = *((unsigned long *)str_val) + 1;
			len *= sizeof(unsigned long);
		} else {
			len = fetch_store_strlen((unsigned long)str_val);
		}

		fields_size += len;
	}

	/*
	 * Avoid ring buffer recursion detection, as this event
	 * is being performed within another event.
	 */
	buffer = trace_file->tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + fields_size);
	if (!entry)
		goto out;

	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
		val_idx = var_ref_idx[i];
		if (event->fields[i]->is_string) {
			char *str_val = (char *)(long)var_ref_vals[val_idx];

			len = trace_string(entry, event, str_val,
					   event->fields[i]->is_dynamic,
					   data_size, &n_u64);
			data_size += len; /* only dynamic string increments */
		} else if (event->fields[i]->is_stack) {
			long *stack = (long *)(long)var_ref_vals[val_idx];

			len = trace_stack(entry, event, stack,
					  data_size, &n_u64);
			data_size += len;
		} else {
			struct synth_field *field = event->fields[i];
			u64 val = var_ref_vals[val_idx];

			/* store at the field's natural width */
			switch (field->size) {
			case 1:
				entry->fields[n_u64].as_u8 = (u8)val;
				break;

			case 2:
				entry->fields[n_u64].as_u16 = (u16)val;
				break;

			case 4:
				entry->fields[n_u64].as_u32 = (u32)val;
				break;

			default:
				entry->fields[n_u64].as_u64 = val;
				break;
			}
			n_u64++;
		}
	}

	trace_event_buffer_commit(&fbuffer);
out:
	ring_buffer_nest_end(buffer);
}

/* Free the print_fmt string built by set_synth_event_print_fmt(). */
static void free_synth_event_print_fmt(struct trace_event_call *call)
{
	if (call) {
		kfree(call->print_fmt);
		call->print_fmt = NULL;
	}
}

/*
 * Render the event's print_fmt into @buf; with len == 0 only the
 * required length is computed. Returns the length of print_fmt.
 */
static int __set_synth_event_print_fmt(struct synth_event *event,
				       char *buf, int len)
{
	const char *fmt;
	int pos = 0;
	int i;

	/* When len=0, we just calculate the needed length */
#define \
LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < event->n_fields; i++) {
		fmt = synth_field_fmt(event->fields[i]->type);
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
				event->fields[i]->name, fmt,
				i == event->n_fields - 1 ? "" : " ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	/* Then the argument list matching each conversion above. */
	for (i = 0; i < event->n_fields; i++) {
		if (event->fields[i]->is_string &&
		    event->fields[i]->is_dynamic)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
				", __get_str(%s)", event->fields[i]->name);
		else if (event->fields[i]->is_stack)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
				", __get_stacktrace(%s)", event->fields[i]->name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO,
				", REC->%s", event->fields[i]->name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

/* Build and attach the print_fmt string shown in the event's format file. */
static int set_synth_event_print_fmt(struct trace_event_call *call)
{
	struct synth_event *event = call->data;
	char *print_fmt;
	int len;

	/* First: called with 0 length to calculate the needed length */
	len = __set_synth_event_print_fmt(event, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_synth_event_print_fmt(event, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

/* Free one parsed field and the strings it owns. */
static void free_synth_field(struct synth_field *field)
{
	kfree(field->type);
	kfree(field->name);
	kfree(field);
}

static int check_field_version(const char *prefix, const char *field_type,
			       const char *field_name)
{
	/*
	 * For backward compatibility, the old synthetic event command
	 * format did not require semicolons, and in order to not
	 * break user space, that old format must still work.
If a new
	 * feature is added, then the format that uses the new feature
	 * will be required to have semicolons, as nothing that uses
	 * the old format would be using the new, yet to be created,
	 * feature. When a new feature is added, this will detect it,
	 * and return a number greater than 1, and require the format
	 * to use semicolons.
	 */
	return 1;
}

/*
 * Parse one "type name" (optionally "unsigned type name") field
 * description from @argv. On success, returns a newly allocated
 * synth_field, advances *consumed past the tokens used and sets
 * *field_version. An array suffix on the name (e.g. "name[4]") is
 * stripped from the name and appended to the type.
 */
static struct synth_field *parse_synth_field(int argc, char **argv,
					     int *consumed, int *field_version)
{
	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
	struct synth_field *field;
	int len, ret = -ENOMEM;
	struct seq_buf s;
	ssize_t size;

	if (!strcmp(field_type, "unsigned")) {
		if (argc < 3) {
			synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
			return ERR_PTR(-EINVAL);
		}
		prefix = "unsigned ";
		field_type = argv[1];
		field_name = argv[2];
		*consumed += 3;
	} else {
		field_name = argv[1];
		*consumed += 2;
	}

	if (!field_name) {
		synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
		return ERR_PTR(-EINVAL);
	}

	*field_version = check_field_version(prefix, field_type, field_name);

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		return ERR_PTR(-ENOMEM);

	/* Strip any array suffix from the name. */
	len = strlen(field_name);
	array = strchr(field_name, '[');
	if (array)
		len -= strlen(array);

	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
	if (!field->name)
		goto free;

	if (!is_good_name(field->name)) {
		synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
		ret = -EINVAL;
		goto free;
	}

	/* Rebuild the full type as "[unsigned ]type[array-suffix]". */
	len = strlen(field_type) + 1;

	if (array)
		len += strlen(array);

	if (prefix)
		len += strlen(prefix);

	field->type = kzalloc(len, GFP_KERNEL);
	if (!field->type)
		goto free;

	seq_buf_init(&s, field->type, len);
	if (prefix)
		seq_buf_puts(&s, prefix);
	seq_buf_puts(&s, field_type);
	if (array)
		seq_buf_puts(&s, array);
	if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
		goto free;

	s.buffer[s.len] = '\0';

	size = synth_field_size(field->type);
	if (size < 0) {
		if (array)
			synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
		else
			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
		ret = -EINVAL;
		goto free;
	} else if (size == 0) {
		if (synth_field_is_string(field->type) ||
		    synth_field_is_stack(field->type)) {
			char *type;

			/* Dynamically sized: re-type as "__data_loc <type>". */
			len = sizeof("__data_loc ") + strlen(field->type) + 1;
			type = kzalloc(len, GFP_KERNEL);
			if (!type)
				goto free;

			seq_buf_init(&s, type, len);
			seq_buf_puts(&s, "__data_loc ");
			seq_buf_puts(&s, field->type);

			if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
				goto free;
			s.buffer[s.len] = '\0';

			kfree(field->type);
			field->type = type;

			field->is_dynamic = true;
			/* The record itself only stores the offset/len slot. */
			size = sizeof(u64);
		} else {
			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
			ret = -EINVAL;
			goto free;
		}
	}
	field->size = size;

	if (synth_field_is_string(field->type))
		field->is_string = true;
	else if (synth_field_is_stack(field->type))
		field->is_stack = true;

	field->is_signed = synth_field_signed(field->type);
out:
	return field;
free:
	free_synth_field(field);
	field = ERR_PTR(ret);
	goto out;
}

static void free_synth_tracepoint(struct tracepoint *tp)
{
	if (!tp)
		return;

	kfree(tp->name);
	kfree(tp);
}

/* Allocate the tracepoint backing a synthetic event named @name. */
static struct tracepoint *alloc_synth_tracepoint(char *name)
{
	struct tracepoint *tp;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOMEM);

	tp->name = kstrdup(name, GFP_KERNEL);
	if (!tp->name) {
		kfree(tp);
		return ERR_PTR(-ENOMEM);
	}

	return tp;
}

/* Look up a registered synthetic event by name, or return NULL. */
struct synth_event *find_synth_event(const char *name)
{
	struct dyn_event *pos;
	struct synth_event *event;

	for_each_dyn_event(pos) {
		if (!is_synth_event(pos))
			continue;
		event = to_synth_event(pos);
		if (strcmp(event->name, name) == 0)
			return event;
	}

	return NULL;
}

static struct trace_event_fields synth_event_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = synth_event_define_fields },
	{}
};

/*
 * Registration callback: pin the owning module across register and
 * drop the reference on unregister, so the event cannot outlive the
 * module that created it.
 */
static int synth_event_reg(struct trace_event_call *call,
			   enum trace_reg type, void *data)
{
	struct synth_event *event = container_of(call, struct synth_event, call);

	switch (type) {
#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
#endif
	case TRACE_REG_REGISTER:
		if (!try_module_get(event->mod))
			return -EBUSY;
		break;
	default:
		break;
	}

	int ret = trace_event_reg(call, type, data);

	switch (type) {
#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_UNREGISTER:
#endif
	case TRACE_REG_UNREGISTER:
		module_put(event->mod);
		break;
	default:
		break;
	}
	return ret;
}

/*
 * Hook a synthetic event into the trace core: allocate its tracepoint,
 * register the trace_event and add the event call. Partially installed
 * state is unwound on failure.
 */
static int register_synth_event(struct synth_event *event)
{
	struct trace_event_call *call = &event->call;
	int ret = 0;

	event->call.class = &event->class;
	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
	if (!event->class.system) {
		ret = -ENOMEM;
		goto out;
	}

	event->tp = alloc_synth_tracepoint(event->name);
	if (IS_ERR(event->tp)) {
		ret = PTR_ERR(event->tp);
		event->tp = NULL;
		goto out;
	}

	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &synth_event_funcs;
	call->class->fields_array = synth_event_fields_array;

	/* register_trace_event() returns the assigned type, 0 on failure */
	ret = register_trace_event(&call->event);
	if (!ret) {
		ret = -ENODEV;
		goto out;
	}
	call->flags = TRACE_EVENT_FL_TRACEPOINT;
	call->class->reg = synth_event_reg;
	call->class->probe = trace_event_raw_event_synth;
	call->data = event;
	call->tp =
event->tp;

	ret = trace_add_event_call(call);
	if (ret) {
		pr_warn("Failed to register synthetic event: %s\n",
			trace_event_name(call));
		goto err;
	}

	ret = set_synth_event_print_fmt(call);
	/* unregister_trace_event() will be called inside */
	if (ret < 0)
		trace_remove_event_call(call);
out:
	return ret;
err:
	unregister_trace_event(&call->event);
	goto out;
}

static int unregister_synth_event(struct synth_event *event)
{
	struct trace_event_call *call = &event->call;
	int ret;

	ret = trace_remove_event_call(call);

	return ret;
}

/* Free an event and everything it owns: fields, name, tracepoint, fmt. */
static void free_synth_event(struct synth_event *event)
{
	unsigned int i;

	if (!event)
		return;

	for (i = 0; i < event->n_fields; i++)
		free_synth_field(event->fields[i]);

	kfree(event->fields);
	kfree(event->dynamic_fields);
	kfree(event->name);
	kfree(event->class.system);
	free_synth_tracepoint(event->tp);
	free_synth_event_print_fmt(&event->call);
	kfree(event);
}

/*
 * Allocate a synth_event named @name and take ownership of the
 * @n_fields parsed fields, recording each field's position and
 * collecting the dynamically sized ones in dynamic_fields[].
 * Returns the event or an ERR_PTR on allocation failure.
 */
static struct synth_event *alloc_synth_event(const char *name, int n_fields,
					     struct synth_field **fields)
{
	unsigned int i, j, n_dynamic_fields = 0;
	struct synth_event *event;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event) {
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	event->name = kstrdup(name, GFP_KERNEL);
	if (!event->name) {
		kfree(event);
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
	if (!event->fields) {
		free_synth_event(event);
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	for (i = 0; i < n_fields; i++)
		if (fields[i]->is_dynamic)
			n_dynamic_fields++;

	if (n_dynamic_fields) {
		event->dynamic_fields = kcalloc(n_dynamic_fields,
						sizeof(*event->dynamic_fields),
						GFP_KERNEL);
		if (!event->dynamic_fields) {
			free_synth_event(event);
			event = ERR_PTR(-ENOMEM);
			goto out;
		}
	}

	dyn_event_init(&event->devent, &synth_event_ops);

	for (i = 0, j = 0; i < n_fields; i++) {
		fields[i]->field_pos = i;
		event->fields[i] = fields[i];

		if (fields[i]->is_dynamic)
			event->dynamic_fields[j++] = fields[i];
	}
	event->n_dynamic_fields = j;
	event->n_fields = n_fields;
out:
	return event;
}

/*
 * dynevent arg-pair validation: accept a type with a known nonzero
 * size, or any type containing '[' (arrays size to 0 here).
 */
static int synth_event_check_arg_fn(void *data)
{
	struct dynevent_arg_pair *arg_pair = data;
	int size;

	size = synth_field_size((char *)arg_pair->lhs);
	if (size == 0) {
		if (strstr((char *)arg_pair->lhs, "["))
			return 0;
	}

	return size ? 0 : -EINVAL;
}

/**
 * synth_event_add_field - Add a new field to a synthetic event cmd
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @type: The type of the new field to add
 * @name: The name of the new field to add
 *
 * Add a new field to a synthetic event cmd object. Field ordering is in
 * the same order the fields are added.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
			  const char *name)
{
	struct dynevent_arg_pair arg_pair;
	int ret;

	if (cmd->type != DYNEVENT_TYPE_SYNTH)
		return -EINVAL;

	if (!type || !name)
		return -EINVAL;

	dynevent_arg_pair_init(&arg_pair, 0, ';');

	arg_pair.lhs = type;
	arg_pair.rhs = name;

	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
	if (ret)
		return ret;

	/* enforce the overall field-count limit */
	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_add_field);

/**
 * synth_event_add_field_str - Add a new field to a synthetic event cmd
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @type_name: The type and name of the new field to add, as a single string
 *
 * Add a new field to a synthetic event cmd object, as a single
 * string. The @type_name string is expected to be of the form 'type
 * name', which will be appended by ';'. No sanity checking is done -
 * what's passed in is assumed to already be well-formed. Field
 * ordering is in the same order the fields are added.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
{
	struct dynevent_arg arg;
	int ret;

	if (cmd->type != DYNEVENT_TYPE_SYNTH)
		return -EINVAL;

	if (!type_name)
		return -EINVAL;

	dynevent_arg_init(&arg, ';');

	arg.str = type_name;

	/* no validation callback: @type_name is trusted to be well-formed */
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_add_field_str);

/**
 * synth_event_add_fields - Add multiple fields to a synthetic event cmd
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @fields: An array of type/name field descriptions
 * @n_fields: The number of field descriptions contained in the fields array
 *
 * Add a new set of fields to a synthetic event cmd object. The event
 * fields that will be defined for the event should be passed in as an
 * array of struct synth_field_desc, and the number of elements in the
 * array passed in as n_fields. Field ordering will retain the
 * ordering given in the fields array.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_add_fields(struct dynevent_cmd *cmd,
			   struct synth_field_desc *fields,
			   unsigned int n_fields)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i < n_fields; i++) {
		if (fields[i].type == NULL || fields[i].name == NULL) {
			ret = -EINVAL;
			break;
		}

		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_add_fields);

/**
 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @name: The name of the synthetic event
 * @mod: The module creating the event, NULL if not created from a module
 * @...: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the synth_event_gen_cmd_start() wrapper, which
 * automatically adds a NULL to the end of the arg list. If this
 * function is used directly, make sure the last arg in the variable
 * arg list is NULL.
 *
 * Generate a synthetic event command to be executed by
 * synth_event_gen_cmd_end(). This function can be used to generate
 * the complete command or only the first part of it; in the latter
 * case, synth_event_add_field(), synth_event_add_field_str(), or
 * synth_event_add_fields() can be used to add more fields following
 * this.
 *
 * There should be an even number variable args, each pair consisting
 * of a type followed by a field name.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
				struct module *mod, ...)
1191 { 1192 struct dynevent_arg arg; 1193 va_list args; 1194 int ret; 1195 1196 cmd->event_name = name; 1197 cmd->private_data = mod; 1198 1199 if (cmd->type != DYNEVENT_TYPE_SYNTH) 1200 return -EINVAL; 1201 1202 dynevent_arg_init(&arg, 0); 1203 arg.str = name; 1204 ret = dynevent_arg_add(cmd, &arg, NULL); 1205 if (ret) 1206 return ret; 1207 1208 va_start(args, mod); 1209 for (;;) { 1210 const char *type, *name; 1211 1212 type = va_arg(args, const char *); 1213 if (!type) 1214 break; 1215 name = va_arg(args, const char *); 1216 if (!name) 1217 break; 1218 1219 if (++cmd->n_fields > SYNTH_FIELDS_MAX) { 1220 ret = -EINVAL; 1221 break; 1222 } 1223 1224 ret = synth_event_add_field(cmd, type, name); 1225 if (ret) 1226 break; 1227 } 1228 va_end(args); 1229 1230 return ret; 1231 } 1232 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start); 1233 1234 /** 1235 * synth_event_gen_cmd_array_start - Start synthetic event command from an array 1236 * @cmd: A pointer to the dynevent_cmd struct representing the new event 1237 * @name: The name of the synthetic event 1238 * @mod: The module creating the event, NULL if not created from a module 1239 * @fields: An array of type/name field descriptions 1240 * @n_fields: The number of field descriptions contained in the fields array 1241 * 1242 * Generate a synthetic event command to be executed by 1243 * synth_event_gen_cmd_end(). This function can be used to generate 1244 * the complete command or only the first part of it; in the latter 1245 * case, synth_event_add_field(), synth_event_add_field_str(), or 1246 * synth_event_add_fields() can be used to add more fields following 1247 * this. 1248 * 1249 * The event fields that will be defined for the event should be 1250 * passed in as an array of struct synth_field_desc, and the number of 1251 * elements in the array passed in as n_fields. Field ordering will 1252 * retain the ordering given in the fields array. 1253 * 1254 * See synth_field_size() for available types. 
If field_name contains 1255 * [n] the field is considered to be an array. 1256 * 1257 * Return: 0 if successful, error otherwise. 1258 */ 1259 int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name, 1260 struct module *mod, 1261 struct synth_field_desc *fields, 1262 unsigned int n_fields) 1263 { 1264 struct dynevent_arg arg; 1265 unsigned int i; 1266 int ret = 0; 1267 1268 cmd->event_name = name; 1269 cmd->private_data = mod; 1270 1271 if (cmd->type != DYNEVENT_TYPE_SYNTH) 1272 return -EINVAL; 1273 1274 if (n_fields > SYNTH_FIELDS_MAX) 1275 return -EINVAL; 1276 1277 dynevent_arg_init(&arg, 0); 1278 arg.str = name; 1279 ret = dynevent_arg_add(cmd, &arg, NULL); 1280 if (ret) 1281 return ret; 1282 1283 for (i = 0; i < n_fields; i++) { 1284 if (fields[i].type == NULL || fields[i].name == NULL) 1285 return -EINVAL; 1286 1287 ret = synth_event_add_field(cmd, fields[i].type, fields[i].name); 1288 if (ret) 1289 break; 1290 } 1291 1292 return ret; 1293 } 1294 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start); 1295 1296 static int __create_synth_event(const char *name, const char *raw_fields) 1297 { 1298 char **argv, *field_str, *tmp_fields, *saved_fields = NULL; 1299 struct synth_field *field, *fields[SYNTH_FIELDS_MAX]; 1300 int consumed, cmd_version = 1, n_fields_this_loop; 1301 int i, argc, n_fields = 0, ret = 0; 1302 struct synth_event *event = NULL; 1303 1304 /* 1305 * Argument syntax: 1306 * - Add synthetic event: <event_name> field[;field] ... 1307 * - Remove synthetic event: !<event_name> field[;field] ... 
1308 * where 'field' = type field_name 1309 */ 1310 1311 if (name[0] == '\0') { 1312 synth_err(SYNTH_ERR_INVALID_CMD, 0); 1313 return -EINVAL; 1314 } 1315 1316 if (!is_good_name(name)) { 1317 synth_err(SYNTH_ERR_BAD_NAME, errpos(name)); 1318 return -EINVAL; 1319 } 1320 1321 mutex_lock(&event_mutex); 1322 1323 event = find_synth_event(name); 1324 if (event) { 1325 synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name)); 1326 ret = -EEXIST; 1327 goto err; 1328 } 1329 1330 tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL); 1331 if (!tmp_fields) { 1332 ret = -ENOMEM; 1333 goto err; 1334 } 1335 1336 while ((field_str = strsep(&tmp_fields, ";")) != NULL) { 1337 argv = argv_split(GFP_KERNEL, field_str, &argc); 1338 if (!argv) { 1339 ret = -ENOMEM; 1340 goto err; 1341 } 1342 1343 if (!argc) { 1344 argv_free(argv); 1345 continue; 1346 } 1347 1348 n_fields_this_loop = 0; 1349 consumed = 0; 1350 while (argc > consumed) { 1351 int field_version; 1352 1353 field = parse_synth_field(argc - consumed, 1354 argv + consumed, &consumed, 1355 &field_version); 1356 if (IS_ERR(field)) { 1357 ret = PTR_ERR(field); 1358 goto err_free_arg; 1359 } 1360 1361 /* 1362 * Track the highest version of any field we 1363 * found in the command. 1364 */ 1365 if (field_version > cmd_version) 1366 cmd_version = field_version; 1367 1368 /* 1369 * Now sort out what is and isn't valid for 1370 * each supported version. 1371 * 1372 * If we see more than 1 field per loop, it 1373 * means we have multiple fields between 1374 * semicolons, and that's something we no 1375 * longer support in a version 2 or greater 1376 * command. 
1377 */ 1378 if (cmd_version > 1 && n_fields_this_loop >= 1) { 1379 synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str)); 1380 ret = -EINVAL; 1381 goto err_free_arg; 1382 } 1383 1384 if (n_fields == SYNTH_FIELDS_MAX) { 1385 synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0); 1386 ret = -EINVAL; 1387 goto err_free_arg; 1388 } 1389 fields[n_fields++] = field; 1390 1391 n_fields_this_loop++; 1392 } 1393 argv_free(argv); 1394 1395 if (consumed < argc) { 1396 synth_err(SYNTH_ERR_INVALID_CMD, 0); 1397 ret = -EINVAL; 1398 goto err; 1399 } 1400 1401 } 1402 1403 if (n_fields == 0) { 1404 synth_err(SYNTH_ERR_INVALID_CMD, 0); 1405 ret = -EINVAL; 1406 goto err; 1407 } 1408 1409 event = alloc_synth_event(name, n_fields, fields); 1410 if (IS_ERR(event)) { 1411 ret = PTR_ERR(event); 1412 event = NULL; 1413 goto err; 1414 } 1415 ret = register_synth_event(event); 1416 if (!ret) 1417 dyn_event_add(&event->devent, &event->call); 1418 else 1419 free_synth_event(event); 1420 out: 1421 mutex_unlock(&event_mutex); 1422 1423 kfree(saved_fields); 1424 1425 return ret; 1426 err_free_arg: 1427 argv_free(argv); 1428 err: 1429 for (i = 0; i < n_fields; i++) 1430 free_synth_field(fields[i]); 1431 1432 goto out; 1433 } 1434 1435 /** 1436 * synth_event_create - Create a new synthetic event 1437 * @name: The name of the new synthetic event 1438 * @fields: An array of type/name field descriptions 1439 * @n_fields: The number of field descriptions contained in the fields array 1440 * @mod: The module creating the event, NULL if not created from a module 1441 * 1442 * Create a new synthetic event with the given name under the 1443 * trace/events/synthetic/ directory. The event fields that will be 1444 * defined for the event should be passed in as an array of struct 1445 * synth_field_desc, and the number elements in the array passed in as 1446 * n_fields. Field ordering will retain the ordering given in the 1447 * fields array. 
1448 * 1449 * If the new synthetic event is being created from a module, the mod 1450 * param must be non-NULL. This will ensure that the trace buffer 1451 * won't contain unreadable events. 1452 * 1453 * The new synth event should be deleted using synth_event_delete() 1454 * function. The new synthetic event can be generated from modules or 1455 * other kernel code using trace_synth_event() and related functions. 1456 * 1457 * Return: 0 if successful, error otherwise. 1458 */ 1459 int synth_event_create(const char *name, struct synth_field_desc *fields, 1460 unsigned int n_fields, struct module *mod) 1461 { 1462 struct dynevent_cmd cmd; 1463 char *buf; 1464 int ret; 1465 1466 buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL); 1467 if (!buf) 1468 return -ENOMEM; 1469 1470 synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN); 1471 1472 ret = synth_event_gen_cmd_array_start(&cmd, name, mod, 1473 fields, n_fields); 1474 if (ret) 1475 goto out; 1476 1477 ret = synth_event_gen_cmd_end(&cmd); 1478 out: 1479 kfree(buf); 1480 1481 return ret; 1482 } 1483 EXPORT_SYMBOL_GPL(synth_event_create); 1484 1485 static int destroy_synth_event(struct synth_event *se) 1486 { 1487 int ret; 1488 1489 if (se->ref) 1490 return -EBUSY; 1491 1492 if (trace_event_dyn_busy(&se->call)) 1493 return -EBUSY; 1494 1495 ret = unregister_synth_event(se); 1496 if (!ret) { 1497 dyn_event_remove(&se->devent); 1498 free_synth_event(se); 1499 } 1500 1501 return ret; 1502 } 1503 1504 /** 1505 * synth_event_delete - Delete a synthetic event 1506 * @event_name: The name of the new synthetic event 1507 * 1508 * Delete a synthetic event that was created with synth_event_create(). 1509 * 1510 * Return: 0 if successful, error otherwise. 
 */
int synth_event_delete(const char *event_name)
{
	struct synth_event *se = NULL;
	struct module *mod = NULL;
	int ret = -ENOENT;

	mutex_lock(&event_mutex);
	se = find_synth_event(event_name);
	if (se) {
		mod = se->mod;
		ret = destroy_synth_event(se);
	}
	mutex_unlock(&event_mutex);

	if (mod) {
		/*
		 * It is safest to reset the ring buffer if the module
		 * being unloaded registered any events that were
		 * used. The only worry is if a new module gets
		 * loaded, and takes on the same id as the events of
		 * this module. When printing out the buffer, traced
		 * events left over from this module may be passed to
		 * the new module events and unexpected results may
		 * occur.
		 */
		tracing_reset_all_online_cpus();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_delete);

/*
 * Sanity-check a raw command: either a deletion ('!name'), or a name
 * followed by at least one type/field pair before the first ';'.
 */
static int check_command(const char *raw_command)
{
	char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
	int argc, ret = 0;

	/* strsep() mutates its argument, so work on a private copy. */
	cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	name_and_field = strsep(&cmd, ";");
	if (!name_and_field) {
		ret = -EINVAL;
		goto free;
	}

	/* Deletion commands need no fields; accept as-is. */
	if (name_and_field[0] == '!')
		goto free;

	argv = argv_split(GFP_KERNEL, name_and_field, &argc);
	if (!argv) {
		ret = -ENOMEM;
		goto free;
	}
	argv_free(argv);

	/* Need at least: <name> <type> <field_name>. */
	if (argc < 3)
		ret = -EINVAL;
 free:
	kfree(saved_cmd);

	return ret;
}

/* Dispatch a '<name> field...' or '!<name>' command from synthetic_events. */
static int create_or_delete_synth_event(const char *raw_command)
{
	char *name = NULL, *fields, *p;
	int ret = 0;

	raw_command = skip_spaces(raw_command);
	if (raw_command[0] == '\0')
		return ret;

	/* Remember the command text for error position reporting. */
	last_cmd_set(raw_command);

	ret = check_command(raw_command);
	if (ret) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return ret;
	}

	/* The event name ends at the first whitespace. */
	p = strpbrk(raw_command, " \t");
	if (!p && raw_command[0] != '!') {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		ret = -EINVAL;
		goto free;
	}

	name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	if (name[0] == '!') {
		ret = synth_event_delete(name + 1);
		goto free;
	}

	fields = skip_spaces(p);

	ret = __create_synth_event(name, fields);
 free:
	kfree(name);

	return ret;
}

/* dynevent_cmd run callback: execute the built command text. */
static int synth_event_run_command(struct dynevent_cmd *cmd)
{
	struct synth_event *se;
	int ret;

	ret = create_or_delete_synth_event(cmd->seq.buffer);
	if (ret)
		return ret;

	se = find_synth_event(cmd->event_name);
	if (WARN_ON(!se))
		return -ENOENT;

	/* Record the owning module so its events can be tracked. */
	se->mod = cmd->private_data;

	return ret;
}

/**
 * synth_event_cmd_init - Initialize a synthetic event command object
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @buf: A pointer to the buffer used to build the command
 * @maxlen: The length of the buffer passed in @buf
 *
 * Initialize a synthetic event command object. Use this before
 * calling any of the other dynevent_cmd functions.
 */
void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
{
	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
			  synth_event_run_command);
}
EXPORT_SYMBOL_GPL(synth_event_cmd_init);

static inline int
__synth_event_trace_init(struct trace_event_file *file,
			 struct synth_event_trace_state *trace_state)
{
	int ret = 0;

	memset(trace_state, '\0', sizeof(*trace_state));

	/*
	 * Normal event tracing doesn't get called at all unless the
	 * ENABLED bit is set (which attaches the probe thus allowing
	 * this code to be called, etc).  Because this is called
	 * directly by the user, we don't have that but we still need
	 * to honor not logging when disabled.  For the iterated
	 * trace case, we save the enabled state upon start and just
	 * ignore the following data calls.
	 */
	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
	    trace_trigger_soft_disabled(file)) {
		trace_state->disabled = true;
		ret = -ENOENT;
		goto out;
	}

	trace_state->event = file->event_call->data;
 out:
	return ret;
}

static inline int
__synth_event_trace_start(struct trace_event_file *file,
			  struct synth_event_trace_state *trace_state,
			  int dynamic_fields_size)
{
	int entry_size, fields_size = 0;
	int ret = 0;

	fields_size = trace_state->event->n_u64 * sizeof(u64);
	fields_size += dynamic_fields_size;

	/*
	 * Avoid ring buffer recursion detection, as this event
	 * is being performed within another event.
	 */
	trace_state->buffer = file->tr->array_buffer.buffer;
	ring_buffer_nest_start(trace_state->buffer);

	entry_size = sizeof(*trace_state->entry) + fields_size;
	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
							file,
							entry_size);
	if (!trace_state->entry) {
		/* Reservation failed; undo the nesting before erroring out. */
		ring_buffer_nest_end(trace_state->buffer);
		ret = -EINVAL;
	}

	return ret;
}

static inline void
__synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	trace_event_buffer_commit(&trace_state->fbuffer);

	ring_buffer_nest_end(trace_state->buffer);
}

/**
 * synth_event_trace - Trace a synthetic event
 * @file: The trace_event_file representing the synthetic event
 * @n_vals: The number of values in vals
 * @...: Variable number of args containing the event values
 *
 * Trace a synthetic event using the values passed in the variable
 * argument list.
 *
 * The argument list should be a list 'n_vals' u64 values. The number
 * of vals must match the number of field in the synthetic event, and
 * must be in the same order as the synthetic event fields.
 *
 * All vals should be cast to u64, and string vals are just pointers
 * to strings, cast to u64. Strings will be copied into space
 * reserved in the event for the string, using these pointers.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
{
	unsigned int i, n_u64, len, data_size = 0;
	struct synth_event_trace_state state;
	va_list args;
	int ret;

	ret = __synth_event_trace_init(file, &state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0; /* just disabled, not really an error */
		return ret;
	}

	if (state.event->n_dynamic_fields) {
		/* First pass: size the dynamic string data for the reserve. */
		va_start(args, n_vals);

		for (i = 0; i < state.event->n_fields; i++) {
			u64 val = va_arg(args, u64);

			if (state.event->fields[i]->is_string &&
			    state.event->fields[i]->is_dynamic) {
				char *str_val = (char *)(long)val;

				data_size += strlen(str_val) + 1;
			}
		}

		va_end(args);
	}

	ret = __synth_event_trace_start(file, &state, data_size);
	if (ret)
		return ret;

	if (n_vals != state.event->n_fields) {
		ret = -EINVAL;
		goto out;
	}

	data_size = 0;

	/* Second pass: copy each value into the reserved entry. */
	va_start(args, n_vals);
	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
		u64 val;

		val = va_arg(args, u64);

		if (state.event->fields[i]->is_string) {
			char *str_val = (char *)(long)val;

			len = trace_string(state.entry, state.event, str_val,
					   state.event->fields[i]->is_dynamic,
					   data_size, &n_u64);
			data_size += len; /* only dynamic string increments */
		} else {
			struct synth_field *field = state.event->fields[i];

			switch (field->size) {
			case 1:
				state.entry->fields[n_u64].as_u8 = (u8)val;
				break;

			case 2:
				state.entry->fields[n_u64].as_u16 = (u16)val;
				break;

			case 4:
				state.entry->fields[n_u64].as_u32 = (u32)val;
				break;

			default:
				state.entry->fields[n_u64].as_u64 = val;
				break;
			}
			n_u64++;
		}
	}
	va_end(args);
 out:
	__synth_event_trace_end(&state);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace);

/**
 * synth_event_trace_array - Trace a synthetic event from an array
 * @file: The trace_event_file representing the synthetic event
 * @vals: Array of values
 * @n_vals: The number of values in vals
 *
 * Trace a synthetic event using the values passed in as 'vals'.
 *
 * The 'vals' array is just an array of 'n_vals' u64. The number of
 * vals must match the number of field in the synthetic event, and
 * must be in the same order as the synthetic event fields.
 *
 * All vals should be cast to u64, and string vals are just pointers
 * to strings, cast to u64. Strings will be copied into space
 * reserved in the event for the string, using these pointers.
 *
 * Return: 0 on success, err otherwise.
1842 */ 1843 int synth_event_trace_array(struct trace_event_file *file, u64 *vals, 1844 unsigned int n_vals) 1845 { 1846 unsigned int i, n_u64, field_pos, len, data_size = 0; 1847 struct synth_event_trace_state state; 1848 char *str_val; 1849 int ret; 1850 1851 ret = __synth_event_trace_init(file, &state); 1852 if (ret) { 1853 if (ret == -ENOENT) 1854 ret = 0; /* just disabled, not really an error */ 1855 return ret; 1856 } 1857 1858 if (state.event->n_dynamic_fields) { 1859 for (i = 0; i < state.event->n_dynamic_fields; i++) { 1860 field_pos = state.event->dynamic_fields[i]->field_pos; 1861 str_val = (char *)(long)vals[field_pos]; 1862 len = strlen(str_val) + 1; 1863 data_size += len; 1864 } 1865 } 1866 1867 ret = __synth_event_trace_start(file, &state, data_size); 1868 if (ret) 1869 return ret; 1870 1871 if (n_vals != state.event->n_fields) { 1872 ret = -EINVAL; 1873 goto out; 1874 } 1875 1876 data_size = 0; 1877 1878 for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) { 1879 if (state.event->fields[i]->is_string) { 1880 char *str_val = (char *)(long)vals[i]; 1881 1882 len = trace_string(state.entry, state.event, str_val, 1883 state.event->fields[i]->is_dynamic, 1884 data_size, &n_u64); 1885 data_size += len; /* only dynamic string increments */ 1886 } else { 1887 struct synth_field *field = state.event->fields[i]; 1888 u64 val = vals[i]; 1889 1890 switch (field->size) { 1891 case 1: 1892 state.entry->fields[n_u64].as_u8 = (u8)val; 1893 break; 1894 1895 case 2: 1896 state.entry->fields[n_u64].as_u16 = (u16)val; 1897 break; 1898 1899 case 4: 1900 state.entry->fields[n_u64].as_u32 = (u32)val; 1901 break; 1902 1903 default: 1904 state.entry->fields[n_u64].as_u64 = val; 1905 break; 1906 } 1907 n_u64++; 1908 } 1909 } 1910 out: 1911 __synth_event_trace_end(&state); 1912 1913 return ret; 1914 } 1915 EXPORT_SYMBOL_GPL(synth_event_trace_array); 1916 1917 /** 1918 * synth_event_trace_start - Start piecewise synthetic event trace 1919 * @file: The trace_event_file 
representing the synthetic event 1920 * @trace_state: A pointer to object tracking the piecewise trace state 1921 * 1922 * Start the trace of a synthetic event field-by-field rather than all 1923 * at once. 1924 * 1925 * This function 'opens' an event trace, which means space is reserved 1926 * for the event in the trace buffer, after which the event's 1927 * individual field values can be set through either 1928 * synth_event_add_next_val() or synth_event_add_val(). 1929 * 1930 * A pointer to a trace_state object is passed in, which will keep 1931 * track of the current event trace state until the event trace is 1932 * closed (and the event finally traced) using 1933 * synth_event_trace_end(). 1934 * 1935 * Note that synth_event_trace_end() must be called after all values 1936 * have been added for each event trace, regardless of whether adding 1937 * all field values succeeded or not. 1938 * 1939 * Note also that for a given event trace, all fields must be added 1940 * using either synth_event_add_next_val() or synth_event_add_val() 1941 * but not both together or interleaved. 1942 * 1943 * Return: 0 on success, err otherwise. 
1944 */ 1945 int synth_event_trace_start(struct trace_event_file *file, 1946 struct synth_event_trace_state *trace_state) 1947 { 1948 int ret; 1949 1950 if (!trace_state) 1951 return -EINVAL; 1952 1953 ret = __synth_event_trace_init(file, trace_state); 1954 if (ret) { 1955 if (ret == -ENOENT) 1956 ret = 0; /* just disabled, not really an error */ 1957 return ret; 1958 } 1959 1960 if (trace_state->event->n_dynamic_fields) 1961 return -ENOTSUPP; 1962 1963 ret = __synth_event_trace_start(file, trace_state, 0); 1964 1965 return ret; 1966 } 1967 EXPORT_SYMBOL_GPL(synth_event_trace_start); 1968 1969 static int __synth_event_add_val(const char *field_name, u64 val, 1970 struct synth_event_trace_state *trace_state) 1971 { 1972 struct synth_field *field = NULL; 1973 struct synth_trace_event *entry; 1974 struct synth_event *event; 1975 int i, ret = 0; 1976 1977 if (!trace_state) { 1978 ret = -EINVAL; 1979 goto out; 1980 } 1981 1982 /* can't mix add_next_synth_val() with add_synth_val() */ 1983 if (field_name) { 1984 if (trace_state->add_next) { 1985 ret = -EINVAL; 1986 goto out; 1987 } 1988 trace_state->add_name = true; 1989 } else { 1990 if (trace_state->add_name) { 1991 ret = -EINVAL; 1992 goto out; 1993 } 1994 trace_state->add_next = true; 1995 } 1996 1997 if (trace_state->disabled) 1998 goto out; 1999 2000 event = trace_state->event; 2001 if (trace_state->add_name) { 2002 for (i = 0; i < event->n_fields; i++) { 2003 field = event->fields[i]; 2004 if (strcmp(field->name, field_name) == 0) 2005 break; 2006 } 2007 if (!field) { 2008 ret = -EINVAL; 2009 goto out; 2010 } 2011 } else { 2012 if (trace_state->cur_field >= event->n_fields) { 2013 ret = -EINVAL; 2014 goto out; 2015 } 2016 field = event->fields[trace_state->cur_field++]; 2017 } 2018 2019 entry = trace_state->entry; 2020 if (field->is_string) { 2021 char *str_val = (char *)(long)val; 2022 char *str_field; 2023 2024 if (field->is_dynamic) { /* add_val can't do dynamic strings */ 2025 ret = -EINVAL; 2026 goto out; 
2027 } 2028 2029 if (!str_val) { 2030 ret = -EINVAL; 2031 goto out; 2032 } 2033 2034 str_field = (char *)&entry->fields[field->offset]; 2035 strscpy(str_field, str_val, STR_VAR_LEN_MAX); 2036 } else { 2037 switch (field->size) { 2038 case 1: 2039 trace_state->entry->fields[field->offset].as_u8 = (u8)val; 2040 break; 2041 2042 case 2: 2043 trace_state->entry->fields[field->offset].as_u16 = (u16)val; 2044 break; 2045 2046 case 4: 2047 trace_state->entry->fields[field->offset].as_u32 = (u32)val; 2048 break; 2049 2050 default: 2051 trace_state->entry->fields[field->offset].as_u64 = val; 2052 break; 2053 } 2054 } 2055 out: 2056 return ret; 2057 } 2058 2059 /** 2060 * synth_event_add_next_val - Add the next field's value to an open synth trace 2061 * @val: The value to set the next field to 2062 * @trace_state: A pointer to object tracking the piecewise trace state 2063 * 2064 * Set the value of the next field in an event that's been opened by 2065 * synth_event_trace_start(). 2066 * 2067 * The val param should be the value cast to u64. If the value points 2068 * to a string, the val param should be a char * cast to u64. 2069 * 2070 * This function assumes all the fields in an event are to be set one 2071 * after another - successive calls to this function are made, one for 2072 * each field, in the order of the fields in the event, until all 2073 * fields have been set. If you'd rather set each field individually 2074 * without regard to ordering, synth_event_add_val() can be used 2075 * instead. 2076 * 2077 * Note however that synth_event_add_next_val() and 2078 * synth_event_add_val() can't be intermixed for a given event trace - 2079 * one or the other but not both can be used at the same time. 2080 * 2081 * Note also that synth_event_trace_end() must be called after all 2082 * values have been added for each event trace, regardless of whether 2083 * adding all field values succeeded or not. 2084 * 2085 * Return: 0 on success, err otherwise. 
 */
int synth_event_add_next_val(u64 val,
			     struct synth_event_trace_state *trace_state)
{
	/* A NULL field name selects the next field in declaration order. */
	return __synth_event_add_val(NULL, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_next_val);

/**
 * synth_event_add_val - Add a named field's value to an open synth trace
 * @field_name: The name of the synthetic event field value to set
 * @val: The value to set the named field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the named field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64. If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function looks up the field name, and if found, sets the field
 * to the specified value. This lookup makes this function more
 * expensive than synth_event_add_next_val(), so use that or the
 * none-piecewise synth_event_trace() instead if efficiency is more
 * important.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_val(const char *field_name, u64 val,
			struct synth_event_trace_state *trace_state)
{
	return __synth_event_add_val(field_name, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_val);

/**
 * synth_event_trace_end - End piecewise synthetic event trace
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * End the trace of a synthetic event opened by
 * synth_event_trace_start().
 *
 * This function 'closes' an event trace, which basically means that
 * it commits the reserved event and cleans up other loose ends.
 *
 * A pointer to a trace_state object is passed in, which will keep
 * track of the current event trace state opened with
 * synth_event_trace_start().
 *
 * Note that this function must be called after all values have been
 * added for each event trace, regardless of whether adding all field
 * values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	if (!trace_state)
		return -EINVAL;

	__synth_event_trace_end(trace_state);

	return 0;
}
EXPORT_SYMBOL_GPL(synth_event_trace_end);

/*
 * Handle 's:[synthetic/]<name> field[;field] ...' commands coming in
 * through the shared dynamic_events interface.
 */
static int create_synth_event(const char *raw_command)
{
	char *fields, *p;
	const char *name;
	int len, ret = 0;

	raw_command = skip_spaces(raw_command);
	if (raw_command[0] == '\0')
		return ret;

	/* Remember the command text for error position reporting. */
	last_cmd_set(raw_command);

	name = raw_command;

	/* Don't try to process if not our system */
	if (name[0] != 's' || name[1] != ':')
		return -ECANCELED;
	name += 2;

	/* The event name ends at the first whitespace. */
	p = strpbrk(raw_command, " \t");
	if (!p) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return -EINVAL;
	}

	fields = skip_spaces(p);

	/* This interface accepts group name prefix */
	if (strchr(name, '/')) {
		len = str_has_prefix(name, SYNTH_SYSTEM "/");
		if (len == 0) {
			synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
			return -EINVAL;
		}
		name += len;
	}

	len = name - raw_command;

	ret = check_command(raw_command + len);
	if (ret) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return ret;
	}

	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	ret = __create_synth_event(name, fields);

	kfree(name);

	return ret;
}

/* dyn_event_operations release callback. Caller holds event_mutex. */
static int synth_event_release(struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);
	int ret;

	/* Can't remove an event that's still referenced or in use. */
	if (event->ref)
		return -EBUSY;

	if (trace_event_dyn_busy(&event->call))
		return -EBUSY;

	ret = unregister_synth_event(event);
	if (ret)
		return ret;

	dyn_event_remove(ev);
	free_synth_event(event);
	return 0;
}

/* Print '<name>\t<type> <field>[; <type> <field>]...' for one event. */
static int __synth_event_show(struct seq_file *m, struct synth_event *event)
{
	struct synth_field *field;
	unsigned int i;
	char *type, *t;

	seq_printf(m, "%s\t", event->name);

	for (i = 0; i < event->n_fields; i++) {
		field = event->fields[i];

		type = field->type;
		t = strstr(type, "__data_loc");
		if (t) { /* __data_loc belongs in format but not event desc */
			t += sizeof("__data_loc");
			type = t;
		}

		/* parameter values */
		seq_printf(m, "%s %s%s", type, field->name,
			   i == event->n_fields - 1 ? "" : "; ");
	}

	seq_putc(m, '\n');

	return 0;
}

/* Show callback for the dynamic_events file ('s:<system>/' prefix form). */
static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	seq_printf(m, "s:%s/", event->class.system);

	return __synth_event_show(m, event);
}

/* Show callback for the synthetic_events file (no prefix). */
static int synth_events_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_synth_event(ev))
		return 0;

	return __synth_event_show(m, to_synth_event(ev));
}

static const struct seq_operations synth_events_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= synth_events_seq_show,
};

static int synth_events_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	/* Opening with O_TRUNC for write removes all existing events. */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&synth_event_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &synth_events_seq_op);
}

static ssize_t synth_events_write(struct file *file,
				  const char __user *buffer,
				  size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_synth_event);
}

static const struct file_operations synth_events_fops = {
	.open           = synth_events_open,
	.write		= synth_events_write,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/*
 * Register dynevent at core_initcall. This allows kernel to setup kprobe
 * events in postcore_initcall without tracefs.
 */
static __init int trace_events_synth_init_early(void)
{
	int err = 0;

	err = dyn_event_register(&synth_event_ops);
	if (err)
		pr_warn("Could not register synth_event_ops\n");

	return err;
}
core_initcall(trace_events_synth_init_early);

/* Create the tracefs 'synthetic_events' control file. */
static __init int trace_events_synth_init(void)
{
	struct dentry *entry = NULL;
	int err = 0;
	err = tracing_init_dentry();
	if (err)
		goto err;

	entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
				    NULL, NULL, &synth_events_fops);
	if (!entry) {
		err = -ENODEV;
		goto err;
	}

	return err;
 err:
	pr_warn("Could not create tracefs 'synthetic_events' entry\n");

	return err;
}

fs_initcall(trace_events_synth_init);