1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * trace_events_hist - trace event hist triggers 4 * 5 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com> 6 */ 7 8 #include <linux/module.h> 9 #include <linux/kallsyms.h> 10 #include <linux/security.h> 11 #include <linux/mutex.h> 12 #include <linux/slab.h> 13 #include <linux/stacktrace.h> 14 #include <linux/rculist.h> 15 #include <linux/tracefs.h> 16 17 /* for gfp flag names */ 18 #include <linux/trace_events.h> 19 #include <trace/events/mmflags.h> 20 21 #include "tracing_map.h" 22 #include "trace.h" 23 #include "trace_dynevent.h" 24 25 #define SYNTH_SYSTEM "synthetic" 26 #define SYNTH_FIELDS_MAX 32 27 28 #define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */ 29 30 #define ERRORS \ 31 C(NONE, "No error"), \ 32 C(DUPLICATE_VAR, "Variable already defined"), \ 33 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \ 34 C(TOO_MANY_VARS, "Too many variables defined"), \ 35 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \ 36 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \ 37 C(TRIGGER_EEXIST, "Hist trigger already exists"), \ 38 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \ 39 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \ 40 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \ 41 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \ 42 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \ 43 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \ 44 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \ 45 C(HIST_NOT_FOUND, "Matching event histogram not found"), \ 46 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \ 47 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \ 48 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \ 49 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \ 50 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \ 51 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \ 52 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \ 53 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \ 54 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \ 55 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \ 56 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \ 57 C(TOO_MANY_PARAMS, "Too many action params"), \ 58 C(PARAM_NOT_FOUND, "Couldn't find param"), \ 59 C(INVALID_PARAM, "Invalid action param"), \ 60 C(ACTION_NOT_FOUND, "No action found"), \ 61 C(NO_SAVE_PARAMS, "No params found for save()"), \ 62 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \ 63 C(ACTION_MISMATCH, "Handler doesn't support action"), \ 64 C(NO_CLOSING_PAREN, "No closing paren found"), \ 65 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \ 66 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \ 67 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \ 68 C(VAR_NOT_FOUND, "Couldn't find variable"), \ 69 C(FIELD_NOT_FOUND, "Couldn't find field"), 70 71 #undef C 72 #define C(a, b) HIST_ERR_##a 73 74 enum { ERRORS }; 75 76 #undef C 77 #define C(a, b) b 78 79 static const char *err_text[] = { ERRORS }; 80 81 struct hist_field; 82 83 typedef u64 (*hist_field_fn_t) (struct hist_field *field, 84 struct tracing_map_elt *elt, 85 struct ring_buffer_event *rbe, 86 void *event); 87 88 
#define HIST_FIELD_OPERANDS_MAX 2 89 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX) 90 #define HIST_ACTIONS_MAX 8 91 92 enum field_op_id { 93 FIELD_OP_NONE, 94 FIELD_OP_PLUS, 95 FIELD_OP_MINUS, 96 FIELD_OP_UNARY_MINUS, 97 }; 98 99 /* 100 * A hist_var (histogram variable) contains variable information for 101 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF 102 * flag set. A hist_var has a variable name e.g. ts0, and is 103 * associated with a given histogram trigger, as specified by 104 * hist_data. The hist_var idx is the unique index assigned to the 105 * variable by the hist trigger's tracing_map. The idx is what is 106 * used to set a variable's value and, by a variable reference, to 107 * retrieve it. 108 */ 109 struct hist_var { 110 char *name; 111 struct hist_trigger_data *hist_data; 112 unsigned int idx; 113 }; 114 115 struct hist_field { 116 struct ftrace_event_field *field; 117 unsigned long flags; 118 hist_field_fn_t fn; 119 unsigned int size; 120 unsigned int offset; 121 unsigned int is_signed; 122 const char *type; 123 struct hist_field *operands[HIST_FIELD_OPERANDS_MAX]; 124 struct hist_trigger_data *hist_data; 125 126 /* 127 * Variable fields contain variable-specific info in var. 128 */ 129 struct hist_var var; 130 enum field_op_id operator; 131 char *system; 132 char *event_name; 133 134 /* 135 * The name field is used for EXPR and VAR_REF fields. VAR 136 * fields contain the variable name in var.name. 137 */ 138 char *name; 139 140 /* 141 * When a histogram trigger is hit, if it has any references 142 * to variables, the values of those variables are collected 143 * into a var_ref_vals array by resolve_var_refs(). The 144 * current value of each variable is read from the tracing_map 145 * using the hist field's hist_var.idx and entered into the 146 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx]. 
147 */ 148 unsigned int var_ref_idx; 149 bool read_once; 150 }; 151 152 static u64 hist_field_none(struct hist_field *field, 153 struct tracing_map_elt *elt, 154 struct ring_buffer_event *rbe, 155 void *event) 156 { 157 return 0; 158 } 159 160 static u64 hist_field_counter(struct hist_field *field, 161 struct tracing_map_elt *elt, 162 struct ring_buffer_event *rbe, 163 void *event) 164 { 165 return 1; 166 } 167 168 static u64 hist_field_string(struct hist_field *hist_field, 169 struct tracing_map_elt *elt, 170 struct ring_buffer_event *rbe, 171 void *event) 172 { 173 char *addr = (char *)(event + hist_field->field->offset); 174 175 return (u64)(unsigned long)addr; 176 } 177 178 static u64 hist_field_dynstring(struct hist_field *hist_field, 179 struct tracing_map_elt *elt, 180 struct ring_buffer_event *rbe, 181 void *event) 182 { 183 u32 str_item = *(u32 *)(event + hist_field->field->offset); 184 int str_loc = str_item & 0xffff; 185 char *addr = (char *)(event + str_loc); 186 187 return (u64)(unsigned long)addr; 188 } 189 190 static u64 hist_field_pstring(struct hist_field *hist_field, 191 struct tracing_map_elt *elt, 192 struct ring_buffer_event *rbe, 193 void *event) 194 { 195 char **addr = (char **)(event + hist_field->field->offset); 196 197 return (u64)(unsigned long)*addr; 198 } 199 200 static u64 hist_field_log2(struct hist_field *hist_field, 201 struct tracing_map_elt *elt, 202 struct ring_buffer_event *rbe, 203 void *event) 204 { 205 struct hist_field *operand = hist_field->operands[0]; 206 207 u64 val = operand->fn(operand, elt, rbe, event); 208 209 return (u64) ilog2(roundup_pow_of_two(val)); 210 } 211 212 static u64 hist_field_plus(struct hist_field *hist_field, 213 struct tracing_map_elt *elt, 214 struct ring_buffer_event *rbe, 215 void *event) 216 { 217 struct hist_field *operand1 = hist_field->operands[0]; 218 struct hist_field *operand2 = hist_field->operands[1]; 219 220 u64 val1 = operand1->fn(operand1, elt, rbe, event); 221 u64 val2 = operand2->fn(operand2, elt, rbe, event); 222 223 return val1 + val2; 224 } 225 226 static u64 hist_field_minus(struct hist_field *hist_field, 227 struct tracing_map_elt *elt, 228 struct ring_buffer_event *rbe, 229 void *event) 230 { 231 struct hist_field *operand1 = hist_field->operands[0]; 232 struct hist_field *operand2 = hist_field->operands[1]; 233 234 u64 val1 = operand1->fn(operand1, elt, rbe, event); 235 u64 val2 = operand2->fn(operand2, elt, rbe, event); 236 237 return val1 - val2; 238 } 239 240 static u64 hist_field_unary_minus(struct hist_field *hist_field, 241 struct tracing_map_elt *elt, 242 struct ring_buffer_event *rbe, 243 void *event) 244 { 245 struct hist_field *operand = hist_field->operands[0]; 246 247 s64 sval = (s64)operand->fn(operand, elt, rbe, event); 248 u64 val = (u64)-sval; 249 250 return val; 251 } 252 253 #define DEFINE_HIST_FIELD_FN(type) \ 254 static u64 hist_field_##type(struct hist_field *hist_field, \ 255 struct tracing_map_elt *elt, \ 256 struct ring_buffer_event *rbe, \ 257 void *event) \ 258 { \ 259 type *addr = (type *)(event + hist_field->field->offset); \ 260 \ 261 return (u64)(unsigned long)*addr; \ 262 } 263 264 DEFINE_HIST_FIELD_FN(s64); 265 DEFINE_HIST_FIELD_FN(u64); 266 DEFINE_HIST_FIELD_FN(s32); 267 DEFINE_HIST_FIELD_FN(u32); 268 DEFINE_HIST_FIELD_FN(s16); 269 DEFINE_HIST_FIELD_FN(u16); 270 DEFINE_HIST_FIELD_FN(s8); 271 DEFINE_HIST_FIELD_FN(u8); 272 273 #define for_each_hist_field(i, hist_data) \ 274 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++) 275 276 #define for_each_hist_val_field(i, 
hist_data) \ 277 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++) 278 279 #define for_each_hist_key_field(i, hist_data) \ 280 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++) 281 282 #define HIST_STACKTRACE_DEPTH 16 283 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long)) 284 #define HIST_STACKTRACE_SKIP 5 285 286 #define HITCOUNT_IDX 0 287 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE) 288 289 enum hist_field_flags { 290 HIST_FIELD_FL_HITCOUNT = 1 << 0, 291 HIST_FIELD_FL_KEY = 1 << 1, 292 HIST_FIELD_FL_STRING = 1 << 2, 293 HIST_FIELD_FL_HEX = 1 << 3, 294 HIST_FIELD_FL_SYM = 1 << 4, 295 HIST_FIELD_FL_SYM_OFFSET = 1 << 5, 296 HIST_FIELD_FL_EXECNAME = 1 << 6, 297 HIST_FIELD_FL_SYSCALL = 1 << 7, 298 HIST_FIELD_FL_STACKTRACE = 1 << 8, 299 HIST_FIELD_FL_LOG2 = 1 << 9, 300 HIST_FIELD_FL_TIMESTAMP = 1 << 10, 301 HIST_FIELD_FL_TIMESTAMP_USECS = 1 << 11, 302 HIST_FIELD_FL_VAR = 1 << 12, 303 HIST_FIELD_FL_EXPR = 1 << 13, 304 HIST_FIELD_FL_VAR_REF = 1 << 14, 305 HIST_FIELD_FL_CPU = 1 << 15, 306 HIST_FIELD_FL_ALIAS = 1 << 16, 307 }; 308 309 struct var_defs { 310 unsigned int n_vars; 311 char *name[TRACING_MAP_VARS_MAX]; 312 char *expr[TRACING_MAP_VARS_MAX]; 313 }; 314 315 struct hist_trigger_attrs { 316 char *keys_str; 317 char *vals_str; 318 char *sort_key_str; 319 char *name; 320 char *clock; 321 bool pause; 322 bool cont; 323 bool clear; 324 bool ts_in_usecs; 325 unsigned int map_bits; 326 327 char *assignment_str[TRACING_MAP_VARS_MAX]; 328 unsigned int n_assignments; 329 330 char *action_str[HIST_ACTIONS_MAX]; 331 unsigned int n_actions; 332 333 struct var_defs var_defs; 334 }; 335 336 struct field_var { 337 struct hist_field *var; 338 struct hist_field *val; 339 }; 340 341 struct field_var_hist { 342 struct hist_trigger_data *hist_data; 343 char *cmd; 344 }; 345 346 struct hist_trigger_data { 347 struct hist_field *fields[HIST_FIELDS_MAX]; 348 unsigned int n_vals; 349 unsigned int n_keys; 350 unsigned int n_fields; 351 unsigned int n_vars; 352 unsigned int key_size; 353 struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX]; 354 unsigned int n_sort_keys; 355 struct trace_event_file *event_file; 356 struct hist_trigger_attrs *attrs; 357 struct tracing_map *map; 358 bool enable_timestamps; 359 bool remove; 360 struct hist_field *var_refs[TRACING_MAP_VARS_MAX]; 361 unsigned int n_var_refs; 362 363 struct action_data *actions[HIST_ACTIONS_MAX]; 364 unsigned int n_actions; 365 366 struct field_var *field_vars[SYNTH_FIELDS_MAX]; 367 unsigned int n_field_vars; 368 unsigned int n_field_var_str; 369 struct field_var_hist *field_var_hists[SYNTH_FIELDS_MAX]; 370 unsigned int n_field_var_hists; 371 372 struct field_var *save_vars[SYNTH_FIELDS_MAX]; 373 unsigned int n_save_vars; 374 unsigned int n_save_var_str; 375 }; 376 377 static int synth_event_create(int argc, const char **argv); 378 static int synth_event_show(struct seq_file *m, struct dyn_event *ev); 379 static int synth_event_release(struct dyn_event *ev); 380 static bool synth_event_is_busy(struct dyn_event *ev); 381 static bool synth_event_match(const char *system, const char *event, 382 int argc, const char **argv, struct dyn_event *ev); 383 384 static struct dyn_event_operations synth_event_ops = { 385 .create = synth_event_create, 386 .show = synth_event_show, 387 .is_busy = synth_event_is_busy, 388 .free = synth_event_release, 389 .match = synth_event_match, 390 }; 391 392 struct synth_field { 393 char *type; 394 char *name; 395 size_t size; 396 bool is_signed; 397 bool 
is_string; 398 }; 399 400 struct synth_event { 401 struct dyn_event devent; 402 int ref; 403 char *name; 404 struct synth_field **fields; 405 unsigned int n_fields; 406 unsigned int n_u64; 407 struct trace_event_class class; 408 struct trace_event_call call; 409 struct tracepoint *tp; 410 }; 411 412 static bool is_synth_event(struct dyn_event *ev) 413 { 414 return ev->ops == &synth_event_ops; 415 } 416 417 static struct synth_event *to_synth_event(struct dyn_event *ev) 418 { 419 return container_of(ev, struct synth_event, devent); 420 } 421 422 static bool synth_event_is_busy(struct dyn_event *ev) 423 { 424 struct synth_event *event = to_synth_event(ev); 425 426 return event->ref != 0; 427 } 428 429 static bool synth_event_match(const char *system, const char *event, 430 int argc, const char **argv, struct dyn_event *ev) 431 { 432 struct synth_event *sev = to_synth_event(ev); 433 434 return strcmp(sev->name, event) == 0 && 435 (!system || strcmp(system, SYNTH_SYSTEM) == 0); 436 } 437 438 struct action_data; 439 440 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data, 441 struct tracing_map_elt *elt, void *rec, 442 struct ring_buffer_event *rbe, void *key, 443 struct action_data *data, u64 *var_ref_vals); 444 445 typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val); 446 447 enum handler_id { 448 HANDLER_ONMATCH = 1, 449 HANDLER_ONMAX, 450 HANDLER_ONCHANGE, 451 }; 452 453 enum action_id { 454 ACTION_SAVE = 1, 455 ACTION_TRACE, 456 ACTION_SNAPSHOT, 457 }; 458 459 struct action_data { 460 enum handler_id handler; 461 enum action_id action; 462 char *action_name; 463 action_fn_t fn; 464 465 unsigned int n_params; 466 char *params[SYNTH_FIELDS_MAX]; 467 468 /* 469 * When a histogram trigger is hit, the values of any 470 * references to variables, including variables being passed 471 * as parameters to synthetic events, are collected into a 472 * var_ref_vals array. This var_ref_idx is the index of the 473 * first param in the array to be passed to the synthetic 474 * event invocation. 475 */ 476 unsigned int var_ref_idx; 477 struct synth_event *synth_event; 478 bool use_trace_keyword; 479 char *synth_event_name; 480 481 union { 482 struct { 483 char *event; 484 char *event_system; 485 } match_data; 486 487 struct { 488 /* 489 * var_str contains the $-unstripped variable 490 * name referenced by var_ref, and used when 491 * printing the action. Because var_ref 492 * creation is deferred to create_actions(), 493 * we need a per-action way to save it until 494 * then, thus var_str. 495 */ 496 char *var_str; 497 498 /* 499 * var_ref refers to the variable being 500 * tracked e.g onmax($var). 501 */ 502 struct hist_field *var_ref; 503 504 /* 505 * track_var contains the 'invisible' tracking 506 * variable created to keep the current 507 * e.g. max value. 
508 */ 509 struct hist_field *track_var; 510 511 check_track_val_fn_t check_val; 512 action_fn_t save_data; 513 } track_data; 514 }; 515 }; 516 517 struct track_data { 518 u64 track_val; 519 bool updated; 520 521 unsigned int key_len; 522 void *key; 523 struct tracing_map_elt elt; 524 525 struct action_data *action_data; 526 struct hist_trigger_data *hist_data; 527 }; 528 529 struct hist_elt_data { 530 char *comm; 531 u64 *var_ref_vals; 532 char *field_var_str[SYNTH_FIELDS_MAX]; 533 }; 534 535 struct snapshot_context { 536 struct tracing_map_elt *elt; 537 void *key; 538 }; 539 540 static void track_data_free(struct track_data *track_data) 541 { 542 struct hist_elt_data *elt_data; 543 544 if (!track_data) 545 return; 546 547 kfree(track_data->key); 548 549 elt_data = track_data->elt.private_data; 550 if (elt_data) { 551 kfree(elt_data->comm); 552 kfree(elt_data); 553 } 554 555 kfree(track_data); 556 } 557 558 static struct track_data *track_data_alloc(unsigned int key_len, 559 struct action_data *action_data, 560 struct hist_trigger_data *hist_data) 561 { 562 struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL); 563 struct hist_elt_data *elt_data; 564 565 if (!data) 566 return ERR_PTR(-ENOMEM); 567 568 data->key = kzalloc(key_len, GFP_KERNEL); 569 if (!data->key) { 570 track_data_free(data); 571 return ERR_PTR(-ENOMEM); 572 } 573 574 data->key_len = key_len; 575 data->action_data = action_data; 576 data->hist_data = hist_data; 577 578 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); 579 if (!elt_data) { 580 track_data_free(data); 581 return ERR_PTR(-ENOMEM); 582 } 583 data->elt.private_data = elt_data; 584 585 elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL); 586 if (!elt_data->comm) { 587 track_data_free(data); 588 return ERR_PTR(-ENOMEM); 589 } 590 591 return data; 592 } 593 594 static char last_cmd[MAX_FILTER_STR_VAL]; 595 static char last_cmd_loc[MAX_FILTER_STR_VAL]; 596 597 static int errpos(char *str) 598 { 599 return err_pos(last_cmd, str); 600 } 601 602 static void last_cmd_set(struct trace_event_file *file, char *str) 603 { 604 const char *system = NULL, *name = NULL; 605 struct trace_event_call *call; 606 607 if (!str) 608 return; 609 610 strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1); 611 612 if (file) { 613 call = file->event_call; 614 615 system = call->class->system; 616 if (system) { 617 name = trace_event_name(call); 618 if (!name) 619 system = NULL; 620 } 621 } 622 623 if (system) 624 snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name); 625 } 626 627 static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos) 628 { 629 tracing_log_err(tr, last_cmd_loc, last_cmd, err_text, 630 err_type, err_pos); 631 } 632 633 static void hist_err_clear(void) 634 { 635 last_cmd[0] = '\0'; 636 last_cmd_loc[0] = '\0'; 637 } 638 639 struct synth_trace_event { 640 struct trace_entry ent; 641 u64 fields[]; 642 }; 643 644 static int synth_event_define_fields(struct trace_event_call *call) 645 { 646 struct synth_trace_event trace; 647 int offset = offsetof(typeof(trace), fields); 648 struct synth_event *event = call->data; 649 unsigned int i, size, n_u64; 650 char *name, *type; 651 bool is_signed; 652 int ret = 0; 653 654 for (i = 0, n_u64 = 0; i < event->n_fields; i++) { 655 size = event->fields[i]->size; 656 is_signed = event->fields[i]->is_signed; 657 type = event->fields[i]->type; 658 name = event->fields[i]->name; 659 ret = trace_define_field(call, type, name, offset, size, 660 is_signed, FILTER_OTHER); 661 if (ret) 662 break; 663 664 if 
(event->fields[i]->is_string) { 665 offset += STR_VAR_LEN_MAX; 666 n_u64 += STR_VAR_LEN_MAX / sizeof(u64); 667 } else { 668 offset += sizeof(u64); 669 n_u64++; 670 } 671 } 672 673 event->n_u64 = n_u64; 674 675 return ret; 676 } 677 678 static bool synth_field_signed(char *type) 679 { 680 if (str_has_prefix(type, "u")) 681 return false; 682 if (strcmp(type, "gfp_t") == 0) 683 return false; 684 685 return true; 686 } 687 688 static int synth_field_is_string(char *type) 689 { 690 if (strstr(type, "char[") != NULL) 691 return true; 692 693 return false; 694 } 695 696 static int synth_field_string_size(char *type) 697 { 698 char buf[4], *end, *start; 699 unsigned int len; 700 int size, err; 701 702 start = strstr(type, "char["); 703 if (start == NULL) 704 return -EINVAL; 705 start += sizeof("char[") - 1; 706 707 end = strchr(type, ']'); 708 if (!end || end < start) 709 return -EINVAL; 710 711 len = end - start; 712 if (len > 3) 713 return -EINVAL; 714 715 strncpy(buf, start, len); 716 buf[len] = '\0'; 717 718 err = kstrtouint(buf, 0, &size); 719 if (err) 720 return err; 721 722 if (size > STR_VAR_LEN_MAX) 723 return -EINVAL; 724 725 return size; 726 } 727 728 static int synth_field_size(char *type) 729 { 730 int size = 0; 731 732 if (strcmp(type, "s64") == 0) 733 size = sizeof(s64); 734 else if (strcmp(type, "u64") == 0) 735 size = sizeof(u64); 736 else if (strcmp(type, "s32") == 0) 737 size = sizeof(s32); 738 else if (strcmp(type, "u32") == 0) 739 size = sizeof(u32); 740 else if (strcmp(type, "s16") == 0) 741 size = sizeof(s16); 742 else if (strcmp(type, "u16") == 0) 743 size = sizeof(u16); 744 else if (strcmp(type, "s8") == 0) 745 size = sizeof(s8); 746 else if (strcmp(type, "u8") == 0) 747 size = sizeof(u8); 748 else if (strcmp(type, "char") == 0) 749 size = sizeof(char); 750 else if (strcmp(type, "unsigned char") == 0) 751 size = sizeof(unsigned char); 752 else if (strcmp(type, "int") == 0) 753 size = sizeof(int); 754 else if (strcmp(type, "unsigned int") == 0) 755 size = sizeof(unsigned int); 756 else if (strcmp(type, "long") == 0) 757 size = sizeof(long); 758 else if (strcmp(type, "unsigned long") == 0) 759 size = sizeof(unsigned long); 760 else if (strcmp(type, "pid_t") == 0) 761 size = sizeof(pid_t); 762 else if (strcmp(type, "gfp_t") == 0) 763 size = sizeof(gfp_t); 764 else if (synth_field_is_string(type)) 765 size = synth_field_string_size(type); 766 767 return size; 768 } 769 770 static const char *synth_field_fmt(char *type) 771 { 772 const char *fmt = "%llu"; 773 774 if (strcmp(type, "s64") == 0) 775 fmt = "%lld"; 776 else if (strcmp(type, "u64") == 0) 777 fmt = "%llu"; 778 else if (strcmp(type, "s32") == 0) 779 fmt = "%d"; 780 else if (strcmp(type, "u32") == 0) 781 fmt = "%u"; 782 else if (strcmp(type, "s16") == 0) 783 fmt = "%d"; 784 else if (strcmp(type, "u16") == 0) 785 fmt = "%u"; 786 else if (strcmp(type, "s8") == 0) 787 fmt = "%d"; 788 else if (strcmp(type, "u8") == 0) 789 fmt = "%u"; 790 else if (strcmp(type, "char") == 0) 791 fmt = "%d"; 792 else if (strcmp(type, "unsigned char") == 0) 793 fmt = "%u"; 794 else if (strcmp(type, "int") == 0) 795 fmt = "%d"; 796 else if (strcmp(type, "unsigned int") == 0) 797 fmt = "%u"; 798 else if (strcmp(type, "long") == 0) 799 fmt = "%ld"; 800 else if (strcmp(type, "unsigned long") == 0) 801 fmt = "%lu"; 802 else if (strcmp(type, "pid_t") == 0) 803 fmt = "%d"; 804 else if (strcmp(type, "gfp_t") == 0) 805 fmt = "%x"; 806 else if (synth_field_is_string(type)) 807 fmt = "%s"; 808 809 return fmt; 810 } 811 812 static enum print_line_t 
print_synth_event(struct trace_iterator *iter, 813 int flags, 814 struct trace_event *event) 815 { 816 struct trace_array *tr = iter->tr; 817 struct trace_seq *s = &iter->seq; 818 struct synth_trace_event *entry; 819 struct synth_event *se; 820 unsigned int i, n_u64; 821 char print_fmt[32]; 822 const char *fmt; 823 824 entry = (struct synth_trace_event *)iter->ent; 825 se = container_of(event, struct synth_event, call.event); 826 827 trace_seq_printf(s, "%s: ", se->name); 828 829 for (i = 0, n_u64 = 0; i < se->n_fields; i++) { 830 if (trace_seq_has_overflowed(s)) 831 goto end; 832 833 fmt = synth_field_fmt(se->fields[i]->type); 834 835 /* parameter types */ 836 if (tr->trace_flags & TRACE_ITER_VERBOSE) 837 trace_seq_printf(s, "%s ", fmt); 838 839 snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt); 840 841 /* parameter values */ 842 if (se->fields[i]->is_string) { 843 trace_seq_printf(s, print_fmt, se->fields[i]->name, 844 (char *)&entry->fields[n_u64], 845 i == se->n_fields - 1 ? "" : " "); 846 n_u64 += STR_VAR_LEN_MAX / sizeof(u64); 847 } else { 848 struct trace_print_flags __flags[] = { 849 __def_gfpflag_names, {-1, NULL} }; 850 851 trace_seq_printf(s, print_fmt, se->fields[i]->name, 852 entry->fields[n_u64], 853 i == se->n_fields - 1 ? "" : " "); 854 855 if (strcmp(se->fields[i]->type, "gfp_t") == 0) { 856 trace_seq_puts(s, " ("); 857 trace_print_flags_seq(s, "|", 858 entry->fields[n_u64], 859 __flags); 860 trace_seq_putc(s, ')'); 861 } 862 n_u64++; 863 } 864 } 865 end: 866 trace_seq_putc(s, '\n'); 867 868 return trace_handle_return(s); 869 } 870 871 static struct trace_event_functions synth_event_funcs = { 872 .trace = print_synth_event 873 }; 874 875 static notrace void trace_event_raw_event_synth(void *__data, 876 u64 *var_ref_vals, 877 unsigned int var_ref_idx) 878 { 879 struct trace_event_file *trace_file = __data; 880 struct synth_trace_event *entry; 881 struct trace_event_buffer fbuffer; 882 struct ring_buffer *buffer; 883 struct synth_event *event; 884 unsigned int i, n_u64; 885 int fields_size = 0; 886 887 event = trace_file->event_call->data; 888 889 if (trace_trigger_soft_disabled(trace_file)) 890 return; 891 892 fields_size = event->n_u64 * sizeof(u64); 893 894 /* 895 * Avoid ring buffer recursion detection, as this event 896 * is being performed within another event. 
897 */ 898 buffer = trace_file->tr->trace_buffer.buffer; 899 ring_buffer_nest_start(buffer); 900 901 entry = trace_event_buffer_reserve(&fbuffer, trace_file, 902 sizeof(*entry) + fields_size); 903 if (!entry) 904 goto out; 905 906 for (i = 0, n_u64 = 0; i < event->n_fields; i++) { 907 if (event->fields[i]->is_string) { 908 char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i]; 909 char *str_field = (char *)&entry->fields[n_u64]; 910 911 strscpy(str_field, str_val, STR_VAR_LEN_MAX); 912 n_u64 += STR_VAR_LEN_MAX / sizeof(u64); 913 } else { 914 struct synth_field *field = event->fields[i]; 915 u64 val = var_ref_vals[var_ref_idx + i]; 916 917 switch (field->size) { 918 case 1: 919 *(u8 *)&entry->fields[n_u64] = (u8)val; 920 break; 921 922 case 2: 923 *(u16 *)&entry->fields[n_u64] = (u16)val; 924 break; 925 926 case 4: 927 *(u32 *)&entry->fields[n_u64] = (u32)val; 928 break; 929 930 default: 931 entry->fields[n_u64] = val; 932 break; 933 } 934 n_u64++; 935 } 936 } 937 938 trace_event_buffer_commit(&fbuffer); 939 out: 940 ring_buffer_nest_end(buffer); 941 } 942 943 static void free_synth_event_print_fmt(struct trace_event_call *call) 944 { 945 if (call) { 946 kfree(call->print_fmt); 947 call->print_fmt = NULL; 948 } 949 } 950 951 static int __set_synth_event_print_fmt(struct synth_event *event, 952 char *buf, int len) 953 { 954 const char *fmt; 955 int pos = 0; 956 int i; 957 958 /* When len=0, we just calculate the needed length */ 959 #define LEN_OR_ZERO (len ? len - pos : 0) 960 961 pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); 962 for (i = 0; i < event->n_fields; i++) { 963 fmt = synth_field_fmt(event->fields[i]->type); 964 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s", 965 event->fields[i]->name, fmt, 966 i == event->n_fields - 1 ? "" : ", "); 967 } 968 pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); 969 970 for (i = 0; i < event->n_fields; i++) { 971 pos += snprintf(buf + pos, LEN_OR_ZERO, 972 ", REC->%s", event->fields[i]->name); 973 } 974 975 #undef LEN_OR_ZERO 976 977 /* return the length of print_fmt */ 978 return pos; 979 } 980 981 static int set_synth_event_print_fmt(struct trace_event_call *call) 982 { 983 struct synth_event *event = call->data; 984 char *print_fmt; 985 int len; 986 987 /* First: called with 0 length to calculate the needed length */ 988 len = __set_synth_event_print_fmt(event, NULL, 0); 989 990 print_fmt = kmalloc(len + 1, GFP_KERNEL); 991 if (!print_fmt) 992 return -ENOMEM; 993 994 /* Second: actually write the @print_fmt */ 995 __set_synth_event_print_fmt(event, print_fmt, len + 1); 996 call->print_fmt = print_fmt; 997 998 return 0; 999 } 1000 1001 static void free_synth_field(struct synth_field *field) 1002 { 1003 kfree(field->type); 1004 kfree(field->name); 1005 kfree(field); 1006 } 1007 1008 static struct synth_field *parse_synth_field(int argc, const char **argv, 1009 int *consumed) 1010 { 1011 struct synth_field *field; 1012 const char *prefix = NULL, *field_type = argv[0], *field_name, *array; 1013 int len, ret = 0; 1014 1015 if (field_type[0] == ';') 1016 field_type++; 1017 1018 if (!strcmp(field_type, "unsigned")) { 1019 if (argc < 3) 1020 return ERR_PTR(-EINVAL); 1021 prefix = "unsigned "; 1022 field_type = argv[1]; 1023 field_name = argv[2]; 1024 *consumed = 3; 1025 } else { 1026 field_name = argv[1]; 1027 *consumed = 2; 1028 } 1029 1030 field = kzalloc(sizeof(*field), GFP_KERNEL); 1031 if (!field) 1032 return ERR_PTR(-ENOMEM); 1033 1034 len = strlen(field_name); 1035 array = strchr(field_name, '['); 1036 if (array) 1037 len -= strlen(array); 
1038 else if (field_name[len - 1] == ';') 1039 len--; 1040 1041 field->name = kmemdup_nul(field_name, len, GFP_KERNEL); 1042 if (!field->name) { 1043 ret = -ENOMEM; 1044 goto free; 1045 } 1046 1047 if (field_type[0] == ';') 1048 field_type++; 1049 len = strlen(field_type) + 1; 1050 if (array) 1051 len += strlen(array); 1052 if (prefix) 1053 len += strlen(prefix); 1054 1055 field->type = kzalloc(len, GFP_KERNEL); 1056 if (!field->type) { 1057 ret = -ENOMEM; 1058 goto free; 1059 } 1060 if (prefix) 1061 strcat(field->type, prefix); 1062 strcat(field->type, field_type); 1063 if (array) { 1064 strcat(field->type, array); 1065 if (field->type[len - 1] == ';') 1066 field->type[len - 1] = '\0'; 1067 } 1068 1069 field->size = synth_field_size(field->type); 1070 if (!field->size) { 1071 ret = -EINVAL; 1072 goto free; 1073 } 1074 1075 if (synth_field_is_string(field->type)) 1076 field->is_string = true; 1077 1078 field->is_signed = synth_field_signed(field->type); 1079 1080 out: 1081 return field; 1082 free: 1083 free_synth_field(field); 1084 field = ERR_PTR(ret); 1085 goto out; 1086 } 1087 1088 static void free_synth_tracepoint(struct tracepoint *tp) 1089 { 1090 if (!tp) 1091 return; 1092 1093 kfree(tp->name); 1094 kfree(tp); 1095 } 1096 1097 static struct tracepoint *alloc_synth_tracepoint(char *name) 1098 { 1099 struct tracepoint *tp; 1100 1101 tp = kzalloc(sizeof(*tp), GFP_KERNEL); 1102 if (!tp) 1103 return ERR_PTR(-ENOMEM); 1104 1105 tp->name = kstrdup(name, GFP_KERNEL); 1106 if (!tp->name) { 1107 kfree(tp); 1108 return ERR_PTR(-ENOMEM); 1109 } 1110 1111 return tp; 1112 } 1113 1114 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals, 1115 unsigned int var_ref_idx); 1116 1117 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals, 1118 unsigned int var_ref_idx) 1119 { 1120 struct tracepoint *tp = event->tp; 1121 1122 if (unlikely(atomic_read(&tp->key.enabled) > 0)) { 1123 struct tracepoint_func *probe_func_ptr; 1124 synth_probe_func_t probe_func; 1125 void *__data; 1126 1127 if (!(cpu_online(raw_smp_processor_id()))) 1128 return; 1129 1130 probe_func_ptr = rcu_dereference_sched((tp)->funcs); 1131 if (probe_func_ptr) { 1132 do { 1133 probe_func = probe_func_ptr->func; 1134 __data = probe_func_ptr->data; 1135 probe_func(__data, var_ref_vals, var_ref_idx); 1136 } while ((++probe_func_ptr)->func); 1137 } 1138 } 1139 } 1140 1141 static struct synth_event *find_synth_event(const char *name) 1142 { 1143 struct dyn_event *pos; 1144 struct synth_event *event; 1145 1146 for_each_dyn_event(pos) { 1147 if (!is_synth_event(pos)) 1148 continue; 1149 event = to_synth_event(pos); 1150 if (strcmp(event->name, name) == 0) 1151 return event; 1152 } 1153 1154 return NULL; 1155 } 1156 1157 static struct trace_event_fields synth_event_fields_array[] = { 1158 { .type = TRACE_FUNCTION_TYPE, 1159 .define_fields = synth_event_define_fields }, 1160 {} 1161 }; 1162 1163 static int register_synth_event(struct synth_event *event) 1164 { 1165 struct trace_event_call *call = &event->call; 1166 int ret = 0; 1167 1168 event->call.class = &event->class; 1169 event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL); 1170 if (!event->class.system) { 1171 ret = -ENOMEM; 1172 goto out; 1173 } 1174 1175 event->tp = alloc_synth_tracepoint(event->name); 1176 if (IS_ERR(event->tp)) { 1177 ret = PTR_ERR(event->tp); 1178 event->tp = NULL; 1179 goto out; 1180 } 1181 1182 INIT_LIST_HEAD(&call->class->fields); 1183 call->event.funcs = &synth_event_funcs; 1184 call->class->fields_array = 
synth_event_fields_array; 1185 1186 ret = register_trace_event(&call->event); 1187 if (!ret) { 1188 ret = -ENODEV; 1189 goto out; 1190 } 1191 call->flags = TRACE_EVENT_FL_TRACEPOINT; 1192 call->class->reg = trace_event_reg; 1193 call->class->probe = trace_event_raw_event_synth; 1194 call->data = event; 1195 call->tp = event->tp; 1196 1197 ret = trace_add_event_call(call); 1198 if (ret) { 1199 pr_warn("Failed to register synthetic event: %s\n", 1200 trace_event_name(call)); 1201 goto err; 1202 } 1203 1204 ret = set_synth_event_print_fmt(call); 1205 if (ret < 0) { 1206 trace_remove_event_call(call); 1207 goto err; 1208 } 1209 out: 1210 return ret; 1211 err: 1212 unregister_trace_event(&call->event); 1213 goto out; 1214 } 1215 1216 static int unregister_synth_event(struct synth_event *event) 1217 { 1218 struct trace_event_call *call = &event->call; 1219 int ret; 1220 1221 ret = trace_remove_event_call(call); 1222 1223 return ret; 1224 } 1225 1226 static void free_synth_event(struct synth_event *event) 1227 { 1228 unsigned int i; 1229 1230 if (!event) 1231 return; 1232 1233 for (i = 0; i < event->n_fields; i++) 1234 free_synth_field(event->fields[i]); 1235 1236 kfree(event->fields); 1237 kfree(event->name); 1238 kfree(event->class.system); 1239 free_synth_tracepoint(event->tp); 1240 free_synth_event_print_fmt(&event->call); 1241 kfree(event); 1242 } 1243 1244 static struct synth_event *alloc_synth_event(const char *name, int n_fields, 1245 struct synth_field **fields) 1246 { 1247 struct synth_event *event; 1248 unsigned int i; 1249 1250 event = kzalloc(sizeof(*event), GFP_KERNEL); 1251 if (!event) { 1252 event = ERR_PTR(-ENOMEM); 1253 goto out; 1254 } 1255 1256 event->name = kstrdup(name, GFP_KERNEL); 1257 if (!event->name) { 1258 kfree(event); 1259 event = ERR_PTR(-ENOMEM); 1260 goto out; 1261 } 1262 1263 event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL); 1264 if (!event->fields) { 1265 free_synth_event(event); 1266 event = ERR_PTR(-ENOMEM); 1267 goto out; 1268 } 1269 1270 dyn_event_init(&event->devent, &synth_event_ops); 1271 1272 for (i = 0; i < n_fields; i++) 1273 event->fields[i] = fields[i]; 1274 1275 event->n_fields = n_fields; 1276 out: 1277 return event; 1278 } 1279 1280 static void action_trace(struct hist_trigger_data *hist_data, 1281 struct tracing_map_elt *elt, void *rec, 1282 struct ring_buffer_event *rbe, void *key, 1283 struct action_data *data, u64 *var_ref_vals) 1284 { 1285 struct synth_event *event = data->synth_event; 1286 1287 trace_synth(event, var_ref_vals, data->var_ref_idx); 1288 } 1289 1290 struct hist_var_data { 1291 struct list_head list; 1292 struct hist_trigger_data *hist_data; 1293 }; 1294 1295 static int __create_synth_event(int argc, const char *name, const char **argv) 1296 { 1297 struct synth_field *field, *fields[SYNTH_FIELDS_MAX]; 1298 struct synth_event *event = NULL; 1299 int i, consumed = 0, n_fields = 0, ret = 0; 1300 1301 /* 1302 * Argument syntax: 1303 * - Add synthetic event: <event_name> field[;field] ... 1304 * - Remove synthetic event: !<event_name> field[;field] ... 
1305 * where 'field' = type field_name 1306 */ 1307 1308 if (name[0] == '\0' || argc < 1) 1309 return -EINVAL; 1310 1311 mutex_lock(&event_mutex); 1312 1313 event = find_synth_event(name); 1314 if (event) { 1315 ret = -EEXIST; 1316 goto out; 1317 } 1318 1319 for (i = 0; i < argc - 1; i++) { 1320 if (strcmp(argv[i], ";") == 0) 1321 continue; 1322 if (n_fields == SYNTH_FIELDS_MAX) { 1323 ret = -EINVAL; 1324 goto err; 1325 } 1326 1327 field = parse_synth_field(argc - i, &argv[i], &consumed); 1328 if (IS_ERR(field)) { 1329 ret = PTR_ERR(field); 1330 goto err; 1331 } 1332 fields[n_fields++] = field; 1333 i += consumed - 1; 1334 } 1335 1336 if (i < argc && strcmp(argv[i], ";") != 0) { 1337 ret = -EINVAL; 1338 goto err; 1339 } 1340 1341 event = alloc_synth_event(name, n_fields, fields); 1342 if (IS_ERR(event)) { 1343 ret = PTR_ERR(event); 1344 event = NULL; 1345 goto err; 1346 } 1347 ret = register_synth_event(event); 1348 if (!ret) 1349 dyn_event_add(&event->devent); 1350 else 1351 free_synth_event(event); 1352 out: 1353 mutex_unlock(&event_mutex); 1354 1355 return ret; 1356 err: 1357 for (i = 0; i < n_fields; i++) 1358 free_synth_field(fields[i]); 1359 1360 goto out; 1361 } 1362 1363 static int create_or_delete_synth_event(int argc, char **argv) 1364 { 1365 const char *name = argv[0]; 1366 struct synth_event *event = NULL; 1367 int ret; 1368 1369 /* trace_run_command() ensures argc != 0 */ 1370 if (name[0] == '!') { 1371 mutex_lock(&event_mutex); 1372 event = find_synth_event(name + 1); 1373 if (event) { 1374 if (event->ref) 1375 ret = -EBUSY; 1376 else { 1377 ret = unregister_synth_event(event); 1378 if (!ret) { 1379 dyn_event_remove(&event->devent); 1380 free_synth_event(event); 1381 } 1382 } 1383 } else 1384 ret = -ENOENT; 1385 mutex_unlock(&event_mutex); 1386 return ret; 1387 } 1388 1389 ret = __create_synth_event(argc - 1, name, (const char **)argv + 1); 1390 return ret == -ECANCELED ? -EINVAL : ret; 1391 } 1392 1393 static int synth_event_create(int argc, const char **argv) 1394 { 1395 const char *name = argv[0]; 1396 int len; 1397 1398 if (name[0] != 's' || name[1] != ':') 1399 return -ECANCELED; 1400 name += 2; 1401 1402 /* This interface accepts group name prefix */ 1403 if (strchr(name, '/')) { 1404 len = str_has_prefix(name, SYNTH_SYSTEM "/"); 1405 if (len == 0) 1406 return -EINVAL; 1407 name += len; 1408 } 1409 return __create_synth_event(argc - 1, name, argv + 1); 1410 } 1411 1412 static int synth_event_release(struct dyn_event *ev) 1413 { 1414 struct synth_event *event = to_synth_event(ev); 1415 int ret; 1416 1417 if (event->ref) 1418 return -EBUSY; 1419 1420 ret = unregister_synth_event(event); 1421 if (ret) 1422 return ret; 1423 1424 dyn_event_remove(ev); 1425 free_synth_event(event); 1426 return 0; 1427 } 1428 1429 static int __synth_event_show(struct seq_file *m, struct synth_event *event) 1430 { 1431 struct synth_field *field; 1432 unsigned int i; 1433 1434 seq_printf(m, "%s\t", event->name); 1435 1436 for (i = 0; i < event->n_fields; i++) { 1437 field = event->fields[i]; 1438 1439 /* parameter values */ 1440 seq_printf(m, "%s %s%s", field->type, field->name, 1441 i == event->n_fields - 1 ? 
"" : "; "); 1442 } 1443 1444 seq_putc(m, '\n'); 1445 1446 return 0; 1447 } 1448 1449 static int synth_event_show(struct seq_file *m, struct dyn_event *ev) 1450 { 1451 struct synth_event *event = to_synth_event(ev); 1452 1453 seq_printf(m, "s:%s/", event->class.system); 1454 1455 return __synth_event_show(m, event); 1456 } 1457 1458 static int synth_events_seq_show(struct seq_file *m, void *v) 1459 { 1460 struct dyn_event *ev = v; 1461 1462 if (!is_synth_event(ev)) 1463 return 0; 1464 1465 return __synth_event_show(m, to_synth_event(ev)); 1466 } 1467 1468 static const struct seq_operations synth_events_seq_op = { 1469 .start = dyn_event_seq_start, 1470 .next = dyn_event_seq_next, 1471 .stop = dyn_event_seq_stop, 1472 .show = synth_events_seq_show, 1473 }; 1474 1475 static int synth_events_open(struct inode *inode, struct file *file) 1476 { 1477 int ret; 1478 1479 ret = security_locked_down(LOCKDOWN_TRACEFS); 1480 if (ret) 1481 return ret; 1482 1483 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 1484 ret = dyn_events_release_all(&synth_event_ops); 1485 if (ret < 0) 1486 return ret; 1487 } 1488 1489 return seq_open(file, &synth_events_seq_op); 1490 } 1491 1492 static ssize_t synth_events_write(struct file *file, 1493 const char __user *buffer, 1494 size_t count, loff_t *ppos) 1495 { 1496 return trace_parse_run_command(file, buffer, count, ppos, 1497 create_or_delete_synth_event); 1498 } 1499 1500 static const struct file_operations synth_events_fops = { 1501 .open = synth_events_open, 1502 .write = synth_events_write, 1503 .read = seq_read, 1504 .llseek = seq_lseek, 1505 .release = seq_release, 1506 }; 1507 1508 static u64 hist_field_timestamp(struct hist_field *hist_field, 1509 struct tracing_map_elt *elt, 1510 struct ring_buffer_event *rbe, 1511 void *event) 1512 { 1513 struct hist_trigger_data *hist_data = hist_field->hist_data; 1514 struct trace_array *tr = hist_data->event_file->tr; 1515 1516 u64 ts = ring_buffer_event_time_stamp(rbe); 1517 1518 if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr)) 1519 ts = ns2usecs(ts); 1520 1521 return ts; 1522 } 1523 1524 static u64 hist_field_cpu(struct hist_field *hist_field, 1525 struct tracing_map_elt *elt, 1526 struct ring_buffer_event *rbe, 1527 void *event) 1528 { 1529 int cpu = smp_processor_id(); 1530 1531 return cpu; 1532 } 1533 1534 /** 1535 * check_field_for_var_ref - Check if a VAR_REF field references a variable 1536 * @hist_field: The VAR_REF field to check 1537 * @var_data: The hist trigger that owns the variable 1538 * @var_idx: The trigger variable identifier 1539 * 1540 * Check the given VAR_REF field to see whether or not it references 1541 * the given variable associated with the given trigger. 
1542 * 1543 * Return: The VAR_REF field if it does reference the variable, NULL if not 1544 */ 1545 static struct hist_field * 1546 check_field_for_var_ref(struct hist_field *hist_field, 1547 struct hist_trigger_data *var_data, 1548 unsigned int var_idx) 1549 { 1550 WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF)); 1551 1552 if (hist_field && hist_field->var.idx == var_idx && 1553 hist_field->var.hist_data == var_data) 1554 return hist_field; 1555 1556 return NULL; 1557 } 1558 1559 /** 1560 * find_var_ref - Check if a trigger has a reference to a trigger variable 1561 * @hist_data: The hist trigger that might have a reference to the variable 1562 * @var_data: The hist trigger that owns the variable 1563 * @var_idx: The trigger variable identifier 1564 * 1565 * Check the list of var_refs[] on the first hist trigger to see 1566 * whether any of them are references to the variable on the second 1567 * trigger. 1568 * 1569 * Return: The VAR_REF field referencing the variable if so, NULL if not 1570 */ 1571 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data, 1572 struct hist_trigger_data *var_data, 1573 unsigned int var_idx) 1574 { 1575 struct hist_field *hist_field; 1576 unsigned int i; 1577 1578 for (i = 0; i < hist_data->n_var_refs; i++) { 1579 hist_field = hist_data->var_refs[i]; 1580 if (check_field_for_var_ref(hist_field, var_data, var_idx)) 1581 return hist_field; 1582 } 1583 1584 return NULL; 1585 } 1586 1587 /** 1588 * find_any_var_ref - Check if there is a reference to a given trigger variable 1589 * @hist_data: The hist trigger 1590 * @var_idx: The trigger variable identifier 1591 * 1592 * Check to see whether the given variable is currently referenced by 1593 * any other trigger. 1594 * 1595 * The trigger the variable is defined on is explicitly excluded - the 1596 * assumption being that a self-reference doesn't prevent a trigger 1597 * from being removed. 1598 * 1599 * Return: The VAR_REF field referencing the variable if so, NULL if not 1600 */ 1601 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data, 1602 unsigned int var_idx) 1603 { 1604 struct trace_array *tr = hist_data->event_file->tr; 1605 struct hist_field *found = NULL; 1606 struct hist_var_data *var_data; 1607 1608 list_for_each_entry(var_data, &tr->hist_vars, list) { 1609 if (var_data->hist_data == hist_data) 1610 continue; 1611 found = find_var_ref(var_data->hist_data, hist_data, var_idx); 1612 if (found) 1613 break; 1614 } 1615 1616 return found; 1617 } 1618 1619 /** 1620 * check_var_refs - Check if there is a reference to any of trigger's variables 1621 * @hist_data: The hist trigger 1622 * 1623 * A trigger can define one or more variables. If any one of them is 1624 * currently referenced by any other trigger, this function will 1625 * determine that. 1626 1627 * Typically used to determine whether or not a trigger can be removed 1628 * - if there are any references to a trigger's variables, it cannot. 
1629 * 1630 * Return: True if there is a reference to any of trigger's variables 1631 */ 1632 static bool check_var_refs(struct hist_trigger_data *hist_data) 1633 { 1634 struct hist_field *field; 1635 bool found = false; 1636 int i; 1637 1638 for_each_hist_field(i, hist_data) { 1639 field = hist_data->fields[i]; 1640 if (field && field->flags & HIST_FIELD_FL_VAR) { 1641 if (find_any_var_ref(hist_data, field->var.idx)) { 1642 found = true; 1643 break; 1644 } 1645 } 1646 } 1647 1648 return found; 1649 } 1650 1651 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data) 1652 { 1653 struct trace_array *tr = hist_data->event_file->tr; 1654 struct hist_var_data *var_data, *found = NULL; 1655 1656 list_for_each_entry(var_data, &tr->hist_vars, list) { 1657 if (var_data->hist_data == hist_data) { 1658 found = var_data; 1659 break; 1660 } 1661 } 1662 1663 return found; 1664 } 1665 1666 static bool field_has_hist_vars(struct hist_field *hist_field, 1667 unsigned int level) 1668 { 1669 int i; 1670 1671 if (level > 3) 1672 return false; 1673 1674 if (!hist_field) 1675 return false; 1676 1677 if (hist_field->flags & HIST_FIELD_FL_VAR || 1678 hist_field->flags & HIST_FIELD_FL_VAR_REF) 1679 return true; 1680 1681 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) { 1682 struct hist_field *operand; 1683 1684 operand = hist_field->operands[i]; 1685 if (field_has_hist_vars(operand, level + 1)) 1686 return true; 1687 } 1688 1689 return false; 1690 } 1691 1692 static bool has_hist_vars(struct hist_trigger_data *hist_data) 1693 { 1694 struct hist_field *hist_field; 1695 int i; 1696 1697 for_each_hist_field(i, hist_data) { 1698 hist_field = hist_data->fields[i]; 1699 if (field_has_hist_vars(hist_field, 0)) 1700 return true; 1701 } 1702 1703 return false; 1704 } 1705 1706 static int save_hist_vars(struct hist_trigger_data *hist_data) 1707 { 1708 struct trace_array *tr = hist_data->event_file->tr; 1709 struct hist_var_data *var_data; 1710 1711 var_data = find_hist_vars(hist_data); 1712 if (var_data) 1713 return 0; 1714 1715 if (tracing_check_open_get_tr(tr)) 1716 return -ENODEV; 1717 1718 var_data = kzalloc(sizeof(*var_data), GFP_KERNEL); 1719 if (!var_data) { 1720 trace_array_put(tr); 1721 return -ENOMEM; 1722 } 1723 1724 var_data->hist_data = hist_data; 1725 list_add(&var_data->list, &tr->hist_vars); 1726 1727 return 0; 1728 } 1729 1730 static void remove_hist_vars(struct hist_trigger_data *hist_data) 1731 { 1732 struct trace_array *tr = hist_data->event_file->tr; 1733 struct hist_var_data *var_data; 1734 1735 var_data = find_hist_vars(hist_data); 1736 if (!var_data) 1737 return; 1738 1739 if (WARN_ON(check_var_refs(hist_data))) 1740 return; 1741 1742 list_del(&var_data->list); 1743 1744 kfree(var_data); 1745 1746 trace_array_put(tr); 1747 } 1748 1749 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data, 1750 const char *var_name) 1751 { 1752 struct hist_field *hist_field, *found = NULL; 1753 int i; 1754 1755 for_each_hist_field(i, hist_data) { 1756 hist_field = hist_data->fields[i]; 1757 if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR && 1758 strcmp(hist_field->var.name, var_name) == 0) { 1759 found = hist_field; 1760 break; 1761 } 1762 } 1763 1764 return found; 1765 } 1766 1767 static struct hist_field *find_var(struct hist_trigger_data *hist_data, 1768 struct trace_event_file *file, 1769 const char *var_name) 1770 { 1771 struct hist_trigger_data *test_data; 1772 struct event_trigger_data *test; 1773 struct hist_field *hist_field; 1774 1775 hist_field = 
find_var_field(hist_data, var_name); 1776 if (hist_field) 1777 return hist_field; 1778 1779 list_for_each_entry_rcu(test, &file->triggers, list) { 1780 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 1781 test_data = test->private_data; 1782 hist_field = find_var_field(test_data, var_name); 1783 if (hist_field) 1784 return hist_field; 1785 } 1786 } 1787 1788 return NULL; 1789 } 1790 1791 static struct trace_event_file *find_var_file(struct trace_array *tr, 1792 char *system, 1793 char *event_name, 1794 char *var_name) 1795 { 1796 struct hist_trigger_data *var_hist_data; 1797 struct hist_var_data *var_data; 1798 struct trace_event_file *file, *found = NULL; 1799 1800 if (system) 1801 return find_event_file(tr, system, event_name); 1802 1803 list_for_each_entry(var_data, &tr->hist_vars, list) { 1804 var_hist_data = var_data->hist_data; 1805 file = var_hist_data->event_file; 1806 if (file == found) 1807 continue; 1808 1809 if (find_var_field(var_hist_data, var_name)) { 1810 if (found) { 1811 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name)); 1812 return NULL; 1813 } 1814 1815 found = file; 1816 } 1817 } 1818 1819 return found; 1820 } 1821 1822 static struct hist_field *find_file_var(struct trace_event_file *file, 1823 const char *var_name) 1824 { 1825 struct hist_trigger_data *test_data; 1826 struct event_trigger_data *test; 1827 struct hist_field *hist_field; 1828 1829 list_for_each_entry_rcu(test, &file->triggers, list) { 1830 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 1831 test_data = test->private_data; 1832 hist_field = find_var_field(test_data, var_name); 1833 if (hist_field) 1834 return hist_field; 1835 } 1836 } 1837 1838 return NULL; 1839 } 1840 1841 static struct hist_field * 1842 find_match_var(struct hist_trigger_data *hist_data, char *var_name) 1843 { 1844 struct trace_array *tr = hist_data->event_file->tr; 1845 struct hist_field *hist_field, *found = NULL; 1846 struct trace_event_file *file; 1847 unsigned int i; 1848 1849 for (i = 0; i < hist_data->n_actions; i++) { 1850 struct action_data *data = hist_data->actions[i]; 1851 1852 if (data->handler == HANDLER_ONMATCH) { 1853 char *system = data->match_data.event_system; 1854 char *event_name = data->match_data.event; 1855 1856 file = find_var_file(tr, system, event_name, var_name); 1857 if (!file) 1858 continue; 1859 hist_field = find_file_var(file, var_name); 1860 if (hist_field) { 1861 if (found) { 1862 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, 1863 errpos(var_name)); 1864 return ERR_PTR(-EINVAL); 1865 } 1866 1867 found = hist_field; 1868 } 1869 } 1870 } 1871 return found; 1872 } 1873 1874 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data, 1875 char *system, 1876 char *event_name, 1877 char *var_name) 1878 { 1879 struct trace_array *tr = hist_data->event_file->tr; 1880 struct hist_field *hist_field = NULL; 1881 struct trace_event_file *file; 1882 1883 if (!system || !event_name) { 1884 hist_field = find_match_var(hist_data, var_name); 1885 if (IS_ERR(hist_field)) 1886 return NULL; 1887 if (hist_field) 1888 return hist_field; 1889 } 1890 1891 file = find_var_file(tr, system, event_name, var_name); 1892 if (!file) 1893 return NULL; 1894 1895 hist_field = find_file_var(file, var_name); 1896 1897 return hist_field; 1898 } 1899 1900 static u64 hist_field_var_ref(struct hist_field *hist_field, 1901 struct tracing_map_elt *elt, 1902 struct ring_buffer_event *rbe, 1903 void *event) 1904 { 1905 struct hist_elt_data *elt_data; 1906 u64 var_val = 0; 1907 1908 if (WARN_ON_ONCE(!elt)) 1909 return 
var_val; 1910 1911 elt_data = elt->private_data; 1912 var_val = elt_data->var_ref_vals[hist_field->var_ref_idx]; 1913 1914 return var_val; 1915 } 1916 1917 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key, 1918 u64 *var_ref_vals, bool self) 1919 { 1920 struct hist_trigger_data *var_data; 1921 struct tracing_map_elt *var_elt; 1922 struct hist_field *hist_field; 1923 unsigned int i, var_idx; 1924 bool resolved = true; 1925 u64 var_val = 0; 1926 1927 for (i = 0; i < hist_data->n_var_refs; i++) { 1928 hist_field = hist_data->var_refs[i]; 1929 var_idx = hist_field->var.idx; 1930 var_data = hist_field->var.hist_data; 1931 1932 if (var_data == NULL) { 1933 resolved = false; 1934 break; 1935 } 1936 1937 if ((self && var_data != hist_data) || 1938 (!self && var_data == hist_data)) 1939 continue; 1940 1941 var_elt = tracing_map_lookup(var_data->map, key); 1942 if (!var_elt) { 1943 resolved = false; 1944 break; 1945 } 1946 1947 if (!tracing_map_var_set(var_elt, var_idx)) { 1948 resolved = false; 1949 break; 1950 } 1951 1952 if (self || !hist_field->read_once) 1953 var_val = tracing_map_read_var(var_elt, var_idx); 1954 else 1955 var_val = tracing_map_read_var_once(var_elt, var_idx); 1956 1957 var_ref_vals[i] = var_val; 1958 } 1959 1960 return resolved; 1961 } 1962 1963 static const char *hist_field_name(struct hist_field *field, 1964 unsigned int level) 1965 { 1966 const char *field_name = ""; 1967 1968 if (level > 1) 1969 return field_name; 1970 1971 if (field->field) 1972 field_name = field->field->name; 1973 else if (field->flags & HIST_FIELD_FL_LOG2 || 1974 field->flags & HIST_FIELD_FL_ALIAS) 1975 field_name = hist_field_name(field->operands[0], ++level); 1976 else if (field->flags & HIST_FIELD_FL_CPU) 1977 field_name = "cpu"; 1978 else if (field->flags & HIST_FIELD_FL_EXPR || 1979 field->flags & HIST_FIELD_FL_VAR_REF) { 1980 if (field->system) { 1981 static char full_name[MAX_FILTER_STR_VAL]; 1982 1983 strcat(full_name, field->system); 1984 strcat(full_name, "."); 1985 strcat(full_name, field->event_name); 1986 strcat(full_name, "."); 1987 strcat(full_name, field->name); 1988 field_name = full_name; 1989 } else 1990 field_name = field->name; 1991 } else if (field->flags & HIST_FIELD_FL_TIMESTAMP) 1992 field_name = "common_timestamp"; 1993 1994 if (field_name == NULL) 1995 field_name = ""; 1996 1997 return field_name; 1998 } 1999 2000 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed) 2001 { 2002 hist_field_fn_t fn = NULL; 2003 2004 switch (field_size) { 2005 case 8: 2006 if (field_is_signed) 2007 fn = hist_field_s64; 2008 else 2009 fn = hist_field_u64; 2010 break; 2011 case 4: 2012 if (field_is_signed) 2013 fn = hist_field_s32; 2014 else 2015 fn = hist_field_u32; 2016 break; 2017 case 2: 2018 if (field_is_signed) 2019 fn = hist_field_s16; 2020 else 2021 fn = hist_field_u16; 2022 break; 2023 case 1: 2024 if (field_is_signed) 2025 fn = hist_field_s8; 2026 else 2027 fn = hist_field_u8; 2028 break; 2029 } 2030 2031 return fn; 2032 } 2033 2034 static int parse_map_size(char *str) 2035 { 2036 unsigned long size, map_bits; 2037 int ret; 2038 2039 strsep(&str, "="); 2040 if (!str) { 2041 ret = -EINVAL; 2042 goto out; 2043 } 2044 2045 ret = kstrtoul(str, 0, &size); 2046 if (ret) 2047 goto out; 2048 2049 map_bits = ilog2(roundup_pow_of_two(size)); 2050 if (map_bits < TRACING_MAP_BITS_MIN || 2051 map_bits > TRACING_MAP_BITS_MAX) 2052 ret = -EINVAL; 2053 else 2054 ret = map_bits; 2055 out: 2056 return ret; 2057 } 2058 2059 static void 
destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs) 2060 { 2061 unsigned int i; 2062 2063 if (!attrs) 2064 return; 2065 2066 for (i = 0; i < attrs->n_assignments; i++) 2067 kfree(attrs->assignment_str[i]); 2068 2069 for (i = 0; i < attrs->n_actions; i++) 2070 kfree(attrs->action_str[i]); 2071 2072 kfree(attrs->name); 2073 kfree(attrs->sort_key_str); 2074 kfree(attrs->keys_str); 2075 kfree(attrs->vals_str); 2076 kfree(attrs->clock); 2077 kfree(attrs); 2078 } 2079 2080 static int parse_action(char *str, struct hist_trigger_attrs *attrs) 2081 { 2082 int ret = -EINVAL; 2083 2084 if (attrs->n_actions >= HIST_ACTIONS_MAX) 2085 return ret; 2086 2087 if ((str_has_prefix(str, "onmatch(")) || 2088 (str_has_prefix(str, "onmax(")) || 2089 (str_has_prefix(str, "onchange("))) { 2090 attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL); 2091 if (!attrs->action_str[attrs->n_actions]) { 2092 ret = -ENOMEM; 2093 return ret; 2094 } 2095 attrs->n_actions++; 2096 ret = 0; 2097 } 2098 return ret; 2099 } 2100 2101 static int parse_assignment(struct trace_array *tr, 2102 char *str, struct hist_trigger_attrs *attrs) 2103 { 2104 int ret = 0; 2105 2106 if ((str_has_prefix(str, "key=")) || 2107 (str_has_prefix(str, "keys="))) { 2108 attrs->keys_str = kstrdup(str, GFP_KERNEL); 2109 if (!attrs->keys_str) { 2110 ret = -ENOMEM; 2111 goto out; 2112 } 2113 } else if ((str_has_prefix(str, "val=")) || 2114 (str_has_prefix(str, "vals=")) || 2115 (str_has_prefix(str, "values="))) { 2116 attrs->vals_str = kstrdup(str, GFP_KERNEL); 2117 if (!attrs->vals_str) { 2118 ret = -ENOMEM; 2119 goto out; 2120 } 2121 } else if (str_has_prefix(str, "sort=")) { 2122 attrs->sort_key_str = kstrdup(str, GFP_KERNEL); 2123 if (!attrs->sort_key_str) { 2124 ret = -ENOMEM; 2125 goto out; 2126 } 2127 } else if (str_has_prefix(str, "name=")) { 2128 attrs->name = kstrdup(str, GFP_KERNEL); 2129 if (!attrs->name) { 2130 ret = -ENOMEM; 2131 goto out; 2132 } 2133 } else if (str_has_prefix(str, "clock=")) { 2134 strsep(&str, "="); 2135 if (!str) { 2136 ret = -EINVAL; 2137 goto out; 2138 } 2139 2140 str = strstrip(str); 2141 attrs->clock = kstrdup(str, GFP_KERNEL); 2142 if (!attrs->clock) { 2143 ret = -ENOMEM; 2144 goto out; 2145 } 2146 } else if (str_has_prefix(str, "size=")) { 2147 int map_bits = parse_map_size(str); 2148 2149 if (map_bits < 0) { 2150 ret = map_bits; 2151 goto out; 2152 } 2153 attrs->map_bits = map_bits; 2154 } else { 2155 char *assignment; 2156 2157 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) { 2158 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str)); 2159 ret = -EINVAL; 2160 goto out; 2161 } 2162 2163 assignment = kstrdup(str, GFP_KERNEL); 2164 if (!assignment) { 2165 ret = -ENOMEM; 2166 goto out; 2167 } 2168 2169 attrs->assignment_str[attrs->n_assignments++] = assignment; 2170 } 2171 out: 2172 return ret; 2173 } 2174 2175 static struct hist_trigger_attrs * 2176 parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str) 2177 { 2178 struct hist_trigger_attrs *attrs; 2179 int ret = 0; 2180 2181 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 2182 if (!attrs) 2183 return ERR_PTR(-ENOMEM); 2184 2185 while (trigger_str) { 2186 char *str = strsep(&trigger_str, ":"); 2187 2188 if (strchr(str, '=')) { 2189 ret = parse_assignment(tr, str, attrs); 2190 if (ret) 2191 goto free; 2192 } else if (strcmp(str, "pause") == 0) 2193 attrs->pause = true; 2194 else if ((strcmp(str, "cont") == 0) || 2195 (strcmp(str, "continue") == 0)) 2196 attrs->cont = true; 2197 else if (strcmp(str, "clear") == 0) 2198 attrs->clear = true; 2199 
else { 2200 ret = parse_action(str, attrs); 2201 if (ret) 2202 goto free; 2203 } 2204 } 2205 2206 if (!attrs->keys_str) { 2207 ret = -EINVAL; 2208 goto free; 2209 } 2210 2211 if (!attrs->clock) { 2212 attrs->clock = kstrdup("global", GFP_KERNEL); 2213 if (!attrs->clock) { 2214 ret = -ENOMEM; 2215 goto free; 2216 } 2217 } 2218 2219 return attrs; 2220 free: 2221 destroy_hist_trigger_attrs(attrs); 2222 2223 return ERR_PTR(ret); 2224 } 2225 2226 static inline void save_comm(char *comm, struct task_struct *task) 2227 { 2228 if (!task->pid) { 2229 strcpy(comm, "<idle>"); 2230 return; 2231 } 2232 2233 if (WARN_ON_ONCE(task->pid < 0)) { 2234 strcpy(comm, "<XXX>"); 2235 return; 2236 } 2237 2238 strncpy(comm, task->comm, TASK_COMM_LEN); 2239 } 2240 2241 static void hist_elt_data_free(struct hist_elt_data *elt_data) 2242 { 2243 unsigned int i; 2244 2245 for (i = 0; i < SYNTH_FIELDS_MAX; i++) 2246 kfree(elt_data->field_var_str[i]); 2247 2248 kfree(elt_data->comm); 2249 kfree(elt_data); 2250 } 2251 2252 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt) 2253 { 2254 struct hist_elt_data *elt_data = elt->private_data; 2255 2256 hist_elt_data_free(elt_data); 2257 } 2258 2259 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt) 2260 { 2261 struct hist_trigger_data *hist_data = elt->map->private_data; 2262 unsigned int size = TASK_COMM_LEN; 2263 struct hist_elt_data *elt_data; 2264 struct hist_field *key_field; 2265 unsigned int i, n_str; 2266 2267 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); 2268 if (!elt_data) 2269 return -ENOMEM; 2270 2271 for_each_hist_key_field(i, hist_data) { 2272 key_field = hist_data->fields[i]; 2273 2274 if (key_field->flags & HIST_FIELD_FL_EXECNAME) { 2275 elt_data->comm = kzalloc(size, GFP_KERNEL); 2276 if (!elt_data->comm) { 2277 kfree(elt_data); 2278 return -ENOMEM; 2279 } 2280 break; 2281 } 2282 } 2283 2284 n_str = hist_data->n_field_var_str + hist_data->n_save_var_str; 2285 2286 size = STR_VAR_LEN_MAX; 2287 2288 for (i = 0; i < n_str; i++) { 2289 elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL); 2290 if (!elt_data->field_var_str[i]) { 2291 hist_elt_data_free(elt_data); 2292 return -ENOMEM; 2293 } 2294 } 2295 2296 elt->private_data = elt_data; 2297 2298 return 0; 2299 } 2300 2301 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt) 2302 { 2303 struct hist_elt_data *elt_data = elt->private_data; 2304 2305 if (elt_data->comm) 2306 save_comm(elt_data->comm, current); 2307 } 2308 2309 static const struct tracing_map_ops hist_trigger_elt_data_ops = { 2310 .elt_alloc = hist_trigger_elt_data_alloc, 2311 .elt_free = hist_trigger_elt_data_free, 2312 .elt_init = hist_trigger_elt_data_init, 2313 }; 2314 2315 static const char *get_hist_field_flags(struct hist_field *hist_field) 2316 { 2317 const char *flags_str = NULL; 2318 2319 if (hist_field->flags & HIST_FIELD_FL_HEX) 2320 flags_str = "hex"; 2321 else if (hist_field->flags & HIST_FIELD_FL_SYM) 2322 flags_str = "sym"; 2323 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET) 2324 flags_str = "sym-offset"; 2325 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME) 2326 flags_str = "execname"; 2327 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL) 2328 flags_str = "syscall"; 2329 else if (hist_field->flags & HIST_FIELD_FL_LOG2) 2330 flags_str = "log2"; 2331 else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS) 2332 flags_str = "usecs"; 2333 2334 return flags_str; 2335 } 2336 2337 static void expr_field_str(struct hist_field *field, char *expr) 2338 { 2339 if 
(field->flags & HIST_FIELD_FL_VAR_REF) 2340 strcat(expr, "$"); 2341 2342 strcat(expr, hist_field_name(field, 0)); 2343 2344 if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) { 2345 const char *flags_str = get_hist_field_flags(field); 2346 2347 if (flags_str) { 2348 strcat(expr, "."); 2349 strcat(expr, flags_str); 2350 } 2351 } 2352 } 2353 2354 static char *expr_str(struct hist_field *field, unsigned int level) 2355 { 2356 char *expr; 2357 2358 if (level > 1) 2359 return NULL; 2360 2361 expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); 2362 if (!expr) 2363 return NULL; 2364 2365 if (!field->operands[0]) { 2366 expr_field_str(field, expr); 2367 return expr; 2368 } 2369 2370 if (field->operator == FIELD_OP_UNARY_MINUS) { 2371 char *subexpr; 2372 2373 strcat(expr, "-("); 2374 subexpr = expr_str(field->operands[0], ++level); 2375 if (!subexpr) { 2376 kfree(expr); 2377 return NULL; 2378 } 2379 strcat(expr, subexpr); 2380 strcat(expr, ")"); 2381 2382 kfree(subexpr); 2383 2384 return expr; 2385 } 2386 2387 expr_field_str(field->operands[0], expr); 2388 2389 switch (field->operator) { 2390 case FIELD_OP_MINUS: 2391 strcat(expr, "-"); 2392 break; 2393 case FIELD_OP_PLUS: 2394 strcat(expr, "+"); 2395 break; 2396 default: 2397 kfree(expr); 2398 return NULL; 2399 } 2400 2401 expr_field_str(field->operands[1], expr); 2402 2403 return expr; 2404 } 2405 2406 static int contains_operator(char *str) 2407 { 2408 enum field_op_id field_op = FIELD_OP_NONE; 2409 char *op; 2410 2411 op = strpbrk(str, "+-"); 2412 if (!op) 2413 return FIELD_OP_NONE; 2414 2415 switch (*op) { 2416 case '-': 2417 if (*str == '-') 2418 field_op = FIELD_OP_UNARY_MINUS; 2419 else 2420 field_op = FIELD_OP_MINUS; 2421 break; 2422 case '+': 2423 field_op = FIELD_OP_PLUS; 2424 break; 2425 default: 2426 break; 2427 } 2428 2429 return field_op; 2430 } 2431 2432 static void __destroy_hist_field(struct hist_field *hist_field) 2433 { 2434 kfree(hist_field->var.name); 2435 kfree(hist_field->name); 2436 kfree(hist_field->type); 2437 2438 kfree(hist_field); 2439 } 2440 2441 static void destroy_hist_field(struct hist_field *hist_field, 2442 unsigned int level) 2443 { 2444 unsigned int i; 2445 2446 if (level > 3) 2447 return; 2448 2449 if (!hist_field) 2450 return; 2451 2452 if (hist_field->flags & HIST_FIELD_FL_VAR_REF) 2453 return; /* var refs will be destroyed separately */ 2454 2455 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) 2456 destroy_hist_field(hist_field->operands[i], level + 1); 2457 2458 __destroy_hist_field(hist_field); 2459 } 2460 2461 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, 2462 struct ftrace_event_field *field, 2463 unsigned long flags, 2464 char *var_name) 2465 { 2466 struct hist_field *hist_field; 2467 2468 if (field && is_function_field(field)) 2469 return NULL; 2470 2471 hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL); 2472 if (!hist_field) 2473 return NULL; 2474 2475 hist_field->hist_data = hist_data; 2476 2477 if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS) 2478 goto out; /* caller will populate */ 2479 2480 if (flags & HIST_FIELD_FL_VAR_REF) { 2481 hist_field->fn = hist_field_var_ref; 2482 goto out; 2483 } 2484 2485 if (flags & HIST_FIELD_FL_HITCOUNT) { 2486 hist_field->fn = hist_field_counter; 2487 hist_field->size = sizeof(u64); 2488 hist_field->type = kstrdup("u64", GFP_KERNEL); 2489 if (!hist_field->type) 2490 goto free; 2491 goto out; 2492 } 2493 2494 if (flags & HIST_FIELD_FL_STACKTRACE) { 2495 hist_field->fn = hist_field_none; 2496 goto out; 
2497 } 2498 2499 if (flags & HIST_FIELD_FL_LOG2) { 2500 unsigned long fl = flags & ~HIST_FIELD_FL_LOG2; 2501 hist_field->fn = hist_field_log2; 2502 hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL); 2503 hist_field->size = hist_field->operands[0]->size; 2504 hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL); 2505 if (!hist_field->type) 2506 goto free; 2507 goto out; 2508 } 2509 2510 if (flags & HIST_FIELD_FL_TIMESTAMP) { 2511 hist_field->fn = hist_field_timestamp; 2512 hist_field->size = sizeof(u64); 2513 hist_field->type = kstrdup("u64", GFP_KERNEL); 2514 if (!hist_field->type) 2515 goto free; 2516 goto out; 2517 } 2518 2519 if (flags & HIST_FIELD_FL_CPU) { 2520 hist_field->fn = hist_field_cpu; 2521 hist_field->size = sizeof(int); 2522 hist_field->type = kstrdup("unsigned int", GFP_KERNEL); 2523 if (!hist_field->type) 2524 goto free; 2525 goto out; 2526 } 2527 2528 if (WARN_ON_ONCE(!field)) 2529 goto out; 2530 2531 if (is_string_field(field)) { 2532 flags |= HIST_FIELD_FL_STRING; 2533 2534 hist_field->size = MAX_FILTER_STR_VAL; 2535 hist_field->type = kstrdup(field->type, GFP_KERNEL); 2536 if (!hist_field->type) 2537 goto free; 2538 2539 if (field->filter_type == FILTER_STATIC_STRING) 2540 hist_field->fn = hist_field_string; 2541 else if (field->filter_type == FILTER_DYN_STRING) 2542 hist_field->fn = hist_field_dynstring; 2543 else 2544 hist_field->fn = hist_field_pstring; 2545 } else { 2546 hist_field->size = field->size; 2547 hist_field->is_signed = field->is_signed; 2548 hist_field->type = kstrdup(field->type, GFP_KERNEL); 2549 if (!hist_field->type) 2550 goto free; 2551 2552 hist_field->fn = select_value_fn(field->size, 2553 field->is_signed); 2554 if (!hist_field->fn) { 2555 destroy_hist_field(hist_field, 0); 2556 return NULL; 2557 } 2558 } 2559 out: 2560 hist_field->field = field; 2561 hist_field->flags = flags; 2562 2563 if (var_name) { 2564 hist_field->var.name = kstrdup(var_name, GFP_KERNEL); 2565 if (!hist_field->var.name) 2566 goto free; 2567 } 2568 2569 return hist_field; 2570 free: 2571 destroy_hist_field(hist_field, 0); 2572 return NULL; 2573 } 2574 2575 static void destroy_hist_fields(struct hist_trigger_data *hist_data) 2576 { 2577 unsigned int i; 2578 2579 for (i = 0; i < HIST_FIELDS_MAX; i++) { 2580 if (hist_data->fields[i]) { 2581 destroy_hist_field(hist_data->fields[i], 0); 2582 hist_data->fields[i] = NULL; 2583 } 2584 } 2585 2586 for (i = 0; i < hist_data->n_var_refs; i++) { 2587 WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF)); 2588 __destroy_hist_field(hist_data->var_refs[i]); 2589 hist_data->var_refs[i] = NULL; 2590 } 2591 } 2592 2593 static int init_var_ref(struct hist_field *ref_field, 2594 struct hist_field *var_field, 2595 char *system, char *event_name) 2596 { 2597 int err = 0; 2598 2599 ref_field->var.idx = var_field->var.idx; 2600 ref_field->var.hist_data = var_field->hist_data; 2601 ref_field->size = var_field->size; 2602 ref_field->is_signed = var_field->is_signed; 2603 ref_field->flags |= var_field->flags & 2604 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 2605 2606 if (system) { 2607 ref_field->system = kstrdup(system, GFP_KERNEL); 2608 if (!ref_field->system) 2609 return -ENOMEM; 2610 } 2611 2612 if (event_name) { 2613 ref_field->event_name = kstrdup(event_name, GFP_KERNEL); 2614 if (!ref_field->event_name) { 2615 err = -ENOMEM; 2616 goto free; 2617 } 2618 } 2619 2620 if (var_field->var.name) { 2621 ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL); 2622 if 
(!ref_field->name) { 2623 err = -ENOMEM; 2624 goto free; 2625 } 2626 } else if (var_field->name) { 2627 ref_field->name = kstrdup(var_field->name, GFP_KERNEL); 2628 if (!ref_field->name) { 2629 err = -ENOMEM; 2630 goto free; 2631 } 2632 } 2633 2634 ref_field->type = kstrdup(var_field->type, GFP_KERNEL); 2635 if (!ref_field->type) { 2636 err = -ENOMEM; 2637 goto free; 2638 } 2639 out: 2640 return err; 2641 free: 2642 kfree(ref_field->system); 2643 kfree(ref_field->event_name); 2644 kfree(ref_field->name); 2645 2646 goto out; 2647 } 2648 2649 /** 2650 * create_var_ref - Create a variable reference and attach it to trigger 2651 * @hist_data: The trigger that will be referencing the variable 2652 * @var_field: The VAR field to create a reference to 2653 * @system: The optional system string 2654 * @event_name: The optional event_name string 2655 * 2656 * Given a variable hist_field, create a VAR_REF hist_field that 2657 * represents a reference to it. 2658 * 2659 * This function also adds the reference to the trigger that 2660 * now references the variable. 2661 * 2662 * Return: The VAR_REF field if successful, NULL if not 2663 */ 2664 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data, 2665 struct hist_field *var_field, 2666 char *system, char *event_name) 2667 { 2668 unsigned long flags = HIST_FIELD_FL_VAR_REF; 2669 struct hist_field *ref_field; 2670 2671 ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL); 2672 if (ref_field) { 2673 if (init_var_ref(ref_field, var_field, system, event_name)) { 2674 destroy_hist_field(ref_field, 0); 2675 return NULL; 2676 } 2677 2678 hist_data->var_refs[hist_data->n_var_refs] = ref_field; 2679 ref_field->var_ref_idx = hist_data->n_var_refs++; 2680 } 2681 2682 return ref_field; 2683 } 2684 2685 static bool is_var_ref(char *var_name) 2686 { 2687 if (!var_name || strlen(var_name) < 2 || var_name[0] != '$') 2688 return false; 2689 2690 return true; 2691 } 2692 2693 static char *field_name_from_var(struct hist_trigger_data *hist_data, 2694 char *var_name) 2695 { 2696 char *name, *field; 2697 unsigned int i; 2698 2699 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) { 2700 name = hist_data->attrs->var_defs.name[i]; 2701 2702 if (strcmp(var_name, name) == 0) { 2703 field = hist_data->attrs->var_defs.expr[i]; 2704 if (contains_operator(field) || is_var_ref(field)) 2705 continue; 2706 return field; 2707 } 2708 } 2709 2710 return NULL; 2711 } 2712 2713 static char *local_field_var_ref(struct hist_trigger_data *hist_data, 2714 char *system, char *event_name, 2715 char *var_name) 2716 { 2717 struct trace_event_call *call; 2718 2719 if (system && event_name) { 2720 call = hist_data->event_file->event_call; 2721 2722 if (strcmp(system, call->class->system) != 0) 2723 return NULL; 2724 2725 if (strcmp(event_name, trace_event_name(call)) != 0) 2726 return NULL; 2727 } 2728 2729 if (!!system != !!event_name) 2730 return NULL; 2731 2732 if (!is_var_ref(var_name)) 2733 return NULL; 2734 2735 var_name++; 2736 2737 return field_name_from_var(hist_data, var_name); 2738 } 2739 2740 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data, 2741 char *system, char *event_name, 2742 char *var_name) 2743 { 2744 struct hist_field *var_field = NULL, *ref_field = NULL; 2745 struct trace_array *tr = hist_data->event_file->tr; 2746 2747 if (!is_var_ref(var_name)) 2748 return NULL; 2749 2750 var_name++; 2751 2752 var_field = find_event_var(hist_data, system, event_name, var_name); 2753 if (var_field) 2754 ref_field = 
create_var_ref(hist_data, var_field, 2755 system, event_name); 2756 2757 if (!ref_field) 2758 hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name)); 2759 2760 return ref_field; 2761 } 2762 2763 static struct ftrace_event_field * 2764 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, 2765 char *field_str, unsigned long *flags) 2766 { 2767 struct ftrace_event_field *field = NULL; 2768 char *field_name, *modifier, *str; 2769 struct trace_array *tr = file->tr; 2770 2771 modifier = str = kstrdup(field_str, GFP_KERNEL); 2772 if (!modifier) 2773 return ERR_PTR(-ENOMEM); 2774 2775 field_name = strsep(&modifier, "."); 2776 if (modifier) { 2777 if (strcmp(modifier, "hex") == 0) 2778 *flags |= HIST_FIELD_FL_HEX; 2779 else if (strcmp(modifier, "sym") == 0) 2780 *flags |= HIST_FIELD_FL_SYM; 2781 else if (strcmp(modifier, "sym-offset") == 0) 2782 *flags |= HIST_FIELD_FL_SYM_OFFSET; 2783 else if ((strcmp(modifier, "execname") == 0) && 2784 (strcmp(field_name, "common_pid") == 0)) 2785 *flags |= HIST_FIELD_FL_EXECNAME; 2786 else if (strcmp(modifier, "syscall") == 0) 2787 *flags |= HIST_FIELD_FL_SYSCALL; 2788 else if (strcmp(modifier, "log2") == 0) 2789 *flags |= HIST_FIELD_FL_LOG2; 2790 else if (strcmp(modifier, "usecs") == 0) 2791 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS; 2792 else { 2793 hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier)); 2794 field = ERR_PTR(-EINVAL); 2795 goto out; 2796 } 2797 } 2798 2799 if (strcmp(field_name, "common_timestamp") == 0) { 2800 *flags |= HIST_FIELD_FL_TIMESTAMP; 2801 hist_data->enable_timestamps = true; 2802 if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS) 2803 hist_data->attrs->ts_in_usecs = true; 2804 } else if (strcmp(field_name, "cpu") == 0) 2805 *flags |= HIST_FIELD_FL_CPU; 2806 else { 2807 field = trace_find_event_field(file->event_call, field_name); 2808 if (!field || !field->size) { 2809 hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name)); 2810 field = ERR_PTR(-EINVAL); 2811 goto out; 2812 } 2813 } 2814 out: 2815 kfree(str); 2816 2817 return field; 2818 } 2819 2820 static struct hist_field *create_alias(struct hist_trigger_data *hist_data, 2821 struct hist_field *var_ref, 2822 char *var_name) 2823 { 2824 struct hist_field *alias = NULL; 2825 unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR; 2826 2827 alias = create_hist_field(hist_data, NULL, flags, var_name); 2828 if (!alias) 2829 return NULL; 2830 2831 alias->fn = var_ref->fn; 2832 alias->operands[0] = var_ref; 2833 2834 if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) { 2835 destroy_hist_field(alias, 0); 2836 return NULL; 2837 } 2838 2839 alias->var_ref_idx = var_ref->var_ref_idx; 2840 2841 return alias; 2842 } 2843 2844 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data, 2845 struct trace_event_file *file, char *str, 2846 unsigned long *flags, char *var_name) 2847 { 2848 char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str; 2849 struct ftrace_event_field *field = NULL; 2850 struct hist_field *hist_field = NULL; 2851 int ret = 0; 2852 2853 s = strchr(str, '.'); 2854 if (s) { 2855 s = strchr(++s, '.'); 2856 if (s) { 2857 ref_system = strsep(&str, "."); 2858 if (!str) { 2859 ret = -EINVAL; 2860 goto out; 2861 } 2862 ref_event = strsep(&str, "."); 2863 if (!str) { 2864 ret = -EINVAL; 2865 goto out; 2866 } 2867 ref_var = str; 2868 } 2869 } 2870 2871 s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var); 2872 if (!s) { 2873 hist_field = parse_var_ref(hist_data, ref_system, 2874 ref_event, 
ref_var); 2875 if (hist_field) { 2876 if (var_name) { 2877 hist_field = create_alias(hist_data, hist_field, var_name); 2878 if (!hist_field) { 2879 ret = -ENOMEM; 2880 goto out; 2881 } 2882 } 2883 return hist_field; 2884 } 2885 } else 2886 str = s; 2887 2888 field = parse_field(hist_data, file, str, flags); 2889 if (IS_ERR(field)) { 2890 ret = PTR_ERR(field); 2891 goto out; 2892 } 2893 2894 hist_field = create_hist_field(hist_data, field, *flags, var_name); 2895 if (!hist_field) { 2896 ret = -ENOMEM; 2897 goto out; 2898 } 2899 2900 return hist_field; 2901 out: 2902 return ERR_PTR(ret); 2903 } 2904 2905 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, 2906 struct trace_event_file *file, 2907 char *str, unsigned long flags, 2908 char *var_name, unsigned int level); 2909 2910 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data, 2911 struct trace_event_file *file, 2912 char *str, unsigned long flags, 2913 char *var_name, unsigned int level) 2914 { 2915 struct hist_field *operand1, *expr = NULL; 2916 unsigned long operand_flags; 2917 int ret = 0; 2918 char *s; 2919 2920 /* we support only -(xxx) i.e. explicit parens required */ 2921 2922 if (level > 3) { 2923 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); 2924 ret = -EINVAL; 2925 goto free; 2926 } 2927 2928 str++; /* skip leading '-' */ 2929 2930 s = strchr(str, '('); 2931 if (s) 2932 str++; 2933 else { 2934 ret = -EINVAL; 2935 goto free; 2936 } 2937 2938 s = strrchr(str, ')'); 2939 if (s) 2940 *s = '\0'; 2941 else { 2942 ret = -EINVAL; /* no closing ')' */ 2943 goto free; 2944 } 2945 2946 flags |= HIST_FIELD_FL_EXPR; 2947 expr = create_hist_field(hist_data, NULL, flags, var_name); 2948 if (!expr) { 2949 ret = -ENOMEM; 2950 goto free; 2951 } 2952 2953 operand_flags = 0; 2954 operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level); 2955 if (IS_ERR(operand1)) { 2956 ret = PTR_ERR(operand1); 2957 goto free; 2958 } 2959 2960 expr->flags |= operand1->flags & 2961 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 2962 expr->fn = hist_field_unary_minus; 2963 expr->operands[0] = operand1; 2964 expr->operator = FIELD_OP_UNARY_MINUS; 2965 expr->name = expr_str(expr, 0); 2966 expr->type = kstrdup(operand1->type, GFP_KERNEL); 2967 if (!expr->type) { 2968 ret = -ENOMEM; 2969 goto free; 2970 } 2971 2972 return expr; 2973 free: 2974 destroy_hist_field(expr, 0); 2975 return ERR_PTR(ret); 2976 } 2977 2978 static int check_expr_operands(struct trace_array *tr, 2979 struct hist_field *operand1, 2980 struct hist_field *operand2) 2981 { 2982 unsigned long operand1_flags = operand1->flags; 2983 unsigned long operand2_flags = operand2->flags; 2984 2985 if ((operand1_flags & HIST_FIELD_FL_VAR_REF) || 2986 (operand1_flags & HIST_FIELD_FL_ALIAS)) { 2987 struct hist_field *var; 2988 2989 var = find_var_field(operand1->var.hist_data, operand1->name); 2990 if (!var) 2991 return -EINVAL; 2992 operand1_flags = var->flags; 2993 } 2994 2995 if ((operand2_flags & HIST_FIELD_FL_VAR_REF) || 2996 (operand2_flags & HIST_FIELD_FL_ALIAS)) { 2997 struct hist_field *var; 2998 2999 var = find_var_field(operand2->var.hist_data, operand2->name); 3000 if (!var) 3001 return -EINVAL; 3002 operand2_flags = var->flags; 3003 } 3004 3005 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) != 3006 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) { 3007 hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0); 3008 return -EINVAL; 3009 } 3010 3011 return 0; 3012 } 3013 3014 static struct hist_field *parse_expr(struct 
hist_trigger_data *hist_data, 3015 struct trace_event_file *file, 3016 char *str, unsigned long flags, 3017 char *var_name, unsigned int level) 3018 { 3019 struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL; 3020 unsigned long operand_flags; 3021 int field_op, ret = -EINVAL; 3022 char *sep, *operand1_str; 3023 3024 if (level > 3) { 3025 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); 3026 return ERR_PTR(-EINVAL); 3027 } 3028 3029 field_op = contains_operator(str); 3030 3031 if (field_op == FIELD_OP_NONE) 3032 return parse_atom(hist_data, file, str, &flags, var_name); 3033 3034 if (field_op == FIELD_OP_UNARY_MINUS) 3035 return parse_unary(hist_data, file, str, flags, var_name, ++level); 3036 3037 switch (field_op) { 3038 case FIELD_OP_MINUS: 3039 sep = "-"; 3040 break; 3041 case FIELD_OP_PLUS: 3042 sep = "+"; 3043 break; 3044 default: 3045 goto free; 3046 } 3047 3048 operand1_str = strsep(&str, sep); 3049 if (!operand1_str || !str) 3050 goto free; 3051 3052 operand_flags = 0; 3053 operand1 = parse_atom(hist_data, file, operand1_str, 3054 &operand_flags, NULL); 3055 if (IS_ERR(operand1)) { 3056 ret = PTR_ERR(operand1); 3057 operand1 = NULL; 3058 goto free; 3059 } 3060 3061 /* rest of string could be another expression e.g. b+c in a+b+c */ 3062 operand_flags = 0; 3063 operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level); 3064 if (IS_ERR(operand2)) { 3065 ret = PTR_ERR(operand2); 3066 operand2 = NULL; 3067 goto free; 3068 } 3069 3070 ret = check_expr_operands(file->tr, operand1, operand2); 3071 if (ret) 3072 goto free; 3073 3074 flags |= HIST_FIELD_FL_EXPR; 3075 3076 flags |= operand1->flags & 3077 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 3078 3079 expr = create_hist_field(hist_data, NULL, flags, var_name); 3080 if (!expr) { 3081 ret = -ENOMEM; 3082 goto free; 3083 } 3084 3085 operand1->read_once = true; 3086 operand2->read_once = true; 3087 3088 expr->operands[0] = operand1; 3089 expr->operands[1] = operand2; 3090 expr->operator = field_op; 3091 expr->name = expr_str(expr, 0); 3092 expr->type = kstrdup(operand1->type, GFP_KERNEL); 3093 if (!expr->type) { 3094 ret = -ENOMEM; 3095 goto free; 3096 } 3097 3098 switch (field_op) { 3099 case FIELD_OP_MINUS: 3100 expr->fn = hist_field_minus; 3101 break; 3102 case FIELD_OP_PLUS: 3103 expr->fn = hist_field_plus; 3104 break; 3105 default: 3106 ret = -EINVAL; 3107 goto free; 3108 } 3109 3110 return expr; 3111 free: 3112 destroy_hist_field(operand1, 0); 3113 destroy_hist_field(operand2, 0); 3114 destroy_hist_field(expr, 0); 3115 3116 return ERR_PTR(ret); 3117 } 3118 3119 static char *find_trigger_filter(struct hist_trigger_data *hist_data, 3120 struct trace_event_file *file) 3121 { 3122 struct event_trigger_data *test; 3123 3124 list_for_each_entry_rcu(test, &file->triggers, list) { 3125 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 3126 if (test->private_data == hist_data) 3127 return test->filter_str; 3128 } 3129 } 3130 3131 return NULL; 3132 } 3133 3134 static struct event_command trigger_hist_cmd; 3135 static int event_hist_trigger_func(struct event_command *cmd_ops, 3136 struct trace_event_file *file, 3137 char *glob, char *cmd, char *param); 3138 3139 static bool compatible_keys(struct hist_trigger_data *target_hist_data, 3140 struct hist_trigger_data *hist_data, 3141 unsigned int n_keys) 3142 { 3143 struct hist_field *target_hist_field, *hist_field; 3144 unsigned int n, i, j; 3145 3146 if (hist_data->n_fields - hist_data->n_vals != n_keys) 3147 return false; 3148 3149 i = 
hist_data->n_vals; 3150 j = target_hist_data->n_vals; 3151 3152 for (n = 0; n < n_keys; n++) { 3153 hist_field = hist_data->fields[i + n]; 3154 target_hist_field = target_hist_data->fields[j + n]; 3155 3156 if (strcmp(hist_field->type, target_hist_field->type) != 0) 3157 return false; 3158 if (hist_field->size != target_hist_field->size) 3159 return false; 3160 if (hist_field->is_signed != target_hist_field->is_signed) 3161 return false; 3162 } 3163 3164 return true; 3165 } 3166 3167 static struct hist_trigger_data * 3168 find_compatible_hist(struct hist_trigger_data *target_hist_data, 3169 struct trace_event_file *file) 3170 { 3171 struct hist_trigger_data *hist_data; 3172 struct event_trigger_data *test; 3173 unsigned int n_keys; 3174 3175 n_keys = target_hist_data->n_fields - target_hist_data->n_vals; 3176 3177 list_for_each_entry_rcu(test, &file->triggers, list) { 3178 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 3179 hist_data = test->private_data; 3180 3181 if (compatible_keys(target_hist_data, hist_data, n_keys)) 3182 return hist_data; 3183 } 3184 } 3185 3186 return NULL; 3187 } 3188 3189 static struct trace_event_file *event_file(struct trace_array *tr, 3190 char *system, char *event_name) 3191 { 3192 struct trace_event_file *file; 3193 3194 file = __find_event_file(tr, system, event_name); 3195 if (!file) 3196 return ERR_PTR(-EINVAL); 3197 3198 return file; 3199 } 3200 3201 static struct hist_field * 3202 find_synthetic_field_var(struct hist_trigger_data *target_hist_data, 3203 char *system, char *event_name, char *field_name) 3204 { 3205 struct hist_field *event_var; 3206 char *synthetic_name; 3207 3208 synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); 3209 if (!synthetic_name) 3210 return ERR_PTR(-ENOMEM); 3211 3212 strcpy(synthetic_name, "synthetic_"); 3213 strcat(synthetic_name, field_name); 3214 3215 event_var = find_event_var(target_hist_data, system, event_name, synthetic_name); 3216 3217 kfree(synthetic_name); 3218 3219 return event_var; 3220 } 3221 3222 /** 3223 * create_field_var_hist - Automatically create a histogram and var for a field 3224 * @target_hist_data: The target hist trigger 3225 * @subsys_name: Optional subsystem name 3226 * @event_name: Optional event name 3227 * @field_name: The name of the field (and the resulting variable) 3228 * 3229 * Hist trigger actions fetch data from variables, not directly from 3230 * events. However, for convenience, users are allowed to directly 3231 * specify an event field in an action, which will be automatically 3232 * converted into a variable on their behalf. 3233 3234 * If a user specifies a field on an event that isn't the event the 3235 * histogram currently being defined (the target event histogram), the 3236 * only way that can be accomplished is if a new hist trigger is 3237 * created and the field variable defined on that. 3238 * 3239 * This function creates a new histogram compatible with the target 3240 * event (meaning a histogram with the same key as the target 3241 * histogram), and creates a variable for the specified field, but 3242 * with 'synthetic_' prepended to the variable name in order to avoid 3243 * collision with normal field variables. 3244 * 3245 * Return: The variable created for the field. 
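 *
 * (Illustrative example, not from the original comment: if the
 * compatible histogram found on the other event is keyed on "pid" and
 * the action references that event's "prio" field, a trigger
 * specification equivalent to "keys=pid:synthetic_prio=prio [if <filter>]"
 * is created on that event behind the scenes.)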
3246 */ 3247 static struct hist_field * 3248 create_field_var_hist(struct hist_trigger_data *target_hist_data, 3249 char *subsys_name, char *event_name, char *field_name) 3250 { 3251 struct trace_array *tr = target_hist_data->event_file->tr; 3252 struct hist_field *event_var = ERR_PTR(-EINVAL); 3253 struct hist_trigger_data *hist_data; 3254 unsigned int i, n, first = true; 3255 struct field_var_hist *var_hist; 3256 struct trace_event_file *file; 3257 struct hist_field *key_field; 3258 char *saved_filter; 3259 char *cmd; 3260 int ret; 3261 3262 if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) { 3263 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name)); 3264 return ERR_PTR(-EINVAL); 3265 } 3266 3267 file = event_file(tr, subsys_name, event_name); 3268 3269 if (IS_ERR(file)) { 3270 hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name)); 3271 ret = PTR_ERR(file); 3272 return ERR_PTR(ret); 3273 } 3274 3275 /* 3276 * Look for a histogram compatible with target. We'll use the 3277 * found histogram specification to create a new matching 3278 * histogram with our variable on it. target_hist_data is not 3279 * yet a registered histogram so we can't use that. 3280 */ 3281 hist_data = find_compatible_hist(target_hist_data, file); 3282 if (!hist_data) { 3283 hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name)); 3284 return ERR_PTR(-EINVAL); 3285 } 3286 3287 /* See if a synthetic field variable has already been created */ 3288 event_var = find_synthetic_field_var(target_hist_data, subsys_name, 3289 event_name, field_name); 3290 if (!IS_ERR_OR_NULL(event_var)) 3291 return event_var; 3292 3293 var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL); 3294 if (!var_hist) 3295 return ERR_PTR(-ENOMEM); 3296 3297 cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); 3298 if (!cmd) { 3299 kfree(var_hist); 3300 return ERR_PTR(-ENOMEM); 3301 } 3302 3303 /* Use the same keys as the compatible histogram */ 3304 strcat(cmd, "keys="); 3305 3306 for_each_hist_key_field(i, hist_data) { 3307 key_field = hist_data->fields[i]; 3308 if (!first) 3309 strcat(cmd, ","); 3310 strcat(cmd, key_field->field->name); 3311 first = false; 3312 } 3313 3314 /* Create the synthetic field variable specification */ 3315 strcat(cmd, ":synthetic_"); 3316 strcat(cmd, field_name); 3317 strcat(cmd, "="); 3318 strcat(cmd, field_name); 3319 3320 /* Use the same filter as the compatible histogram */ 3321 saved_filter = find_trigger_filter(hist_data, file); 3322 if (saved_filter) { 3323 strcat(cmd, " if "); 3324 strcat(cmd, saved_filter); 3325 } 3326 3327 var_hist->cmd = kstrdup(cmd, GFP_KERNEL); 3328 if (!var_hist->cmd) { 3329 kfree(cmd); 3330 kfree(var_hist); 3331 return ERR_PTR(-ENOMEM); 3332 } 3333 3334 /* Save the compatible histogram information */ 3335 var_hist->hist_data = hist_data; 3336 3337 /* Create the new histogram with our variable */ 3338 ret = event_hist_trigger_func(&trigger_hist_cmd, file, 3339 "", "hist", cmd); 3340 if (ret) { 3341 kfree(cmd); 3342 kfree(var_hist->cmd); 3343 kfree(var_hist); 3344 hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name)); 3345 return ERR_PTR(ret); 3346 } 3347 3348 kfree(cmd); 3349 3350 /* If we can't find the variable, something went wrong */ 3351 event_var = find_synthetic_field_var(target_hist_data, subsys_name, 3352 event_name, field_name); 3353 if (IS_ERR_OR_NULL(event_var)) { 3354 kfree(var_hist->cmd); 3355 kfree(var_hist); 3356 hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name)); 3357 return ERR_PTR(-EINVAL); 3358 } 3359 3360 n = 
target_hist_data->n_field_var_hists; 3361 target_hist_data->field_var_hists[n] = var_hist; 3362 target_hist_data->n_field_var_hists++; 3363 3364 return event_var; 3365 } 3366 3367 static struct hist_field * 3368 find_target_event_var(struct hist_trigger_data *hist_data, 3369 char *subsys_name, char *event_name, char *var_name) 3370 { 3371 struct trace_event_file *file = hist_data->event_file; 3372 struct hist_field *hist_field = NULL; 3373 3374 if (subsys_name) { 3375 struct trace_event_call *call; 3376 3377 if (!event_name) 3378 return NULL; 3379 3380 call = file->event_call; 3381 3382 if (strcmp(subsys_name, call->class->system) != 0) 3383 return NULL; 3384 3385 if (strcmp(event_name, trace_event_name(call)) != 0) 3386 return NULL; 3387 } 3388 3389 hist_field = find_var_field(hist_data, var_name); 3390 3391 return hist_field; 3392 } 3393 3394 static inline void __update_field_vars(struct tracing_map_elt *elt, 3395 struct ring_buffer_event *rbe, 3396 void *rec, 3397 struct field_var **field_vars, 3398 unsigned int n_field_vars, 3399 unsigned int field_var_str_start) 3400 { 3401 struct hist_elt_data *elt_data = elt->private_data; 3402 unsigned int i, j, var_idx; 3403 u64 var_val; 3404 3405 for (i = 0, j = field_var_str_start; i < n_field_vars; i++) { 3406 struct field_var *field_var = field_vars[i]; 3407 struct hist_field *var = field_var->var; 3408 struct hist_field *val = field_var->val; 3409 3410 var_val = val->fn(val, elt, rbe, rec); 3411 var_idx = var->var.idx; 3412 3413 if (val->flags & HIST_FIELD_FL_STRING) { 3414 char *str = elt_data->field_var_str[j++]; 3415 char *val_str = (char *)(uintptr_t)var_val; 3416 3417 strscpy(str, val_str, STR_VAR_LEN_MAX); 3418 var_val = (u64)(uintptr_t)str; 3419 } 3420 tracing_map_set_var(elt, var_idx, var_val); 3421 } 3422 } 3423 3424 static void update_field_vars(struct hist_trigger_data *hist_data, 3425 struct tracing_map_elt *elt, 3426 struct ring_buffer_event *rbe, 3427 void *rec) 3428 { 3429 __update_field_vars(elt, rbe, rec, hist_data->field_vars, 3430 hist_data->n_field_vars, 0); 3431 } 3432 3433 static void save_track_data_vars(struct hist_trigger_data *hist_data, 3434 struct tracing_map_elt *elt, void *rec, 3435 struct ring_buffer_event *rbe, void *key, 3436 struct action_data *data, u64 *var_ref_vals) 3437 { 3438 __update_field_vars(elt, rbe, rec, hist_data->save_vars, 3439 hist_data->n_save_vars, hist_data->n_field_var_str); 3440 } 3441 3442 static struct hist_field *create_var(struct hist_trigger_data *hist_data, 3443 struct trace_event_file *file, 3444 char *name, int size, const char *type) 3445 { 3446 struct hist_field *var; 3447 int idx; 3448 3449 if (find_var(hist_data, file, name) && !hist_data->remove) { 3450 var = ERR_PTR(-EINVAL); 3451 goto out; 3452 } 3453 3454 var = kzalloc(sizeof(struct hist_field), GFP_KERNEL); 3455 if (!var) { 3456 var = ERR_PTR(-ENOMEM); 3457 goto out; 3458 } 3459 3460 idx = tracing_map_add_var(hist_data->map); 3461 if (idx < 0) { 3462 kfree(var); 3463 var = ERR_PTR(-EINVAL); 3464 goto out; 3465 } 3466 3467 var->flags = HIST_FIELD_FL_VAR; 3468 var->var.idx = idx; 3469 var->var.hist_data = var->hist_data = hist_data; 3470 var->size = size; 3471 var->var.name = kstrdup(name, GFP_KERNEL); 3472 var->type = kstrdup(type, GFP_KERNEL); 3473 if (!var->var.name || !var->type) { 3474 kfree(var->var.name); 3475 kfree(var->type); 3476 kfree(var); 3477 var = ERR_PTR(-ENOMEM); 3478 } 3479 out: 3480 return var; 3481 } 3482 3483 static struct field_var *create_field_var(struct hist_trigger_data *hist_data, 3484 struct 
trace_event_file *file,
					  char *field_name)
{
	struct hist_field *val = NULL, *var = NULL;
	unsigned long flags = HIST_FIELD_FL_VAR;
	struct trace_array *tr = file->tr;
	struct field_var *field_var;
	int ret = 0;

	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
		ret = -EINVAL;
		goto err;
	}

	val = parse_atom(hist_data, file, field_name, &flags, NULL);
	if (IS_ERR(val)) {
		hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
		ret = PTR_ERR(val);
		goto err;
	}

	var = create_var(hist_data, file, field_name, val->size, val->type);
	if (IS_ERR(var)) {
		hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
		kfree(val);
		ret = PTR_ERR(var);
		goto err;
	}

	field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
	if (!field_var) {
		kfree(val);
		kfree(var);
		ret = -ENOMEM;
		goto err;
	}

	field_var->var = var;
	field_var->val = val;
 out:
	return field_var;
 err:
	field_var = ERR_PTR(ret);
	goto out;
}

/**
 * create_target_field_var - Automatically create a variable for a field
 * @target_hist_data: The target hist trigger
 * @subsys_name: Optional subsystem name
 * @event_name: Optional event name
 * @var_name: The name of the field (and the resulting variable)
 *
 * Hist trigger actions fetch data from variables, not directly from
 * events. However, for convenience, users are allowed to directly
 * specify an event field in an action, which will be automatically
 * converted into a variable on their behalf.
 *
 * This function creates a field variable with the name var_name on
 * the hist trigger currently being defined on the target event. If
 * subsys_name and event_name are specified, this function simply
 * verifies that they do in fact match the target event subsystem and
 * event name.
 *
 * Return: The variable created for the field.
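 *
 * (Illustrative example: a save() action such as
 * onmax($wakeup_lat).save(next_comm,prev_pid) on the target event
 * causes field variables named "next_comm" and "prev_pid" to be
 * created on that event through this function.)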
3550 */ 3551 static struct field_var * 3552 create_target_field_var(struct hist_trigger_data *target_hist_data, 3553 char *subsys_name, char *event_name, char *var_name) 3554 { 3555 struct trace_event_file *file = target_hist_data->event_file; 3556 3557 if (subsys_name) { 3558 struct trace_event_call *call; 3559 3560 if (!event_name) 3561 return NULL; 3562 3563 call = file->event_call; 3564 3565 if (strcmp(subsys_name, call->class->system) != 0) 3566 return NULL; 3567 3568 if (strcmp(event_name, trace_event_name(call)) != 0) 3569 return NULL; 3570 } 3571 3572 return create_field_var(target_hist_data, file, var_name); 3573 } 3574 3575 static bool check_track_val_max(u64 track_val, u64 var_val) 3576 { 3577 if (var_val <= track_val) 3578 return false; 3579 3580 return true; 3581 } 3582 3583 static bool check_track_val_changed(u64 track_val, u64 var_val) 3584 { 3585 if (var_val == track_val) 3586 return false; 3587 3588 return true; 3589 } 3590 3591 static u64 get_track_val(struct hist_trigger_data *hist_data, 3592 struct tracing_map_elt *elt, 3593 struct action_data *data) 3594 { 3595 unsigned int track_var_idx = data->track_data.track_var->var.idx; 3596 u64 track_val; 3597 3598 track_val = tracing_map_read_var(elt, track_var_idx); 3599 3600 return track_val; 3601 } 3602 3603 static void save_track_val(struct hist_trigger_data *hist_data, 3604 struct tracing_map_elt *elt, 3605 struct action_data *data, u64 var_val) 3606 { 3607 unsigned int track_var_idx = data->track_data.track_var->var.idx; 3608 3609 tracing_map_set_var(elt, track_var_idx, var_val); 3610 } 3611 3612 static void save_track_data(struct hist_trigger_data *hist_data, 3613 struct tracing_map_elt *elt, void *rec, 3614 struct ring_buffer_event *rbe, void *key, 3615 struct action_data *data, u64 *var_ref_vals) 3616 { 3617 if (data->track_data.save_data) 3618 data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals); 3619 } 3620 3621 static bool check_track_val(struct tracing_map_elt *elt, 3622 struct action_data *data, 3623 u64 var_val) 3624 { 3625 struct hist_trigger_data *hist_data; 3626 u64 track_val; 3627 3628 hist_data = data->track_data.track_var->hist_data; 3629 track_val = get_track_val(hist_data, elt, data); 3630 3631 return data->track_data.check_val(track_val, var_val); 3632 } 3633 3634 #ifdef CONFIG_TRACER_SNAPSHOT 3635 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) 3636 { 3637 /* called with tr->max_lock held */ 3638 struct track_data *track_data = tr->cond_snapshot->cond_data; 3639 struct hist_elt_data *elt_data, *track_elt_data; 3640 struct snapshot_context *context = cond_data; 3641 struct action_data *action; 3642 u64 track_val; 3643 3644 if (!track_data) 3645 return false; 3646 3647 action = track_data->action_data; 3648 3649 track_val = get_track_val(track_data->hist_data, context->elt, 3650 track_data->action_data); 3651 3652 if (!action->track_data.check_val(track_data->track_val, track_val)) 3653 return false; 3654 3655 track_data->track_val = track_val; 3656 memcpy(track_data->key, context->key, track_data->key_len); 3657 3658 elt_data = context->elt->private_data; 3659 track_elt_data = track_data->elt.private_data; 3660 if (elt_data->comm) 3661 strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN); 3662 3663 track_data->updated = true; 3664 3665 return true; 3666 } 3667 3668 static void save_track_data_snapshot(struct hist_trigger_data *hist_data, 3669 struct tracing_map_elt *elt, void *rec, 3670 struct ring_buffer_event *rbe, void *key, 3671 struct 
action_data *data, 3672 u64 *var_ref_vals) 3673 { 3674 struct trace_event_file *file = hist_data->event_file; 3675 struct snapshot_context context; 3676 3677 context.elt = elt; 3678 context.key = key; 3679 3680 tracing_snapshot_cond(file->tr, &context); 3681 } 3682 3683 static void hist_trigger_print_key(struct seq_file *m, 3684 struct hist_trigger_data *hist_data, 3685 void *key, 3686 struct tracing_map_elt *elt); 3687 3688 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data) 3689 { 3690 unsigned int i; 3691 3692 if (!hist_data->n_actions) 3693 return NULL; 3694 3695 for (i = 0; i < hist_data->n_actions; i++) { 3696 struct action_data *data = hist_data->actions[i]; 3697 3698 if (data->action == ACTION_SNAPSHOT) 3699 return data; 3700 } 3701 3702 return NULL; 3703 } 3704 3705 static void track_data_snapshot_print(struct seq_file *m, 3706 struct hist_trigger_data *hist_data) 3707 { 3708 struct trace_event_file *file = hist_data->event_file; 3709 struct track_data *track_data; 3710 struct action_data *action; 3711 3712 track_data = tracing_cond_snapshot_data(file->tr); 3713 if (!track_data) 3714 return; 3715 3716 if (!track_data->updated) 3717 return; 3718 3719 action = snapshot_action(hist_data); 3720 if (!action) 3721 return; 3722 3723 seq_puts(m, "\nSnapshot taken (see tracing/snapshot). Details:\n"); 3724 seq_printf(m, "\ttriggering value { %s(%s) }: %10llu", 3725 action->handler == HANDLER_ONMAX ? "onmax" : "onchange", 3726 action->track_data.var_str, track_data->track_val); 3727 3728 seq_puts(m, "\ttriggered by event with key: "); 3729 hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt); 3730 seq_putc(m, '\n'); 3731 } 3732 #else 3733 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) 3734 { 3735 return false; 3736 } 3737 static void save_track_data_snapshot(struct hist_trigger_data *hist_data, 3738 struct tracing_map_elt *elt, void *rec, 3739 struct ring_buffer_event *rbe, void *key, 3740 struct action_data *data, 3741 u64 *var_ref_vals) {} 3742 static void track_data_snapshot_print(struct seq_file *m, 3743 struct hist_trigger_data *hist_data) {} 3744 #endif /* CONFIG_TRACER_SNAPSHOT */ 3745 3746 static void track_data_print(struct seq_file *m, 3747 struct hist_trigger_data *hist_data, 3748 struct tracing_map_elt *elt, 3749 struct action_data *data) 3750 { 3751 u64 track_val = get_track_val(hist_data, elt, data); 3752 unsigned int i, save_var_idx; 3753 3754 if (data->handler == HANDLER_ONMAX) 3755 seq_printf(m, "\n\tmax: %10llu", track_val); 3756 else if (data->handler == HANDLER_ONCHANGE) 3757 seq_printf(m, "\n\tchanged: %10llu", track_val); 3758 3759 if (data->action == ACTION_SNAPSHOT) 3760 return; 3761 3762 for (i = 0; i < hist_data->n_save_vars; i++) { 3763 struct hist_field *save_val = hist_data->save_vars[i]->val; 3764 struct hist_field *save_var = hist_data->save_vars[i]->var; 3765 u64 val; 3766 3767 save_var_idx = save_var->var.idx; 3768 3769 val = tracing_map_read_var(elt, save_var_idx); 3770 3771 if (save_val->flags & HIST_FIELD_FL_STRING) { 3772 seq_printf(m, " %s: %-32s", save_var->var.name, 3773 (char *)(uintptr_t)(val)); 3774 } else 3775 seq_printf(m, " %s: %10llu", save_var->var.name, val); 3776 } 3777 } 3778 3779 static void ontrack_action(struct hist_trigger_data *hist_data, 3780 struct tracing_map_elt *elt, void *rec, 3781 struct ring_buffer_event *rbe, void *key, 3782 struct action_data *data, u64 *var_ref_vals) 3783 { 3784 u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx]; 3785 
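	/*
	 * If the variable's new value passes the handler's check
	 * (greater than the tracked maximum for onmax(), different from
	 * the previously tracked value for onchange()), record it and
	 * invoke the action's save function, if any.
	 */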
	if (check_track_val(elt, data, var_val)) {
		save_track_val(hist_data, elt, data, var_val);
		save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
	}
}

static void action_data_destroy(struct action_data *data)
{
	unsigned int i;

	lockdep_assert_held(&event_mutex);

	kfree(data->action_name);

	for (i = 0; i < data->n_params; i++)
		kfree(data->params[i]);

	if (data->synth_event)
		data->synth_event->ref--;

	kfree(data->synth_event_name);

	kfree(data);
}

static void track_data_destroy(struct hist_trigger_data *hist_data,
			       struct action_data *data)
{
	struct trace_event_file *file = hist_data->event_file;

	destroy_hist_field(data->track_data.track_var, 0);

	if (data->action == ACTION_SNAPSHOT) {
		struct track_data *track_data;

		track_data = tracing_cond_snapshot_data(file->tr);
		if (track_data && track_data->hist_data == hist_data) {
			tracing_snapshot_cond_disable(file->tr);
			track_data_free(track_data);
		}
	}

	kfree(data->track_data.var_str);

	action_data_destroy(data);
}

static int action_create(struct hist_trigger_data *hist_data,
			 struct action_data *data);

static int track_data_create(struct hist_trigger_data *hist_data,
			     struct action_data *data)
{
	struct hist_field *var_field, *ref_field, *track_var = NULL;
	struct trace_event_file *file = hist_data->event_file;
	struct trace_array *tr = file->tr;
	char *track_data_var_str;
	int ret = 0;

	track_data_var_str = data->track_data.var_str;
	if (track_data_var_str[0] != '$') {
		hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
		return -EINVAL;
	}
	track_data_var_str++;

	var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
	if (!var_field) {
		hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
		return -EINVAL;
	}

	ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
	if (!ref_field)
		return -ENOMEM;

	data->track_data.var_ref = ref_field;

	if (data->handler == HANDLER_ONMAX)
		track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
	if (IS_ERR(track_var)) {
		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
		ret = PTR_ERR(track_var);
		goto out;
	}

	if (data->handler == HANDLER_ONCHANGE)
		track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
	if (IS_ERR(track_var)) {
		hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
		ret = PTR_ERR(track_var);
		goto out;
	}
	data->track_data.track_var = track_var;

	ret = action_create(hist_data, data);
 out:
	return ret;
}

static int parse_action_params(struct trace_array *tr, char *params,
			       struct action_data *data)
{
	char *param, *saved_param;
	bool first_param = true;
	int ret = 0;

	while (params) {
		if (data->n_params >= SYNTH_FIELDS_MAX) {
			hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
			ret = -EINVAL;
			goto out;
		}

		param = strsep(&params, ",");
		if (!param) {
			hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
			ret = -EINVAL;
			goto out;
		}

		param = strstrip(param);
		if (strlen(param) < 2) {
			hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
			ret = -EINVAL;
			goto out;
		}

		saved_param =
kstrdup(param, GFP_KERNEL); 3914 if (!saved_param) { 3915 ret = -ENOMEM; 3916 goto out; 3917 } 3918 3919 if (first_param && data->use_trace_keyword) { 3920 data->synth_event_name = saved_param; 3921 first_param = false; 3922 continue; 3923 } 3924 first_param = false; 3925 3926 data->params[data->n_params++] = saved_param; 3927 } 3928 out: 3929 return ret; 3930 } 3931 3932 static int action_parse(struct trace_array *tr, char *str, struct action_data *data, 3933 enum handler_id handler) 3934 { 3935 char *action_name; 3936 int ret = 0; 3937 3938 strsep(&str, "."); 3939 if (!str) { 3940 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); 3941 ret = -EINVAL; 3942 goto out; 3943 } 3944 3945 action_name = strsep(&str, "("); 3946 if (!action_name || !str) { 3947 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); 3948 ret = -EINVAL; 3949 goto out; 3950 } 3951 3952 if (str_has_prefix(action_name, "save")) { 3953 char *params = strsep(&str, ")"); 3954 3955 if (!params) { 3956 hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0); 3957 ret = -EINVAL; 3958 goto out; 3959 } 3960 3961 ret = parse_action_params(tr, params, data); 3962 if (ret) 3963 goto out; 3964 3965 if (handler == HANDLER_ONMAX) 3966 data->track_data.check_val = check_track_val_max; 3967 else if (handler == HANDLER_ONCHANGE) 3968 data->track_data.check_val = check_track_val_changed; 3969 else { 3970 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); 3971 ret = -EINVAL; 3972 goto out; 3973 } 3974 3975 data->track_data.save_data = save_track_data_vars; 3976 data->fn = ontrack_action; 3977 data->action = ACTION_SAVE; 3978 } else if (str_has_prefix(action_name, "snapshot")) { 3979 char *params = strsep(&str, ")"); 3980 3981 if (!str) { 3982 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params)); 3983 ret = -EINVAL; 3984 goto out; 3985 } 3986 3987 if (handler == HANDLER_ONMAX) 3988 data->track_data.check_val = check_track_val_max; 3989 else if (handler == HANDLER_ONCHANGE) 3990 data->track_data.check_val = check_track_val_changed; 3991 else { 3992 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); 3993 ret = -EINVAL; 3994 goto out; 3995 } 3996 3997 data->track_data.save_data = save_track_data_snapshot; 3998 data->fn = ontrack_action; 3999 data->action = ACTION_SNAPSHOT; 4000 } else { 4001 char *params = strsep(&str, ")"); 4002 4003 if (str_has_prefix(action_name, "trace")) 4004 data->use_trace_keyword = true; 4005 4006 if (params) { 4007 ret = parse_action_params(tr, params, data); 4008 if (ret) 4009 goto out; 4010 } 4011 4012 if (handler == HANDLER_ONMAX) 4013 data->track_data.check_val = check_track_val_max; 4014 else if (handler == HANDLER_ONCHANGE) 4015 data->track_data.check_val = check_track_val_changed; 4016 4017 if (handler != HANDLER_ONMATCH) { 4018 data->track_data.save_data = action_trace; 4019 data->fn = ontrack_action; 4020 } else 4021 data->fn = action_trace; 4022 4023 data->action = ACTION_TRACE; 4024 } 4025 4026 data->action_name = kstrdup(action_name, GFP_KERNEL); 4027 if (!data->action_name) { 4028 ret = -ENOMEM; 4029 goto out; 4030 } 4031 4032 data->handler = handler; 4033 out: 4034 return ret; 4035 } 4036 4037 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data, 4038 char *str, enum handler_id handler) 4039 { 4040 struct action_data *data; 4041 int ret = -EINVAL; 4042 char *var_str; 4043 4044 data = kzalloc(sizeof(*data), GFP_KERNEL); 4045 if (!data) 4046 return ERR_PTR(-ENOMEM); 4047 4048 var_str = strsep(&str, ")"); 4049 if (!var_str || !str) { 4050 ret = -EINVAL; 4051 goto free; 4052 } 4053 4054 
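	/* keep the raw onmax()/onchange() variable string; it's parsed and reported on later */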
	data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
	if (!data->track_data.var_str) {
		ret = -ENOMEM;
		goto free;
	}

	ret = action_parse(hist_data->event_file->tr, str, data, handler);
	if (ret)
		goto free;
 out:
	return data;
 free:
	track_data_destroy(hist_data, data);
	data = ERR_PTR(ret);
	goto out;
}

static void onmatch_destroy(struct action_data *data)
{
	kfree(data->match_data.event);
	kfree(data->match_data.event_system);

	action_data_destroy(data);
}

static void destroy_field_var(struct field_var *field_var)
{
	if (!field_var)
		return;

	destroy_hist_field(field_var->var, 0);
	destroy_hist_field(field_var->val, 0);

	kfree(field_var);
}

static void destroy_field_vars(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_field_vars; i++)
		destroy_field_var(hist_data->field_vars[i]);
}

static void save_field_var(struct hist_trigger_data *hist_data,
			   struct field_var *field_var)
{
	hist_data->field_vars[hist_data->n_field_vars++] = field_var;

	if (field_var->val->flags & HIST_FIELD_FL_STRING)
		hist_data->n_field_var_str++;
}

static int check_synth_field(struct synth_event *event,
			     struct hist_field *hist_field,
			     unsigned int field_pos)
{
	struct synth_field *field;

	if (field_pos >= event->n_fields)
		return -EINVAL;

	field = event->fields[field_pos];

	if (strcmp(field->type, hist_field->type) != 0)
		return -EINVAL;

	return 0;
}

static struct hist_field *
trace_action_find_var(struct hist_trigger_data *hist_data,
		      struct action_data *data,
		      char *system, char *event, char *var)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field;

	var++; /* skip '$' */

	hist_field = find_target_event_var(hist_data, system, event, var);
	if (!hist_field) {
		if (!system && data->handler == HANDLER_ONMATCH) {
			system = data->match_data.event_system;
			event = data->match_data.event;
		}

		hist_field = find_event_var(hist_data, system, event, var);
	}

	if (!hist_field)
		hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));

	return hist_field;
}

static struct hist_field *
trace_action_create_field_var(struct hist_trigger_data *hist_data,
			      struct action_data *data, char *system,
			      char *event, char *var)
{
	struct hist_field *hist_field = NULL;
	struct field_var *field_var;

	/*
	 * First try to create a field var on the target event (the one
	 * currently being defined). This will create a variable for
	 * unqualified fields on the target event, or if qualified,
	 * target fields that have qualified names matching the target.
	 */
	field_var = create_target_field_var(hist_data, system, event, var);

	if (field_var && !IS_ERR(field_var)) {
		save_field_var(hist_data, field_var);
		hist_field = field_var->var;
	} else {
		field_var = NULL;
		/*
		 * If no explicit system.event is specified, default to
		 * looking for fields on the onmatch(system.event.xxx)
		 * event.
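		 * (Illustrative: a bare "prio" param used with an
		 * onmatch(sched.sched_wakeup) handler that isn't a field of
		 * the target event would be resolved against sched_wakeup
		 * here.)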
		 */
		if (!system && data->handler == HANDLER_ONMATCH) {
			system = data->match_data.event_system;
			event = data->match_data.event;
		}

		/*
		 * At this point, we're looking at a field on another
		 * event. Because we can't modify a hist trigger on
		 * another event to add a variable for a field, we need
		 * to create a new trigger on that event and create the
		 * variable at the same time.
		 */
		hist_field = create_field_var_hist(hist_data, system, event, var);
		if (IS_ERR(hist_field))
			goto free;
	}
 out:
	return hist_field;
 free:
	destroy_field_var(field_var);
	hist_field = NULL;
	goto out;
}

static int trace_action_create(struct hist_trigger_data *hist_data,
			       struct action_data *data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	char *event_name, *param, *system = NULL;
	struct hist_field *hist_field, *var_ref;
	unsigned int i, var_ref_idx;
	unsigned int field_pos = 0;
	struct synth_event *event;
	char *synth_event_name;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	if (data->use_trace_keyword)
		synth_event_name = data->synth_event_name;
	else
		synth_event_name = data->action_name;

	event = find_synth_event(synth_event_name);
	if (!event) {
		hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
		return -EINVAL;
	}

	event->ref++;

	var_ref_idx = hist_data->n_var_refs;

	for (i = 0; i < data->n_params; i++) {
		char *p;

		p = param = kstrdup(data->params[i], GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		system = strsep(&param, ".");
		if (!param) {
			param = (char *)system;
			system = event_name = NULL;
		} else {
			event_name = strsep(&param, ".");
			if (!param) {
				kfree(p);
				ret = -EINVAL;
				goto err;
			}
		}

		if (param[0] == '$')
			hist_field = trace_action_find_var(hist_data, data,
							   system, event_name,
							   param);
		else
			hist_field = trace_action_create_field_var(hist_data,
								   data,
								   system,
								   event_name,
								   param);

		if (!hist_field) {
			kfree(p);
			ret = -EINVAL;
			goto err;
		}

		if (check_synth_field(event, hist_field, field_pos) == 0) {
			var_ref = create_var_ref(hist_data, hist_field,
						 system, event_name);
			if (!var_ref) {
				kfree(p);
				ret = -ENOMEM;
				goto err;
			}

			field_pos++;
			kfree(p);
			continue;
		}

		hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
		kfree(p);
		ret = -EINVAL;
		goto err;
	}

	if (field_pos != event->n_fields) {
		hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
		ret = -EINVAL;
		goto err;
	}

	data->synth_event = event;
	data->var_ref_idx = var_ref_idx;
 out:
	return ret;
 err:
	event->ref--;

	goto out;
}

static int action_create(struct hist_trigger_data *hist_data,
			 struct action_data *data)
{
	struct trace_event_file *file = hist_data->event_file;
	struct trace_array *tr = file->tr;
	struct track_data *track_data;
	struct field_var *field_var;
	unsigned int i;
	char *param;
	int ret = 0;

	if (data->action == ACTION_TRACE)
		return trace_action_create(hist_data, data);

	if (data->action == ACTION_SNAPSHOT) {
		track_data =
track_data_alloc(hist_data->key_size, data, hist_data); 4321 if (IS_ERR(track_data)) { 4322 ret = PTR_ERR(track_data); 4323 goto out; 4324 } 4325 4326 ret = tracing_snapshot_cond_enable(file->tr, track_data, 4327 cond_snapshot_update); 4328 if (ret) 4329 track_data_free(track_data); 4330 4331 goto out; 4332 } 4333 4334 if (data->action == ACTION_SAVE) { 4335 if (hist_data->n_save_vars) { 4336 ret = -EEXIST; 4337 hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0); 4338 goto out; 4339 } 4340 4341 for (i = 0; i < data->n_params; i++) { 4342 param = kstrdup(data->params[i], GFP_KERNEL); 4343 if (!param) { 4344 ret = -ENOMEM; 4345 goto out; 4346 } 4347 4348 field_var = create_target_field_var(hist_data, NULL, NULL, param); 4349 if (IS_ERR(field_var)) { 4350 hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL, 4351 errpos(param)); 4352 ret = PTR_ERR(field_var); 4353 kfree(param); 4354 goto out; 4355 } 4356 4357 hist_data->save_vars[hist_data->n_save_vars++] = field_var; 4358 if (field_var->val->flags & HIST_FIELD_FL_STRING) 4359 hist_data->n_save_var_str++; 4360 kfree(param); 4361 } 4362 } 4363 out: 4364 return ret; 4365 } 4366 4367 static int onmatch_create(struct hist_trigger_data *hist_data, 4368 struct action_data *data) 4369 { 4370 return action_create(hist_data, data); 4371 } 4372 4373 static struct action_data *onmatch_parse(struct trace_array *tr, char *str) 4374 { 4375 char *match_event, *match_event_system; 4376 struct action_data *data; 4377 int ret = -EINVAL; 4378 4379 data = kzalloc(sizeof(*data), GFP_KERNEL); 4380 if (!data) 4381 return ERR_PTR(-ENOMEM); 4382 4383 match_event = strsep(&str, ")"); 4384 if (!match_event || !str) { 4385 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event)); 4386 goto free; 4387 } 4388 4389 match_event_system = strsep(&match_event, "."); 4390 if (!match_event) { 4391 hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system)); 4392 goto free; 4393 } 4394 4395 if (IS_ERR(event_file(tr, match_event_system, match_event))) { 4396 hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event)); 4397 goto free; 4398 } 4399 4400 data->match_data.event = kstrdup(match_event, GFP_KERNEL); 4401 if (!data->match_data.event) { 4402 ret = -ENOMEM; 4403 goto free; 4404 } 4405 4406 data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL); 4407 if (!data->match_data.event_system) { 4408 ret = -ENOMEM; 4409 goto free; 4410 } 4411 4412 ret = action_parse(tr, str, data, HANDLER_ONMATCH); 4413 if (ret) 4414 goto free; 4415 out: 4416 return data; 4417 free: 4418 onmatch_destroy(data); 4419 data = ERR_PTR(ret); 4420 goto out; 4421 } 4422 4423 static int create_hitcount_val(struct hist_trigger_data *hist_data) 4424 { 4425 hist_data->fields[HITCOUNT_IDX] = 4426 create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL); 4427 if (!hist_data->fields[HITCOUNT_IDX]) 4428 return -ENOMEM; 4429 4430 hist_data->n_vals++; 4431 hist_data->n_fields++; 4432 4433 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX)) 4434 return -EINVAL; 4435 4436 return 0; 4437 } 4438 4439 static int __create_val_field(struct hist_trigger_data *hist_data, 4440 unsigned int val_idx, 4441 struct trace_event_file *file, 4442 char *var_name, char *field_str, 4443 unsigned long flags) 4444 { 4445 struct hist_field *hist_field; 4446 int ret = 0; 4447 4448 hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0); 4449 if (IS_ERR(hist_field)) { 4450 ret = PTR_ERR(hist_field); 4451 goto out; 4452 } 4453 4454 hist_data->fields[val_idx] = hist_field; 4455 4456 
++hist_data->n_vals; 4457 ++hist_data->n_fields; 4458 4459 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX)) 4460 ret = -EINVAL; 4461 out: 4462 return ret; 4463 } 4464 4465 static int create_val_field(struct hist_trigger_data *hist_data, 4466 unsigned int val_idx, 4467 struct trace_event_file *file, 4468 char *field_str) 4469 { 4470 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX)) 4471 return -EINVAL; 4472 4473 return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0); 4474 } 4475 4476 static int create_var_field(struct hist_trigger_data *hist_data, 4477 unsigned int val_idx, 4478 struct trace_event_file *file, 4479 char *var_name, char *expr_str) 4480 { 4481 struct trace_array *tr = hist_data->event_file->tr; 4482 unsigned long flags = 0; 4483 4484 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX)) 4485 return -EINVAL; 4486 4487 if (find_var(hist_data, file, var_name) && !hist_data->remove) { 4488 hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name)); 4489 return -EINVAL; 4490 } 4491 4492 flags |= HIST_FIELD_FL_VAR; 4493 hist_data->n_vars++; 4494 if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX)) 4495 return -EINVAL; 4496 4497 return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags); 4498 } 4499 4500 static int create_val_fields(struct hist_trigger_data *hist_data, 4501 struct trace_event_file *file) 4502 { 4503 char *fields_str, *field_str; 4504 unsigned int i, j = 1; 4505 int ret; 4506 4507 ret = create_hitcount_val(hist_data); 4508 if (ret) 4509 goto out; 4510 4511 fields_str = hist_data->attrs->vals_str; 4512 if (!fields_str) 4513 goto out; 4514 4515 strsep(&fields_str, "="); 4516 if (!fields_str) 4517 goto out; 4518 4519 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX && 4520 j < TRACING_MAP_VALS_MAX; i++) { 4521 field_str = strsep(&fields_str, ","); 4522 if (!field_str) 4523 break; 4524 4525 if (strcmp(field_str, "hitcount") == 0) 4526 continue; 4527 4528 ret = create_val_field(hist_data, j++, file, field_str); 4529 if (ret) 4530 goto out; 4531 } 4532 4533 if (fields_str && (strcmp(fields_str, "hitcount") != 0)) 4534 ret = -EINVAL; 4535 out: 4536 return ret; 4537 } 4538 4539 static int create_key_field(struct hist_trigger_data *hist_data, 4540 unsigned int key_idx, 4541 unsigned int key_offset, 4542 struct trace_event_file *file, 4543 char *field_str) 4544 { 4545 struct trace_array *tr = hist_data->event_file->tr; 4546 struct hist_field *hist_field = NULL; 4547 unsigned long flags = 0; 4548 unsigned int key_size; 4549 int ret = 0; 4550 4551 if (WARN_ON(key_idx >= HIST_FIELDS_MAX)) 4552 return -EINVAL; 4553 4554 flags |= HIST_FIELD_FL_KEY; 4555 4556 if (strcmp(field_str, "stacktrace") == 0) { 4557 flags |= HIST_FIELD_FL_STACKTRACE; 4558 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH; 4559 hist_field = create_hist_field(hist_data, NULL, flags, NULL); 4560 } else { 4561 hist_field = parse_expr(hist_data, file, field_str, flags, 4562 NULL, 0); 4563 if (IS_ERR(hist_field)) { 4564 ret = PTR_ERR(hist_field); 4565 goto out; 4566 } 4567 4568 if (field_has_hist_vars(hist_field, 0)) { 4569 hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str)); 4570 destroy_hist_field(hist_field, 0); 4571 ret = -EINVAL; 4572 goto out; 4573 } 4574 4575 key_size = hist_field->size; 4576 } 4577 4578 hist_data->fields[key_idx] = hist_field; 4579 4580 key_size = ALIGN(key_size, sizeof(u64)); 4581 hist_data->fields[key_idx]->size = key_size; 4582 hist_data->fields[key_idx]->offset = key_offset; 4583 4584 hist_data->key_size 
+= key_size; 4585 4586 if (hist_data->key_size > HIST_KEY_SIZE_MAX) { 4587 ret = -EINVAL; 4588 goto out; 4589 } 4590 4591 hist_data->n_keys++; 4592 hist_data->n_fields++; 4593 4594 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX)) 4595 return -EINVAL; 4596 4597 ret = key_size; 4598 out: 4599 return ret; 4600 } 4601 4602 static int create_key_fields(struct hist_trigger_data *hist_data, 4603 struct trace_event_file *file) 4604 { 4605 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals; 4606 char *fields_str, *field_str; 4607 int ret = -EINVAL; 4608 4609 fields_str = hist_data->attrs->keys_str; 4610 if (!fields_str) 4611 goto out; 4612 4613 strsep(&fields_str, "="); 4614 if (!fields_str) 4615 goto out; 4616 4617 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) { 4618 field_str = strsep(&fields_str, ","); 4619 if (!field_str) 4620 break; 4621 ret = create_key_field(hist_data, i, key_offset, 4622 file, field_str); 4623 if (ret < 0) 4624 goto out; 4625 key_offset += ret; 4626 } 4627 if (fields_str) { 4628 ret = -EINVAL; 4629 goto out; 4630 } 4631 ret = 0; 4632 out: 4633 return ret; 4634 } 4635 4636 static int create_var_fields(struct hist_trigger_data *hist_data, 4637 struct trace_event_file *file) 4638 { 4639 unsigned int i, j = hist_data->n_vals; 4640 int ret = 0; 4641 4642 unsigned int n_vars = hist_data->attrs->var_defs.n_vars; 4643 4644 for (i = 0; i < n_vars; i++) { 4645 char *var_name = hist_data->attrs->var_defs.name[i]; 4646 char *expr = hist_data->attrs->var_defs.expr[i]; 4647 4648 ret = create_var_field(hist_data, j++, file, var_name, expr); 4649 if (ret) 4650 goto out; 4651 } 4652 out: 4653 return ret; 4654 } 4655 4656 static void free_var_defs(struct hist_trigger_data *hist_data) 4657 { 4658 unsigned int i; 4659 4660 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) { 4661 kfree(hist_data->attrs->var_defs.name[i]); 4662 kfree(hist_data->attrs->var_defs.expr[i]); 4663 } 4664 4665 hist_data->attrs->var_defs.n_vars = 0; 4666 } 4667 4668 static int parse_var_defs(struct hist_trigger_data *hist_data) 4669 { 4670 struct trace_array *tr = hist_data->event_file->tr; 4671 char *s, *str, *var_name, *field_str; 4672 unsigned int i, j, n_vars = 0; 4673 int ret = 0; 4674 4675 for (i = 0; i < hist_data->attrs->n_assignments; i++) { 4676 str = hist_data->attrs->assignment_str[i]; 4677 for (j = 0; j < TRACING_MAP_VARS_MAX; j++) { 4678 field_str = strsep(&str, ","); 4679 if (!field_str) 4680 break; 4681 4682 var_name = strsep(&field_str, "="); 4683 if (!var_name || !field_str) { 4684 hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT, 4685 errpos(var_name)); 4686 ret = -EINVAL; 4687 goto free; 4688 } 4689 4690 if (n_vars == TRACING_MAP_VARS_MAX) { 4691 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name)); 4692 ret = -EINVAL; 4693 goto free; 4694 } 4695 4696 s = kstrdup(var_name, GFP_KERNEL); 4697 if (!s) { 4698 ret = -ENOMEM; 4699 goto free; 4700 } 4701 hist_data->attrs->var_defs.name[n_vars] = s; 4702 4703 s = kstrdup(field_str, GFP_KERNEL); 4704 if (!s) { 4705 kfree(hist_data->attrs->var_defs.name[n_vars]); 4706 ret = -ENOMEM; 4707 goto free; 4708 } 4709 hist_data->attrs->var_defs.expr[n_vars++] = s; 4710 4711 hist_data->attrs->var_defs.n_vars = n_vars; 4712 } 4713 } 4714 4715 return ret; 4716 free: 4717 free_var_defs(hist_data); 4718 4719 return ret; 4720 } 4721 4722 static int create_hist_fields(struct hist_trigger_data *hist_data, 4723 struct trace_event_file *file) 4724 { 4725 int ret; 4726 4727 ret = parse_var_defs(hist_data); 4728 if (ret) 4729 goto out; 4730 4731 ret = 
create_val_fields(hist_data, file); 4732 if (ret) 4733 goto out; 4734 4735 ret = create_var_fields(hist_data, file); 4736 if (ret) 4737 goto out; 4738 4739 ret = create_key_fields(hist_data, file); 4740 if (ret) 4741 goto out; 4742 out: 4743 free_var_defs(hist_data); 4744 4745 return ret; 4746 } 4747 4748 static int is_descending(const char *str) 4749 { 4750 if (!str) 4751 return 0; 4752 4753 if (strcmp(str, "descending") == 0) 4754 return 1; 4755 4756 if (strcmp(str, "ascending") == 0) 4757 return 0; 4758 4759 return -EINVAL; 4760 } 4761 4762 static int create_sort_keys(struct hist_trigger_data *hist_data) 4763 { 4764 char *fields_str = hist_data->attrs->sort_key_str; 4765 struct tracing_map_sort_key *sort_key; 4766 int descending, ret = 0; 4767 unsigned int i, j, k; 4768 4769 hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */ 4770 4771 if (!fields_str) 4772 goto out; 4773 4774 strsep(&fields_str, "="); 4775 if (!fields_str) { 4776 ret = -EINVAL; 4777 goto out; 4778 } 4779 4780 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) { 4781 struct hist_field *hist_field; 4782 char *field_str, *field_name; 4783 const char *test_name; 4784 4785 sort_key = &hist_data->sort_keys[i]; 4786 4787 field_str = strsep(&fields_str, ","); 4788 if (!field_str) { 4789 if (i == 0) 4790 ret = -EINVAL; 4791 break; 4792 } 4793 4794 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) { 4795 ret = -EINVAL; 4796 break; 4797 } 4798 4799 field_name = strsep(&field_str, "."); 4800 if (!field_name) { 4801 ret = -EINVAL; 4802 break; 4803 } 4804 4805 if (strcmp(field_name, "hitcount") == 0) { 4806 descending = is_descending(field_str); 4807 if (descending < 0) { 4808 ret = descending; 4809 break; 4810 } 4811 sort_key->descending = descending; 4812 continue; 4813 } 4814 4815 for (j = 1, k = 1; j < hist_data->n_fields; j++) { 4816 unsigned int idx; 4817 4818 hist_field = hist_data->fields[j]; 4819 if (hist_field->flags & HIST_FIELD_FL_VAR) 4820 continue; 4821 4822 idx = k++; 4823 4824 test_name = hist_field_name(hist_field, 0); 4825 4826 if (strcmp(field_name, test_name) == 0) { 4827 sort_key->field_idx = idx; 4828 descending = is_descending(field_str); 4829 if (descending < 0) { 4830 ret = descending; 4831 goto out; 4832 } 4833 sort_key->descending = descending; 4834 break; 4835 } 4836 } 4837 if (j == hist_data->n_fields) { 4838 ret = -EINVAL; 4839 break; 4840 } 4841 } 4842 4843 hist_data->n_sort_keys = i; 4844 out: 4845 return ret; 4846 } 4847 4848 static void destroy_actions(struct hist_trigger_data *hist_data) 4849 { 4850 unsigned int i; 4851 4852 for (i = 0; i < hist_data->n_actions; i++) { 4853 struct action_data *data = hist_data->actions[i]; 4854 4855 if (data->handler == HANDLER_ONMATCH) 4856 onmatch_destroy(data); 4857 else if (data->handler == HANDLER_ONMAX || 4858 data->handler == HANDLER_ONCHANGE) 4859 track_data_destroy(hist_data, data); 4860 else 4861 kfree(data); 4862 } 4863 } 4864 4865 static int parse_actions(struct hist_trigger_data *hist_data) 4866 { 4867 struct trace_array *tr = hist_data->event_file->tr; 4868 struct action_data *data; 4869 unsigned int i; 4870 int ret = 0; 4871 char *str; 4872 int len; 4873 4874 for (i = 0; i < hist_data->attrs->n_actions; i++) { 4875 str = hist_data->attrs->action_str[i]; 4876 4877 if ((len = str_has_prefix(str, "onmatch("))) { 4878 char *action_str = str + len; 4879 4880 data = onmatch_parse(tr, action_str); 4881 if (IS_ERR(data)) { 4882 ret = PTR_ERR(data); 4883 break; 4884 } 4885 } else if ((len = str_has_prefix(str, "onmax("))) { 4886 char 
*action_str = str + len; 4887 4888 data = track_data_parse(hist_data, action_str, 4889 HANDLER_ONMAX); 4890 if (IS_ERR(data)) { 4891 ret = PTR_ERR(data); 4892 break; 4893 } 4894 } else if ((len = str_has_prefix(str, "onchange("))) { 4895 char *action_str = str + len; 4896 4897 data = track_data_parse(hist_data, action_str, 4898 HANDLER_ONCHANGE); 4899 if (IS_ERR(data)) { 4900 ret = PTR_ERR(data); 4901 break; 4902 } 4903 } else { 4904 ret = -EINVAL; 4905 break; 4906 } 4907 4908 hist_data->actions[hist_data->n_actions++] = data; 4909 } 4910 4911 return ret; 4912 } 4913 4914 static int create_actions(struct hist_trigger_data *hist_data) 4915 { 4916 struct action_data *data; 4917 unsigned int i; 4918 int ret = 0; 4919 4920 for (i = 0; i < hist_data->attrs->n_actions; i++) { 4921 data = hist_data->actions[i]; 4922 4923 if (data->handler == HANDLER_ONMATCH) { 4924 ret = onmatch_create(hist_data, data); 4925 if (ret) 4926 break; 4927 } else if (data->handler == HANDLER_ONMAX || 4928 data->handler == HANDLER_ONCHANGE) { 4929 ret = track_data_create(hist_data, data); 4930 if (ret) 4931 break; 4932 } else { 4933 ret = -EINVAL; 4934 break; 4935 } 4936 } 4937 4938 return ret; 4939 } 4940 4941 static void print_actions(struct seq_file *m, 4942 struct hist_trigger_data *hist_data, 4943 struct tracing_map_elt *elt) 4944 { 4945 unsigned int i; 4946 4947 for (i = 0; i < hist_data->n_actions; i++) { 4948 struct action_data *data = hist_data->actions[i]; 4949 4950 if (data->action == ACTION_SNAPSHOT) 4951 continue; 4952 4953 if (data->handler == HANDLER_ONMAX || 4954 data->handler == HANDLER_ONCHANGE) 4955 track_data_print(m, hist_data, elt, data); 4956 } 4957 } 4958 4959 static void print_action_spec(struct seq_file *m, 4960 struct hist_trigger_data *hist_data, 4961 struct action_data *data) 4962 { 4963 unsigned int i; 4964 4965 if (data->action == ACTION_SAVE) { 4966 for (i = 0; i < hist_data->n_save_vars; i++) { 4967 seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name); 4968 if (i < hist_data->n_save_vars - 1) 4969 seq_puts(m, ","); 4970 } 4971 } else if (data->action == ACTION_TRACE) { 4972 if (data->use_trace_keyword) 4973 seq_printf(m, "%s", data->synth_event_name); 4974 for (i = 0; i < data->n_params; i++) { 4975 if (i || data->use_trace_keyword) 4976 seq_puts(m, ","); 4977 seq_printf(m, "%s", data->params[i]); 4978 } 4979 } 4980 } 4981 4982 static void print_track_data_spec(struct seq_file *m, 4983 struct hist_trigger_data *hist_data, 4984 struct action_data *data) 4985 { 4986 if (data->handler == HANDLER_ONMAX) 4987 seq_puts(m, ":onmax("); 4988 else if (data->handler == HANDLER_ONCHANGE) 4989 seq_puts(m, ":onchange("); 4990 seq_printf(m, "%s", data->track_data.var_str); 4991 seq_printf(m, ").%s(", data->action_name); 4992 4993 print_action_spec(m, hist_data, data); 4994 4995 seq_puts(m, ")"); 4996 } 4997 4998 static void print_onmatch_spec(struct seq_file *m, 4999 struct hist_trigger_data *hist_data, 5000 struct action_data *data) 5001 { 5002 seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system, 5003 data->match_data.event); 5004 5005 seq_printf(m, "%s(", data->action_name); 5006 5007 print_action_spec(m, hist_data, data); 5008 5009 seq_puts(m, ")"); 5010 } 5011 5012 static bool actions_match(struct hist_trigger_data *hist_data, 5013 struct hist_trigger_data *hist_data_test) 5014 { 5015 unsigned int i, j; 5016 5017 if (hist_data->n_actions != hist_data_test->n_actions) 5018 return false; 5019 5020 for (i = 0; i < hist_data->n_actions; i++) { 5021 struct action_data *data = 
hist_data->actions[i]; 5022 struct action_data *data_test = hist_data_test->actions[i]; 5023 char *action_name, *action_name_test; 5024 5025 if (data->handler != data_test->handler) 5026 return false; 5027 if (data->action != data_test->action) 5028 return false; 5029 5030 if (data->n_params != data_test->n_params) 5031 return false; 5032 5033 for (j = 0; j < data->n_params; j++) { 5034 if (strcmp(data->params[j], data_test->params[j]) != 0) 5035 return false; 5036 } 5037 5038 if (data->use_trace_keyword) 5039 action_name = data->synth_event_name; 5040 else 5041 action_name = data->action_name; 5042 5043 if (data_test->use_trace_keyword) 5044 action_name_test = data_test->synth_event_name; 5045 else 5046 action_name_test = data_test->action_name; 5047 5048 if (strcmp(action_name, action_name_test) != 0) 5049 return false; 5050 5051 if (data->handler == HANDLER_ONMATCH) { 5052 if (strcmp(data->match_data.event_system, 5053 data_test->match_data.event_system) != 0) 5054 return false; 5055 if (strcmp(data->match_data.event, 5056 data_test->match_data.event) != 0) 5057 return false; 5058 } else if (data->handler == HANDLER_ONMAX || 5059 data->handler == HANDLER_ONCHANGE) { 5060 if (strcmp(data->track_data.var_str, 5061 data_test->track_data.var_str) != 0) 5062 return false; 5063 } 5064 } 5065 5066 return true; 5067 } 5068 5069 5070 static void print_actions_spec(struct seq_file *m, 5071 struct hist_trigger_data *hist_data) 5072 { 5073 unsigned int i; 5074 5075 for (i = 0; i < hist_data->n_actions; i++) { 5076 struct action_data *data = hist_data->actions[i]; 5077 5078 if (data->handler == HANDLER_ONMATCH) 5079 print_onmatch_spec(m, hist_data, data); 5080 else if (data->handler == HANDLER_ONMAX || 5081 data->handler == HANDLER_ONCHANGE) 5082 print_track_data_spec(m, hist_data, data); 5083 } 5084 } 5085 5086 static void destroy_field_var_hists(struct hist_trigger_data *hist_data) 5087 { 5088 unsigned int i; 5089 5090 for (i = 0; i < hist_data->n_field_var_hists; i++) { 5091 kfree(hist_data->field_var_hists[i]->cmd); 5092 kfree(hist_data->field_var_hists[i]); 5093 } 5094 } 5095 5096 static void destroy_hist_data(struct hist_trigger_data *hist_data) 5097 { 5098 if (!hist_data) 5099 return; 5100 5101 destroy_hist_trigger_attrs(hist_data->attrs); 5102 destroy_hist_fields(hist_data); 5103 tracing_map_destroy(hist_data->map); 5104 5105 destroy_actions(hist_data); 5106 destroy_field_vars(hist_data); 5107 destroy_field_var_hists(hist_data); 5108 5109 kfree(hist_data); 5110 } 5111 5112 static int create_tracing_map_fields(struct hist_trigger_data *hist_data) 5113 { 5114 struct tracing_map *map = hist_data->map; 5115 struct ftrace_event_field *field; 5116 struct hist_field *hist_field; 5117 int i, idx = 0; 5118 5119 for_each_hist_field(i, hist_data) { 5120 hist_field = hist_data->fields[i]; 5121 if (hist_field->flags & HIST_FIELD_FL_KEY) { 5122 tracing_map_cmp_fn_t cmp_fn; 5123 5124 field = hist_field->field; 5125 5126 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE) 5127 cmp_fn = tracing_map_cmp_none; 5128 else if (!field) 5129 cmp_fn = tracing_map_cmp_num(hist_field->size, 5130 hist_field->is_signed); 5131 else if (is_string_field(field)) 5132 cmp_fn = tracing_map_cmp_string; 5133 else 5134 cmp_fn = tracing_map_cmp_num(field->size, 5135 field->is_signed); 5136 idx = tracing_map_add_key_field(map, 5137 hist_field->offset, 5138 cmp_fn); 5139 } else if (!(hist_field->flags & HIST_FIELD_FL_VAR)) 5140 idx = tracing_map_add_sum_field(map); 5141 5142 if (idx < 0) 5143 return idx; 5144 5145 if 
(hist_field->flags & HIST_FIELD_FL_VAR) { 5146 idx = tracing_map_add_var(map); 5147 if (idx < 0) 5148 return idx; 5149 hist_field->var.idx = idx; 5150 hist_field->var.hist_data = hist_data; 5151 } 5152 } 5153 5154 return 0; 5155 } 5156 5157 static struct hist_trigger_data * 5158 create_hist_data(unsigned int map_bits, 5159 struct hist_trigger_attrs *attrs, 5160 struct trace_event_file *file, 5161 bool remove) 5162 { 5163 const struct tracing_map_ops *map_ops = NULL; 5164 struct hist_trigger_data *hist_data; 5165 int ret = 0; 5166 5167 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL); 5168 if (!hist_data) 5169 return ERR_PTR(-ENOMEM); 5170 5171 hist_data->attrs = attrs; 5172 hist_data->remove = remove; 5173 hist_data->event_file = file; 5174 5175 ret = parse_actions(hist_data); 5176 if (ret) 5177 goto free; 5178 5179 ret = create_hist_fields(hist_data, file); 5180 if (ret) 5181 goto free; 5182 5183 ret = create_sort_keys(hist_data); 5184 if (ret) 5185 goto free; 5186 5187 map_ops = &hist_trigger_elt_data_ops; 5188 5189 hist_data->map = tracing_map_create(map_bits, hist_data->key_size, 5190 map_ops, hist_data); 5191 if (IS_ERR(hist_data->map)) { 5192 ret = PTR_ERR(hist_data->map); 5193 hist_data->map = NULL; 5194 goto free; 5195 } 5196 5197 ret = create_tracing_map_fields(hist_data); 5198 if (ret) 5199 goto free; 5200 out: 5201 return hist_data; 5202 free: 5203 hist_data->attrs = NULL; 5204 5205 destroy_hist_data(hist_data); 5206 5207 hist_data = ERR_PTR(ret); 5208 5209 goto out; 5210 } 5211 5212 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data, 5213 struct tracing_map_elt *elt, void *rec, 5214 struct ring_buffer_event *rbe, 5215 u64 *var_ref_vals) 5216 { 5217 struct hist_elt_data *elt_data; 5218 struct hist_field *hist_field; 5219 unsigned int i, var_idx; 5220 u64 hist_val; 5221 5222 elt_data = elt->private_data; 5223 elt_data->var_ref_vals = var_ref_vals; 5224 5225 for_each_hist_val_field(i, hist_data) { 5226 hist_field = hist_data->fields[i]; 5227 hist_val = hist_field->fn(hist_field, elt, rbe, rec); 5228 if (hist_field->flags & HIST_FIELD_FL_VAR) { 5229 var_idx = hist_field->var.idx; 5230 tracing_map_set_var(elt, var_idx, hist_val); 5231 continue; 5232 } 5233 tracing_map_update_sum(elt, i, hist_val); 5234 } 5235 5236 for_each_hist_key_field(i, hist_data) { 5237 hist_field = hist_data->fields[i]; 5238 if (hist_field->flags & HIST_FIELD_FL_VAR) { 5239 hist_val = hist_field->fn(hist_field, elt, rbe, rec); 5240 var_idx = hist_field->var.idx; 5241 tracing_map_set_var(elt, var_idx, hist_val); 5242 } 5243 } 5244 5245 update_field_vars(hist_data, elt, rbe, rec); 5246 } 5247 5248 static inline void add_to_key(char *compound_key, void *key, 5249 struct hist_field *key_field, void *rec) 5250 { 5251 size_t size = key_field->size; 5252 5253 if (key_field->flags & HIST_FIELD_FL_STRING) { 5254 struct ftrace_event_field *field; 5255 5256 field = key_field->field; 5257 if (field->filter_type == FILTER_DYN_STRING) 5258 size = *(u32 *)(rec + field->offset) >> 16; 5259 else if (field->filter_type == FILTER_PTR_STRING) 5260 size = strlen(key); 5261 else if (field->filter_type == FILTER_STATIC_STRING) 5262 size = field->size; 5263 5264 /* ensure NULL-termination */ 5265 if (size > key_field->size - 1) 5266 size = key_field->size - 1; 5267 5268 strncpy(compound_key + key_field->offset, (char *)key, size); 5269 } else 5270 memcpy(compound_key + key_field->offset, key, size); 5271 } 5272 5273 static void 5274 hist_trigger_actions(struct hist_trigger_data *hist_data, 5275 struct 
tracing_map_elt *elt, void *rec, 5276 struct ring_buffer_event *rbe, void *key, 5277 u64 *var_ref_vals) 5278 { 5279 struct action_data *data; 5280 unsigned int i; 5281 5282 for (i = 0; i < hist_data->n_actions; i++) { 5283 data = hist_data->actions[i]; 5284 data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals); 5285 } 5286 } 5287 5288 static void event_hist_trigger(struct event_trigger_data *data, void *rec, 5289 struct ring_buffer_event *rbe) 5290 { 5291 struct hist_trigger_data *hist_data = data->private_data; 5292 bool use_compound_key = (hist_data->n_keys > 1); 5293 unsigned long entries[HIST_STACKTRACE_DEPTH]; 5294 u64 var_ref_vals[TRACING_MAP_VARS_MAX]; 5295 char compound_key[HIST_KEY_SIZE_MAX]; 5296 struct tracing_map_elt *elt = NULL; 5297 struct hist_field *key_field; 5298 u64 field_contents; 5299 void *key = NULL; 5300 unsigned int i; 5301 5302 memset(compound_key, 0, hist_data->key_size); 5303 5304 for_each_hist_key_field(i, hist_data) { 5305 key_field = hist_data->fields[i]; 5306 5307 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) { 5308 memset(entries, 0, HIST_STACKTRACE_SIZE); 5309 stack_trace_save(entries, HIST_STACKTRACE_DEPTH, 5310 HIST_STACKTRACE_SKIP); 5311 key = entries; 5312 } else { 5313 field_contents = key_field->fn(key_field, elt, rbe, rec); 5314 if (key_field->flags & HIST_FIELD_FL_STRING) { 5315 key = (void *)(unsigned long)field_contents; 5316 use_compound_key = true; 5317 } else 5318 key = (void *)&field_contents; 5319 } 5320 5321 if (use_compound_key) 5322 add_to_key(compound_key, key, key_field, rec); 5323 } 5324 5325 if (use_compound_key) 5326 key = compound_key; 5327 5328 if (hist_data->n_var_refs && 5329 !resolve_var_refs(hist_data, key, var_ref_vals, false)) 5330 return; 5331 5332 elt = tracing_map_insert(hist_data->map, key); 5333 if (!elt) 5334 return; 5335 5336 hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals); 5337 5338 if (resolve_var_refs(hist_data, key, var_ref_vals, true)) 5339 hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals); 5340 } 5341 5342 static void hist_trigger_stacktrace_print(struct seq_file *m, 5343 unsigned long *stacktrace_entries, 5344 unsigned int max_entries) 5345 { 5346 char str[KSYM_SYMBOL_LEN]; 5347 unsigned int spaces = 8; 5348 unsigned int i; 5349 5350 for (i = 0; i < max_entries; i++) { 5351 if (!stacktrace_entries[i]) 5352 return; 5353 5354 seq_printf(m, "%*c", 1 + spaces, ' '); 5355 sprint_symbol(str, stacktrace_entries[i]); 5356 seq_printf(m, "%s\n", str); 5357 } 5358 } 5359 5360 static void hist_trigger_print_key(struct seq_file *m, 5361 struct hist_trigger_data *hist_data, 5362 void *key, 5363 struct tracing_map_elt *elt) 5364 { 5365 struct hist_field *key_field; 5366 char str[KSYM_SYMBOL_LEN]; 5367 bool multiline = false; 5368 const char *field_name; 5369 unsigned int i; 5370 u64 uval; 5371 5372 seq_puts(m, "{ "); 5373 5374 for_each_hist_key_field(i, hist_data) { 5375 key_field = hist_data->fields[i]; 5376 5377 if (i > hist_data->n_vals) 5378 seq_puts(m, ", "); 5379 5380 field_name = hist_field_name(key_field, 0); 5381 5382 if (key_field->flags & HIST_FIELD_FL_HEX) { 5383 uval = *(u64 *)(key + key_field->offset); 5384 seq_printf(m, "%s: %llx", field_name, uval); 5385 } else if (key_field->flags & HIST_FIELD_FL_SYM) { 5386 uval = *(u64 *)(key + key_field->offset); 5387 sprint_symbol_no_offset(str, uval); 5388 seq_printf(m, "%s: [%llx] %-45s", field_name, 5389 uval, str); 5390 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) { 5391 uval = *(u64 *)(key + key_field->offset); 5392 
sprint_symbol(str, uval); 5393 seq_printf(m, "%s: [%llx] %-55s", field_name, 5394 uval, str); 5395 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) { 5396 struct hist_elt_data *elt_data = elt->private_data; 5397 char *comm; 5398 5399 if (WARN_ON_ONCE(!elt_data)) 5400 return; 5401 5402 comm = elt_data->comm; 5403 5404 uval = *(u64 *)(key + key_field->offset); 5405 seq_printf(m, "%s: %-16s[%10llu]", field_name, 5406 comm, uval); 5407 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) { 5408 const char *syscall_name; 5409 5410 uval = *(u64 *)(key + key_field->offset); 5411 syscall_name = get_syscall_name(uval); 5412 if (!syscall_name) 5413 syscall_name = "unknown_syscall"; 5414 5415 seq_printf(m, "%s: %-30s[%3llu]", field_name, 5416 syscall_name, uval); 5417 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) { 5418 seq_puts(m, "stacktrace:\n"); 5419 hist_trigger_stacktrace_print(m, 5420 key + key_field->offset, 5421 HIST_STACKTRACE_DEPTH); 5422 multiline = true; 5423 } else if (key_field->flags & HIST_FIELD_FL_LOG2) { 5424 seq_printf(m, "%s: ~ 2^%-2llu", field_name, 5425 *(u64 *)(key + key_field->offset)); 5426 } else if (key_field->flags & HIST_FIELD_FL_STRING) { 5427 seq_printf(m, "%s: %-50s", field_name, 5428 (char *)(key + key_field->offset)); 5429 } else { 5430 uval = *(u64 *)(key + key_field->offset); 5431 seq_printf(m, "%s: %10llu", field_name, uval); 5432 } 5433 } 5434 5435 if (!multiline) 5436 seq_puts(m, " "); 5437 5438 seq_puts(m, "}"); 5439 } 5440 5441 static void hist_trigger_entry_print(struct seq_file *m, 5442 struct hist_trigger_data *hist_data, 5443 void *key, 5444 struct tracing_map_elt *elt) 5445 { 5446 const char *field_name; 5447 unsigned int i; 5448 5449 hist_trigger_print_key(m, hist_data, key, elt); 5450 5451 seq_printf(m, " hitcount: %10llu", 5452 tracing_map_read_sum(elt, HITCOUNT_IDX)); 5453 5454 for (i = 1; i < hist_data->n_vals; i++) { 5455 field_name = hist_field_name(hist_data->fields[i], 0); 5456 5457 if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR || 5458 hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR) 5459 continue; 5460 5461 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) { 5462 seq_printf(m, " %s: %10llx", field_name, 5463 tracing_map_read_sum(elt, i)); 5464 } else { 5465 seq_printf(m, " %s: %10llu", field_name, 5466 tracing_map_read_sum(elt, i)); 5467 } 5468 } 5469 5470 print_actions(m, hist_data, elt); 5471 5472 seq_puts(m, "\n"); 5473 } 5474 5475 static int print_entries(struct seq_file *m, 5476 struct hist_trigger_data *hist_data) 5477 { 5478 struct tracing_map_sort_entry **sort_entries = NULL; 5479 struct tracing_map *map = hist_data->map; 5480 int i, n_entries; 5481 5482 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys, 5483 hist_data->n_sort_keys, 5484 &sort_entries); 5485 if (n_entries < 0) 5486 return n_entries; 5487 5488 for (i = 0; i < n_entries; i++) 5489 hist_trigger_entry_print(m, hist_data, 5490 sort_entries[i]->key, 5491 sort_entries[i]->elt); 5492 5493 tracing_map_destroy_sort_entries(sort_entries, n_entries); 5494 5495 return n_entries; 5496 } 5497 5498 static void hist_trigger_show(struct seq_file *m, 5499 struct event_trigger_data *data, int n) 5500 { 5501 struct hist_trigger_data *hist_data; 5502 int n_entries; 5503 5504 if (n > 0) 5505 seq_puts(m, "\n\n"); 5506 5507 seq_puts(m, "# event histogram\n#\n# trigger info: "); 5508 data->ops->print(m, data->ops, data); 5509 seq_puts(m, "#\n\n"); 5510 5511 hist_data = data->private_data; 5512 n_entries = print_entries(m, hist_data); 5513 if (n_entries < 0) 
5514 n_entries = 0; 5515 5516 track_data_snapshot_print(m, hist_data); 5517 5518 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n", 5519 (u64)atomic64_read(&hist_data->map->hits), 5520 n_entries, (u64)atomic64_read(&hist_data->map->drops)); 5521 } 5522 5523 static int hist_show(struct seq_file *m, void *v) 5524 { 5525 struct event_trigger_data *data; 5526 struct trace_event_file *event_file; 5527 int n = 0, ret = 0; 5528 5529 mutex_lock(&event_mutex); 5530 5531 event_file = event_file_data(m->private); 5532 if (unlikely(!event_file)) { 5533 ret = -ENODEV; 5534 goto out_unlock; 5535 } 5536 5537 list_for_each_entry_rcu(data, &event_file->triggers, list) { 5538 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) 5539 hist_trigger_show(m, data, n++); 5540 } 5541 5542 out_unlock: 5543 mutex_unlock(&event_mutex); 5544 5545 return ret; 5546 } 5547 5548 static int event_hist_open(struct inode *inode, struct file *file) 5549 { 5550 int ret; 5551 5552 ret = security_locked_down(LOCKDOWN_TRACEFS); 5553 if (ret) 5554 return ret; 5555 5556 return single_open(file, hist_show, file); 5557 } 5558 5559 const struct file_operations event_hist_fops = { 5560 .open = event_hist_open, 5561 .read = seq_read, 5562 .llseek = seq_lseek, 5563 .release = single_release, 5564 }; 5565 5566 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field) 5567 { 5568 const char *field_name = hist_field_name(hist_field, 0); 5569 5570 if (hist_field->var.name) 5571 seq_printf(m, "%s=", hist_field->var.name); 5572 5573 if (hist_field->flags & HIST_FIELD_FL_CPU) 5574 seq_puts(m, "cpu"); 5575 else if (field_name) { 5576 if (hist_field->flags & HIST_FIELD_FL_VAR_REF || 5577 hist_field->flags & HIST_FIELD_FL_ALIAS) 5578 seq_putc(m, '$'); 5579 seq_printf(m, "%s", field_name); 5580 } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP) 5581 seq_puts(m, "common_timestamp"); 5582 5583 if (hist_field->flags) { 5584 if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) && 5585 !(hist_field->flags & HIST_FIELD_FL_EXPR)) { 5586 const char *flags = get_hist_field_flags(hist_field); 5587 5588 if (flags) 5589 seq_printf(m, ".%s", flags); 5590 } 5591 } 5592 } 5593 5594 static int event_hist_trigger_print(struct seq_file *m, 5595 struct event_trigger_ops *ops, 5596 struct event_trigger_data *data) 5597 { 5598 struct hist_trigger_data *hist_data = data->private_data; 5599 struct hist_field *field; 5600 bool have_var = false; 5601 unsigned int i; 5602 5603 seq_puts(m, "hist:"); 5604 5605 if (data->name) 5606 seq_printf(m, "%s:", data->name); 5607 5608 seq_puts(m, "keys="); 5609 5610 for_each_hist_key_field(i, hist_data) { 5611 field = hist_data->fields[i]; 5612 5613 if (i > hist_data->n_vals) 5614 seq_puts(m, ","); 5615 5616 if (field->flags & HIST_FIELD_FL_STACKTRACE) 5617 seq_puts(m, "stacktrace"); 5618 else 5619 hist_field_print(m, field); 5620 } 5621 5622 seq_puts(m, ":vals="); 5623 5624 for_each_hist_val_field(i, hist_data) { 5625 field = hist_data->fields[i]; 5626 if (field->flags & HIST_FIELD_FL_VAR) { 5627 have_var = true; 5628 continue; 5629 } 5630 5631 if (i == HITCOUNT_IDX) 5632 seq_puts(m, "hitcount"); 5633 else { 5634 seq_puts(m, ","); 5635 hist_field_print(m, field); 5636 } 5637 } 5638 5639 if (have_var) { 5640 unsigned int n = 0; 5641 5642 seq_puts(m, ":"); 5643 5644 for_each_hist_val_field(i, hist_data) { 5645 field = hist_data->fields[i]; 5646 5647 if (field->flags & HIST_FIELD_FL_VAR) { 5648 if (n++) 5649 seq_puts(m, ","); 5650 hist_field_print(m, field); 5651 } 5652 } 5653 } 5654 5655 
seq_puts(m, ":sort="); 5656 5657 for (i = 0; i < hist_data->n_sort_keys; i++) { 5658 struct tracing_map_sort_key *sort_key; 5659 unsigned int idx, first_key_idx; 5660 5661 /* skip VAR vals */ 5662 first_key_idx = hist_data->n_vals - hist_data->n_vars; 5663 5664 sort_key = &hist_data->sort_keys[i]; 5665 idx = sort_key->field_idx; 5666 5667 if (WARN_ON(idx >= HIST_FIELDS_MAX)) 5668 return -EINVAL; 5669 5670 if (i > 0) 5671 seq_puts(m, ","); 5672 5673 if (idx == HITCOUNT_IDX) 5674 seq_puts(m, "hitcount"); 5675 else { 5676 if (idx >= first_key_idx) 5677 idx += hist_data->n_vars; 5678 hist_field_print(m, hist_data->fields[idx]); 5679 } 5680 5681 if (sort_key->descending) 5682 seq_puts(m, ".descending"); 5683 } 5684 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits)); 5685 if (hist_data->enable_timestamps) 5686 seq_printf(m, ":clock=%s", hist_data->attrs->clock); 5687 5688 print_actions_spec(m, hist_data); 5689 5690 if (data->filter_str) 5691 seq_printf(m, " if %s", data->filter_str); 5692 5693 if (data->paused) 5694 seq_puts(m, " [paused]"); 5695 else 5696 seq_puts(m, " [active]"); 5697 5698 seq_putc(m, '\n'); 5699 5700 return 0; 5701 } 5702 5703 static int event_hist_trigger_init(struct event_trigger_ops *ops, 5704 struct event_trigger_data *data) 5705 { 5706 struct hist_trigger_data *hist_data = data->private_data; 5707 5708 if (!data->ref && hist_data->attrs->name) 5709 save_named_trigger(hist_data->attrs->name, data); 5710 5711 data->ref++; 5712 5713 return 0; 5714 } 5715 5716 static void unregister_field_var_hists(struct hist_trigger_data *hist_data) 5717 { 5718 struct trace_event_file *file; 5719 unsigned int i; 5720 char *cmd; 5721 int ret; 5722 5723 for (i = 0; i < hist_data->n_field_var_hists; i++) { 5724 file = hist_data->field_var_hists[i]->hist_data->event_file; 5725 cmd = hist_data->field_var_hists[i]->cmd; 5726 ret = event_hist_trigger_func(&trigger_hist_cmd, file, 5727 "!hist", "hist", cmd); 5728 } 5729 } 5730 5731 static void event_hist_trigger_free(struct event_trigger_ops *ops, 5732 struct event_trigger_data *data) 5733 { 5734 struct hist_trigger_data *hist_data = data->private_data; 5735 5736 if (WARN_ON_ONCE(data->ref <= 0)) 5737 return; 5738 5739 data->ref--; 5740 if (!data->ref) { 5741 if (data->name) 5742 del_named_trigger(data); 5743 5744 trigger_data_free(data); 5745 5746 remove_hist_vars(hist_data); 5747 5748 unregister_field_var_hists(hist_data); 5749 5750 destroy_hist_data(hist_data); 5751 } 5752 } 5753 5754 static struct event_trigger_ops event_hist_trigger_ops = { 5755 .func = event_hist_trigger, 5756 .print = event_hist_trigger_print, 5757 .init = event_hist_trigger_init, 5758 .free = event_hist_trigger_free, 5759 }; 5760 5761 static int event_hist_trigger_named_init(struct event_trigger_ops *ops, 5762 struct event_trigger_data *data) 5763 { 5764 data->ref++; 5765 5766 save_named_trigger(data->named_data->name, data); 5767 5768 event_hist_trigger_init(ops, data->named_data); 5769 5770 return 0; 5771 } 5772 5773 static void event_hist_trigger_named_free(struct event_trigger_ops *ops, 5774 struct event_trigger_data *data) 5775 { 5776 if (WARN_ON_ONCE(data->ref <= 0)) 5777 return; 5778 5779 event_hist_trigger_free(ops, data->named_data); 5780 5781 data->ref--; 5782 if (!data->ref) { 5783 del_named_trigger(data); 5784 trigger_data_free(data); 5785 } 5786 } 5787 5788 static struct event_trigger_ops event_hist_trigger_named_ops = { 5789 .func = event_hist_trigger, 5790 .print = event_hist_trigger_print, 5791 .init = event_hist_trigger_named_init, 5792 .free = 
event_hist_trigger_named_free, 5793 }; 5794 5795 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd, 5796 char *param) 5797 { 5798 return &event_hist_trigger_ops; 5799 } 5800 5801 static void hist_clear(struct event_trigger_data *data) 5802 { 5803 struct hist_trigger_data *hist_data = data->private_data; 5804 5805 if (data->name) 5806 pause_named_trigger(data); 5807 5808 tracepoint_synchronize_unregister(); 5809 5810 tracing_map_clear(hist_data->map); 5811 5812 if (data->name) 5813 unpause_named_trigger(data); 5814 } 5815 5816 static bool compatible_field(struct ftrace_event_field *field, 5817 struct ftrace_event_field *test_field) 5818 { 5819 if (field == test_field) 5820 return true; 5821 if (field == NULL || test_field == NULL) 5822 return false; 5823 if (strcmp(field->name, test_field->name) != 0) 5824 return false; 5825 if (strcmp(field->type, test_field->type) != 0) 5826 return false; 5827 if (field->size != test_field->size) 5828 return false; 5829 if (field->is_signed != test_field->is_signed) 5830 return false; 5831 5832 return true; 5833 } 5834 5835 static bool hist_trigger_match(struct event_trigger_data *data, 5836 struct event_trigger_data *data_test, 5837 struct event_trigger_data *named_data, 5838 bool ignore_filter) 5839 { 5840 struct tracing_map_sort_key *sort_key, *sort_key_test; 5841 struct hist_trigger_data *hist_data, *hist_data_test; 5842 struct hist_field *key_field, *key_field_test; 5843 unsigned int i; 5844 5845 if (named_data && (named_data != data_test) && 5846 (named_data != data_test->named_data)) 5847 return false; 5848 5849 if (!named_data && is_named_trigger(data_test)) 5850 return false; 5851 5852 hist_data = data->private_data; 5853 hist_data_test = data_test->private_data; 5854 5855 if (hist_data->n_vals != hist_data_test->n_vals || 5856 hist_data->n_fields != hist_data_test->n_fields || 5857 hist_data->n_sort_keys != hist_data_test->n_sort_keys) 5858 return false; 5859 5860 if (!ignore_filter) { 5861 if ((data->filter_str && !data_test->filter_str) || 5862 (!data->filter_str && data_test->filter_str)) 5863 return false; 5864 } 5865 5866 for_each_hist_field(i, hist_data) { 5867 key_field = hist_data->fields[i]; 5868 key_field_test = hist_data_test->fields[i]; 5869 5870 if (key_field->flags != key_field_test->flags) 5871 return false; 5872 if (!compatible_field(key_field->field, key_field_test->field)) 5873 return false; 5874 if (key_field->offset != key_field_test->offset) 5875 return false; 5876 if (key_field->size != key_field_test->size) 5877 return false; 5878 if (key_field->is_signed != key_field_test->is_signed) 5879 return false; 5880 if (!!key_field->var.name != !!key_field_test->var.name) 5881 return false; 5882 if (key_field->var.name && 5883 strcmp(key_field->var.name, key_field_test->var.name) != 0) 5884 return false; 5885 } 5886 5887 for (i = 0; i < hist_data->n_sort_keys; i++) { 5888 sort_key = &hist_data->sort_keys[i]; 5889 sort_key_test = &hist_data_test->sort_keys[i]; 5890 5891 if (sort_key->field_idx != sort_key_test->field_idx || 5892 sort_key->descending != sort_key_test->descending) 5893 return false; 5894 } 5895 5896 if (!ignore_filter && data->filter_str && 5897 (strcmp(data->filter_str, data_test->filter_str) != 0)) 5898 return false; 5899 5900 if (!actions_match(hist_data, hist_data_test)) 5901 return false; 5902 5903 return true; 5904 } 5905 5906 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops, 5907 struct event_trigger_data *data, 5908 struct trace_event_file *file) 5909 { 5910 
struct hist_trigger_data *hist_data = data->private_data; 5911 struct event_trigger_data *test, *named_data = NULL; 5912 struct trace_array *tr = file->tr; 5913 int ret = 0; 5914 5915 if (hist_data->attrs->name) { 5916 named_data = find_named_trigger(hist_data->attrs->name); 5917 if (named_data) { 5918 if (!hist_trigger_match(data, named_data, named_data, 5919 true)) { 5920 hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name)); 5921 ret = -EINVAL; 5922 goto out; 5923 } 5924 } 5925 } 5926 5927 if (hist_data->attrs->name && !named_data) 5928 goto new; 5929 5930 list_for_each_entry_rcu(test, &file->triggers, list) { 5931 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5932 if (!hist_trigger_match(data, test, named_data, false)) 5933 continue; 5934 if (hist_data->attrs->pause) 5935 test->paused = true; 5936 else if (hist_data->attrs->cont) 5937 test->paused = false; 5938 else if (hist_data->attrs->clear) 5939 hist_clear(test); 5940 else { 5941 hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0); 5942 ret = -EEXIST; 5943 } 5944 goto out; 5945 } 5946 } 5947 new: 5948 if (hist_data->attrs->cont || hist_data->attrs->clear) { 5949 hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0); 5950 ret = -ENOENT; 5951 goto out; 5952 } 5953 5954 if (hist_data->attrs->pause) 5955 data->paused = true; 5956 5957 if (named_data) { 5958 data->private_data = named_data->private_data; 5959 set_named_trigger_data(data, named_data); 5960 data->ops = &event_hist_trigger_named_ops; 5961 } 5962 5963 if (data->ops->init) { 5964 ret = data->ops->init(data->ops, data); 5965 if (ret < 0) 5966 goto out; 5967 } 5968 5969 if (hist_data->enable_timestamps) { 5970 char *clock = hist_data->attrs->clock; 5971 5972 ret = tracing_set_clock(file->tr, hist_data->attrs->clock); 5973 if (ret) { 5974 hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock)); 5975 goto out; 5976 } 5977 5978 tracing_set_time_stamp_abs(file->tr, true); 5979 } 5980 5981 if (named_data) 5982 destroy_hist_data(hist_data); 5983 5984 ret++; 5985 out: 5986 return ret; 5987 } 5988 5989 static int hist_trigger_enable(struct event_trigger_data *data, 5990 struct trace_event_file *file) 5991 { 5992 int ret = 0; 5993 5994 list_add_tail_rcu(&data->list, &file->triggers); 5995 5996 update_cond_flag(file); 5997 5998 if (trace_event_trigger_enable_disable(file, 1) < 0) { 5999 list_del_rcu(&data->list); 6000 update_cond_flag(file); 6001 ret--; 6002 } 6003 6004 return ret; 6005 } 6006 6007 static bool have_hist_trigger_match(struct event_trigger_data *data, 6008 struct trace_event_file *file) 6009 { 6010 struct hist_trigger_data *hist_data = data->private_data; 6011 struct event_trigger_data *test, *named_data = NULL; 6012 bool match = false; 6013 6014 if (hist_data->attrs->name) 6015 named_data = find_named_trigger(hist_data->attrs->name); 6016 6017 list_for_each_entry_rcu(test, &file->triggers, list) { 6018 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6019 if (hist_trigger_match(data, test, named_data, false)) { 6020 match = true; 6021 break; 6022 } 6023 } 6024 } 6025 6026 return match; 6027 } 6028 6029 static bool hist_trigger_check_refs(struct event_trigger_data *data, 6030 struct trace_event_file *file) 6031 { 6032 struct hist_trigger_data *hist_data = data->private_data; 6033 struct event_trigger_data *test, *named_data = NULL; 6034 6035 if (hist_data->attrs->name) 6036 named_data = find_named_trigger(hist_data->attrs->name); 6037 6038 list_for_each_entry_rcu(test, &file->triggers, list) { 6039 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6040 if 
(!hist_trigger_match(data, test, named_data, false)) 6041 continue; 6042 hist_data = test->private_data; 6043 if (check_var_refs(hist_data)) 6044 return true; 6045 break; 6046 } 6047 } 6048 6049 return false; 6050 } 6051 6052 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops, 6053 struct event_trigger_data *data, 6054 struct trace_event_file *file) 6055 { 6056 struct hist_trigger_data *hist_data = data->private_data; 6057 struct event_trigger_data *test, *named_data = NULL; 6058 bool unregistered = false; 6059 6060 if (hist_data->attrs->name) 6061 named_data = find_named_trigger(hist_data->attrs->name); 6062 6063 list_for_each_entry_rcu(test, &file->triggers, list) { 6064 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6065 if (!hist_trigger_match(data, test, named_data, false)) 6066 continue; 6067 unregistered = true; 6068 list_del_rcu(&test->list); 6069 trace_event_trigger_enable_disable(file, 0); 6070 update_cond_flag(file); 6071 break; 6072 } 6073 } 6074 6075 if (unregistered && test->ops->free) 6076 test->ops->free(test->ops, test); 6077 6078 if (hist_data->enable_timestamps) { 6079 if (!hist_data->remove || unregistered) 6080 tracing_set_time_stamp_abs(file->tr, false); 6081 } 6082 } 6083 6084 static bool hist_file_check_refs(struct trace_event_file *file) 6085 { 6086 struct hist_trigger_data *hist_data; 6087 struct event_trigger_data *test; 6088 6089 list_for_each_entry_rcu(test, &file->triggers, list) { 6090 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6091 hist_data = test->private_data; 6092 if (check_var_refs(hist_data)) 6093 return true; 6094 } 6095 } 6096 6097 return false; 6098 } 6099 6100 static void hist_unreg_all(struct trace_event_file *file) 6101 { 6102 struct event_trigger_data *test, *n; 6103 struct hist_trigger_data *hist_data; 6104 struct synth_event *se; 6105 const char *se_name; 6106 6107 lockdep_assert_held(&event_mutex); 6108 6109 if (hist_file_check_refs(file)) 6110 return; 6111 6112 list_for_each_entry_safe(test, n, &file->triggers, list) { 6113 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6114 hist_data = test->private_data; 6115 list_del_rcu(&test->list); 6116 trace_event_trigger_enable_disable(file, 0); 6117 6118 se_name = trace_event_name(file->event_call); 6119 se = find_synth_event(se_name); 6120 if (se) 6121 se->ref--; 6122 6123 update_cond_flag(file); 6124 if (hist_data->enable_timestamps) 6125 tracing_set_time_stamp_abs(file->tr, false); 6126 if (test->ops->free) 6127 test->ops->free(test->ops, test); 6128 } 6129 } 6130 } 6131 6132 static int event_hist_trigger_func(struct event_command *cmd_ops, 6133 struct trace_event_file *file, 6134 char *glob, char *cmd, char *param) 6135 { 6136 unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT; 6137 struct event_trigger_data *trigger_data; 6138 struct hist_trigger_attrs *attrs; 6139 struct event_trigger_ops *trigger_ops; 6140 struct hist_trigger_data *hist_data; 6141 struct synth_event *se; 6142 const char *se_name; 6143 bool remove = false; 6144 char *trigger, *p; 6145 int ret = 0; 6146 6147 lockdep_assert_held(&event_mutex); 6148 6149 if (glob && strlen(glob)) { 6150 hist_err_clear(); 6151 last_cmd_set(file, param); 6152 } 6153 6154 if (!param) 6155 return -EINVAL; 6156 6157 if (glob[0] == '!') 6158 remove = true; 6159 6160 /* 6161 * separate the trigger from the filter (k:v [if filter]) 6162 * allowing for whitespace in the trigger 6163 */ 6164 p = trigger = param; 6165 do { 6166 p = strstr(p, "if"); 6167 if (!p) 6168 break; 6169 if (p == param) 6170 
return -EINVAL; 6171 if (*(p - 1) != ' ' && *(p - 1) != '\t') { 6172 p++; 6173 continue; 6174 } 6175 if (p >= param + strlen(param) - (sizeof("if") - 1) - 1) 6176 return -EINVAL; 6177 if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') { 6178 p++; 6179 continue; 6180 } 6181 break; 6182 } while (p); 6183 6184 if (!p) 6185 param = NULL; 6186 else { 6187 *(p - 1) = '\0'; 6188 param = strstrip(p); 6189 trigger = strstrip(trigger); 6190 } 6191 6192 attrs = parse_hist_trigger_attrs(file->tr, trigger); 6193 if (IS_ERR(attrs)) 6194 return PTR_ERR(attrs); 6195 6196 if (attrs->map_bits) 6197 hist_trigger_bits = attrs->map_bits; 6198 6199 hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove); 6200 if (IS_ERR(hist_data)) { 6201 destroy_hist_trigger_attrs(attrs); 6202 return PTR_ERR(hist_data); 6203 } 6204 6205 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger); 6206 6207 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL); 6208 if (!trigger_data) { 6209 ret = -ENOMEM; 6210 goto out_free; 6211 } 6212 6213 trigger_data->count = -1; 6214 trigger_data->ops = trigger_ops; 6215 trigger_data->cmd_ops = cmd_ops; 6216 6217 INIT_LIST_HEAD(&trigger_data->list); 6218 RCU_INIT_POINTER(trigger_data->filter, NULL); 6219 6220 trigger_data->private_data = hist_data; 6221 6222 /* if param is non-empty, it's supposed to be a filter */ 6223 if (param && cmd_ops->set_filter) { 6224 ret = cmd_ops->set_filter(param, trigger_data, file); 6225 if (ret < 0) 6226 goto out_free; 6227 } 6228 6229 if (remove) { 6230 if (!have_hist_trigger_match(trigger_data, file)) 6231 goto out_free; 6232 6233 if (hist_trigger_check_refs(trigger_data, file)) { 6234 ret = -EBUSY; 6235 goto out_free; 6236 } 6237 6238 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file); 6239 se_name = trace_event_name(file->event_call); 6240 se = find_synth_event(se_name); 6241 if (se) 6242 se->ref--; 6243 ret = 0; 6244 goto out_free; 6245 } 6246 6247 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); 6248 /* 6249 * The above returns on success the # of triggers registered, 6250 * but if it didn't register any it returns zero. Consider no 6251 * triggers registered a failure too. 
6252 */ 6253 if (!ret) { 6254 if (!(attrs->pause || attrs->cont || attrs->clear)) 6255 ret = -ENOENT; 6256 goto out_free; 6257 } else if (ret < 0) 6258 goto out_free; 6259 6260 if (get_named_trigger_data(trigger_data)) 6261 goto enable; 6262 6263 if (has_hist_vars(hist_data)) 6264 save_hist_vars(hist_data); 6265 6266 ret = create_actions(hist_data); 6267 if (ret) 6268 goto out_unreg; 6269 6270 ret = tracing_map_init(hist_data->map); 6271 if (ret) 6272 goto out_unreg; 6273 enable: 6274 ret = hist_trigger_enable(trigger_data, file); 6275 if (ret) 6276 goto out_unreg; 6277 6278 se_name = trace_event_name(file->event_call); 6279 se = find_synth_event(se_name); 6280 if (se) 6281 se->ref++; 6282 /* Just return zero, not the number of registered triggers */ 6283 ret = 0; 6284 out: 6285 if (ret == 0) 6286 hist_err_clear(); 6287 6288 return ret; 6289 out_unreg: 6290 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file); 6291 out_free: 6292 if (cmd_ops->set_filter) 6293 cmd_ops->set_filter(NULL, trigger_data, NULL); 6294 6295 remove_hist_vars(hist_data); 6296 6297 kfree(trigger_data); 6298 6299 destroy_hist_data(hist_data); 6300 goto out; 6301 } 6302 6303 static struct event_command trigger_hist_cmd = { 6304 .name = "hist", 6305 .trigger_type = ETT_EVENT_HIST, 6306 .flags = EVENT_CMD_FL_NEEDS_REC, 6307 .func = event_hist_trigger_func, 6308 .reg = hist_register_trigger, 6309 .unreg = hist_unregister_trigger, 6310 .unreg_all = hist_unreg_all, 6311 .get_trigger_ops = event_hist_get_trigger_ops, 6312 .set_filter = set_trigger_filter, 6313 }; 6314 6315 __init int register_trigger_hist_cmd(void) 6316 { 6317 int ret; 6318 6319 ret = register_event_command(&trigger_hist_cmd); 6320 WARN_ON(ret < 0); 6321 6322 return ret; 6323 } 6324 6325 static void 6326 hist_enable_trigger(struct event_trigger_data *data, void *rec, 6327 struct ring_buffer_event *event) 6328 { 6329 struct enable_trigger_data *enable_data = data->private_data; 6330 struct event_trigger_data *test; 6331 6332 list_for_each_entry_rcu(test, &enable_data->file->triggers, list) { 6333 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6334 if (enable_data->enable) 6335 test->paused = false; 6336 else 6337 test->paused = true; 6338 } 6339 } 6340 } 6341 6342 static void 6343 hist_enable_count_trigger(struct event_trigger_data *data, void *rec, 6344 struct ring_buffer_event *event) 6345 { 6346 if (!data->count) 6347 return; 6348 6349 if (data->count != -1) 6350 (data->count)--; 6351 6352 hist_enable_trigger(data, rec, event); 6353 } 6354 6355 static struct event_trigger_ops hist_enable_trigger_ops = { 6356 .func = hist_enable_trigger, 6357 .print = event_enable_trigger_print, 6358 .init = event_trigger_init, 6359 .free = event_enable_trigger_free, 6360 }; 6361 6362 static struct event_trigger_ops hist_enable_count_trigger_ops = { 6363 .func = hist_enable_count_trigger, 6364 .print = event_enable_trigger_print, 6365 .init = event_trigger_init, 6366 .free = event_enable_trigger_free, 6367 }; 6368 6369 static struct event_trigger_ops hist_disable_trigger_ops = { 6370 .func = hist_enable_trigger, 6371 .print = event_enable_trigger_print, 6372 .init = event_trigger_init, 6373 .free = event_enable_trigger_free, 6374 }; 6375 6376 static struct event_trigger_ops hist_disable_count_trigger_ops = { 6377 .func = hist_enable_count_trigger, 6378 .print = event_enable_trigger_print, 6379 .init = event_trigger_init, 6380 .free = event_enable_trigger_free, 6381 }; 6382 6383 static struct event_trigger_ops * 6384 hist_enable_get_trigger_ops(char *cmd, char 
*param) 6385 { 6386 struct event_trigger_ops *ops; 6387 bool enable; 6388 6389 enable = (strcmp(cmd, ENABLE_HIST_STR) == 0); 6390 6391 if (enable) 6392 ops = param ? &hist_enable_count_trigger_ops : 6393 &hist_enable_trigger_ops; 6394 else 6395 ops = param ? &hist_disable_count_trigger_ops : 6396 &hist_disable_trigger_ops; 6397 6398 return ops; 6399 } 6400 6401 static void hist_enable_unreg_all(struct trace_event_file *file) 6402 { 6403 struct event_trigger_data *test, *n; 6404 6405 list_for_each_entry_safe(test, n, &file->triggers, list) { 6406 if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) { 6407 list_del_rcu(&test->list); 6408 update_cond_flag(file); 6409 trace_event_trigger_enable_disable(file, 0); 6410 if (test->ops->free) 6411 test->ops->free(test->ops, test); 6412 } 6413 } 6414 } 6415 6416 static struct event_command trigger_hist_enable_cmd = { 6417 .name = ENABLE_HIST_STR, 6418 .trigger_type = ETT_HIST_ENABLE, 6419 .func = event_enable_trigger_func, 6420 .reg = event_enable_register_trigger, 6421 .unreg = event_enable_unregister_trigger, 6422 .unreg_all = hist_enable_unreg_all, 6423 .get_trigger_ops = hist_enable_get_trigger_ops, 6424 .set_filter = set_trigger_filter, 6425 }; 6426 6427 static struct event_command trigger_hist_disable_cmd = { 6428 .name = DISABLE_HIST_STR, 6429 .trigger_type = ETT_HIST_ENABLE, 6430 .func = event_enable_trigger_func, 6431 .reg = event_enable_register_trigger, 6432 .unreg = event_enable_unregister_trigger, 6433 .unreg_all = hist_enable_unreg_all, 6434 .get_trigger_ops = hist_enable_get_trigger_ops, 6435 .set_filter = set_trigger_filter, 6436 }; 6437 6438 static __init void unregister_trigger_hist_enable_disable_cmds(void) 6439 { 6440 unregister_event_command(&trigger_hist_enable_cmd); 6441 unregister_event_command(&trigger_hist_disable_cmd); 6442 } 6443 6444 __init int register_trigger_hist_enable_disable_cmds(void) 6445 { 6446 int ret; 6447 6448 ret = register_event_command(&trigger_hist_enable_cmd); 6449 if (WARN_ON(ret < 0)) 6450 return ret; 6451 ret = register_event_command(&trigger_hist_disable_cmd); 6452 if (WARN_ON(ret < 0)) 6453 unregister_trigger_hist_enable_disable_cmds(); 6454 6455 return ret; 6456 } 6457 6458 static __init int trace_events_hist_init(void) 6459 { 6460 struct dentry *entry = NULL; 6461 struct dentry *d_tracer; 6462 int err = 0; 6463 6464 err = dyn_event_register(&synth_event_ops); 6465 if (err) { 6466 pr_warn("Could not register synth_event_ops\n"); 6467 return err; 6468 } 6469 6470 d_tracer = tracing_init_dentry(); 6471 if (IS_ERR(d_tracer)) { 6472 err = PTR_ERR(d_tracer); 6473 goto err; 6474 } 6475 6476 entry = tracefs_create_file("synthetic_events", 0644, d_tracer, 6477 NULL, &synth_events_fops); 6478 if (!entry) { 6479 err = -ENODEV; 6480 goto err; 6481 } 6482 6483 return err; 6484 err: 6485 pr_warn("Could not create tracefs 'synthetic_events' entry\n"); 6486 6487 return err; 6488 } 6489 6490 fs_initcall(trace_events_hist_init); 6491