// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_hist - trace event hist triggers
 *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/rculist.h>
#include <linux/tracefs.h>

/* for gfp flag names */
#include <linux/trace_events.h>
#include <trace/events/mmflags.h>

#include "tracing_map.h"
#include "trace_synth.h"

#define ERRORS \
	C(NONE, "No error"), \
	C(DUPLICATE_VAR, "Variable already defined"), \
	C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
	C(TOO_MANY_VARS, "Too many variables defined"), \
	C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
	C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
	C(TRIGGER_EEXIST, "Hist trigger already exists"), \
	C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
	C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
	C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
	C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
	C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
	C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
	C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
	C(HIST_NOT_FOUND, "Matching event histogram not found"), \
	C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
	C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
	C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
	C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
	C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
	C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
	C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
	C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
	C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
	C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
	C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
	C(TOO_MANY_PARAMS, "Too many action params"), \
	C(PARAM_NOT_FOUND, "Couldn't find param"), \
	C(INVALID_PARAM, "Invalid action param"), \
	C(ACTION_NOT_FOUND, "No action found"), \
	C(NO_SAVE_PARAMS, "No params found for save()"), \
	C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
	C(ACTION_MISMATCH, "Handler doesn't support action"), \
	C(NO_CLOSING_PAREN, "No closing paren found"), \
	C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
	C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
	C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
	C(VAR_NOT_FOUND, "Couldn't find variable"), \
	C(FIELD_NOT_FOUND, "Couldn't find field"), \
	C(EMPTY_ASSIGNMENT, "Empty assignment"), \
	C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \
	C(EMPTY_SORT_FIELD, "Empty sort field"), \
	C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
	C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \
	C(INVALID_STR_OPERAND, "String type can not be an operand in expression"),

#undef C
#define C(a, b)	HIST_ERR_##a

enum { ERRORS };

#undef C
#define C(a, b)	b

static const char *err_text[] = { ERRORS };
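/*
 * Note: the ERRORS list above is expanded twice through the C() macro,
 * once to generate the HIST_ERR_* enum values and once to generate the
 * corresponding strings in err_text[], keeping the two tables in sync.
 */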
struct hist_field;

typedef u64 (*hist_field_fn_t) (struct hist_field *field,
				struct tracing_map_elt *elt,
				struct trace_buffer *buffer,
				struct ring_buffer_event *rbe,
				void *event);

#define HIST_FIELD_OPERANDS_MAX	2
#define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
#define HIST_ACTIONS_MAX	8

enum field_op_id {
	FIELD_OP_NONE,
	FIELD_OP_PLUS,
	FIELD_OP_MINUS,
	FIELD_OP_UNARY_MINUS,
};

/*
 * A hist_var (histogram variable) contains variable information for
 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
 * flag set.  A hist_var has a variable name e.g. ts0, and is
 * associated with a given histogram trigger, as specified by
 * hist_data.  The hist_var idx is the unique index assigned to the
 * variable by the hist trigger's tracing_map.  The idx is what is
 * used to set a variable's value and, by a variable reference, to
 * retrieve it.
 */
struct hist_var {
	char *name;
	struct hist_trigger_data *hist_data;
	unsigned int idx;
};

struct hist_field {
	struct ftrace_event_field *field;
	unsigned long flags;
	hist_field_fn_t fn;
	unsigned int ref;
	unsigned int size;
	unsigned int offset;
	unsigned int is_signed;
	unsigned long buckets;
	const char *type;
	struct hist_field *operands[HIST_FIELD_OPERANDS_MAX];
	struct hist_trigger_data *hist_data;

	/*
	 * Variable fields contain variable-specific info in var.
	 */
	struct hist_var var;
	enum field_op_id operator;
	char *system;
	char *event_name;

	/*
	 * The name field is used for EXPR and VAR_REF fields.  VAR
	 * fields contain the variable name in var.name.
	 */
	char *name;

	/*
	 * When a histogram trigger is hit, if it has any references
	 * to variables, the values of those variables are collected
	 * into a var_ref_vals array by resolve_var_refs().  The
	 * current value of each variable is read from the tracing_map
	 * using the hist field's hist_var.idx and entered into the
	 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
	 */
	unsigned int var_ref_idx;
	bool read_once;

	unsigned int var_str_idx;
};
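/*
 * For example (following the trigger syntax documented in
 * Documentation/trace/histogram.rst), a command such as:
 *
 *   echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> \
 *     events/sched/sched_waking/trigger
 *
 * defines a per-key variable ts0 on that trigger; another trigger can
 * then read it back through a reference written as $ts0.
 */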
static u64 hist_field_none(struct hist_field *field,
			   struct tracing_map_elt *elt,
			   struct trace_buffer *buffer,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	return 0;
}

static u64 hist_field_counter(struct hist_field *field,
			      struct tracing_map_elt *elt,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *rbe,
			      void *event)
{
	return 1;
}

static u64 hist_field_string(struct hist_field *hist_field,
			     struct tracing_map_elt *elt,
			     struct trace_buffer *buffer,
			     struct ring_buffer_event *rbe,
			     void *event)
{
	char *addr = (char *)(event + hist_field->field->offset);

	return (u64)(unsigned long)addr;
}

static u64 hist_field_dynstring(struct hist_field *hist_field,
				struct tracing_map_elt *elt,
				struct trace_buffer *buffer,
				struct ring_buffer_event *rbe,
				void *event)
{
	u32 str_item = *(u32 *)(event + hist_field->field->offset);
	int str_loc = str_item & 0xffff;
	char *addr = (char *)(event + str_loc);

	return (u64)(unsigned long)addr;
}

static u64 hist_field_pstring(struct hist_field *hist_field,
			      struct tracing_map_elt *elt,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *rbe,
			      void *event)
{
	char **addr = (char **)(event + hist_field->field->offset);

	return (u64)(unsigned long)*addr;
}

static u64 hist_field_log2(struct hist_field *hist_field,
			   struct tracing_map_elt *elt,
			   struct trace_buffer *buffer,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	u64 val = operand->fn(operand, elt, buffer, rbe, event);

	return (u64) ilog2(roundup_pow_of_two(val));
}

static u64 hist_field_bucket(struct hist_field *hist_field,
			     struct tracing_map_elt *elt,
			     struct trace_buffer *buffer,
			     struct ring_buffer_event *rbe,
			     void *event)
{
	struct hist_field *operand = hist_field->operands[0];
	unsigned long buckets = hist_field->buckets;

	u64 val = operand->fn(operand, elt, buffer, rbe, event);

	if (WARN_ON_ONCE(!buckets))
		return val;

	if (val >= LONG_MAX)
		val = div64_ul(val, buckets);
	else
		val = (u64)((unsigned long)val / buckets);
	return val * buckets;
}
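/*
 * Examples of the grouping helpers above, with assumed input values for
 * illustration: .log2 maps val = 300 to ilog2(roundup_pow_of_two(300)) =
 * ilog2(512) = 9, while .buckets=100 maps val = 1234 to (1234 / 100) * 100
 * = 1200, so every value in [1200, 1299] lands in the same histogram bucket.
 */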
static u64 hist_field_plus(struct hist_field *hist_field,
			   struct tracing_map_elt *elt,
			   struct trace_buffer *buffer,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = operand1->fn(operand1, elt, buffer, rbe, event);
	u64 val2 = operand2->fn(operand2, elt, buffer, rbe, event);

	return val1 + val2;
}

static u64 hist_field_minus(struct hist_field *hist_field,
			    struct tracing_map_elt *elt,
			    struct trace_buffer *buffer,
			    struct ring_buffer_event *rbe,
			    void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = operand1->fn(operand1, elt, buffer, rbe, event);
	u64 val2 = operand2->fn(operand2, elt, buffer, rbe, event);

	return val1 - val2;
}

static u64 hist_field_unary_minus(struct hist_field *hist_field,
				  struct tracing_map_elt *elt,
				  struct trace_buffer *buffer,
				  struct ring_buffer_event *rbe,
				  void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	s64 sval = (s64)operand->fn(operand, elt, buffer, rbe, event);
	u64 val = (u64)-sval;

	return val;
}

#define DEFINE_HIST_FIELD_FN(type)					\
	static u64 hist_field_##type(struct hist_field *hist_field,	\
				     struct tracing_map_elt *elt,	\
				     struct trace_buffer *buffer,	\
				     struct ring_buffer_event *rbe,	\
				     void *event)			\
{									\
	type *addr = (type *)(event + hist_field->field->offset);	\
									\
	return (u64)(unsigned long)*addr;				\
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);

#define for_each_hist_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

#define for_each_hist_val_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

#define for_each_hist_key_field(i, hist_data)	\
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

#define HIST_STACKTRACE_DEPTH	16
#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP	5

#define HITCOUNT_IDX		0
#define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)

enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT		= 1 << 0,
	HIST_FIELD_FL_KEY		= 1 << 1,
	HIST_FIELD_FL_STRING		= 1 << 2,
	HIST_FIELD_FL_HEX		= 1 << 3,
	HIST_FIELD_FL_SYM		= 1 << 4,
	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,
	HIST_FIELD_FL_EXECNAME		= 1 << 6,
	HIST_FIELD_FL_SYSCALL		= 1 << 7,
	HIST_FIELD_FL_STACKTRACE	= 1 << 8,
	HIST_FIELD_FL_LOG2		= 1 << 9,
	HIST_FIELD_FL_TIMESTAMP		= 1 << 10,
	HIST_FIELD_FL_TIMESTAMP_USECS	= 1 << 11,
	HIST_FIELD_FL_VAR		= 1 << 12,
	HIST_FIELD_FL_EXPR		= 1 << 13,
	HIST_FIELD_FL_VAR_REF		= 1 << 14,
	HIST_FIELD_FL_CPU		= 1 << 15,
	HIST_FIELD_FL_ALIAS		= 1 << 16,
	HIST_FIELD_FL_BUCKET		= 1 << 17,
};

struct var_defs {
	unsigned int n_vars;
	char *name[TRACING_MAP_VARS_MAX];
	char *expr[TRACING_MAP_VARS_MAX];
};

struct hist_trigger_attrs {
	char *keys_str;
	char *vals_str;
	char *sort_key_str;
	char *name;
	char *clock;
	bool pause;
	bool cont;
	bool clear;
	bool ts_in_usecs;
	unsigned int map_bits;

	char *assignment_str[TRACING_MAP_VARS_MAX];
	unsigned int n_assignments;

	char *action_str[HIST_ACTIONS_MAX];
	unsigned int n_actions;

	struct var_defs var_defs;
};

struct field_var {
	struct hist_field *var;
	struct hist_field *val;
};

struct field_var_hist {
	struct hist_trigger_data *hist_data;
	char *cmd;
};

struct hist_trigger_data {
	struct hist_field *fields[HIST_FIELDS_MAX];
	unsigned int n_vals;
	unsigned int n_keys;
	unsigned int n_fields;
	unsigned int n_vars;
	unsigned int n_var_str;
	unsigned int key_size;
	struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
	unsigned int n_sort_keys;
	struct trace_event_file *event_file;
	struct hist_trigger_attrs *attrs;
	struct tracing_map *map;
	bool enable_timestamps;
	bool remove;
	struct hist_field *var_refs[TRACING_MAP_VARS_MAX];
	unsigned int n_var_refs;

	struct action_data *actions[HIST_ACTIONS_MAX];
	unsigned int n_actions;

	struct field_var *field_vars[SYNTH_FIELDS_MAX];
	unsigned int n_field_vars;
	unsigned int n_field_var_str;
	struct field_var_hist *field_var_hists[SYNTH_FIELDS_MAX];
	unsigned int n_field_var_hists;

	struct field_var *save_vars[SYNTH_FIELDS_MAX];
	unsigned int n_save_vars;
	unsigned int n_save_var_str;
};
struct action_data;

typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
			     struct tracing_map_elt *elt,
			     struct trace_buffer *buffer, void *rec,
			     struct ring_buffer_event *rbe, void *key,
			     struct action_data *data, u64 *var_ref_vals);

typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);

enum handler_id {
	HANDLER_ONMATCH = 1,
	HANDLER_ONMAX,
	HANDLER_ONCHANGE,
};

enum action_id {
	ACTION_SAVE = 1,
	ACTION_TRACE,
	ACTION_SNAPSHOT,
};

struct action_data {
	enum handler_id handler;
	enum action_id action;
	char *action_name;
	action_fn_t fn;

	unsigned int n_params;
	char *params[SYNTH_FIELDS_MAX];

	/*
	 * When a histogram trigger is hit, the values of any
	 * references to variables, including variables being passed
	 * as parameters to synthetic events, are collected into a
	 * var_ref_vals array.  This var_ref_idx array is an array of
	 * indices into the var_ref_vals array, one for each synthetic
	 * event param, and is passed to the synthetic event
	 * invocation.
	 */
	unsigned int var_ref_idx[TRACING_MAP_VARS_MAX];
	struct synth_event *synth_event;
	bool use_trace_keyword;
	char *synth_event_name;

	union {
		struct {
			char *event;
			char *event_system;
		} match_data;

		struct {
			/*
			 * var_str contains the $-unstripped variable
			 * name referenced by var_ref, and used when
			 * printing the action.  Because var_ref
			 * creation is deferred to create_actions(),
			 * we need a per-action way to save it until
			 * then, thus var_str.
			 */
			char *var_str;

			/*
			 * var_ref refers to the variable being
			 * tracked e.g. onmax($var).
			 */
			struct hist_field *var_ref;

			/*
			 * track_var contains the 'invisible' tracking
			 * variable created to keep the current
			 * e.g. max value.
			 */
			struct hist_field *track_var;

			check_track_val_fn_t check_val;
			action_fn_t save_data;
		} track_data;
	};
};
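/*
 * For reference, handler/action pairs are written in trigger commands as
 * e.g. onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm) or
 * onmatch(sched.sched_wakeup).trace(wakeup_latency,$wakeup_lat,next_pid);
 * these illustrative examples follow Documentation/trace/histogram.rst.
 */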
struct track_data {
	u64 track_val;
	bool updated;

	unsigned int key_len;
	void *key;
	struct tracing_map_elt elt;

	struct action_data *action_data;
	struct hist_trigger_data *hist_data;
};

struct hist_elt_data {
	char *comm;
	u64 *var_ref_vals;
	char *field_var_str[SYNTH_FIELDS_MAX];
};

struct snapshot_context {
	struct tracing_map_elt *elt;
	void *key;
};

static void track_data_free(struct track_data *track_data)
{
	struct hist_elt_data *elt_data;

	if (!track_data)
		return;

	kfree(track_data->key);

	elt_data = track_data->elt.private_data;
	if (elt_data) {
		kfree(elt_data->comm);
		kfree(elt_data);
	}

	kfree(track_data);
}

static struct track_data *track_data_alloc(unsigned int key_len,
					   struct action_data *action_data,
					   struct hist_trigger_data *hist_data)
{
	struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
	struct hist_elt_data *elt_data;

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->key = kzalloc(key_len, GFP_KERNEL);
	if (!data->key) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	data->key_len = key_len;
	data->action_data = action_data;
	data->hist_data = hist_data;

	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
	if (!elt_data) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	data->elt.private_data = elt_data;

	elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
	if (!elt_data->comm) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	return data;
}

static char last_cmd[MAX_FILTER_STR_VAL];
static char last_cmd_loc[MAX_FILTER_STR_VAL];

static int errpos(char *str)
{
	return err_pos(last_cmd, str);
}

static void last_cmd_set(struct trace_event_file *file, char *str)
{
	const char *system = NULL, *name = NULL;
	struct trace_event_call *call;

	if (!str)
		return;

	strcpy(last_cmd, "hist:");
	strncat(last_cmd, str, MAX_FILTER_STR_VAL - 1 - sizeof("hist:"));

	if (file) {
		call = file->event_call;
		system = call->class->system;
		if (system) {
			name = trace_event_name(call);
			if (!name)
				system = NULL;
		}
	}

	if (system)
		snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
}

static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
{
	tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
			err_type, err_pos);
}

static void hist_err_clear(void)
{
	last_cmd[0] = '\0';
	last_cmd_loc[0] = '\0';
}

typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
				    unsigned int *var_ref_idx);

static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
			       unsigned int *var_ref_idx)
{
	struct tracepoint *tp = event->tp;

	if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
		struct tracepoint_func *probe_func_ptr;
		synth_probe_func_t probe_func;
		void *__data;

		if (!(cpu_online(raw_smp_processor_id())))
			return;

		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;
				__data = probe_func_ptr->data;
				probe_func(__data, var_ref_vals, var_ref_idx);
			} while ((++probe_func_ptr)->func);
		}
	}
}
static void action_trace(struct hist_trigger_data *hist_data,
			 struct tracing_map_elt *elt,
			 struct trace_buffer *buffer, void *rec,
			 struct ring_buffer_event *rbe, void *key,
			 struct action_data *data, u64 *var_ref_vals)
{
	struct synth_event *event = data->synth_event;

	trace_synth(event, var_ref_vals, data->var_ref_idx);
}

struct hist_var_data {
	struct list_head list;
	struct hist_trigger_data *hist_data;
};

static u64 hist_field_timestamp(struct hist_field *hist_field,
				struct tracing_map_elt *elt,
				struct trace_buffer *buffer,
				struct ring_buffer_event *rbe,
				void *event)
{
	struct hist_trigger_data *hist_data = hist_field->hist_data;
	struct trace_array *tr = hist_data->event_file->tr;

	u64 ts = ring_buffer_event_time_stamp(buffer, rbe);

	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
		ts = ns2usecs(ts);

	return ts;
}

static u64 hist_field_cpu(struct hist_field *hist_field,
			  struct tracing_map_elt *elt,
			  struct trace_buffer *buffer,
			  struct ring_buffer_event *rbe,
			  void *event)
{
	int cpu = smp_processor_id();

	return cpu;
}

/**
 * check_field_for_var_ref - Check if a VAR_REF field references a variable
 * @hist_field: The VAR_REF field to check
 * @var_data: The hist trigger that owns the variable
 * @var_idx: The trigger variable identifier
 *
 * Check the given VAR_REF field to see whether or not it references
 * the given variable associated with the given trigger.
 *
 * Return: The VAR_REF field if it does reference the variable, NULL if not
 */
static struct hist_field *
check_field_for_var_ref(struct hist_field *hist_field,
			struct hist_trigger_data *var_data,
			unsigned int var_idx)
{
	WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));

	if (hist_field && hist_field->var.idx == var_idx &&
	    hist_field->var.hist_data == var_data)
		return hist_field;

	return NULL;
}

/**
 * find_var_ref - Check if a trigger has a reference to a trigger variable
 * @hist_data: The hist trigger that might have a reference to the variable
 * @var_data: The hist trigger that owns the variable
 * @var_idx: The trigger variable identifier
 *
 * Check the list of var_refs[] on the first hist trigger to see
 * whether any of them are references to the variable on the second
 * trigger.
 *
 * Return: The VAR_REF field referencing the variable if so, NULL if not
 */
static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
				       struct hist_trigger_data *var_data,
				       unsigned int var_idx)
{
	struct hist_field *hist_field;
	unsigned int i;

	for (i = 0; i < hist_data->n_var_refs; i++) {
		hist_field = hist_data->var_refs[i];
		if (check_field_for_var_ref(hist_field, var_data, var_idx))
			return hist_field;
	}

	return NULL;
}
/**
 * find_any_var_ref - Check if there is a reference to a given trigger variable
 * @hist_data: The hist trigger
 * @var_idx: The trigger variable identifier
 *
 * Check to see whether the given variable is currently referenced by
 * any other trigger.
 *
 * The trigger the variable is defined on is explicitly excluded - the
 * assumption being that a self-reference doesn't prevent a trigger
 * from being removed.
 *
 * Return: The VAR_REF field referencing the variable if so, NULL if not
 */
static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
					   unsigned int var_idx)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *found = NULL;
	struct hist_var_data *var_data;

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		if (var_data->hist_data == hist_data)
			continue;
		found = find_var_ref(var_data->hist_data, hist_data, var_idx);
		if (found)
			break;
	}

	return found;
}

/**
 * check_var_refs - Check if there is a reference to any of trigger's variables
 * @hist_data: The hist trigger
 *
 * A trigger can define one or more variables.  If any one of them is
 * currently referenced by any other trigger, this function will
 * determine that.
 *
 * Typically used to determine whether or not a trigger can be removed
 * - if there are any references to a trigger's variables, it cannot.
 *
 * Return: True if there is a reference to any of trigger's variables
 */
static bool check_var_refs(struct hist_trigger_data *hist_data)
{
	struct hist_field *field;
	bool found = false;
	int i;

	for_each_hist_field(i, hist_data) {
		field = hist_data->fields[i];
		if (field && field->flags & HIST_FIELD_FL_VAR) {
			if (find_any_var_ref(hist_data, field->var.idx)) {
				found = true;
				break;
			}
		}
	}

	return found;
}

static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data, *found = NULL;

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		if (var_data->hist_data == hist_data) {
			found = var_data;
			break;
		}
	}

	return found;
}

static bool field_has_hist_vars(struct hist_field *hist_field,
				unsigned int level)
{
	int i;

	if (level > 3)
		return false;

	if (!hist_field)
		return false;

	if (hist_field->flags & HIST_FIELD_FL_VAR ||
	    hist_field->flags & HIST_FIELD_FL_VAR_REF)
		return true;

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
		struct hist_field *operand;

		operand = hist_field->operands[i];
		if (field_has_hist_vars(operand, level + 1))
			return true;
	}

	return false;
}

static bool has_hist_vars(struct hist_trigger_data *hist_data)
{
	struct hist_field *hist_field;
	int i;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (field_has_hist_vars(hist_field, 0))
			return true;
	}

	return false;
}

static int save_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data;

	var_data = find_hist_vars(hist_data);
	if (var_data)
		return 0;

	if (tracing_check_open_get_tr(tr))
		return -ENODEV;

	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
	if (!var_data) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	var_data->hist_data = hist_data;
	list_add(&var_data->list, &tr->hist_vars);

	return 0;
}
static void remove_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data;

	var_data = find_hist_vars(hist_data);
	if (!var_data)
		return;

	if (WARN_ON(check_var_refs(hist_data)))
		return;

	list_del(&var_data->list);

	kfree(var_data);

	trace_array_put(tr);
}

static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
					 const char *var_name)
{
	struct hist_field *hist_field, *found = NULL;
	int i;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
		    strcmp(hist_field->var.name, var_name) == 0) {
			found = hist_field;
			break;
		}
	}

	return found;
}

static struct hist_field *find_var(struct hist_trigger_data *hist_data,
				   struct trace_event_file *file,
				   const char *var_name)
{
	struct hist_trigger_data *test_data;
	struct event_trigger_data *test;
	struct hist_field *hist_field;

	lockdep_assert_held(&event_mutex);

	hist_field = find_var_field(hist_data, var_name);
	if (hist_field)
		return hist_field;

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			test_data = test->private_data;
			hist_field = find_var_field(test_data, var_name);
			if (hist_field)
				return hist_field;
		}
	}

	return NULL;
}

static struct trace_event_file *find_var_file(struct trace_array *tr,
					      char *system,
					      char *event_name,
					      char *var_name)
{
	struct hist_trigger_data *var_hist_data;
	struct hist_var_data *var_data;
	struct trace_event_file *file, *found = NULL;

	if (system)
		return find_event_file(tr, system, event_name);

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		var_hist_data = var_data->hist_data;
		file = var_hist_data->event_file;
		if (file == found)
			continue;

		if (find_var_field(var_hist_data, var_name)) {
			if (found) {
				hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
				return NULL;
			}

			found = file;
		}
	}

	return found;
}

static struct hist_field *find_file_var(struct trace_event_file *file,
					const char *var_name)
{
	struct hist_trigger_data *test_data;
	struct event_trigger_data *test;
	struct hist_field *hist_field;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			test_data = test->private_data;
			hist_field = find_var_field(test_data, var_name);
			if (hist_field)
				return hist_field;
		}
	}

	return NULL;
}

static struct hist_field *
find_match_var(struct hist_trigger_data *hist_data, char *var_name)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field, *found = NULL;
	struct trace_event_file *file;
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->handler == HANDLER_ONMATCH) {
			char *system = data->match_data.event_system;
			char *event_name = data->match_data.event;

			file = find_var_file(tr, system, event_name, var_name);
			if (!file)
				continue;
			hist_field = find_file_var(file, var_name);
			if (hist_field) {
				if (found) {
					hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
						 errpos(var_name));
					return ERR_PTR(-EINVAL);
				}

				found = hist_field;
			}
		}
	}
	return found;
}
static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
					 char *system,
					 char *event_name,
					 char *var_name)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field = NULL;
	struct trace_event_file *file;

	if (!system || !event_name) {
		hist_field = find_match_var(hist_data, var_name);
		if (IS_ERR(hist_field))
			return NULL;
		if (hist_field)
			return hist_field;
	}

	file = find_var_file(tr, system, event_name, var_name);
	if (!file)
		return NULL;

	hist_field = find_file_var(file, var_name);

	return hist_field;
}

static u64 hist_field_var_ref(struct hist_field *hist_field,
			      struct tracing_map_elt *elt,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *rbe,
			      void *event)
{
	struct hist_elt_data *elt_data;
	u64 var_val = 0;

	if (WARN_ON_ONCE(!elt))
		return var_val;

	elt_data = elt->private_data;
	var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];

	return var_val;
}

static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
			     u64 *var_ref_vals, bool self)
{
	struct hist_trigger_data *var_data;
	struct tracing_map_elt *var_elt;
	struct hist_field *hist_field;
	unsigned int i, var_idx;
	bool resolved = true;
	u64 var_val = 0;

	for (i = 0; i < hist_data->n_var_refs; i++) {
		hist_field = hist_data->var_refs[i];
		var_idx = hist_field->var.idx;
		var_data = hist_field->var.hist_data;

		if (var_data == NULL) {
			resolved = false;
			break;
		}

		if ((self && var_data != hist_data) ||
		    (!self && var_data == hist_data))
			continue;

		var_elt = tracing_map_lookup(var_data->map, key);
		if (!var_elt) {
			resolved = false;
			break;
		}

		if (!tracing_map_var_set(var_elt, var_idx)) {
			resolved = false;
			break;
		}

		if (self || !hist_field->read_once)
			var_val = tracing_map_read_var(var_elt, var_idx);
		else
			var_val = tracing_map_read_var_once(var_elt, var_idx);

		var_ref_vals[i] = var_val;
	}

	return resolved;
}

static const char *hist_field_name(struct hist_field *field,
				   unsigned int level)
{
	const char *field_name = "";

	if (level > 1)
		return field_name;

	if (field->field)
		field_name = field->field->name;
	else if (field->flags & HIST_FIELD_FL_LOG2 ||
		 field->flags & HIST_FIELD_FL_ALIAS ||
		 field->flags & HIST_FIELD_FL_BUCKET)
		field_name = hist_field_name(field->operands[0], ++level);
	else if (field->flags & HIST_FIELD_FL_CPU)
		field_name = "common_cpu";
	else if (field->flags & HIST_FIELD_FL_EXPR ||
		 field->flags & HIST_FIELD_FL_VAR_REF) {
		if (field->system) {
			static char full_name[MAX_FILTER_STR_VAL];

			strcat(full_name, field->system);
			strcat(full_name, ".");
			strcat(full_name, field->event_name);
			strcat(full_name, ".");
			strcat(full_name, field->name);
			field_name = full_name;
		} else
			field_name = field->name;
	} else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
		field_name = "common_timestamp";

	if (field_name == NULL)
		field_name = "";

	return field_name;
}
static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
{
	hist_field_fn_t fn = NULL;

	switch (field_size) {
	case 8:
		if (field_is_signed)
			fn = hist_field_s64;
		else
			fn = hist_field_u64;
		break;
	case 4:
		if (field_is_signed)
			fn = hist_field_s32;
		else
			fn = hist_field_u32;
		break;
	case 2:
		if (field_is_signed)
			fn = hist_field_s16;
		else
			fn = hist_field_u16;
		break;
	case 1:
		if (field_is_signed)
			fn = hist_field_s8;
		else
			fn = hist_field_u8;
		break;
	}

	return fn;
}

static int parse_map_size(char *str)
{
	unsigned long size, map_bits;
	int ret;

	ret = kstrtoul(str, 0, &size);
	if (ret)
		goto out;

	map_bits = ilog2(roundup_pow_of_two(size));
	if (map_bits < TRACING_MAP_BITS_MIN ||
	    map_bits > TRACING_MAP_BITS_MAX)
		ret = -EINVAL;
	else
		ret = map_bits;
 out:
	return ret;
}

static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
{
	unsigned int i;

	if (!attrs)
		return;

	for (i = 0; i < attrs->n_assignments; i++)
		kfree(attrs->assignment_str[i]);

	for (i = 0; i < attrs->n_actions; i++)
		kfree(attrs->action_str[i]);

	kfree(attrs->name);
	kfree(attrs->sort_key_str);
	kfree(attrs->keys_str);
	kfree(attrs->vals_str);
	kfree(attrs->clock);
	kfree(attrs);
}

static int parse_action(char *str, struct hist_trigger_attrs *attrs)
{
	int ret = -EINVAL;

	if (attrs->n_actions >= HIST_ACTIONS_MAX)
		return ret;

	if ((str_has_prefix(str, "onmatch(")) ||
	    (str_has_prefix(str, "onmax(")) ||
	    (str_has_prefix(str, "onchange("))) {
		attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
		if (!attrs->action_str[attrs->n_actions]) {
			ret = -ENOMEM;
			return ret;
		}
		attrs->n_actions++;
		ret = 0;
	}
	return ret;
}

static int parse_assignment(struct trace_array *tr,
			    char *str, struct hist_trigger_attrs *attrs)
{
	int len, ret = 0;

	if ((len = str_has_prefix(str, "key=")) ||
	    (len = str_has_prefix(str, "keys="))) {
		attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->keys_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "val=")) ||
		   (len = str_has_prefix(str, "vals=")) ||
		   (len = str_has_prefix(str, "values="))) {
		attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->vals_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "sort="))) {
		attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->sort_key_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (str_has_prefix(str, "name=")) {
		attrs->name = kstrdup(str, GFP_KERNEL);
		if (!attrs->name) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "clock="))) {
		str += len;

		str = strstrip(str);
		attrs->clock = kstrdup(str, GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "size="))) {
		int map_bits = parse_map_size(str + len);

		if (map_bits < 0) {
			ret = map_bits;
			goto out;
		}
		attrs->map_bits = map_bits;
	} else {
		char *assignment;

		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
			hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
			ret = -EINVAL;
			goto out;
		}

		assignment = kstrdup(str, GFP_KERNEL);
		if (!assignment) {
			ret = -ENOMEM;
			goto out;
		}

		attrs->assignment_str[attrs->n_assignments++] = assignment;
	}
 out:
	return ret;
}
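/*
 * Examples of assignments handled above: "keys=next_pid", "vals=hitcount",
 * "sort=hitcount.descending", "clock=global" and "size=4096" (4096 entries
 * round up to a power of two, giving map_bits = 12); any other
 * "var=expr" string, e.g. "ts0=common_timestamp.usecs", is stored as a
 * variable assignment for later parsing.
 */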
static struct hist_trigger_attrs *
parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
{
	struct hist_trigger_attrs *attrs;
	int ret = 0;

	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return ERR_PTR(-ENOMEM);

	while (trigger_str) {
		char *str = strsep(&trigger_str, ":");
		char *rhs;

		rhs = strchr(str, '=');
		if (rhs) {
			if (!strlen(++rhs)) {
				ret = -EINVAL;
				hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str));
				goto free;
			}
			ret = parse_assignment(tr, str, attrs);
			if (ret)
				goto free;
		} else if (strcmp(str, "pause") == 0)
			attrs->pause = true;
		else if ((strcmp(str, "cont") == 0) ||
			 (strcmp(str, "continue") == 0))
			attrs->cont = true;
		else if (strcmp(str, "clear") == 0)
			attrs->clear = true;
		else {
			ret = parse_action(str, attrs);
			if (ret)
				goto free;
		}
	}

	if (!attrs->keys_str) {
		ret = -EINVAL;
		goto free;
	}

	if (!attrs->clock) {
		attrs->clock = kstrdup("global", GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto free;
		}
	}

	return attrs;
 free:
	destroy_hist_trigger_attrs(attrs);

	return ERR_PTR(ret);
}

static inline void save_comm(char *comm, struct task_struct *task)
{
	if (!task->pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(task->pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	strncpy(comm, task->comm, TASK_COMM_LEN);
}

static void hist_elt_data_free(struct hist_elt_data *elt_data)
{
	unsigned int i;

	for (i = 0; i < SYNTH_FIELDS_MAX; i++)
		kfree(elt_data->field_var_str[i]);

	kfree(elt_data->comm);
	kfree(elt_data);
}

static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
{
	struct hist_elt_data *elt_data = elt->private_data;

	hist_elt_data_free(elt_data);
}

static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
{
	struct hist_trigger_data *hist_data = elt->map->private_data;
	unsigned int size = TASK_COMM_LEN;
	struct hist_elt_data *elt_data;
	struct hist_field *hist_field;
	unsigned int i, n_str;

	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
	if (!elt_data)
		return -ENOMEM;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];

		if (hist_field->flags & HIST_FIELD_FL_EXECNAME) {
			elt_data->comm = kzalloc(size, GFP_KERNEL);
			if (!elt_data->comm) {
				kfree(elt_data);
				return -ENOMEM;
			}
			break;
		}
	}

	n_str = hist_data->n_field_var_str + hist_data->n_save_var_str +
		hist_data->n_var_str;
	if (n_str > SYNTH_FIELDS_MAX) {
		hist_elt_data_free(elt_data);
		return -EINVAL;
	}

	BUILD_BUG_ON(STR_VAR_LEN_MAX & (sizeof(u64) - 1));

	size = STR_VAR_LEN_MAX;

	for (i = 0; i < n_str; i++) {
		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
		if (!elt_data->field_var_str[i]) {
			hist_elt_data_free(elt_data);
			return -ENOMEM;
		}
	}

	elt->private_data = elt_data;

	return 0;
}
static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
{
	struct hist_elt_data *elt_data = elt->private_data;

	if (elt_data->comm)
		save_comm(elt_data->comm, current);
}

static const struct tracing_map_ops hist_trigger_elt_data_ops = {
	.elt_alloc	= hist_trigger_elt_data_alloc,
	.elt_free	= hist_trigger_elt_data_free,
	.elt_init	= hist_trigger_elt_data_init,
};

static const char *get_hist_field_flags(struct hist_field *hist_field)
{
	const char *flags_str = NULL;

	if (hist_field->flags & HIST_FIELD_FL_HEX)
		flags_str = "hex";
	else if (hist_field->flags & HIST_FIELD_FL_SYM)
		flags_str = "sym";
	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
		flags_str = "sym-offset";
	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
		flags_str = "execname";
	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
		flags_str = "syscall";
	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
		flags_str = "log2";
	else if (hist_field->flags & HIST_FIELD_FL_BUCKET)
		flags_str = "buckets";
	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
		flags_str = "usecs";

	return flags_str;
}

static void expr_field_str(struct hist_field *field, char *expr)
{
	if (field->flags & HIST_FIELD_FL_VAR_REF)
		strcat(expr, "$");

	strcat(expr, hist_field_name(field, 0));

	if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
		const char *flags_str = get_hist_field_flags(field);

		if (flags_str) {
			strcat(expr, ".");
			strcat(expr, flags_str);
		}
	}
}

static char *expr_str(struct hist_field *field, unsigned int level)
{
	char *expr;

	if (level > 1)
		return NULL;

	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!expr)
		return NULL;

	if (!field->operands[0]) {
		expr_field_str(field, expr);
		return expr;
	}

	if (field->operator == FIELD_OP_UNARY_MINUS) {
		char *subexpr;

		strcat(expr, "-(");
		subexpr = expr_str(field->operands[0], ++level);
		if (!subexpr) {
			kfree(expr);
			return NULL;
		}
		strcat(expr, subexpr);
		strcat(expr, ")");

		kfree(subexpr);

		return expr;
	}

	expr_field_str(field->operands[0], expr);

	switch (field->operator) {
	case FIELD_OP_MINUS:
		strcat(expr, "-");
		break;
	case FIELD_OP_PLUS:
		strcat(expr, "+");
		break;
	default:
		kfree(expr);
		return NULL;
	}

	expr_field_str(field->operands[1], expr);

	return expr;
}

static int contains_operator(char *str)
{
	enum field_op_id field_op = FIELD_OP_NONE;
	char *op;

	op = strpbrk(str, "+-");
	if (!op)
		return FIELD_OP_NONE;

	switch (*op) {
	case '-':
		/*
		 * Unfortunately, the modifier ".sym-offset"
		 * can confuse things.
		 */
		if (op - str >= 4 && !strncmp(op - 4, ".sym-offset", 11))
			return FIELD_OP_NONE;

		if (*str == '-')
			field_op = FIELD_OP_UNARY_MINUS;
		else
			field_op = FIELD_OP_MINUS;
		break;
	case '+':
		field_op = FIELD_OP_PLUS;
		break;
	default:
		break;
	}

	return field_op;
}
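/*
 * Example of the ".sym-offset" special case above: in an atom such as
 * "call_site.sym-offset", the '-' found by strpbrk() belongs to the
 * modifier name, not a subtraction, so contains_operator() must report
 * FIELD_OP_NONE rather than FIELD_OP_MINUS for it.
 */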
static void get_hist_field(struct hist_field *hist_field)
{
	hist_field->ref++;
}

static void __destroy_hist_field(struct hist_field *hist_field)
{
	if (--hist_field->ref > 1)
		return;

	kfree(hist_field->var.name);
	kfree(hist_field->name);

	/* Can likely be a const */
	kfree_const(hist_field->type);

	kfree(hist_field->system);
	kfree(hist_field->event_name);

	kfree(hist_field);
}

static void destroy_hist_field(struct hist_field *hist_field,
			       unsigned int level)
{
	unsigned int i;

	if (level > 3)
		return;

	if (!hist_field)
		return;

	if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
		return; /* var refs will be destroyed separately */

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
		destroy_hist_field(hist_field->operands[i], level + 1);

	__destroy_hist_field(hist_field);
}

static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
					    struct ftrace_event_field *field,
					    unsigned long flags,
					    char *var_name)
{
	struct hist_field *hist_field;

	if (field && is_function_field(field))
		return NULL;

	hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
	if (!hist_field)
		return NULL;

	hist_field->ref = 1;

	hist_field->hist_data = hist_data;

	if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
		goto out; /* caller will populate */

	if (flags & HIST_FIELD_FL_VAR_REF) {
		hist_field->fn = hist_field_var_ref;
		goto out;
	}

	if (flags & HIST_FIELD_FL_HITCOUNT) {
		hist_field->fn = hist_field_counter;
		hist_field->size = sizeof(u64);
		hist_field->type = "u64";
		goto out;
	}

	if (flags & HIST_FIELD_FL_STACKTRACE) {
		hist_field->fn = hist_field_none;
		goto out;
	}

	if (flags & (HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET)) {
		unsigned long fl = flags & ~(HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET);
		hist_field->fn = flags & HIST_FIELD_FL_LOG2 ? hist_field_log2 :
			hist_field_bucket;
		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
		hist_field->size = hist_field->operands[0]->size;
		hist_field->type = kstrdup_const(hist_field->operands[0]->type, GFP_KERNEL);
		if (!hist_field->type)
			goto free;
		goto out;
	}
	if (flags & HIST_FIELD_FL_TIMESTAMP) {
		hist_field->fn = hist_field_timestamp;
		hist_field->size = sizeof(u64);
		hist_field->type = "u64";
		goto out;
	}

	if (flags & HIST_FIELD_FL_CPU) {
		hist_field->fn = hist_field_cpu;
		hist_field->size = sizeof(int);
		hist_field->type = "unsigned int";
		goto out;
	}

	if (WARN_ON_ONCE(!field))
		goto out;

	/* Pointers to strings are just pointers and dangerous to dereference */
	if (is_string_field(field) &&
	    (field->filter_type != FILTER_PTR_STRING)) {
		flags |= HIST_FIELD_FL_STRING;

		hist_field->size = MAX_FILTER_STR_VAL;
		hist_field->type = kstrdup_const(field->type, GFP_KERNEL);
		if (!hist_field->type)
			goto free;

		if (field->filter_type == FILTER_STATIC_STRING)
			hist_field->fn = hist_field_string;
		else if (field->filter_type == FILTER_DYN_STRING)
			hist_field->fn = hist_field_dynstring;
		else
			hist_field->fn = hist_field_pstring;
	} else {
		hist_field->size = field->size;
		hist_field->is_signed = field->is_signed;
		hist_field->type = kstrdup_const(field->type, GFP_KERNEL);
		if (!hist_field->type)
			goto free;

		hist_field->fn = select_value_fn(field->size,
						 field->is_signed);
		if (!hist_field->fn) {
			destroy_hist_field(hist_field, 0);
			return NULL;
		}
	}
 out:
	hist_field->field = field;
	hist_field->flags = flags;

	if (var_name) {
		hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
		if (!hist_field->var.name)
			goto free;
	}

	return hist_field;
 free:
	destroy_hist_field(hist_field, 0);
	return NULL;
}

static void destroy_hist_fields(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < HIST_FIELDS_MAX; i++) {
		if (hist_data->fields[i]) {
			destroy_hist_field(hist_data->fields[i], 0);
			hist_data->fields[i] = NULL;
		}
	}

	for (i = 0; i < hist_data->n_var_refs; i++) {
		WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
		__destroy_hist_field(hist_data->var_refs[i]);
		hist_data->var_refs[i] = NULL;
	}
}

static int init_var_ref(struct hist_field *ref_field,
			struct hist_field *var_field,
			char *system, char *event_name)
{
	int err = 0;

	ref_field->var.idx = var_field->var.idx;
	ref_field->var.hist_data = var_field->hist_data;
	ref_field->size = var_field->size;
	ref_field->is_signed = var_field->is_signed;
	ref_field->flags |= var_field->flags &
		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);

	if (system) {
		ref_field->system = kstrdup(system, GFP_KERNEL);
		if (!ref_field->system)
			return -ENOMEM;
	}

	if (event_name) {
		ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
		if (!ref_field->event_name) {
			err = -ENOMEM;
			goto free;
		}
	}

	if (var_field->var.name) {
		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
		if (!ref_field->name) {
			err = -ENOMEM;
			goto free;
		}
	} else if (var_field->name) {
		ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
		if (!ref_field->name) {
			err = -ENOMEM;
			goto free;
		}
	}

	ref_field->type = kstrdup_const(var_field->type, GFP_KERNEL);
	if (!ref_field->type) {
		err = -ENOMEM;
		goto free;
	}
 out:
	return err;
 free:
	kfree(ref_field->system);
	kfree(ref_field->event_name);
	kfree(ref_field->name);

	goto out;
}

static int find_var_ref_idx(struct hist_trigger_data *hist_data,
			    struct hist_field *var_field)
{
	struct hist_field *ref_field;
	int i;

	for (i = 0; i < hist_data->n_var_refs; i++) {
		ref_field = hist_data->var_refs[i];
		if (ref_field->var.idx == var_field->var.idx &&
		    ref_field->var.hist_data == var_field->hist_data)
			return i;
	}

	return -ENOENT;
}

/**
 * create_var_ref - Create a variable reference and attach it to trigger
 * @hist_data: The trigger that will be referencing the variable
 * @var_field: The VAR field to create a reference to
 * @system: The optional system string
 * @event_name: The optional event_name string
 *
 * Given a variable hist_field, create a VAR_REF hist_field that
 * represents a reference to it.
 *
 * This function also adds the reference to the trigger that
 * now references the variable.
 *
 * Return: The VAR_REF field if successful, NULL if not
 */
static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
					 struct hist_field *var_field,
					 char *system, char *event_name)
{
	unsigned long flags = HIST_FIELD_FL_VAR_REF;
	struct hist_field *ref_field;
	int i;

	/* Check if the variable already exists */
	for (i = 0; i < hist_data->n_var_refs; i++) {
		ref_field = hist_data->var_refs[i];
		if (ref_field->var.idx == var_field->var.idx &&
		    ref_field->var.hist_data == var_field->hist_data) {
			get_hist_field(ref_field);
			return ref_field;
		}
	}

	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
	if (ref_field) {
		if (init_var_ref(ref_field, var_field, system, event_name)) {
			destroy_hist_field(ref_field, 0);
			return NULL;
		}

		hist_data->var_refs[hist_data->n_var_refs] = ref_field;
		ref_field->var_ref_idx = hist_data->n_var_refs++;
	}

	return ref_field;
}

static bool is_var_ref(char *var_name)
{
	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
		return false;

	return true;
}

static char *field_name_from_var(struct hist_trigger_data *hist_data,
				 char *var_name)
{
	char *name, *field;
	unsigned int i;

	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
		name = hist_data->attrs->var_defs.name[i];

		if (strcmp(var_name, name) == 0) {
			field = hist_data->attrs->var_defs.expr[i];
			if (contains_operator(field) || is_var_ref(field))
				continue;
			return field;
		}
	}

	return NULL;
}

static char *local_field_var_ref(struct hist_trigger_data *hist_data,
				 char *system, char *event_name,
				 char *var_name)
{
	struct trace_event_call *call;

	if (system && event_name) {
		call = hist_data->event_file->event_call;

		if (strcmp(system, call->class->system) != 0)
			return NULL;

		if (strcmp(event_name, trace_event_name(call)) != 0)
			return NULL;
	}
	if (!!system != !!event_name)
		return NULL;

	if (!is_var_ref(var_name))
		return NULL;

	var_name++;

	return field_name_from_var(hist_data, var_name);
}

static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
					char *system, char *event_name,
					char *var_name)
{
	struct hist_field *var_field = NULL, *ref_field = NULL;
	struct trace_array *tr = hist_data->event_file->tr;

	if (!is_var_ref(var_name))
		return NULL;

	var_name++;

	var_field = find_event_var(hist_data, system, event_name, var_name);
	if (var_field)
		ref_field = create_var_ref(hist_data, var_field,
					   system, event_name);

	if (!ref_field)
		hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));

	return ref_field;
}

static struct ftrace_event_field *
parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
	    char *field_str, unsigned long *flags, unsigned long *buckets)
{
	struct ftrace_event_field *field = NULL;
	char *field_name, *modifier, *str;
	struct trace_array *tr = file->tr;

	modifier = str = kstrdup(field_str, GFP_KERNEL);
	if (!modifier)
		return ERR_PTR(-ENOMEM);

	field_name = strsep(&modifier, ".");
	if (modifier) {
		if (strcmp(modifier, "hex") == 0)
			*flags |= HIST_FIELD_FL_HEX;
		else if (strcmp(modifier, "sym") == 0)
			*flags |= HIST_FIELD_FL_SYM;
		else if (strcmp(modifier, "sym-offset") == 0)
			*flags |= HIST_FIELD_FL_SYM_OFFSET;
		else if ((strcmp(modifier, "execname") == 0) &&
			 (strcmp(field_name, "common_pid") == 0))
			*flags |= HIST_FIELD_FL_EXECNAME;
		else if (strcmp(modifier, "syscall") == 0)
			*flags |= HIST_FIELD_FL_SYSCALL;
		else if (strcmp(modifier, "log2") == 0)
			*flags |= HIST_FIELD_FL_LOG2;
		else if (strcmp(modifier, "usecs") == 0)
			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
		else if (strncmp(modifier, "bucket", 6) == 0) {
			int ret;

			modifier += 6;

			if (*modifier == 's')
				modifier++;
			if (*modifier != '=')
				goto error;
			modifier++;
			ret = kstrtoul(modifier, 0, buckets);
			if (ret || !(*buckets))
				goto error;
			*flags |= HIST_FIELD_FL_BUCKET;
		} else {
 error:
			hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
			field = ERR_PTR(-EINVAL);
			goto out;
		}
	}

	if (strcmp(field_name, "common_timestamp") == 0) {
		*flags |= HIST_FIELD_FL_TIMESTAMP;
		hist_data->enable_timestamps = true;
		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
			hist_data->attrs->ts_in_usecs = true;
	} else if (strcmp(field_name, "common_cpu") == 0)
		*flags |= HIST_FIELD_FL_CPU;
	else {
		field = trace_find_event_field(file->event_call, field_name);
		if (!field || !field->size) {
			/*
			 * For backward compatibility, if field_name
			 * was "cpu", then we treat this the same as
			 * common_cpu.
			 */
			if (strcmp(field_name, "cpu") == 0) {
				*flags |= HIST_FIELD_FL_CPU;
			} else {
				hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
					 errpos(field_name));
				field = ERR_PTR(-EINVAL);
				goto out;
			}
		}
	}
 out:
	kfree(str);

	return field;
}
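/*
 * Examples of field strings handled above: "bytes_req.buckets=100",
 * "common_pid.execname", "call_site.sym-offset" and
 * "common_timestamp.usecs" each name an event (or common) field plus an
 * optional modifier that is turned into one of the HIST_FIELD_FL_* flags.
 */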
static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
				       struct hist_field *var_ref,
				       char *var_name)
{
	struct hist_field *alias = NULL;
	unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;

	alias = create_hist_field(hist_data, NULL, flags, var_name);
	if (!alias)
		return NULL;

	alias->fn = var_ref->fn;
	alias->operands[0] = var_ref;

	if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
		destroy_hist_field(alias, 0);
		return NULL;
	}

	alias->var_ref_idx = var_ref->var_ref_idx;

	return alias;
}

static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file, char *str,
				     unsigned long *flags, char *var_name)
{
	char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
	struct ftrace_event_field *field = NULL;
	struct hist_field *hist_field = NULL;
	unsigned long buckets = 0;
	int ret = 0;

	s = strchr(str, '.');
	if (s) {
		s = strchr(++s, '.');
		if (s) {
			ref_system = strsep(&str, ".");
			if (!str) {
				ret = -EINVAL;
				goto out;
			}
			ref_event = strsep(&str, ".");
			if (!str) {
				ret = -EINVAL;
				goto out;
			}
			ref_var = str;
		}
	}

	s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
	if (!s) {
		hist_field = parse_var_ref(hist_data, ref_system,
					   ref_event, ref_var);
		if (hist_field) {
			if (var_name) {
				hist_field = create_alias(hist_data, hist_field, var_name);
				if (!hist_field) {
					ret = -ENOMEM;
					goto out;
				}
			}
			return hist_field;
		}
	} else
		str = s;

	field = parse_field(hist_data, file, str, flags, &buckets);
	if (IS_ERR(field)) {
		ret = PTR_ERR(field);
		goto out;
	}

	hist_field = create_hist_field(hist_data, field, *flags, var_name);
	if (!hist_field) {
		ret = -ENOMEM;
		goto out;
	}
	hist_field->buckets = buckets;

	return hist_field;
 out:
	return ERR_PTR(ret);
}

static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file,
				     char *str, unsigned long flags,
				     char *var_name, unsigned int level);
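/*
 * As noted in parse_unary() below, unary minus is only accepted with
 * explicit parentheses: an expression like "-($delta)" (variable name
 * illustrative) parses, while "-$delta" fails with -EINVAL because no
 * '(' follows the '-'.
 */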
explicit parens required */ 2162 2163 if (level > 3) { 2164 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); 2165 ret = -EINVAL; 2166 goto free; 2167 } 2168 2169 str++; /* skip leading '-' */ 2170 2171 s = strchr(str, '('); 2172 if (s) 2173 str++; 2174 else { 2175 ret = -EINVAL; 2176 goto free; 2177 } 2178 2179 s = strrchr(str, ')'); 2180 if (s) 2181 *s = '\0'; 2182 else { 2183 ret = -EINVAL; /* no closing ')' */ 2184 goto free; 2185 } 2186 2187 flags |= HIST_FIELD_FL_EXPR; 2188 expr = create_hist_field(hist_data, NULL, flags, var_name); 2189 if (!expr) { 2190 ret = -ENOMEM; 2191 goto free; 2192 } 2193 2194 operand_flags = 0; 2195 operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level); 2196 if (IS_ERR(operand1)) { 2197 ret = PTR_ERR(operand1); 2198 goto free; 2199 } 2200 if (operand1->flags & HIST_FIELD_FL_STRING) { 2201 /* String type can not be the operand of unary operator. */ 2202 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str)); 2203 destroy_hist_field(operand1, 0); 2204 ret = -EINVAL; 2205 goto free; 2206 } 2207 2208 expr->flags |= operand1->flags & 2209 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 2210 expr->fn = hist_field_unary_minus; 2211 expr->operands[0] = operand1; 2212 expr->operator = FIELD_OP_UNARY_MINUS; 2213 expr->name = expr_str(expr, 0); 2214 expr->type = kstrdup_const(operand1->type, GFP_KERNEL); 2215 if (!expr->type) { 2216 ret = -ENOMEM; 2217 goto free; 2218 } 2219 2220 return expr; 2221 free: 2222 destroy_hist_field(expr, 0); 2223 return ERR_PTR(ret); 2224 } 2225 2226 static int check_expr_operands(struct trace_array *tr, 2227 struct hist_field *operand1, 2228 struct hist_field *operand2) 2229 { 2230 unsigned long operand1_flags = operand1->flags; 2231 unsigned long operand2_flags = operand2->flags; 2232 2233 if ((operand1_flags & HIST_FIELD_FL_VAR_REF) || 2234 (operand1_flags & HIST_FIELD_FL_ALIAS)) { 2235 struct hist_field *var; 2236 2237 var = find_var_field(operand1->var.hist_data, operand1->name); 2238 if (!var) 2239 return -EINVAL; 2240 operand1_flags = var->flags; 2241 } 2242 2243 if ((operand2_flags & HIST_FIELD_FL_VAR_REF) || 2244 (operand2_flags & HIST_FIELD_FL_ALIAS)) { 2245 struct hist_field *var; 2246 2247 var = find_var_field(operand2->var.hist_data, operand2->name); 2248 if (!var) 2249 return -EINVAL; 2250 operand2_flags = var->flags; 2251 } 2252 2253 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) != 2254 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) { 2255 hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0); 2256 return -EINVAL; 2257 } 2258 2259 return 0; 2260 } 2261 2262 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, 2263 struct trace_event_file *file, 2264 char *str, unsigned long flags, 2265 char *var_name, unsigned int level) 2266 { 2267 struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL; 2268 unsigned long operand_flags; 2269 int field_op, ret = -EINVAL; 2270 char *sep, *operand1_str; 2271 2272 if (level > 3) { 2273 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); 2274 return ERR_PTR(-EINVAL); 2275 } 2276 2277 field_op = contains_operator(str); 2278 2279 if (field_op == FIELD_OP_NONE) 2280 return parse_atom(hist_data, file, str, &flags, var_name); 2281 2282 if (field_op == FIELD_OP_UNARY_MINUS) 2283 return parse_unary(hist_data, file, str, flags, var_name, ++level); 2284 2285 switch (field_op) { 2286 case FIELD_OP_MINUS: 2287 sep = "-"; 2288 break; 2289 case FIELD_OP_PLUS: 2290 sep = "+"; 2291 break; 2292 default: 2293 goto free; 
2294 } 2295 2296 operand1_str = strsep(&str, sep); 2297 if (!operand1_str || !str) 2298 goto free; 2299 2300 operand_flags = 0; 2301 operand1 = parse_atom(hist_data, file, operand1_str, 2302 &operand_flags, NULL); 2303 if (IS_ERR(operand1)) { 2304 ret = PTR_ERR(operand1); 2305 operand1 = NULL; 2306 goto free; 2307 } 2308 if (operand1->flags & HIST_FIELD_FL_STRING) { 2309 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str)); 2310 ret = -EINVAL; 2311 goto free; 2312 } 2313 2314 /* rest of string could be another expression e.g. b+c in a+b+c */ 2315 operand_flags = 0; 2316 operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level); 2317 if (IS_ERR(operand2)) { 2318 ret = PTR_ERR(operand2); 2319 operand2 = NULL; 2320 goto free; 2321 } 2322 if (operand2->flags & HIST_FIELD_FL_STRING) { 2323 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str)); 2324 ret = -EINVAL; 2325 goto free; 2326 } 2327 2328 ret = check_expr_operands(file->tr, operand1, operand2); 2329 if (ret) 2330 goto free; 2331 2332 flags |= HIST_FIELD_FL_EXPR; 2333 2334 flags |= operand1->flags & 2335 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 2336 2337 expr = create_hist_field(hist_data, NULL, flags, var_name); 2338 if (!expr) { 2339 ret = -ENOMEM; 2340 goto free; 2341 } 2342 2343 operand1->read_once = true; 2344 operand2->read_once = true; 2345 2346 expr->operands[0] = operand1; 2347 expr->operands[1] = operand2; 2348 2349 /* The operand sizes should be the same, so just pick one */ 2350 expr->size = operand1->size; 2351 2352 expr->operator = field_op; 2353 expr->name = expr_str(expr, 0); 2354 expr->type = kstrdup_const(operand1->type, GFP_KERNEL); 2355 if (!expr->type) { 2356 ret = -ENOMEM; 2357 goto free; 2358 } 2359 2360 switch (field_op) { 2361 case FIELD_OP_MINUS: 2362 expr->fn = hist_field_minus; 2363 break; 2364 case FIELD_OP_PLUS: 2365 expr->fn = hist_field_plus; 2366 break; 2367 default: 2368 ret = -EINVAL; 2369 goto free; 2370 } 2371 2372 return expr; 2373 free: 2374 destroy_hist_field(operand1, 0); 2375 destroy_hist_field(operand2, 0); 2376 destroy_hist_field(expr, 0); 2377 2378 return ERR_PTR(ret); 2379 } 2380 2381 static char *find_trigger_filter(struct hist_trigger_data *hist_data, 2382 struct trace_event_file *file) 2383 { 2384 struct event_trigger_data *test; 2385 2386 lockdep_assert_held(&event_mutex); 2387 2388 list_for_each_entry(test, &file->triggers, list) { 2389 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 2390 if (test->private_data == hist_data) 2391 return test->filter_str; 2392 } 2393 } 2394 2395 return NULL; 2396 } 2397 2398 static struct event_command trigger_hist_cmd; 2399 static int event_hist_trigger_func(struct event_command *cmd_ops, 2400 struct trace_event_file *file, 2401 char *glob, char *cmd, char *param); 2402 2403 static bool compatible_keys(struct hist_trigger_data *target_hist_data, 2404 struct hist_trigger_data *hist_data, 2405 unsigned int n_keys) 2406 { 2407 struct hist_field *target_hist_field, *hist_field; 2408 unsigned int n, i, j; 2409 2410 if (hist_data->n_fields - hist_data->n_vals != n_keys) 2411 return false; 2412 2413 i = hist_data->n_vals; 2414 j = target_hist_data->n_vals; 2415 2416 for (n = 0; n < n_keys; n++) { 2417 hist_field = hist_data->fields[i + n]; 2418 target_hist_field = target_hist_data->fields[j + n]; 2419 2420 if (strcmp(hist_field->type, target_hist_field->type) != 0) 2421 return false; 2422 if (hist_field->size != target_hist_field->size) 2423 return false; 2424 if (hist_field->is_signed != 
target_hist_field->is_signed) 2425 return false; 2426 } 2427 2428 return true; 2429 } 2430 2431 static struct hist_trigger_data * 2432 find_compatible_hist(struct hist_trigger_data *target_hist_data, 2433 struct trace_event_file *file) 2434 { 2435 struct hist_trigger_data *hist_data; 2436 struct event_trigger_data *test; 2437 unsigned int n_keys; 2438 2439 lockdep_assert_held(&event_mutex); 2440 2441 n_keys = target_hist_data->n_fields - target_hist_data->n_vals; 2442 2443 list_for_each_entry(test, &file->triggers, list) { 2444 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 2445 hist_data = test->private_data; 2446 2447 if (compatible_keys(target_hist_data, hist_data, n_keys)) 2448 return hist_data; 2449 } 2450 } 2451 2452 return NULL; 2453 } 2454 2455 static struct trace_event_file *event_file(struct trace_array *tr, 2456 char *system, char *event_name) 2457 { 2458 struct trace_event_file *file; 2459 2460 file = __find_event_file(tr, system, event_name); 2461 if (!file) 2462 return ERR_PTR(-EINVAL); 2463 2464 return file; 2465 } 2466 2467 static struct hist_field * 2468 find_synthetic_field_var(struct hist_trigger_data *target_hist_data, 2469 char *system, char *event_name, char *field_name) 2470 { 2471 struct hist_field *event_var; 2472 char *synthetic_name; 2473 2474 synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); 2475 if (!synthetic_name) 2476 return ERR_PTR(-ENOMEM); 2477 2478 strcpy(synthetic_name, "synthetic_"); 2479 strcat(synthetic_name, field_name); 2480 2481 event_var = find_event_var(target_hist_data, system, event_name, synthetic_name); 2482 2483 kfree(synthetic_name); 2484 2485 return event_var; 2486 } 2487 2488 /** 2489 * create_field_var_hist - Automatically create a histogram and var for a field 2490 * @target_hist_data: The target hist trigger 2491 * @subsys_name: Optional subsystem name 2492 * @event_name: Optional event name 2493 * @field_name: The name of the field (and the resulting variable) 2494 * 2495 * Hist trigger actions fetch data from variables, not directly from 2496 * events. However, for convenience, users are allowed to directly 2497 * specify an event field in an action, which will be automatically 2498 * converted into a variable on their behalf. 2499 2500 * If a user specifies a field on an event that isn't the event the 2501 * histogram currently being defined (the target event histogram), the 2502 * only way that can be accomplished is if a new hist trigger is 2503 * created and the field variable defined on that. 2504 * 2505 * This function creates a new histogram compatible with the target 2506 * event (meaning a histogram with the same key as the target 2507 * histogram), and creates a variable for the specified field, but 2508 * with 'synthetic_' prepended to the variable name in order to avoid 2509 * collision with normal field variables. 2510 * 2511 * Return: The variable created for the field. 
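 *
 * As an illustration (the field, event, and key names here are
 * hypothetical, not taken from this file): if an action on a histogram
 * keyed on 'pid' references field 'prio' of 'sched.sched_switch', the
 * command string assembled below would look roughly like:
 *
 *   keys=pid:synthetic_prio=prio [if <existing filter>]
 *
 * and is then registered as a new hist trigger on sched_switch,
 * creating the 'synthetic_prio' variable this function returns.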
2512 */ 2513 static struct hist_field * 2514 create_field_var_hist(struct hist_trigger_data *target_hist_data, 2515 char *subsys_name, char *event_name, char *field_name) 2516 { 2517 struct trace_array *tr = target_hist_data->event_file->tr; 2518 struct hist_trigger_data *hist_data; 2519 unsigned int i, n, first = true; 2520 struct field_var_hist *var_hist; 2521 struct trace_event_file *file; 2522 struct hist_field *key_field; 2523 struct hist_field *event_var; 2524 char *saved_filter; 2525 char *cmd; 2526 int ret; 2527 2528 if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) { 2529 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name)); 2530 return ERR_PTR(-EINVAL); 2531 } 2532 2533 file = event_file(tr, subsys_name, event_name); 2534 2535 if (IS_ERR(file)) { 2536 hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name)); 2537 ret = PTR_ERR(file); 2538 return ERR_PTR(ret); 2539 } 2540 2541 /* 2542 * Look for a histogram compatible with target. We'll use the 2543 * found histogram specification to create a new matching 2544 * histogram with our variable on it. target_hist_data is not 2545 * yet a registered histogram so we can't use that. 2546 */ 2547 hist_data = find_compatible_hist(target_hist_data, file); 2548 if (!hist_data) { 2549 hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name)); 2550 return ERR_PTR(-EINVAL); 2551 } 2552 2553 /* See if a synthetic field variable has already been created */ 2554 event_var = find_synthetic_field_var(target_hist_data, subsys_name, 2555 event_name, field_name); 2556 if (!IS_ERR_OR_NULL(event_var)) 2557 return event_var; 2558 2559 var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL); 2560 if (!var_hist) 2561 return ERR_PTR(-ENOMEM); 2562 2563 cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); 2564 if (!cmd) { 2565 kfree(var_hist); 2566 return ERR_PTR(-ENOMEM); 2567 } 2568 2569 /* Use the same keys as the compatible histogram */ 2570 strcat(cmd, "keys="); 2571 2572 for_each_hist_key_field(i, hist_data) { 2573 key_field = hist_data->fields[i]; 2574 if (!first) 2575 strcat(cmd, ","); 2576 strcat(cmd, key_field->field->name); 2577 first = false; 2578 } 2579 2580 /* Create the synthetic field variable specification */ 2581 strcat(cmd, ":synthetic_"); 2582 strcat(cmd, field_name); 2583 strcat(cmd, "="); 2584 strcat(cmd, field_name); 2585 2586 /* Use the same filter as the compatible histogram */ 2587 saved_filter = find_trigger_filter(hist_data, file); 2588 if (saved_filter) { 2589 strcat(cmd, " if "); 2590 strcat(cmd, saved_filter); 2591 } 2592 2593 var_hist->cmd = kstrdup(cmd, GFP_KERNEL); 2594 if (!var_hist->cmd) { 2595 kfree(cmd); 2596 kfree(var_hist); 2597 return ERR_PTR(-ENOMEM); 2598 } 2599 2600 /* Save the compatible histogram information */ 2601 var_hist->hist_data = hist_data; 2602 2603 /* Create the new histogram with our variable */ 2604 ret = event_hist_trigger_func(&trigger_hist_cmd, file, 2605 "", "hist", cmd); 2606 if (ret) { 2607 kfree(cmd); 2608 kfree(var_hist->cmd); 2609 kfree(var_hist); 2610 hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name)); 2611 return ERR_PTR(ret); 2612 } 2613 2614 kfree(cmd); 2615 2616 /* If we can't find the variable, something went wrong */ 2617 event_var = find_synthetic_field_var(target_hist_data, subsys_name, 2618 event_name, field_name); 2619 if (IS_ERR_OR_NULL(event_var)) { 2620 kfree(var_hist->cmd); 2621 kfree(var_hist); 2622 hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name)); 2623 return ERR_PTR(-EINVAL); 2624 } 2625 2626 n = target_hist_data->n_field_var_hists; 2627 
target_hist_data->field_var_hists[n] = var_hist; 2628 target_hist_data->n_field_var_hists++; 2629 2630 return event_var; 2631 } 2632 2633 static struct hist_field * 2634 find_target_event_var(struct hist_trigger_data *hist_data, 2635 char *subsys_name, char *event_name, char *var_name) 2636 { 2637 struct trace_event_file *file = hist_data->event_file; 2638 struct hist_field *hist_field = NULL; 2639 2640 if (subsys_name) { 2641 struct trace_event_call *call; 2642 2643 if (!event_name) 2644 return NULL; 2645 2646 call = file->event_call; 2647 2648 if (strcmp(subsys_name, call->class->system) != 0) 2649 return NULL; 2650 2651 if (strcmp(event_name, trace_event_name(call)) != 0) 2652 return NULL; 2653 } 2654 2655 hist_field = find_var_field(hist_data, var_name); 2656 2657 return hist_field; 2658 } 2659 2660 static inline void __update_field_vars(struct tracing_map_elt *elt, 2661 struct trace_buffer *buffer, 2662 struct ring_buffer_event *rbe, 2663 void *rec, 2664 struct field_var **field_vars, 2665 unsigned int n_field_vars, 2666 unsigned int field_var_str_start) 2667 { 2668 struct hist_elt_data *elt_data = elt->private_data; 2669 unsigned int i, j, var_idx; 2670 u64 var_val; 2671 2672 for (i = 0, j = field_var_str_start; i < n_field_vars; i++) { 2673 struct field_var *field_var = field_vars[i]; 2674 struct hist_field *var = field_var->var; 2675 struct hist_field *val = field_var->val; 2676 2677 var_val = val->fn(val, elt, buffer, rbe, rec); 2678 var_idx = var->var.idx; 2679 2680 if (val->flags & HIST_FIELD_FL_STRING) { 2681 char *str = elt_data->field_var_str[j++]; 2682 char *val_str = (char *)(uintptr_t)var_val; 2683 2684 strscpy(str, val_str, STR_VAR_LEN_MAX); 2685 var_val = (u64)(uintptr_t)str; 2686 } 2687 tracing_map_set_var(elt, var_idx, var_val); 2688 } 2689 } 2690 2691 static void update_field_vars(struct hist_trigger_data *hist_data, 2692 struct tracing_map_elt *elt, 2693 struct trace_buffer *buffer, 2694 struct ring_buffer_event *rbe, 2695 void *rec) 2696 { 2697 __update_field_vars(elt, buffer, rbe, rec, hist_data->field_vars, 2698 hist_data->n_field_vars, 0); 2699 } 2700 2701 static void save_track_data_vars(struct hist_trigger_data *hist_data, 2702 struct tracing_map_elt *elt, 2703 struct trace_buffer *buffer, void *rec, 2704 struct ring_buffer_event *rbe, void *key, 2705 struct action_data *data, u64 *var_ref_vals) 2706 { 2707 __update_field_vars(elt, buffer, rbe, rec, hist_data->save_vars, 2708 hist_data->n_save_vars, hist_data->n_field_var_str); 2709 } 2710 2711 static struct hist_field *create_var(struct hist_trigger_data *hist_data, 2712 struct trace_event_file *file, 2713 char *name, int size, const char *type) 2714 { 2715 struct hist_field *var; 2716 int idx; 2717 2718 if (find_var(hist_data, file, name) && !hist_data->remove) { 2719 var = ERR_PTR(-EINVAL); 2720 goto out; 2721 } 2722 2723 var = kzalloc(sizeof(struct hist_field), GFP_KERNEL); 2724 if (!var) { 2725 var = ERR_PTR(-ENOMEM); 2726 goto out; 2727 } 2728 2729 idx = tracing_map_add_var(hist_data->map); 2730 if (idx < 0) { 2731 kfree(var); 2732 var = ERR_PTR(-EINVAL); 2733 goto out; 2734 } 2735 2736 var->ref = 1; 2737 var->flags = HIST_FIELD_FL_VAR; 2738 var->var.idx = idx; 2739 var->var.hist_data = var->hist_data = hist_data; 2740 var->size = size; 2741 var->var.name = kstrdup(name, GFP_KERNEL); 2742 var->type = kstrdup_const(type, GFP_KERNEL); 2743 if (!var->var.name || !var->type) { 2744 kfree_const(var->type); 2745 kfree(var->var.name); 2746 kfree(var); 2747 var = ERR_PTR(-ENOMEM); 2748 } 2749 out: 2750 return 
var; 2751 } 2752 2753 static struct field_var *create_field_var(struct hist_trigger_data *hist_data, 2754 struct trace_event_file *file, 2755 char *field_name) 2756 { 2757 struct hist_field *val = NULL, *var = NULL; 2758 unsigned long flags = HIST_FIELD_FL_VAR; 2759 struct trace_array *tr = file->tr; 2760 struct field_var *field_var; 2761 int ret = 0; 2762 2763 if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) { 2764 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name)); 2765 ret = -EINVAL; 2766 goto err; 2767 } 2768 2769 val = parse_atom(hist_data, file, field_name, &flags, NULL); 2770 if (IS_ERR(val)) { 2771 hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name)); 2772 ret = PTR_ERR(val); 2773 goto err; 2774 } 2775 2776 var = create_var(hist_data, file, field_name, val->size, val->type); 2777 if (IS_ERR(var)) { 2778 hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name)); 2779 kfree(val); 2780 ret = PTR_ERR(var); 2781 goto err; 2782 } 2783 2784 field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL); 2785 if (!field_var) { 2786 kfree(val); 2787 kfree(var); 2788 ret = -ENOMEM; 2789 goto err; 2790 } 2791 2792 field_var->var = var; 2793 field_var->val = val; 2794 out: 2795 return field_var; 2796 err: 2797 field_var = ERR_PTR(ret); 2798 goto out; 2799 } 2800 2801 /** 2802 * create_target_field_var - Automatically create a variable for a field 2803 * @target_hist_data: The target hist trigger 2804 * @subsys_name: Optional subsystem name 2805 * @event_name: Optional event name 2806 * @var_name: The name of the field (and the resulting variable) 2807 * 2808 * Hist trigger actions fetch data from variables, not directly from 2809 * events. However, for convenience, users are allowed to directly 2810 * specify an event field in an action, which will be automatically 2811 * converted into a variable on their behalf. 2812 2813 * This function creates a field variable with the name var_name on 2814 * the hist trigger currently being defined on the target event. If 2815 * subsys_name and event_name are specified, this function simply 2816 * verifies that they do in fact match the target event subsystem and 2817 * event name. 2818 * 2819 * Return: The variable created for the field. 
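 *
 * As an illustration (variable and event names are hypothetical): an
 * action such as onmax($wakeup_lat).save(prio) on the target event
 * ends up calling this function with var_name == "prio", creating a
 * field variable 'prio' that records the target event's 'prio' field.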
2820 */ 2821 static struct field_var * 2822 create_target_field_var(struct hist_trigger_data *target_hist_data, 2823 char *subsys_name, char *event_name, char *var_name) 2824 { 2825 struct trace_event_file *file = target_hist_data->event_file; 2826 2827 if (subsys_name) { 2828 struct trace_event_call *call; 2829 2830 if (!event_name) 2831 return NULL; 2832 2833 call = file->event_call; 2834 2835 if (strcmp(subsys_name, call->class->system) != 0) 2836 return NULL; 2837 2838 if (strcmp(event_name, trace_event_name(call)) != 0) 2839 return NULL; 2840 } 2841 2842 return create_field_var(target_hist_data, file, var_name); 2843 } 2844 2845 static bool check_track_val_max(u64 track_val, u64 var_val) 2846 { 2847 if (var_val <= track_val) 2848 return false; 2849 2850 return true; 2851 } 2852 2853 static bool check_track_val_changed(u64 track_val, u64 var_val) 2854 { 2855 if (var_val == track_val) 2856 return false; 2857 2858 return true; 2859 } 2860 2861 static u64 get_track_val(struct hist_trigger_data *hist_data, 2862 struct tracing_map_elt *elt, 2863 struct action_data *data) 2864 { 2865 unsigned int track_var_idx = data->track_data.track_var->var.idx; 2866 u64 track_val; 2867 2868 track_val = tracing_map_read_var(elt, track_var_idx); 2869 2870 return track_val; 2871 } 2872 2873 static void save_track_val(struct hist_trigger_data *hist_data, 2874 struct tracing_map_elt *elt, 2875 struct action_data *data, u64 var_val) 2876 { 2877 unsigned int track_var_idx = data->track_data.track_var->var.idx; 2878 2879 tracing_map_set_var(elt, track_var_idx, var_val); 2880 } 2881 2882 static void save_track_data(struct hist_trigger_data *hist_data, 2883 struct tracing_map_elt *elt, 2884 struct trace_buffer *buffer, void *rec, 2885 struct ring_buffer_event *rbe, void *key, 2886 struct action_data *data, u64 *var_ref_vals) 2887 { 2888 if (data->track_data.save_data) 2889 data->track_data.save_data(hist_data, elt, buffer, rec, rbe, 2890 key, data, var_ref_vals); 2891 } 2892 2893 static bool check_track_val(struct tracing_map_elt *elt, 2894 struct action_data *data, 2895 u64 var_val) 2896 { 2897 struct hist_trigger_data *hist_data; 2898 u64 track_val; 2899 2900 hist_data = data->track_data.track_var->hist_data; 2901 track_val = get_track_val(hist_data, elt, data); 2902 2903 return data->track_data.check_val(track_val, var_val); 2904 } 2905 2906 #ifdef CONFIG_TRACER_SNAPSHOT 2907 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) 2908 { 2909 /* called with tr->max_lock held */ 2910 struct track_data *track_data = tr->cond_snapshot->cond_data; 2911 struct hist_elt_data *elt_data, *track_elt_data; 2912 struct snapshot_context *context = cond_data; 2913 struct action_data *action; 2914 u64 track_val; 2915 2916 if (!track_data) 2917 return false; 2918 2919 action = track_data->action_data; 2920 2921 track_val = get_track_val(track_data->hist_data, context->elt, 2922 track_data->action_data); 2923 2924 if (!action->track_data.check_val(track_data->track_val, track_val)) 2925 return false; 2926 2927 track_data->track_val = track_val; 2928 memcpy(track_data->key, context->key, track_data->key_len); 2929 2930 elt_data = context->elt->private_data; 2931 track_elt_data = track_data->elt.private_data; 2932 if (elt_data->comm) 2933 strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN); 2934 2935 track_data->updated = true; 2936 2937 return true; 2938 } 2939 2940 static void save_track_data_snapshot(struct hist_trigger_data *hist_data, 2941 struct tracing_map_elt *elt, 2942 struct trace_buffer 
*buffer, void *rec, 2943 struct ring_buffer_event *rbe, void *key, 2944 struct action_data *data, 2945 u64 *var_ref_vals) 2946 { 2947 struct trace_event_file *file = hist_data->event_file; 2948 struct snapshot_context context; 2949 2950 context.elt = elt; 2951 context.key = key; 2952 2953 tracing_snapshot_cond(file->tr, &context); 2954 } 2955 2956 static void hist_trigger_print_key(struct seq_file *m, 2957 struct hist_trigger_data *hist_data, 2958 void *key, 2959 struct tracing_map_elt *elt); 2960 2961 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data) 2962 { 2963 unsigned int i; 2964 2965 if (!hist_data->n_actions) 2966 return NULL; 2967 2968 for (i = 0; i < hist_data->n_actions; i++) { 2969 struct action_data *data = hist_data->actions[i]; 2970 2971 if (data->action == ACTION_SNAPSHOT) 2972 return data; 2973 } 2974 2975 return NULL; 2976 } 2977 2978 static void track_data_snapshot_print(struct seq_file *m, 2979 struct hist_trigger_data *hist_data) 2980 { 2981 struct trace_event_file *file = hist_data->event_file; 2982 struct track_data *track_data; 2983 struct action_data *action; 2984 2985 track_data = tracing_cond_snapshot_data(file->tr); 2986 if (!track_data) 2987 return; 2988 2989 if (!track_data->updated) 2990 return; 2991 2992 action = snapshot_action(hist_data); 2993 if (!action) 2994 return; 2995 2996 seq_puts(m, "\nSnapshot taken (see tracing/snapshot). Details:\n"); 2997 seq_printf(m, "\ttriggering value { %s(%s) }: %10llu", 2998 action->handler == HANDLER_ONMAX ? "onmax" : "onchange", 2999 action->track_data.var_str, track_data->track_val); 3000 3001 seq_puts(m, "\ttriggered by event with key: "); 3002 hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt); 3003 seq_putc(m, '\n'); 3004 } 3005 #else 3006 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) 3007 { 3008 return false; 3009 } 3010 static void save_track_data_snapshot(struct hist_trigger_data *hist_data, 3011 struct tracing_map_elt *elt, 3012 struct trace_buffer *buffer, void *rec, 3013 struct ring_buffer_event *rbe, void *key, 3014 struct action_data *data, 3015 u64 *var_ref_vals) {} 3016 static void track_data_snapshot_print(struct seq_file *m, 3017 struct hist_trigger_data *hist_data) {} 3018 #endif /* CONFIG_TRACER_SNAPSHOT */ 3019 3020 static void track_data_print(struct seq_file *m, 3021 struct hist_trigger_data *hist_data, 3022 struct tracing_map_elt *elt, 3023 struct action_data *data) 3024 { 3025 u64 track_val = get_track_val(hist_data, elt, data); 3026 unsigned int i, save_var_idx; 3027 3028 if (data->handler == HANDLER_ONMAX) 3029 seq_printf(m, "\n\tmax: %10llu", track_val); 3030 else if (data->handler == HANDLER_ONCHANGE) 3031 seq_printf(m, "\n\tchanged: %10llu", track_val); 3032 3033 if (data->action == ACTION_SNAPSHOT) 3034 return; 3035 3036 for (i = 0; i < hist_data->n_save_vars; i++) { 3037 struct hist_field *save_val = hist_data->save_vars[i]->val; 3038 struct hist_field *save_var = hist_data->save_vars[i]->var; 3039 u64 val; 3040 3041 save_var_idx = save_var->var.idx; 3042 3043 val = tracing_map_read_var(elt, save_var_idx); 3044 3045 if (save_val->flags & HIST_FIELD_FL_STRING) { 3046 seq_printf(m, " %s: %-32s", save_var->var.name, 3047 (char *)(uintptr_t)(val)); 3048 } else 3049 seq_printf(m, " %s: %10llu", save_var->var.name, val); 3050 } 3051 } 3052 3053 static void ontrack_action(struct hist_trigger_data *hist_data, 3054 struct tracing_map_elt *elt, 3055 struct trace_buffer *buffer, void *rec, 3056 struct ring_buffer_event *rbe, 
void *key, 3057 struct action_data *data, u64 *var_ref_vals) 3058 { 3059 u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx]; 3060 3061 if (check_track_val(elt, data, var_val)) { 3062 save_track_val(hist_data, elt, data, var_val); 3063 save_track_data(hist_data, elt, buffer, rec, rbe, 3064 key, data, var_ref_vals); 3065 } 3066 } 3067 3068 static void action_data_destroy(struct action_data *data) 3069 { 3070 unsigned int i; 3071 3072 lockdep_assert_held(&event_mutex); 3073 3074 kfree(data->action_name); 3075 3076 for (i = 0; i < data->n_params; i++) 3077 kfree(data->params[i]); 3078 3079 if (data->synth_event) 3080 data->synth_event->ref--; 3081 3082 kfree(data->synth_event_name); 3083 3084 kfree(data); 3085 } 3086 3087 static void track_data_destroy(struct hist_trigger_data *hist_data, 3088 struct action_data *data) 3089 { 3090 struct trace_event_file *file = hist_data->event_file; 3091 3092 destroy_hist_field(data->track_data.track_var, 0); 3093 3094 if (data->action == ACTION_SNAPSHOT) { 3095 struct track_data *track_data; 3096 3097 track_data = tracing_cond_snapshot_data(file->tr); 3098 if (track_data && track_data->hist_data == hist_data) { 3099 tracing_snapshot_cond_disable(file->tr); 3100 track_data_free(track_data); 3101 } 3102 } 3103 3104 kfree(data->track_data.var_str); 3105 3106 action_data_destroy(data); 3107 } 3108 3109 static int action_create(struct hist_trigger_data *hist_data, 3110 struct action_data *data); 3111 3112 static int track_data_create(struct hist_trigger_data *hist_data, 3113 struct action_data *data) 3114 { 3115 struct hist_field *var_field, *ref_field, *track_var = NULL; 3116 struct trace_event_file *file = hist_data->event_file; 3117 struct trace_array *tr = file->tr; 3118 char *track_data_var_str; 3119 int ret = 0; 3120 3121 track_data_var_str = data->track_data.var_str; 3122 if (track_data_var_str[0] != '$') { 3123 hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str)); 3124 return -EINVAL; 3125 } 3126 track_data_var_str++; 3127 3128 var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str); 3129 if (!var_field) { 3130 hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str)); 3131 return -EINVAL; 3132 } 3133 3134 ref_field = create_var_ref(hist_data, var_field, NULL, NULL); 3135 if (!ref_field) 3136 return -ENOMEM; 3137 3138 data->track_data.var_ref = ref_field; 3139 3140 if (data->handler == HANDLER_ONMAX) 3141 track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64"); 3142 if (IS_ERR(track_var)) { 3143 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); 3144 ret = PTR_ERR(track_var); 3145 goto out; 3146 } 3147 3148 if (data->handler == HANDLER_ONCHANGE) 3149 track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64"); 3150 if (IS_ERR(track_var)) { 3151 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); 3152 ret = PTR_ERR(track_var); 3153 goto out; 3154 } 3155 data->track_data.track_var = track_var; 3156 3157 ret = action_create(hist_data, data); 3158 out: 3159 return ret; 3160 } 3161 3162 static int parse_action_params(struct trace_array *tr, char *params, 3163 struct action_data *data) 3164 { 3165 char *param, *saved_param; 3166 bool first_param = true; 3167 int ret = 0; 3168 3169 while (params) { 3170 if (data->n_params >= SYNTH_FIELDS_MAX) { 3171 hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0); 3172 goto out; 3173 } 3174 3175 param = strsep(¶ms, ","); 3176 if (!param) { 3177 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0); 3178 ret = -EINVAL; 3179 goto out; 3180 } 3181 3182 param = strstrip(param); 
3183 if (strlen(param) < 2) { 3184 hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param)); 3185 ret = -EINVAL; 3186 goto out; 3187 } 3188 3189 saved_param = kstrdup(param, GFP_KERNEL); 3190 if (!saved_param) { 3191 ret = -ENOMEM; 3192 goto out; 3193 } 3194 3195 if (first_param && data->use_trace_keyword) { 3196 data->synth_event_name = saved_param; 3197 first_param = false; 3198 continue; 3199 } 3200 first_param = false; 3201 3202 data->params[data->n_params++] = saved_param; 3203 } 3204 out: 3205 return ret; 3206 } 3207 3208 static int action_parse(struct trace_array *tr, char *str, struct action_data *data, 3209 enum handler_id handler) 3210 { 3211 char *action_name; 3212 int ret = 0; 3213 3214 strsep(&str, "."); 3215 if (!str) { 3216 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); 3217 ret = -EINVAL; 3218 goto out; 3219 } 3220 3221 action_name = strsep(&str, "("); 3222 if (!action_name || !str) { 3223 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); 3224 ret = -EINVAL; 3225 goto out; 3226 } 3227 3228 if (str_has_prefix(action_name, "save")) { 3229 char *params = strsep(&str, ")"); 3230 3231 if (!params) { 3232 hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0); 3233 ret = -EINVAL; 3234 goto out; 3235 } 3236 3237 ret = parse_action_params(tr, params, data); 3238 if (ret) 3239 goto out; 3240 3241 if (handler == HANDLER_ONMAX) 3242 data->track_data.check_val = check_track_val_max; 3243 else if (handler == HANDLER_ONCHANGE) 3244 data->track_data.check_val = check_track_val_changed; 3245 else { 3246 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); 3247 ret = -EINVAL; 3248 goto out; 3249 } 3250 3251 data->track_data.save_data = save_track_data_vars; 3252 data->fn = ontrack_action; 3253 data->action = ACTION_SAVE; 3254 } else if (str_has_prefix(action_name, "snapshot")) { 3255 char *params = strsep(&str, ")"); 3256 3257 if (!str) { 3258 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params)); 3259 ret = -EINVAL; 3260 goto out; 3261 } 3262 3263 if (handler == HANDLER_ONMAX) 3264 data->track_data.check_val = check_track_val_max; 3265 else if (handler == HANDLER_ONCHANGE) 3266 data->track_data.check_val = check_track_val_changed; 3267 else { 3268 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); 3269 ret = -EINVAL; 3270 goto out; 3271 } 3272 3273 data->track_data.save_data = save_track_data_snapshot; 3274 data->fn = ontrack_action; 3275 data->action = ACTION_SNAPSHOT; 3276 } else { 3277 char *params = strsep(&str, ")"); 3278 3279 if (str_has_prefix(action_name, "trace")) 3280 data->use_trace_keyword = true; 3281 3282 if (params) { 3283 ret = parse_action_params(tr, params, data); 3284 if (ret) 3285 goto out; 3286 } 3287 3288 if (handler == HANDLER_ONMAX) 3289 data->track_data.check_val = check_track_val_max; 3290 else if (handler == HANDLER_ONCHANGE) 3291 data->track_data.check_val = check_track_val_changed; 3292 3293 if (handler != HANDLER_ONMATCH) { 3294 data->track_data.save_data = action_trace; 3295 data->fn = ontrack_action; 3296 } else 3297 data->fn = action_trace; 3298 3299 data->action = ACTION_TRACE; 3300 } 3301 3302 data->action_name = kstrdup(action_name, GFP_KERNEL); 3303 if (!data->action_name) { 3304 ret = -ENOMEM; 3305 goto out; 3306 } 3307 3308 data->handler = handler; 3309 out: 3310 return ret; 3311 } 3312 3313 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data, 3314 char *str, enum handler_id handler) 3315 { 3316 struct action_data *data; 3317 int ret = -EINVAL; 3318 char *var_str; 3319 3320 data = kzalloc(sizeof(*data), GFP_KERNEL); 3321 if (!data) 
3322 return ERR_PTR(-ENOMEM); 3323 3324 var_str = strsep(&str, ")"); 3325 if (!var_str || !str) { 3326 ret = -EINVAL; 3327 goto free; 3328 } 3329 3330 data->track_data.var_str = kstrdup(var_str, GFP_KERNEL); 3331 if (!data->track_data.var_str) { 3332 ret = -ENOMEM; 3333 goto free; 3334 } 3335 3336 ret = action_parse(hist_data->event_file->tr, str, data, handler); 3337 if (ret) 3338 goto free; 3339 out: 3340 return data; 3341 free: 3342 track_data_destroy(hist_data, data); 3343 data = ERR_PTR(ret); 3344 goto out; 3345 } 3346 3347 static void onmatch_destroy(struct action_data *data) 3348 { 3349 kfree(data->match_data.event); 3350 kfree(data->match_data.event_system); 3351 3352 action_data_destroy(data); 3353 } 3354 3355 static void destroy_field_var(struct field_var *field_var) 3356 { 3357 if (!field_var) 3358 return; 3359 3360 destroy_hist_field(field_var->var, 0); 3361 destroy_hist_field(field_var->val, 0); 3362 3363 kfree(field_var); 3364 } 3365 3366 static void destroy_field_vars(struct hist_trigger_data *hist_data) 3367 { 3368 unsigned int i; 3369 3370 for (i = 0; i < hist_data->n_field_vars; i++) 3371 destroy_field_var(hist_data->field_vars[i]); 3372 3373 for (i = 0; i < hist_data->n_save_vars; i++) 3374 destroy_field_var(hist_data->save_vars[i]); 3375 } 3376 3377 static void save_field_var(struct hist_trigger_data *hist_data, 3378 struct field_var *field_var) 3379 { 3380 hist_data->field_vars[hist_data->n_field_vars++] = field_var; 3381 3382 if (field_var->val->flags & HIST_FIELD_FL_STRING) 3383 hist_data->n_field_var_str++; 3384 } 3385 3386 3387 static int check_synth_field(struct synth_event *event, 3388 struct hist_field *hist_field, 3389 unsigned int field_pos) 3390 { 3391 struct synth_field *field; 3392 3393 if (field_pos >= event->n_fields) 3394 return -EINVAL; 3395 3396 field = event->fields[field_pos]; 3397 3398 /* 3399 * A dynamic string synth field can accept static or 3400 * dynamic. A static string synth field can only accept a 3401 * same-sized static string, which is checked for later. 3402 */ 3403 if (strstr(hist_field->type, "char[") && field->is_string 3404 && field->is_dynamic) 3405 return 0; 3406 3407 if (strcmp(field->type, hist_field->type) != 0) { 3408 if (field->size != hist_field->size || 3409 field->is_signed != hist_field->is_signed) 3410 return -EINVAL; 3411 } 3412 3413 return 0; 3414 } 3415 3416 static struct hist_field * 3417 trace_action_find_var(struct hist_trigger_data *hist_data, 3418 struct action_data *data, 3419 char *system, char *event, char *var) 3420 { 3421 struct trace_array *tr = hist_data->event_file->tr; 3422 struct hist_field *hist_field; 3423 3424 var++; /* skip '$' */ 3425 3426 hist_field = find_target_event_var(hist_data, system, event, var); 3427 if (!hist_field) { 3428 if (!system && data->handler == HANDLER_ONMATCH) { 3429 system = data->match_data.event_system; 3430 event = data->match_data.event; 3431 } 3432 3433 hist_field = find_event_var(hist_data, system, event, var); 3434 } 3435 3436 if (!hist_field) 3437 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var)); 3438 3439 return hist_field; 3440 } 3441 3442 static struct hist_field * 3443 trace_action_create_field_var(struct hist_trigger_data *hist_data, 3444 struct action_data *data, char *system, 3445 char *event, char *var) 3446 { 3447 struct hist_field *hist_field = NULL; 3448 struct field_var *field_var; 3449 3450 /* 3451 * First try to create a field var on the target event (the 3452 * currently being defined). 
This will create a variable for 3453 * unqualified fields on the target event, or if qualified, 3454 * target fields that have qualified names matching the target. 3455 */ 3456 field_var = create_target_field_var(hist_data, system, event, var); 3457 3458 if (field_var && !IS_ERR(field_var)) { 3459 save_field_var(hist_data, field_var); 3460 hist_field = field_var->var; 3461 } else { 3462 field_var = NULL; 3463 /* 3464 * If no explicit system.event is specified, default to 3465 * looking for fields on the onmatch(system.event.xxx) 3466 * event. 3467 */ 3468 if (!system && data->handler == HANDLER_ONMATCH) { 3469 system = data->match_data.event_system; 3470 event = data->match_data.event; 3471 } 3472 3473 if (!event) 3474 goto free; 3475 /* 3476 * At this point, we're looking at a field on another 3477 * event. Because we can't modify a hist trigger on 3478 * another event to add a variable for a field, we need 3479 * to create a new trigger on that event and create the 3480 * variable at the same time. 3481 */ 3482 hist_field = create_field_var_hist(hist_data, system, event, var); 3483 if (IS_ERR(hist_field)) 3484 goto free; 3485 } 3486 out: 3487 return hist_field; 3488 free: 3489 destroy_field_var(field_var); 3490 hist_field = NULL; 3491 goto out; 3492 } 3493 3494 static int trace_action_create(struct hist_trigger_data *hist_data, 3495 struct action_data *data) 3496 { 3497 struct trace_array *tr = hist_data->event_file->tr; 3498 char *event_name, *param, *system = NULL; 3499 struct hist_field *hist_field, *var_ref; 3500 unsigned int i; 3501 unsigned int field_pos = 0; 3502 struct synth_event *event; 3503 char *synth_event_name; 3504 int var_ref_idx, ret = 0; 3505 3506 lockdep_assert_held(&event_mutex); 3507 3508 if (data->use_trace_keyword) 3509 synth_event_name = data->synth_event_name; 3510 else 3511 synth_event_name = data->action_name; 3512 3513 event = find_synth_event(synth_event_name); 3514 if (!event) { 3515 hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name)); 3516 return -EINVAL; 3517 } 3518 3519 event->ref++; 3520 3521 for (i = 0; i < data->n_params; i++) { 3522 char *p; 3523 3524 p = param = kstrdup(data->params[i], GFP_KERNEL); 3525 if (!param) { 3526 ret = -ENOMEM; 3527 goto err; 3528 } 3529 3530 system = strsep(¶m, "."); 3531 if (!param) { 3532 param = (char *)system; 3533 system = event_name = NULL; 3534 } else { 3535 event_name = strsep(¶m, "."); 3536 if (!param) { 3537 kfree(p); 3538 ret = -EINVAL; 3539 goto err; 3540 } 3541 } 3542 3543 if (param[0] == '$') 3544 hist_field = trace_action_find_var(hist_data, data, 3545 system, event_name, 3546 param); 3547 else 3548 hist_field = trace_action_create_field_var(hist_data, 3549 data, 3550 system, 3551 event_name, 3552 param); 3553 3554 if (!hist_field) { 3555 kfree(p); 3556 ret = -EINVAL; 3557 goto err; 3558 } 3559 3560 if (check_synth_field(event, hist_field, field_pos) == 0) { 3561 var_ref = create_var_ref(hist_data, hist_field, 3562 system, event_name); 3563 if (!var_ref) { 3564 kfree(p); 3565 ret = -ENOMEM; 3566 goto err; 3567 } 3568 3569 var_ref_idx = find_var_ref_idx(hist_data, var_ref); 3570 if (WARN_ON(var_ref_idx < 0)) { 3571 ret = var_ref_idx; 3572 goto err; 3573 } 3574 3575 data->var_ref_idx[i] = var_ref_idx; 3576 3577 field_pos++; 3578 kfree(p); 3579 continue; 3580 } 3581 3582 hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param)); 3583 kfree(p); 3584 ret = -EINVAL; 3585 goto err; 3586 } 3587 3588 if (field_pos != event->n_fields) { 3589 hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, 
errpos(event->name)); 3590 ret = -EINVAL; 3591 goto err; 3592 } 3593 3594 data->synth_event = event; 3595 out: 3596 return ret; 3597 err: 3598 event->ref--; 3599 3600 goto out; 3601 } 3602 3603 static int action_create(struct hist_trigger_data *hist_data, 3604 struct action_data *data) 3605 { 3606 struct trace_event_file *file = hist_data->event_file; 3607 struct trace_array *tr = file->tr; 3608 struct track_data *track_data; 3609 struct field_var *field_var; 3610 unsigned int i; 3611 char *param; 3612 int ret = 0; 3613 3614 if (data->action == ACTION_TRACE) 3615 return trace_action_create(hist_data, data); 3616 3617 if (data->action == ACTION_SNAPSHOT) { 3618 track_data = track_data_alloc(hist_data->key_size, data, hist_data); 3619 if (IS_ERR(track_data)) { 3620 ret = PTR_ERR(track_data); 3621 goto out; 3622 } 3623 3624 ret = tracing_snapshot_cond_enable(file->tr, track_data, 3625 cond_snapshot_update); 3626 if (ret) 3627 track_data_free(track_data); 3628 3629 goto out; 3630 } 3631 3632 if (data->action == ACTION_SAVE) { 3633 if (hist_data->n_save_vars) { 3634 ret = -EEXIST; 3635 hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0); 3636 goto out; 3637 } 3638 3639 for (i = 0; i < data->n_params; i++) { 3640 param = kstrdup(data->params[i], GFP_KERNEL); 3641 if (!param) { 3642 ret = -ENOMEM; 3643 goto out; 3644 } 3645 3646 field_var = create_target_field_var(hist_data, NULL, NULL, param); 3647 if (IS_ERR(field_var)) { 3648 hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL, 3649 errpos(param)); 3650 ret = PTR_ERR(field_var); 3651 kfree(param); 3652 goto out; 3653 } 3654 3655 hist_data->save_vars[hist_data->n_save_vars++] = field_var; 3656 if (field_var->val->flags & HIST_FIELD_FL_STRING) 3657 hist_data->n_save_var_str++; 3658 kfree(param); 3659 } 3660 } 3661 out: 3662 return ret; 3663 } 3664 3665 static int onmatch_create(struct hist_trigger_data *hist_data, 3666 struct action_data *data) 3667 { 3668 return action_create(hist_data, data); 3669 } 3670 3671 static struct action_data *onmatch_parse(struct trace_array *tr, char *str) 3672 { 3673 char *match_event, *match_event_system; 3674 struct action_data *data; 3675 int ret = -EINVAL; 3676 3677 data = kzalloc(sizeof(*data), GFP_KERNEL); 3678 if (!data) 3679 return ERR_PTR(-ENOMEM); 3680 3681 match_event = strsep(&str, ")"); 3682 if (!match_event || !str) { 3683 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event)); 3684 goto free; 3685 } 3686 3687 match_event_system = strsep(&match_event, "."); 3688 if (!match_event) { 3689 hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system)); 3690 goto free; 3691 } 3692 3693 if (IS_ERR(event_file(tr, match_event_system, match_event))) { 3694 hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event)); 3695 goto free; 3696 } 3697 3698 data->match_data.event = kstrdup(match_event, GFP_KERNEL); 3699 if (!data->match_data.event) { 3700 ret = -ENOMEM; 3701 goto free; 3702 } 3703 3704 data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL); 3705 if (!data->match_data.event_system) { 3706 ret = -ENOMEM; 3707 goto free; 3708 } 3709 3710 ret = action_parse(tr, str, data, HANDLER_ONMATCH); 3711 if (ret) 3712 goto free; 3713 out: 3714 return data; 3715 free: 3716 onmatch_destroy(data); 3717 data = ERR_PTR(ret); 3718 goto out; 3719 } 3720 3721 static int create_hitcount_val(struct hist_trigger_data *hist_data) 3722 { 3723 hist_data->fields[HITCOUNT_IDX] = 3724 create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL); 3725 if (!hist_data->fields[HITCOUNT_IDX]) 3726 return 
-ENOMEM; 3727 3728 hist_data->n_vals++; 3729 hist_data->n_fields++; 3730 3731 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX)) 3732 return -EINVAL; 3733 3734 return 0; 3735 } 3736 3737 static int __create_val_field(struct hist_trigger_data *hist_data, 3738 unsigned int val_idx, 3739 struct trace_event_file *file, 3740 char *var_name, char *field_str, 3741 unsigned long flags) 3742 { 3743 struct hist_field *hist_field; 3744 int ret = 0; 3745 3746 hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0); 3747 if (IS_ERR(hist_field)) { 3748 ret = PTR_ERR(hist_field); 3749 goto out; 3750 } 3751 3752 hist_data->fields[val_idx] = hist_field; 3753 3754 ++hist_data->n_vals; 3755 ++hist_data->n_fields; 3756 3757 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX)) 3758 ret = -EINVAL; 3759 out: 3760 return ret; 3761 } 3762 3763 static int create_val_field(struct hist_trigger_data *hist_data, 3764 unsigned int val_idx, 3765 struct trace_event_file *file, 3766 char *field_str) 3767 { 3768 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX)) 3769 return -EINVAL; 3770 3771 return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0); 3772 } 3773 3774 static const char *no_comm = "(no comm)"; 3775 3776 static u64 hist_field_execname(struct hist_field *hist_field, 3777 struct tracing_map_elt *elt, 3778 struct trace_buffer *buffer, 3779 struct ring_buffer_event *rbe, 3780 void *event) 3781 { 3782 struct hist_elt_data *elt_data; 3783 3784 if (WARN_ON_ONCE(!elt)) 3785 return (u64)(unsigned long)no_comm; 3786 3787 elt_data = elt->private_data; 3788 3789 if (WARN_ON_ONCE(!elt_data->comm)) 3790 return (u64)(unsigned long)no_comm; 3791 3792 return (u64)(unsigned long)(elt_data->comm); 3793 } 3794 3795 /* Convert a var that points to common_pid.execname to a string */ 3796 static void update_var_execname(struct hist_field *hist_field) 3797 { 3798 hist_field->flags = HIST_FIELD_FL_STRING | HIST_FIELD_FL_VAR | 3799 HIST_FIELD_FL_EXECNAME; 3800 hist_field->size = MAX_FILTER_STR_VAL; 3801 hist_field->is_signed = 0; 3802 3803 kfree_const(hist_field->type); 3804 hist_field->type = "char[]"; 3805 3806 hist_field->fn = hist_field_execname; 3807 } 3808 3809 static int create_var_field(struct hist_trigger_data *hist_data, 3810 unsigned int val_idx, 3811 struct trace_event_file *file, 3812 char *var_name, char *expr_str) 3813 { 3814 struct trace_array *tr = hist_data->event_file->tr; 3815 unsigned long flags = 0; 3816 int ret; 3817 3818 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX)) 3819 return -EINVAL; 3820 3821 if (find_var(hist_data, file, var_name) && !hist_data->remove) { 3822 hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name)); 3823 return -EINVAL; 3824 } 3825 3826 flags |= HIST_FIELD_FL_VAR; 3827 hist_data->n_vars++; 3828 if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX)) 3829 return -EINVAL; 3830 3831 ret = __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags); 3832 3833 if (!ret && hist_data->fields[val_idx]->flags & HIST_FIELD_FL_EXECNAME) 3834 update_var_execname(hist_data->fields[val_idx]); 3835 3836 if (!ret && hist_data->fields[val_idx]->flags & HIST_FIELD_FL_STRING) 3837 hist_data->fields[val_idx]->var_str_idx = hist_data->n_var_str++; 3838 3839 return ret; 3840 } 3841 3842 static int create_val_fields(struct hist_trigger_data *hist_data, 3843 struct trace_event_file *file) 3844 { 3845 char *fields_str, *field_str; 3846 unsigned int i, j = 1; 3847 int ret; 3848 3849 ret = create_hitcount_val(hist_data); 3850 if 
(ret) 3851 goto out; 3852 3853 fields_str = hist_data->attrs->vals_str; 3854 if (!fields_str) 3855 goto out; 3856 3857 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX && 3858 j < TRACING_MAP_VALS_MAX; i++) { 3859 field_str = strsep(&fields_str, ","); 3860 if (!field_str) 3861 break; 3862 3863 if (strcmp(field_str, "hitcount") == 0) 3864 continue; 3865 3866 ret = create_val_field(hist_data, j++, file, field_str); 3867 if (ret) 3868 goto out; 3869 } 3870 3871 if (fields_str && (strcmp(fields_str, "hitcount") != 0)) 3872 ret = -EINVAL; 3873 out: 3874 return ret; 3875 } 3876 3877 static int create_key_field(struct hist_trigger_data *hist_data, 3878 unsigned int key_idx, 3879 unsigned int key_offset, 3880 struct trace_event_file *file, 3881 char *field_str) 3882 { 3883 struct trace_array *tr = hist_data->event_file->tr; 3884 struct hist_field *hist_field = NULL; 3885 unsigned long flags = 0; 3886 unsigned int key_size; 3887 int ret = 0; 3888 3889 if (WARN_ON(key_idx >= HIST_FIELDS_MAX)) 3890 return -EINVAL; 3891 3892 flags |= HIST_FIELD_FL_KEY; 3893 3894 if (strcmp(field_str, "stacktrace") == 0) { 3895 flags |= HIST_FIELD_FL_STACKTRACE; 3896 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH; 3897 hist_field = create_hist_field(hist_data, NULL, flags, NULL); 3898 } else { 3899 hist_field = parse_expr(hist_data, file, field_str, flags, 3900 NULL, 0); 3901 if (IS_ERR(hist_field)) { 3902 ret = PTR_ERR(hist_field); 3903 goto out; 3904 } 3905 3906 if (field_has_hist_vars(hist_field, 0)) { 3907 hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str)); 3908 destroy_hist_field(hist_field, 0); 3909 ret = -EINVAL; 3910 goto out; 3911 } 3912 3913 key_size = hist_field->size; 3914 } 3915 3916 hist_data->fields[key_idx] = hist_field; 3917 3918 key_size = ALIGN(key_size, sizeof(u64)); 3919 hist_data->fields[key_idx]->size = key_size; 3920 hist_data->fields[key_idx]->offset = key_offset; 3921 3922 hist_data->key_size += key_size; 3923 3924 if (hist_data->key_size > HIST_KEY_SIZE_MAX) { 3925 ret = -EINVAL; 3926 goto out; 3927 } 3928 3929 hist_data->n_keys++; 3930 hist_data->n_fields++; 3931 3932 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX)) 3933 return -EINVAL; 3934 3935 ret = key_size; 3936 out: 3937 return ret; 3938 } 3939 3940 static int create_key_fields(struct hist_trigger_data *hist_data, 3941 struct trace_event_file *file) 3942 { 3943 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals; 3944 char *fields_str, *field_str; 3945 int ret = -EINVAL; 3946 3947 fields_str = hist_data->attrs->keys_str; 3948 if (!fields_str) 3949 goto out; 3950 3951 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) { 3952 field_str = strsep(&fields_str, ","); 3953 if (!field_str) 3954 break; 3955 ret = create_key_field(hist_data, i, key_offset, 3956 file, field_str); 3957 if (ret < 0) 3958 goto out; 3959 key_offset += ret; 3960 } 3961 if (fields_str) { 3962 ret = -EINVAL; 3963 goto out; 3964 } 3965 ret = 0; 3966 out: 3967 return ret; 3968 } 3969 3970 static int create_var_fields(struct hist_trigger_data *hist_data, 3971 struct trace_event_file *file) 3972 { 3973 unsigned int i, j = hist_data->n_vals; 3974 int ret = 0; 3975 3976 unsigned int n_vars = hist_data->attrs->var_defs.n_vars; 3977 3978 for (i = 0; i < n_vars; i++) { 3979 char *var_name = hist_data->attrs->var_defs.name[i]; 3980 char *expr = hist_data->attrs->var_defs.expr[i]; 3981 3982 ret = create_var_field(hist_data, j++, file, var_name, expr); 3983 if (ret) 3984 goto out; 3985 } 3986 out: 3987 return ret; 3988 } 3989 3990 static void 
free_var_defs(struct hist_trigger_data *hist_data) 3991 { 3992 unsigned int i; 3993 3994 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) { 3995 kfree(hist_data->attrs->var_defs.name[i]); 3996 kfree(hist_data->attrs->var_defs.expr[i]); 3997 } 3998 3999 hist_data->attrs->var_defs.n_vars = 0; 4000 } 4001 4002 static int parse_var_defs(struct hist_trigger_data *hist_data) 4003 { 4004 struct trace_array *tr = hist_data->event_file->tr; 4005 char *s, *str, *var_name, *field_str; 4006 unsigned int i, j, n_vars = 0; 4007 int ret = 0; 4008 4009 for (i = 0; i < hist_data->attrs->n_assignments; i++) { 4010 str = hist_data->attrs->assignment_str[i]; 4011 for (j = 0; j < TRACING_MAP_VARS_MAX; j++) { 4012 field_str = strsep(&str, ","); 4013 if (!field_str) 4014 break; 4015 4016 var_name = strsep(&field_str, "="); 4017 if (!var_name || !field_str) { 4018 hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT, 4019 errpos(var_name)); 4020 ret = -EINVAL; 4021 goto free; 4022 } 4023 4024 if (n_vars == TRACING_MAP_VARS_MAX) { 4025 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name)); 4026 ret = -EINVAL; 4027 goto free; 4028 } 4029 4030 s = kstrdup(var_name, GFP_KERNEL); 4031 if (!s) { 4032 ret = -ENOMEM; 4033 goto free; 4034 } 4035 hist_data->attrs->var_defs.name[n_vars] = s; 4036 4037 s = kstrdup(field_str, GFP_KERNEL); 4038 if (!s) { 4039 ret = -ENOMEM; 4040 goto free; 4041 } 4042 hist_data->attrs->var_defs.expr[n_vars++] = s; 4043 4044 hist_data->attrs->var_defs.n_vars = n_vars; 4045 } 4046 } 4047 4048 return ret; 4049 free: 4050 free_var_defs(hist_data); 4051 4052 return ret; 4053 } 4054 4055 static int create_hist_fields(struct hist_trigger_data *hist_data, 4056 struct trace_event_file *file) 4057 { 4058 int ret; 4059 4060 ret = parse_var_defs(hist_data); 4061 if (ret) 4062 goto out; 4063 4064 ret = create_val_fields(hist_data, file); 4065 if (ret) 4066 goto out; 4067 4068 ret = create_var_fields(hist_data, file); 4069 if (ret) 4070 goto out; 4071 4072 ret = create_key_fields(hist_data, file); 4073 if (ret) 4074 goto out; 4075 out: 4076 free_var_defs(hist_data); 4077 4078 return ret; 4079 } 4080 4081 static int is_descending(struct trace_array *tr, const char *str) 4082 { 4083 if (!str) 4084 return 0; 4085 4086 if (strcmp(str, "descending") == 0) 4087 return 1; 4088 4089 if (strcmp(str, "ascending") == 0) 4090 return 0; 4091 4092 hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str)); 4093 4094 return -EINVAL; 4095 } 4096 4097 static int create_sort_keys(struct hist_trigger_data *hist_data) 4098 { 4099 struct trace_array *tr = hist_data->event_file->tr; 4100 char *fields_str = hist_data->attrs->sort_key_str; 4101 struct tracing_map_sort_key *sort_key; 4102 int descending, ret = 0; 4103 unsigned int i, j, k; 4104 4105 hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */ 4106 4107 if (!fields_str) 4108 goto out; 4109 4110 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) { 4111 struct hist_field *hist_field; 4112 char *field_str, *field_name; 4113 const char *test_name; 4114 4115 sort_key = &hist_data->sort_keys[i]; 4116 4117 field_str = strsep(&fields_str, ","); 4118 if (!field_str) 4119 break; 4120 4121 if (!*field_str) { 4122 ret = -EINVAL; 4123 hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort=")); 4124 break; 4125 } 4126 4127 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) { 4128 hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort=")); 4129 ret = -EINVAL; 4130 break; 4131 } 4132 4133 field_name = strsep(&field_str, "."); 4134 if (!field_name || !*field_name) 
{ 4135 ret = -EINVAL; 4136 hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort=")); 4137 break; 4138 } 4139 4140 if (strcmp(field_name, "hitcount") == 0) { 4141 descending = is_descending(tr, field_str); 4142 if (descending < 0) { 4143 ret = descending; 4144 break; 4145 } 4146 sort_key->descending = descending; 4147 continue; 4148 } 4149 4150 for (j = 1, k = 1; j < hist_data->n_fields; j++) { 4151 unsigned int idx; 4152 4153 hist_field = hist_data->fields[j]; 4154 if (hist_field->flags & HIST_FIELD_FL_VAR) 4155 continue; 4156 4157 idx = k++; 4158 4159 test_name = hist_field_name(hist_field, 0); 4160 4161 if (strcmp(field_name, test_name) == 0) { 4162 sort_key->field_idx = idx; 4163 descending = is_descending(tr, field_str); 4164 if (descending < 0) { 4165 ret = descending; 4166 goto out; 4167 } 4168 sort_key->descending = descending; 4169 break; 4170 } 4171 } 4172 if (j == hist_data->n_fields) { 4173 ret = -EINVAL; 4174 hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name)); 4175 break; 4176 } 4177 } 4178 4179 hist_data->n_sort_keys = i; 4180 out: 4181 return ret; 4182 } 4183 4184 static void destroy_actions(struct hist_trigger_data *hist_data) 4185 { 4186 unsigned int i; 4187 4188 for (i = 0; i < hist_data->n_actions; i++) { 4189 struct action_data *data = hist_data->actions[i]; 4190 4191 if (data->handler == HANDLER_ONMATCH) 4192 onmatch_destroy(data); 4193 else if (data->handler == HANDLER_ONMAX || 4194 data->handler == HANDLER_ONCHANGE) 4195 track_data_destroy(hist_data, data); 4196 else 4197 kfree(data); 4198 } 4199 } 4200 4201 static int parse_actions(struct hist_trigger_data *hist_data) 4202 { 4203 struct trace_array *tr = hist_data->event_file->tr; 4204 struct action_data *data; 4205 unsigned int i; 4206 int ret = 0; 4207 char *str; 4208 int len; 4209 4210 for (i = 0; i < hist_data->attrs->n_actions; i++) { 4211 str = hist_data->attrs->action_str[i]; 4212 4213 if ((len = str_has_prefix(str, "onmatch("))) { 4214 char *action_str = str + len; 4215 4216 data = onmatch_parse(tr, action_str); 4217 if (IS_ERR(data)) { 4218 ret = PTR_ERR(data); 4219 break; 4220 } 4221 } else if ((len = str_has_prefix(str, "onmax("))) { 4222 char *action_str = str + len; 4223 4224 data = track_data_parse(hist_data, action_str, 4225 HANDLER_ONMAX); 4226 if (IS_ERR(data)) { 4227 ret = PTR_ERR(data); 4228 break; 4229 } 4230 } else if ((len = str_has_prefix(str, "onchange("))) { 4231 char *action_str = str + len; 4232 4233 data = track_data_parse(hist_data, action_str, 4234 HANDLER_ONCHANGE); 4235 if (IS_ERR(data)) { 4236 ret = PTR_ERR(data); 4237 break; 4238 } 4239 } else { 4240 ret = -EINVAL; 4241 break; 4242 } 4243 4244 hist_data->actions[hist_data->n_actions++] = data; 4245 } 4246 4247 return ret; 4248 } 4249 4250 static int create_actions(struct hist_trigger_data *hist_data) 4251 { 4252 struct action_data *data; 4253 unsigned int i; 4254 int ret = 0; 4255 4256 for (i = 0; i < hist_data->attrs->n_actions; i++) { 4257 data = hist_data->actions[i]; 4258 4259 if (data->handler == HANDLER_ONMATCH) { 4260 ret = onmatch_create(hist_data, data); 4261 if (ret) 4262 break; 4263 } else if (data->handler == HANDLER_ONMAX || 4264 data->handler == HANDLER_ONCHANGE) { 4265 ret = track_data_create(hist_data, data); 4266 if (ret) 4267 break; 4268 } else { 4269 ret = -EINVAL; 4270 break; 4271 } 4272 } 4273 4274 return ret; 4275 } 4276 4277 static void print_actions(struct seq_file *m, 4278 struct hist_trigger_data *hist_data, 4279 struct tracing_map_elt *elt) 4280 { 4281 unsigned int i; 4282 4283 for (i = 0; i < 
hist_data->n_actions; i++) { 4284 struct action_data *data = hist_data->actions[i]; 4285 4286 if (data->action == ACTION_SNAPSHOT) 4287 continue; 4288 4289 if (data->handler == HANDLER_ONMAX || 4290 data->handler == HANDLER_ONCHANGE) 4291 track_data_print(m, hist_data, elt, data); 4292 } 4293 } 4294 4295 static void print_action_spec(struct seq_file *m, 4296 struct hist_trigger_data *hist_data, 4297 struct action_data *data) 4298 { 4299 unsigned int i; 4300 4301 if (data->action == ACTION_SAVE) { 4302 for (i = 0; i < hist_data->n_save_vars; i++) { 4303 seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name); 4304 if (i < hist_data->n_save_vars - 1) 4305 seq_puts(m, ","); 4306 } 4307 } else if (data->action == ACTION_TRACE) { 4308 if (data->use_trace_keyword) 4309 seq_printf(m, "%s", data->synth_event_name); 4310 for (i = 0; i < data->n_params; i++) { 4311 if (i || data->use_trace_keyword) 4312 seq_puts(m, ","); 4313 seq_printf(m, "%s", data->params[i]); 4314 } 4315 } 4316 } 4317 4318 static void print_track_data_spec(struct seq_file *m, 4319 struct hist_trigger_data *hist_data, 4320 struct action_data *data) 4321 { 4322 if (data->handler == HANDLER_ONMAX) 4323 seq_puts(m, ":onmax("); 4324 else if (data->handler == HANDLER_ONCHANGE) 4325 seq_puts(m, ":onchange("); 4326 seq_printf(m, "%s", data->track_data.var_str); 4327 seq_printf(m, ").%s(", data->action_name); 4328 4329 print_action_spec(m, hist_data, data); 4330 4331 seq_puts(m, ")"); 4332 } 4333 4334 static void print_onmatch_spec(struct seq_file *m, 4335 struct hist_trigger_data *hist_data, 4336 struct action_data *data) 4337 { 4338 seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system, 4339 data->match_data.event); 4340 4341 seq_printf(m, "%s(", data->action_name); 4342 4343 print_action_spec(m, hist_data, data); 4344 4345 seq_puts(m, ")"); 4346 } 4347 4348 static bool actions_match(struct hist_trigger_data *hist_data, 4349 struct hist_trigger_data *hist_data_test) 4350 { 4351 unsigned int i, j; 4352 4353 if (hist_data->n_actions != hist_data_test->n_actions) 4354 return false; 4355 4356 for (i = 0; i < hist_data->n_actions; i++) { 4357 struct action_data *data = hist_data->actions[i]; 4358 struct action_data *data_test = hist_data_test->actions[i]; 4359 char *action_name, *action_name_test; 4360 4361 if (data->handler != data_test->handler) 4362 return false; 4363 if (data->action != data_test->action) 4364 return false; 4365 4366 if (data->n_params != data_test->n_params) 4367 return false; 4368 4369 for (j = 0; j < data->n_params; j++) { 4370 if (strcmp(data->params[j], data_test->params[j]) != 0) 4371 return false; 4372 } 4373 4374 if (data->use_trace_keyword) 4375 action_name = data->synth_event_name; 4376 else 4377 action_name = data->action_name; 4378 4379 if (data_test->use_trace_keyword) 4380 action_name_test = data_test->synth_event_name; 4381 else 4382 action_name_test = data_test->action_name; 4383 4384 if (strcmp(action_name, action_name_test) != 0) 4385 return false; 4386 4387 if (data->handler == HANDLER_ONMATCH) { 4388 if (strcmp(data->match_data.event_system, 4389 data_test->match_data.event_system) != 0) 4390 return false; 4391 if (strcmp(data->match_data.event, 4392 data_test->match_data.event) != 0) 4393 return false; 4394 } else if (data->handler == HANDLER_ONMAX || 4395 data->handler == HANDLER_ONCHANGE) { 4396 if (strcmp(data->track_data.var_str, 4397 data_test->track_data.var_str) != 0) 4398 return false; 4399 } 4400 } 4401 4402 return true; 4403 } 4404 4405 4406 static void 
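/*
 * Reconstruct the onmatch()/onmax()/onchange() action clauses when the
 * trigger definition is printed back, e.g. via the event's 'trigger' file.
 */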
print_actions_spec(struct seq_file *m, 4407 struct hist_trigger_data *hist_data) 4408 { 4409 unsigned int i; 4410 4411 for (i = 0; i < hist_data->n_actions; i++) { 4412 struct action_data *data = hist_data->actions[i]; 4413 4414 if (data->handler == HANDLER_ONMATCH) 4415 print_onmatch_spec(m, hist_data, data); 4416 else if (data->handler == HANDLER_ONMAX || 4417 data->handler == HANDLER_ONCHANGE) 4418 print_track_data_spec(m, hist_data, data); 4419 } 4420 } 4421 4422 static void destroy_field_var_hists(struct hist_trigger_data *hist_data) 4423 { 4424 unsigned int i; 4425 4426 for (i = 0; i < hist_data->n_field_var_hists; i++) { 4427 kfree(hist_data->field_var_hists[i]->cmd); 4428 kfree(hist_data->field_var_hists[i]); 4429 } 4430 } 4431 4432 static void destroy_hist_data(struct hist_trigger_data *hist_data) 4433 { 4434 if (!hist_data) 4435 return; 4436 4437 destroy_hist_trigger_attrs(hist_data->attrs); 4438 destroy_hist_fields(hist_data); 4439 tracing_map_destroy(hist_data->map); 4440 4441 destroy_actions(hist_data); 4442 destroy_field_vars(hist_data); 4443 destroy_field_var_hists(hist_data); 4444 4445 kfree(hist_data); 4446 } 4447 4448 static int create_tracing_map_fields(struct hist_trigger_data *hist_data) 4449 { 4450 struct tracing_map *map = hist_data->map; 4451 struct ftrace_event_field *field; 4452 struct hist_field *hist_field; 4453 int i, idx = 0; 4454 4455 for_each_hist_field(i, hist_data) { 4456 hist_field = hist_data->fields[i]; 4457 if (hist_field->flags & HIST_FIELD_FL_KEY) { 4458 tracing_map_cmp_fn_t cmp_fn; 4459 4460 field = hist_field->field; 4461 4462 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE) 4463 cmp_fn = tracing_map_cmp_none; 4464 else if (!field) 4465 cmp_fn = tracing_map_cmp_num(hist_field->size, 4466 hist_field->is_signed); 4467 else if (is_string_field(field)) 4468 cmp_fn = tracing_map_cmp_string; 4469 else 4470 cmp_fn = tracing_map_cmp_num(field->size, 4471 field->is_signed); 4472 idx = tracing_map_add_key_field(map, 4473 hist_field->offset, 4474 cmp_fn); 4475 } else if (!(hist_field->flags & HIST_FIELD_FL_VAR)) 4476 idx = tracing_map_add_sum_field(map); 4477 4478 if (idx < 0) 4479 return idx; 4480 4481 if (hist_field->flags & HIST_FIELD_FL_VAR) { 4482 idx = tracing_map_add_var(map); 4483 if (idx < 0) 4484 return idx; 4485 hist_field->var.idx = idx; 4486 hist_field->var.hist_data = hist_data; 4487 } 4488 } 4489 4490 return 0; 4491 } 4492 4493 static struct hist_trigger_data * 4494 create_hist_data(unsigned int map_bits, 4495 struct hist_trigger_attrs *attrs, 4496 struct trace_event_file *file, 4497 bool remove) 4498 { 4499 const struct tracing_map_ops *map_ops = NULL; 4500 struct hist_trigger_data *hist_data; 4501 int ret = 0; 4502 4503 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL); 4504 if (!hist_data) 4505 return ERR_PTR(-ENOMEM); 4506 4507 hist_data->attrs = attrs; 4508 hist_data->remove = remove; 4509 hist_data->event_file = file; 4510 4511 ret = parse_actions(hist_data); 4512 if (ret) 4513 goto free; 4514 4515 ret = create_hist_fields(hist_data, file); 4516 if (ret) 4517 goto free; 4518 4519 ret = create_sort_keys(hist_data); 4520 if (ret) 4521 goto free; 4522 4523 map_ops = &hist_trigger_elt_data_ops; 4524 4525 hist_data->map = tracing_map_create(map_bits, hist_data->key_size, 4526 map_ops, hist_data); 4527 if (IS_ERR(hist_data->map)) { 4528 ret = PTR_ERR(hist_data->map); 4529 hist_data->map = NULL; 4530 goto free; 4531 } 4532 4533 ret = create_tracing_map_fields(hist_data); 4534 if (ret) 4535 goto free; 4536 out: 4537 return hist_data; 4538 
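	/*
	 * Error path: clear hist_data->attrs before tearing down the partially
	 * constructed hist_data, since on failure the caller remains
	 * responsible for freeing attrs.
	 */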
free: 4539 hist_data->attrs = NULL; 4540 4541 destroy_hist_data(hist_data); 4542 4543 hist_data = ERR_PTR(ret); 4544 4545 goto out; 4546 } 4547 4548 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data, 4549 struct tracing_map_elt *elt, 4550 struct trace_buffer *buffer, void *rec, 4551 struct ring_buffer_event *rbe, 4552 u64 *var_ref_vals) 4553 { 4554 struct hist_elt_data *elt_data; 4555 struct hist_field *hist_field; 4556 unsigned int i, var_idx; 4557 u64 hist_val; 4558 4559 elt_data = elt->private_data; 4560 elt_data->var_ref_vals = var_ref_vals; 4561 4562 for_each_hist_val_field(i, hist_data) { 4563 hist_field = hist_data->fields[i]; 4564 hist_val = hist_field->fn(hist_field, elt, buffer, rbe, rec); 4565 if (hist_field->flags & HIST_FIELD_FL_VAR) { 4566 var_idx = hist_field->var.idx; 4567 4568 if (hist_field->flags & HIST_FIELD_FL_STRING) { 4569 unsigned int str_start, var_str_idx, idx; 4570 char *str, *val_str; 4571 4572 str_start = hist_data->n_field_var_str + 4573 hist_data->n_save_var_str; 4574 var_str_idx = hist_field->var_str_idx; 4575 idx = str_start + var_str_idx; 4576 4577 str = elt_data->field_var_str[idx]; 4578 val_str = (char *)(uintptr_t)hist_val; 4579 strscpy(str, val_str, STR_VAR_LEN_MAX); 4580 4581 hist_val = (u64)(uintptr_t)str; 4582 } 4583 tracing_map_set_var(elt, var_idx, hist_val); 4584 continue; 4585 } 4586 tracing_map_update_sum(elt, i, hist_val); 4587 } 4588 4589 for_each_hist_key_field(i, hist_data) { 4590 hist_field = hist_data->fields[i]; 4591 if (hist_field->flags & HIST_FIELD_FL_VAR) { 4592 hist_val = hist_field->fn(hist_field, elt, buffer, rbe, rec); 4593 var_idx = hist_field->var.idx; 4594 tracing_map_set_var(elt, var_idx, hist_val); 4595 } 4596 } 4597 4598 update_field_vars(hist_data, elt, buffer, rbe, rec); 4599 } 4600 4601 static inline void add_to_key(char *compound_key, void *key, 4602 struct hist_field *key_field, void *rec) 4603 { 4604 size_t size = key_field->size; 4605 4606 if (key_field->flags & HIST_FIELD_FL_STRING) { 4607 struct ftrace_event_field *field; 4608 4609 field = key_field->field; 4610 if (field->filter_type == FILTER_DYN_STRING) 4611 size = *(u32 *)(rec + field->offset) >> 16; 4612 else if (field->filter_type == FILTER_STATIC_STRING) 4613 size = field->size; 4614 4615 /* ensure NULL-termination */ 4616 if (size > key_field->size - 1) 4617 size = key_field->size - 1; 4618 4619 strncpy(compound_key + key_field->offset, (char *)key, size); 4620 } else 4621 memcpy(compound_key + key_field->offset, key, size); 4622 } 4623 4624 static void 4625 hist_trigger_actions(struct hist_trigger_data *hist_data, 4626 struct tracing_map_elt *elt, 4627 struct trace_buffer *buffer, void *rec, 4628 struct ring_buffer_event *rbe, void *key, 4629 u64 *var_ref_vals) 4630 { 4631 struct action_data *data; 4632 unsigned int i; 4633 4634 for (i = 0; i < hist_data->n_actions; i++) { 4635 data = hist_data->actions[i]; 4636 data->fn(hist_data, elt, buffer, rec, rbe, key, data, var_ref_vals); 4637 } 4638 } 4639 4640 static void event_hist_trigger(struct event_trigger_data *data, 4641 struct trace_buffer *buffer, void *rec, 4642 struct ring_buffer_event *rbe) 4643 { 4644 struct hist_trigger_data *hist_data = data->private_data; 4645 bool use_compound_key = (hist_data->n_keys > 1); 4646 unsigned long entries[HIST_STACKTRACE_DEPTH]; 4647 u64 var_ref_vals[TRACING_MAP_VARS_MAX]; 4648 char compound_key[HIST_KEY_SIZE_MAX]; 4649 struct tracing_map_elt *elt = NULL; 4650 struct hist_field *key_field; 4651 u64 field_contents; 4652 void *key = NULL; 4653 
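	/*
	 * Per-event fast path: build the (possibly compound) key, insert or
	 * look up the tracing_map element, update sums and variables, then
	 * run any actions whose variable references resolve.
	 */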
unsigned int i; 4654 4655 memset(compound_key, 0, hist_data->key_size); 4656 4657 for_each_hist_key_field(i, hist_data) { 4658 key_field = hist_data->fields[i]; 4659 4660 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) { 4661 memset(entries, 0, HIST_STACKTRACE_SIZE); 4662 stack_trace_save(entries, HIST_STACKTRACE_DEPTH, 4663 HIST_STACKTRACE_SKIP); 4664 key = entries; 4665 } else { 4666 field_contents = key_field->fn(key_field, elt, buffer, rbe, rec); 4667 if (key_field->flags & HIST_FIELD_FL_STRING) { 4668 key = (void *)(unsigned long)field_contents; 4669 use_compound_key = true; 4670 } else 4671 key = (void *)&field_contents; 4672 } 4673 4674 if (use_compound_key) 4675 add_to_key(compound_key, key, key_field, rec); 4676 } 4677 4678 if (use_compound_key) 4679 key = compound_key; 4680 4681 if (hist_data->n_var_refs && 4682 !resolve_var_refs(hist_data, key, var_ref_vals, false)) 4683 return; 4684 4685 elt = tracing_map_insert(hist_data->map, key); 4686 if (!elt) 4687 return; 4688 4689 hist_trigger_elt_update(hist_data, elt, buffer, rec, rbe, var_ref_vals); 4690 4691 if (resolve_var_refs(hist_data, key, var_ref_vals, true)) 4692 hist_trigger_actions(hist_data, elt, buffer, rec, rbe, key, var_ref_vals); 4693 } 4694 4695 static void hist_trigger_stacktrace_print(struct seq_file *m, 4696 unsigned long *stacktrace_entries, 4697 unsigned int max_entries) 4698 { 4699 char str[KSYM_SYMBOL_LEN]; 4700 unsigned int spaces = 8; 4701 unsigned int i; 4702 4703 for (i = 0; i < max_entries; i++) { 4704 if (!stacktrace_entries[i]) 4705 return; 4706 4707 seq_printf(m, "%*c", 1 + spaces, ' '); 4708 sprint_symbol(str, stacktrace_entries[i]); 4709 seq_printf(m, "%s\n", str); 4710 } 4711 } 4712 4713 static void hist_trigger_print_key(struct seq_file *m, 4714 struct hist_trigger_data *hist_data, 4715 void *key, 4716 struct tracing_map_elt *elt) 4717 { 4718 struct hist_field *key_field; 4719 char str[KSYM_SYMBOL_LEN]; 4720 bool multiline = false; 4721 const char *field_name; 4722 unsigned int i; 4723 u64 uval; 4724 4725 seq_puts(m, "{ "); 4726 4727 for_each_hist_key_field(i, hist_data) { 4728 key_field = hist_data->fields[i]; 4729 4730 if (i > hist_data->n_vals) 4731 seq_puts(m, ", "); 4732 4733 field_name = hist_field_name(key_field, 0); 4734 4735 if (key_field->flags & HIST_FIELD_FL_HEX) { 4736 uval = *(u64 *)(key + key_field->offset); 4737 seq_printf(m, "%s: %llx", field_name, uval); 4738 } else if (key_field->flags & HIST_FIELD_FL_SYM) { 4739 uval = *(u64 *)(key + key_field->offset); 4740 sprint_symbol_no_offset(str, uval); 4741 seq_printf(m, "%s: [%llx] %-45s", field_name, 4742 uval, str); 4743 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) { 4744 uval = *(u64 *)(key + key_field->offset); 4745 sprint_symbol(str, uval); 4746 seq_printf(m, "%s: [%llx] %-55s", field_name, 4747 uval, str); 4748 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) { 4749 struct hist_elt_data *elt_data = elt->private_data; 4750 char *comm; 4751 4752 if (WARN_ON_ONCE(!elt_data)) 4753 return; 4754 4755 comm = elt_data->comm; 4756 4757 uval = *(u64 *)(key + key_field->offset); 4758 seq_printf(m, "%s: %-16s[%10llu]", field_name, 4759 comm, uval); 4760 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) { 4761 const char *syscall_name; 4762 4763 uval = *(u64 *)(key + key_field->offset); 4764 syscall_name = get_syscall_name(uval); 4765 if (!syscall_name) 4766 syscall_name = "unknown_syscall"; 4767 4768 seq_printf(m, "%s: %-30s[%3llu]", field_name, 4769 syscall_name, uval); 4770 } else if (key_field->flags & 
HIST_FIELD_FL_STACKTRACE) { 4771 seq_puts(m, "stacktrace:\n"); 4772 hist_trigger_stacktrace_print(m, 4773 key + key_field->offset, 4774 HIST_STACKTRACE_DEPTH); 4775 multiline = true; 4776 } else if (key_field->flags & HIST_FIELD_FL_LOG2) { 4777 seq_printf(m, "%s: ~ 2^%-2llu", field_name, 4778 *(u64 *)(key + key_field->offset)); 4779 } else if (key_field->flags & HIST_FIELD_FL_BUCKET) { 4780 unsigned long buckets = key_field->buckets; 4781 uval = *(u64 *)(key + key_field->offset); 4782 seq_printf(m, "%s: ~ %llu-%llu", field_name, 4783 uval, uval + buckets -1); 4784 } else if (key_field->flags & HIST_FIELD_FL_STRING) { 4785 seq_printf(m, "%s: %-50s", field_name, 4786 (char *)(key + key_field->offset)); 4787 } else { 4788 uval = *(u64 *)(key + key_field->offset); 4789 seq_printf(m, "%s: %10llu", field_name, uval); 4790 } 4791 } 4792 4793 if (!multiline) 4794 seq_puts(m, " "); 4795 4796 seq_puts(m, "}"); 4797 } 4798 4799 static void hist_trigger_entry_print(struct seq_file *m, 4800 struct hist_trigger_data *hist_data, 4801 void *key, 4802 struct tracing_map_elt *elt) 4803 { 4804 const char *field_name; 4805 unsigned int i; 4806 4807 hist_trigger_print_key(m, hist_data, key, elt); 4808 4809 seq_printf(m, " hitcount: %10llu", 4810 tracing_map_read_sum(elt, HITCOUNT_IDX)); 4811 4812 for (i = 1; i < hist_data->n_vals; i++) { 4813 field_name = hist_field_name(hist_data->fields[i], 0); 4814 4815 if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR || 4816 hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR) 4817 continue; 4818 4819 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) { 4820 seq_printf(m, " %s: %10llx", field_name, 4821 tracing_map_read_sum(elt, i)); 4822 } else { 4823 seq_printf(m, " %s: %10llu", field_name, 4824 tracing_map_read_sum(elt, i)); 4825 } 4826 } 4827 4828 print_actions(m, hist_data, elt); 4829 4830 seq_puts(m, "\n"); 4831 } 4832 4833 static int print_entries(struct seq_file *m, 4834 struct hist_trigger_data *hist_data) 4835 { 4836 struct tracing_map_sort_entry **sort_entries = NULL; 4837 struct tracing_map *map = hist_data->map; 4838 int i, n_entries; 4839 4840 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys, 4841 hist_data->n_sort_keys, 4842 &sort_entries); 4843 if (n_entries < 0) 4844 return n_entries; 4845 4846 for (i = 0; i < n_entries; i++) 4847 hist_trigger_entry_print(m, hist_data, 4848 sort_entries[i]->key, 4849 sort_entries[i]->elt); 4850 4851 tracing_map_destroy_sort_entries(sort_entries, n_entries); 4852 4853 return n_entries; 4854 } 4855 4856 static void hist_trigger_show(struct seq_file *m, 4857 struct event_trigger_data *data, int n) 4858 { 4859 struct hist_trigger_data *hist_data; 4860 int n_entries; 4861 4862 if (n > 0) 4863 seq_puts(m, "\n\n"); 4864 4865 seq_puts(m, "# event histogram\n#\n# trigger info: "); 4866 data->ops->print(m, data->ops, data); 4867 seq_puts(m, "#\n\n"); 4868 4869 hist_data = data->private_data; 4870 n_entries = print_entries(m, hist_data); 4871 if (n_entries < 0) 4872 n_entries = 0; 4873 4874 track_data_snapshot_print(m, hist_data); 4875 4876 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n", 4877 (u64)atomic64_read(&hist_data->map->hits), 4878 n_entries, (u64)atomic64_read(&hist_data->map->drops)); 4879 } 4880 4881 static int hist_show(struct seq_file *m, void *v) 4882 { 4883 struct event_trigger_data *data; 4884 struct trace_event_file *event_file; 4885 int n = 0, ret = 0; 4886 4887 mutex_lock(&event_mutex); 4888 4889 event_file = event_file_data(m->private); 4890 if (unlikely(!event_file)) { 4891 
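		/* no backing event file any more, nothing to show */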
ret = -ENODEV; 4892 goto out_unlock; 4893 } 4894 4895 list_for_each_entry(data, &event_file->triggers, list) { 4896 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) 4897 hist_trigger_show(m, data, n++); 4898 } 4899 4900 out_unlock: 4901 mutex_unlock(&event_mutex); 4902 4903 return ret; 4904 } 4905 4906 static int event_hist_open(struct inode *inode, struct file *file) 4907 { 4908 int ret; 4909 4910 ret = security_locked_down(LOCKDOWN_TRACEFS); 4911 if (ret) 4912 return ret; 4913 4914 return single_open(file, hist_show, file); 4915 } 4916 4917 const struct file_operations event_hist_fops = { 4918 .open = event_hist_open, 4919 .read = seq_read, 4920 .llseek = seq_lseek, 4921 .release = single_release, 4922 }; 4923 4924 #ifdef CONFIG_HIST_TRIGGERS_DEBUG 4925 static void hist_field_debug_show_flags(struct seq_file *m, 4926 unsigned long flags) 4927 { 4928 seq_puts(m, " flags:\n"); 4929 4930 if (flags & HIST_FIELD_FL_KEY) 4931 seq_puts(m, " HIST_FIELD_FL_KEY\n"); 4932 else if (flags & HIST_FIELD_FL_HITCOUNT) 4933 seq_puts(m, " VAL: HIST_FIELD_FL_HITCOUNT\n"); 4934 else if (flags & HIST_FIELD_FL_VAR) 4935 seq_puts(m, " HIST_FIELD_FL_VAR\n"); 4936 else if (flags & HIST_FIELD_FL_VAR_REF) 4937 seq_puts(m, " HIST_FIELD_FL_VAR_REF\n"); 4938 else 4939 seq_puts(m, " VAL: normal u64 value\n"); 4940 4941 if (flags & HIST_FIELD_FL_ALIAS) 4942 seq_puts(m, " HIST_FIELD_FL_ALIAS\n"); 4943 } 4944 4945 static int hist_field_debug_show(struct seq_file *m, 4946 struct hist_field *field, unsigned long flags) 4947 { 4948 if ((field->flags & flags) != flags) { 4949 seq_printf(m, "ERROR: bad flags - %lx\n", flags); 4950 return -EINVAL; 4951 } 4952 4953 hist_field_debug_show_flags(m, field->flags); 4954 if (field->field) 4955 seq_printf(m, " ftrace_event_field name: %s\n", 4956 field->field->name); 4957 4958 if (field->flags & HIST_FIELD_FL_VAR) { 4959 seq_printf(m, " var.name: %s\n", field->var.name); 4960 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", 4961 field->var.idx); 4962 } 4963 4964 if (field->flags & HIST_FIELD_FL_ALIAS) 4965 seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n", 4966 field->var_ref_idx); 4967 4968 if (field->flags & HIST_FIELD_FL_VAR_REF) { 4969 seq_printf(m, " name: %s\n", field->name); 4970 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", 4971 field->var.idx); 4972 seq_printf(m, " var.hist_data: %p\n", field->var.hist_data); 4973 seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n", 4974 field->var_ref_idx); 4975 if (field->system) 4976 seq_printf(m, " system: %s\n", field->system); 4977 if (field->event_name) 4978 seq_printf(m, " event_name: %s\n", field->event_name); 4979 } 4980 4981 seq_printf(m, " type: %s\n", field->type); 4982 seq_printf(m, " size: %u\n", field->size); 4983 seq_printf(m, " is_signed: %u\n", field->is_signed); 4984 4985 return 0; 4986 } 4987 4988 static int field_var_debug_show(struct seq_file *m, 4989 struct field_var *field_var, unsigned int i, 4990 bool save_vars) 4991 { 4992 const char *vars_name = save_vars ? 
"save_vars" : "field_vars"; 4993 struct hist_field *field; 4994 int ret = 0; 4995 4996 seq_printf(m, "\n hist_data->%s[%d]:\n", vars_name, i); 4997 4998 field = field_var->var; 4999 5000 seq_printf(m, "\n %s[%d].var:\n", vars_name, i); 5001 5002 hist_field_debug_show_flags(m, field->flags); 5003 seq_printf(m, " var.name: %s\n", field->var.name); 5004 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", 5005 field->var.idx); 5006 5007 field = field_var->val; 5008 5009 seq_printf(m, "\n %s[%d].val:\n", vars_name, i); 5010 if (field->field) 5011 seq_printf(m, " ftrace_event_field name: %s\n", 5012 field->field->name); 5013 else { 5014 ret = -EINVAL; 5015 goto out; 5016 } 5017 5018 seq_printf(m, " type: %s\n", field->type); 5019 seq_printf(m, " size: %u\n", field->size); 5020 seq_printf(m, " is_signed: %u\n", field->is_signed); 5021 out: 5022 return ret; 5023 } 5024 5025 static int hist_action_debug_show(struct seq_file *m, 5026 struct action_data *data, int i) 5027 { 5028 int ret = 0; 5029 5030 if (data->handler == HANDLER_ONMAX || 5031 data->handler == HANDLER_ONCHANGE) { 5032 seq_printf(m, "\n hist_data->actions[%d].track_data.var_ref:\n", i); 5033 ret = hist_field_debug_show(m, data->track_data.var_ref, 5034 HIST_FIELD_FL_VAR_REF); 5035 if (ret) 5036 goto out; 5037 5038 seq_printf(m, "\n hist_data->actions[%d].track_data.track_var:\n", i); 5039 ret = hist_field_debug_show(m, data->track_data.track_var, 5040 HIST_FIELD_FL_VAR); 5041 if (ret) 5042 goto out; 5043 } 5044 5045 if (data->handler == HANDLER_ONMATCH) { 5046 seq_printf(m, "\n hist_data->actions[%d].match_data.event_system: %s\n", 5047 i, data->match_data.event_system); 5048 seq_printf(m, " hist_data->actions[%d].match_data.event: %s\n", 5049 i, data->match_data.event); 5050 } 5051 out: 5052 return ret; 5053 } 5054 5055 static int hist_actions_debug_show(struct seq_file *m, 5056 struct hist_trigger_data *hist_data) 5057 { 5058 int i, ret = 0; 5059 5060 if (hist_data->n_actions) 5061 seq_puts(m, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n"); 5062 5063 for (i = 0; i < hist_data->n_actions; i++) { 5064 struct action_data *action = hist_data->actions[i]; 5065 5066 ret = hist_action_debug_show(m, action, i); 5067 if (ret) 5068 goto out; 5069 } 5070 5071 if (hist_data->n_save_vars) 5072 seq_puts(m, "\n save action variables (save() params):\n"); 5073 5074 for (i = 0; i < hist_data->n_save_vars; i++) { 5075 ret = field_var_debug_show(m, hist_data->save_vars[i], i, true); 5076 if (ret) 5077 goto out; 5078 } 5079 out: 5080 return ret; 5081 } 5082 5083 static void hist_trigger_debug_show(struct seq_file *m, 5084 struct event_trigger_data *data, int n) 5085 { 5086 struct hist_trigger_data *hist_data; 5087 int i, ret; 5088 5089 if (n > 0) 5090 seq_puts(m, "\n\n"); 5091 5092 seq_puts(m, "# event histogram\n#\n# trigger info: "); 5093 data->ops->print(m, data->ops, data); 5094 seq_puts(m, "#\n\n"); 5095 5096 hist_data = data->private_data; 5097 5098 seq_printf(m, "hist_data: %p\n\n", hist_data); 5099 seq_printf(m, " n_vals: %u\n", hist_data->n_vals); 5100 seq_printf(m, " n_keys: %u\n", hist_data->n_keys); 5101 seq_printf(m, " n_fields: %u\n", hist_data->n_fields); 5102 5103 seq_puts(m, "\n val fields:\n\n"); 5104 5105 seq_puts(m, " hist_data->fields[0]:\n"); 5106 ret = hist_field_debug_show(m, hist_data->fields[0], 5107 HIST_FIELD_FL_HITCOUNT); 5108 if (ret) 5109 return; 5110 5111 for (i = 1; i < hist_data->n_vals; i++) { 5112 seq_printf(m, "\n hist_data->fields[%d]:\n", i); 5113 ret = hist_field_debug_show(m, 
hist_data->fields[i], 0); 5114 if (ret) 5115 return; 5116 } 5117 5118 seq_puts(m, "\n key fields:\n"); 5119 5120 for (i = hist_data->n_vals; i < hist_data->n_fields; i++) { 5121 seq_printf(m, "\n hist_data->fields[%d]:\n", i); 5122 ret = hist_field_debug_show(m, hist_data->fields[i], 5123 HIST_FIELD_FL_KEY); 5124 if (ret) 5125 return; 5126 } 5127 5128 if (hist_data->n_var_refs) 5129 seq_puts(m, "\n variable reference fields:\n"); 5130 5131 for (i = 0; i < hist_data->n_var_refs; i++) { 5132 seq_printf(m, "\n hist_data->var_refs[%d]:\n", i); 5133 ret = hist_field_debug_show(m, hist_data->var_refs[i], 5134 HIST_FIELD_FL_VAR_REF); 5135 if (ret) 5136 return; 5137 } 5138 5139 if (hist_data->n_field_vars) 5140 seq_puts(m, "\n field variables:\n"); 5141 5142 for (i = 0; i < hist_data->n_field_vars; i++) { 5143 ret = field_var_debug_show(m, hist_data->field_vars[i], i, false); 5144 if (ret) 5145 return; 5146 } 5147 5148 ret = hist_actions_debug_show(m, hist_data); 5149 if (ret) 5150 return; 5151 } 5152 5153 static int hist_debug_show(struct seq_file *m, void *v) 5154 { 5155 struct event_trigger_data *data; 5156 struct trace_event_file *event_file; 5157 int n = 0, ret = 0; 5158 5159 mutex_lock(&event_mutex); 5160 5161 event_file = event_file_data(m->private); 5162 if (unlikely(!event_file)) { 5163 ret = -ENODEV; 5164 goto out_unlock; 5165 } 5166 5167 list_for_each_entry(data, &event_file->triggers, list) { 5168 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) 5169 hist_trigger_debug_show(m, data, n++); 5170 } 5171 5172 out_unlock: 5173 mutex_unlock(&event_mutex); 5174 5175 return ret; 5176 } 5177 5178 static int event_hist_debug_open(struct inode *inode, struct file *file) 5179 { 5180 int ret; 5181 5182 ret = security_locked_down(LOCKDOWN_TRACEFS); 5183 if (ret) 5184 return ret; 5185 5186 return single_open(file, hist_debug_show, file); 5187 } 5188 5189 const struct file_operations event_hist_debug_fops = { 5190 .open = event_hist_debug_open, 5191 .read = seq_read, 5192 .llseek = seq_lseek, 5193 .release = single_release, 5194 }; 5195 #endif 5196 5197 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field) 5198 { 5199 const char *field_name = hist_field_name(hist_field, 0); 5200 5201 if (hist_field->var.name) 5202 seq_printf(m, "%s=", hist_field->var.name); 5203 5204 if (hist_field->flags & HIST_FIELD_FL_CPU) 5205 seq_puts(m, "common_cpu"); 5206 else if (field_name) { 5207 if (hist_field->flags & HIST_FIELD_FL_VAR_REF || 5208 hist_field->flags & HIST_FIELD_FL_ALIAS) 5209 seq_putc(m, '$'); 5210 seq_printf(m, "%s", field_name); 5211 } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP) 5212 seq_puts(m, "common_timestamp"); 5213 5214 if (hist_field->flags) { 5215 if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) && 5216 !(hist_field->flags & HIST_FIELD_FL_EXPR)) { 5217 const char *flags = get_hist_field_flags(hist_field); 5218 5219 if (flags) 5220 seq_printf(m, ".%s", flags); 5221 } 5222 } 5223 if (hist_field->buckets) 5224 seq_printf(m, "=%ld", hist_field->buckets); 5225 } 5226 5227 static int event_hist_trigger_print(struct seq_file *m, 5228 struct event_trigger_ops *ops, 5229 struct event_trigger_data *data) 5230 { 5231 struct hist_trigger_data *hist_data = data->private_data; 5232 struct hist_field *field; 5233 bool have_var = false; 5234 unsigned int i; 5235 5236 seq_puts(m, "hist:"); 5237 5238 if (data->name) 5239 seq_printf(m, "%s:", data->name); 5240 5241 seq_puts(m, "keys="); 5242 5243 for_each_hist_key_field(i, hist_data) { 5244 field = hist_data->fields[i]; 5245 
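		/*
		 * Key fields follow the val fields, so every key after the
		 * first (i == n_vals) gets a ',' separator.
		 */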
5246 if (i > hist_data->n_vals) 5247 seq_puts(m, ","); 5248 5249 if (field->flags & HIST_FIELD_FL_STACKTRACE) 5250 seq_puts(m, "stacktrace"); 5251 else 5252 hist_field_print(m, field); 5253 } 5254 5255 seq_puts(m, ":vals="); 5256 5257 for_each_hist_val_field(i, hist_data) { 5258 field = hist_data->fields[i]; 5259 if (field->flags & HIST_FIELD_FL_VAR) { 5260 have_var = true; 5261 continue; 5262 } 5263 5264 if (i == HITCOUNT_IDX) 5265 seq_puts(m, "hitcount"); 5266 else { 5267 seq_puts(m, ","); 5268 hist_field_print(m, field); 5269 } 5270 } 5271 5272 if (have_var) { 5273 unsigned int n = 0; 5274 5275 seq_puts(m, ":"); 5276 5277 for_each_hist_val_field(i, hist_data) { 5278 field = hist_data->fields[i]; 5279 5280 if (field->flags & HIST_FIELD_FL_VAR) { 5281 if (n++) 5282 seq_puts(m, ","); 5283 hist_field_print(m, field); 5284 } 5285 } 5286 } 5287 5288 seq_puts(m, ":sort="); 5289 5290 for (i = 0; i < hist_data->n_sort_keys; i++) { 5291 struct tracing_map_sort_key *sort_key; 5292 unsigned int idx, first_key_idx; 5293 5294 /* skip VAR vals */ 5295 first_key_idx = hist_data->n_vals - hist_data->n_vars; 5296 5297 sort_key = &hist_data->sort_keys[i]; 5298 idx = sort_key->field_idx; 5299 5300 if (WARN_ON(idx >= HIST_FIELDS_MAX)) 5301 return -EINVAL; 5302 5303 if (i > 0) 5304 seq_puts(m, ","); 5305 5306 if (idx == HITCOUNT_IDX) 5307 seq_puts(m, "hitcount"); 5308 else { 5309 if (idx >= first_key_idx) 5310 idx += hist_data->n_vars; 5311 hist_field_print(m, hist_data->fields[idx]); 5312 } 5313 5314 if (sort_key->descending) 5315 seq_puts(m, ".descending"); 5316 } 5317 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits)); 5318 if (hist_data->enable_timestamps) 5319 seq_printf(m, ":clock=%s", hist_data->attrs->clock); 5320 5321 print_actions_spec(m, hist_data); 5322 5323 if (data->filter_str) 5324 seq_printf(m, " if %s", data->filter_str); 5325 5326 if (data->paused) 5327 seq_puts(m, " [paused]"); 5328 else 5329 seq_puts(m, " [active]"); 5330 5331 seq_putc(m, '\n'); 5332 5333 return 0; 5334 } 5335 5336 static int event_hist_trigger_init(struct event_trigger_ops *ops, 5337 struct event_trigger_data *data) 5338 { 5339 struct hist_trigger_data *hist_data = data->private_data; 5340 5341 if (!data->ref && hist_data->attrs->name) 5342 save_named_trigger(hist_data->attrs->name, data); 5343 5344 data->ref++; 5345 5346 return 0; 5347 } 5348 5349 static void unregister_field_var_hists(struct hist_trigger_data *hist_data) 5350 { 5351 struct trace_event_file *file; 5352 unsigned int i; 5353 char *cmd; 5354 int ret; 5355 5356 for (i = 0; i < hist_data->n_field_var_hists; i++) { 5357 file = hist_data->field_var_hists[i]->hist_data->event_file; 5358 cmd = hist_data->field_var_hists[i]->cmd; 5359 ret = event_hist_trigger_func(&trigger_hist_cmd, file, 5360 "!hist", "hist", cmd); 5361 WARN_ON_ONCE(ret < 0); 5362 } 5363 } 5364 5365 static void event_hist_trigger_free(struct event_trigger_ops *ops, 5366 struct event_trigger_data *data) 5367 { 5368 struct hist_trigger_data *hist_data = data->private_data; 5369 5370 if (WARN_ON_ONCE(data->ref <= 0)) 5371 return; 5372 5373 data->ref--; 5374 if (!data->ref) { 5375 if (data->name) 5376 del_named_trigger(data); 5377 5378 trigger_data_free(data); 5379 5380 remove_hist_vars(hist_data); 5381 5382 unregister_field_var_hists(hist_data); 5383 5384 destroy_hist_data(hist_data); 5385 } 5386 } 5387 5388 static struct event_trigger_ops event_hist_trigger_ops = { 5389 .func = event_hist_trigger, 5390 .print = event_hist_trigger_print, 5391 .init = event_hist_trigger_init, 5392 .free = 
event_hist_trigger_free, 5393 }; 5394 5395 static int event_hist_trigger_named_init(struct event_trigger_ops *ops, 5396 struct event_trigger_data *data) 5397 { 5398 data->ref++; 5399 5400 save_named_trigger(data->named_data->name, data); 5401 5402 event_hist_trigger_init(ops, data->named_data); 5403 5404 return 0; 5405 } 5406 5407 static void event_hist_trigger_named_free(struct event_trigger_ops *ops, 5408 struct event_trigger_data *data) 5409 { 5410 if (WARN_ON_ONCE(data->ref <= 0)) 5411 return; 5412 5413 event_hist_trigger_free(ops, data->named_data); 5414 5415 data->ref--; 5416 if (!data->ref) { 5417 del_named_trigger(data); 5418 trigger_data_free(data); 5419 } 5420 } 5421 5422 static struct event_trigger_ops event_hist_trigger_named_ops = { 5423 .func = event_hist_trigger, 5424 .print = event_hist_trigger_print, 5425 .init = event_hist_trigger_named_init, 5426 .free = event_hist_trigger_named_free, 5427 }; 5428 5429 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd, 5430 char *param) 5431 { 5432 return &event_hist_trigger_ops; 5433 } 5434 5435 static void hist_clear(struct event_trigger_data *data) 5436 { 5437 struct hist_trigger_data *hist_data = data->private_data; 5438 5439 if (data->name) 5440 pause_named_trigger(data); 5441 5442 tracepoint_synchronize_unregister(); 5443 5444 tracing_map_clear(hist_data->map); 5445 5446 if (data->name) 5447 unpause_named_trigger(data); 5448 } 5449 5450 static bool compatible_field(struct ftrace_event_field *field, 5451 struct ftrace_event_field *test_field) 5452 { 5453 if (field == test_field) 5454 return true; 5455 if (field == NULL || test_field == NULL) 5456 return false; 5457 if (strcmp(field->name, test_field->name) != 0) 5458 return false; 5459 if (strcmp(field->type, test_field->type) != 0) 5460 return false; 5461 if (field->size != test_field->size) 5462 return false; 5463 if (field->is_signed != test_field->is_signed) 5464 return false; 5465 5466 return true; 5467 } 5468 5469 static bool hist_trigger_match(struct event_trigger_data *data, 5470 struct event_trigger_data *data_test, 5471 struct event_trigger_data *named_data, 5472 bool ignore_filter) 5473 { 5474 struct tracing_map_sort_key *sort_key, *sort_key_test; 5475 struct hist_trigger_data *hist_data, *hist_data_test; 5476 struct hist_field *key_field, *key_field_test; 5477 unsigned int i; 5478 5479 if (named_data && (named_data != data_test) && 5480 (named_data != data_test->named_data)) 5481 return false; 5482 5483 if (!named_data && is_named_trigger(data_test)) 5484 return false; 5485 5486 hist_data = data->private_data; 5487 hist_data_test = data_test->private_data; 5488 5489 if (hist_data->n_vals != hist_data_test->n_vals || 5490 hist_data->n_fields != hist_data_test->n_fields || 5491 hist_data->n_sort_keys != hist_data_test->n_sort_keys) 5492 return false; 5493 5494 if (!ignore_filter) { 5495 if ((data->filter_str && !data_test->filter_str) || 5496 (!data->filter_str && data_test->filter_str)) 5497 return false; 5498 } 5499 5500 for_each_hist_field(i, hist_data) { 5501 key_field = hist_data->fields[i]; 5502 key_field_test = hist_data_test->fields[i]; 5503 5504 if (key_field->flags != key_field_test->flags) 5505 return false; 5506 if (!compatible_field(key_field->field, key_field_test->field)) 5507 return false; 5508 if (key_field->offset != key_field_test->offset) 5509 return false; 5510 if (key_field->size != key_field_test->size) 5511 return false; 5512 if (key_field->is_signed != key_field_test->is_signed) 5513 return false; 5514 if (!!key_field->var.name 
!= !!key_field_test->var.name) 5515 return false; 5516 if (key_field->var.name && 5517 strcmp(key_field->var.name, key_field_test->var.name) != 0) 5518 return false; 5519 } 5520 5521 for (i = 0; i < hist_data->n_sort_keys; i++) { 5522 sort_key = &hist_data->sort_keys[i]; 5523 sort_key_test = &hist_data_test->sort_keys[i]; 5524 5525 if (sort_key->field_idx != sort_key_test->field_idx || 5526 sort_key->descending != sort_key_test->descending) 5527 return false; 5528 } 5529 5530 if (!ignore_filter && data->filter_str && 5531 (strcmp(data->filter_str, data_test->filter_str) != 0)) 5532 return false; 5533 5534 if (!actions_match(hist_data, hist_data_test)) 5535 return false; 5536 5537 return true; 5538 } 5539 5540 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops, 5541 struct event_trigger_data *data, 5542 struct trace_event_file *file) 5543 { 5544 struct hist_trigger_data *hist_data = data->private_data; 5545 struct event_trigger_data *test, *named_data = NULL; 5546 struct trace_array *tr = file->tr; 5547 int ret = 0; 5548 5549 if (hist_data->attrs->name) { 5550 named_data = find_named_trigger(hist_data->attrs->name); 5551 if (named_data) { 5552 if (!hist_trigger_match(data, named_data, named_data, 5553 true)) { 5554 hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name)); 5555 ret = -EINVAL; 5556 goto out; 5557 } 5558 } 5559 } 5560 5561 if (hist_data->attrs->name && !named_data) 5562 goto new; 5563 5564 lockdep_assert_held(&event_mutex); 5565 5566 list_for_each_entry(test, &file->triggers, list) { 5567 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5568 if (!hist_trigger_match(data, test, named_data, false)) 5569 continue; 5570 if (hist_data->attrs->pause) 5571 test->paused = true; 5572 else if (hist_data->attrs->cont) 5573 test->paused = false; 5574 else if (hist_data->attrs->clear) 5575 hist_clear(test); 5576 else { 5577 hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0); 5578 ret = -EEXIST; 5579 } 5580 goto out; 5581 } 5582 } 5583 new: 5584 if (hist_data->attrs->cont || hist_data->attrs->clear) { 5585 hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0); 5586 ret = -ENOENT; 5587 goto out; 5588 } 5589 5590 if (hist_data->attrs->pause) 5591 data->paused = true; 5592 5593 if (named_data) { 5594 data->private_data = named_data->private_data; 5595 set_named_trigger_data(data, named_data); 5596 data->ops = &event_hist_trigger_named_ops; 5597 } 5598 5599 if (data->ops->init) { 5600 ret = data->ops->init(data->ops, data); 5601 if (ret < 0) 5602 goto out; 5603 } 5604 5605 if (hist_data->enable_timestamps) { 5606 char *clock = hist_data->attrs->clock; 5607 5608 ret = tracing_set_clock(file->tr, hist_data->attrs->clock); 5609 if (ret) { 5610 hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock)); 5611 goto out; 5612 } 5613 5614 tracing_set_filter_buffering(file->tr, true); 5615 } 5616 5617 if (named_data) 5618 destroy_hist_data(hist_data); 5619 5620 ret++; 5621 out: 5622 return ret; 5623 } 5624 5625 static int hist_trigger_enable(struct event_trigger_data *data, 5626 struct trace_event_file *file) 5627 { 5628 int ret = 0; 5629 5630 list_add_tail_rcu(&data->list, &file->triggers); 5631 5632 update_cond_flag(file); 5633 5634 if (trace_event_trigger_enable_disable(file, 1) < 0) { 5635 list_del_rcu(&data->list); 5636 update_cond_flag(file); 5637 ret--; 5638 } 5639 5640 return ret; 5641 } 5642 5643 static bool have_hist_trigger_match(struct event_trigger_data *data, 5644 struct trace_event_file *file) 5645 { 5646 struct hist_trigger_data *hist_data = data->private_data; 5647 
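	/* scan this event's existing hist triggers for one equivalent to 'data' */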
struct event_trigger_data *test, *named_data = NULL; 5648 bool match = false; 5649 5650 lockdep_assert_held(&event_mutex); 5651 5652 if (hist_data->attrs->name) 5653 named_data = find_named_trigger(hist_data->attrs->name); 5654 5655 list_for_each_entry(test, &file->triggers, list) { 5656 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5657 if (hist_trigger_match(data, test, named_data, false)) { 5658 match = true; 5659 break; 5660 } 5661 } 5662 } 5663 5664 return match; 5665 } 5666 5667 static bool hist_trigger_check_refs(struct event_trigger_data *data, 5668 struct trace_event_file *file) 5669 { 5670 struct hist_trigger_data *hist_data = data->private_data; 5671 struct event_trigger_data *test, *named_data = NULL; 5672 5673 lockdep_assert_held(&event_mutex); 5674 5675 if (hist_data->attrs->name) 5676 named_data = find_named_trigger(hist_data->attrs->name); 5677 5678 list_for_each_entry(test, &file->triggers, list) { 5679 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5680 if (!hist_trigger_match(data, test, named_data, false)) 5681 continue; 5682 hist_data = test->private_data; 5683 if (check_var_refs(hist_data)) 5684 return true; 5685 break; 5686 } 5687 } 5688 5689 return false; 5690 } 5691 5692 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops, 5693 struct event_trigger_data *data, 5694 struct trace_event_file *file) 5695 { 5696 struct hist_trigger_data *hist_data = data->private_data; 5697 struct event_trigger_data *test, *named_data = NULL; 5698 bool unregistered = false; 5699 5700 lockdep_assert_held(&event_mutex); 5701 5702 if (hist_data->attrs->name) 5703 named_data = find_named_trigger(hist_data->attrs->name); 5704 5705 list_for_each_entry(test, &file->triggers, list) { 5706 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5707 if (!hist_trigger_match(data, test, named_data, false)) 5708 continue; 5709 unregistered = true; 5710 list_del_rcu(&test->list); 5711 trace_event_trigger_enable_disable(file, 0); 5712 update_cond_flag(file); 5713 break; 5714 } 5715 } 5716 5717 if (unregistered && test->ops->free) 5718 test->ops->free(test->ops, test); 5719 5720 if (hist_data->enable_timestamps) { 5721 if (!hist_data->remove || unregistered) 5722 tracing_set_filter_buffering(file->tr, false); 5723 } 5724 } 5725 5726 static bool hist_file_check_refs(struct trace_event_file *file) 5727 { 5728 struct hist_trigger_data *hist_data; 5729 struct event_trigger_data *test; 5730 5731 lockdep_assert_held(&event_mutex); 5732 5733 list_for_each_entry(test, &file->triggers, list) { 5734 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5735 hist_data = test->private_data; 5736 if (check_var_refs(hist_data)) 5737 return true; 5738 } 5739 } 5740 5741 return false; 5742 } 5743 5744 static void hist_unreg_all(struct trace_event_file *file) 5745 { 5746 struct event_trigger_data *test, *n; 5747 struct hist_trigger_data *hist_data; 5748 struct synth_event *se; 5749 const char *se_name; 5750 5751 lockdep_assert_held(&event_mutex); 5752 5753 if (hist_file_check_refs(file)) 5754 return; 5755 5756 list_for_each_entry_safe(test, n, &file->triggers, list) { 5757 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5758 hist_data = test->private_data; 5759 list_del_rcu(&test->list); 5760 trace_event_trigger_enable_disable(file, 0); 5761 5762 se_name = trace_event_name(file->event_call); 5763 se = find_synth_event(se_name); 5764 if (se) 5765 se->ref--; 5766 5767 update_cond_flag(file); 5768 if (hist_data->enable_timestamps) 5769 tracing_set_filter_buffering(file->tr, false); 
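			/* free the trigger only after it has been unlinked and disabled */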
5770 if (test->ops->free) 5771 test->ops->free(test->ops, test); 5772 } 5773 } 5774 } 5775 5776 static int event_hist_trigger_func(struct event_command *cmd_ops, 5777 struct trace_event_file *file, 5778 char *glob, char *cmd, char *param) 5779 { 5780 unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT; 5781 struct event_trigger_data *trigger_data; 5782 struct hist_trigger_attrs *attrs; 5783 struct event_trigger_ops *trigger_ops; 5784 struct hist_trigger_data *hist_data; 5785 struct synth_event *se; 5786 const char *se_name; 5787 bool remove = false; 5788 char *trigger, *p; 5789 int ret = 0; 5790 5791 lockdep_assert_held(&event_mutex); 5792 5793 if (glob && strlen(glob)) { 5794 hist_err_clear(); 5795 last_cmd_set(file, param); 5796 } 5797 5798 if (!param) 5799 return -EINVAL; 5800 5801 if (glob[0] == '!') 5802 remove = true; 5803 5804 /* 5805 * separate the trigger from the filter (k:v [if filter]) 5806 * allowing for whitespace in the trigger 5807 */ 5808 p = trigger = param; 5809 do { 5810 p = strstr(p, "if"); 5811 if (!p) 5812 break; 5813 if (p == param) 5814 return -EINVAL; 5815 if (*(p - 1) != ' ' && *(p - 1) != '\t') { 5816 p++; 5817 continue; 5818 } 5819 if (p >= param + strlen(param) - (sizeof("if") - 1) - 1) 5820 return -EINVAL; 5821 if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') { 5822 p++; 5823 continue; 5824 } 5825 break; 5826 } while (p); 5827 5828 if (!p) 5829 param = NULL; 5830 else { 5831 *(p - 1) = '\0'; 5832 param = strstrip(p); 5833 trigger = strstrip(trigger); 5834 } 5835 5836 attrs = parse_hist_trigger_attrs(file->tr, trigger); 5837 if (IS_ERR(attrs)) 5838 return PTR_ERR(attrs); 5839 5840 if (attrs->map_bits) 5841 hist_trigger_bits = attrs->map_bits; 5842 5843 hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove); 5844 if (IS_ERR(hist_data)) { 5845 destroy_hist_trigger_attrs(attrs); 5846 return PTR_ERR(hist_data); 5847 } 5848 5849 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger); 5850 5851 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL); 5852 if (!trigger_data) { 5853 ret = -ENOMEM; 5854 goto out_free; 5855 } 5856 5857 trigger_data->count = -1; 5858 trigger_data->ops = trigger_ops; 5859 trigger_data->cmd_ops = cmd_ops; 5860 5861 INIT_LIST_HEAD(&trigger_data->list); 5862 RCU_INIT_POINTER(trigger_data->filter, NULL); 5863 5864 trigger_data->private_data = hist_data; 5865 5866 /* if param is non-empty, it's supposed to be a filter */ 5867 if (param && cmd_ops->set_filter) { 5868 ret = cmd_ops->set_filter(param, trigger_data, file); 5869 if (ret < 0) 5870 goto out_free; 5871 } 5872 5873 if (remove) { 5874 if (!have_hist_trigger_match(trigger_data, file)) 5875 goto out_free; 5876 5877 if (hist_trigger_check_refs(trigger_data, file)) { 5878 ret = -EBUSY; 5879 goto out_free; 5880 } 5881 5882 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file); 5883 se_name = trace_event_name(file->event_call); 5884 se = find_synth_event(se_name); 5885 if (se) 5886 se->ref--; 5887 ret = 0; 5888 goto out_free; 5889 } 5890 5891 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); 5892 /* 5893 * The above returns on success the # of triggers registered, 5894 * but if it didn't register any it returns zero. Consider no 5895 * triggers registered a failure too. 
5896 */ 5897 if (!ret) { 5898 if (!(attrs->pause || attrs->cont || attrs->clear)) 5899 ret = -ENOENT; 5900 goto out_free; 5901 } else if (ret < 0) 5902 goto out_free; 5903 5904 if (get_named_trigger_data(trigger_data)) 5905 goto enable; 5906 5907 if (has_hist_vars(hist_data)) 5908 save_hist_vars(hist_data); 5909 5910 ret = create_actions(hist_data); 5911 if (ret) 5912 goto out_unreg; 5913 5914 ret = tracing_map_init(hist_data->map); 5915 if (ret) 5916 goto out_unreg; 5917 enable: 5918 ret = hist_trigger_enable(trigger_data, file); 5919 if (ret) 5920 goto out_unreg; 5921 5922 se_name = trace_event_name(file->event_call); 5923 se = find_synth_event(se_name); 5924 if (se) 5925 se->ref++; 5926 /* Just return zero, not the number of registered triggers */ 5927 ret = 0; 5928 out: 5929 if (ret == 0) 5930 hist_err_clear(); 5931 5932 return ret; 5933 out_unreg: 5934 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file); 5935 out_free: 5936 if (cmd_ops->set_filter) 5937 cmd_ops->set_filter(NULL, trigger_data, NULL); 5938 5939 remove_hist_vars(hist_data); 5940 5941 kfree(trigger_data); 5942 5943 destroy_hist_data(hist_data); 5944 goto out; 5945 } 5946 5947 static struct event_command trigger_hist_cmd = { 5948 .name = "hist", 5949 .trigger_type = ETT_EVENT_HIST, 5950 .flags = EVENT_CMD_FL_NEEDS_REC, 5951 .func = event_hist_trigger_func, 5952 .reg = hist_register_trigger, 5953 .unreg = hist_unregister_trigger, 5954 .unreg_all = hist_unreg_all, 5955 .get_trigger_ops = event_hist_get_trigger_ops, 5956 .set_filter = set_trigger_filter, 5957 }; 5958 5959 __init int register_trigger_hist_cmd(void) 5960 { 5961 int ret; 5962 5963 ret = register_event_command(&trigger_hist_cmd); 5964 WARN_ON(ret < 0); 5965 5966 return ret; 5967 } 5968 5969 static void 5970 hist_enable_trigger(struct event_trigger_data *data, 5971 struct trace_buffer *buffer, void *rec, 5972 struct ring_buffer_event *event) 5973 { 5974 struct enable_trigger_data *enable_data = data->private_data; 5975 struct event_trigger_data *test; 5976 5977 list_for_each_entry_rcu(test, &enable_data->file->triggers, list, 5978 lockdep_is_held(&event_mutex)) { 5979 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5980 if (enable_data->enable) 5981 test->paused = false; 5982 else 5983 test->paused = true; 5984 } 5985 } 5986 } 5987 5988 static void 5989 hist_enable_count_trigger(struct event_trigger_data *data, 5990 struct trace_buffer *buffer, void *rec, 5991 struct ring_buffer_event *event) 5992 { 5993 if (!data->count) 5994 return; 5995 5996 if (data->count != -1) 5997 (data->count)--; 5998 5999 hist_enable_trigger(data, buffer, rec, event); 6000 } 6001 6002 static struct event_trigger_ops hist_enable_trigger_ops = { 6003 .func = hist_enable_trigger, 6004 .print = event_enable_trigger_print, 6005 .init = event_trigger_init, 6006 .free = event_enable_trigger_free, 6007 }; 6008 6009 static struct event_trigger_ops hist_enable_count_trigger_ops = { 6010 .func = hist_enable_count_trigger, 6011 .print = event_enable_trigger_print, 6012 .init = event_trigger_init, 6013 .free = event_enable_trigger_free, 6014 }; 6015 6016 static struct event_trigger_ops hist_disable_trigger_ops = { 6017 .func = hist_enable_trigger, 6018 .print = event_enable_trigger_print, 6019 .init = event_trigger_init, 6020 .free = event_enable_trigger_free, 6021 }; 6022 6023 static struct event_trigger_ops hist_disable_count_trigger_ops = { 6024 .func = hist_enable_count_trigger, 6025 .print = event_enable_trigger_print, 6026 .init = event_trigger_init, 6027 .free = 
	event_enable_trigger_free,
};

static struct event_trigger_ops *
hist_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);

	if (enable)
		ops = param ? &hist_enable_count_trigger_ops :
			&hist_enable_trigger_ops;
	else
		ops = param ? &hist_disable_count_trigger_ops :
			&hist_disable_trigger_ops;

	return ops;
}

static void hist_enable_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
			list_del_rcu(&test->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}

static struct event_command trigger_hist_enable_cmd = {
	.name = ENABLE_HIST_STR,
	.trigger_type = ETT_HIST_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.unreg_all = hist_enable_unreg_all,
	.get_trigger_ops = hist_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_hist_disable_cmd = {
	.name = DISABLE_HIST_STR,
	.trigger_type = ETT_HIST_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.unreg_all = hist_enable_unreg_all,
	.get_trigger_ops = hist_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}

__init int register_trigger_hist_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_hist_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_hist_enable_disable_cmds();

	return ret;
}
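/*
 * Illustrative usage of the enable_hist/disable_hist commands registered
 * above (see Documentation/trace/histogram.rst for the authoritative syntax
 * and examples):
 *
 *   # echo 'enable_hist:net:netif_receive_skb if filename==/usr/bin/wget' > \
 *         events/sched/sched_process_exec/trigger
 *
 * This unpauses an existing hist trigger on net:netif_receive_skb whenever
 * sched_process_exec fires and the filter matches; disable_hist pauses it
 * again.
 */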