// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_hist - trace event hist triggers
 *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/rculist.h>
#include <linux/tracefs.h>

/* for gfp flag names */
#include <linux/trace_events.h>
#include <trace/events/mmflags.h>

#include "tracing_map.h"
#include "trace_synth.h"

/*
 * Table of hist trigger error conditions.  It is expanded twice below
 * via the C() macro: once to generate the HIST_ERR_* enum values and
 * once to generate the parallel err_text[] string array consumed by
 * tracing_log_err() (see hist_err() further down).  Keep the two
 * columns of each entry in sync.
 */
#define ERRORS								\
	C(NONE,			"No error"),				\
	C(DUPLICATE_VAR,	"Variable already defined"),		\
	C(VAR_NOT_UNIQUE,	"Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
	C(TOO_MANY_VARS,	"Too many variables defined"),		\
	C(MALFORMED_ASSIGNMENT,	"Malformed assignment"),		\
	C(NAMED_MISMATCH,	"Named hist trigger doesn't match existing named trigger (includes variables)"), \
	C(TRIGGER_EEXIST,	"Hist trigger already exists"),		\
	C(TRIGGER_ENOENT_CLEAR,	"Can't clear or continue a nonexistent hist trigger"), \
	C(SET_CLOCK_FAIL,	"Couldn't set trace_clock"),		\
	C(BAD_FIELD_MODIFIER,	"Invalid field modifier"),		\
	C(TOO_MANY_SUBEXPR,	"Too many subexpressions (3 max)"),	\
	C(TIMESTAMP_MISMATCH,	"Timestamp units in expression don't match"), \
	C(TOO_MANY_FIELD_VARS,	"Too many field variables defined"),	\
	C(EVENT_FILE_NOT_FOUND,	"Event file not found"),		\
	C(HIST_NOT_FOUND,	"Matching event histogram not found"),	\
	C(HIST_CREATE_FAIL,	"Couldn't create histogram for field"),	\
	C(SYNTH_VAR_NOT_FOUND,	"Couldn't find synthetic variable"),	\
	C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"),	\
	C(SYNTH_TYPE_MISMATCH,	"Param type doesn't match synthetic event field type"), \
	C(SYNTH_COUNT_MISMATCH,	"Param count doesn't match synthetic event field count"), \
	C(FIELD_VAR_PARSE_FAIL,	"Couldn't parse field variable"),	\
	C(VAR_CREATE_FIND_FAIL,	"Couldn't create or find variable"),	\
	C(ONX_NOT_VAR,		"For onmax(x) or onchange(x), x must be a variable"), \
	C(ONX_VAR_NOT_FOUND,	"Couldn't find onmax or onchange variable"), \
	C(ONX_VAR_CREATE_FAIL,	"Couldn't create onmax or onchange variable"), \
	C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"),	\
	C(TOO_MANY_PARAMS,	"Too many action params"),		\
	C(PARAM_NOT_FOUND,	"Couldn't find param"),			\
	C(INVALID_PARAM,	"Invalid action param"),		\
	C(ACTION_NOT_FOUND,	"No action found"),			\
	C(NO_SAVE_PARAMS,	"No params found for save()"),		\
	C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
	C(ACTION_MISMATCH,	"Handler doesn't support action"),	\
	C(NO_CLOSING_PAREN,	"No closing paren found"),		\
	C(SUBSYS_NOT_FOUND,	"Missing subsystem"),			\
	C(INVALID_SUBSYS_EVENT,	"Invalid subsystem or event name"),	\
	C(INVALID_REF_KEY,	"Using variable references in keys not supported"), \
	C(VAR_NOT_FOUND,	"Couldn't find variable"),		\
	C(FIELD_NOT_FOUND,	"Couldn't find field"),			\
	C(EMPTY_ASSIGNMENT,	"Empty assignment"),			\
	C(INVALID_SORT_MODIFIER,"Invalid sort modifier"),		\
	C(EMPTY_SORT_FIELD,	"Empty sort field"),			\
	C(TOO_MANY_SORT_FIELDS,	"Too many sort fields (Max = 2)"),	\
	C(INVALID_SORT_FIELD,	"Sort field must be a key or a val"),	\
	C(INVALID_STR_OPERAND,	"String type can not be an operand in expression"), \
	C(EXPECT_NUMBER,	"Expecting numeric literal"),		\
	C(UNARY_MINUS_SUBEXPR,	"Unary minus not supported in sub-expressions"), \
	C(DIVISION_BY_ZERO,	"Division by zero"),			\
	C(NEED_NOHC_VAL,	"Non-hitcount value is required for 'nohitcount'"),

/* First expansion: the HIST_ERR_* enum */
#undef C
#define C(a, b)		HIST_ERR_##a

enum { ERRORS };

/* Second expansion: the error strings, indexed by HIST_ERR_* */
#undef C
#define C(a, b)		b

static const char *err_text[] = { ERRORS };

struct hist_field;

/*
 * Signature of the per-field value functions (hist_field_*() below):
 * given the field descriptor, the tracing_map element and the raw
 * ring-buffer event, produce the field's value as a u64.
 */
typedef u64 (*hist_field_fn_t) (struct hist_field *field,
				struct tracing_map_elt *elt,
				struct trace_buffer *buffer,
				struct ring_buffer_event *rbe,
				void *event);

#define HIST_FIELD_OPERANDS_MAX	2
#define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
#define HIST_ACTIONS_MAX	8
#define HIST_CONST_DIGITS_MAX	21
#define HIST_DIV_SHIFT		20  /* For optimizing division by constants */

/* Binary/unary operators recognized in hist trigger expressions */
enum field_op_id {
	FIELD_OP_NONE,
	FIELD_OP_PLUS,
	FIELD_OP_MINUS,
	FIELD_OP_UNARY_MINUS,
	FIELD_OP_DIV,
	FIELD_OP_MULT,
};

/*
 * Table of field-function kinds, expanded below into the
 * HIST_FIELD_FN_* enum.  The string column is the printable name.
 */
#define FIELD_FUNCS				\
	C(NOP,		"nop"),			\
	C(VAR_REF,	"var_ref"),		\
	C(COUNTER,	"counter"),		\
	C(CONST,	"const"),		\
	C(LOG2,		"log2"),		\
	C(BUCKET,	"bucket"),		\
	C(TIMESTAMP,	"timestamp"),		\
	C(CPU,		"cpu"),			\
	C(COMM,		"comm"),		\
	C(STRING,	"string"),		\
	C(DYNSTRING,	"dynstring"),		\
	C(RELDYNSTRING,	"reldynstring"),	\
	C(PSTRING,	"pstring"),		\
	C(S64,		"s64"),			\
	C(U64,		"u64"),			\
	C(S32,		"s32"),			\
	C(U32,		"u32"),			\
	C(S16,		"s16"),			\
	C(U16,		"u16"),			\
	C(S8,		"s8"),			\
	C(U8,		"u8"),			\
	C(UMINUS,	"uminus"),		\
	C(MINUS,	"minus"),		\
	C(PLUS,		"plus"),		\
	C(DIV,		"div"),			\
	C(MULT,		"mult"),		\
	C(DIV_POWER2,	"div_power2"),		\
	C(DIV_NOT_POWER2, "div_not_power2"),	\
	C(DIV_MULT_SHIFT, "div_mult_shift"),	\
	C(EXECNAME,	"execname"),		\
	C(STACK,	"stack"),

#undef C
#define C(a, b) HIST_FIELD_FN_##a

enum hist_field_fn {
	FIELD_FUNCS
};

/*
 * A hist_var (histogram variable) contains variable information for
 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
 * flag set. A hist_var has a variable name e.g. ts0, and is
 * associated with a given histogram trigger, as specified by
 * hist_data. The hist_var idx is the unique index assigned to the
 * variable by the hist trigger's tracing_map. The idx is what is
 * used to set a variable's value and, by a variable reference, to
 * retrieve it.
 */
struct hist_var {
	char				*name;
	struct hist_trigger_data	*hist_data;
	unsigned int			idx;
};

struct hist_field {
	struct ftrace_event_field	*field;
	unsigned long			flags;
	unsigned long			buckets;
	const char			*type;
	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
	struct hist_trigger_data	*hist_data;
	enum hist_field_fn		fn_num;
	unsigned int			ref;
	unsigned int			size;
	unsigned int			offset;
	unsigned int			is_signed;

	/*
	 * Variable fields contain variable-specific info in var.
	 */
	struct hist_var			var;
	enum field_op_id		operator;
	char				*system;
	char				*event_name;

	/*
	 * The name field is used for EXPR and VAR_REF fields. VAR
	 * fields contain the variable name in var.name.
	 */
	char				*name;

	/*
	 * When a histogram trigger is hit, if it has any references
	 * to variables, the values of those variables are collected
	 * into a var_ref_vals array by resolve_var_refs(). The
	 * current value of each variable is read from the tracing_map
	 * using the hist field's hist_var.idx and entered into the
	 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
	 */
	unsigned int			var_ref_idx;
	bool				read_once;

	unsigned int			var_str_idx;

	/* Numeric literals are represented as u64 */
	u64				constant;
	/* Used to optimize division by constants */
	u64				div_multiplier;
};

/* Dispatch a hist_field's value function (defined after the table) */
static u64 hist_fn_call(struct hist_field *hist_field,
			struct tracing_map_elt *elt,
			struct trace_buffer *buffer,
			struct ring_buffer_event *rbe,
			void *event);

/* Value fn for numeric literals: just return the parsed constant */
static u64 hist_field_const(struct hist_field *field,
			    struct tracing_map_elt *elt,
			    struct trace_buffer *buffer,
			    struct ring_buffer_event *rbe,
			    void *event)
{
	return field->constant;
}

/* Value fn for the hitcount pseudo-field: each hit contributes 1 */
static u64 hist_field_counter(struct hist_field *field,
			      struct tracing_map_elt *elt,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *rbe,
			      void *event)
{
	return 1;
}

/* Static (fixed-size) string field: return the address of the chars */
static u64 hist_field_string(struct hist_field *hist_field,
			     struct tracing_map_elt *elt,
			     struct trace_buffer *buffer,
			     struct ring_buffer_event *rbe,
			     void *event)
{
	char *addr = (char *)(event + hist_field->field->offset);

	return (u64)(unsigned long)addr;
}

/*
 * __data_loc string field: the field holds a u32 whose low 16 bits
 * are the offset of the string data from the start of the event.
 */
static u64 hist_field_dynstring(struct hist_field *hist_field,
				struct tracing_map_elt *elt,
				struct trace_buffer *buffer,
				struct ring_buffer_event *rbe,
				void *event)
{
	u32 str_item = *(u32 *)(event + hist_field->field->offset);
	int str_loc = str_item & 0xffff;
	char *addr = (char *)(event + str_loc);

	return (u64)(unsigned long)addr;
}

/*
 * __rel_loc string field: like __data_loc, but the 16-bit offset is
 * relative to the end of the u32 item itself, not the event start.
 */
static u64 hist_field_reldynstring(struct hist_field *hist_field,
				   struct tracing_map_elt *elt,
				   struct trace_buffer *buffer,
				   struct ring_buffer_event *rbe,
				   void *event)
{
	u32 *item = event + hist_field->field->offset;
	u32 str_item = *item;
	int str_loc = str_item & 0xffff;
	char *addr = (char *)&item[1] + str_loc;

	return (u64)(unsigned long)addr;
}

/* Pointer-to-string field: the event stores a char *, dereference it */
static u64 hist_field_pstring(struct hist_field *hist_field,
			      struct tracing_map_elt *elt,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *rbe,
			      void *event)
{
	char **addr = (char **)(event + hist_field->field->offset);

	return (u64)(unsigned long)*addr;
}

/*
 * .log2 modifier: bucket the operand by power of two.
 * NOTE(review): roundup_pow_of_two(0) is undefined for val == 0 —
 * presumably callers never feed 0 here; confirm before relying on it.
 */
static u64 hist_field_log2(struct hist_field *hist_field,
			   struct tracing_map_elt *elt,
			   struct trace_buffer *buffer,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	u64 val = hist_fn_call(operand, elt, buffer, rbe, event);

	return (u64) ilog2(roundup_pow_of_two(val));
}

/*
 * .buckets=N modifier: round the operand value down to the start of
 * its N-sized bucket.  Uses div64_ul() only when the value could
 * exceed what a 32-bit unsigned long division can handle.
 */
static u64 hist_field_bucket(struct hist_field *hist_field,
			     struct tracing_map_elt *elt,
			     struct trace_buffer *buffer,
			     struct ring_buffer_event *rbe,
			     void *event)
{
	struct hist_field *operand = hist_field->operands[0];
	unsigned long buckets = hist_field->buckets;

	u64 val = hist_fn_call(operand, elt, buffer, rbe, event);

	if (WARN_ON_ONCE(!buckets))
		return val;

	if (val >= LONG_MAX)
		val = div64_ul(val, buckets);
	else
		val = (u64)((unsigned long)val / buckets);
	return val * buckets;
}

/* Expression: operand1 + operand2 */
static u64 hist_field_plus(struct hist_field *hist_field,
			   struct tracing_map_elt *elt,
			   struct trace_buffer *buffer,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
	u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);

	return val1 + val2;
}

/* Expression: operand1 - operand2 */
static u64 hist_field_minus(struct hist_field *hist_field,
			    struct tracing_map_elt *elt,
			    struct trace_buffer *buffer,
			    struct ring_buffer_event *rbe,
			    void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
	u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);

	return val1 - val2;
}

/*
 * Expression: operand1 / operand2, for a non-constant divisor.
 * Constant divisors are specialized via hist_field_get_div_fn().
 */
static u64 hist_field_div(struct hist_field *hist_field,
			  struct tracing_map_elt *elt,
			  struct trace_buffer *buffer,
			  struct ring_buffer_event *rbe,
			  void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
	u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);

	/* Return -1 for the undefined case */
	if (!val2)
		return -1;

	/* Use shift if the divisor is a power of 2 */
	if (!(val2 & (val2 - 1)))
		return val1 >> __ffs64(val2);

	return div64_u64(val1, val2);
}

/* Specialized division: constant power-of-2 divisor becomes a shift */
static u64 div_by_power_of_two(struct hist_field *hist_field,
			       struct tracing_map_elt *elt,
			       struct trace_buffer *buffer,
			       struct ring_buffer_event *rbe,
			       void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);

	return val1 >> __ffs64(operand2->constant);
}

/* Specialized division: constant divisor too large for mult+shift */
static u64 div_by_not_power_of_two(struct hist_field *hist_field,
				   struct tracing_map_elt *elt,
				   struct trace_buffer *buffer,
				   struct ring_buffer_event *rbe,
				   void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);

	return div64_u64(val1, operand2->constant);
}

/* Specialized division: small constant divisor via multiply + shift */
static u64 div_by_mult_and_shift(struct hist_field *hist_field,
				 struct tracing_map_elt *elt,
				 struct trace_buffer *buffer,
				 struct ring_buffer_event *rbe,
				 void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);

	/*
	 * If the divisor is a constant, do a multiplication and shift instead.
	 *
	 * Choose Z = some power of 2. If Y <= Z, then:
	 *     X / Y = (X * (Z / Y)) / Z
	 *
	 * (Z / Y) is a constant (mult) which is calculated at parse time, so:
	 *     X / Y = (X * mult) / Z
	 *
	 * The division by Z can be replaced by a shift since Z is a power of 2:
	 *     X / Y = (X * mult) >> HIST_DIV_SHIFT
	 *
	 * As long, as X < Z the results will not be off by more than 1.
	 */
	if (val1 < (1 << HIST_DIV_SHIFT)) {
		u64 mult = operand2->div_multiplier;

		return (val1 * mult + ((1 << HIST_DIV_SHIFT) - 1)) >> HIST_DIV_SHIFT;
	}

	return div64_u64(val1, operand2->constant);
}

/* Expression: operand1 * operand2 */
static u64 hist_field_mult(struct hist_field *hist_field,
			   struct tracing_map_elt *elt,
			   struct trace_buffer *buffer,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
	u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);

	return val1 * val2;
}

/* Expression: -operand, negated in signed space then stored as u64 */
static u64 hist_field_unary_minus(struct hist_field *hist_field,
				  struct tracing_map_elt *elt,
				  struct trace_buffer *buffer,
				  struct ring_buffer_event *rbe,
				  void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	s64 sval = (s64)hist_fn_call(operand, elt, buffer, rbe, event);
	u64 val = (u64)-sval;

	return val;
}

/*
 * Generate the fixed-width integer field accessors
 * (hist_field_s64() ... hist_field_u8()).
 * NOTE(review): the intermediate (unsigned long) cast would truncate
 * 64-bit fields on 32-bit kernels — presumably intentional/legacy
 * upstream behavior; confirm before changing.
 */
#define DEFINE_HIST_FIELD_FN(type)					\
	static u64 hist_field_##type(struct hist_field *hist_field,	\
				     struct tracing_map_elt *elt,	\
				     struct trace_buffer *buffer,	\
				     struct ring_buffer_event *rbe,	\
				     void *event)			\
{									\
	type *addr = (type *)(event + hist_field->field->offset);	\
									\
	return (u64)(unsigned long)*addr;				\
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);

/* Iterate all fields; vals come first, then keys (see the two below) */
#define for_each_hist_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

#define for_each_hist_val_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

#define for_each_hist_key_field(i, hist_data)	\
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

#define HITCOUNT_IDX 0
#define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)

enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT		= 1 << 0,
	HIST_FIELD_FL_KEY		= 1 << 1,
	HIST_FIELD_FL_STRING		= 1 << 2,
	HIST_FIELD_FL_HEX		= 1 << 3,
	HIST_FIELD_FL_SYM		= 1 << 4,
	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,
	HIST_FIELD_FL_EXECNAME		= 1 << 6,
	HIST_FIELD_FL_SYSCALL		= 1 << 7,
	HIST_FIELD_FL_STACKTRACE	= 1 << 8,
	HIST_FIELD_FL_LOG2		= 1 << 9,
	HIST_FIELD_FL_TIMESTAMP		= 1 << 10,
	HIST_FIELD_FL_TIMESTAMP_USECS	= 1 << 11,
	HIST_FIELD_FL_VAR		= 1 << 12,
	HIST_FIELD_FL_EXPR		= 1 << 13,
	HIST_FIELD_FL_VAR_REF		= 1 << 14,
	HIST_FIELD_FL_CPU		= 1 << 15,
	HIST_FIELD_FL_ALIAS		= 1 << 16,
	HIST_FIELD_FL_BUCKET		= 1 << 17,
	HIST_FIELD_FL_CONST		= 1 << 18,
	HIST_FIELD_FL_PERCENT		= 1 << 19,
	HIST_FIELD_FL_GRAPH		= 1 << 20,
	HIST_FIELD_FL_COMM		= 1 << 21,
};

/* Parsed name=expr variable definitions from the trigger command */
struct var_defs {
	unsigned int	n_vars;
	char		*name[TRACING_MAP_VARS_MAX];
	char		*expr[TRACING_MAP_VARS_MAX];
};

/* Parsed attributes of a hist trigger command string */
struct hist_trigger_attrs {
	char		*keys_str;
	char		*vals_str;
	char		*sort_key_str;
	char		*name;
	char		*clock;
	bool		pause;
	bool		cont;
	bool		clear;
	bool		ts_in_usecs;
	bool		no_hitcount;
	unsigned int	map_bits;

	char		*assignment_str[TRACING_MAP_VARS_MAX];
	unsigned int	n_assignments;

	char		*action_str[HIST_ACTIONS_MAX];
	unsigned int	n_actions;

	struct var_defs	var_defs;
};

/* Pairs an implicitly-created variable with the field it captures */
struct field_var {
	struct hist_field	*var;
	struct hist_field	*val;
};

/* A hist trigger created on another event to host field variables */
struct field_var_hist {
	struct hist_trigger_data	*hist_data;
	char				*cmd;
};

/* Everything describing one instantiated hist trigger */
struct hist_trigger_data {
	struct hist_field               *fields[HIST_FIELDS_MAX];
	unsigned int			n_vals;
	unsigned int			n_keys;
	unsigned int			n_fields;
	unsigned int			n_vars;
	unsigned int			n_var_str;
	unsigned int			key_size;
	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
	unsigned int			n_sort_keys;
	struct trace_event_file		*event_file;
	struct hist_trigger_attrs	*attrs;
	struct tracing_map		*map;
	bool				enable_timestamps;
	bool				remove;
	struct hist_field               *var_refs[TRACING_MAP_VARS_MAX];
	unsigned int			n_var_refs;

	struct action_data		*actions[HIST_ACTIONS_MAX];
	unsigned int			n_actions;

	struct field_var		*field_vars[SYNTH_FIELDS_MAX];
	unsigned int			n_field_vars;
	unsigned int			n_field_var_str;
	struct field_var_hist		*field_var_hists[SYNTH_FIELDS_MAX];
	unsigned int			n_field_var_hists;

	struct field_var		*save_vars[SYNTH_FIELDS_MAX];
	unsigned int			n_save_vars;
	unsigned int			n_save_var_str;
};

struct action_data;

/* Invoked when a handler (onmatch/onmax/onchange) fires its action */
typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
			     struct tracing_map_elt *elt,
			     struct trace_buffer *buffer, void *rec,
			     struct ring_buffer_event *rbe, void *key,
			     struct action_data *data, u64 *var_ref_vals);

/* Decides whether a tracked value should replace the stored one */
typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);

enum handler_id {
	HANDLER_ONMATCH = 1,
	HANDLER_ONMAX,
	HANDLER_ONCHANGE,
};

enum action_id {
	ACTION_SAVE = 1,
	ACTION_TRACE,
	ACTION_SNAPSHOT,
};

/* One handler.action(...) clause parsed from the trigger command */
struct action_data {
	enum handler_id		handler;
	enum action_id		action;
	char			*action_name;
	action_fn_t		fn;

	unsigned int		n_params;
	char			*params[SYNTH_FIELDS_MAX];

	/*
	 * When a histogram trigger is hit, the values of any
	 * references to variables, including variables being passed
	 * as parameters to synthetic events, are collected into a
	 * var_ref_vals array.  This var_ref_idx array is an array of
	 * indices into the var_ref_vals array, one for each synthetic
	 * event param, and is passed to the synthetic event
	 * invocation.
	 */
	unsigned int		var_ref_idx[SYNTH_FIELDS_MAX];
	struct synth_event	*synth_event;
	bool			use_trace_keyword;
	char			*synth_event_name;

	union {
		struct {
			char			*event;
			char			*event_system;
		} match_data;

		struct {
			/*
			 * var_str contains the $-unstripped variable
			 * name referenced by var_ref, and used when
			 * printing the action.  Because var_ref
			 * creation is deferred to create_actions(),
			 * we need a per-action way to save it until
			 * then, thus var_str.
			 */
			char			*var_str;

			/*
			 * var_ref refers to the variable being
			 * tracked e.g onmax($var).
			 */
			struct hist_field	*var_ref;

			/*
			 * track_var contains the 'invisible' tracking
			 * variable created to keep the current
			 * e.g. max value.
			 */
			struct hist_field	*track_var;

			check_track_val_fn_t	check_val;
			action_fn_t		save_data;
		} track_data;
	};
};

/* State saved when onmax/onchange snapshots a tracked value */
struct track_data {
	u64				track_val;
	bool				updated;

	unsigned int			key_len;
	void				*key;
	struct tracing_map_elt		elt;

	struct action_data		*action_data;
	struct hist_trigger_data	*hist_data;
};

/* Per-element private data hung off each tracing_map element */
struct hist_elt_data {
	char *comm;
	u64 *var_ref_vals;
	char **field_var_str;
	int n_field_var_str;
};

/* Element/key pair handed to the deferred snapshot action */
struct snapshot_context {
	struct tracing_map_elt	*elt;
	void			*key;
};

/*
 * Returns the specific division function to use if the divisor
 * is constant. This avoids extra branches when the trigger is hit.
 */
static enum hist_field_fn hist_field_get_div_fn(struct hist_field *divisor)
{
	u64 div = divisor->constant;

	/* Power-of-2 divisor: reduce the division to a shift */
	if (!(div & (div - 1)))
		return HIST_FIELD_FN_DIV_POWER2;

	/* If the divisor is too large, do a regular division */
	if (div > (1 << HIST_DIV_SHIFT))
		return HIST_FIELD_FN_DIV_NOT_POWER2;

	/* Precompute the fixed-point reciprocal used by DIV_MULT_SHIFT */
	divisor->div_multiplier = div64_u64((u64)(1 << HIST_DIV_SHIFT), div);
	return HIST_FIELD_FN_DIV_MULT_SHIFT;
}

/*
 * Free a track_data and everything it owns (key, element private
 * data and its comm buffer).  Safe to call on NULL and on partially
 * constructed objects from track_data_alloc()'s error paths.
 */
static void track_data_free(struct track_data *track_data)
{
	struct hist_elt_data *elt_data;

	if (!track_data)
		return;

	kfree(track_data->key);

	elt_data = track_data->elt.private_data;
	if (elt_data) {
		kfree(elt_data->comm);
		kfree(elt_data);
	}

	kfree(track_data);
}

/*
 * Allocate a track_data with a key_len-sized key buffer and an
 * element private-data area including a comm buffer.
 *
 * Returns the new track_data, or ERR_PTR(-ENOMEM) on any allocation
 * failure (partial allocations are released via track_data_free()).
 */
static struct track_data *track_data_alloc(unsigned int key_len,
					   struct action_data *action_data,
					   struct hist_trigger_data *hist_data)
{
	struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
	struct hist_elt_data *elt_data;

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->key = kzalloc(key_len, GFP_KERNEL);
	if (!data->key) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	data->key_len = key_len;
	data->action_data = action_data;
	data->hist_data = hist_data;

	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
	if (!elt_data) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	data->elt.private_data = elt_data;

	elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
	if (!elt_data->comm) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	return data;
}

#define HIST_PREFIX "hist:"

/* Last hist command string and its location, for error reporting */
static char *last_cmd;
static char last_cmd_loc[MAX_FILTER_STR_VAL];

/* Position of str within the last recorded command, 0 if unknown */
static int errpos(char *str)
{
	if (!str || !last_cmd)
		return 0;

	return err_pos(last_cmd, str);
}

/*
 * Record the command string (prefixed with "hist:") and, when a file
 * is given, its "hist:system:event" location for later hist_err()
 * reporting.  Replaces any previously recorded command.
 */
static void last_cmd_set(struct trace_event_file *file, char *str)
{
	const char *system = NULL, *name = NULL;
	struct trace_event_call *call;

	if (!str)
		return;

	kfree(last_cmd);

	last_cmd = kasprintf(GFP_KERNEL, HIST_PREFIX "%s", str);
	if (!last_cmd)
		return;

	if (file) {
		call = file->event_call;
		system = call->class->system;
		if (system) {
			name = trace_event_name(call);
			if (!name)
				system = NULL;
		}
	}

	if (system)
		snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, HIST_PREFIX "%s:%s", system, name);
}

/* Log a hist trigger error against the last recorded command */
static void hist_err(struct trace_array *tr, u8 err_type, u16 err_pos)
{
	if (!last_cmd)
		return;

	tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
			err_type, err_pos);
}

/* Forget the last recorded command and location */
static void hist_err_clear(void)
{
	if (last_cmd)
		last_cmd[0] = '\0';
	last_cmd_loc[0] = '\0';
}

typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
				    unsigned int *var_ref_idx);

/*
 * Fire a synthetic event by walking the tracepoint's registered probe
 * functions and invoking each with the collected variable values.
 * Nothing is done if the tracepoint is disabled or the CPU is offline.
 */
static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
			       unsigned int *var_ref_idx)
{
	struct tracepoint *tp = event->tp;

	if (unlikely(static_key_enabled(&tp->key))) {
		struct tracepoint_func *probe_func_ptr;
		synth_probe_func_t probe_func;
		void *__data;

		if (!(cpu_online(raw_smp_processor_id())))
			return;

		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;
				__data = probe_func_ptr->data;
				probe_func(__data, var_ref_vals, var_ref_idx);
			} while ((++probe_func_ptr)->func);
		}
	}
}

/* trace(...) action: generate the configured synthetic event */
static void action_trace(struct hist_trigger_data *hist_data,
			 struct tracing_map_elt *elt,
			 struct trace_buffer *buffer, void *rec,
			 struct ring_buffer_event *rbe, void *key,
			 struct action_data *data, u64 *var_ref_vals)
{
	struct synth_event *event = data->synth_event;

	trace_synth(event, var_ref_vals, data->var_ref_idx);
}

/* Entry on tr->hist_vars for a trigger that defines variables */
struct hist_var_data {
	struct list_head list;
	struct hist_trigger_data *hist_data;
};

/*
 * Value fn for the synthetic "common_timestamp" field, optionally
 * converted from ns to usecs when .usecs was requested and the trace
 * clock counts in ns.
 */
static u64 hist_field_timestamp(struct hist_field *hist_field,
				struct tracing_map_elt *elt,
				struct trace_buffer *buffer,
				struct ring_buffer_event *rbe,
				void *event)
{
	struct hist_trigger_data *hist_data = hist_field->hist_data;
	struct trace_array *tr = hist_data->event_file->tr;

	u64 ts = ring_buffer_event_time_stamp(buffer, rbe);

	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
		ts = ns2usecs(ts);

	return ts;
}

/* Value fn for the synthetic "common_cpu" field */
static u64 hist_field_cpu(struct hist_field *hist_field,
			  struct tracing_map_elt *elt,
			  struct trace_buffer *buffer,
			  struct ring_buffer_event *rbe,
			  void *event)
{
	int cpu = smp_processor_id();

	return cpu;
}

/* Value fn for the synthetic "common_comm" field */
static u64 hist_field_comm(struct hist_field *hist_field,
			   struct tracing_map_elt *elt,
			   struct trace_buffer *buffer,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	return (u64)(unsigned long)current->comm;
}

/**
 * check_field_for_var_ref - Check if a VAR_REF field references a variable
 * @hist_field: The VAR_REF field to check
 * @var_data: The hist trigger that owns the variable
 * @var_idx: The trigger variable identifier
 *
 * Check the given VAR_REF field to see whether or not it references
 * the given variable associated with the given trigger.
913 * 914 * Return: The VAR_REF field if it does reference the variable, NULL if not 915 */ 916 static struct hist_field * 917 check_field_for_var_ref(struct hist_field *hist_field, 918 struct hist_trigger_data *var_data, 919 unsigned int var_idx) 920 { 921 WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF)); 922 923 if (hist_field && hist_field->var.idx == var_idx && 924 hist_field->var.hist_data == var_data) 925 return hist_field; 926 927 return NULL; 928 } 929 930 /** 931 * find_var_ref - Check if a trigger has a reference to a trigger variable 932 * @hist_data: The hist trigger that might have a reference to the variable 933 * @var_data: The hist trigger that owns the variable 934 * @var_idx: The trigger variable identifier 935 * 936 * Check the list of var_refs[] on the first hist trigger to see 937 * whether any of them are references to the variable on the second 938 * trigger. 939 * 940 * Return: The VAR_REF field referencing the variable if so, NULL if not 941 */ 942 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data, 943 struct hist_trigger_data *var_data, 944 unsigned int var_idx) 945 { 946 struct hist_field *hist_field; 947 unsigned int i; 948 949 for (i = 0; i < hist_data->n_var_refs; i++) { 950 hist_field = hist_data->var_refs[i]; 951 if (check_field_for_var_ref(hist_field, var_data, var_idx)) 952 return hist_field; 953 } 954 955 return NULL; 956 } 957 958 /** 959 * find_any_var_ref - Check if there is a reference to a given trigger variable 960 * @hist_data: The hist trigger 961 * @var_idx: The trigger variable identifier 962 * 963 * Check to see whether the given variable is currently referenced by 964 * any other trigger. 965 * 966 * The trigger the variable is defined on is explicitly excluded - the 967 * assumption being that a self-reference doesn't prevent a trigger 968 * from being removed. 
969 * 970 * Return: The VAR_REF field referencing the variable if so, NULL if not 971 */ 972 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data, 973 unsigned int var_idx) 974 { 975 struct trace_array *tr = hist_data->event_file->tr; 976 struct hist_field *found = NULL; 977 struct hist_var_data *var_data; 978 979 list_for_each_entry(var_data, &tr->hist_vars, list) { 980 if (var_data->hist_data == hist_data) 981 continue; 982 found = find_var_ref(var_data->hist_data, hist_data, var_idx); 983 if (found) 984 break; 985 } 986 987 return found; 988 } 989 990 /** 991 * check_var_refs - Check if there is a reference to any of trigger's variables 992 * @hist_data: The hist trigger 993 * 994 * A trigger can define one or more variables. If any one of them is 995 * currently referenced by any other trigger, this function will 996 * determine that. 997 * 998 * Typically used to determine whether or not a trigger can be removed 999 * - if there are any references to a trigger's variables, it cannot. 
 *
 * Return: True if there is a reference to any of trigger's variables
 */
static bool check_var_refs(struct hist_trigger_data *hist_data)
{
	struct hist_field *field;
	bool found = false;
	int i;

	for_each_hist_field(i, hist_data) {
		field = hist_data->fields[i];
		if (field && field->flags & HIST_FIELD_FL_VAR) {
			if (find_any_var_ref(hist_data, field->var.idx)) {
				found = true;
				break;
			}
		}
	}

	return found;
}

/* Find the tr->hist_vars entry registered for this trigger, if any */
static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data, *found = NULL;

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		if (var_data->hist_data == hist_data) {
			found = var_data;
			break;
		}
	}

	return found;
}

/*
 * Recursively check whether a field or any of its operands is a
 * variable or a variable reference.  The level > 3 guard bounds the
 * recursion to the maximum supported expression depth.
 */
static bool field_has_hist_vars(struct hist_field *hist_field,
				unsigned int level)
{
	int i;

	if (level > 3)
		return false;

	if (!hist_field)
		return false;

	if (hist_field->flags & HIST_FIELD_FL_VAR ||
	    hist_field->flags & HIST_FIELD_FL_VAR_REF)
		return true;

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
		struct hist_field *operand;

		operand = hist_field->operands[i];
		if (field_has_hist_vars(operand, level + 1))
			return true;
	}

	return false;
}

/* True if any field of the trigger defines or references a variable */
static bool has_hist_vars(struct hist_trigger_data *hist_data)
{
	struct hist_field *hist_field;
	int i;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (field_has_hist_vars(hist_field, 0))
			return true;
	}

	return false;
}

/*
 * Register this trigger on tr->hist_vars so its variables can be
 * found by other triggers.  Takes a reference on the trace array;
 * idempotent if the trigger is already registered.
 *
 * Returns 0 on success, -ENODEV if the trace array is going away,
 * -ENOMEM on allocation failure.
 */
static int save_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data;

	var_data = find_hist_vars(hist_data);
	if (var_data)
		return 0;

	if (tracing_check_open_get_tr(tr))
		return -ENODEV;

	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
	if (!var_data) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	var_data->hist_data = hist_data;
	list_add(&var_data->list, &tr->hist_vars);

	return 0;
}

/*
 * Undo save_hist_vars(): unregister the trigger from tr->hist_vars
 * and drop the trace array reference.  Refuses (with a WARN) if any
 * other trigger still references one of this trigger's variables.
 */
static void remove_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data;

	var_data = find_hist_vars(hist_data);
	if (!var_data)
		return;

	if (WARN_ON(check_var_refs(hist_data)))
		return;

	list_del(&var_data->list);

	kfree(var_data);

	trace_array_put(tr);
}

/* Find the VAR field named var_name on this trigger, NULL if none */
static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
					 const char *var_name)
{
	struct hist_field *hist_field, *found = NULL;
	int i;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
		    strcmp(hist_field->var.name, var_name) == 0) {
			found = hist_field;
			break;
		}
	}

	return found;
}

/*
 * Find the variable var_name on this trigger first, then on any other
 * hist trigger attached to the same event file.  Caller must hold
 * event_mutex (the trigger list is walked unlocked).
 */
static struct hist_field *find_var(struct hist_trigger_data *hist_data,
				   struct trace_event_file *file,
				   const char *var_name)
{
	struct hist_trigger_data *test_data;
	struct event_trigger_data *test;
	struct hist_field *hist_field;

	lockdep_assert_held(&event_mutex);

	hist_field = find_var_field(hist_data, var_name);
	if (hist_field)
		return hist_field;

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			test_data = test->private_data;
			hist_field = find_var_field(test_data, var_name);
			if (hist_field)
				return hist_field;
		}
	}

	return NULL;
}
1164 static struct trace_event_file *find_var_file(struct trace_array *tr, 1165 char *system, 1166 char *event_name, 1167 char *var_name) 1168 { 1169 struct hist_trigger_data *var_hist_data; 1170 struct hist_var_data *var_data; 1171 struct trace_event_file *file, *found = NULL; 1172 1173 if (system) 1174 return find_event_file(tr, system, event_name); 1175 1176 list_for_each_entry(var_data, &tr->hist_vars, list) { 1177 var_hist_data = var_data->hist_data; 1178 file = var_hist_data->event_file; 1179 if (file == found) 1180 continue; 1181 1182 if (find_var_field(var_hist_data, var_name)) { 1183 if (found) { 1184 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name)); 1185 return NULL; 1186 } 1187 1188 found = file; 1189 } 1190 } 1191 1192 return found; 1193 } 1194 1195 static struct hist_field *find_file_var(struct trace_event_file *file, 1196 const char *var_name) 1197 { 1198 struct hist_trigger_data *test_data; 1199 struct event_trigger_data *test; 1200 struct hist_field *hist_field; 1201 1202 lockdep_assert_held(&event_mutex); 1203 1204 list_for_each_entry(test, &file->triggers, list) { 1205 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 1206 test_data = test->private_data; 1207 hist_field = find_var_field(test_data, var_name); 1208 if (hist_field) 1209 return hist_field; 1210 } 1211 } 1212 1213 return NULL; 1214 } 1215 1216 static struct hist_field * 1217 find_match_var(struct hist_trigger_data *hist_data, char *var_name) 1218 { 1219 struct trace_array *tr = hist_data->event_file->tr; 1220 struct hist_field *hist_field, *found = NULL; 1221 struct trace_event_file *file; 1222 unsigned int i; 1223 1224 for (i = 0; i < hist_data->n_actions; i++) { 1225 struct action_data *data = hist_data->actions[i]; 1226 1227 if (data->handler == HANDLER_ONMATCH) { 1228 char *system = data->match_data.event_system; 1229 char *event_name = data->match_data.event; 1230 1231 file = find_var_file(tr, system, event_name, var_name); 1232 if (!file) 1233 continue; 1234 hist_field 
= find_file_var(file, var_name); 1235 if (hist_field) { 1236 if (found) { 1237 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, 1238 errpos(var_name)); 1239 return ERR_PTR(-EINVAL); 1240 } 1241 1242 found = hist_field; 1243 } 1244 } 1245 } 1246 return found; 1247 } 1248 1249 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data, 1250 char *system, 1251 char *event_name, 1252 char *var_name) 1253 { 1254 struct trace_array *tr = hist_data->event_file->tr; 1255 struct hist_field *hist_field = NULL; 1256 struct trace_event_file *file; 1257 1258 if (!system || !event_name) { 1259 hist_field = find_match_var(hist_data, var_name); 1260 if (IS_ERR(hist_field)) 1261 return NULL; 1262 if (hist_field) 1263 return hist_field; 1264 } 1265 1266 file = find_var_file(tr, system, event_name, var_name); 1267 if (!file) 1268 return NULL; 1269 1270 hist_field = find_file_var(file, var_name); 1271 1272 return hist_field; 1273 } 1274 1275 static u64 hist_field_var_ref(struct hist_field *hist_field, 1276 struct tracing_map_elt *elt, 1277 struct trace_buffer *buffer, 1278 struct ring_buffer_event *rbe, 1279 void *event) 1280 { 1281 struct hist_elt_data *elt_data; 1282 u64 var_val = 0; 1283 1284 if (WARN_ON_ONCE(!elt)) 1285 return var_val; 1286 1287 elt_data = elt->private_data; 1288 var_val = elt_data->var_ref_vals[hist_field->var_ref_idx]; 1289 1290 return var_val; 1291 } 1292 1293 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key, 1294 u64 *var_ref_vals, bool self) 1295 { 1296 struct hist_trigger_data *var_data; 1297 struct tracing_map_elt *var_elt; 1298 struct hist_field *hist_field; 1299 unsigned int i, var_idx; 1300 bool resolved = true; 1301 u64 var_val = 0; 1302 1303 for (i = 0; i < hist_data->n_var_refs; i++) { 1304 hist_field = hist_data->var_refs[i]; 1305 var_idx = hist_field->var.idx; 1306 var_data = hist_field->var.hist_data; 1307 1308 if (var_data == NULL) { 1309 resolved = false; 1310 break; 1311 } 1312 1313 if ((self && var_data != 
hist_data) || 1314 (!self && var_data == hist_data)) 1315 continue; 1316 1317 var_elt = tracing_map_lookup(var_data->map, key); 1318 if (!var_elt) { 1319 resolved = false; 1320 break; 1321 } 1322 1323 if (!tracing_map_var_set(var_elt, var_idx)) { 1324 resolved = false; 1325 break; 1326 } 1327 1328 if (self || !hist_field->read_once) 1329 var_val = tracing_map_read_var(var_elt, var_idx); 1330 else 1331 var_val = tracing_map_read_var_once(var_elt, var_idx); 1332 1333 var_ref_vals[i] = var_val; 1334 } 1335 1336 return resolved; 1337 } 1338 1339 static const char *hist_field_name(struct hist_field *field, 1340 unsigned int level) 1341 { 1342 const char *field_name = ""; 1343 1344 if (WARN_ON_ONCE(!field)) 1345 return field_name; 1346 1347 if (level > 1) 1348 return field_name; 1349 1350 if (field->field) 1351 field_name = field->field->name; 1352 else if (field->flags & HIST_FIELD_FL_LOG2 || 1353 field->flags & HIST_FIELD_FL_ALIAS || 1354 field->flags & HIST_FIELD_FL_BUCKET) 1355 field_name = hist_field_name(field->operands[0], ++level); 1356 else if (field->flags & HIST_FIELD_FL_CPU) 1357 field_name = "common_cpu"; 1358 else if (field->flags & HIST_FIELD_FL_COMM) 1359 field_name = "common_comm"; 1360 else if (field->flags & HIST_FIELD_FL_EXPR || 1361 field->flags & HIST_FIELD_FL_VAR_REF) { 1362 if (field->system) { 1363 static char full_name[MAX_FILTER_STR_VAL]; 1364 1365 strcat(full_name, field->system); 1366 strcat(full_name, "."); 1367 strcat(full_name, field->event_name); 1368 strcat(full_name, "."); 1369 strcat(full_name, field->name); 1370 field_name = full_name; 1371 } else 1372 field_name = field->name; 1373 } else if (field->flags & HIST_FIELD_FL_TIMESTAMP) 1374 field_name = "common_timestamp"; 1375 else if (field->flags & HIST_FIELD_FL_STACKTRACE) { 1376 field_name = "common_stacktrace"; 1377 } else if (field->flags & HIST_FIELD_FL_HITCOUNT) 1378 field_name = "hitcount"; 1379 1380 if (field_name == NULL) 1381 field_name = ""; 1382 1383 return field_name; 
1384 } 1385 1386 static enum hist_field_fn select_value_fn(int field_size, int field_is_signed) 1387 { 1388 switch (field_size) { 1389 case 8: 1390 if (field_is_signed) 1391 return HIST_FIELD_FN_S64; 1392 else 1393 return HIST_FIELD_FN_U64; 1394 case 4: 1395 if (field_is_signed) 1396 return HIST_FIELD_FN_S32; 1397 else 1398 return HIST_FIELD_FN_U32; 1399 case 2: 1400 if (field_is_signed) 1401 return HIST_FIELD_FN_S16; 1402 else 1403 return HIST_FIELD_FN_U16; 1404 case 1: 1405 if (field_is_signed) 1406 return HIST_FIELD_FN_S8; 1407 else 1408 return HIST_FIELD_FN_U8; 1409 } 1410 1411 return HIST_FIELD_FN_NOP; 1412 } 1413 1414 static int parse_map_size(char *str) 1415 { 1416 unsigned long size, map_bits; 1417 int ret; 1418 1419 ret = kstrtoul(str, 0, &size); 1420 if (ret) 1421 goto out; 1422 1423 map_bits = ilog2(roundup_pow_of_two(size)); 1424 if (map_bits < TRACING_MAP_BITS_MIN || 1425 map_bits > TRACING_MAP_BITS_MAX) 1426 ret = -EINVAL; 1427 else 1428 ret = map_bits; 1429 out: 1430 return ret; 1431 } 1432 1433 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs) 1434 { 1435 unsigned int i; 1436 1437 if (!attrs) 1438 return; 1439 1440 for (i = 0; i < attrs->n_assignments; i++) 1441 kfree(attrs->assignment_str[i]); 1442 1443 for (i = 0; i < attrs->n_actions; i++) 1444 kfree(attrs->action_str[i]); 1445 1446 kfree(attrs->name); 1447 kfree(attrs->sort_key_str); 1448 kfree(attrs->keys_str); 1449 kfree(attrs->vals_str); 1450 kfree(attrs->clock); 1451 kfree(attrs); 1452 } 1453 1454 static int parse_action(char *str, struct hist_trigger_attrs *attrs) 1455 { 1456 int ret = -EINVAL; 1457 1458 if (attrs->n_actions >= HIST_ACTIONS_MAX) 1459 return ret; 1460 1461 if ((str_has_prefix(str, "onmatch(")) || 1462 (str_has_prefix(str, "onmax(")) || 1463 (str_has_prefix(str, "onchange("))) { 1464 attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL); 1465 if (!attrs->action_str[attrs->n_actions]) { 1466 ret = -ENOMEM; 1467 return ret; 1468 } 1469 
attrs->n_actions++; 1470 ret = 0; 1471 } 1472 return ret; 1473 } 1474 1475 static int parse_assignment(struct trace_array *tr, 1476 char *str, struct hist_trigger_attrs *attrs) 1477 { 1478 int len, ret = 0; 1479 1480 if ((len = str_has_prefix(str, "key=")) || 1481 (len = str_has_prefix(str, "keys="))) { 1482 attrs->keys_str = kstrdup(str + len, GFP_KERNEL); 1483 if (!attrs->keys_str) { 1484 ret = -ENOMEM; 1485 goto out; 1486 } 1487 } else if ((len = str_has_prefix(str, "val=")) || 1488 (len = str_has_prefix(str, "vals=")) || 1489 (len = str_has_prefix(str, "values="))) { 1490 attrs->vals_str = kstrdup(str + len, GFP_KERNEL); 1491 if (!attrs->vals_str) { 1492 ret = -ENOMEM; 1493 goto out; 1494 } 1495 } else if ((len = str_has_prefix(str, "sort="))) { 1496 attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL); 1497 if (!attrs->sort_key_str) { 1498 ret = -ENOMEM; 1499 goto out; 1500 } 1501 } else if (str_has_prefix(str, "name=")) { 1502 attrs->name = kstrdup(str, GFP_KERNEL); 1503 if (!attrs->name) { 1504 ret = -ENOMEM; 1505 goto out; 1506 } 1507 } else if ((len = str_has_prefix(str, "clock="))) { 1508 str += len; 1509 1510 str = strstrip(str); 1511 attrs->clock = kstrdup(str, GFP_KERNEL); 1512 if (!attrs->clock) { 1513 ret = -ENOMEM; 1514 goto out; 1515 } 1516 } else if ((len = str_has_prefix(str, "size="))) { 1517 int map_bits = parse_map_size(str + len); 1518 1519 if (map_bits < 0) { 1520 ret = map_bits; 1521 goto out; 1522 } 1523 attrs->map_bits = map_bits; 1524 } else { 1525 char *assignment; 1526 1527 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) { 1528 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str)); 1529 ret = -EINVAL; 1530 goto out; 1531 } 1532 1533 assignment = kstrdup(str, GFP_KERNEL); 1534 if (!assignment) { 1535 ret = -ENOMEM; 1536 goto out; 1537 } 1538 1539 attrs->assignment_str[attrs->n_assignments++] = assignment; 1540 } 1541 out: 1542 return ret; 1543 } 1544 1545 static struct hist_trigger_attrs * 1546 parse_hist_trigger_attrs(struct trace_array 
*tr, char *trigger_str) 1547 { 1548 struct hist_trigger_attrs *attrs; 1549 int ret = 0; 1550 1551 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 1552 if (!attrs) 1553 return ERR_PTR(-ENOMEM); 1554 1555 while (trigger_str) { 1556 char *str = strsep(&trigger_str, ":"); 1557 char *rhs; 1558 1559 rhs = strchr(str, '='); 1560 if (rhs) { 1561 if (!strlen(++rhs)) { 1562 ret = -EINVAL; 1563 hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str)); 1564 goto free; 1565 } 1566 ret = parse_assignment(tr, str, attrs); 1567 if (ret) 1568 goto free; 1569 } else if (strcmp(str, "nohitcount") == 0 || 1570 strcmp(str, "NOHC") == 0) 1571 attrs->no_hitcount = true; 1572 else if (strcmp(str, "pause") == 0) 1573 attrs->pause = true; 1574 else if ((strcmp(str, "cont") == 0) || 1575 (strcmp(str, "continue") == 0)) 1576 attrs->cont = true; 1577 else if (strcmp(str, "clear") == 0) 1578 attrs->clear = true; 1579 else { 1580 ret = parse_action(str, attrs); 1581 if (ret) 1582 goto free; 1583 } 1584 } 1585 1586 if (!attrs->keys_str) { 1587 ret = -EINVAL; 1588 goto free; 1589 } 1590 1591 if (!attrs->clock) { 1592 attrs->clock = kstrdup("global", GFP_KERNEL); 1593 if (!attrs->clock) { 1594 ret = -ENOMEM; 1595 goto free; 1596 } 1597 } 1598 1599 return attrs; 1600 free: 1601 destroy_hist_trigger_attrs(attrs); 1602 1603 return ERR_PTR(ret); 1604 } 1605 1606 static inline void save_comm(char *comm, struct task_struct *task) 1607 { 1608 if (!task->pid) { 1609 strcpy(comm, "<idle>"); 1610 return; 1611 } 1612 1613 if (WARN_ON_ONCE(task->pid < 0)) { 1614 strcpy(comm, "<XXX>"); 1615 return; 1616 } 1617 1618 strscpy(comm, task->comm, TASK_COMM_LEN); 1619 } 1620 1621 static void hist_elt_data_free(struct hist_elt_data *elt_data) 1622 { 1623 unsigned int i; 1624 1625 for (i = 0; i < elt_data->n_field_var_str; i++) 1626 kfree(elt_data->field_var_str[i]); 1627 1628 kfree(elt_data->field_var_str); 1629 1630 kfree(elt_data->comm); 1631 kfree(elt_data); 1632 } 1633 1634 static void hist_trigger_elt_data_free(struct 
tracing_map_elt *elt) 1635 { 1636 struct hist_elt_data *elt_data = elt->private_data; 1637 1638 hist_elt_data_free(elt_data); 1639 } 1640 1641 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt) 1642 { 1643 struct hist_trigger_data *hist_data = elt->map->private_data; 1644 unsigned int size = TASK_COMM_LEN; 1645 struct hist_elt_data *elt_data; 1646 struct hist_field *hist_field; 1647 unsigned int i, n_str; 1648 1649 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); 1650 if (!elt_data) 1651 return -ENOMEM; 1652 1653 for_each_hist_field(i, hist_data) { 1654 hist_field = hist_data->fields[i]; 1655 1656 if (hist_field->flags & HIST_FIELD_FL_EXECNAME) { 1657 elt_data->comm = kzalloc(size, GFP_KERNEL); 1658 if (!elt_data->comm) { 1659 kfree(elt_data); 1660 return -ENOMEM; 1661 } 1662 break; 1663 } 1664 } 1665 1666 n_str = hist_data->n_field_var_str + hist_data->n_save_var_str + 1667 hist_data->n_var_str; 1668 if (n_str > SYNTH_FIELDS_MAX) { 1669 hist_elt_data_free(elt_data); 1670 return -EINVAL; 1671 } 1672 1673 BUILD_BUG_ON(STR_VAR_LEN_MAX & (sizeof(u64) - 1)); 1674 1675 size = STR_VAR_LEN_MAX; 1676 1677 elt_data->field_var_str = kcalloc(n_str, sizeof(char *), GFP_KERNEL); 1678 if (!elt_data->field_var_str) { 1679 hist_elt_data_free(elt_data); 1680 return -EINVAL; 1681 } 1682 elt_data->n_field_var_str = n_str; 1683 1684 for (i = 0; i < n_str; i++) { 1685 elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL); 1686 if (!elt_data->field_var_str[i]) { 1687 hist_elt_data_free(elt_data); 1688 return -ENOMEM; 1689 } 1690 } 1691 1692 elt->private_data = elt_data; 1693 1694 return 0; 1695 } 1696 1697 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt) 1698 { 1699 struct hist_elt_data *elt_data = elt->private_data; 1700 1701 if (elt_data->comm) 1702 save_comm(elt_data->comm, current); 1703 } 1704 1705 static const struct tracing_map_ops hist_trigger_elt_data_ops = { 1706 .elt_alloc = hist_trigger_elt_data_alloc, 1707 .elt_free = 
hist_trigger_elt_data_free, 1708 .elt_init = hist_trigger_elt_data_init, 1709 }; 1710 1711 static const char *get_hist_field_flags(struct hist_field *hist_field) 1712 { 1713 const char *flags_str = NULL; 1714 1715 if (hist_field->flags & HIST_FIELD_FL_HEX) 1716 flags_str = "hex"; 1717 else if (hist_field->flags & HIST_FIELD_FL_SYM) 1718 flags_str = "sym"; 1719 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET) 1720 flags_str = "sym-offset"; 1721 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME) 1722 flags_str = "execname"; 1723 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL) 1724 flags_str = "syscall"; 1725 else if (hist_field->flags & HIST_FIELD_FL_LOG2) 1726 flags_str = "log2"; 1727 else if (hist_field->flags & HIST_FIELD_FL_BUCKET) 1728 flags_str = "buckets"; 1729 else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS) 1730 flags_str = "usecs"; 1731 else if (hist_field->flags & HIST_FIELD_FL_PERCENT) 1732 flags_str = "percent"; 1733 else if (hist_field->flags & HIST_FIELD_FL_GRAPH) 1734 flags_str = "graph"; 1735 else if (hist_field->flags & HIST_FIELD_FL_STACKTRACE) 1736 flags_str = "stacktrace"; 1737 1738 return flags_str; 1739 } 1740 1741 static void expr_field_str(struct hist_field *field, char *expr) 1742 { 1743 if (field->flags & HIST_FIELD_FL_VAR_REF) 1744 strcat(expr, "$"); 1745 else if (field->flags & HIST_FIELD_FL_CONST) { 1746 char str[HIST_CONST_DIGITS_MAX]; 1747 1748 snprintf(str, HIST_CONST_DIGITS_MAX, "%llu", field->constant); 1749 strcat(expr, str); 1750 } 1751 1752 strcat(expr, hist_field_name(field, 0)); 1753 1754 if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) { 1755 const char *flags_str = get_hist_field_flags(field); 1756 1757 if (flags_str) { 1758 strcat(expr, "."); 1759 strcat(expr, flags_str); 1760 } 1761 } 1762 } 1763 1764 static char *expr_str(struct hist_field *field, unsigned int level) 1765 { 1766 char *expr; 1767 1768 if (level > 1) 1769 return NULL; 1770 1771 expr = kzalloc(MAX_FILTER_STR_VAL, 
GFP_KERNEL); 1772 if (!expr) 1773 return NULL; 1774 1775 if (!field->operands[0]) { 1776 expr_field_str(field, expr); 1777 return expr; 1778 } 1779 1780 if (field->operator == FIELD_OP_UNARY_MINUS) { 1781 char *subexpr; 1782 1783 strcat(expr, "-("); 1784 subexpr = expr_str(field->operands[0], ++level); 1785 if (!subexpr) { 1786 kfree(expr); 1787 return NULL; 1788 } 1789 strcat(expr, subexpr); 1790 strcat(expr, ")"); 1791 1792 kfree(subexpr); 1793 1794 return expr; 1795 } 1796 1797 expr_field_str(field->operands[0], expr); 1798 1799 switch (field->operator) { 1800 case FIELD_OP_MINUS: 1801 strcat(expr, "-"); 1802 break; 1803 case FIELD_OP_PLUS: 1804 strcat(expr, "+"); 1805 break; 1806 case FIELD_OP_DIV: 1807 strcat(expr, "/"); 1808 break; 1809 case FIELD_OP_MULT: 1810 strcat(expr, "*"); 1811 break; 1812 default: 1813 kfree(expr); 1814 return NULL; 1815 } 1816 1817 expr_field_str(field->operands[1], expr); 1818 1819 return expr; 1820 } 1821 1822 /* 1823 * If field_op != FIELD_OP_NONE, *sep points to the root operator 1824 * of the expression tree to be evaluated. 1825 */ 1826 static int contains_operator(char *str, char **sep) 1827 { 1828 enum field_op_id field_op = FIELD_OP_NONE; 1829 char *minus_op, *plus_op, *div_op, *mult_op; 1830 1831 1832 /* 1833 * Report the last occurrence of the operators first, so that the 1834 * expression is evaluated left to right. This is important since 1835 * subtraction and division are not associative. 1836 * 1837 * e.g 1838 * 64/8/4/2 is 1, i.e 64/8/4/2 = ((64/8)/4)/2 1839 * 14-7-5-2 is 0, i.e 14-7-5-2 = ((14-7)-5)-2 1840 */ 1841 1842 /* 1843 * First, find lower precedence addition and subtraction 1844 * since the expression will be evaluated recursively. 1845 */ 1846 minus_op = strrchr(str, '-'); 1847 if (minus_op) { 1848 /* 1849 * Unary minus is not supported in sub-expressions. If 1850 * present, it is always the next root operator. 
1851 */ 1852 if (minus_op == str) { 1853 field_op = FIELD_OP_UNARY_MINUS; 1854 goto out; 1855 } 1856 1857 field_op = FIELD_OP_MINUS; 1858 } 1859 1860 plus_op = strrchr(str, '+'); 1861 if (plus_op || minus_op) { 1862 /* 1863 * For operators of the same precedence use to rightmost as the 1864 * root, so that the expression is evaluated left to right. 1865 */ 1866 if (plus_op > minus_op) 1867 field_op = FIELD_OP_PLUS; 1868 goto out; 1869 } 1870 1871 /* 1872 * Multiplication and division have higher precedence than addition and 1873 * subtraction. 1874 */ 1875 div_op = strrchr(str, '/'); 1876 if (div_op) 1877 field_op = FIELD_OP_DIV; 1878 1879 mult_op = strrchr(str, '*'); 1880 /* 1881 * For operators of the same precedence use to rightmost as the 1882 * root, so that the expression is evaluated left to right. 1883 */ 1884 if (mult_op > div_op) 1885 field_op = FIELD_OP_MULT; 1886 1887 out: 1888 if (sep) { 1889 switch (field_op) { 1890 case FIELD_OP_UNARY_MINUS: 1891 case FIELD_OP_MINUS: 1892 *sep = minus_op; 1893 break; 1894 case FIELD_OP_PLUS: 1895 *sep = plus_op; 1896 break; 1897 case FIELD_OP_DIV: 1898 *sep = div_op; 1899 break; 1900 case FIELD_OP_MULT: 1901 *sep = mult_op; 1902 break; 1903 case FIELD_OP_NONE: 1904 default: 1905 *sep = NULL; 1906 break; 1907 } 1908 } 1909 1910 return field_op; 1911 } 1912 1913 static void get_hist_field(struct hist_field *hist_field) 1914 { 1915 hist_field->ref++; 1916 } 1917 1918 static void __destroy_hist_field(struct hist_field *hist_field) 1919 { 1920 if (--hist_field->ref > 1) 1921 return; 1922 1923 kfree(hist_field->var.name); 1924 kfree(hist_field->name); 1925 1926 /* Can likely be a const */ 1927 kfree_const(hist_field->type); 1928 1929 kfree(hist_field->system); 1930 kfree(hist_field->event_name); 1931 1932 kfree(hist_field); 1933 } 1934 1935 static void destroy_hist_field(struct hist_field *hist_field, 1936 unsigned int level) 1937 { 1938 unsigned int i; 1939 1940 if (level > 3) 1941 return; 1942 1943 if (!hist_field) 1944 
		return;

	if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
		return; /* var refs will be destroyed separately */

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
		destroy_hist_field(hist_field->operands[i], level + 1);

	__destroy_hist_field(hist_field);
}

/*
 * Allocate and initialize a hist_field for the given trace event field
 * and flags, selecting the fetch function, size and type string.
 * Synthetic fields (timestamp, cpu, comm, hitcount, const, stacktrace,
 * log2/bucket wrappers) are handled before the ftrace field cases.
 * Returns NULL on failure; on success the caller owns the field and
 * frees it via destroy_hist_field().
 */
static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
					    struct ftrace_event_field *field,
					    unsigned long flags,
					    char *var_name)
{
	struct hist_field *hist_field;

	if (field && is_function_field(field))
		return NULL;

	hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
	if (!hist_field)
		return NULL;

	hist_field->ref = 1;

	hist_field->hist_data = hist_data;

	if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
		goto out; /* caller will populate */

	if (flags & HIST_FIELD_FL_VAR_REF) {
		hist_field->fn_num = HIST_FIELD_FN_VAR_REF;
		goto out;
	}

	if (flags & HIST_FIELD_FL_HITCOUNT) {
		hist_field->fn_num = HIST_FIELD_FN_COUNTER;
		hist_field->size = sizeof(u64);
		/* string literal type: freed safely via kfree_const() */
		hist_field->type = "u64";
		goto out;
	}

	if (flags & HIST_FIELD_FL_CONST) {
		hist_field->fn_num = HIST_FIELD_FN_CONST;
		hist_field->size = sizeof(u64);
		hist_field->type = kstrdup("u64", GFP_KERNEL);
		if (!hist_field->type)
			goto free;
		goto out;
	}

	if (flags & HIST_FIELD_FL_STACKTRACE) {
		if (field)
			hist_field->fn_num = HIST_FIELD_FN_STACK;
		else
			hist_field->fn_num = HIST_FIELD_FN_NOP;
		hist_field->size = HIST_STACKTRACE_SIZE;
		hist_field->type = kstrdup_const("unsigned long[]", GFP_KERNEL);
		if (!hist_field->type)
			goto free;
		goto out;
	}

	if (flags & (HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET)) {
		/* wrap the underlying field as operand 0 */
		unsigned long fl = flags & ~(HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET);
		hist_field->fn_num = flags & HIST_FIELD_FL_LOG2 ? HIST_FIELD_FN_LOG2 :
			HIST_FIELD_FN_BUCKET;
		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
		if (!hist_field->operands[0])
			goto free;
		hist_field->size = hist_field->operands[0]->size;
		hist_field->type = kstrdup_const(hist_field->operands[0]->type, GFP_KERNEL);
		if (!hist_field->type)
			goto free;
		goto out;
	}

	if (flags & HIST_FIELD_FL_TIMESTAMP) {
		hist_field->fn_num = HIST_FIELD_FN_TIMESTAMP;
		hist_field->size = sizeof(u64);
		hist_field->type = "u64";
		goto out;
	}

	if (flags & HIST_FIELD_FL_CPU) {
		hist_field->fn_num = HIST_FIELD_FN_CPU;
		hist_field->size = sizeof(int);
		hist_field->type = "unsigned int";
		goto out;
	}

	if (flags & HIST_FIELD_FL_COMM) {
		hist_field->fn_num = HIST_FIELD_FN_COMM;
		hist_field->size = MAX_FILTER_STR_VAL;
		hist_field->type = "char[]";
		goto out;
	}

	if (WARN_ON_ONCE(!field))
		goto out;

	/* Pointers to strings are just pointers and dangerous to dereference */
	if (is_string_field(field) &&
	    (field->filter_type != FILTER_PTR_STRING)) {
		flags |= HIST_FIELD_FL_STRING;

		hist_field->size = MAX_FILTER_STR_VAL;
		hist_field->type = kstrdup_const(field->type, GFP_KERNEL);
		if (!hist_field->type)
			goto free;

		if (field->filter_type == FILTER_STATIC_STRING) {
			hist_field->fn_num = HIST_FIELD_FN_STRING;
			hist_field->size = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING) {
			hist_field->fn_num = HIST_FIELD_FN_DYNSTRING;
		} else if (field->filter_type == FILTER_RDYN_STRING)
			hist_field->fn_num = HIST_FIELD_FN_RELDYNSTRING;
		else
			hist_field->fn_num = HIST_FIELD_FN_PSTRING;
	} else if (field->filter_type == FILTER_STACKTRACE) {
		flags |= HIST_FIELD_FL_STACKTRACE;

		hist_field->size = MAX_FILTER_STR_VAL;
		hist_field->type = kstrdup_const(field->type, GFP_KERNEL);
		if (!hist_field->type)
			goto free;

		hist_field->fn_num = HIST_FIELD_FN_STACK;
	} else {
		/* plain numeric field */
		hist_field->size = field->size;
		hist_field->is_signed = field->is_signed;
		hist_field->type = kstrdup_const(field->type, GFP_KERNEL);
		if (!hist_field->type)
			goto free;

		hist_field->fn_num = select_value_fn(field->size,
						     field->is_signed);
		if (hist_field->fn_num == HIST_FIELD_FN_NOP) {
			destroy_hist_field(hist_field, 0);
			return NULL;
		}
	}
 out:
	hist_field->field = field;
	hist_field->flags = flags;

	if (var_name) {
		hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
		if (!hist_field->var.name)
			goto free;
	}

	return hist_field;
 free:
	destroy_hist_field(hist_field, 0);
	return NULL;
}

/*
 * Free all of a trigger's fields, then its variable references (which
 * destroy_hist_field() deliberately skipped).
 */
static void destroy_hist_fields(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < HIST_FIELDS_MAX; i++) {
		if (hist_data->fields[i]) {
			destroy_hist_field(hist_data->fields[i], 0);
			hist_data->fields[i] = NULL;
		}
	}

	for (i = 0; i < hist_data->n_var_refs; i++) {
		WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
		__destroy_hist_field(hist_data->var_refs[i]);
		hist_data->var_refs[i] = NULL;
	}
}

/*
 * Initialize a VAR_REF field from the VAR field it refers to, copying
 * index, size, signedness and timestamp flags, and duplicating the
 * system/event/name/type strings.  On failure everything allocated
 * here is freed and an errno is returned.
 */
static int init_var_ref(struct hist_field *ref_field,
			struct hist_field *var_field,
			char *system, char *event_name)
{
	int err = 0;

	ref_field->var.idx = var_field->var.idx;
	ref_field->var.hist_data = var_field->hist_data;
	ref_field->size = var_field->size;
	ref_field->is_signed = var_field->is_signed;
	ref_field->flags |= var_field->flags &
		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);

	if (system) {
		ref_field->system = kstrdup(system, GFP_KERNEL);
		if (!ref_field->system)
			return -ENOMEM;
	}

	if (event_name) {
		ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
		if (!ref_field->event_name) {
			err = -ENOMEM;
			goto free;
		}
	}

	if (var_field->var.name) {
		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
		if (!ref_field->name) {
			err = -ENOMEM;
			goto free;
		}
	} else if (var_field->name) {
		ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
		if (!ref_field->name) {
			err = -ENOMEM;
			goto free;
		}
	}

	ref_field->type = kstrdup_const(var_field->type, GFP_KERNEL);
	if (!ref_field->type) {
		err = -ENOMEM;
		goto free;
	}
 out:
	return err;
 free:
	kfree(ref_field->system);
	ref_field->system = NULL;
	kfree(ref_field->event_name);
	ref_field->event_name = NULL;
	kfree(ref_field->name);
	ref_field->name = NULL;

	goto out;
}

/* Index of the ref to var_field in hist_data->var_refs[], or -ENOENT */
static int find_var_ref_idx(struct hist_trigger_data *hist_data,
			    struct hist_field *var_field)
{
	struct hist_field *ref_field;
	int i;

	for (i = 0; i < hist_data->n_var_refs; i++) {
		ref_field = hist_data->var_refs[i];
		if (ref_field->var.idx == var_field->var.idx &&
		    ref_field->var.hist_data == var_field->hist_data)
			return i;
	}

	return -ENOENT;
}

/**
 * create_var_ref - Create a variable reference and attach it to trigger
 * @hist_data: The trigger that will be referencing the variable
 * @var_field: The VAR field to create a reference to
 * @system: The optional system string
 * @event_name: The optional event_name string
 *
 * Given a variable hist_field, create a VAR_REF hist_field that
 * represents a reference to it.
 *
 * This function also adds the reference to the trigger that
 * now references the variable.
 *
 * Return: The VAR_REF field if successful, NULL if not
 */
static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
					 struct hist_field *var_field,
					 char *system, char *event_name)
{
	unsigned long flags = HIST_FIELD_FL_VAR_REF;
	struct hist_field *ref_field;
	int i;

	/* Check if the variable already exists */
	for (i = 0; i < hist_data->n_var_refs; i++) {
		ref_field = hist_data->var_refs[i];
		if (ref_field->var.idx == var_field->var.idx &&
		    ref_field->var.hist_data == var_field->hist_data) {
			/* reuse the existing ref, bumping its refcount */
			get_hist_field(ref_field);
			return ref_field;
		}
	}
	/* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */
	if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX)
		return NULL;
	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
	if (ref_field) {
		if (init_var_ref(ref_field, var_field, system, event_name)) {
			destroy_hist_field(ref_field, 0);
			return NULL;
		}

		hist_data->var_refs[hist_data->n_var_refs] = ref_field;
		ref_field->var_ref_idx = hist_data->n_var_refs++;
	}

	return ref_field;
}

/* A variable reference is '$' followed by at least one character */
static bool is_var_ref(char *var_name)
{
	if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
		return false;

	return true;
}

/*
 * Map a local variable name back to the plain event field it was
 * assigned from, if the assignment was a simple field (no expression
 * or nested variable reference).  Returns NULL otherwise.
 */
static char *field_name_from_var(struct hist_trigger_data *hist_data,
				 char *var_name)
{
	char *name, *field;
	unsigned int i;

	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
		name = hist_data->attrs->var_defs.name[i];

		if (strcmp(var_name, name) == 0) {
			field = hist_data->attrs->var_defs.expr[i];
			if (contains_operator(field, NULL) || is_var_ref(field))
				continue;
			return field;
		}
	}

	return NULL;
}

/*
 * If '$var_name' names a variable defined locally on this trigger's
 * own event (optionally qualified by matching system/event), return
 * the underlying field name; NULL otherwise.
 */
static char *local_field_var_ref(struct hist_trigger_data *hist_data,
				 char *system, char *event_name,
				 char *var_name)
{
	struct trace_event_call *call;

	if (system && event_name) {
		call = hist_data->event_file->event_call;

		if (strcmp(system, call->class->system) != 0)
			return NULL;

		if (strcmp(event_name, trace_event_name(call)) != 0)
			return NULL;
	}

	/* system and event_name must be given together or not at all */
	if (!!system != !!event_name)
		return NULL;

	if (!is_var_ref(var_name))
		return NULL;

	var_name++;	/* skip the leading '$' */

	return field_name_from_var(hist_data, var_name);
}

/*
 * Parse a '$var' token: resolve the variable and attach a VAR_REF
 * field for it to this trigger.  Logs HIST_ERR_VAR_NOT_FOUND on
 * failure and returns NULL.
 */
static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
					char *system, char *event_name,
					char *var_name)
{
	struct hist_field *var_field = NULL, *ref_field = NULL;
	struct trace_array *tr = hist_data->event_file->tr;

	if (!is_var_ref(var_name))
		return NULL;

	var_name++;	/* skip the leading '$' */

	var_field = find_event_var(hist_data, system, event_name, var_name);
	if (var_field)
		ref_field = create_var_ref(hist_data, var_field,
					   system, event_name);

	if (!ref_field)
		hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));

	return ref_field;
}

static struct ftrace_event_field *
parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
	    char *field_str, unsigned long *flags, unsigned long *buckets)
{
	struct ftrace_event_field *field = NULL;
	char *field_name, *modifier, *str;
	struct trace_array *tr = file->tr;

	modifier = str = kstrdup(field_str, GFP_KERNEL);
	if (!modifier)
		return ERR_PTR(-ENOMEM);

	field_name = strsep(&modifier, ".");
	if (modifier) {
		if (strcmp(modifier, "hex") == 0)
			*flags |= HIST_FIELD_FL_HEX;
		else if (strcmp(modifier, "sym") == 0)
			*flags |= HIST_FIELD_FL_SYM;
		/*
		 * 'sym-offset' occurrences in the trigger string are modified
		 * to 'symXoffset' to simplify arithmetic expression parsing.
2346 */ 2347 else if (strcmp(modifier, "symXoffset") == 0) 2348 *flags |= HIST_FIELD_FL_SYM_OFFSET; 2349 else if ((strcmp(modifier, "execname") == 0) && 2350 (strcmp(field_name, "common_pid") == 0)) 2351 *flags |= HIST_FIELD_FL_EXECNAME; 2352 else if (strcmp(modifier, "syscall") == 0) 2353 *flags |= HIST_FIELD_FL_SYSCALL; 2354 else if (strcmp(modifier, "stacktrace") == 0) 2355 *flags |= HIST_FIELD_FL_STACKTRACE; 2356 else if (strcmp(modifier, "log2") == 0) 2357 *flags |= HIST_FIELD_FL_LOG2; 2358 else if (strcmp(modifier, "usecs") == 0) 2359 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS; 2360 else if (strncmp(modifier, "bucket", 6) == 0) { 2361 int ret; 2362 2363 modifier += 6; 2364 2365 if (*modifier == 's') 2366 modifier++; 2367 if (*modifier != '=') 2368 goto error; 2369 modifier++; 2370 ret = kstrtoul(modifier, 0, buckets); 2371 if (ret || !(*buckets)) 2372 goto error; 2373 *flags |= HIST_FIELD_FL_BUCKET; 2374 } else if (strncmp(modifier, "percent", 7) == 0) { 2375 if (*flags & (HIST_FIELD_FL_VAR | HIST_FIELD_FL_KEY)) 2376 goto error; 2377 *flags |= HIST_FIELD_FL_PERCENT; 2378 } else if (strncmp(modifier, "graph", 5) == 0) { 2379 if (*flags & (HIST_FIELD_FL_VAR | HIST_FIELD_FL_KEY)) 2380 goto error; 2381 *flags |= HIST_FIELD_FL_GRAPH; 2382 } else { 2383 error: 2384 hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier)); 2385 field = ERR_PTR(-EINVAL); 2386 goto out; 2387 } 2388 } 2389 2390 if (strcmp(field_name, "common_timestamp") == 0) { 2391 *flags |= HIST_FIELD_FL_TIMESTAMP; 2392 hist_data->enable_timestamps = true; 2393 if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS) 2394 hist_data->attrs->ts_in_usecs = true; 2395 } else if (strcmp(field_name, "common_stacktrace") == 0) { 2396 *flags |= HIST_FIELD_FL_STACKTRACE; 2397 } else if (strcmp(field_name, "common_cpu") == 0) { 2398 *flags |= HIST_FIELD_FL_CPU; 2399 } else if (strcmp(field_name, "common_comm") == 0) { 2400 *flags |= HIST_FIELD_FL_COMM | HIST_FIELD_FL_STRING; 2401 } else if (strcmp(field_name, "hitcount") 
== 0) 2402 *flags |= HIST_FIELD_FL_HITCOUNT; 2403 else { 2404 field = trace_find_event_field(file->event_call, field_name); 2405 if (!field || !field->size) { 2406 /* 2407 * For backward compatibility, if field_name 2408 * was "cpu" or "stacktrace", then we treat this 2409 * the same as common_cpu and common_stacktrace 2410 * respectively. This also works for "CPU", and 2411 * "STACKTRACE". 2412 */ 2413 if (field && field->filter_type == FILTER_CPU) { 2414 *flags |= HIST_FIELD_FL_CPU; 2415 } else if (field && field->filter_type == FILTER_STACKTRACE) { 2416 *flags |= HIST_FIELD_FL_STACKTRACE; 2417 } else if (field && field->filter_type == FILTER_COMM) { 2418 *flags |= HIST_FIELD_FL_COMM | HIST_FIELD_FL_STRING; 2419 } else { 2420 hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, 2421 errpos(field_name)); 2422 field = ERR_PTR(-EINVAL); 2423 goto out; 2424 } 2425 } 2426 } 2427 out: 2428 kfree(str); 2429 2430 return field; 2431 } 2432 2433 static struct hist_field *create_alias(struct hist_trigger_data *hist_data, 2434 struct hist_field *var_ref, 2435 char *var_name) 2436 { 2437 struct hist_field *alias = NULL; 2438 unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR; 2439 2440 alias = create_hist_field(hist_data, NULL, flags, var_name); 2441 if (!alias) 2442 return NULL; 2443 2444 alias->fn_num = var_ref->fn_num; 2445 alias->operands[0] = var_ref; 2446 2447 if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) { 2448 destroy_hist_field(alias, 0); 2449 return NULL; 2450 } 2451 2452 alias->var_ref_idx = var_ref->var_ref_idx; 2453 2454 return alias; 2455 } 2456 2457 static struct hist_field *parse_const(struct hist_trigger_data *hist_data, 2458 char *str, char *var_name, 2459 unsigned long *flags) 2460 { 2461 struct trace_array *tr = hist_data->event_file->tr; 2462 struct hist_field *field = NULL; 2463 u64 constant; 2464 2465 if (kstrtoull(str, 0, &constant)) { 2466 hist_err(tr, HIST_ERR_EXPECT_NUMBER, errpos(str)); 2467 return NULL; 2468 } 2469 2470 
*flags |= HIST_FIELD_FL_CONST; 2471 field = create_hist_field(hist_data, NULL, *flags, var_name); 2472 if (!field) 2473 return NULL; 2474 2475 field->constant = constant; 2476 2477 return field; 2478 } 2479 2480 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data, 2481 struct trace_event_file *file, char *str, 2482 unsigned long *flags, char *var_name) 2483 { 2484 char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str; 2485 struct ftrace_event_field *field = NULL; 2486 struct hist_field *hist_field = NULL; 2487 unsigned long buckets = 0; 2488 int ret = 0; 2489 2490 if (isdigit(str[0])) { 2491 hist_field = parse_const(hist_data, str, var_name, flags); 2492 if (!hist_field) { 2493 ret = -EINVAL; 2494 goto out; 2495 } 2496 return hist_field; 2497 } 2498 2499 s = strchr(str, '.'); 2500 if (s) { 2501 s = strchr(++s, '.'); 2502 if (s) { 2503 ref_system = strsep(&str, "."); 2504 if (!str) { 2505 ret = -EINVAL; 2506 goto out; 2507 } 2508 ref_event = strsep(&str, "."); 2509 if (!str) { 2510 ret = -EINVAL; 2511 goto out; 2512 } 2513 ref_var = str; 2514 } 2515 } 2516 2517 s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var); 2518 if (!s) { 2519 hist_field = parse_var_ref(hist_data, ref_system, 2520 ref_event, ref_var); 2521 if (hist_field) { 2522 if (var_name) { 2523 hist_field = create_alias(hist_data, hist_field, var_name); 2524 if (!hist_field) { 2525 ret = -ENOMEM; 2526 goto out; 2527 } 2528 } 2529 return hist_field; 2530 } 2531 } else 2532 str = s; 2533 2534 field = parse_field(hist_data, file, str, flags, &buckets); 2535 if (IS_ERR(field)) { 2536 ret = PTR_ERR(field); 2537 goto out; 2538 } 2539 2540 hist_field = create_hist_field(hist_data, field, *flags, var_name); 2541 if (!hist_field) { 2542 ret = -ENOMEM; 2543 goto out; 2544 } 2545 hist_field->buckets = buckets; 2546 2547 return hist_field; 2548 out: 2549 return ERR_PTR(ret); 2550 } 2551 2552 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, 2553 
struct trace_event_file *file, 2554 char *str, unsigned long flags, 2555 char *var_name, unsigned int *n_subexprs); 2556 2557 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data, 2558 struct trace_event_file *file, 2559 char *str, unsigned long flags, 2560 char *var_name, unsigned int *n_subexprs) 2561 { 2562 struct hist_field *operand1, *expr = NULL; 2563 unsigned long operand_flags; 2564 int ret = 0; 2565 char *s; 2566 2567 /* Unary minus operator, increment n_subexprs */ 2568 ++*n_subexprs; 2569 2570 /* we support only -(xxx) i.e. explicit parens required */ 2571 2572 if (*n_subexprs > 3) { 2573 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); 2574 ret = -EINVAL; 2575 goto free; 2576 } 2577 2578 str++; /* skip leading '-' */ 2579 2580 s = strchr(str, '('); 2581 if (s) 2582 str++; 2583 else { 2584 ret = -EINVAL; 2585 goto free; 2586 } 2587 2588 s = strrchr(str, ')'); 2589 if (s) { 2590 /* unary minus not supported in sub-expressions */ 2591 if (*(s+1) != '\0') { 2592 hist_err(file->tr, HIST_ERR_UNARY_MINUS_SUBEXPR, 2593 errpos(str)); 2594 ret = -EINVAL; 2595 goto free; 2596 } 2597 *s = '\0'; 2598 } 2599 else { 2600 ret = -EINVAL; /* no closing ')' */ 2601 goto free; 2602 } 2603 2604 flags |= HIST_FIELD_FL_EXPR; 2605 expr = create_hist_field(hist_data, NULL, flags, var_name); 2606 if (!expr) { 2607 ret = -ENOMEM; 2608 goto free; 2609 } 2610 2611 operand_flags = 0; 2612 operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs); 2613 if (IS_ERR(operand1)) { 2614 ret = PTR_ERR(operand1); 2615 goto free; 2616 } 2617 if (operand1->flags & HIST_FIELD_FL_STRING) { 2618 /* String type can not be the operand of unary operator. 
*/ 2619 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str)); 2620 destroy_hist_field(operand1, 0); 2621 ret = -EINVAL; 2622 goto free; 2623 } 2624 2625 expr->flags |= operand1->flags & 2626 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 2627 expr->fn_num = HIST_FIELD_FN_UMINUS; 2628 expr->operands[0] = operand1; 2629 expr->size = operand1->size; 2630 expr->is_signed = operand1->is_signed; 2631 expr->operator = FIELD_OP_UNARY_MINUS; 2632 expr->name = expr_str(expr, 0); 2633 expr->type = kstrdup_const(operand1->type, GFP_KERNEL); 2634 if (!expr->type) { 2635 ret = -ENOMEM; 2636 goto free; 2637 } 2638 2639 return expr; 2640 free: 2641 destroy_hist_field(expr, 0); 2642 return ERR_PTR(ret); 2643 } 2644 2645 /* 2646 * If the operands are var refs, return pointers the 2647 * variable(s) referenced in var1 and var2, else NULL. 2648 */ 2649 static int check_expr_operands(struct trace_array *tr, 2650 struct hist_field *operand1, 2651 struct hist_field *operand2, 2652 struct hist_field **var1, 2653 struct hist_field **var2) 2654 { 2655 unsigned long operand1_flags = operand1->flags; 2656 unsigned long operand2_flags = operand2->flags; 2657 2658 if ((operand1_flags & HIST_FIELD_FL_VAR_REF) || 2659 (operand1_flags & HIST_FIELD_FL_ALIAS)) { 2660 struct hist_field *var; 2661 2662 var = find_var_field(operand1->var.hist_data, operand1->name); 2663 if (!var) 2664 return -EINVAL; 2665 operand1_flags = var->flags; 2666 *var1 = var; 2667 } 2668 2669 if ((operand2_flags & HIST_FIELD_FL_VAR_REF) || 2670 (operand2_flags & HIST_FIELD_FL_ALIAS)) { 2671 struct hist_field *var; 2672 2673 var = find_var_field(operand2->var.hist_data, operand2->name); 2674 if (!var) 2675 return -EINVAL; 2676 operand2_flags = var->flags; 2677 *var2 = var; 2678 } 2679 2680 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) != 2681 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) { 2682 hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0); 2683 return -EINVAL; 2684 } 2685 2686 return 0; 2687 } 

/*
 * Recursively parse a hist trigger expression: atoms, unary minus, and
 * the binary operators +, -, *, /.  The string is split destructively
 * at the root operator and each side parsed as a sub-expression; at
 * most 3 subexpressions are allowed.  Constant subtrees are folded
 * into a single CONST field.  Returns a hist_field or ERR_PTR.
 */
static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file,
				     char *str, unsigned long flags,
				     char *var_name, unsigned int *n_subexprs)
{
	struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
	struct hist_field *var1 = NULL, *var2 = NULL;
	unsigned long operand_flags, operand2_flags;
	int field_op, ret = -EINVAL;
	char *sep, *operand1_str;
	enum hist_field_fn op_fn;
	bool combine_consts;

	if (*n_subexprs > 3) {
		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
		return ERR_PTR(-EINVAL);
	}

	field_op = contains_operator(str, &sep);

	if (field_op == FIELD_OP_NONE)
		return parse_atom(hist_data, file, str, &flags, var_name);

	if (field_op == FIELD_OP_UNARY_MINUS)
		return parse_unary(hist_data, file, str, flags, var_name, n_subexprs);

	/* Binary operator found, increment n_subexprs */
	++*n_subexprs;

	/* Split the expression string at the root operator */
	if (!sep)
		return ERR_PTR(-EINVAL);

	*sep = '\0';
	operand1_str = str;
	str = sep+1;

	/* Binary operator requires both operands */
	if (*operand1_str == '\0' || *str == '\0')
		return ERR_PTR(-EINVAL);

	operand_flags = 0;

	/* LHS of string is an expression e.g. a+b in a+b+c */
	operand1 = parse_expr(hist_data, file, operand1_str, operand_flags, NULL, n_subexprs);
	if (IS_ERR(operand1))
		return ERR_CAST(operand1);

	if (operand1->flags & HIST_FIELD_FL_STRING) {
		hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
		ret = -EINVAL;
		goto free_op1;
	}

	/* RHS of string is another expression e.g. c in a+b+c */
	operand_flags = 0;
	operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs);
	if (IS_ERR(operand2)) {
		ret = PTR_ERR(operand2);
		goto free_op1;
	}
	if (operand2->flags & HIST_FIELD_FL_STRING) {
		hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
		ret = -EINVAL;
		goto free_operands;
	}

	switch (field_op) {
	case FIELD_OP_MINUS:
		op_fn = HIST_FIELD_FN_MINUS;
		break;
	case FIELD_OP_PLUS:
		op_fn = HIST_FIELD_FN_PLUS;
		break;
	case FIELD_OP_DIV:
		op_fn = HIST_FIELD_FN_DIV;
		break;
	case FIELD_OP_MULT:
		op_fn = HIST_FIELD_FN_MULT;
		break;
	default:
		ret = -EINVAL;
		goto free_operands;
	}

	ret = check_expr_operands(file->tr, operand1, operand2, &var1, &var2);
	if (ret)
		goto free_operands;

	/* for var refs, consult the referenced variable's flags */
	operand_flags = var1 ? var1->flags : operand1->flags;
	operand2_flags = var2 ? var2->flags : operand2->flags;

	/*
	 * If both operands are constant, the expression can be
	 * collapsed to a single constant.
	 */
	combine_consts = operand_flags & operand2_flags & HIST_FIELD_FL_CONST;

	flags |= combine_consts ? HIST_FIELD_FL_CONST : HIST_FIELD_FL_EXPR;

	flags |= operand1->flags &
		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);

	expr = create_hist_field(hist_data, NULL, flags, var_name);
	if (!expr) {
		ret = -ENOMEM;
		goto free_operands;
	}

	operand1->read_once = true;
	operand2->read_once = true;

	/* The operands are now owned and free'd by 'expr' */
	expr->operands[0] = operand1;
	expr->operands[1] = operand2;

	if (field_op == FIELD_OP_DIV &&
	    operand2_flags & HIST_FIELD_FL_CONST) {
		u64 divisor = var2 ? var2->constant : operand2->constant;

		if (!divisor) {
			hist_err(file->tr, HIST_ERR_DIVISION_BY_ZERO, errpos(str));
			ret = -EDOM;
			goto free_expr;
		}

		/*
		 * Copy the divisor here so we don't have to look it up
		 * later if this is a var ref
		 */
		operand2->constant = divisor;
		op_fn = hist_field_get_div_fn(operand2);
	}

	expr->fn_num = op_fn;

	if (combine_consts) {
		/* temporarily point at the vars so the fold reads constants */
		if (var1)
			expr->operands[0] = var1;
		if (var2)
			expr->operands[1] = var2;

		expr->constant = hist_fn_call(expr, NULL, NULL, NULL, NULL);
		expr->fn_num = HIST_FIELD_FN_CONST;

		expr->operands[0] = NULL;
		expr->operands[1] = NULL;

		/*
		 * var refs won't be destroyed immediately
		 * See: destroy_hist_field()
		 */
		destroy_hist_field(operand2, 0);
		destroy_hist_field(operand1, 0);

		expr->name = expr_str(expr, 0);
	} else {
		/* The operand sizes should be the same, so just pick one */
		expr->size = operand1->size;
		expr->is_signed = operand1->is_signed;

		expr->operator = field_op;
		expr->type = kstrdup_const(operand1->type, GFP_KERNEL);
		if (!expr->type) {
			ret = -ENOMEM;
			goto free_expr;
		}

		expr->name = expr_str(expr, 0);
	}

	return expr;

free_operands:
	destroy_hist_field(operand2, 0);
free_op1:
	destroy_hist_field(operand1, 0);
	return ERR_PTR(ret);

free_expr:
	/* expr owns both operands at this point; one free does it all */
	destroy_hist_field(expr, 0);
	return ERR_PTR(ret);
}

/*
 * Return the filter string of the hist trigger registered on 'file'
 * whose private data is 'hist_data', or NULL.  Caller must hold
 * event_mutex.
 */
static char *find_trigger_filter(struct hist_trigger_data *hist_data,
				 struct trace_event_file *file)
{
	struct event_trigger_data *test;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (test->private_data == hist_data)
				return test->filter_str;
		}
	}

	return NULL;
}

static struct event_command trigger_hist_cmd;
static int event_hist_trigger_parse(struct event_command *cmd_ops,
				    struct trace_event_file *file,
				    char *glob, char *cmd,
				    char *param_and_filter);

/*
 * Two histograms have compatible keys if they have the same number of
 * keys and each corresponding key matches in type, size and
 * signedness.  Keys follow the vals in the fields[] array.
 */
static bool compatible_keys(struct hist_trigger_data *target_hist_data,
			    struct hist_trigger_data *hist_data,
			    unsigned int n_keys)
{
	struct hist_field *target_hist_field, *hist_field;
	unsigned int n, i, j;

	if (hist_data->n_fields - hist_data->n_vals != n_keys)
		return false;

	i = hist_data->n_vals;
	j = target_hist_data->n_vals;

	for (n = 0; n < n_keys; n++) {
		hist_field = hist_data->fields[i + n];
		target_hist_field = target_hist_data->fields[j + n];

		if (strcmp(hist_field->type, target_hist_field->type) != 0)
			return false;
		if (hist_field->size != target_hist_field->size)
			return false;
		if (hist_field->is_signed != target_hist_field->is_signed)
			return false;
	}

	return true;
}

/*
 * Find a hist trigger registered on 'file' whose keys are compatible
 * with 'target_hist_data' (see compatible_keys()).  Caller must hold
 * event_mutex.
 */
static struct hist_trigger_data *
find_compatible_hist(struct hist_trigger_data *target_hist_data,
		     struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data;
	struct event_trigger_data *test;
	unsigned int n_keys;

	lockdep_assert_held(&event_mutex);

	n_keys = target_hist_data->n_fields - target_hist_data->n_vals;

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;

			if (compatible_keys(target_hist_data, hist_data, n_keys))
				return hist_data;
		}
	}

	return NULL;
}

/* Look up a trace event file, converting a miss into ERR_PTR(-EINVAL) */
static struct trace_event_file *event_file(struct trace_array *tr,
					   char *system, char *event_name)
{
	struct trace_event_file *file;

	file = __find_event_file(tr, system, event_name);
	if (!file)
		return ERR_PTR(-EINVAL);

	return file;
}

/*
 * Find the 'synthetic_<field_name>' variable created for an event field
 * (see create_field_var_hist() below for the naming convention).
 */
static struct hist_field *
find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
			 char *system, char *event_name, char *field_name)
{
	struct hist_field *event_var;
	char *synthetic_name;

	synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!synthetic_name)
		return ERR_PTR(-ENOMEM);

	strcpy(synthetic_name, "synthetic_");
	strcat(synthetic_name, field_name);

	event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);

	kfree(synthetic_name);

	return event_var;
}

/**
 * create_field_var_hist - Automatically create a histogram and var for a field
 * @target_hist_data: The target hist trigger
 * @subsys_name: Optional subsystem name
 * @event_name: Optional event name
 * @field_name: The name of the field (and the resulting variable)
 *
 * Hist trigger actions fetch data from variables, not directly from
 * events. However, for convenience, users are allowed to directly
 * specify an event field in an action, which will be automatically
 * converted into a variable on their behalf.
 *
 * If a user specifies a field on an event that isn't the event the
 * histogram currently being defined (the target event histogram), the
 * only way that can be accomplished is if a new hist trigger is
 * created and the field variable defined on that.
 *
 * This function creates a new histogram compatible with the target
 * event (meaning a histogram with the same key as the target
 * histogram), and creates a variable for the specified field, but
 * with 'synthetic_' prepended to the variable name in order to avoid
 * collision with normal field variables.
 *
 * Return: The variable created for the field.
 */
static struct hist_field *
create_field_var_hist(struct hist_trigger_data *target_hist_data,
		      char *subsys_name, char *event_name, char *field_name)
{
	struct trace_array *tr = target_hist_data->event_file->tr;
	struct hist_trigger_data *hist_data;
	unsigned int i, n, first = true;
	struct field_var_hist *var_hist;
	struct trace_event_file *file;
	struct hist_field *key_field;
	struct hist_field *event_var;
	char *saved_filter;
	char *cmd;
	int ret;

	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	file = event_file(tr, subsys_name, event_name);

	if (IS_ERR(file)) {
		hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
		ret = PTR_ERR(file);
		return ERR_PTR(ret);
	}

	/*
	 * Look for a histogram compatible with target. We'll use the
	 * found histogram specification to create a new matching
	 * histogram with our variable on it. target_hist_data is not
	 * yet a registered histogram so we can't use that.
	 */
	hist_data = find_compatible_hist(target_hist_data, file);
	if (!hist_data) {
		hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	/* See if a synthetic field variable has already been created */
	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
					     event_name, field_name);
	if (!IS_ERR_OR_NULL(event_var))
		return event_var;

	var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
	if (!var_hist)
		return ERR_PTR(-ENOMEM);

	cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!cmd) {
		kfree(var_hist);
		return ERR_PTR(-ENOMEM);
	}

	/* Use the same keys as the compatible histogram */
	strcat(cmd, "keys=");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];
		if (!first)
			strcat(cmd, ",");
		strcat(cmd, key_field->field->name);
		first = false;
	}

	/* Create the synthetic field variable specification */
	strcat(cmd, ":synthetic_");
	strcat(cmd, field_name);
	strcat(cmd, "=");
	strcat(cmd, field_name);

	/* Use the same filter as the compatible histogram */
	saved_filter = find_trigger_filter(hist_data, file);
	if (saved_filter) {
		strcat(cmd, " if ");
		strcat(cmd, saved_filter);
	}

	var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
	if (!var_hist->cmd) {
		kfree(cmd);
		kfree(var_hist);
		return ERR_PTR(-ENOMEM);
	}

	/* Save the compatible histogram information */
	var_hist->hist_data = hist_data;

	/* Create the new histogram with our variable */
	ret = event_hist_trigger_parse(&trigger_hist_cmd, file,
				       "", "hist", cmd);
	if (ret) {
		kfree(cmd);
		kfree(var_hist->cmd);
		kfree(var_hist);
		hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
		return ERR_PTR(ret);
	}

	kfree(cmd);

	/* If we can't find the variable, something went wrong */
	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
					     event_name, field_name);
	if (IS_ERR_OR_NULL(event_var)) {
		kfree(var_hist->cmd);
		kfree(var_hist);
		hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	n = target_hist_data->n_field_var_hists;
	target_hist_data->field_var_hists[n] = var_hist;
	target_hist_data->n_field_var_hists++;

	return event_var;
}

/*
 * Find a variable defined on the histogram currently being defined.
 * If subsys_name/event_name are given they must both be given and must
 * match the target event.
 */
static struct hist_field *
find_target_event_var(struct hist_trigger_data *hist_data,
		      char *subsys_name, char *event_name, char *var_name)
{
	struct trace_event_file *file = hist_data->event_file;
	struct hist_field *hist_field = NULL;

	if (subsys_name) {
		struct trace_event_call *call;

		if (!event_name)
			return NULL;

		call = file->event_call;

		if (strcmp(subsys_name, call->class->system) != 0)
			return NULL;

		if (strcmp(event_name, trace_event_name(call)) != 0)
			return NULL;
	}

	hist_field = find_var_field(hist_data, var_name);

	return hist_field;
}

/*
 * Evaluate each field variable against the current event record and
 * store the result in the tracing map element.  String and stacktrace
 * values are copied into the element's per-variable string storage
 * (field_var_str[]), starting at index 'field_var_str_start'.
 */
static inline void __update_field_vars(struct tracing_map_elt *elt,
				       struct trace_buffer *buffer,
				       struct ring_buffer_event *rbe,
				       void *rec,
				       struct field_var **field_vars,
				       unsigned int n_field_vars,
				       unsigned int field_var_str_start)
{
	struct hist_elt_data *elt_data = elt->private_data;
	unsigned int i, j, var_idx;
	u64 var_val;

	/* Make sure stacktrace can fit in the string variable length */
	BUILD_BUG_ON((HIST_STACKTRACE_DEPTH + 1) * sizeof(long) > STR_VAR_LEN_MAX);

	for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
		struct field_var *field_var = field_vars[i];
		struct hist_field *var = field_var->var;
		struct hist_field *val = field_var->val;

		var_val = hist_fn_call(val, elt, buffer, rbe, rec);
		var_idx = var->var.idx;

		if (val->flags & (HIST_FIELD_FL_STRING |
				  HIST_FIELD_FL_STACKTRACE)) {
			char *str = elt_data->field_var_str[j++];
			char *val_str = (char *)(uintptr_t)var_val;
			unsigned int size;

			if (val->flags & HIST_FIELD_FL_STRING) {
				size = min(val->size, STR_VAR_LEN_MAX);
				strscpy(str, val_str, size);
			} else {
				/* layout: [nr_entries][entry0][entry1]... */
				char *stack_start = str + sizeof(unsigned long);
				int e;

				e = stack_trace_save((void *)stack_start,
						     HIST_STACKTRACE_DEPTH,
						     HIST_STACKTRACE_SKIP);
				/* zero-terminate a partially-filled trace */
				if (e < HIST_STACKTRACE_DEPTH - 1)
					((unsigned long *)stack_start)[e] = 0;
				*((unsigned long *)str) = e;
			}
			/* the variable holds a pointer to the copied data */
			var_val = (u64)(uintptr_t)str;
		}
		tracing_map_set_var(elt, var_idx, var_val);
	}
}

/* Update all regular field variables for this histogram entry */
static void update_field_vars(struct hist_trigger_data *hist_data,
			      struct tracing_map_elt *elt,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *rbe,
			      void *rec)
{
	__update_field_vars(elt, buffer, rbe, rec, hist_data->field_vars,
			    hist_data->n_field_vars, 0);
}

/*
 * Update save() action variables; their string storage starts after
 * the regular field variable strings (n_field_var_str offset).
 */
static void save_track_data_vars(struct hist_trigger_data *hist_data,
				 struct tracing_map_elt *elt,
				 struct trace_buffer *buffer, void *rec,
				 struct ring_buffer_event *rbe, void *key,
				 struct action_data *data, u64 *var_ref_vals)
{
	__update_field_vars(elt, buffer, rbe, rec, hist_data->save_vars,
			    hist_data->n_save_vars, hist_data->n_field_var_str);
}

/*
 * Allocate and initialize a VAR hist_field with the given name, size
 * and type, backed by a fresh tracing_map variable slot.  Fails with
 * -EINVAL if the name already exists (unless the trigger is being
 * removed).  Returns the field or ERR_PTR.
 */
static struct hist_field *create_var(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file,
				     char *name, int size, const char *type)
{
	struct hist_field *var;
	int idx;

	if (find_var(hist_data, file, name) && !hist_data->remove) {
		var = ERR_PTR(-EINVAL);
		goto out;
	}

	var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
	if (!var) {
		var = ERR_PTR(-ENOMEM);
		goto out;
	}

	idx = tracing_map_add_var(hist_data->map);
	if (idx < 0) {
		kfree(var);
		var = ERR_PTR(-EINVAL);
		goto out;
	}

	var->ref = 1;
	var->flags = HIST_FIELD_FL_VAR;
	var->var.idx = idx;
	var->var.hist_data = var->hist_data = hist_data;
	var->size = size;
	var->var.name = kstrdup(name, GFP_KERNEL);
	var->type = kstrdup_const(type, GFP_KERNEL);
	if (!var->var.name || !var->type) {
		kfree_const(var->type);
		kfree(var->var.name);
		kfree(var);
		var = ERR_PTR(-ENOMEM);
	}
 out:
	return var;
}

/*
 * Create a field variable: a val hist_field that reads the event field
 * plus a var hist_field (same name as the field) that stores it.
 * Returns a field_var pairing the two, or ERR_PTR.
 */
static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
					  struct trace_event_file *file,
					  char *field_name)
{
	struct hist_field *val = NULL, *var = NULL;
	unsigned long flags = HIST_FIELD_FL_VAR;
	struct trace_array *tr = file->tr;
	struct field_var *field_var;
	int ret = 0;

	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
		ret = -EINVAL;
		goto err;
	}

	val = parse_atom(hist_data, file, field_name, &flags, NULL);
	if (IS_ERR(val)) {
		hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
		ret = PTR_ERR(val);
		goto err;
	}

	var = create_var(hist_data, file, field_name, val->size, val->type);
	if (IS_ERR(var)) {
		hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
		destroy_hist_field(val, 0);
		ret = PTR_ERR(var);
		goto err;
	}

	field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
	if (!field_var) {
		destroy_hist_field(val, 0);
		kfree_const(var->type);
		kfree(var->var.name);
		kfree(var);
		ret = -ENOMEM;
		goto err;
	}

	field_var->var = var;
	field_var->val = val;
 out:
	return field_var;
 err:
	field_var = ERR_PTR(ret);
	goto out;
}

/**
 * create_target_field_var -
Automatically create a variable for a field 3316 * @target_hist_data: The target hist trigger 3317 * @subsys_name: Optional subsystem name 3318 * @event_name: Optional event name 3319 * @var_name: The name of the field (and the resulting variable) 3320 * 3321 * Hist trigger actions fetch data from variables, not directly from 3322 * events. However, for convenience, users are allowed to directly 3323 * specify an event field in an action, which will be automatically 3324 * converted into a variable on their behalf. 3325 * 3326 * This function creates a field variable with the name var_name on 3327 * the hist trigger currently being defined on the target event. If 3328 * subsys_name and event_name are specified, this function simply 3329 * verifies that they do in fact match the target event subsystem and 3330 * event name. 3331 * 3332 * Return: The variable created for the field. 3333 */ 3334 static struct field_var * 3335 create_target_field_var(struct hist_trigger_data *target_hist_data, 3336 char *subsys_name, char *event_name, char *var_name) 3337 { 3338 struct trace_event_file *file = target_hist_data->event_file; 3339 3340 if (subsys_name) { 3341 struct trace_event_call *call; 3342 3343 if (!event_name) 3344 return NULL; 3345 3346 call = file->event_call; 3347 3348 if (strcmp(subsys_name, call->class->system) != 0) 3349 return NULL; 3350 3351 if (strcmp(event_name, trace_event_name(call)) != 0) 3352 return NULL; 3353 } 3354 3355 return create_field_var(target_hist_data, file, var_name); 3356 } 3357 3358 static bool check_track_val_max(u64 track_val, u64 var_val) 3359 { 3360 if (var_val <= track_val) 3361 return false; 3362 3363 return true; 3364 } 3365 3366 static bool check_track_val_changed(u64 track_val, u64 var_val) 3367 { 3368 if (var_val == track_val) 3369 return false; 3370 3371 return true; 3372 } 3373 3374 static u64 get_track_val(struct hist_trigger_data *hist_data, 3375 struct tracing_map_elt *elt, 3376 struct action_data *data) 3377 { 3378 
	unsigned int track_var_idx = data->track_data.track_var->var.idx;
	u64 track_val;

	track_val = tracing_map_read_var(elt, track_var_idx);

	return track_val;
}

/* Store a new tracked value (e.g. a new max) into the tracking variable */
static void save_track_val(struct hist_trigger_data *hist_data,
			   struct tracing_map_elt *elt,
			   struct action_data *data, u64 var_val)
{
	unsigned int track_var_idx = data->track_data.track_var->var.idx;

	tracing_map_set_var(elt, track_var_idx, var_val);
}

/* Invoke the action's save_data callback, if one was configured */
static void save_track_data(struct hist_trigger_data *hist_data,
			    struct tracing_map_elt *elt,
			    struct trace_buffer *buffer, void *rec,
			    struct ring_buffer_event *rbe, void *key,
			    struct action_data *data, u64 *var_ref_vals)
{
	if (data->track_data.save_data)
		data->track_data.save_data(hist_data, elt, buffer, rec, rbe,
					   key, data, var_ref_vals);
}

/* Decide whether the handler should fire, via the action's check_val op */
static bool check_track_val(struct tracing_map_elt *elt,
			    struct action_data *data,
			    u64 var_val)
{
	struct hist_trigger_data *hist_data;
	u64 track_val;

	hist_data = data->track_data.track_var->hist_data;
	track_val = get_track_val(hist_data, elt, data);

	return data->track_data.check_val(track_val, var_val);
}

#ifdef CONFIG_TRACER_SNAPSHOT
/* Conditional-snapshot callback: update tracked state if check_val passes */
static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
{
	/* called with tr->max_lock held */
	struct track_data *track_data = tr->cond_snapshot->cond_data;
	struct hist_elt_data *elt_data, *track_elt_data;
	struct snapshot_context *context = cond_data;
	struct action_data *action;
	u64 track_val;

	if (!track_data)
		return false;

	action = track_data->action_data;

	track_val = get_track_val(track_data->hist_data, context->elt,
				  track_data->action_data);

	if (!action->track_data.check_val(track_data->track_val, track_val))
		return false;

	track_data->track_val = track_val;
	memcpy(track_data->key, context->key, track_data->key_len);

	elt_data = context->elt->private_data;
	track_elt_data = track_data->elt.private_data;
	if (elt_data->comm)
		strscpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);

	track_data->updated = true;

	return true;
}

/* snapshot() save_data callback: request a conditional snapshot for this elt */
static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
				     struct tracing_map_elt *elt,
				     struct trace_buffer *buffer, void *rec,
				     struct ring_buffer_event *rbe, void *key,
				     struct action_data *data,
				     u64 *var_ref_vals)
{
	struct trace_event_file *file = hist_data->event_file;
	struct snapshot_context context;

	context.elt = elt;
	context.key = key;

	tracing_snapshot_cond(file->tr, &context);
}

static void hist_trigger_print_key(struct seq_file *m,
				   struct hist_trigger_data *hist_data,
				   void *key,
				   struct tracing_map_elt *elt);

/* Find the first snapshot action attached to this hist trigger, if any */
static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	if (!hist_data->n_actions)
		return NULL;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->action == ACTION_SNAPSHOT)
			return data;
	}

	return NULL;
}

/* Print details of the last snapshot taken by an onmax()/onchange() action */
static void track_data_snapshot_print(struct seq_file *m,
				      struct hist_trigger_data *hist_data)
{
	struct trace_event_file *file = hist_data->event_file;
	struct track_data *track_data;
	struct action_data *action;

	track_data = tracing_cond_snapshot_data(file->tr);
	if (!track_data)
		return;

	if (!track_data->updated)
		return;

	action = snapshot_action(hist_data);
	if (!action)
		return;

	seq_puts(m, "\nSnapshot taken (see tracing/snapshot). Details:\n");
	seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
		   action->handler == HANDLER_ONMAX ?
		   "onmax" : "onchange",
		   action->track_data.var_str, track_data->track_val);

	seq_puts(m, "\ttriggered by event with key: ");
	hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
	seq_putc(m, '\n');
}
#else
/* !CONFIG_TRACER_SNAPSHOT stubs: snapshot support compiled out */
static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
{
	return false;
}
static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
				     struct tracing_map_elt *elt,
				     struct trace_buffer *buffer, void *rec,
				     struct ring_buffer_event *rbe, void *key,
				     struct action_data *data,
				     u64 *var_ref_vals) {}
static void track_data_snapshot_print(struct seq_file *m,
				      struct hist_trigger_data *hist_data) {}
#endif /* CONFIG_TRACER_SNAPSHOT */

/* Print the tracked value plus any save() variables for a map element */
static void track_data_print(struct seq_file *m,
			     struct hist_trigger_data *hist_data,
			     struct tracing_map_elt *elt,
			     struct action_data *data)
{
	u64 track_val = get_track_val(hist_data, elt, data);
	unsigned int i, save_var_idx;

	if (data->handler == HANDLER_ONMAX)
		seq_printf(m, "\n\tmax: %10llu", track_val);
	else if (data->handler == HANDLER_ONCHANGE)
		seq_printf(m, "\n\tchanged: %10llu", track_val);

	if (data->action == ACTION_SNAPSHOT)
		return;

	for (i = 0; i < hist_data->n_save_vars; i++) {
		struct hist_field *save_val = hist_data->save_vars[i]->val;
		struct hist_field *save_var = hist_data->save_vars[i]->var;
		u64 val;

		save_var_idx = save_var->var.idx;

		val = tracing_map_read_var(elt, save_var_idx);

		if (save_val->flags & HIST_FIELD_FL_STRING) {
			seq_printf(m, " %s: %-32s", save_var->var.name,
				   (char *)(uintptr_t)(val));
		} else
			seq_printf(m, " %s: %10llu", save_var->var.name, val);
	}
}

/* Runtime entry for onmax()/onchange(): check, then update and save */
static void ontrack_action(struct hist_trigger_data *hist_data,
			   struct tracing_map_elt *elt,
			   struct trace_buffer *buffer, void *rec,
			   struct
			   ring_buffer_event *rbe, void *key,
			   struct action_data *data, u64 *var_ref_vals)
{
	u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];

	if (check_track_val(elt, data, var_val)) {
		save_track_val(hist_data, elt, data, var_val);
		save_track_data(hist_data, elt, buffer, rec, rbe,
				key, data, var_ref_vals);
	}
}

/* Free everything owned by an action_data (params, names, synth event ref) */
static void action_data_destroy(struct action_data *data)
{
	unsigned int i;

	lockdep_assert_held(&event_mutex);

	kfree(data->action_name);

	for (i = 0; i < data->n_params; i++)
		kfree(data->params[i]);

	if (data->synth_event)
		data->synth_event->ref--;

	kfree(data->synth_event_name);

	kfree(data);
}

/* Tear down an onmax()/onchange() action, including any pending snapshot */
static void track_data_destroy(struct hist_trigger_data *hist_data,
			       struct action_data *data)
{
	struct trace_event_file *file = hist_data->event_file;

	destroy_hist_field(data->track_data.track_var, 0);

	if (data->action == ACTION_SNAPSHOT) {
		struct track_data *track_data;

		track_data = tracing_cond_snapshot_data(file->tr);
		if (track_data && track_data->hist_data == hist_data) {
			tracing_snapshot_cond_disable(file->tr);
			track_data_free(track_data);
		}
	}

	kfree(data->track_data.var_str);

	action_data_destroy(data);
}

static int action_create(struct hist_trigger_data *hist_data,
			 struct action_data *data);

/*
 * Resolve the '$var' tracked by onmax()/onchange(), create a reference
 * to it, and create the internal __max/__change tracking variable.
 */
static int track_data_create(struct hist_trigger_data *hist_data,
			     struct action_data *data)
{
	struct hist_field *var_field, *ref_field, *track_var = NULL;
	struct trace_event_file *file = hist_data->event_file;
	struct trace_array *tr = file->tr;
	char *track_data_var_str;
	int ret = 0;

	track_data_var_str = data->track_data.var_str;
	if (track_data_var_str[0] != '$') {
		hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
		return
-EINVAL; 3638 } 3639 track_data_var_str++; 3640 3641 var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str); 3642 if (!var_field) { 3643 hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str)); 3644 return -EINVAL; 3645 } 3646 3647 ref_field = create_var_ref(hist_data, var_field, NULL, NULL); 3648 if (!ref_field) 3649 return -ENOMEM; 3650 3651 data->track_data.var_ref = ref_field; 3652 3653 if (data->handler == HANDLER_ONMAX) 3654 track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64"); 3655 if (IS_ERR(track_var)) { 3656 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); 3657 ret = PTR_ERR(track_var); 3658 goto out; 3659 } 3660 3661 if (data->handler == HANDLER_ONCHANGE) 3662 track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64"); 3663 if (IS_ERR(track_var)) { 3664 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); 3665 ret = PTR_ERR(track_var); 3666 goto out; 3667 } 3668 data->track_data.track_var = track_var; 3669 3670 ret = action_create(hist_data, data); 3671 out: 3672 return ret; 3673 } 3674 3675 static int parse_action_params(struct trace_array *tr, char *params, 3676 struct action_data *data) 3677 { 3678 char *param, *saved_param; 3679 bool first_param = true; 3680 int ret = 0; 3681 3682 while (params) { 3683 if (data->n_params >= SYNTH_FIELDS_MAX) { 3684 hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0); 3685 ret = -EINVAL; 3686 goto out; 3687 } 3688 3689 param = strsep(¶ms, ","); 3690 if (!param) { 3691 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0); 3692 ret = -EINVAL; 3693 goto out; 3694 } 3695 3696 param = strstrip(param); 3697 if (strlen(param) < 2) { 3698 hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param)); 3699 ret = -EINVAL; 3700 goto out; 3701 } 3702 3703 saved_param = kstrdup(param, GFP_KERNEL); 3704 if (!saved_param) { 3705 ret = -ENOMEM; 3706 goto out; 3707 } 3708 3709 if (first_param && data->use_trace_keyword) { 3710 data->synth_event_name = saved_param; 3711 first_param = false; 3712 continue; 3713 
		}
		first_param = false;

		data->params[data->n_params++] = saved_param;
	}
 out:
	return ret;
}

/*
 * Parse the action portion of an onmax()/onchange()/onmatch() trigger:
 * '.save(...)', '.snapshot()', or '.trace(...)'/'.<synth_event>(...)'.
 * Fills in the action_data's check_val/save_data/fn ops and action kind.
 */
static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
			enum handler_id handler)
{
	char *action_name;
	int ret = 0;

	strsep(&str, ".");
	if (!str) {
		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
		ret = -EINVAL;
		goto out;
	}

	action_name = strsep(&str, "(");
	if (!action_name || !str) {
		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
		ret = -EINVAL;
		goto out;
	}

	if (str_has_prefix(action_name, "save")) {
		char *params = strsep(&str, ")");

		if (!params) {
			hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
			ret = -EINVAL;
			goto out;
		}

		ret = parse_action_params(tr, params, data);
		if (ret)
			goto out;

		if (handler == HANDLER_ONMAX)
			data->track_data.check_val = check_track_val_max;
		else if (handler == HANDLER_ONCHANGE)
			data->track_data.check_val = check_track_val_changed;
		else {
			/* save() only makes sense for onmax()/onchange() */
			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
			ret = -EINVAL;
			goto out;
		}

		data->track_data.save_data = save_track_data_vars;
		data->fn = ontrack_action;
		data->action = ACTION_SAVE;
	} else if (str_has_prefix(action_name, "snapshot")) {
		char *params = strsep(&str, ")");

		if (!str) {
			hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
			ret = -EINVAL;
			goto out;
		}

		if (handler == HANDLER_ONMAX)
			data->track_data.check_val = check_track_val_max;
		else if (handler == HANDLER_ONCHANGE)
			data->track_data.check_val = check_track_val_changed;
		else {
			/* snapshot() only makes sense for onmax()/onchange() */
			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
			ret = -EINVAL;
			goto out;
		}

		data->track_data.save_data = save_track_data_snapshot;
		data->fn = ontrack_action;
		data->action =
ACTION_SNAPSHOT;
	} else {
		/* Anything else is a synthetic event name, or 'trace(...)' */
		char *params = strsep(&str, ")");

		if (str_has_prefix(action_name, "trace"))
			data->use_trace_keyword = true;

		if (params) {
			ret = parse_action_params(tr, params, data);
			if (ret)
				goto out;
		}

		if (handler == HANDLER_ONMAX)
			data->track_data.check_val = check_track_val_max;
		else if (handler == HANDLER_ONCHANGE)
			data->track_data.check_val = check_track_val_changed;

		if (handler != HANDLER_ONMATCH) {
			data->track_data.save_data = action_trace;
			data->fn = ontrack_action;
		} else
			data->fn = action_trace;

		data->action = ACTION_TRACE;
	}

	data->action_name = kstrdup(action_name, GFP_KERNEL);
	if (!data->action_name) {
		ret = -ENOMEM;
		goto out;
	}

	data->handler = handler;
 out:
	return ret;
}

/* Parse 'onmax($var).action(...)' / 'onchange($var).action(...)' */
static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
					    char *str, enum handler_id handler)
{
	struct action_data *data;
	int ret = -EINVAL;
	char *var_str;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	var_str = strsep(&str, ")");
	if (!var_str || !str) {
		ret = -EINVAL;
		goto free;
	}

	data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
	if (!data->track_data.var_str) {
		ret = -ENOMEM;
		goto free;
	}

	ret = action_parse(hist_data->event_file->tr, str, data, handler);
	if (ret)
		goto free;
 out:
	return data;
 free:
	track_data_destroy(hist_data, data);
	data = ERR_PTR(ret);
	goto out;
}

/* Free an onmatch() action's event identification plus the action itself */
static void onmatch_destroy(struct action_data *data)
{
	kfree(data->match_data.event);
	kfree(data->match_data.event_system);

	action_data_destroy(data);
}

/* Free a field variable's value and variable hist_fields (NULL-safe) */
static void destroy_field_var(struct field_var *field_var)
{
	if (!field_var)
		return;

	destroy_hist_field(field_var->var, 0);
	destroy_hist_field(field_var->val, 0);

	kfree(field_var);
}

/* Free all automatically-created field variables and save() variables */
static void destroy_field_vars(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_field_vars; i++)
		destroy_field_var(hist_data->field_vars[i]);

	for (i = 0; i < hist_data->n_save_vars; i++)
		destroy_field_var(hist_data->save_vars[i]);
}

/* Record a newly created field variable in the hist trigger's table */
static void save_field_var(struct hist_trigger_data *hist_data,
			   struct field_var *field_var)
{
	hist_data->field_vars[hist_data->n_field_vars++] = field_var;

	/* Stack traces are saved in the string storage too */
	if (field_var->val->flags & (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE))
		hist_data->n_field_var_str++;
}


/*
 * Check that a hist field is compatible with the synthetic event field
 * at field_pos: 0 if compatible, -EINVAL otherwise.
 */
static int check_synth_field(struct synth_event *event,
			     struct hist_field *hist_field,
			     unsigned int field_pos)
{
	struct synth_field *field;

	if (field_pos >= event->n_fields)
		return -EINVAL;

	field = event->fields[field_pos];

	/*
	 * A dynamic string synth field can accept static or
	 * dynamic. A static string synth field can only accept a
	 * same-sized static string, which is checked for later.
	 */
	if (strstr(hist_field->type, "char[") && field->is_string
	    && field->is_dynamic)
		return 0;

	if (strstr(hist_field->type, "long[") && field->is_stack)
		return 0;

	if (strcmp(field->type, hist_field->type) != 0) {
		/* Different type names are OK if size and signedness agree */
		if (field->size != hist_field->size ||
		    (!field->is_string && field->is_signed != hist_field->is_signed))
			return -EINVAL;
	}

	return 0;
}

/* Look up the variable referenced by an action param of the form '$var' */
static struct hist_field *
trace_action_find_var(struct hist_trigger_data *hist_data,
		      struct action_data *data,
		      char *system, char *event, char *var)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field;

	var++; /* skip '$' */

	hist_field = find_target_event_var(hist_data, system, event, var);
	if (!hist_field) {
		/* Fall back to the onmatch() event if none was qualified */
		if (!system && data->handler == HANDLER_ONMATCH) {
			system = data->match_data.event_system;
			event = data->match_data.event;
		}

		hist_field = find_event_var(hist_data, system, event, var);
	}

	if (!hist_field)
		hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));

	return hist_field;
}

/* Create (or find) a field variable for a bare field name action param */
static struct hist_field *
trace_action_create_field_var(struct hist_trigger_data *hist_data,
			      struct action_data *data, char *system,
			      char *event, char *var)
{
	struct hist_field *hist_field = NULL;
	struct field_var *field_var;

	/*
	 * First try to create a field var on the target event (the
	 * currently being defined). This will create a variable for
	 * unqualified fields on the target event, or if qualified,
	 * target fields that have qualified names matching the target.
	 */
	field_var = create_target_field_var(hist_data, system, event, var);

	if (field_var && !IS_ERR(field_var)) {
		save_field_var(hist_data, field_var);
		hist_field = field_var->var;
	} else {
		field_var = NULL;
		/*
		 * If no explicit system.event is specified, default to
		 * looking for fields on the onmatch(system.event.xxx)
		 * event.
		 */
		if (!system && data->handler == HANDLER_ONMATCH) {
			system = data->match_data.event_system;
			event = data->match_data.event;
		}

		if (!event)
			goto free;
		/*
		 * At this point, we're looking at a field on another
		 * event. Because we can't modify a hist trigger on
		 * another event to add a variable for a field, we need
		 * to create a new trigger on that event and create the
		 * variable at the same time.
		 */
		hist_field = create_field_var_hist(hist_data, system, event, var);
		if (IS_ERR(hist_field))
			goto free;
	}
 out:
	return hist_field;
 free:
	destroy_field_var(field_var);
	hist_field = NULL;
	goto out;
}

/*
 * Create variable references for a synthetic-event action's params,
 * checking each against the corresponding synthetic event field.
 * Takes a reference on the synth event; dropped again on error.
 */
static int trace_action_create(struct hist_trigger_data *hist_data,
			       struct action_data *data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	char *event_name, *param, *system = NULL;
	struct hist_field *hist_field, *var_ref;
	unsigned int i;
	unsigned int field_pos = 0;
	struct synth_event *event;
	char *synth_event_name;
	int var_ref_idx, ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */
	if (data->n_params > SYNTH_FIELDS_MAX)
		return -EINVAL;

	if (data->use_trace_keyword)
		synth_event_name = data->synth_event_name;
	else
		synth_event_name = data->action_name;

	event = find_synth_event(synth_event_name);
	if (!event) {
		hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND,
errpos(synth_event_name)); 4038 return -EINVAL; 4039 } 4040 4041 event->ref++; 4042 4043 for (i = 0; i < data->n_params; i++) { 4044 char *p; 4045 4046 p = param = kstrdup(data->params[i], GFP_KERNEL); 4047 if (!param) { 4048 ret = -ENOMEM; 4049 goto err; 4050 } 4051 4052 system = strsep(¶m, "."); 4053 if (!param) { 4054 param = (char *)system; 4055 system = event_name = NULL; 4056 } else { 4057 event_name = strsep(¶m, "."); 4058 if (!param) { 4059 kfree(p); 4060 ret = -EINVAL; 4061 goto err; 4062 } 4063 } 4064 4065 if (param[0] == '$') 4066 hist_field = trace_action_find_var(hist_data, data, 4067 system, event_name, 4068 param); 4069 else 4070 hist_field = trace_action_create_field_var(hist_data, 4071 data, 4072 system, 4073 event_name, 4074 param); 4075 4076 if (!hist_field) { 4077 kfree(p); 4078 ret = -EINVAL; 4079 goto err; 4080 } 4081 4082 if (check_synth_field(event, hist_field, field_pos) == 0) { 4083 var_ref = create_var_ref(hist_data, hist_field, 4084 system, event_name); 4085 if (!var_ref) { 4086 kfree(p); 4087 ret = -ENOMEM; 4088 goto err; 4089 } 4090 4091 var_ref_idx = find_var_ref_idx(hist_data, var_ref); 4092 if (WARN_ON(var_ref_idx < 0)) { 4093 kfree(p); 4094 ret = var_ref_idx; 4095 goto err; 4096 } 4097 4098 data->var_ref_idx[i] = var_ref_idx; 4099 4100 field_pos++; 4101 kfree(p); 4102 continue; 4103 } 4104 4105 hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param)); 4106 kfree(p); 4107 ret = -EINVAL; 4108 goto err; 4109 } 4110 4111 if (field_pos != event->n_fields) { 4112 hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name)); 4113 ret = -EINVAL; 4114 goto err; 4115 } 4116 4117 data->synth_event = event; 4118 out: 4119 return ret; 4120 err: 4121 event->ref--; 4122 4123 goto out; 4124 } 4125 4126 static int action_create(struct hist_trigger_data *hist_data, 4127 struct action_data *data) 4128 { 4129 struct trace_event_file *file = hist_data->event_file; 4130 struct trace_array *tr = file->tr; 4131 struct track_data *track_data; 4132 
	struct field_var *field_var;
	unsigned int i;
	char *param;
	int ret = 0;

	if (data->action == ACTION_TRACE)
		return trace_action_create(hist_data, data);

	if (data->action == ACTION_SNAPSHOT) {
		track_data = track_data_alloc(hist_data->key_size, data, hist_data);
		if (IS_ERR(track_data)) {
			ret = PTR_ERR(track_data);
			goto out;
		}

		ret = tracing_snapshot_cond_enable(file->tr, track_data,
						   cond_snapshot_update);
		if (ret)
			track_data_free(track_data);

		goto out;
	}

	if (data->action == ACTION_SAVE) {
		/* only one save() action is allowed per hist trigger */
		if (hist_data->n_save_vars) {
			ret = -EEXIST;
			hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
			goto out;
		}

		for (i = 0; i < data->n_params; i++) {
			param = kstrdup(data->params[i], GFP_KERNEL);
			if (!param) {
				ret = -ENOMEM;
				goto out;
			}

			field_var = create_target_field_var(hist_data, NULL, NULL, param);
			if (IS_ERR(field_var)) {
				hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
					 errpos(param));
				ret = PTR_ERR(field_var);
				kfree(param);
				goto out;
			}

			hist_data->save_vars[hist_data->n_save_vars++] = field_var;
			if (field_var->val->flags &
			    (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE))
				hist_data->n_save_var_str++;
			kfree(param);
		}
	}
 out:
	return ret;
}

/* onmatch() creation is just generic action creation */
static int onmatch_create(struct hist_trigger_data *hist_data,
			  struct action_data *data)
{
	return action_create(hist_data, data);
}

/* Parse 'onmatch(subsys.event).action(...)' into an action_data */
static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
{
	char *match_event, *match_event_system;
	struct action_data *data;
	int ret = -EINVAL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	match_event = strsep(&str, ")");
	if (!match_event || !str) {
		hist_err(tr, HIST_ERR_NO_CLOSING_PAREN,
			 errpos(match_event));
		goto free;
	}

	match_event_system = strsep(&match_event, ".");
	if (!match_event) {
		hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
		goto free;
	}

	if (IS_ERR(event_file(tr, match_event_system, match_event))) {
		hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
		goto free;
	}

	data->match_data.event = kstrdup(match_event, GFP_KERNEL);
	if (!data->match_data.event) {
		ret = -ENOMEM;
		goto free;
	}

	data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
	if (!data->match_data.event_system) {
		ret = -ENOMEM;
		goto free;
	}

	ret = action_parse(tr, str, data, HANDLER_ONMATCH);
	if (ret)
		goto free;
 out:
	return data;
 free:
	onmatch_destroy(data);
	data = ERR_PTR(ret);
	goto out;
}

/* Create the implicit 'hitcount' value field every hist trigger has */
static int create_hitcount_val(struct hist_trigger_data *hist_data)
{
	hist_data->fields[HITCOUNT_IDX] =
		create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
	if (!hist_data->fields[HITCOUNT_IDX])
		return -ENOMEM;

	hist_data->n_vals++;
	hist_data->n_fields++;

	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
		return -EINVAL;

	return 0;
}

/*
 * Common helper to create a value (var_name == NULL) or variable field
 * from an expression string, rejecting unsupported field modifiers.
 */
static int __create_val_field(struct hist_trigger_data *hist_data,
			      unsigned int val_idx,
			      struct trace_event_file *file,
			      char *var_name, char *field_str,
			      unsigned long flags)
{
	struct hist_field *hist_field;
	int ret = 0, n_subexprs = 0;

	hist_field = parse_expr(hist_data, file, field_str, flags, var_name, &n_subexprs);
	if (IS_ERR(hist_field)) {
		ret = PTR_ERR(hist_field);
		goto out;
	}

	/* values and variables should not have some modifiers */
	if (hist_field->flags & HIST_FIELD_FL_VAR) {
		/* Variable */
		if (hist_field->flags & (HIST_FIELD_FL_GRAPH |
					 HIST_FIELD_FL_PERCENT |
					 HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2))
			goto err;
	} else {
		/* Value */
		if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
					 HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
					 HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
					 HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE))
			goto err;
	}

	hist_data->fields[val_idx] = hist_field;

	++hist_data->n_vals;
	++hist_data->n_fields;

	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
		ret = -EINVAL;
 out:
	return ret;
 err:
	hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
	return -EINVAL;
}

/* Create a plain value field (no variable name) at val_idx */
static int create_val_field(struct hist_trigger_data *hist_data,
			    unsigned int val_idx,
			    struct trace_event_file *file,
			    char *field_str)
{
	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
		return -EINVAL;

	return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
}

static const char no_comm[] = "(no comm)";

/* Return the elt's saved comm as a string address, or "(no comm)" */
static u64 hist_field_execname(struct hist_field *hist_field,
			       struct tracing_map_elt *elt,
			       struct trace_buffer *buffer,
			       struct ring_buffer_event *rbe,
			       void *event)
{
	struct hist_elt_data *elt_data;

	if (WARN_ON_ONCE(!elt))
		return (u64)(unsigned long)no_comm;

	elt_data = elt->private_data;

	if (WARN_ON_ONCE(!elt_data->comm))
		return (u64)(unsigned long)no_comm;

	return (u64)(unsigned long)(elt_data->comm);
}

/* Return the address of a stacktrace stored in the event's dynamic area */
static u64 hist_field_stack(struct hist_field *hist_field,
			    struct tracing_map_elt *elt,
			    struct trace_buffer *buffer,
			    struct ring_buffer_event *rbe,
			    void *event)
{
	u32 str_item = *(u32 *)(event + hist_field->field->offset);
	int str_loc = str_item & 0xffff;	/* low 16 bits = offset in record */
	char *addr = (char *)(event + str_loc);

	return (u64)(unsigned
		     long)addr;
}

/* Dispatch a hist_field's value-retrieval function based on fn_num */
static u64 hist_fn_call(struct hist_field *hist_field,
			struct tracing_map_elt *elt,
			struct trace_buffer *buffer,
			struct ring_buffer_event *rbe,
			void *event)
{
	switch (hist_field->fn_num) {
	case HIST_FIELD_FN_VAR_REF:
		return hist_field_var_ref(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_COUNTER:
		return hist_field_counter(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_CONST:
		return hist_field_const(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_LOG2:
		return hist_field_log2(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_BUCKET:
		return hist_field_bucket(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_TIMESTAMP:
		return hist_field_timestamp(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_CPU:
		return hist_field_cpu(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_COMM:
		return hist_field_comm(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_STRING:
		return hist_field_string(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_DYNSTRING:
		return hist_field_dynstring(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_RELDYNSTRING:
		return hist_field_reldynstring(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_PSTRING:
		return hist_field_pstring(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_S64:
		return hist_field_s64(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_U64:
		return hist_field_u64(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_S32:
		return hist_field_s32(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_U32:
		return hist_field_u32(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_S16:
		return hist_field_s16(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_U16:
		return hist_field_u16(hist_field,
				      elt, buffer, rbe, event);
	case HIST_FIELD_FN_S8:
		return hist_field_s8(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_U8:
		return hist_field_u8(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_UMINUS:
		return hist_field_unary_minus(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_MINUS:
		return hist_field_minus(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_PLUS:
		return hist_field_plus(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_DIV:
		return hist_field_div(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_MULT:
		return hist_field_mult(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_DIV_POWER2:
		return div_by_power_of_two(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_DIV_NOT_POWER2:
		return div_by_not_power_of_two(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_DIV_MULT_SHIFT:
		return div_by_mult_and_shift(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_EXECNAME:
		return hist_field_execname(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_STACK:
		return hist_field_stack(hist_field, elt, buffer, rbe, event);
	default:
		return 0;
	}
}

/* Convert a var that points to common_pid.execname to a string */
static void update_var_execname(struct hist_field *hist_field)
{
	hist_field->flags = HIST_FIELD_FL_STRING | HIST_FIELD_FL_VAR |
		HIST_FIELD_FL_EXECNAME;
	hist_field->size = MAX_FILTER_STR_VAL;
	hist_field->is_signed = 0;

	/* replace the owned type string with a static literal */
	kfree_const(hist_field->type);
	hist_field->type = "char[]";

	hist_field->fn_num = HIST_FIELD_FN_EXECNAME;
}

/* Create a named variable field from a 'var_name=expr' assignment */
static int create_var_field(struct hist_trigger_data *hist_data,
			    unsigned int val_idx,
			    struct trace_event_file *file,
			    char *var_name, char *expr_str)
{
	struct trace_array *tr = hist_data->event_file->tr;
	unsigned long
		      flags = 0;
	int ret;

	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
		return -EINVAL;

	if (find_var(hist_data, file, var_name) && !hist_data->remove) {
		hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
		return -EINVAL;
	}

	flags |= HIST_FIELD_FL_VAR;
	hist_data->n_vars++;
	if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
		return -EINVAL;

	ret = __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);

	if (!ret && hist_data->fields[val_idx]->flags & HIST_FIELD_FL_EXECNAME)
		update_var_execname(hist_data->fields[val_idx]);

	if (!ret && hist_data->fields[val_idx]->flags &
	    (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE))
		hist_data->fields[val_idx]->var_str_idx = hist_data->n_var_str++;

	return ret;
}

/* Parse the 'vals=' portion of a hist trigger and create value fields */
static int create_val_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	unsigned int i, j = 1, n_hitcount = 0;
	char *fields_str, *field_str;
	int ret;

	ret = create_hitcount_val(hist_data);
	if (ret)
		goto out;

	fields_str = hist_data->attrs->vals_str;
	if (!fields_str)
		goto out;

	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
		     j < TRACING_MAP_VALS_MAX; i++) {
		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;

		/* an explicit 'hitcount' duplicates the implicit one; skip first */
		if (strcmp(field_str, "hitcount") == 0) {
			if (!n_hitcount++)
				continue;
		}

		ret = create_val_field(hist_data, j++, file, field_str);
		if (ret)
			goto out;
	}

	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
		ret = -EINVAL;
 out:
	/* There is only raw hitcount but nohitcount suppresses it. */
	if (j == 1 && hist_data->attrs->no_hitcount) {
		hist_err(hist_data->event_file->tr, HIST_ERR_NEED_NOHC_VAL, 0);
		ret = -ENOENT;
	}

	return ret;
}

/*
 * Parse and create a single key field from the 'keys=' specification.
 * Returns the (u64-aligned) key size on success, negative errno on error.
 */
static int create_key_field(struct hist_trigger_data *hist_data,
			    unsigned int key_idx,
			    unsigned int key_offset,
			    struct trace_event_file *file,
			    char *field_str)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field = NULL;
	unsigned long flags = 0;
	unsigned int key_size;
	int ret = 0, n_subexprs = 0;

	if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
		return -EINVAL;

	flags |= HIST_FIELD_FL_KEY;

	if (strcmp(field_str, "stacktrace") == 0) {
		flags |= HIST_FIELD_FL_STACKTRACE;
		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
		hist_field = create_hist_field(hist_data, NULL, flags, NULL);
	} else {
		hist_field = parse_expr(hist_data, file, field_str, flags,
					NULL, &n_subexprs);
		if (IS_ERR(hist_field)) {
			ret = PTR_ERR(hist_field);
			goto out;
		}

		if (field_has_hist_vars(hist_field, 0)) {
			hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
			destroy_hist_field(hist_field, 0);
			ret = -EINVAL;
			goto out;
		}

		key_size = hist_field->size;
	}

	hist_data->fields[key_idx] = hist_field;

	key_size = ALIGN(key_size, sizeof(u64));
	hist_data->fields[key_idx]->size = key_size;
	hist_data->fields[key_idx]->offset = key_offset;

	hist_data->key_size += key_size;

	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
		ret = -EINVAL;
		goto out;
	}

	hist_data->n_keys++;
	hist_data->n_fields++;

	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
		return -EINVAL;

	ret = key_size;
 out:
	return ret;
}

static int create_key_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
4578 { 4579 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals; 4580 char *fields_str, *field_str; 4581 int ret = -EINVAL; 4582 4583 fields_str = hist_data->attrs->keys_str; 4584 if (!fields_str) 4585 goto out; 4586 4587 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) { 4588 field_str = strsep(&fields_str, ","); 4589 if (!field_str) 4590 break; 4591 ret = create_key_field(hist_data, i, key_offset, 4592 file, field_str); 4593 if (ret < 0) 4594 goto out; 4595 key_offset += ret; 4596 } 4597 if (fields_str) { 4598 ret = -EINVAL; 4599 goto out; 4600 } 4601 ret = 0; 4602 out: 4603 return ret; 4604 } 4605 4606 static int create_var_fields(struct hist_trigger_data *hist_data, 4607 struct trace_event_file *file) 4608 { 4609 unsigned int i, j = hist_data->n_vals; 4610 int ret = 0; 4611 4612 unsigned int n_vars = hist_data->attrs->var_defs.n_vars; 4613 4614 for (i = 0; i < n_vars; i++) { 4615 char *var_name = hist_data->attrs->var_defs.name[i]; 4616 char *expr = hist_data->attrs->var_defs.expr[i]; 4617 4618 ret = create_var_field(hist_data, j++, file, var_name, expr); 4619 if (ret) 4620 goto out; 4621 } 4622 out: 4623 return ret; 4624 } 4625 4626 static void free_var_defs(struct hist_trigger_data *hist_data) 4627 { 4628 unsigned int i; 4629 4630 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) { 4631 kfree(hist_data->attrs->var_defs.name[i]); 4632 kfree(hist_data->attrs->var_defs.expr[i]); 4633 } 4634 4635 hist_data->attrs->var_defs.n_vars = 0; 4636 } 4637 4638 static int parse_var_defs(struct hist_trigger_data *hist_data) 4639 { 4640 struct trace_array *tr = hist_data->event_file->tr; 4641 char *s, *str, *var_name, *field_str; 4642 unsigned int i, j, n_vars = 0; 4643 int ret = 0; 4644 4645 for (i = 0; i < hist_data->attrs->n_assignments; i++) { 4646 str = hist_data->attrs->assignment_str[i]; 4647 for (j = 0; j < TRACING_MAP_VARS_MAX; j++) { 4648 field_str = strsep(&str, ","); 4649 if (!field_str) 4650 break; 4651 4652 var_name = strsep(&field_str, 
"="); 4653 if (!var_name || !field_str) { 4654 hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT, 4655 errpos(var_name)); 4656 ret = -EINVAL; 4657 goto free; 4658 } 4659 4660 if (n_vars == TRACING_MAP_VARS_MAX) { 4661 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name)); 4662 ret = -EINVAL; 4663 goto free; 4664 } 4665 4666 s = kstrdup(var_name, GFP_KERNEL); 4667 if (!s) { 4668 ret = -ENOMEM; 4669 goto free; 4670 } 4671 hist_data->attrs->var_defs.name[n_vars] = s; 4672 4673 s = kstrdup(field_str, GFP_KERNEL); 4674 if (!s) { 4675 kfree(hist_data->attrs->var_defs.name[n_vars]); 4676 hist_data->attrs->var_defs.name[n_vars] = NULL; 4677 ret = -ENOMEM; 4678 goto free; 4679 } 4680 hist_data->attrs->var_defs.expr[n_vars++] = s; 4681 4682 hist_data->attrs->var_defs.n_vars = n_vars; 4683 } 4684 } 4685 4686 return ret; 4687 free: 4688 free_var_defs(hist_data); 4689 4690 return ret; 4691 } 4692 4693 static int create_hist_fields(struct hist_trigger_data *hist_data, 4694 struct trace_event_file *file) 4695 { 4696 int ret; 4697 4698 ret = parse_var_defs(hist_data); 4699 if (ret) 4700 return ret; 4701 4702 ret = create_val_fields(hist_data, file); 4703 if (ret) 4704 goto out; 4705 4706 ret = create_var_fields(hist_data, file); 4707 if (ret) 4708 goto out; 4709 4710 ret = create_key_fields(hist_data, file); 4711 4712 out: 4713 free_var_defs(hist_data); 4714 4715 return ret; 4716 } 4717 4718 static int is_descending(struct trace_array *tr, const char *str) 4719 { 4720 if (!str) 4721 return 0; 4722 4723 if (strcmp(str, "descending") == 0) 4724 return 1; 4725 4726 if (strcmp(str, "ascending") == 0) 4727 return 0; 4728 4729 hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str)); 4730 4731 return -EINVAL; 4732 } 4733 4734 static int create_sort_keys(struct hist_trigger_data *hist_data) 4735 { 4736 struct trace_array *tr = hist_data->event_file->tr; 4737 char *fields_str = hist_data->attrs->sort_key_str; 4738 struct tracing_map_sort_key *sort_key; 4739 int descending, ret = 0; 
	unsigned int i, j, k;

	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */

	if (!fields_str)
		goto out;

	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
		struct hist_field *hist_field;
		char *field_str, *field_name;
		const char *test_name;

		sort_key = &hist_data->sort_keys[i];

		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;

		if (!*field_str) {
			ret = -EINVAL;
			hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
			break;
		}

		/* More entries left on the last iteration: too many keys */
		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
			hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort="));
			ret = -EINVAL;
			break;
		}

		/* Split "name.modifier" - field_str now holds the modifier */
		field_name = strsep(&field_str, ".");
		if (!field_name || !*field_name) {
			ret = -EINVAL;
			hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
			break;
		}

		/* hitcount sorts on field_idx 0, which is the default */
		if (strcmp(field_name, "hitcount") == 0) {
			descending = is_descending(tr, field_str);
			if (descending < 0) {
				ret = descending;
				break;
			}
			sort_key->descending = descending;
			continue;
		}

		/*
		 * Match the name against non-var val fields; k counts only
		 * the sortable fields so idx lines up with the map's sums.
		 */
		for (j = 1, k = 1; j < hist_data->n_fields; j++) {
			unsigned int idx;

			hist_field = hist_data->fields[j];
			if (hist_field->flags & HIST_FIELD_FL_VAR)
				continue;

			idx = k++;

			test_name = hist_field_name(hist_field, 0);

			if (strcmp(field_name, test_name) == 0) {
				sort_key->field_idx = idx;
				descending = is_descending(tr, field_str);
				if (descending < 0) {
					ret = descending;
					goto out;
				}
				sort_key->descending = descending;
				break;
			}
		}
		if (j == hist_data->n_fields) {
			ret = -EINVAL;
			hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name));
			break;
		}
	}

	hist_data->n_sort_keys = i;
 out:
	return ret;
}

/* Tear down all parsed actions, dispatching on the handler type */
static void destroy_actions(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->handler == HANDLER_ONMATCH)
			onmatch_destroy(data);
		else if (data->handler == HANDLER_ONMAX ||
			 data->handler == HANDLER_ONCHANGE)
			track_data_destroy(hist_data, data);
		else
			kfree(data);
	}
}

/*
 * Parse each action string ("onmatch(...)", "onmax(...)",
 * "onchange(...)") into an action_data and append it to
 * hist_data->actions[].  An unrecognized prefix leaves hid == 0 and
 * is rejected via the default case.  Returns 0 or negative errno.
 */
static int parse_actions(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct action_data *data;
	unsigned int i;
	int ret = 0;
	char *str;
	int len;

	for (i = 0; i < hist_data->attrs->n_actions; i++) {
		enum handler_id hid = 0;
		char *action_str;

		str = hist_data->attrs->action_str[i];

		/* str_has_prefix() returns the prefix length on a match */
		if ((len = str_has_prefix(str, "onmatch(")))
			hid = HANDLER_ONMATCH;
		else if ((len = str_has_prefix(str, "onmax(")))
			hid = HANDLER_ONMAX;
		else if ((len = str_has_prefix(str, "onchange(")))
			hid = HANDLER_ONCHANGE;

		action_str = str + len;

		switch (hid) {
		case HANDLER_ONMATCH:
			data = onmatch_parse(tr, action_str);
			break;
		case HANDLER_ONMAX:
		case HANDLER_ONCHANGE:
			data = track_data_parse(hist_data, action_str, hid);
			break;
		default:
			data = ERR_PTR(-EINVAL);
			break;
		}

		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		hist_data->actions[hist_data->n_actions++] = data;
	}

	return ret;
}

/*
 * Second-phase action setup: resolve the already-parsed actions
 * against the now-created histogram fields/variables.
 */
static int create_actions(struct hist_trigger_data *hist_data)
{
	struct action_data *data;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < hist_data->attrs->n_actions; i++) {
		data = hist_data->actions[i];

		if (data->handler == HANDLER_ONMATCH) {
			ret = onmatch_create(hist_data, data);
			if (ret)
				break;
		} else if (data->handler == HANDLER_ONMAX ||
			   data->handler == HANDLER_ONCHANGE) {
			ret
			    = track_data_create(hist_data, data);
			if (ret)
				break;
		} else {
			ret = -EINVAL;
			break;
		}
	}

	return ret;
}

/*
 * Print per-entry action output (e.g. onmax tracked values) after the
 * entry itself.  Snapshot actions are skipped here; their output is
 * produced elsewhere - presumably via track_data_snapshot_print(),
 * see hist_trigger_show().
 */
static void print_actions(struct seq_file *m,
			  struct hist_trigger_data *hist_data,
			  struct tracing_map_elt *elt)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->action == ACTION_SNAPSHOT)
			continue;

		if (data->handler == HANDLER_ONMAX ||
		    data->handler == HANDLER_ONCHANGE)
			track_data_print(m, hist_data, elt, data);
	}
}

/*
 * Print the comma-separated parameter list of a save() or trace()
 * action, reproducing the original trigger syntax.
 */
static void print_action_spec(struct seq_file *m,
			      struct hist_trigger_data *hist_data,
			      struct action_data *data)
{
	unsigned int i;

	if (data->action == ACTION_SAVE) {
		for (i = 0; i < hist_data->n_save_vars; i++) {
			seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
			if (i < hist_data->n_save_vars - 1)
				seq_puts(m, ",");
		}
	} else if (data->action == ACTION_TRACE) {
		/* "trace" keyword form prints the synth event name first */
		if (data->use_trace_keyword)
			seq_printf(m, "%s", data->synth_event_name);
		for (i = 0; i < data->n_params; i++) {
			if (i || data->use_trace_keyword)
				seq_puts(m, ",");
			seq_printf(m, "%s", data->params[i]);
		}
	}
}

/* Reproduce an ":onmax(var).action(...)" / ":onchange(...)" spec */
static void print_track_data_spec(struct seq_file *m,
				  struct hist_trigger_data *hist_data,
				  struct action_data *data)
{
	if (data->handler == HANDLER_ONMAX)
		seq_puts(m, ":onmax(");
	else if (data->handler == HANDLER_ONCHANGE)
		seq_puts(m, ":onchange(");
	seq_printf(m, "%s", data->track_data.var_str);
	seq_printf(m, ").%s(", data->action_name);

	print_action_spec(m, hist_data, data);

	seq_puts(m, ")");
}

/* Reproduce an ":onmatch(sys.event).action(...)" spec */
static void print_onmatch_spec(struct seq_file *m,
			       struct hist_trigger_data *hist_data,
			       struct action_data *data)
{
	seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
		   data->match_data.event);

	seq_printf(m, "%s(", data->action_name);

	print_action_spec(m, hist_data, data);

	seq_puts(m, ")");
}

/*
 * Compare the action lists of two hist triggers for equivalence:
 * same handlers, actions, parameters, action/synth-event names and
 * handler-specific data, in the same order.  Used when matching a
 * new trigger against an existing named trigger.
 */
static bool actions_match(struct hist_trigger_data *hist_data,
			  struct hist_trigger_data *hist_data_test)
{
	unsigned int i, j;

	if (hist_data->n_actions != hist_data_test->n_actions)
		return false;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];
		struct action_data *data_test = hist_data_test->actions[i];
		char *action_name, *action_name_test;

		if (data->handler != data_test->handler)
			return false;
		if (data->action != data_test->action)
			return false;

		if (data->n_params != data_test->n_params)
			return false;

		for (j = 0; j < data->n_params; j++) {
			if (strcmp(data->params[j], data_test->params[j]) != 0)
				return false;
		}

		/* The effective name depends on the "trace" keyword form */
		if (data->use_trace_keyword)
			action_name = data->synth_event_name;
		else
			action_name = data->action_name;

		if (data_test->use_trace_keyword)
			action_name_test = data_test->synth_event_name;
		else
			action_name_test = data_test->action_name;

		if (strcmp(action_name, action_name_test) != 0)
			return false;

		if (data->handler == HANDLER_ONMATCH) {
			if (strcmp(data->match_data.event_system,
				   data_test->match_data.event_system) != 0)
				return false;
			if (strcmp(data->match_data.event,
				   data_test->match_data.event) != 0)
				return false;
		} else if (data->handler == HANDLER_ONMAX ||
			   data->handler == HANDLER_ONCHANGE) {
			if (strcmp(data->track_data.var_str,
				   data_test->track_data.var_str) != 0)
				return false;
		}
	}

	return true;
}


/* Print every action spec, used when showing the trigger definition */
static void print_actions_spec(struct seq_file *m,
			       struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->handler == HANDLER_ONMATCH)
			print_onmatch_spec(m, hist_data, data);
		else if (data->handler == HANDLER_ONMAX ||
			 data->handler == HANDLER_ONCHANGE)
			print_track_data_spec(m, hist_data, data);
	}
}

/* Free the command strings and descriptors of field-variable hists */
static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_field_var_hists; i++) {
		kfree(hist_data->field_var_hists[i]->cmd);
		kfree(hist_data->field_var_hists[i]);
	}
}

/* Full teardown of a hist trigger: attrs, fields, map, actions, vars */
static void destroy_hist_data(struct hist_trigger_data *hist_data)
{
	if (!hist_data)
		return;

	destroy_hist_trigger_attrs(hist_data->attrs);
	destroy_hist_fields(hist_data);
	tracing_map_destroy(hist_data->map);

	destroy_actions(hist_data);
	destroy_field_vars(hist_data);
	destroy_field_var_hists(hist_data);

	kfree(hist_data);
}

/*
 * Register each hist field with the tracing_map: keys get a compare
 * function chosen from the field type, non-var vals become sum
 * fields, and vars get a map var slot recorded in hist_field->var.
 */
static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
{
	struct tracing_map *map = hist_data->map;
	struct ftrace_event_field *field;
	struct hist_field *hist_field;
	int i, idx = 0;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field->flags & HIST_FIELD_FL_KEY) {
			tracing_map_cmp_fn_t cmp_fn;

			field = hist_field->field;

			/* Stacktrace keys are only ever matched, not sorted */
			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
				cmp_fn = tracing_map_cmp_none;
			else if (!field || hist_field->flags & HIST_FIELD_FL_CPU)
				cmp_fn = tracing_map_cmp_num(hist_field->size,
							     hist_field->is_signed);
			else if (is_string_field(field))
				cmp_fn = tracing_map_cmp_string;
			else
				cmp_fn = tracing_map_cmp_num(field->size,
							     field->is_signed);
			idx = tracing_map_add_key_field(map,
							hist_field->offset,
							cmp_fn);
		} else if (!(hist_field->flags &
			     HIST_FIELD_FL_VAR))
			idx = tracing_map_add_sum_field(map);

		if (idx < 0)
			return idx;

		/* Vars (val or key) additionally need a map var slot */
		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			idx = tracing_map_add_var(map);
			if (idx < 0)
				return idx;
			hist_field->var.idx = idx;
			hist_field->var.hist_data = hist_data;
		}
	}

	return 0;
}

/*
 * Allocate and fully construct a hist_trigger_data from parsed attrs:
 * actions, fields, sort keys, the tracing_map and its field layout.
 * Takes ownership of @attrs on success; on failure attrs is left for
 * the caller to free (hence attrs is NULLed before teardown) and an
 * ERR_PTR is returned.
 */
static struct hist_trigger_data *
create_hist_data(unsigned int map_bits,
		 struct hist_trigger_attrs *attrs,
		 struct trace_event_file *file,
		 bool remove)
{
	const struct tracing_map_ops *map_ops = NULL;
	struct hist_trigger_data *hist_data;
	int ret = 0;

	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
	if (!hist_data)
		return ERR_PTR(-ENOMEM);

	hist_data->attrs = attrs;
	hist_data->remove = remove;
	hist_data->event_file = file;

	ret = parse_actions(hist_data);
	if (ret)
		goto free;

	ret = create_hist_fields(hist_data, file);
	if (ret)
		goto free;

	ret = create_sort_keys(hist_data);
	if (ret)
		goto free;

	map_ops = &hist_trigger_elt_data_ops;

	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
					    map_ops, hist_data);
	if (IS_ERR(hist_data->map)) {
		ret = PTR_ERR(hist_data->map);
		hist_data->map = NULL;
		goto free;
	}

	ret = create_tracing_map_fields(hist_data);
	if (ret)
		goto free;
 out:
	return hist_data;
 free:
	/* attrs is owned by the caller on failure; don't double-free it */
	hist_data->attrs = NULL;

	destroy_hist_data(hist_data);

	hist_data = ERR_PTR(ret);

	goto out;
}

/*
 * Per-event update of a map element: evaluate every val field,
 * storing vars (with string/stacktrace vars copied into the
 * element's per-var string storage) and summing the rest, then store
 * any key vars and finally update the field variables.
 */
static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
				    struct tracing_map_elt *elt,
				    struct trace_buffer *buffer, void *rec,
				    struct ring_buffer_event *rbe,
				    u64 *var_ref_vals)
{
	struct hist_elt_data *elt_data;
	struct hist_field *hist_field;
	unsigned int i, var_idx;
	u64 hist_val;

	elt_data = elt->private_data;
	elt_data->var_ref_vals = var_ref_vals;

	for_each_hist_val_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		hist_val = hist_fn_call(hist_field, elt, buffer, rbe, rec);
		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			var_idx = hist_field->var.idx;

			if (hist_field->flags &
			    (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE)) {
				unsigned int str_start, var_str_idx, idx;
				char *str, *val_str;
				unsigned int size;

				/* Var string slots follow field/save var slots */
				str_start = hist_data->n_field_var_str +
					hist_data->n_save_var_str;
				var_str_idx = hist_field->var_str_idx;
				idx = str_start + var_str_idx;

				str = elt_data->field_var_str[idx];
				val_str = (char *)(uintptr_t)hist_val;

				if (hist_field->flags & HIST_FIELD_FL_STRING) {
					size = min(hist_field->size, STR_VAR_LEN_MAX);
					strscpy(str, val_str, size);
				} else {
					/*
					 * Stacktrace layout: element count in
					 * the first word, entries after it;
					 * zero-terminate short traces.
					 */
					char *stack_start = str + sizeof(unsigned long);
					int e;

					e = stack_trace_save((void *)stack_start,
							     HIST_STACKTRACE_DEPTH,
							     HIST_STACKTRACE_SKIP);
					if (e < HIST_STACKTRACE_DEPTH - 1)
						((unsigned long *)stack_start)[e] = 0;
					*((unsigned long *)str) = e;
				}
				/* The var's value is the stable per-elt copy */
				hist_val = (u64)(uintptr_t)str;
			}
			tracing_map_set_var(elt, var_idx, hist_val);
			continue;
		}
		tracing_map_update_sum(elt, i, hist_val);
	}

	for_each_hist_key_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			hist_val = hist_fn_call(hist_field, elt, buffer, rbe, rec);
			var_idx = hist_field->var.idx;
			tracing_map_set_var(elt, var_idx, hist_val);
		}
	}

	update_field_vars(hist_data, elt, buffer, rbe, rec);
}

/*
 * Copy one key's bytes into the compound key buffer at the key's
 * offset.  For string keys the copy length comes from the actual
 * string/field length rather than the padded key size, clamped so the
 * zeroed compound key always leaves a terminating NUL.
 */
static inline void add_to_key(char *compound_key, void *key,
			      struct hist_field *key_field, void *rec)
{
	size_t size = key_field->size;

	if (key_field->flags & HIST_FIELD_FL_STRING) {

		if (key_field->flags & HIST_FIELD_FL_COMM) {
			size = strlen((char *)key);
		} else {
			struct ftrace_event_field *field;

			field = key_field->field;
			/* Dynamic strings keep their length in the high 16 bits */
			if (field->filter_type == FILTER_DYN_STRING ||
			    field->filter_type == FILTER_RDYN_STRING)
				size = *(u32 *)(rec + field->offset) >> 16;
			else if (field->filter_type == FILTER_STATIC_STRING)
				size = field->size;
		}

		/* ensure NULL-termination */
		if (size > key_field->size - 1)
			size = key_field->size - 1;
	}
	memcpy(compound_key + key_field->offset, key, size);
}

/* Run every attached action for the element that was just updated */
static void
hist_trigger_actions(struct hist_trigger_data *hist_data,
		     struct tracing_map_elt *elt,
		     struct trace_buffer *buffer, void *rec,
		     struct ring_buffer_event *rbe, void *key,
		     u64 *var_ref_vals)
{
	struct action_data *data;
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		data = hist_data->actions[i];
		data->fn(hist_data, elt, buffer, rec, rbe, key, data, var_ref_vals);
	}
}

/*
 * The hist_pad structure is used to save information to create
 * a histogram from the histogram trigger. It's too big to store
 * on the stack, so when the histogram trigger is initialized
 * a percpu array of 4 hist_pad structures is allocated.
 * This will cover every context from normal, softirq, irq and NMI
 * in the very unlikely event that a trigger happens at each of
 * these contexts and interrupts a currently active trigger.
 */
struct hist_pad {
	unsigned long		entries[HIST_STACKTRACE_DEPTH];	/* stacktrace key scratch */
	u64			var_ref_vals[TRACING_MAP_VARS_MAX]; /* resolved var refs */
	char			compound_key[HIST_KEY_SIZE_MAX];   /* multi-key scratch */
};

static struct hist_pad __percpu *hist_pads;
static DEFINE_PER_CPU(int, hist_pad_cnt);	/* per-CPU nesting depth */
static refcount_t hist_pad_ref;			/* users of hist_pads */

/* One hist_pad for every context (normal, softirq, irq, NMI) */
#define MAX_HIST_CNT 4

/*
 * Allocate the percpu hist_pad arrays on first use; later callers
 * just take a reference.  Must be called under event_mutex.
 */
static int alloc_hist_pad(void)
{
	lockdep_assert_held(&event_mutex);

	if (refcount_read(&hist_pad_ref)) {
		refcount_inc(&hist_pad_ref);
		return 0;
	}

	hist_pads = __alloc_percpu(sizeof(struct hist_pad) * MAX_HIST_CNT,
				   __alignof__(struct hist_pad));
	if (!hist_pads)
		return -ENOMEM;

	refcount_set(&hist_pad_ref, 1);
	return 0;
}

/* Drop a reference; free the percpu arrays when the last user goes */
static void free_hist_pad(void)
{
	lockdep_assert_held(&event_mutex);

	if (!refcount_dec_and_test(&hist_pad_ref))
		return;

	free_percpu(hist_pads);
	hist_pads = NULL;
}

/*
 * Claim this CPU's next hist_pad nesting slot.  Disables preemption
 * on success (released by put_hist_pad()); returns NULL if all
 * MAX_HIST_CNT context slots are already in use on this CPU.
 */
static struct hist_pad *get_hist_pad(void)
{
	struct hist_pad *hist_pad;
	int cnt;

	if (WARN_ON_ONCE(!hist_pads))
		return NULL;

	preempt_disable();

	hist_pad = per_cpu_ptr(hist_pads, smp_processor_id());

	if (this_cpu_read(hist_pad_cnt) == MAX_HIST_CNT) {
		preempt_enable();
		return NULL;
	}

	cnt = this_cpu_inc_return(hist_pad_cnt) - 1;

	return &hist_pad[cnt];
}

/* Release the slot taken by get_hist_pad() and re-enable preemption */
static void put_hist_pad(void)
{
	this_cpu_dec(hist_pad_cnt);
	preempt_enable();
}

/*
 * Hot path: called for every matching event.  Builds the (possibly
 * compound) key from the key fields, resolves variable references,
 * inserts/updates the map element and fires any attached actions.
 */
static void event_hist_trigger(struct event_trigger_data *data,
			       struct trace_buffer *buffer, void *rec,
			       struct ring_buffer_event *rbe)
{
	struct hist_trigger_data *hist_data = data->private_data;
	bool use_compound_key = (hist_data->n_keys > 1);
	struct tracing_map_elt *elt = NULL;
	struct hist_field *key_field;
	struct hist_pad *hist_pad;
	u64 field_contents;
	void *key = NULL;
	unsigned int i;

	if (unlikely(!rbe))
		return;

	hist_pad = get_hist_pad();
	if (!hist_pad)
		return;

	memset(hist_pad->compound_key, 0, hist_data->key_size);

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			unsigned long *entries = hist_pad->entries;

			memset(entries, 0, HIST_STACKTRACE_SIZE);
			if (key_field->field) {
				/*
				 * Field-backed stacktrace: first word is the
				 * entry count, entries follow it.
				 */
				unsigned long *stack, n_entries;

				field_contents = hist_fn_call(key_field, elt, buffer, rbe, rec);
				stack = (unsigned long *)(long)field_contents;
				n_entries = *stack;
				memcpy(entries, ++stack, n_entries * sizeof(unsigned long));
			} else {
				stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
						 HIST_STACKTRACE_SKIP);
			}
			key = entries;
		} else {
			field_contents = hist_fn_call(key_field, elt, buffer, rbe, rec);
			if (key_field->flags & HIST_FIELD_FL_STRING) {
				key = (void *)(unsigned long)field_contents;
				/* Strings are always copied into the compound key */
				use_compound_key = true;
			} else
				key = (void *)&field_contents;
		}

		if (use_compound_key)
			add_to_key(hist_pad->compound_key, key, key_field, rec);
	}

	if (use_compound_key)
		key = hist_pad->compound_key;

	/* First pass: bail out if any "self" var ref can't be resolved */
	if (hist_data->n_var_refs &&
	    !resolve_var_refs(hist_data, key, hist_pad->var_ref_vals, false))
		goto out;

	elt = tracing_map_insert(hist_data->map, key);
	if (!elt)
		goto out;

	hist_trigger_elt_update(hist_data, elt, buffer, rec, rbe, hist_pad->var_ref_vals);

	/* Second pass resolves all refs; actions only run if that succeeds */
	if (resolve_var_refs(hist_data, key, hist_pad->var_ref_vals, true)) {
		hist_trigger_actions(hist_data, elt, buffer, rec, rbe,
				     key, hist_pad->var_ref_vals);
	}

	hist_poll_wakeup();

 out:
	put_hist_pad();
}

/* Print a stored stacktrace key, stopping at the first zero entry */
static void hist_trigger_stacktrace_print(struct seq_file *m,
					  unsigned long *stacktrace_entries,
					  unsigned int max_entries)
{
	unsigned int spaces = 8;
	unsigned int i;

	for (i = 0; i < max_entries; i++) {
		if (!stacktrace_entries[i])
			return;

		seq_printf(m, "%*c", 1 + spaces, ' ');
		seq_printf(m, "%pS\n", (void*)stacktrace_entries[i]);
	}
}

/*
 * Print the "{ key: value, ... }" portion of a histogram entry,
 * formatting each key according to its modifier flags (.hex, .sym,
 * .execname, .syscall, .log2, .buckets, strings, stacktraces, ...).
 */
static void hist_trigger_print_key(struct seq_file *m,
				   struct hist_trigger_data *hist_data,
				   void *key,
				   struct tracing_map_elt *elt)
{
	struct hist_field *key_field;
	bool multiline = false;
	const char *field_name;
	unsigned int i;
	u64 uval;

	seq_puts(m, "{ ");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		/* Key fields start at index n_vals; comma before all but the first */
		if (i > hist_data->n_vals)
			seq_puts(m, ", ");

		field_name = hist_field_name(key_field, 0);

		if (key_field->flags & HIST_FIELD_FL_HEX) {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %llx", field_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: [%llx] %-45ps", field_name,
				   uval, (void *)(uintptr_t)uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: [%llx] %-55pS", field_name,
				   uval, (void *)(uintptr_t)uval);
		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
			/* The comm saved with the element accompanies the pid */
			struct hist_elt_data *elt_data = elt->private_data;
			char *comm;

			if (WARN_ON_ONCE(!elt_data))
				return;

			comm = elt_data->comm;

			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %-16s[%10llu]", field_name,
				   comm, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
			const char *syscall_name;

			uval = *(u64 *)(key + key_field->offset);
			syscall_name = get_syscall_name(uval);
			if (!syscall_name)
				syscall_name = "unknown_syscall";

			seq_printf(m, "%s: %-30s[%3llu]", field_name,
				   syscall_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			if (key_field->field)
				seq_printf(m, "%s.stacktrace", key_field->field->name);
			else
				seq_puts(m, "common_stacktrace:\n");
			hist_trigger_stacktrace_print(m,
						      key + key_field->offset,
						      HIST_STACKTRACE_DEPTH);
			multiline = true;
		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
				   *(u64 *)(key + key_field->offset));
		} else if (key_field->flags & HIST_FIELD_FL_BUCKET) {
			unsigned long buckets = key_field->buckets;
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: ~ %llu-%llu", field_name,
				   uval, uval + buckets -1);
		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
			seq_printf(m, "%s: %-50s", field_name,
				   (char *)(key + key_field->offset));
		} else {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %10llu", field_name, uval);
		}
	}

	if (!multiline)
		seq_puts(m, " ");

	seq_puts(m, "}");
}

/* Get the 100 times of the percentage of @val in @total */
static inline unsigned int __get_percentage(u64 val, u64 total)
{
	if (!total)
		goto div0;

	/* Avoid overflowing val * 10000; scale total down instead */
	if (val < (U64_MAX / 10000))
		return (unsigned int)div64_ul(val * 10000, total);

	total = div64_u64(total, 10000);
	if (!total)
		goto div0;

	return (unsigned int)div64_ul(val, total);
div0:
	/* UINT_MAX flags the nonsensical nonzero/zero case to callers */
	return val ? UINT_MAX : 0;
}

#define BAR_CHAR '#'

/*
 * Fill @buf with a @size-character bar proportional to val/max.
 * @buf must hold size + 1 bytes; "[ERROR]" signals a bad ratio.
 */
static inline const char *__fill_bar_str(char *buf, int size, u64 val, u64 max)
{
	unsigned int len = __get_percentage(val, max);
	int i;

	if (len == UINT_MAX) {
		snprintf(buf, size, "[ERROR]");
		return buf;
	}

	len = len * size / 10000;
	for (i = 0; i < len && i < size; i++)
		buf[i] = BAR_CHAR;
	while (i < size)
		buf[i++] = ' ';
	buf[size] = '\0';

	return buf;
}

/* Per-val-field max/total, used by .percent and .graph rendering */
struct hist_val_stat {
	u64 max;
	u64 total;
};

/*
 * Print one value of an entry, honoring the .percent/.graph/.hex
 * display modifiers.  @stats supplies the max/total computed by
 * print_entries() for percent/graph fields.
 */
static void hist_trigger_print_val(struct seq_file *m, unsigned int idx,
				   const char *field_name, unsigned long flags,
				   struct hist_val_stat *stats,
				   struct tracing_map_elt *elt)
{
	u64 val = tracing_map_read_sum(elt, idx);
	unsigned int pc;
	char bar[21];

	if (flags & HIST_FIELD_FL_PERCENT) {
		pc = __get_percentage(val, stats[idx].total);
		if (pc == UINT_MAX)
			seq_printf(m, " %s (%%):[ERROR]", field_name);
		else
			seq_printf(m, " %s (%%): %3u.%02u", field_name,
				   pc / 100, pc % 100);
	} else if (flags & HIST_FIELD_FL_GRAPH) {
		seq_printf(m, " %s: %20s", field_name,
			   __fill_bar_str(bar, 20, val, stats[idx].max));
	} else if (flags & HIST_FIELD_FL_HEX) {
		seq_printf(m, " %s: %10llx", field_name, val);
	} else {
		seq_printf(m, " %s: %10llu", field_name, val);
	}
}

/* Print one full histogram entry: key, hitcount, values, actions */
static void hist_trigger_entry_print(struct seq_file *m,
				     struct hist_trigger_data *hist_data,
				     struct hist_val_stat *stats,
				     void *key,
				     struct tracing_map_elt *elt)
{
	const char *field_name;
	unsigned int i = HITCOUNT_IDX;
	unsigned long flags;

	hist_trigger_print_key(m, hist_data, key, elt);

	/* At first, show the raw hitcount if !nohitcount */
	if (!hist_data->attrs->no_hitcount)
		hist_trigger_print_val(m, i, "hitcount", 0, stats, elt);

	for (i = 1; i <
		     hist_data->n_vals; i++) {
		field_name = hist_field_name(hist_data->fields[i], 0);
		flags = hist_data->fields[i]->flags;
		/* Vars and bare expressions are not displayed as values */
		if (flags & HIST_FIELD_FL_VAR || flags & HIST_FIELD_FL_EXPR)
			continue;

		seq_puts(m, " ");
		hist_trigger_print_val(m, i, field_name, flags, stats, elt);
	}

	print_actions(m, hist_data, elt);

	seq_puts(m, "\n");
}

/*
 * Sort the map's entries per the trigger's sort keys and print them.
 * For .percent/.graph values, a stats array of per-field max/total is
 * computed first.  Returns the number of entries printed or a
 * negative errno.
 */
static int print_entries(struct seq_file *m,
			 struct hist_trigger_data *hist_data)
{
	struct tracing_map_sort_entry **sort_entries = NULL;
	struct tracing_map *map = hist_data->map;
	int i, j, n_entries;
	struct hist_val_stat *stats = NULL;
	u64 val;

	n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
					     hist_data->n_sort_keys,
					     &sort_entries);
	if (n_entries < 0)
		return n_entries;

	/* Calculate the max and the total for each field if needed. */
	for (j = 0; j < hist_data->n_vals; j++) {
		if (!(hist_data->fields[j]->flags &
		      (HIST_FIELD_FL_PERCENT | HIST_FIELD_FL_GRAPH)))
			continue;
		/* Allocated lazily - only if some field actually needs stats */
		if (!stats) {
			stats = kcalloc(hist_data->n_vals, sizeof(*stats),
					GFP_KERNEL);
			if (!stats) {
				n_entries = -ENOMEM;
				goto out;
			}
		}
		for (i = 0; i < n_entries; i++) {
			val = tracing_map_read_sum(sort_entries[i]->elt, j);
			stats[j].total += val;
			if (stats[j].max < val)
				stats[j].max = val;
		}
	}

	for (i = 0; i < n_entries; i++)
		hist_trigger_entry_print(m, hist_data, stats,
					 sort_entries[i]->key,
					 sort_entries[i]->elt);

	kfree(stats);
 out:
	tracing_map_destroy_sort_entries(sort_entries, n_entries);

	return n_entries;
}

/*
 * Print one trigger's complete histogram: header with the trigger
 * definition, the sorted entries, any snapshot output, and totals.
 * @n > 0 means this is not the first trigger shown in the file.
 */
static void hist_trigger_show(struct seq_file *m,
			      struct event_trigger_data *data, int n)
{
	struct hist_trigger_data *hist_data;
	int n_entries;

	if (n > 0)
		seq_puts(m, "\n\n");

	seq_puts(m, "# event histogram\n#\n# trigger info: ");
	data->cmd_ops->print(m, data);
	seq_puts(m, "#\n\n");

	hist_data = data->private_data;
	n_entries = print_entries(m, hist_data);
	if (n_entries < 0)
		n_entries = 0;

	track_data_snapshot_print(m, hist_data);

	seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
		   (u64)atomic64_read(&hist_data->map->hits),
		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
}

/* Per-open-file state used to drive poll()/EPOLLPRI on the hist file */
struct hist_file_data {
	struct file *file;
	u64 last_read;	/* hit count at last read, for EPOLLIN */
	u64 last_act;	/* hit count at last activity, for EPOLLPRI */
};

/* Sum the hit counts of every hist trigger attached to @event_file */
static u64 get_hist_hit_count(struct trace_event_file *event_file)
{
	struct hist_trigger_data *hist_data;
	struct event_trigger_data *data;
	u64 ret = 0;

	list_for_each_entry(data, &event_file->triggers, list) {
		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = data->private_data;
			ret += atomic64_read(&hist_data->map->hits);
		}
	}
	return ret;
}

/* seq_file show: print every hist trigger and record the hit counts */
static int hist_show(struct seq_file *m, void *v)
{
	struct hist_file_data *hist_file = m->private;
	struct event_trigger_data *data;
	struct trace_event_file *event_file;
	int n = 0;

	guard(mutex)(&event_mutex);

	event_file = event_file_file(hist_file->file);
	if (unlikely(!event_file))
		return -ENODEV;

	list_for_each_entry(data, &event_file->triggers, list) {
		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
			hist_trigger_show(m, data, n++);
	}
	hist_file->last_read = get_hist_hit_count(event_file);
	/*
	 * Update last_act too so that poll()/POLLPRI can wait for the next
	 * event after any syscall on hist file.
	 */
	hist_file->last_act = hist_file->last_read;

	return 0;
}

/*
 * poll() on the hist file: EPOLLIN when hits changed since the last
 * read, EPOLLPRI when they changed since the last activity.
 */
static __poll_t event_hist_poll(struct file *file, struct poll_table_struct *wait)
{
	struct trace_event_file *event_file;
	struct seq_file *m = file->private_data;
	struct hist_file_data *hist_file = m->private;
	__poll_t ret = 0;
	u64 cnt;

	guard(mutex)(&event_mutex);

	event_file = event_file_data(file);
	if (!event_file)
		return EPOLLERR;

	hist_poll_wait(file, wait);

	cnt = get_hist_hit_count(event_file);
	if (hist_file->last_read != cnt)
		ret |= EPOLLIN | EPOLLRDNORM;
	if (hist_file->last_act != cnt) {
		hist_file->last_act = cnt;
		ret |= EPOLLPRI;
	}

	return ret;
}

/* Release: free the per-open state, then the tracefs file reference */
static int event_hist_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct hist_file_data *hist_file = m->private;

	kfree(hist_file);
	return tracing_single_release_file_tr(inode, file);
}

/*
 * Open the hist file: take a reference on the trace file, allocate
 * the per-open state and wire it up as the seq_file's private data.
 * All failure paths drop the reference taken by tracing_open_file_tr().
 */
static int event_hist_open(struct inode *inode, struct file *file)
{
	struct trace_event_file *event_file;
	struct hist_file_data *hist_file;
	int ret;

	ret = tracing_open_file_tr(inode, file);
	if (ret)
		return ret;

	guard(mutex)(&event_mutex);

	event_file = event_file_data(file);
	if (!event_file) {
		ret = -ENODEV;
		goto err;
	}

	hist_file = kzalloc(sizeof(*hist_file), GFP_KERNEL);
	if (!hist_file) {
		ret = -ENOMEM;
		goto err;
	}

	hist_file->file = file;
	hist_file->last_act = get_hist_hit_count(event_file);

	/* Clear private_data to avoid warning in single_open() */
	file->private_data = NULL;
	ret = single_open(file, hist_show, hist_file);
	if (ret) {
		kfree(hist_file);
		goto err;
	}

	return 0;
err:
	tracing_release_file_tr(inode, file);
	return ret;
}
const struct file_operations event_hist_fops = {
	.open = event_hist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = event_hist_release,
	.poll = event_hist_poll,
};

#ifdef CONFIG_HIST_TRIGGERS_DEBUG

#undef C
#define C(a, b) b

/* Names of the per-field compute functions, indexed by hist_field::fn_num. */
static const char * const field_funcs[] = { FIELD_FUNCS };

/* Print a human-readable decoding of a hist_field's flags. */
static void hist_field_debug_show_flags(struct seq_file *m,
					unsigned long flags)
{
	seq_puts(m, " flags:\n");

	if (flags & HIST_FIELD_FL_KEY)
		seq_puts(m, " HIST_FIELD_FL_KEY\n");
	else if (flags & HIST_FIELD_FL_HITCOUNT)
		seq_puts(m, " VAL: HIST_FIELD_FL_HITCOUNT\n");
	else if (flags & HIST_FIELD_FL_VAR)
		seq_puts(m, " HIST_FIELD_FL_VAR\n");
	else if (flags & HIST_FIELD_FL_VAR_REF)
		seq_puts(m, " HIST_FIELD_FL_VAR_REF\n");
	else
		seq_puts(m, " VAL: normal u64 value\n");

	if (flags & HIST_FIELD_FL_ALIAS)
		seq_puts(m, " HIST_FIELD_FL_ALIAS\n");
	else if (flags & HIST_FIELD_FL_CONST)
		seq_puts(m, " HIST_FIELD_FL_CONST\n");
}

/*
 * Dump one hist_field for the debug file.  'flags' are the flags the
 * caller expects the field to have; a mismatch is reported as an error.
 */
static int hist_field_debug_show(struct seq_file *m,
				 struct hist_field *field, unsigned long flags)
{
	if ((field->flags & flags) != flags) {
		seq_printf(m, "ERROR: bad flags - %lx\n", flags);
		return -EINVAL;
	}

	hist_field_debug_show_flags(m, field->flags);
	if (field->field)
		seq_printf(m, " ftrace_event_field name: %s\n",
			   field->field->name);

	if (field->flags & HIST_FIELD_FL_VAR) {
		seq_printf(m, " var.name: %s\n", field->var.name);
		seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
			   field->var.idx);
	}

	if (field->flags & HIST_FIELD_FL_CONST)
		seq_printf(m, " constant: %llu\n", field->constant);

	if (field->flags & HIST_FIELD_FL_ALIAS)
		seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n",
			   field->var_ref_idx);

	if (field->flags & HIST_FIELD_FL_VAR_REF) {
		seq_printf(m, " name: %s\n", field->name);
		seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
			   field->var.idx);
		seq_printf(m, " var.hist_data: %p\n", field->var.hist_data);
		seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n",
			   field->var_ref_idx);
		if (field->system)
			seq_printf(m, " system: %s\n", field->system);
		if (field->event_name)
			seq_printf(m, " event_name: %s\n", field->event_name);
	}

	seq_printf(m, " type: %s\n", field->type);
	seq_printf(m, " size: %u\n", field->size);
	seq_printf(m, " is_signed: %u\n", field->is_signed);
	seq_printf(m, " function: hist_field_%s()\n", field_funcs[field->fn_num]);

	return 0;
}

/*
 * Dump one field variable (var + val pair) for the debug file.
 * save_vars selects which array the entry came from, for labeling.
 */
static int field_var_debug_show(struct seq_file *m,
				struct field_var *field_var, unsigned int i,
				bool save_vars)
{
	const char *vars_name = save_vars ? "save_vars" : "field_vars";
	struct hist_field *field;
	int ret = 0;

	seq_printf(m, "\n hist_data->%s[%d]:\n", vars_name, i);

	field = field_var->var;

	seq_printf(m, "\n %s[%d].var:\n", vars_name, i);

	hist_field_debug_show_flags(m, field->flags);
	seq_printf(m, " var.name: %s\n", field->var.name);
	seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
		   field->var.idx);

	field = field_var->val;

	seq_printf(m, "\n %s[%d].val:\n", vars_name, i);
	if (field->field)
		seq_printf(m, " ftrace_event_field name: %s\n",
			   field->field->name);
	else {
		/* A field variable's val must be backed by an event field. */
		ret = -EINVAL;
		goto out;
	}

	seq_printf(m, " type: %s\n", field->type);
	seq_printf(m, " size: %u\n", field->size);
	seq_printf(m, " is_signed: %u\n", field->is_signed);
 out:
	return ret;
}

/* Dump the tracking/matching fields of one action for the debug file. */
static int hist_action_debug_show(struct seq_file *m,
				  struct action_data *data, int i)
{
	int ret = 0;

	if (data->handler == HANDLER_ONMAX ||
	    data->handler == HANDLER_ONCHANGE) {
		seq_printf(m, "\n hist_data->actions[%d].track_data.var_ref:\n", i);
		ret = hist_field_debug_show(m, data->track_data.var_ref,
					    HIST_FIELD_FL_VAR_REF);
		if (ret)
			goto out;

		seq_printf(m, "\n hist_data->actions[%d].track_data.track_var:\n", i);
		ret = hist_field_debug_show(m, data->track_data.track_var,
					    HIST_FIELD_FL_VAR);
		if (ret)
			goto out;
	}

	if (data->handler == HANDLER_ONMATCH) {
		seq_printf(m, "\n hist_data->actions[%d].match_data.event_system: %s\n",
			   i, data->match_data.event_system);
		seq_printf(m, " hist_data->actions[%d].match_data.event: %s\n",
			   i, data->match_data.event);
	}
 out:
	return ret;
}

/* Dump all actions and save() variables of a histogram for the debug file. */
static int hist_actions_debug_show(struct seq_file *m,
				   struct hist_trigger_data *hist_data)
{
	int i, ret = 0;

	if (hist_data->n_actions)
		seq_puts(m, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n");

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *action = hist_data->actions[i];

		ret = hist_action_debug_show(m, action, i);
		if (ret)
			goto out;
	}

	if (hist_data->n_save_vars)
		seq_puts(m, "\n save action variables (save() params):\n");

	for (i = 0; i < hist_data->n_save_vars; i++) {
		ret = field_var_debug_show(m, hist_data->save_vars[i], i, true);
		if (ret)
			goto out;
	}
 out:
	return ret;
}

/*
 * Dump the complete internal state of one hist trigger: vals, keys,
 * variable refs, field variables and actions.  n > 0 means this is not
 * the first trigger shown, so separate it from the previous one.
 */
static void hist_trigger_debug_show(struct seq_file *m,
				    struct event_trigger_data *data, int n)
{
	struct hist_trigger_data *hist_data;
	int i, ret;

	if (n > 0)
		seq_puts(m, "\n\n");

	seq_puts(m, "# event histogram\n#\n# trigger info: ");
	data->cmd_ops->print(m, data);
	seq_puts(m, "#\n\n");

	hist_data = data->private_data;

	seq_printf(m, "hist_data: %p\n\n", hist_data);
	seq_printf(m, " n_vals: %u\n", hist_data->n_vals);
	seq_printf(m, " n_keys: %u\n", hist_data->n_keys);
	seq_printf(m, " n_fields: %u\n", hist_data->n_fields);

	seq_puts(m, "\n val fields:\n\n");

	/* fields[0] is always the hitcount. */
	seq_puts(m, " hist_data->fields[0]:\n");
	ret = hist_field_debug_show(m, hist_data->fields[0],
				    HIST_FIELD_FL_HITCOUNT);
	if (ret)
		return;

	for (i = 1; i < hist_data->n_vals; i++) {
		seq_printf(m, "\n hist_data->fields[%d]:\n", i);
		ret = hist_field_debug_show(m, hist_data->fields[i], 0);
		if (ret)
			return;
	}

	seq_puts(m, "\n key fields:\n");

	/* Key fields follow the value fields in hist_data->fields[]. */
	for (i = hist_data->n_vals; i < hist_data->n_fields; i++) {
		seq_printf(m, "\n hist_data->fields[%d]:\n", i);
		ret = hist_field_debug_show(m, hist_data->fields[i],
					    HIST_FIELD_FL_KEY);
		if (ret)
			return;
	}

	if (hist_data->n_var_refs)
		seq_puts(m, "\n variable reference fields:\n");

	for (i = 0; i < hist_data->n_var_refs; i++) {
		seq_printf(m, "\n hist_data->var_refs[%d]:\n", i);
		ret = hist_field_debug_show(m, hist_data->var_refs[i],
					    HIST_FIELD_FL_VAR_REF);
		if (ret)
			return;
	}

	if (hist_data->n_field_vars)
		seq_puts(m, "\n field variables:\n");

	for (i = 0; i < hist_data->n_field_vars; i++) {
		ret = field_var_debug_show(m, hist_data->field_vars[i], i, false);
		if (ret)
			return;
	}

	ret = hist_actions_debug_show(m, hist_data);
	if (ret)
		return;
}

/* seq_file show callback for the per-event 'hist_debug' file. */
static int hist_debug_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct trace_event_file *event_file;
	int n = 0;

	guard(mutex)(&event_mutex);

	event_file = event_file_file(m->private);
	if (unlikely(!event_file))
		return -ENODEV;

	list_for_each_entry(data, &event_file->triggers, list) {
		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
			hist_trigger_debug_show(m, data, n++);
	}
	return 0;
}

/* Open the 'hist_debug' file, pinning the trace file for its lifetime. */
static int event_hist_debug_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = tracing_open_file_tr(inode, file);
	if (ret)
		return ret;

	/* Clear private_data to avoid warning in single_open() */
	file->private_data = NULL;
	ret = single_open(file, hist_debug_show, file);
	if (ret)
		tracing_release_file_tr(inode, file);
	return ret;
}

const struct file_operations event_hist_debug_fops = {
	.open = event_hist_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_single_release_file_tr,
};
#endif

/*
 * Print one hist field in trigger-command syntax (optionally prefixed
 * with "var=", "$" for references/aliases, suffixed with modifiers and
 * bucket size), so the printed trigger can be parsed back.
 */
static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
{
	const char *field_name = hist_field_name(hist_field, 0);

	if (hist_field->var.name)
		seq_printf(m, "%s=", hist_field->var.name);

	/*
	 * NOTE(review): the FL_CPU test is a plain 'if' while FL_COMM
	 * starts the else-if chain below — presumably a field never has
	 * both flags set; confirm against the flag-assignment sites.
	 */
	if (hist_field->flags & HIST_FIELD_FL_CPU)
		seq_puts(m, "common_cpu");
	if (hist_field->flags & HIST_FIELD_FL_COMM)
		seq_puts(m, "common_comm");
	else if (hist_field->flags & HIST_FIELD_FL_CONST)
		seq_printf(m, "%llu", hist_field->constant);
	else if (field_name) {
		if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
		    hist_field->flags & HIST_FIELD_FL_ALIAS)
			seq_putc(m, '$');
		seq_printf(m, "%s", field_name);
	} else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
		seq_puts(m, "common_timestamp");

	if (hist_field->flags) {
		if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
		    !(hist_field->flags & HIST_FIELD_FL_EXPR) &&
		    !(hist_field->flags & HIST_FIELD_FL_STACKTRACE)) {
			const char *flags = get_hist_field_flags(hist_field);

			if (flags)
				seq_printf(m, ".%s", flags);
		}
	}
	if (hist_field->buckets)
		seq_printf(m, "=%ld", hist_field->buckets);
}

/*
 * Print the full trigger command (keys, vals, vars, sort, size, clock,
 * actions, filter, pause state) in a form that can be re-parsed.
 */
static int event_hist_trigger_print(struct seq_file *m,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct hist_field *field;
	bool have_var = false;
	bool show_val = false;
	unsigned int i;

	seq_puts(m, HIST_PREFIX);

	if (data->name)
		seq_printf(m, "%s:", data->name);

	seq_puts(m, "keys=");

	for_each_hist_key_field(i, hist_data) {
		field = hist_data->fields[i];

		/* Key fields start at index n_vals; comma-separate the rest. */
		if (i > hist_data->n_vals)
			seq_puts(m, ",");

		if (field->flags & HIST_FIELD_FL_STACKTRACE) {
			if (field->field)
				seq_printf(m, "%s.stacktrace", field->field->name);
			else
				seq_puts(m, "common_stacktrace");
		} else
			hist_field_print(m, field);
	}

	seq_puts(m, ":vals=");

	for_each_hist_val_field(i, hist_data) {
		field = hist_data->fields[i];
		if (field->flags & HIST_FIELD_FL_VAR) {
			/* Variables are printed in their own section below. */
			have_var = true;
			continue;
		}

		if (i == HITCOUNT_IDX) {
			if (hist_data->attrs->no_hitcount)
				continue;
			seq_puts(m, "hitcount");
		} else {
			if (show_val)
				seq_puts(m, ",");
			hist_field_print(m, field);
		}
		show_val = true;
	}

	if (have_var) {
		unsigned int n = 0;

		seq_puts(m, ":");

		for_each_hist_val_field(i, hist_data) {
			field = hist_data->fields[i];

			if (field->flags & HIST_FIELD_FL_VAR) {
				if (n++)
					seq_puts(m, ",");
				hist_field_print(m, field);
			}
		}
	}

	seq_puts(m, ":sort=");

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		struct tracing_map_sort_key *sort_key;
		unsigned int idx, first_key_idx;

		/* skip VAR vals */
		first_key_idx = hist_data->n_vals - hist_data->n_vars;

		sort_key = &hist_data->sort_keys[i];
		idx = sort_key->field_idx;

		if (WARN_ON(idx >= HIST_FIELDS_MAX))
			return -EINVAL;

		if (i > 0)
			seq_puts(m, ",");

		if (idx == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			/* Sort indices don't count vars; re-add them here. */
			if (idx >= first_key_idx)
				idx += hist_data->n_vars;
			hist_field_print(m, hist_data->fields[idx]);
		}

		if (sort_key->descending)
			seq_puts(m, ".descending");
	}
	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
	if (hist_data->enable_timestamps)
		seq_printf(m, ":clock=%s", hist_data->attrs->clock);
	if (hist_data->attrs->no_hitcount)
		seq_puts(m, ":nohitcount");

	print_actions_spec(m, hist_data);

	if (data->filter_str)
		seq_printf(m, " if %s", data->filter_str);

	if (data->paused)
		seq_puts(m, " [paused]");
	else
		seq_puts(m, " [active]");

	seq_putc(m, '\n');

	return 0;
}

/*
 * Take a reference on the trigger, registering it by name on the first
 * reference if it is a named trigger.
 */
static int event_hist_trigger_init(struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (alloc_hist_pad() < 0)
		return -ENOMEM;

	if (!data->ref && hist_data->attrs->name)
		save_named_trigger(hist_data->attrs->name, data);

	data->ref++;

	return 0;
}

/*
 * Tear down the hidden histograms created for field variables by
 * re-issuing each saved creation command with a '!' removal glob.
 */
static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
{
	struct trace_event_file *file;
	unsigned int i;
	char *cmd;
	int ret;

	for (i = 0; i < hist_data->n_field_var_hists; i++) {
		file = hist_data->field_var_hists[i]->hist_data->event_file;
		cmd = hist_data->field_var_hists[i]->cmd;
		ret = event_hist_trigger_parse(&trigger_hist_cmd, file,
					       "!hist", "hist", cmd);
		WARN_ON_ONCE(ret < 0);
	}
}

/*
 * Drop a reference on the trigger; on the last reference, unregister
 * its name and destroy all associated data.
 */
static void event_hist_trigger_free(struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		if (data->name)
			del_named_trigger(data);

		trigger_data_free(data);

		remove_hist_vars(hist_data);

		unregister_field_var_hists(hist_data);

		destroy_hist_data(hist_data);
	}
	free_hist_pad();
}

/*
 * init callback for a trigger that shares a named trigger's data:
 * reference both this instance and the backing named trigger.  On
 * failure, restore the stock cmd_ops copied in hist_register_trigger().
 */
static int event_hist_trigger_named_init(struct event_trigger_data *data)
{
	int ret;

	data->ref++;

	save_named_trigger(data->named_data->name, data);

	ret = event_hist_trigger_init(data->named_data);
	if (ret < 0) {
		kfree(data->cmd_ops);
		data->cmd_ops = &trigger_hist_cmd;
	}

	return ret;
}

/*
 * free callback paired with event_hist_trigger_named_init(): release
 * the backing named trigger, then this instance and its copied cmd_ops.
 */
static void event_hist_trigger_named_free(struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	event_hist_trigger_free(data->named_data);

	data->ref--;
	if (!data->ref) {
		struct event_command *cmd_ops = data->cmd_ops;

		del_named_trigger(data);
		trigger_data_free(data);
		kfree(cmd_ops);
	}
}

/*
 * Clear all accumulated entries of a histogram, pausing a named
 * trigger around the clear so no events are counted mid-operation.
 */
static void hist_clear(struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (data->name)
		pause_named_trigger(data);

	/* Wait for in-flight event handlers before clearing the map. */
	tracepoint_synchronize_unregister();

	tracing_map_clear(hist_data->map);

	if (data->name)
		unpause_named_trigger(data);
}

/* Two event fields are compatible if identical or equal in name/type/layout. */
static bool compatible_field(struct ftrace_event_field *field,
			     struct ftrace_event_field *test_field)
{
	if (field == test_field)
		return true;
	if (field == NULL || test_field == NULL)
		return false;
	if (strcmp(field->name, test_field->name) != 0)
		return false;
	if (strcmp(field->type, test_field->type) != 0)
		return false;
	if (field->size != test_field->size)
		return false;
	if (field->is_signed != test_field->is_signed)
		return false;

	return true;
}

/*
 * Decide whether two hist triggers describe the same histogram:
 * same fields, sort keys, variables, actions and (unless
 * ignore_filter) the same filter string.  named_data, if set, must be
 * the named trigger data_test belongs to.
 */
static bool hist_trigger_match(struct event_trigger_data *data,
			       struct event_trigger_data *data_test,
			       struct event_trigger_data *named_data,
			       bool ignore_filter)
{
	struct tracing_map_sort_key *sort_key, *sort_key_test;
	struct hist_trigger_data *hist_data, *hist_data_test;
	struct hist_field *key_field, *key_field_test;
	unsigned int i;

	if (named_data && (named_data != data_test) &&
	    (named_data != data_test->named_data))
		return false;

	if (!named_data && is_named_trigger(data_test))
		return false;

	hist_data = data->private_data;
	hist_data_test = data_test->private_data;

	if (hist_data->n_vals != hist_data_test->n_vals ||
	    hist_data->n_fields != hist_data_test->n_fields ||
	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
		return false;

	if (!ignore_filter) {
		if ((data->filter_str && !data_test->filter_str) ||
		    (!data->filter_str && data_test->filter_str))
			return false;
	}

	for_each_hist_field(i, hist_data) {
		key_field = hist_data->fields[i];
		key_field_test = hist_data_test->fields[i];

		if (key_field->flags != key_field_test->flags)
			return false;
		if (!compatible_field(key_field->field, key_field_test->field))
			return false;
		if (key_field->offset != key_field_test->offset)
			return false;
		if (key_field->size != key_field_test->size)
			return false;
		if (key_field->is_signed != key_field_test->is_signed)
			return false;
		if (!!key_field->var.name != !!key_field_test->var.name)
			return false;
		if (key_field->var.name &&
		    strcmp(key_field->var.name, key_field_test->var.name) != 0)
			return false;
	}

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		sort_key = &hist_data->sort_keys[i];
		sort_key_test = &hist_data_test->sort_keys[i];

		if (sort_key->field_idx != sort_key_test->field_idx ||
		    sort_key->descending != sort_key_test->descending)
			return false;
	}

	if (!ignore_filter && data->filter_str &&
	    (strcmp(data->filter_str, data_test->filter_str) != 0))
		return false;

	if (!actions_match(hist_data, hist_data_test))
		return false;

	return true;
}
/*
 * If the command only carries a pause/cont/clear attribute and matches
 * an existing (possibly named) trigger, apply the state change to that
 * trigger instead of creating a new one.  Returns true if handled.
 */
static bool existing_hist_update_only(char *glob,
				      struct event_trigger_data *data,
				      struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	bool updated = false;

	if (!hist_data->attrs->pause && !hist_data->attrs->cont &&
	    !hist_data->attrs->clear)
		goto out;

	if (hist_data->attrs->name) {
		named_data = find_named_trigger(hist_data->attrs->name);
		if (named_data) {
			if (!hist_trigger_match(data, named_data, named_data,
						true))
				goto out;
		}
	}

	/* A named command with no existing named trigger can't be an update. */
	if (hist_data->attrs->name && !named_data)
		goto out;

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			if (hist_data->attrs->pause)
				test->paused = true;
			else if (hist_data->attrs->cont)
				test->paused = false;
			else if (hist_data->attrs->clear)
				hist_clear(test);
			updated = true;
			goto out;
		}
	}
 out:
	return updated;
}

/*
 * Set or disable using the per CPU trace_buffer_event when possible.
6536 */ 6537 static int tracing_set_filter_buffering(struct trace_array *tr, bool set) 6538 { 6539 guard(mutex)(&trace_types_lock); 6540 6541 if (set && tr->no_filter_buffering_ref++) 6542 return 0; 6543 6544 if (!set) { 6545 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) 6546 return -EINVAL; 6547 6548 --tr->no_filter_buffering_ref; 6549 } 6550 6551 return 0; 6552 } 6553 6554 static int hist_register_trigger(char *glob, 6555 struct event_trigger_data *data, 6556 struct trace_event_file *file) 6557 { 6558 struct hist_trigger_data *hist_data = data->private_data; 6559 struct event_trigger_data *test, *named_data = NULL; 6560 struct trace_array *tr = file->tr; 6561 int ret = 0; 6562 6563 if (hist_data->attrs->name) { 6564 named_data = find_named_trigger(hist_data->attrs->name); 6565 if (named_data) { 6566 if (!hist_trigger_match(data, named_data, named_data, 6567 true)) { 6568 hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name)); 6569 ret = -EINVAL; 6570 goto out; 6571 } 6572 } 6573 } 6574 6575 if (hist_data->attrs->name && !named_data) 6576 goto new; 6577 6578 lockdep_assert_held(&event_mutex); 6579 6580 list_for_each_entry(test, &file->triggers, list) { 6581 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6582 if (hist_trigger_match(data, test, named_data, false)) { 6583 hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0); 6584 ret = -EEXIST; 6585 goto out; 6586 } 6587 } 6588 } 6589 new: 6590 if (hist_data->attrs->cont || hist_data->attrs->clear) { 6591 hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0); 6592 ret = -ENOENT; 6593 goto out; 6594 } 6595 6596 if (hist_data->attrs->pause) 6597 data->paused = true; 6598 6599 if (named_data) { 6600 struct event_command *cmd_ops; 6601 6602 data->private_data = named_data->private_data; 6603 set_named_trigger_data(data, named_data); 6604 /* Copy the command ops and update some of the functions */ 6605 cmd_ops = kmalloc(sizeof(*cmd_ops), GFP_KERNEL); 6606 if (!cmd_ops) { 6607 ret = -ENOMEM; 6608 goto out; 6609 } 6610 
*cmd_ops = *data->cmd_ops; 6611 cmd_ops->init = event_hist_trigger_named_init; 6612 cmd_ops->free = event_hist_trigger_named_free; 6613 data->cmd_ops = cmd_ops; 6614 } 6615 6616 if (data->cmd_ops->init) { 6617 ret = data->cmd_ops->init(data); 6618 if (ret < 0) 6619 goto out; 6620 } 6621 6622 if (hist_data->enable_timestamps) { 6623 char *clock = hist_data->attrs->clock; 6624 6625 ret = tracing_set_clock(file->tr, hist_data->attrs->clock); 6626 if (ret) { 6627 hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock)); 6628 goto out; 6629 } 6630 6631 tracing_set_filter_buffering(file->tr, true); 6632 } 6633 6634 if (named_data) 6635 destroy_hist_data(hist_data); 6636 out: 6637 return ret; 6638 } 6639 6640 static int hist_trigger_enable(struct event_trigger_data *data, 6641 struct trace_event_file *file) 6642 { 6643 int ret = 0; 6644 6645 list_add_tail_rcu(&data->list, &file->triggers); 6646 6647 update_cond_flag(file); 6648 6649 if (trace_event_trigger_enable_disable(file, 1) < 0) { 6650 list_del_rcu(&data->list); 6651 update_cond_flag(file); 6652 ret--; 6653 } 6654 6655 return ret; 6656 } 6657 6658 static bool have_hist_trigger_match(struct event_trigger_data *data, 6659 struct trace_event_file *file) 6660 { 6661 struct hist_trigger_data *hist_data = data->private_data; 6662 struct event_trigger_data *test, *named_data = NULL; 6663 bool match = false; 6664 6665 lockdep_assert_held(&event_mutex); 6666 6667 if (hist_data->attrs->name) 6668 named_data = find_named_trigger(hist_data->attrs->name); 6669 6670 list_for_each_entry(test, &file->triggers, list) { 6671 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6672 if (hist_trigger_match(data, test, named_data, false)) { 6673 match = true; 6674 break; 6675 } 6676 } 6677 } 6678 6679 return match; 6680 } 6681 6682 static bool hist_trigger_check_refs(struct event_trigger_data *data, 6683 struct trace_event_file *file) 6684 { 6685 struct hist_trigger_data *hist_data = data->private_data; 6686 struct event_trigger_data *test, 
*named_data = NULL; 6687 6688 lockdep_assert_held(&event_mutex); 6689 6690 if (hist_data->attrs->name) 6691 named_data = find_named_trigger(hist_data->attrs->name); 6692 6693 list_for_each_entry(test, &file->triggers, list) { 6694 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6695 if (!hist_trigger_match(data, test, named_data, false)) 6696 continue; 6697 hist_data = test->private_data; 6698 if (check_var_refs(hist_data)) 6699 return true; 6700 break; 6701 } 6702 } 6703 6704 return false; 6705 } 6706 6707 static void hist_unregister_trigger(char *glob, 6708 struct event_trigger_data *data, 6709 struct trace_event_file *file) 6710 { 6711 struct event_trigger_data *test = NULL, *iter, *named_data = NULL; 6712 struct hist_trigger_data *hist_data = data->private_data; 6713 6714 lockdep_assert_held(&event_mutex); 6715 6716 if (hist_data->attrs->name) 6717 named_data = find_named_trigger(hist_data->attrs->name); 6718 6719 list_for_each_entry(iter, &file->triggers, list) { 6720 if (iter->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6721 if (!hist_trigger_match(data, iter, named_data, false)) 6722 continue; 6723 test = iter; 6724 list_del_rcu(&test->list); 6725 trace_event_trigger_enable_disable(file, 0); 6726 update_cond_flag(file); 6727 break; 6728 } 6729 } 6730 6731 if (test && test->cmd_ops->free) 6732 test->cmd_ops->free(test); 6733 6734 if (hist_data->enable_timestamps) { 6735 if (!hist_data->remove || test) 6736 tracing_set_filter_buffering(file->tr, false); 6737 } 6738 } 6739 6740 static bool hist_file_check_refs(struct trace_event_file *file) 6741 { 6742 struct hist_trigger_data *hist_data; 6743 struct event_trigger_data *test; 6744 6745 lockdep_assert_held(&event_mutex); 6746 6747 list_for_each_entry(test, &file->triggers, list) { 6748 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6749 hist_data = test->private_data; 6750 if (check_var_refs(hist_data)) 6751 return true; 6752 } 6753 } 6754 6755 return false; 6756 } 6757 6758 static void 
hist_unreg_all(struct trace_event_file *file) 6759 { 6760 struct event_trigger_data *test, *n; 6761 struct hist_trigger_data *hist_data; 6762 struct synth_event *se; 6763 const char *se_name; 6764 6765 lockdep_assert_held(&event_mutex); 6766 6767 if (hist_file_check_refs(file)) 6768 return; 6769 6770 list_for_each_entry_safe(test, n, &file->triggers, list) { 6771 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6772 hist_data = test->private_data; 6773 list_del_rcu(&test->list); 6774 trace_event_trigger_enable_disable(file, 0); 6775 6776 se_name = trace_event_name(file->event_call); 6777 se = find_synth_event(se_name); 6778 if (se) 6779 se->ref--; 6780 6781 update_cond_flag(file); 6782 if (hist_data->enable_timestamps) 6783 tracing_set_filter_buffering(file->tr, false); 6784 if (test->cmd_ops->free) 6785 test->cmd_ops->free(test); 6786 } 6787 } 6788 } 6789 6790 static int event_hist_trigger_parse(struct event_command *cmd_ops, 6791 struct trace_event_file *file, 6792 char *glob, char *cmd, 6793 char *param_and_filter) 6794 { 6795 unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT; 6796 struct event_trigger_data *trigger_data; 6797 struct hist_trigger_attrs *attrs; 6798 struct hist_trigger_data *hist_data; 6799 char *param, *filter, *p, *start; 6800 struct synth_event *se; 6801 const char *se_name; 6802 bool remove; 6803 int ret = 0; 6804 6805 lockdep_assert_held(&event_mutex); 6806 6807 if (WARN_ON(!glob)) 6808 return -EINVAL; 6809 6810 if (glob[0]) { 6811 hist_err_clear(); 6812 last_cmd_set(file, param_and_filter); 6813 } 6814 6815 remove = event_trigger_check_remove(glob); 6816 6817 if (event_trigger_empty_param(param_and_filter)) 6818 return -EINVAL; 6819 6820 /* 6821 * separate the trigger from the filter (k:v [if filter]) 6822 * allowing for whitespace in the trigger 6823 */ 6824 p = param = param_and_filter; 6825 do { 6826 p = strstr(p, "if"); 6827 if (!p) 6828 break; 6829 if (p == param_and_filter) 6830 return -EINVAL; 6831 if (*(p - 1) != ' ' && 
*(p - 1) != '\t') { 6832 p++; 6833 continue; 6834 } 6835 if (p >= param_and_filter + strlen(param_and_filter) - (sizeof("if") - 1) - 1) 6836 return -EINVAL; 6837 if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') { 6838 p++; 6839 continue; 6840 } 6841 break; 6842 } while (1); 6843 6844 if (!p) 6845 filter = NULL; 6846 else { 6847 *(p - 1) = '\0'; 6848 filter = strstrip(p); 6849 param = strstrip(param); 6850 } 6851 6852 /* 6853 * To simplify arithmetic expression parsing, replace occurrences of 6854 * '.sym-offset' modifier with '.symXoffset' 6855 */ 6856 start = strstr(param, ".sym-offset"); 6857 while (start) { 6858 *(start + 4) = 'X'; 6859 start = strstr(start + 11, ".sym-offset"); 6860 } 6861 6862 attrs = parse_hist_trigger_attrs(file->tr, param); 6863 if (IS_ERR(attrs)) 6864 return PTR_ERR(attrs); 6865 6866 if (attrs->map_bits) 6867 hist_trigger_bits = attrs->map_bits; 6868 6869 hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove); 6870 if (IS_ERR(hist_data)) { 6871 destroy_hist_trigger_attrs(attrs); 6872 return PTR_ERR(hist_data); 6873 } 6874 6875 trigger_data = trigger_data_alloc(cmd_ops, cmd, param, hist_data); 6876 if (!trigger_data) { 6877 ret = -ENOMEM; 6878 goto out_free; 6879 } 6880 6881 ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data); 6882 if (ret < 0) 6883 goto out_free; 6884 6885 if (remove) { 6886 if (!have_hist_trigger_match(trigger_data, file)) 6887 goto out_free; 6888 6889 if (hist_trigger_check_refs(trigger_data, file)) { 6890 ret = -EBUSY; 6891 goto out_free; 6892 } 6893 6894 event_trigger_unregister(cmd_ops, file, glob+1, trigger_data); 6895 se_name = trace_event_name(file->event_call); 6896 se = find_synth_event(se_name); 6897 if (se) 6898 se->ref--; 6899 ret = 0; 6900 goto out_free; 6901 } 6902 6903 if (existing_hist_update_only(glob, trigger_data, file)) 6904 goto out_free; 6905 6906 if (!get_named_trigger_data(trigger_data)) { 6907 6908 ret = create_actions(hist_data); 6909 if (ret) 
6910 goto out_free; 6911 6912 if (has_hist_vars(hist_data) || hist_data->n_var_refs) { 6913 ret = save_hist_vars(hist_data); 6914 if (ret) 6915 goto out_free; 6916 } 6917 6918 ret = tracing_map_init(hist_data->map); 6919 if (ret) 6920 goto out_free; 6921 } 6922 6923 ret = event_trigger_register(cmd_ops, file, glob, trigger_data); 6924 if (ret < 0) 6925 goto out_free; 6926 6927 ret = hist_trigger_enable(trigger_data, file); 6928 if (ret) 6929 goto out_unreg; 6930 6931 se_name = trace_event_name(file->event_call); 6932 se = find_synth_event(se_name); 6933 if (se) 6934 se->ref++; 6935 out: 6936 if (ret == 0 && glob[0]) 6937 hist_err_clear(); 6938 6939 return ret; 6940 out_unreg: 6941 event_trigger_unregister(cmd_ops, file, glob+1, trigger_data); 6942 out_free: 6943 remove_hist_vars(hist_data); 6944 6945 trigger_data_free(trigger_data); 6946 6947 destroy_hist_data(hist_data); 6948 goto out; 6949 } 6950 6951 static struct event_command trigger_hist_cmd = { 6952 .name = "hist", 6953 .trigger_type = ETT_EVENT_HIST, 6954 .flags = EVENT_CMD_FL_NEEDS_REC, 6955 .parse = event_hist_trigger_parse, 6956 .reg = hist_register_trigger, 6957 .unreg = hist_unregister_trigger, 6958 .unreg_all = hist_unreg_all, 6959 .set_filter = set_trigger_filter, 6960 .trigger = event_hist_trigger, 6961 .print = event_hist_trigger_print, 6962 .init = event_hist_trigger_init, 6963 .free = event_hist_trigger_free, 6964 }; 6965 6966 __init int register_trigger_hist_cmd(void) 6967 { 6968 int ret; 6969 6970 ret = register_event_command(&trigger_hist_cmd); 6971 WARN_ON(ret < 0); 6972 6973 return ret; 6974 } 6975 6976 static void 6977 hist_enable_trigger(struct event_trigger_data *data, 6978 struct trace_buffer *buffer, void *rec, 6979 struct ring_buffer_event *event) 6980 { 6981 struct enable_trigger_data *enable_data = data->private_data; 6982 struct event_trigger_data *test; 6983 6984 list_for_each_entry_rcu(test, &enable_data->file->triggers, list, 6985 lockdep_is_held(&event_mutex)) { 6986 if 
(test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6987 if (enable_data->enable) 6988 test->paused = false; 6989 else 6990 test->paused = true; 6991 } 6992 } 6993 } 6994 6995 static void hist_enable_unreg_all(struct trace_event_file *file) 6996 { 6997 struct event_trigger_data *test, *n; 6998 6999 list_for_each_entry_safe(test, n, &file->triggers, list) { 7000 if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) { 7001 list_del_rcu(&test->list); 7002 update_cond_flag(file); 7003 trace_event_trigger_enable_disable(file, 0); 7004 if (test->cmd_ops->free) 7005 test->cmd_ops->free(test); 7006 } 7007 } 7008 } 7009 7010 static struct event_command trigger_hist_enable_cmd = { 7011 .name = ENABLE_HIST_STR, 7012 .trigger_type = ETT_HIST_ENABLE, 7013 .parse = event_enable_trigger_parse, 7014 .reg = event_enable_register_trigger, 7015 .unreg = event_enable_unregister_trigger, 7016 .unreg_all = hist_enable_unreg_all, 7017 .set_filter = set_trigger_filter, 7018 .trigger = hist_enable_trigger, 7019 .count_func = event_trigger_count, 7020 .print = event_enable_trigger_print, 7021 .init = event_trigger_init, 7022 .free = event_enable_trigger_free, 7023 }; 7024 7025 static struct event_command trigger_hist_disable_cmd = { 7026 .name = DISABLE_HIST_STR, 7027 .trigger_type = ETT_HIST_ENABLE, 7028 .parse = event_enable_trigger_parse, 7029 .reg = event_enable_register_trigger, 7030 .unreg = event_enable_unregister_trigger, 7031 .unreg_all = hist_enable_unreg_all, 7032 .set_filter = set_trigger_filter, 7033 .trigger = hist_enable_trigger, 7034 .count_func = event_trigger_count, 7035 .print = event_enable_trigger_print, 7036 .init = event_trigger_init, 7037 .free = event_enable_trigger_free, 7038 }; 7039 7040 static __init void unregister_trigger_hist_enable_disable_cmds(void) 7041 { 7042 unregister_event_command(&trigger_hist_enable_cmd); 7043 unregister_event_command(&trigger_hist_disable_cmd); 7044 } 7045 7046 __init int register_trigger_hist_enable_disable_cmds(void) 7047 { 7048 int 
ret; 7049 7050 ret = register_event_command(&trigger_hist_enable_cmd); 7051 if (WARN_ON(ret < 0)) 7052 return ret; 7053 ret = register_event_command(&trigger_hist_disable_cmd); 7054 if (WARN_ON(ret < 0)) 7055 unregister_trigger_hist_enable_disable_cmds(); 7056 7057 return ret; 7058 } 7059