1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * trace_boot.c 4 * Tracing kernel boot-time 5 */ 6 7 #define pr_fmt(fmt) "trace_boot: " fmt 8 9 #include <linux/bootconfig.h> 10 #include <linux/cpumask.h> 11 #include <linux/ftrace.h> 12 #include <linux/init.h> 13 #include <linux/kernel.h> 14 #include <linux/mutex.h> 15 #include <linux/string.h> 16 #include <linux/slab.h> 17 #include <linux/trace.h> 18 #include <linux/trace_events.h> 19 20 #include "trace.h" 21 22 #define MAX_BUF_LEN 256 23 24 static void __init 25 trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node) 26 { 27 struct xbc_node *anode; 28 const char *p; 29 char buf[MAX_BUF_LEN]; 30 unsigned long v = 0; 31 32 /* Common ftrace options */ 33 xbc_node_for_each_array_value(node, "options", anode, p) { 34 if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0) { 35 pr_err("String is too long: %s\n", p); 36 continue; 37 } 38 39 if (trace_set_options(tr, buf) < 0) 40 pr_err("Failed to set option: %s\n", buf); 41 } 42 43 p = xbc_node_find_value(node, "tracing_on", NULL); 44 if (p && *p != '\0') { 45 if (kstrtoul(p, 10, &v)) 46 pr_err("Failed to set tracing on: %s\n", p); 47 if (v) 48 tracer_tracing_on(tr); 49 else 50 tracer_tracing_off(tr); 51 } 52 53 p = xbc_node_find_value(node, "trace_clock", NULL); 54 if (p && *p != '\0') { 55 if (tracing_set_clock(tr, p) < 0) 56 pr_err("Failed to set trace clock: %s\n", p); 57 } 58 59 p = xbc_node_find_value(node, "buffer_size", NULL); 60 if (p && *p != '\0') { 61 v = memparse(p, NULL); 62 if (v < PAGE_SIZE) 63 pr_err("Buffer size is too small: %s\n", p); 64 if (trace_array_is_readonly(tr) || 65 tracing_resize_ring_buffer(tr, v, RING_BUFFER_ALL_CPUS) < 0) 66 pr_err("Failed to resize trace buffer to %s\n", p); 67 } 68 69 p = xbc_node_find_value(node, "cpumask", NULL); 70 if (p && *p != '\0') { 71 cpumask_var_t new_mask; 72 73 if (alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 74 if (cpumask_parse(p, new_mask) < 0 || 75 tracing_set_cpumask(tr, new_mask) < 0) 76 
pr_err("Failed to set new CPU mask %s\n", p); 77 free_cpumask_var(new_mask); 78 } 79 } 80 } 81 82 #ifdef CONFIG_EVENT_TRACING 83 static void __init 84 trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node) 85 { 86 struct xbc_node *anode; 87 char buf[MAX_BUF_LEN]; 88 const char *p; 89 90 xbc_node_for_each_array_value(node, "events", anode, p) { 91 if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0) { 92 pr_err("String is too long: %s\n", p); 93 continue; 94 } 95 96 if (ftrace_set_clr_event(tr, buf, 1) < 0) 97 pr_err("Failed to enable event: %s\n", p); 98 } 99 } 100 101 #ifdef CONFIG_KPROBE_EVENTS 102 static int __init 103 trace_boot_add_kprobe_event(struct xbc_node *node, const char *event) 104 { 105 struct dynevent_cmd cmd; 106 struct xbc_node *anode; 107 char buf[MAX_BUF_LEN]; 108 const char *val; 109 int ret = 0; 110 111 xbc_node_for_each_array_value(node, "probes", anode, val) { 112 kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN); 113 114 ret = kprobe_event_gen_cmd_start(&cmd, event, val); 115 if (ret) { 116 pr_err("Failed to generate probe: %s\n", buf); 117 break; 118 } 119 120 ret = kprobe_event_gen_cmd_end(&cmd); 121 if (ret) { 122 pr_err("Failed to add probe: %s\n", buf); 123 break; 124 } 125 } 126 127 return ret; 128 } 129 #else 130 static inline int __init 131 trace_boot_add_kprobe_event(struct xbc_node *node, const char *event) 132 { 133 pr_err("Kprobe event is not supported.\n"); 134 return -ENOTSUPP; 135 } 136 #endif 137 138 #ifdef CONFIG_SYNTH_EVENTS 139 static int __init 140 trace_boot_add_synth_event(struct xbc_node *node, const char *event) 141 { 142 struct dynevent_cmd cmd; 143 struct xbc_node *anode; 144 char buf[MAX_BUF_LEN]; 145 const char *p; 146 int ret; 147 148 synth_event_cmd_init(&cmd, buf, MAX_BUF_LEN); 149 150 ret = synth_event_gen_cmd_start(&cmd, event, NULL); 151 if (ret) 152 return ret; 153 154 xbc_node_for_each_array_value(node, "fields", anode, p) { 155 ret = synth_event_add_field_str(&cmd, p); 156 if (ret) 157 return ret; 158 } 

	ret = synth_event_gen_cmd_end(&cmd);
	if (ret < 0)
		pr_err("Failed to add synthetic event: %s\n", buf);

	return ret;
}
#else
static inline int __init
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
{
	pr_err("Synthetic event is not supported.\n");
	return -ENOTSUPP;
}
#endif

#ifdef CONFIG_HIST_TRIGGERS
/*
 * Format into *bufp (bounded by @end) and advance *bufp past the written
 * text. On overflow, *bufp is pinned to @end (the "buffer exhausted"
 * sentinel checked by the callers) and -ERANGE is returned; -ENOSPC if
 * the buffer was already full on entry.
 */
static int __init __printf(3, 4)
append_printf(char **bufp, char *end, const char *fmt, ...)
{
	va_list args;
	int ret;

	if (*bufp == end)
		return -ENOSPC;

	va_start(args, fmt);
	ret = vsnprintf(*bufp, end - *bufp, fmt, args);
	if (ret < end - *bufp) {
		*bufp += ret;
	} else {
		/* Truncated: mark the buffer exhausted. */
		*bufp = end;
		ret = -ERANGE;
	}
	va_end(args);

	return ret;
}

/*
 * Append @str to *bufp with all whitespace characters stripped, advancing
 * *bufp. Returns the number of characters copied, or -ENOSPC (with *bufp
 * pinned to @end) if the result did not fit.
 */
static int __init
append_str_nospace(char **bufp, char *end, const char *str)
{
	char *p = *bufp;
	int len;

	while (p < end - 1 && *str != '\0') {
		if (!isspace(*str))
			*(p++) = *str;
		str++;
	}
	*p = '\0';
	if (p == end - 1) {
		*bufp = end;
		return -ENOSPC;
	}
	len = p - *bufp;
	*bufp = p;
	return (int)len;
}

/*
 * Append ":KEY=v1,v2,..." for the array-valued option @key of @hnode.
 * Returns 0 on success, -ENOENT if @key is absent, -EINVAL if @key exists
 * but has no values.
 */
static int __init
trace_boot_hist_add_array(struct xbc_node *hnode, char **bufp,
			  char *end, const char *key)
{
	struct xbc_node *anode;
	const char *p;
	char sep;

	p = xbc_node_find_value(hnode, key, &anode);
	if (p) {
		if (!anode) {
			pr_err("hist.%s requires value(s).\n", key);
			return -EINVAL;
		}

		append_printf(bufp, end, ":%s", key);
		/* '=' before the first value, ',' between the rest. */
		sep = '=';
		xbc_array_for_each_value(anode, p) {
			append_printf(bufp, end, "%c%s", sep, p);
			if (sep == '=')
				sep = ',';
		}
	} else
		return -ENOENT;

	return 0;
}

/*
 * Append one ":HANDLER(PARAM).ACTION(args...)" clause from @hnode, where
 * @param names the required option ("var" or "event") and the action is
 * one of the "trace", "save" or "snapshot" subkeys. Returns 0 on success
 * or -EINVAL if the parameter or action is missing/empty.
 */
static int __init
trace_boot_hist_add_one_handler(struct xbc_node *hnode, char **bufp,
				char *end, const char *handler,
				const char *param)
{
	struct xbc_node *knode, *anode;
	const char *p;
	char sep;

	/* Compose 'handler' parameter */
	p = xbc_node_find_value(hnode, param, NULL);
	if (!p) {
		pr_err("hist.%s requires '%s' option.\n",
		       xbc_node_get_data(hnode), param);
		return -EINVAL;
	}
	append_printf(bufp, end, ":%s(%s)", handler, p);

	/* Compose 'action' parameter */
	knode = xbc_node_find_subkey(hnode, "trace");
	if (!knode)
		knode = xbc_node_find_subkey(hnode, "save");

	if (knode) {
		anode = xbc_node_get_child(knode);
		if (!anode || !xbc_node_is_value(anode)) {
			pr_err("hist.%s.%s requires value(s).\n",
			       xbc_node_get_data(hnode),
			       xbc_node_get_data(knode));
			return -EINVAL;
		}

		append_printf(bufp, end, ".%s", xbc_node_get_data(knode));
		/* '(' before the first argument, ',' between the rest. */
		sep = '(';
		xbc_array_for_each_value(anode, p) {
			append_printf(bufp, end, "%c%s", sep, p);
			if (sep == '(')
				sep = ',';
		}
		append_printf(bufp, end, ")");
	} else if (xbc_node_find_subkey(hnode, "snapshot")) {
		append_printf(bufp, end, ".snapshot()");
	} else {
		pr_err("hist.%s requires an action.\n",
		       xbc_node_get_data(hnode));
		return -EINVAL;
	}

	return 0;
}

/*
 * Append all handler clauses for @hnode (an "onmax"/"onchange"/"onmatch"
 * subkey): first the numbered instance subkeys (onXXX.N), then @hnode
 * itself if it directly carries the @param option. Returns 0 or the first
 * negative error from trace_boot_hist_add_one_handler().
 */
static int __init
trace_boot_hist_add_handlers(struct xbc_node *hnode, char **bufp,
			     char *end, const char *param)
{
	struct xbc_node *node;
	const char *p, *handler;
	int ret = 0;

	handler = xbc_node_get_data(hnode);

	xbc_node_for_each_subkey(hnode, node) {
		p = xbc_node_get_data(node);
		if (!isdigit(p[0]))
			continue;
		/* All digit started node should be instances. */
		ret = trace_boot_hist_add_one_handler(node, bufp, end, handler, param);
		if (ret < 0)
			break;
	}

	if (xbc_node_find_subkey(hnode, param))
		ret = trace_boot_hist_add_one_handler(hnode, bufp, end, handler, param);

	return ret;
}

/*
 * Histogram boottime tracing syntax.
 *
 * ftrace.[instance.INSTANCE.]event.GROUP.EVENT.hist[.N] {
 *	keys = <KEY>[,...]
 *	values = <VAL>[,...]
 *	sort = <SORT-KEY>[,...]
 *	size = <ENTRIES>
 *	name = <HISTNAME>
 *	var { <VAR> = <EXPR> ... }
 *	pause|continue|clear
 *	onmax|onchange[.N] { var = <VAR>; <ACTION> [= <PARAM>] }
 *	onmatch[.N] { event = <EVENT>; <ACTION> [= <PARAM>] }
 *	filter = <FILTER>
 * }
 *
 * Where <ACTION> are;
 *
 *	trace = <EVENT>, <ARG1>[, ...]
 *	save = <ARG1>[, ...]
 *	snapshot
 */
/*
 * Build a complete "hist:keys=...:..." trigger command string for @hnode
 * into @buf (of @size bytes), following the syntax described above.
 * Returns 0 on success, -EINVAL on a malformed node ("keys" is mandatory),
 * or -E2BIG if the composed command overran the buffer.
 */
static int __init
trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size)
{
	struct xbc_node *node, *knode;
	char *end = buf + size;
	const char *p;
	int ret = 0;

	append_printf(&buf, end, "hist");

	ret = trace_boot_hist_add_array(hnode, &buf, end, "keys");
	if (ret < 0) {
		/* "keys" is mandatory for a hist trigger. */
		if (ret == -ENOENT)
			pr_err("hist requires keys.\n");
		return -EINVAL;
	}

	/* "values" and "sort" are optional: -ENOENT is not an error here. */
	ret = trace_boot_hist_add_array(hnode, &buf, end, "values");
	if (ret == -EINVAL)
		return ret;
	ret = trace_boot_hist_add_array(hnode, &buf, end, "sort");
	if (ret == -EINVAL)
		return ret;

	p = xbc_node_find_value(hnode, "size", NULL);
	if (p)
		append_printf(&buf, end, ":size=%s", p);

	p = xbc_node_find_value(hnode, "name", NULL);
	if (p)
		append_printf(&buf, end, ":name=%s", p);

	node = xbc_node_find_subkey(hnode, "var");
	if (node) {
		xbc_node_for_each_key_value(node, knode, p) {
			/* Expression must not include spaces. */
			append_printf(&buf, end, ":%s=",
				      xbc_node_get_data(knode));
			append_str_nospace(&buf, end, p);
		}
	}

	/* Histogram control attributes (mutual exclusive) */
	if (xbc_node_find_value(hnode, "pause", NULL))
		append_printf(&buf, end, ":pause");
	else if (xbc_node_find_value(hnode, "continue", NULL))
		append_printf(&buf, end, ":continue");
	else if (xbc_node_find_value(hnode, "clear", NULL))
		append_printf(&buf, end, ":clear");

	/* Histogram handler and actions */
	node = xbc_node_find_subkey(hnode, "onmax");
	if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0)
		return -EINVAL;
	node = xbc_node_find_subkey(hnode, "onchange");
	if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0)
		return -EINVAL;
	node = xbc_node_find_subkey(hnode, "onmatch");
	if (node && trace_boot_hist_add_handlers(node, &buf, end, "event") < 0)
		return -EINVAL;

	p = xbc_node_find_value(hnode, "filter", NULL);
	if (p)
		append_printf(&buf, end, " if %s", p);

	/* buf == end is the append_printf() overflow sentinel. */
	if (buf == end) {
		pr_err("hist exceeds the max command length.\n");
		return -E2BIG;
	}

	return 0;
}

/*
 * Compose and apply every hist trigger under @hnode to @file: first the
 * numbered "hist.N" instance subkeys, then @hnode itself if it has a
 * "keys" subkey. @buf (of @size bytes) is scratch space for composing
 * the command.
 */
static void __init
trace_boot_init_histograms(struct trace_event_file *file,
			   struct xbc_node *hnode, char *buf, size_t size)
{
	struct xbc_node *node;
	const char *p;
	char *tmp;

	xbc_node_for_each_subkey(hnode, node) {
		p = xbc_node_get_data(node);
		if (!isdigit(p[0]))
			continue;
		/* All digit started node should be instances.
		 */
		if (trace_boot_compose_hist_cmd(node, buf, size) == 0) {
			/*
			 * trigger_process_regex() consumes buf in place, so
			 * keep a copy for the error message.
			 */
			tmp = kstrdup(buf, GFP_KERNEL);
			if (!tmp)
				return;
			if (trigger_process_regex(file, buf) < 0)
				pr_err("Failed to apply hist trigger: %s\n", tmp);
			kfree(tmp);
		}
	}

	if (xbc_node_find_subkey(hnode, "keys")) {
		if (trace_boot_compose_hist_cmd(hnode, buf, size) == 0) {
			/* Same as above: preserve the command for reporting. */
			tmp = kstrdup(buf, GFP_KERNEL);
			if (!tmp)
				return;
			if (trigger_process_regex(file, buf) < 0)
				pr_err("Failed to apply hist trigger: %s\n", tmp);
			kfree(tmp);
		}
	}
}
#else
static void __init
trace_boot_init_histograms(struct trace_event_file *file,
			   struct xbc_node *hnode, char *buf, size_t size)
{
	/* do nothing */
}
#endif

/*
 * Configure one event node (event.GROUP.EVENT): define it first if GROUP
 * is "kprobes" or "synthetic", then apply "filter", "actions" and "hist"
 * settings under event_mutex, and finally enable it if "enable" is set.
 */
static void __init
trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
			  struct xbc_node *enode)
{
	struct trace_event_file *file;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p, *group, *event;

	group = xbc_node_get_data(gnode);
	event = xbc_node_get_data(enode);

	if (!strcmp(group, "kprobes"))
		if (trace_boot_add_kprobe_event(enode, event) < 0)
			return;
	if (!strcmp(group, "synthetic"))
		if (trace_boot_add_synth_event(enode, event) < 0)
			return;

	mutex_lock(&event_mutex);
	file = find_event_file(tr, group, event);
	if (!file) {
		pr_err("Failed to find event: %s:%s\n", group, event);
		goto out;
	}

	p = xbc_node_find_value(enode, "filter", NULL);
	if (p && *p != '\0') {
		/* apply_event_filter() parses the copy in place. */
		if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0)
			pr_err("filter string is too long: %s\n", p);
		else if (apply_event_filter(file, buf) < 0)
			pr_err("Failed to apply filter: %s\n", buf);
	}

	if (IS_ENABLED(CONFIG_HIST_TRIGGERS)) {
		xbc_node_for_each_array_value(enode, "actions", anode, p) {
			if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0)
				pr_err("action string is too long: %s\n", p);
			else if (trigger_process_regex(file, buf) < 0)
				pr_err("Failed to apply an action: %s\n", p);
		}
		anode = xbc_node_find_subkey(enode, "hist");
		if (anode)
			trace_boot_init_histograms(file, anode, buf, ARRAY_SIZE(buf));
	} else if (xbc_node_find_value(enode, "actions", NULL))
		pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n");

	if (xbc_node_find_value(enode, "enable", NULL)) {
		if (trace_event_enable_disable(file, 1, 0) < 0)
			pr_err("Failed to enable event node: %s:%s\n",
			       group, event);
	}
out:
	mutex_unlock(&event_mutex);
}

/*
 * Walk the "event" subtree of @node and configure every GROUP.EVENT leaf.
 * Group-level and top-level "enable" keys switch whole groups (or all
 * events) on, deliberately after the per-event settings are applied.
 */
static void __init
trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *gnode, *enode;
	bool enable, enable_all = false;
	const char *data;

	node = xbc_node_find_subkey(node, "event");
	if (!node)
		return;
	/* per-event key starts with "event.GROUP.EVENT" */
	xbc_node_for_each_subkey(node, gnode) {
		data = xbc_node_get_data(gnode);
		if (!strcmp(data, "enable")) {
			enable_all = true;
			continue;
		}
		enable = false;
		xbc_node_for_each_subkey(gnode, enode) {
			data = xbc_node_get_data(enode);
			if (!strcmp(data, "enable")) {
				enable = true;
				continue;
			}
			trace_boot_init_one_event(tr, gnode, enode);
		}
		/* Event enablement must be done after event settings */
		if (enable) {
			data = xbc_node_get_data(gnode);
			trace_array_set_clr_event(tr, data, NULL, true);
		}
	}
	/* Ditto */
	if (enable_all)
		trace_array_set_clr_event(tr, NULL, NULL, true);
}
#else
#define trace_boot_enable_events(tr, node) do {} while (0)
#define trace_boot_init_events(tr, node) do {} while (0)
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Apply "ftrace.filters" and "ftrace.notraces" lists from @node to the
 * instance's ftrace ops.
 */
static void __init
trace_boot_set_ftrace_filter(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	const char *p;
	char *q;

xbc_node_for_each_array_value(node, "ftrace.filters", anode, p) { 568 q = kstrdup(p, GFP_KERNEL); 569 if (!q) 570 return; 571 if (ftrace_set_filter(tr->ops, q, strlen(q), 0) < 0) 572 pr_err("Failed to add %s to ftrace filter\n", p); 573 else 574 ftrace_filter_param = true; 575 kfree(q); 576 } 577 xbc_node_for_each_array_value(node, "ftrace.notraces", anode, p) { 578 q = kstrdup(p, GFP_KERNEL); 579 if (!q) 580 return; 581 if (ftrace_set_notrace(tr->ops, q, strlen(q), 0) < 0) 582 pr_err("Failed to add %s to ftrace filter\n", p); 583 else 584 ftrace_filter_param = true; 585 kfree(q); 586 } 587 } 588 #else 589 #define trace_boot_set_ftrace_filter(tr, node) do {} while (0) 590 #endif 591 592 static void __init 593 trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node) 594 { 595 const char *p; 596 597 trace_boot_set_ftrace_filter(tr, node); 598 599 p = xbc_node_find_value(node, "tracer", NULL); 600 if (p && *p != '\0') { 601 if (trace_array_is_readonly(tr) || tracing_set_tracer(tr, p) < 0) 602 pr_err("Failed to set given tracer: %s\n", p); 603 } 604 605 /* Since tracer can free snapshot buffer, allocate snapshot here.*/ 606 if (xbc_node_find_value(node, "alloc_snapshot", NULL)) { 607 if (tracing_alloc_snapshot_instance(tr) < 0) 608 pr_err("Failed to allocate snapshot buffer\n"); 609 } 610 } 611 612 static void __init 613 trace_boot_init_one_instance(struct trace_array *tr, struct xbc_node *node) 614 { 615 trace_boot_set_instance_options(tr, node); 616 trace_boot_init_events(tr, node); 617 trace_boot_enable_events(tr, node); 618 trace_boot_enable_tracer(tr, node); 619 } 620 621 static void __init 622 trace_boot_init_instances(struct xbc_node *node) 623 { 624 struct xbc_node *inode; 625 struct trace_array *tr; 626 const char *p; 627 628 node = xbc_node_find_subkey(node, "instance"); 629 if (!node) 630 return; 631 632 xbc_node_for_each_subkey(node, inode) { 633 p = xbc_node_get_data(inode); 634 if (!p || *p == '\0') 635 continue; 636 637 tr = 
trace_array_get_by_name(p, NULL); 638 if (!tr) { 639 pr_err("Failed to get trace instance %s\n", p); 640 continue; 641 } 642 trace_boot_init_one_instance(tr, inode); 643 trace_array_put(tr); 644 } 645 } 646 647 static int __init trace_boot_init(void) 648 { 649 struct xbc_node *trace_node; 650 struct trace_array *tr; 651 652 trace_node = xbc_find_node("ftrace"); 653 if (!trace_node) 654 return 0; 655 656 tr = top_trace_array(); 657 if (!tr) 658 return 0; 659 660 /* Global trace array is also one instance */ 661 trace_boot_init_one_instance(tr, trace_node); 662 trace_boot_init_instances(trace_node); 663 664 disable_tracing_selftest("running boot-time tracing"); 665 666 return 0; 667 } 668 /* 669 * Start tracing at the end of core-initcall, so that it starts tracing 670 * from the beginning of postcore_initcall. 671 */ 672 core_initcall_sync(trace_boot_init); 673