// SPDX-License-Identifier: GPL-2.0
/*
 * trace_boot.c
 * Tracing kernel boot-time
 */

#define pr_fmt(fmt) "trace_boot: " fmt

#include <linux/bootconfig.h>
#include <linux/cpumask.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/trace.h>
#include <linux/trace_events.h>

#include "trace.h"

#define MAX_BUF_LEN 256

static void __init
trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	const char *p;
	char buf[MAX_BUF_LEN];
	unsigned long v = 0;

	/* Common ftrace options */
	xbc_node_for_each_array_value(node, "options", anode, p) {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
			pr_err("String is too long: %s\n", p);
			continue;
		}

		if (trace_set_options(tr, buf) < 0)
			pr_err("Failed to set option: %s\n", buf);
	}

	p = xbc_node_find_value(node, "tracing_on", NULL);
	if (p && *p != '\0') {
		if (kstrtoul(p, 10, &v))
			pr_err("Failed to set tracing on: %s\n", p);
		if (v)
			tracer_tracing_on(tr);
		else
			tracer_tracing_off(tr);
	}

	p = xbc_node_find_value(node, "trace_clock", NULL);
	if (p && *p != '\0') {
		if (tracing_set_clock(tr, p) < 0)
			pr_err("Failed to set trace clock: %s\n", p);
	}

	p = xbc_node_find_value(node, "buffer_size", NULL);
	if (p && *p != '\0') {
		v = memparse(p, NULL);
		if (v < PAGE_SIZE)
			pr_err("Buffer size is too small: %s\n", p);
		if (tracing_resize_ring_buffer(tr, v, RING_BUFFER_ALL_CPUS) < 0)
			pr_err("Failed to resize trace buffer to %s\n", p);
	}

	p = xbc_node_find_value(node, "cpumask", NULL);
	if (p && *p != '\0') {
		cpumask_var_t new_mask;

		if (alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
			if (cpumask_parse(p, new_mask) < 0 ||
			    tracing_set_cpumask(tr, new_mask) < 0)
				pr_err("Failed to set new CPU mask %s\n", p);
			free_cpumask_var(new_mask);
		}
	}
}

#ifdef CONFIG_EVENT_TRACING
static void __init
trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p;

	xbc_node_for_each_array_value(node, "events", anode, p) {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
			pr_err("String is too long: %s\n", p);
			continue;
		}

		if (ftrace_set_clr_event(tr, buf, 1) < 0)
			pr_err("Failed to enable event: %s\n", p);
	}
}

#ifdef CONFIG_KPROBE_EVENTS
static int __init
trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
{
	struct dynevent_cmd cmd;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *val;
	int ret = 0;

	xbc_node_for_each_array_value(node, "probes", anode, val) {
		kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);

		ret = kprobe_event_gen_cmd_start(&cmd, event, val);
		if (ret) {
			pr_err("Failed to generate probe: %s\n", buf);
			break;
		}

		ret = kprobe_event_gen_cmd_end(&cmd);
		if (ret) {
			pr_err("Failed to add probe: %s\n", buf);
			break;
		}
	}

	return ret;
}
#else
static inline int __init
trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
{
	pr_err("Kprobe event is not supported.\n");
	return -ENOTSUPP;
}
#endif
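
/*
 * An illustrative bootconfig fragment for the "kprobes" event group
 * handled above; a sketch based on Documentation/trace/boottime-trace.rst
 * ("myevent" and the probe spec are examples, not fixed names):
 *
 *	ftrace.event.kprobes.myevent {
 *		probes = "vfs_read $arg1 $arg2"
 *		enable
 *	}
 */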

#ifdef CONFIG_SYNTH_EVENTS
static int __init
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
{
	struct dynevent_cmd cmd;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p;
	int ret;

	synth_event_cmd_init(&cmd, buf, MAX_BUF_LEN);

	ret = synth_event_gen_cmd_start(&cmd, event, NULL);
	if (ret)
		return ret;

	xbc_node_for_each_array_value(node, "fields", anode, p) {
		ret = synth_event_add_field_str(&cmd, p);
		if (ret)
			return ret;
	}

	ret = synth_event_gen_cmd_end(&cmd);
	if (ret < 0)
		pr_err("Failed to add synthetic event: %s\n", buf);

	return ret;
}
#else
static inline int __init
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
{
	pr_err("Synthetic event is not supported.\n");
	return -ENOTSUPP;
}
#endif

static void __init
trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
			  struct xbc_node *enode)
{
	struct trace_event_file *file;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p, *group, *event;

	group = xbc_node_get_data(gnode);
	event = xbc_node_get_data(enode);

	if (!strcmp(group, "kprobes"))
		if (trace_boot_add_kprobe_event(enode, event) < 0)
			return;
	if (!strcmp(group, "synthetic"))
		if (trace_boot_add_synth_event(enode, event) < 0)
			return;

	mutex_lock(&event_mutex);
	file = find_event_file(tr, group, event);
	if (!file) {
		pr_err("Failed to find event: %s:%s\n", group, event);
		goto out;
	}

	p = xbc_node_find_value(enode, "filter", NULL);
	if (p && *p != '\0') {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
			pr_err("filter string is too long: %s\n", p);
		else if (apply_event_filter(file, buf) < 0)
			pr_err("Failed to apply filter: %s\n", buf);
	}

	xbc_node_for_each_array_value(enode, "actions", anode, p) {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
			pr_err("action string is too long: %s\n", p);
		else if (trigger_process_regex(file, buf) < 0)
			pr_err("Failed to apply an action: %s\n", buf);
	}

	if (xbc_node_find_value(enode, "enable", NULL)) {
		if (trace_event_enable_disable(file, 1, 0) < 0)
			pr_err("Failed to enable event node: %s:%s\n",
			       group, event);
	}
out:
	mutex_unlock(&event_mutex);
}

static void __init
trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *gnode, *enode;

	node = xbc_node_find_child(node, "event");
	if (!node)
		return;
	/* per-event key starts with "event.GROUP.EVENT" */
	xbc_node_for_each_child(node, gnode)
		xbc_node_for_each_child(gnode, enode)
			trace_boot_init_one_event(tr, gnode, enode);
}
#else
#define trace_boot_enable_events(tr, node) do {} while (0)
#define trace_boot_init_events(tr, node) do {} while (0)
#endif
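
/*
 * Illustrative per-event bootconfig fragments for the settings parsed in
 * trace_boot_init_one_event() above; a sketch based on
 * Documentation/trace/boottime-trace.rst (the event names, filter, and
 * fields are hypothetical examples):
 *
 *	ftrace.event.sched.sched_process_exec {
 *		filter = "pid < 128"
 *		enable
 *	}
 *
 *	ftrace.event.synthetic.initcall_latency {
 *		fields = "unsigned long func", "u64 lat"
 *	}
 */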

#ifdef CONFIG_DYNAMIC_FTRACE
static void __init
trace_boot_set_ftrace_filter(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	const char *p;
	char *q;

	xbc_node_for_each_array_value(node, "ftrace.filters", anode, p) {
		q = kstrdup(p, GFP_KERNEL);
		if (!q)
			return;
		if (ftrace_set_filter(tr->ops, q, strlen(q), 0) < 0)
			pr_err("Failed to add %s to ftrace filter\n", p);
		else
			ftrace_filter_param = true;
		kfree(q);
	}
	xbc_node_for_each_array_value(node, "ftrace.notraces", anode, p) {
		q = kstrdup(p, GFP_KERNEL);
		if (!q)
			return;
		if (ftrace_set_notrace(tr->ops, q, strlen(q), 0) < 0)
			pr_err("Failed to add %s to ftrace notrace\n", p);
		else
			ftrace_filter_param = true;
		kfree(q);
	}
}
#else
#define trace_boot_set_ftrace_filter(tr, node) do {} while (0)
#endif

static void __init
trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node)
{
	const char *p;

	trace_boot_set_ftrace_filter(tr, node);

	p = xbc_node_find_value(node, "tracer", NULL);
	if (p && *p != '\0') {
		if (tracing_set_tracer(tr, p) < 0)
			pr_err("Failed to set given tracer: %s\n", p);
	}

	/* Since the tracer can free the snapshot buffer, allocate the snapshot here. */
	if (xbc_node_find_value(node, "alloc_snapshot", NULL)) {
		if (tracing_alloc_snapshot_instance(tr) < 0)
			pr_err("Failed to allocate snapshot buffer\n");
	}
}

static void __init
trace_boot_init_one_instance(struct trace_array *tr, struct xbc_node *node)
{
	trace_boot_set_instance_options(tr, node);
	trace_boot_init_events(tr, node);
	trace_boot_enable_events(tr, node);
	trace_boot_enable_tracer(tr, node);
}

static void __init
trace_boot_init_instances(struct xbc_node *node)
{
	struct xbc_node *inode;
	struct trace_array *tr;
	const char *p;

	node = xbc_node_find_child(node, "instance");
	if (!node)
		return;

	xbc_node_for_each_child(node, inode) {
		p = xbc_node_get_data(inode);
		if (!p || *p == '\0')
			continue;

		tr = trace_array_get_by_name(p);
		if (!tr) {
			pr_err("Failed to get trace instance %s\n", p);
			continue;
		}
		trace_boot_init_one_instance(tr, inode);
		trace_array_put(tr);
	}
}

static int __init trace_boot_init(void)
{
	struct xbc_node *trace_node;
	struct trace_array *tr;

	trace_node = xbc_find_node("ftrace");
	if (!trace_node)
		return 0;

	tr = top_trace_array();
	if (!tr)
		return 0;

	/* Global trace array is also one instance */
	trace_boot_init_one_instance(tr, trace_node);
	trace_boot_init_instances(trace_node);

	disable_tracing_selftest("running boot-time tracing");

	return 0;
}

/*
 * Start tracing at the end of core-initcall, so that it starts tracing
 * from the beginning of postcore_initcall.
 */
core_initcall_sync(trace_boot_init);
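
/*
 * An illustrative instance fragment tying the pieces together; a sketch
 * based on Documentation/trace/boottime-trace.rst (the instance name
 * "foo" and the filter pattern are hypothetical):
 *
 *	ftrace.instance.foo {
 *		options = "sym-addr"
 *		tracer = function
 *		ftrace.filters = "user_*"
 *	}
 */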