1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * trace_events_synth - synthetic trace events
4 *
5 * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
6 */
7
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 #include "trace_probe.h"
21 #include "trace_probe_kernel.h"
22
23 #include "trace_synth.h"
24
25 #undef ERRORS
26 #define ERRORS \
27 C(BAD_NAME, "Illegal name"), \
28 C(INVALID_CMD, "Command must be of the form: <name> field[;field] ..."),\
29 C(INVALID_DYN_CMD, "Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
30 C(EVENT_EXISTS, "Event already exists"), \
31 C(TOO_MANY_FIELDS, "Too many fields"), \
32 C(INCOMPLETE_TYPE, "Incomplete type"), \
33 C(INVALID_TYPE, "Invalid type"), \
34 C(INVALID_FIELD, "Invalid field"), \
35 C(INVALID_ARRAY_SPEC, "Invalid array specification"),
36
37 #undef C
38 #define C(a, b) SYNTH_ERR_##a
39
40 enum { ERRORS };
41
42 #undef C
43 #define C(a, b) b
44
45 static const char *err_text[] = { ERRORS };
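/*
 * Illustrative sketch: ERRORS is an X-macro list; redefining C() picks which
 * column gets expanded, so the two expansions above roughly produce
 *
 *	enum { SYNTH_ERR_BAD_NAME, SYNTH_ERR_INVALID_CMD, ... };
 *	static const char *err_text[] = { "Illegal name", ... };
 *
 * and err_text[] is indexed by the SYNTH_ERR_* value handed to synth_err(),
 * which reports it through tracing_log_err().
 */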
46
47 static DEFINE_MUTEX(lastcmd_mutex);
48 static char *last_cmd;
49
static int errpos(const char *str)
51 {
52 guard(mutex)(&lastcmd_mutex);
53 if (!str || !last_cmd)
54 return 0;
55
56 return err_pos(last_cmd, str);
57 }
58
static void last_cmd_set(const char *str)
60 {
61 if (!str)
62 return;
63
64 mutex_lock(&lastcmd_mutex);
65 kfree(last_cmd);
66 last_cmd = kstrdup(str, GFP_KERNEL);
67 mutex_unlock(&lastcmd_mutex);
68 }
69
static void synth_err(u8 err_type, u16 err_pos)
71 {
72 guard(mutex)(&lastcmd_mutex);
73 if (!last_cmd)
74 return;
75
76 tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
77 err_type, err_pos);
78 }
79
80 static int create_synth_event(const char *raw_command);
81 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
82 static int synth_event_release(struct dyn_event *ev);
83 static bool synth_event_is_busy(struct dyn_event *ev);
84 static bool synth_event_match(const char *system, const char *event,
85 int argc, const char **argv, struct dyn_event *ev);
86
87 static struct dyn_event_operations synth_event_ops = {
88 .create = create_synth_event,
89 .show = synth_event_show,
90 .is_busy = synth_event_is_busy,
91 .free = synth_event_release,
92 .match = synth_event_match,
93 };
94
static bool is_synth_event(struct dyn_event *ev)
96 {
97 return ev->ops == &synth_event_ops;
98 }
99
static struct synth_event *to_synth_event(struct dyn_event *ev)
101 {
102 return container_of(ev, struct synth_event, devent);
103 }
104
static bool synth_event_is_busy(struct dyn_event *ev)
106 {
107 struct synth_event *event = to_synth_event(ev);
108
109 return event->ref != 0;
110 }
111
static bool synth_event_match(const char *system, const char *event,
113 int argc, const char **argv, struct dyn_event *ev)
114 {
115 struct synth_event *sev = to_synth_event(ev);
116
117 return strcmp(sev->name, event) == 0 &&
118 (!system || strcmp(system, SYNTH_SYSTEM) == 0);
119 }
120
121 struct synth_trace_event {
122 struct trace_entry ent;
123 union trace_synth_field fields[];
124 };
125
static int synth_event_define_fields(struct trace_event_call *call)
127 {
128 struct synth_trace_event trace;
129 int offset = offsetof(typeof(trace), fields);
130 struct synth_event *event = call->data;
131 unsigned int i, size, n_u64;
132 char *name, *type;
133 bool is_signed;
134 int ret = 0;
135
136 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
137 size = event->fields[i]->size;
138 is_signed = event->fields[i]->is_signed;
139 type = event->fields[i]->type;
140 name = event->fields[i]->name;
141 ret = trace_define_field(call, type, name, offset, size,
142 is_signed, FILTER_OTHER);
143 if (ret)
144 break;
145
146 event->fields[i]->offset = n_u64;
147
148 if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
149 offset += STR_VAR_LEN_MAX;
150 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
151 } else {
152 offset += sizeof(u64);
153 n_u64++;
154 }
155 }
156
157 event->n_u64 = n_u64;
158
159 return ret;
160 }
161
static bool synth_field_signed(char *type)
163 {
164 if (str_has_prefix(type, "u"))
165 return false;
166 if (strcmp(type, "gfp_t") == 0)
167 return false;
168
169 return true;
170 }
171
static int synth_field_is_string(char *type)
173 {
174 if (strstr(type, "char[") != NULL)
175 return true;
176
177 return false;
178 }
179
static int synth_field_is_stack(char *type)
181 {
182 if (strstr(type, "long[") != NULL)
183 return true;
184
185 return false;
186 }
187
static int synth_field_string_size(char *type)
189 {
190 char buf[4], *end, *start;
191 unsigned int len;
192 int size, err;
193
194 start = strstr(type, "char[");
195 if (start == NULL)
196 return -EINVAL;
197 start += sizeof("char[") - 1;
198
199 end = strchr(type, ']');
200 if (!end || end < start || type + strlen(type) > end + 1)
201 return -EINVAL;
202
203 len = end - start;
204 if (len > 3)
205 return -EINVAL;
206
207 if (len == 0)
208 return 0; /* variable-length string */
209
210 memcpy(buf, start, len);
211 buf[len] = '\0';
212
213 err = kstrtouint(buf, 0, &size);
214 if (err)
215 return err;
216
217 if (size > STR_VAR_LEN_MAX)
218 return -EINVAL;
219
220 return size;
221 }
222
static int synth_field_size(char *type)
224 {
225 int size = 0;
226
227 if (strcmp(type, "s64") == 0)
228 size = sizeof(s64);
229 else if (strcmp(type, "u64") == 0)
230 size = sizeof(u64);
231 else if (strcmp(type, "s32") == 0)
232 size = sizeof(s32);
233 else if (strcmp(type, "u32") == 0)
234 size = sizeof(u32);
235 else if (strcmp(type, "s16") == 0)
236 size = sizeof(s16);
237 else if (strcmp(type, "u16") == 0)
238 size = sizeof(u16);
239 else if (strcmp(type, "s8") == 0)
240 size = sizeof(s8);
241 else if (strcmp(type, "u8") == 0)
242 size = sizeof(u8);
243 else if (strcmp(type, "char") == 0)
244 size = sizeof(char);
245 else if (strcmp(type, "unsigned char") == 0)
246 size = sizeof(unsigned char);
247 else if (strcmp(type, "int") == 0)
248 size = sizeof(int);
249 else if (strcmp(type, "unsigned int") == 0)
250 size = sizeof(unsigned int);
251 else if (strcmp(type, "long") == 0)
252 size = sizeof(long);
253 else if (strcmp(type, "unsigned long") == 0)
254 size = sizeof(unsigned long);
255 else if (strcmp(type, "bool") == 0)
256 size = sizeof(bool);
257 else if (strcmp(type, "pid_t") == 0)
258 size = sizeof(pid_t);
259 else if (strcmp(type, "gfp_t") == 0)
260 size = sizeof(gfp_t);
261 else if (synth_field_is_string(type))
262 size = synth_field_string_size(type);
263 else if (synth_field_is_stack(type))
264 size = 0;
265
266 return size;
267 }
268
static const char *synth_field_fmt(char *type)
270 {
271 const char *fmt = "%llu";
272
273 if (strcmp(type, "s64") == 0)
274 fmt = "%lld";
275 else if (strcmp(type, "u64") == 0)
276 fmt = "%llu";
277 else if (strcmp(type, "s32") == 0)
278 fmt = "%d";
279 else if (strcmp(type, "u32") == 0)
280 fmt = "%u";
281 else if (strcmp(type, "s16") == 0)
282 fmt = "%d";
283 else if (strcmp(type, "u16") == 0)
284 fmt = "%u";
285 else if (strcmp(type, "s8") == 0)
286 fmt = "%d";
287 else if (strcmp(type, "u8") == 0)
288 fmt = "%u";
289 else if (strcmp(type, "char") == 0)
290 fmt = "%d";
291 else if (strcmp(type, "unsigned char") == 0)
292 fmt = "%u";
293 else if (strcmp(type, "int") == 0)
294 fmt = "%d";
295 else if (strcmp(type, "unsigned int") == 0)
296 fmt = "%u";
297 else if (strcmp(type, "long") == 0)
298 fmt = "%ld";
299 else if (strcmp(type, "unsigned long") == 0)
300 fmt = "%lu";
301 else if (strcmp(type, "bool") == 0)
302 fmt = "%d";
303 else if (strcmp(type, "pid_t") == 0)
304 fmt = "%d";
305 else if (strcmp(type, "gfp_t") == 0)
306 fmt = "%x";
307 else if (synth_field_is_string(type))
308 fmt = "%s";
309 else if (synth_field_is_stack(type))
310 fmt = "%s";
311
312 return fmt;
313 }
314
static void print_synth_event_num_val(struct trace_seq *s,
316 char *print_fmt, char *name,
317 int size, union trace_synth_field *val, char *space)
318 {
319 switch (size) {
320 case 1:
321 trace_seq_printf(s, print_fmt, name, val->as_u8, space);
322 break;
323
324 case 2:
325 trace_seq_printf(s, print_fmt, name, val->as_u16, space);
326 break;
327
328 case 4:
329 trace_seq_printf(s, print_fmt, name, val->as_u32, space);
330 break;
331
332 default:
333 trace_seq_printf(s, print_fmt, name, val->as_u64, space);
334 break;
335 }
336 }
337
static enum print_line_t print_synth_event(struct trace_iterator *iter,
339 int flags,
340 struct trace_event *event)
341 {
342 struct trace_array *tr = iter->tr;
343 struct trace_seq *s = &iter->seq;
344 struct synth_trace_event *entry;
345 struct synth_event *se;
346 unsigned int i, j, n_u64;
347 char print_fmt[32];
348 const char *fmt;
349
350 entry = (struct synth_trace_event *)iter->ent;
351 se = container_of(event, struct synth_event, call.event);
352
353 trace_seq_printf(s, "%s: ", se->name);
354
355 for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
356 if (trace_seq_has_overflowed(s))
357 goto end;
358
359 fmt = synth_field_fmt(se->fields[i]->type);
360
361 /* parameter types */
362 if (tr && tr->trace_flags & TRACE_ITER(VERBOSE))
363 trace_seq_printf(s, "%s ", fmt);
364
365 snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
366
367 /* parameter values */
368 if (se->fields[i]->is_string) {
369 if (se->fields[i]->is_dynamic) {
370 union trace_synth_field *data = &entry->fields[n_u64];
371
372 trace_seq_printf(s, print_fmt, se->fields[i]->name,
373 (char *)entry + data->as_dynamic.offset,
374 i == se->n_fields - 1 ? "" : " ");
375 n_u64++;
376 } else {
377 trace_seq_printf(s, print_fmt, se->fields[i]->name,
378 (char *)&entry->fields[n_u64].as_u64,
379 i == se->n_fields - 1 ? "" : " ");
380 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
381 }
382 } else if (se->fields[i]->is_stack) {
383 union trace_synth_field *data = &entry->fields[n_u64];
384 unsigned long *p = (void *)entry + data->as_dynamic.offset;
385
386 trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name);
387 for (j = 1; j < data->as_dynamic.len / sizeof(long); j++)
388 trace_seq_printf(s, "=> %pS\n", (void *)p[j]);
389 n_u64++;
390 } else {
391 struct trace_print_flags __flags[] = {
392 __def_gfpflag_names, {-1, NULL} };
393 char *space = (i == se->n_fields - 1 ? "" : " ");
394
395 print_synth_event_num_val(s, print_fmt,
396 se->fields[i]->name,
397 se->fields[i]->size,
398 &entry->fields[n_u64],
399 space);
400
401 if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
402 trace_seq_puts(s, " (");
403 trace_print_flags_seq(s, "|",
404 entry->fields[n_u64].as_u64,
405 __flags);
406 trace_seq_putc(s, ')');
407 }
408 n_u64++;
409 }
410 }
411 end:
412 trace_seq_putc(s, '\n');
413
414 return trace_handle_return(s);
415 }
416
417 static struct trace_event_functions synth_event_funcs = {
418 .trace = print_synth_event
419 };
420
static unsigned int trace_string(struct synth_trace_event *entry,
422 struct synth_event *event,
423 char *str_val,
424 bool is_dynamic,
425 unsigned int data_size,
426 unsigned int *n_u64)
427 {
428 unsigned int len = 0;
429 char *str_field;
430 int ret;
431
432 if (is_dynamic) {
433 union trace_synth_field *data = &entry->fields[*n_u64];
434
435 len = fetch_store_strlen((unsigned long)str_val);
436 data->as_dynamic.offset = struct_size(entry, fields, event->n_u64) + data_size;
437 data->as_dynamic.len = len;
438
439 ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);
440
441 (*n_u64)++;
442 } else {
443 str_field = (char *)&entry->fields[*n_u64].as_u64;
444
445 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
446 if ((unsigned long)str_val < TASK_SIZE)
447 ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
448 else
449 #endif
450 ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
451
452 if (ret < 0)
453 strcpy(str_field, FAULT_STRING);
454
455 (*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
456 }
457
458 return len;
459 }
460
static unsigned int trace_stack(struct synth_trace_event *entry,
462 struct synth_event *event,
463 long *stack,
464 unsigned int data_size,
465 unsigned int *n_u64)
466 {
467 union trace_synth_field *data = &entry->fields[*n_u64];
468 unsigned int len;
469 u32 data_offset;
470 void *data_loc;
471
472 data_offset = struct_size(entry, fields, event->n_u64);
473 data_offset += data_size;
474
475 for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) {
476 if (!stack[len])
477 break;
478 }
479
480 len *= sizeof(long);
481
482 /* Find the dynamic section to copy the stack into. */
483 data_loc = (void *)entry + data_offset;
484 memcpy(data_loc, stack, len);
485
486 /* Fill in the field that holds the offset/len combo */
487
488 data->as_dynamic.offset = data_offset;
489 data->as_dynamic.len = len;
490
491 (*n_u64)++;
492
493 return len;
494 }
495
static notrace void trace_event_raw_event_synth(void *__data,
497 u64 *var_ref_vals,
498 unsigned int *var_ref_idx)
499 {
500 unsigned int i, n_u64, val_idx, len, data_size = 0;
501 struct trace_event_file *trace_file = __data;
502 struct synth_trace_event *entry;
503 struct trace_event_buffer fbuffer;
504 struct trace_buffer *buffer;
505 struct synth_event *event;
506 int fields_size = 0;
507
508 event = trace_file->event_call->data;
509
510 if (trace_trigger_soft_disabled(trace_file))
511 return;
512
513 fields_size = event->n_u64 * sizeof(u64);
514
515 for (i = 0; i < event->n_dynamic_fields; i++) {
516 unsigned int field_pos = event->dynamic_fields[i]->field_pos;
517 char *str_val;
518
519 val_idx = var_ref_idx[field_pos];
520 str_val = (char *)(long)var_ref_vals[val_idx];
521
522 if (event->dynamic_fields[i]->is_stack) {
523 /* reserve one extra element for size */
524 len = *((unsigned long *)str_val) + 1;
525 len *= sizeof(unsigned long);
526 } else {
527 len = fetch_store_strlen((unsigned long)str_val);
528 }
529
530 fields_size += len;
531 }
532
533 /*
534 * Avoid ring buffer recursion detection, as this event
535 * is being performed within another event.
536 */
537 buffer = trace_file->tr->array_buffer.buffer;
538 guard(ring_buffer_nest)(buffer);
539
540 entry = trace_event_buffer_reserve(&fbuffer, trace_file,
541 sizeof(*entry) + fields_size);
542 if (!entry)
543 return;
544
545 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
546 val_idx = var_ref_idx[i];
547 if (event->fields[i]->is_string) {
548 char *str_val = (char *)(long)var_ref_vals[val_idx];
549
550 len = trace_string(entry, event, str_val,
551 event->fields[i]->is_dynamic,
552 data_size, &n_u64);
553 data_size += len; /* only dynamic string increments */
554 } else if (event->fields[i]->is_stack) {
555 long *stack = (long *)(long)var_ref_vals[val_idx];
556
557 len = trace_stack(entry, event, stack,
558 data_size, &n_u64);
559 data_size += len;
560 } else {
561 struct synth_field *field = event->fields[i];
562 u64 val = var_ref_vals[val_idx];
563
564 switch (field->size) {
565 case 1:
566 entry->fields[n_u64].as_u8 = (u8)val;
567 break;
568
569 case 2:
570 entry->fields[n_u64].as_u16 = (u16)val;
571 break;
572
573 case 4:
574 entry->fields[n_u64].as_u32 = (u32)val;
575 break;
576
577 default:
578 entry->fields[n_u64].as_u64 = val;
579 break;
580 }
581 n_u64++;
582 }
583 }
584
585 trace_event_buffer_commit(&fbuffer);
586 }
587
static void free_synth_event_print_fmt(struct trace_event_call *call)
589 {
590 if (call) {
591 kfree(call->print_fmt);
592 call->print_fmt = NULL;
593 }
594 }
595
static int __set_synth_event_print_fmt(struct synth_event *event,
597 char *buf, int len)
598 {
599 const char *fmt;
600 int pos = 0;
601 int i;
602
603 /* When len=0, we just calculate the needed length */
604 #define LEN_OR_ZERO (len ? len - pos : 0)
605
606 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
607 for (i = 0; i < event->n_fields; i++) {
608 fmt = synth_field_fmt(event->fields[i]->type);
609 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
610 event->fields[i]->name, fmt,
611 i == event->n_fields - 1 ? "" : " ");
612 }
613 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
614
615 for (i = 0; i < event->n_fields; i++) {
616 if (event->fields[i]->is_string &&
617 event->fields[i]->is_dynamic)
618 pos += snprintf(buf + pos, LEN_OR_ZERO,
619 ", __get_str(%s)", event->fields[i]->name);
620 else if (event->fields[i]->is_stack)
621 pos += snprintf(buf + pos, LEN_OR_ZERO,
622 ", __get_stacktrace(%s)", event->fields[i]->name);
623 else
624 pos += snprintf(buf + pos, LEN_OR_ZERO,
625 ", REC->%s", event->fields[i]->name);
626 }
627
628 #undef LEN_OR_ZERO
629
630 /* return the length of print_fmt */
631 return pos;
632 }
633
static int set_synth_event_print_fmt(struct trace_event_call *call)
635 {
636 struct synth_event *event = call->data;
637 char *print_fmt;
638 int len;
639
640 /* First: called with 0 length to calculate the needed length */
641 len = __set_synth_event_print_fmt(event, NULL, 0);
642
643 print_fmt = kmalloc(len + 1, GFP_KERNEL);
644 if (!print_fmt)
645 return -ENOMEM;
646
647 /* Second: actually write the @print_fmt */
648 __set_synth_event_print_fmt(event, print_fmt, len + 1);
649 call->print_fmt = print_fmt;
650
651 return 0;
652 }
653
static void free_synth_field(struct synth_field *field)
655 {
656 kfree(field->type);
657 kfree(field->name);
658 kfree(field);
659 }
660
static int check_field_version(const char *prefix, const char *field_type,
662 const char *field_name)
663 {
664 /*
665 * For backward compatibility, the old synthetic event command
666 * format did not require semicolons, and in order to not
667 * break user space, that old format must still work. If a new
668 * feature is added, then the format that uses the new feature
669 * will be required to have semicolons, as nothing that uses
670 * the old format would be using the new, yet to be created,
671 * feature. When a new feature is added, this will detect it,
672 * and return a number greater than 1, and require the format
673 * to use semicolons.
674 */
675 return 1;
676 }
677
static struct synth_field *parse_synth_field(int argc, char **argv,
679 int *consumed, int *field_version)
680 {
681 const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
682 struct synth_field *field;
683 int len, ret = -ENOMEM;
684 struct seq_buf s;
685 ssize_t size;
686
687 if (!strcmp(field_type, "unsigned")) {
688 if (argc < 3) {
689 synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
690 return ERR_PTR(-EINVAL);
691 }
692 prefix = "unsigned ";
693 field_type = argv[1];
694 field_name = argv[2];
695 *consumed += 3;
696 } else {
697 field_name = argv[1];
698 *consumed += 2;
699 }
700
701 if (!field_name) {
702 synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
703 return ERR_PTR(-EINVAL);
704 }
705
706 *field_version = check_field_version(prefix, field_type, field_name);
707
708 field = kzalloc(sizeof(*field), GFP_KERNEL);
709 if (!field)
710 return ERR_PTR(-ENOMEM);
711
712 len = strlen(field_name);
713 array = strchr(field_name, '[');
714 if (array)
715 len -= strlen(array);
716
717 field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
718 if (!field->name)
719 goto free;
720
721 if (!is_good_name(field->name)) {
722 synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
723 ret = -EINVAL;
724 goto free;
725 }
726
727 len = strlen(field_type) + 1;
728
729 if (array)
730 len += strlen(array);
731
732 if (prefix)
733 len += strlen(prefix);
734
735 field->type = kzalloc(len, GFP_KERNEL);
736 if (!field->type)
737 goto free;
738
739 seq_buf_init(&s, field->type, len);
740 if (prefix)
741 seq_buf_puts(&s, prefix);
742 seq_buf_puts(&s, field_type);
743 if (array)
744 seq_buf_puts(&s, array);
745 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
746 goto free;
747
748 s.buffer[s.len] = '\0';
749
750 size = synth_field_size(field->type);
751 if (size < 0) {
752 if (array)
753 synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
754 else
755 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
756 ret = -EINVAL;
757 goto free;
758 } else if (size == 0) {
759 if (synth_field_is_string(field->type) ||
760 synth_field_is_stack(field->type)) {
761 char *type;
762
763 len = sizeof("__data_loc ") + strlen(field->type) + 1;
764 type = kzalloc(len, GFP_KERNEL);
765 if (!type)
766 goto free;
767
768 seq_buf_init(&s, type, len);
769 seq_buf_puts(&s, "__data_loc ");
770 seq_buf_puts(&s, field->type);
771
772 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
773 goto free;
774 s.buffer[s.len] = '\0';
775
776 kfree(field->type);
777 field->type = type;
778
779 field->is_dynamic = true;
780 size = sizeof(u64);
781 } else {
782 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
783 ret = -EINVAL;
784 goto free;
785 }
786 }
787 field->size = size;
788
789 if (synth_field_is_string(field->type))
790 field->is_string = true;
791 else if (synth_field_is_stack(field->type))
792 field->is_stack = true;
793
794 field->is_signed = synth_field_signed(field->type);
795 out:
796 return field;
797 free:
798 free_synth_field(field);
799 field = ERR_PTR(ret);
800 goto out;
801 }
802
static void free_synth_tracepoint(struct tracepoint *tp)
804 {
805 if (!tp)
806 return;
807
808 kfree(tp->name);
809 kfree(tp);
810 }
811
static struct tracepoint *alloc_synth_tracepoint(char *name)
813 {
814 struct tracepoint *tp;
815
816 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
817 if (!tp)
818 return ERR_PTR(-ENOMEM);
819
820 tp->name = kstrdup(name, GFP_KERNEL);
821 if (!tp->name) {
822 kfree(tp);
823 return ERR_PTR(-ENOMEM);
824 }
825
826 return tp;
827 }
828
struct synth_event *find_synth_event(const char *name)
830 {
831 struct dyn_event *pos;
832 struct synth_event *event;
833
834 for_each_dyn_event(pos) {
835 if (!is_synth_event(pos))
836 continue;
837 event = to_synth_event(pos);
838 if (strcmp(event->name, name) == 0)
839 return event;
840 }
841
842 return NULL;
843 }
844
845 static struct trace_event_fields synth_event_fields_array[] = {
846 { .type = TRACE_FUNCTION_TYPE,
847 .define_fields = synth_event_define_fields },
848 {}
849 };
850
static int synth_event_reg(struct trace_event_call *call,
852 enum trace_reg type, void *data)
853 {
854 struct synth_event *event = container_of(call, struct synth_event, call);
855
856 switch (type) {
857 #ifdef CONFIG_PERF_EVENTS
858 case TRACE_REG_PERF_REGISTER:
859 #endif
860 case TRACE_REG_REGISTER:
861 if (!try_module_get(event->mod))
862 return -EBUSY;
863 break;
864 default:
865 break;
866 }
867
868 int ret = trace_event_reg(call, type, data);
869
870 switch (type) {
871 #ifdef CONFIG_PERF_EVENTS
872 case TRACE_REG_PERF_UNREGISTER:
873 #endif
874 case TRACE_REG_UNREGISTER:
875 module_put(event->mod);
876 break;
877 default:
878 break;
879 }
880 return ret;
881 }
882
static int register_synth_event(struct synth_event *event)
884 {
885 struct trace_event_call *call = &event->call;
886 int ret = 0;
887
888 event->call.class = &event->class;
889 event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
890 if (!event->class.system) {
891 ret = -ENOMEM;
892 goto out;
893 }
894
895 event->tp = alloc_synth_tracepoint(event->name);
896 if (IS_ERR(event->tp)) {
897 ret = PTR_ERR(event->tp);
898 event->tp = NULL;
899 goto out;
900 }
901
902 INIT_LIST_HEAD(&call->class->fields);
903 call->event.funcs = &synth_event_funcs;
904 call->class->fields_array = synth_event_fields_array;
905
906 ret = register_trace_event(&call->event);
907 if (!ret) {
908 ret = -ENODEV;
909 goto out;
910 }
911 call->flags = TRACE_EVENT_FL_TRACEPOINT;
912 call->class->reg = synth_event_reg;
913 call->class->probe = trace_event_raw_event_synth;
914 call->data = event;
915 call->tp = event->tp;
916
917 ret = trace_add_event_call(call);
918 if (ret) {
919 pr_warn("Failed to register synthetic event: %s\n",
920 trace_event_name(call));
921 goto err;
922 }
923
924 ret = set_synth_event_print_fmt(call);
	/* trace_remove_event_call() calls unregister_trace_event() internally */
926 if (ret < 0)
927 trace_remove_event_call(call);
928 out:
929 return ret;
930 err:
931 unregister_trace_event(&call->event);
932 goto out;
933 }
934
static int unregister_synth_event(struct synth_event *event)
936 {
937 struct trace_event_call *call = &event->call;
938 int ret;
939
940 ret = trace_remove_event_call(call);
941
942 return ret;
943 }
944
static void free_synth_event(struct synth_event *event)
946 {
947 unsigned int i;
948
949 if (!event)
950 return;
951
952 for (i = 0; i < event->n_fields; i++)
953 free_synth_field(event->fields[i]);
954
955 kfree(event->fields);
956 kfree(event->dynamic_fields);
957 kfree(event->name);
958 kfree(event->class.system);
959 free_synth_tracepoint(event->tp);
960 free_synth_event_print_fmt(&event->call);
961 kfree(event);
962 }
963
static struct synth_event *alloc_synth_event(const char *name, int n_fields,
965 struct synth_field **fields)
966 {
967 unsigned int i, j, n_dynamic_fields = 0;
968 struct synth_event *event;
969
970 event = kzalloc(sizeof(*event), GFP_KERNEL);
971 if (!event) {
972 event = ERR_PTR(-ENOMEM);
973 goto out;
974 }
975
976 event->name = kstrdup(name, GFP_KERNEL);
977 if (!event->name) {
978 kfree(event);
979 event = ERR_PTR(-ENOMEM);
980 goto out;
981 }
982
983 event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
984 if (!event->fields) {
985 free_synth_event(event);
986 event = ERR_PTR(-ENOMEM);
987 goto out;
988 }
989
990 for (i = 0; i < n_fields; i++)
991 if (fields[i]->is_dynamic)
992 n_dynamic_fields++;
993
994 if (n_dynamic_fields) {
995 event->dynamic_fields = kcalloc(n_dynamic_fields,
996 sizeof(*event->dynamic_fields),
997 GFP_KERNEL);
998 if (!event->dynamic_fields) {
999 free_synth_event(event);
1000 event = ERR_PTR(-ENOMEM);
1001 goto out;
1002 }
1003 }
1004
1005 dyn_event_init(&event->devent, &synth_event_ops);
1006
1007 for (i = 0, j = 0; i < n_fields; i++) {
1008 fields[i]->field_pos = i;
1009 event->fields[i] = fields[i];
1010
1011 if (fields[i]->is_dynamic)
1012 event->dynamic_fields[j++] = fields[i];
1013 }
1014 event->n_dynamic_fields = j;
1015 event->n_fields = n_fields;
1016 out:
1017 return event;
1018 }
1019
static int synth_event_check_arg_fn(void *data)
1021 {
1022 struct dynevent_arg_pair *arg_pair = data;
1023 int size;
1024
1025 size = synth_field_size((char *)arg_pair->lhs);
1026 if (size == 0) {
1027 if (strstr((char *)arg_pair->lhs, "["))
1028 return 0;
1029 }
1030
1031 return size ? 0 : -EINVAL;
1032 }
1033
1034 /**
1035 * synth_event_add_field - Add a new field to a synthetic event cmd
1036 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1037 * @type: The type of the new field to add
1038 * @name: The name of the new field to add
1039 *
1040 * Add a new field to a synthetic event cmd object. Field ordering is in
1041 * the same order the fields are added.
1042 *
1043 * See synth_field_size() for available types. If field_name contains
1044 * [n] the field is considered to be an array.
1045 *
1046 * Return: 0 if successful, error otherwise.
1047 */
int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
1049 const char *name)
1050 {
1051 struct dynevent_arg_pair arg_pair;
1052 int ret;
1053
1054 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1055 return -EINVAL;
1056
1057 if (!type || !name)
1058 return -EINVAL;
1059
1060 dynevent_arg_pair_init(&arg_pair, 0, ';');
1061
1062 arg_pair.lhs = type;
1063 arg_pair.rhs = name;
1064
1065 ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
1066 if (ret)
1067 return ret;
1068
1069 if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1070 ret = -EINVAL;
1071
1072 return ret;
1073 }
1074 EXPORT_SYMBOL_GPL(synth_event_add_field);
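/*
 * Usage sketch (hypothetical event/field names, error handling omitted):
 * building a command field by field after starting it with the
 * synth_event_gen_cmd_start() wrapper:
 *
 *	struct dynevent_cmd cmd;
 *	char *buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	synth_event_gen_cmd_start(&cmd, "mod_latency", THIS_MODULE);
 *	synth_event_add_field(&cmd, "u64", "lat_ns");
 *	synth_event_add_field(&cmd, "pid_t", "pid");
 *	synth_event_gen_cmd_end(&cmd);
 *	kfree(buf);
 */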
1075
1076 /**
1077 * synth_event_add_field_str - Add a new field to a synthetic event cmd
1078 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1079 * @type_name: The type and name of the new field to add, as a single string
1080 *
1081 * Add a new field to a synthetic event cmd object, as a single
1082 * string. The @type_name string is expected to be of the form 'type
 * name', to which a ';' will be appended. No sanity checking is done -
1084 * what's passed in is assumed to already be well-formed. Field
1085 * ordering is in the same order the fields are added.
1086 *
1087 * See synth_field_size() for available types. If field_name contains
1088 * [n] the field is considered to be an array.
1089 *
1090 * Return: 0 if successful, error otherwise.
1091 */
int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
1093 {
1094 struct dynevent_arg arg;
1095 int ret;
1096
1097 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1098 return -EINVAL;
1099
1100 if (!type_name)
1101 return -EINVAL;
1102
1103 dynevent_arg_init(&arg, ';');
1104
1105 arg.str = type_name;
1106
1107 ret = dynevent_arg_add(cmd, &arg, NULL);
1108 if (ret)
1109 return ret;
1110
1111 if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1112 ret = -EINVAL;
1113
1114 return ret;
1115 }
1116 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
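/*
 * Usage sketch (hypothetical names): the same fields as above can instead be
 * added as single pre-formed "type name" strings:
 *
 *	synth_event_add_field_str(&cmd, "u64 lat_ns");
 *	synth_event_add_field_str(&cmd, "pid_t pid");
 *
 * Each call appends the string followed by a ';' to the command being built.
 */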
1117
1118 /**
1119 * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1120 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1121 * @fields: An array of type/name field descriptions
1122 * @n_fields: The number of field descriptions contained in the fields array
1123 *
1124 * Add a new set of fields to a synthetic event cmd object. The event
1125 * fields that will be defined for the event should be passed in as an
1126 * array of struct synth_field_desc, and the number of elements in the
1127 * array passed in as n_fields. Field ordering will retain the
1128 * ordering given in the fields array.
1129 *
1130 * See synth_field_size() for available types. If field_name contains
1131 * [n] the field is considered to be an array.
1132 *
1133 * Return: 0 if successful, error otherwise.
1134 */
int synth_event_add_fields(struct dynevent_cmd *cmd,
1136 struct synth_field_desc *fields,
1137 unsigned int n_fields)
1138 {
1139 unsigned int i;
1140 int ret = 0;
1141
1142 for (i = 0; i < n_fields; i++) {
1143 if (fields[i].type == NULL || fields[i].name == NULL) {
1144 ret = -EINVAL;
1145 break;
1146 }
1147
1148 ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1149 if (ret)
1150 break;
1151 }
1152
1153 return ret;
1154 }
1155 EXPORT_SYMBOL_GPL(synth_event_add_fields);
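/*
 * Usage sketch (hypothetical names): adding several fields at once from a
 * static description table:
 *
 *	static struct synth_field_desc mod_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "lat_ns" },
 *	};
 *
 *	synth_event_add_fields(&cmd, mod_fields, ARRAY_SIZE(mod_fields));
 */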
1156
1157 /**
1158 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1159 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1160 * @name: The name of the synthetic event
1161 * @mod: The module creating the event, NULL if not created from a module
1162 * @...: Variable number of arg (pairs), one pair for each field
1163 *
1164 * NOTE: Users normally won't want to call this function directly, but
1165 * rather use the synth_event_gen_cmd_start() wrapper, which
1166 * automatically adds a NULL to the end of the arg list. If this
1167 * function is used directly, make sure the last arg in the variable
1168 * arg list is NULL.
1169 *
1170 * Generate a synthetic event command to be executed by
1171 * synth_event_gen_cmd_end(). This function can be used to generate
1172 * the complete command or only the first part of it; in the latter
1173 * case, synth_event_add_field(), synth_event_add_field_str(), or
1174 * synth_event_add_fields() can be used to add more fields following
1175 * this.
1176 *
 * There should be an even number of variable args, each pair consisting
1178 * of a type followed by a field name.
1179 *
1180 * See synth_field_size() for available types. If field_name contains
1181 * [n] the field is considered to be an array.
1182 *
1183 * Return: 0 if successful, error otherwise.
1184 */
int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1186 struct module *mod, ...)
1187 {
1188 struct dynevent_arg arg;
1189 va_list args;
1190 int ret;
1191
1192 cmd->event_name = name;
1193 cmd->private_data = mod;
1194
1195 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1196 return -EINVAL;
1197
1198 dynevent_arg_init(&arg, 0);
1199 arg.str = name;
1200 ret = dynevent_arg_add(cmd, &arg, NULL);
1201 if (ret)
1202 return ret;
1203
1204 va_start(args, mod);
1205 for (;;) {
1206 const char *type, *name;
1207
1208 type = va_arg(args, const char *);
1209 if (!type)
1210 break;
1211 name = va_arg(args, const char *);
1212 if (!name)
1213 break;
1214
1215 if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1216 ret = -EINVAL;
1217 break;
1218 }
1219
1220 ret = synth_event_add_field(cmd, type, name);
1221 if (ret)
1222 break;
1223 }
1224 va_end(args);
1225
1226 return ret;
1227 }
1228 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
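/*
 * Usage sketch (hypothetical event/field names, error handling trimmed):
 * generating a complete command in one go with the varargs wrapper, which
 * NULL-terminates the type/name pairs automatically:
 *
 *	struct dynevent_cmd cmd;
 *	char *buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	int ret;
 *
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	ret = synth_event_gen_cmd_start(&cmd, "block_lat", THIS_MODULE,
 *					"pid_t", "pid",
 *					"u64", "delta_ns");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 *	kfree(buf);
 */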
1229
1230 /**
1231 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1232 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1233 * @name: The name of the synthetic event
1234 * @mod: The module creating the event, NULL if not created from a module
1235 * @fields: An array of type/name field descriptions
1236 * @n_fields: The number of field descriptions contained in the fields array
1237 *
1238 * Generate a synthetic event command to be executed by
1239 * synth_event_gen_cmd_end(). This function can be used to generate
1240 * the complete command or only the first part of it; in the latter
1241 * case, synth_event_add_field(), synth_event_add_field_str(), or
1242 * synth_event_add_fields() can be used to add more fields following
1243 * this.
1244 *
1245 * The event fields that will be defined for the event should be
1246 * passed in as an array of struct synth_field_desc, and the number of
1247 * elements in the array passed in as n_fields. Field ordering will
1248 * retain the ordering given in the fields array.
1249 *
1250 * See synth_field_size() for available types. If field_name contains
1251 * [n] the field is considered to be an array.
1252 *
1253 * Return: 0 if successful, error otherwise.
1254 */
int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1256 struct module *mod,
1257 struct synth_field_desc *fields,
1258 unsigned int n_fields)
1259 {
1260 struct dynevent_arg arg;
1261 unsigned int i;
1262 int ret = 0;
1263
1264 cmd->event_name = name;
1265 cmd->private_data = mod;
1266
1267 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1268 return -EINVAL;
1269
1270 if (n_fields > SYNTH_FIELDS_MAX)
1271 return -EINVAL;
1272
1273 dynevent_arg_init(&arg, 0);
1274 arg.str = name;
1275 ret = dynevent_arg_add(cmd, &arg, NULL);
1276 if (ret)
1277 return ret;
1278
1279 for (i = 0; i < n_fields; i++) {
1280 if (fields[i].type == NULL || fields[i].name == NULL)
1281 return -EINVAL;
1282
1283 ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1284 if (ret)
1285 break;
1286 }
1287
1288 return ret;
1289 }
1290 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
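/*
 * Usage sketch (hypothetical names): same flow as the varargs form above,
 * but taking the fields from a struct synth_field_desc array:
 *
 *	ret = synth_event_gen_cmd_array_start(&cmd, "block_lat", THIS_MODULE,
 *					      mod_fields,
 *					      ARRAY_SIZE(mod_fields));
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */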
1291
static int __create_synth_event(const char *name, const char *raw_fields)
1293 {
1294 char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
1295 struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1296 int consumed, cmd_version = 1, n_fields_this_loop;
1297 int i, argc, n_fields = 0, ret = 0;
1298 struct synth_event *event = NULL;
1299
1300 /*
1301 * Argument syntax:
1302 * - Add synthetic event: <event_name> field[;field] ...
1303 * - Remove synthetic event: !<event_name> field[;field] ...
1304 * where 'field' = type field_name
1305 */
1306
1307 if (name[0] == '\0') {
1308 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1309 return -EINVAL;
1310 }
1311
1312 if (!is_good_name(name)) {
1313 synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
1314 return -EINVAL;
1315 }
1316
1317 mutex_lock(&event_mutex);
1318
1319 event = find_synth_event(name);
1320 if (event) {
1321 synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
1322 ret = -EEXIST;
1323 goto err;
1324 }
1325
1326 tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
1327 if (!tmp_fields) {
1328 ret = -ENOMEM;
1329 goto err;
1330 }
1331
1332 while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
1333 argv = argv_split(GFP_KERNEL, field_str, &argc);
1334 if (!argv) {
1335 ret = -ENOMEM;
1336 goto err;
1337 }
1338
1339 if (!argc) {
1340 argv_free(argv);
1341 continue;
1342 }
1343
1344 n_fields_this_loop = 0;
1345 consumed = 0;
1346 while (argc > consumed) {
1347 int field_version;
1348
1349 field = parse_synth_field(argc - consumed,
1350 argv + consumed, &consumed,
1351 &field_version);
1352 if (IS_ERR(field)) {
1353 ret = PTR_ERR(field);
1354 goto err_free_arg;
1355 }
1356
1357 /*
1358 * Track the highest version of any field we
1359 * found in the command.
1360 */
1361 if (field_version > cmd_version)
1362 cmd_version = field_version;
1363
1364 /*
1365 * Now sort out what is and isn't valid for
1366 * each supported version.
1367 *
1368 * If we see more than 1 field per loop, it
1369 * means we have multiple fields between
1370 * semicolons, and that's something we no
1371 * longer support in a version 2 or greater
1372 * command.
1373 */
1374 if (cmd_version > 1 && n_fields_this_loop >= 1) {
1375 synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
1376 ret = -EINVAL;
1377 goto err_free_arg;
1378 }
1379
1380 if (n_fields == SYNTH_FIELDS_MAX) {
1381 synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
1382 ret = -EINVAL;
1383 goto err_free_arg;
1384 }
1385 fields[n_fields++] = field;
1386
1387 n_fields_this_loop++;
1388 }
1389 argv_free(argv);
1390
1391 if (consumed < argc) {
1392 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1393 ret = -EINVAL;
1394 goto err;
1395 }
1396
1397 }
1398
1399 if (n_fields == 0) {
1400 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1401 ret = -EINVAL;
1402 goto err;
1403 }
1404
1405 event = alloc_synth_event(name, n_fields, fields);
1406 if (IS_ERR(event)) {
1407 ret = PTR_ERR(event);
1408 event = NULL;
1409 goto err;
1410 }
1411 ret = register_synth_event(event);
1412 if (!ret)
1413 dyn_event_add(&event->devent, &event->call);
1414 else
1415 free_synth_event(event);
1416 out:
1417 mutex_unlock(&event_mutex);
1418
1419 kfree(saved_fields);
1420
1421 return ret;
1422 err_free_arg:
1423 argv_free(argv);
1424 err:
1425 for (i = 0; i < n_fields; i++)
1426 free_synth_field(fields[i]);
1427
1428 goto out;
1429 }
1430
1431 /**
1432 * synth_event_create - Create a new synthetic event
1433 * @name: The name of the new synthetic event
1434 * @fields: An array of type/name field descriptions
1435 * @n_fields: The number of field descriptions contained in the fields array
1436 * @mod: The module creating the event, NULL if not created from a module
1437 *
1438 * Create a new synthetic event with the given name under the
1439 * trace/events/synthetic/ directory. The event fields that will be
1440 * defined for the event should be passed in as an array of struct
 * synth_field_desc, and the number of elements in the array passed in as
1442 * n_fields. Field ordering will retain the ordering given in the
1443 * fields array.
1444 *
1445 * If the new synthetic event is being created from a module, the mod
1446 * param must be non-NULL. This will ensure that the trace buffer
1447 * won't contain unreadable events.
1448 *
 * The new synth event should be deleted using the synth_event_delete()
 * function. The new synthetic event can be generated from modules or
 * other kernel code using synth_event_trace() and related functions.
1452 *
1453 * Return: 0 if successful, error otherwise.
1454 */
int synth_event_create(const char *name, struct synth_field_desc *fields,
1456 unsigned int n_fields, struct module *mod)
1457 {
1458 struct dynevent_cmd cmd;
1459 char *buf;
1460 int ret;
1461
1462 buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1463 if (!buf)
1464 return -ENOMEM;
1465
1466 synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1467
1468 ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1469 fields, n_fields);
1470 if (ret)
1471 goto out;
1472
1473 ret = synth_event_gen_cmd_end(&cmd);
1474 out:
1475 kfree(buf);
1476
1477 return ret;
1478 }
1479 EXPORT_SYMBOL_GPL(synth_event_create);
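/*
 * Usage sketch (hypothetical event/field names): the one-shot wrapper above
 * hides the dynevent_cmd handling entirely:
 *
 *	static struct synth_field_desc wake_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "char[16]",	.name = "comm" },
 *		{ .type = "u64",	.name = "delay_ns" },
 *	};
 *
 *	ret = synth_event_create("wakeup_lat", wake_fields,
 *				 ARRAY_SIZE(wake_fields), THIS_MODULE);
 *
 * On success the event appears under events/synthetic/wakeup_lat.
 */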
1480
static int destroy_synth_event(struct synth_event *se)
1482 {
1483 int ret;
1484
1485 if (se->ref)
1486 return -EBUSY;
1487
1488 if (trace_event_dyn_busy(&se->call))
1489 return -EBUSY;
1490
1491 ret = unregister_synth_event(se);
1492 if (!ret) {
1493 dyn_event_remove(&se->devent);
1494 free_synth_event(se);
1495 }
1496
1497 return ret;
1498 }
1499
1500 /**
1501 * synth_event_delete - Delete a synthetic event
 * @event_name: The name of the synthetic event to delete
1503 *
1504 * Delete a synthetic event that was created with synth_event_create().
1505 *
1506 * Return: 0 if successful, error otherwise.
1507 */
int synth_event_delete(const char *event_name)
1509 {
1510 struct synth_event *se = NULL;
1511 struct module *mod = NULL;
1512 int ret = -ENOENT;
1513
1514 mutex_lock(&event_mutex);
1515 se = find_synth_event(event_name);
1516 if (se) {
1517 mod = se->mod;
1518 ret = destroy_synth_event(se);
1519 }
1520 mutex_unlock(&event_mutex);
1521
1522 if (mod) {
1523 /*
1524 * It is safest to reset the ring buffer if the module
1525 * being unloaded registered any events that were
1526 * used. The only worry is if a new module gets
1527 * loaded, and takes on the same id as the events of
1528 * this module. When printing out the buffer, traced
1529 * events left over from this module may be passed to
1530 * the new module events and unexpected results may
1531 * occur.
1532 */
1533 tracing_reset_all_online_cpus();
1534 }
1535
1536 return ret;
1537 }
1538 EXPORT_SYMBOL_GPL(synth_event_delete);
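/*
 * Usage sketch: removing the hypothetical event created above before the
 * module that generates it goes away:
 *
 *	ret = synth_event_delete("wakeup_lat");
 *
 * This fails with -EBUSY while the event is still enabled or referenced.
 */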
1539
static int check_command(const char *raw_command)
1541 {
1542 char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1543 int argc, ret = 0;
1544
1545 cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1546 if (!cmd)
1547 return -ENOMEM;
1548
1549 name_and_field = strsep(&cmd, ";");
1550 if (!name_and_field) {
1551 ret = -EINVAL;
1552 goto free;
1553 }
1554
1555 if (name_and_field[0] == '!')
1556 goto free;
1557
1558 argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1559 if (!argv) {
1560 ret = -ENOMEM;
1561 goto free;
1562 }
1563 argv_free(argv);
1564
1565 if (argc < 3)
1566 ret = -EINVAL;
1567 free:
1568 kfree(saved_cmd);
1569
1570 return ret;
1571 }
1572
static int create_or_delete_synth_event(const char *raw_command)
1574 {
1575 char *name = NULL, *fields, *p;
1576 int ret = 0;
1577
1578 raw_command = skip_spaces(raw_command);
1579 if (raw_command[0] == '\0')
1580 return ret;
1581
1582 last_cmd_set(raw_command);
1583
1584 ret = check_command(raw_command);
1585 if (ret) {
1586 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1587 return ret;
1588 }
1589
1590 p = strpbrk(raw_command, " \t");
1591 if (!p && raw_command[0] != '!') {
1592 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1593 ret = -EINVAL;
1594 goto free;
1595 }
1596
1597 name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
1598 if (!name)
1599 return -ENOMEM;
1600
1601 if (name[0] == '!') {
1602 ret = synth_event_delete(name + 1);
1603 goto free;
1604 }
1605
1606 fields = skip_spaces(p);
1607
1608 ret = __create_synth_event(name, fields);
1609 free:
1610 kfree(name);
1611
1612 return ret;
1613 }
1614
static int synth_event_run_command(struct dynevent_cmd *cmd)
1616 {
1617 struct synth_event *se;
1618 int ret;
1619
1620 ret = create_or_delete_synth_event(cmd->seq.buffer);
1621 if (ret)
1622 return ret;
1623
1624 se = find_synth_event(cmd->event_name);
1625 if (WARN_ON(!se))
1626 return -ENOENT;
1627
1628 se->mod = cmd->private_data;
1629
1630 return ret;
1631 }
1632
1633 /**
1634 * synth_event_cmd_init - Initialize a synthetic event command object
1635 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1636 * @buf: A pointer to the buffer used to build the command
1637 * @maxlen: The length of the buffer passed in @buf
1638 *
1639 * Initialize a synthetic event command object. Use this before
 * calling any of the other dynevent_cmd functions.
1641 */
void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1643 {
1644 dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1645 synth_event_run_command);
1646 }
1647 EXPORT_SYMBOL_GPL(synth_event_cmd_init);
1648
1649 static inline int
__synth_event_trace_init(struct trace_event_file *file,
1651 struct synth_event_trace_state *trace_state)
1652 {
1653 int ret = 0;
1654
1655 memset(trace_state, '\0', sizeof(*trace_state));
1656
1657 /*
1658 * Normal event tracing doesn't get called at all unless the
1659 * ENABLED bit is set (which attaches the probe thus allowing
1660 * this code to be called, etc). Because this is called
1661 * directly by the user, we don't have that but we still need
1662 * to honor not logging when disabled. For the iterated
1663 * trace case, we save the enabled state upon start and just
1664 * ignore the following data calls.
1665 */
1666 if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1667 trace_trigger_soft_disabled(file)) {
1668 trace_state->disabled = true;
1669 ret = -ENOENT;
1670 goto out;
1671 }
1672
1673 trace_state->event = file->event_call->data;
1674 out:
1675 return ret;
1676 }
1677
1678 static inline int
__synth_event_trace_start(struct trace_event_file *file,
1680 struct synth_event_trace_state *trace_state,
1681 int dynamic_fields_size)
1682 {
1683 int entry_size, fields_size = 0;
1684 int ret = 0;
1685
1686 fields_size = trace_state->event->n_u64 * sizeof(u64);
1687 fields_size += dynamic_fields_size;
1688
1689 /*
1690 * Avoid ring buffer recursion detection, as this event
1691 * is being performed within another event.
1692 */
1693 trace_state->buffer = file->tr->array_buffer.buffer;
1694 ring_buffer_nest_start(trace_state->buffer);
1695
1696 entry_size = sizeof(*trace_state->entry) + fields_size;
1697 trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1698 file,
1699 entry_size);
1700 if (!trace_state->entry) {
1701 ring_buffer_nest_end(trace_state->buffer);
1702 ret = -EINVAL;
1703 }
1704
1705 return ret;
1706 }
1707
1708 static inline void
__synth_event_trace_end(struct synth_event_trace_state *trace_state)
1710 {
1711 trace_event_buffer_commit(&trace_state->fbuffer);
1712
1713 ring_buffer_nest_end(trace_state->buffer);
1714 }
1715
1716 /**
1717 * synth_event_trace - Trace a synthetic event
1718 * @file: The trace_event_file representing the synthetic event
1719 * @n_vals: The number of values in vals
1720 * @...: Variable number of args containing the event values
1721 *
1722 * Trace a synthetic event using the values passed in the variable
1723 * argument list.
1724 *
 * The argument list should be a list of 'n_vals' u64 values. The number
 * of vals must match the number of fields in the synthetic event, and
1727 * must be in the same order as the synthetic event fields.
1728 *
1729 * All vals should be cast to u64, and string vals are just pointers
1730 * to strings, cast to u64. Strings will be copied into space
1731 * reserved in the event for the string, using these pointers.
1732 *
1733 * Return: 0 on success, err otherwise.
1734 */
int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1736 {
1737 unsigned int i, n_u64, len, data_size = 0;
1738 struct synth_event_trace_state state;
1739 va_list args;
1740 int ret;
1741
1742 ret = __synth_event_trace_init(file, &state);
1743 if (ret) {
1744 if (ret == -ENOENT)
1745 ret = 0; /* just disabled, not really an error */
1746 return ret;
1747 }
1748
1749 if (state.event->n_dynamic_fields) {
1750 va_start(args, n_vals);
1751
1752 for (i = 0; i < state.event->n_fields; i++) {
1753 u64 val = va_arg(args, u64);
1754
1755 if (state.event->fields[i]->is_string &&
1756 state.event->fields[i]->is_dynamic) {
1757 char *str_val = (char *)(long)val;
1758
1759 data_size += strlen(str_val) + 1;
1760 }
1761 }
1762
1763 va_end(args);
1764 }
1765
1766 ret = __synth_event_trace_start(file, &state, data_size);
1767 if (ret)
1768 return ret;
1769
1770 if (n_vals != state.event->n_fields) {
1771 ret = -EINVAL;
1772 goto out;
1773 }
1774
1775 data_size = 0;
1776
1777 va_start(args, n_vals);
1778 for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1779 u64 val;
1780
1781 val = va_arg(args, u64);
1782
1783 if (state.event->fields[i]->is_string) {
1784 char *str_val = (char *)(long)val;
1785
1786 len = trace_string(state.entry, state.event, str_val,
1787 state.event->fields[i]->is_dynamic,
1788 data_size, &n_u64);
1789 data_size += len; /* only dynamic string increments */
1790 } else {
1791 struct synth_field *field = state.event->fields[i];
1792
1793 switch (field->size) {
1794 case 1:
1795 state.entry->fields[n_u64].as_u8 = (u8)val;
1796 break;
1797
1798 case 2:
1799 state.entry->fields[n_u64].as_u16 = (u16)val;
1800 break;
1801
1802 case 4:
1803 state.entry->fields[n_u64].as_u32 = (u32)val;
1804 break;
1805
1806 default:
1807 state.entry->fields[n_u64].as_u64 = val;
1808 break;
1809 }
1810 n_u64++;
1811 }
1812 }
1813 va_end(args);
1814 out:
1815 __synth_event_trace_end(&state);
1816
1817 return ret;
1818 }
1819 EXPORT_SYMBOL_GPL(synth_event_trace);
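/*
 * Usage sketch (hypothetical event and values): generating one record of
 * the "wakeup_lat" event sketched earlier.  The trace_event_file is looked
 * up once and held while generating:
 *
 *	struct trace_event_file *file;
 *	int ret;
 *
 *	file = trace_get_event_file(NULL, "synthetic", "wakeup_lat");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 *	ret = synth_event_trace(file, 3,
 *				(u64)current->pid,
 *				(u64)(long)current->comm,
 *				(u64)delay_ns);
 *	trace_put_event_file(file);
 *
 * String fields are passed as pointers cast to u64; delay_ns is assumed to
 * be a local u64.
 */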
1820
1821 /**
1822 * synth_event_trace_array - Trace a synthetic event from an array
1823 * @file: The trace_event_file representing the synthetic event
1824 * @vals: Array of values
1825 * @n_vals: The number of values in vals
1826 *
1827 * Trace a synthetic event using the values passed in as 'vals'.
1828 *
 * The 'vals' array is just an array of 'n_vals' u64 values. The number of
 * vals must match the number of fields in the synthetic event, and
1831 * must be in the same order as the synthetic event fields.
1832 *
1833 * All vals should be cast to u64, and string vals are just pointers
1834 * to strings, cast to u64. Strings will be copied into space
1835 * reserved in the event for the string, using these pointers.
1836 *
1837 * Return: 0 on success, err otherwise.
1838 */
int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1840 unsigned int n_vals)
1841 {
1842 unsigned int i, n_u64, field_pos, len, data_size = 0;
1843 struct synth_event_trace_state state;
1844 char *str_val;
1845 int ret;
1846
1847 ret = __synth_event_trace_init(file, &state);
1848 if (ret) {
1849 if (ret == -ENOENT)
1850 ret = 0; /* just disabled, not really an error */
1851 return ret;
1852 }
1853
1854 if (state.event->n_dynamic_fields) {
1855 for (i = 0; i < state.event->n_dynamic_fields; i++) {
1856 field_pos = state.event->dynamic_fields[i]->field_pos;
1857 str_val = (char *)(long)vals[field_pos];
1858 len = strlen(str_val) + 1;
1859 data_size += len;
1860 }
1861 }
1862
1863 ret = __synth_event_trace_start(file, &state, data_size);
1864 if (ret)
1865 return ret;
1866
1867 if (n_vals != state.event->n_fields) {
1868 ret = -EINVAL;
1869 goto out;
1870 }
1871
1872 data_size = 0;
1873
1874 for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1875 if (state.event->fields[i]->is_string) {
1876 char *str_val = (char *)(long)vals[i];
1877
1878 len = trace_string(state.entry, state.event, str_val,
1879 state.event->fields[i]->is_dynamic,
1880 data_size, &n_u64);
1881 data_size += len; /* only dynamic string increments */
1882 } else {
1883 struct synth_field *field = state.event->fields[i];
1884 u64 val = vals[i];
1885
1886 switch (field->size) {
1887 case 1:
1888 state.entry->fields[n_u64].as_u8 = (u8)val;
1889 break;
1890
1891 case 2:
1892 state.entry->fields[n_u64].as_u16 = (u16)val;
1893 break;
1894
1895 case 4:
1896 state.entry->fields[n_u64].as_u32 = (u32)val;
1897 break;
1898
1899 default:
1900 state.entry->fields[n_u64].as_u64 = val;
1901 break;
1902 }
1903 n_u64++;
1904 }
1905 }
1906 out:
1907 __synth_event_trace_end(&state);
1908
1909 return ret;
1910 }
1911 EXPORT_SYMBOL_GPL(synth_event_trace_array);
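/*
 * Usage sketch (hypothetical values): the same record as above, with the
 * values packed into an array instead of passed as varargs:
 *
 *	u64 vals[3];
 *
 *	vals[0] = (u64)current->pid;
 *	vals[1] = (u64)(long)current->comm;
 *	vals[2] = (u64)delay_ns;
 *
 *	ret = synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
 */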
1912
1913 /**
1914 * synth_event_trace_start - Start piecewise synthetic event trace
1915 * @file: The trace_event_file representing the synthetic event
1916 * @trace_state: A pointer to object tracking the piecewise trace state
1917 *
1918 * Start the trace of a synthetic event field-by-field rather than all
1919 * at once.
1920 *
1921 * This function 'opens' an event trace, which means space is reserved
1922 * for the event in the trace buffer, after which the event's
1923 * individual field values can be set through either
1924 * synth_event_add_next_val() or synth_event_add_val().
1925 *
1926 * A pointer to a trace_state object is passed in, which will keep
1927 * track of the current event trace state until the event trace is
1928 * closed (and the event finally traced) using
1929 * synth_event_trace_end().
1930 *
1931 * Note that synth_event_trace_end() must be called after all values
1932 * have been added for each event trace, regardless of whether adding
1933 * all field values succeeded or not.
1934 *
1935 * Note also that for a given event trace, all fields must be added
1936 * using either synth_event_add_next_val() or synth_event_add_val()
1937 * but not both together or interleaved.
1938 *
1939 * Return: 0 on success, err otherwise.
1940 */
int synth_event_trace_start(struct trace_event_file *file,
1942 struct synth_event_trace_state *trace_state)
1943 {
1944 int ret;
1945
1946 if (!trace_state)
1947 return -EINVAL;
1948
1949 ret = __synth_event_trace_init(file, trace_state);
1950 if (ret) {
1951 if (ret == -ENOENT)
1952 ret = 0; /* just disabled, not really an error */
1953 return ret;
1954 }
1955
1956 if (trace_state->event->n_dynamic_fields)
1957 return -ENOTSUPP;
1958
1959 ret = __synth_event_trace_start(file, trace_state, 0);
1960
1961 return ret;
1962 }
1963 EXPORT_SYMBOL_GPL(synth_event_trace_start);
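
/*
 * A minimal sketch of the full piecewise sequence (hypothetical
 * caller, not built as part of this file): 'my_synth_file' is an
 * assumed trace_event_file for a synthetic event with two non-dynamic
 * fields (a pid and a timestamp), set here in declaration order with
 * synth_event_add_next_val(). synth_event_trace_end() is called
 * unconditionally once the trace has been opened.
 *
 *	struct synth_event_trace_state state;
 *	int ret;
 *
 *	ret = synth_event_trace_start(my_synth_file, &state);
 *	if (ret)
 *		return ret;
 *
 *	ret = synth_event_add_next_val(current->pid, &state);
 *	if (!ret)
 *		ret = synth_event_add_next_val(ktime_get_ns(), &state);
 *
 *	synth_event_trace_end(&state);
 */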

static int __synth_event_add_val(const char *field_name, u64 val,
				 struct synth_event_trace_state *trace_state)
{
	struct synth_field *field = NULL;
	struct synth_trace_event *entry;
	struct synth_event *event;
	int i, ret = 0;

	if (!trace_state) {
		ret = -EINVAL;
		goto out;
	}

	/* can't mix synth_event_add_next_val() with synth_event_add_val() */
	if (field_name) {
		if (trace_state->add_next) {
			ret = -EINVAL;
			goto out;
		}
		trace_state->add_name = true;
	} else {
		if (trace_state->add_name) {
			ret = -EINVAL;
			goto out;
		}
		trace_state->add_next = true;
	}

	if (trace_state->disabled)
		goto out;

	event = trace_state->event;
	if (trace_state->add_name) {
		for (i = 0; i < event->n_fields; i++) {
			field = event->fields[i];
			if (strcmp(field->name, field_name) == 0)
				break;
		}
		if (!field) {
			ret = -EINVAL;
			goto out;
		}
	} else {
		if (trace_state->cur_field >= event->n_fields) {
			ret = -EINVAL;
			goto out;
		}
		field = event->fields[trace_state->cur_field++];
	}

	entry = trace_state->entry;
	if (field->is_string) {
		char *str_val = (char *)(long)val;
		char *str_field;

		if (field->is_dynamic) { /* add_val can't do dynamic strings */
			ret = -EINVAL;
			goto out;
		}

		if (!str_val) {
			ret = -EINVAL;
			goto out;
		}

		str_field = (char *)&entry->fields[field->offset];
		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
	} else {
		switch (field->size) {
		case 1:
			trace_state->entry->fields[field->offset].as_u8 = (u8)val;
			break;

		case 2:
			trace_state->entry->fields[field->offset].as_u16 = (u16)val;
			break;

		case 4:
			trace_state->entry->fields[field->offset].as_u32 = (u32)val;
			break;

		default:
			trace_state->entry->fields[field->offset].as_u64 = val;
			break;
		}
	}
out:
	return ret;
}

/**
 * synth_event_add_next_val - Add the next field's value to an open synth trace
 * @val: The value to set the next field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the next field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64. If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function assumes all the fields in an event are to be set one
 * after another - successive calls to this function are made, one for
 * each field, in the order of the fields in the event, until all
 * fields have been set. If you'd rather set each field individually
 * without regard to ordering, synth_event_add_val() can be used
 * instead.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_next_val(u64 val,
			     struct synth_event_trace_state *trace_state)
{
	return __synth_event_add_val(NULL, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_next_val);
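
/*
 * As noted above, a string value is passed as a char * cast to u64
 * (sketch; 'comm' is a hypothetical char array in the caller and the
 * corresponding event field is assumed to be a non-dynamic string):
 *
 *	ret = synth_event_add_next_val((u64)(long)comm, &state);
 */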

/**
 * synth_event_add_val - Add a named field's value to an open synth trace
 * @field_name: The name of the synthetic event field value to set
 * @val: The value to set the named field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the named field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64. If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function looks up the field name, and if found, sets the field
 * to the specified value. This lookup makes this function more
 * expensive than synth_event_add_next_val(), so use that or the
 * non-piecewise synth_event_trace() instead if efficiency is more
 * important.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_val(const char *field_name, u64 val,
			struct synth_event_trace_state *trace_state)
{
	return __synth_event_add_val(field_name, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_val);
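
/*
 * A minimal sketch of the named-field variant (hypothetical caller;
 * the field names are assumptions): fields may be set in any order by
 * name, but synth_event_add_val() and synth_event_add_next_val() must
 * not be mixed within the same open trace.
 *
 *	ret = synth_event_trace_start(my_synth_file, &state);
 *	if (ret)
 *		return ret;
 *
 *	synth_event_add_val("pid", current->pid, &state);
 *	synth_event_add_val("lat", latency, &state);
 *
 *	synth_event_trace_end(&state);
 */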

/**
 * synth_event_trace_end - End piecewise synthetic event trace
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * End the trace of a synthetic event opened by
 * synth_event_trace_start().
 *
 * This function 'closes' an event trace, which means that it commits
 * the reserved event and cleans up other loose ends.
 *
 * A pointer to a trace_state object is passed in, which will keep
 * track of the current event trace state opened with
 * synth_event_trace_start().
 *
 * Note that this function must be called after all values have been
 * added for each event trace, regardless of whether adding all field
 * values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	if (!trace_state)
		return -EINVAL;

	__synth_event_trace_end(trace_state);

	return 0;
}
EXPORT_SYMBOL_GPL(synth_event_trace_end);

static int create_synth_event(const char *raw_command)
{
	char *fields, *p;
	const char *name;
	int len, ret = 0;

	raw_command = skip_spaces(raw_command);
	if (raw_command[0] == '\0')
		return ret;

	last_cmd_set(raw_command);

	name = raw_command;

	/* Don't try to process if not our system */
	if (name[0] != 's' || name[1] != ':')
		return -ECANCELED;
	name += 2;

	p = strpbrk(raw_command, " \t");
	if (!p) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return -EINVAL;
	}

	fields = skip_spaces(p);

	/* This interface accepts group name prefix */
	if (strchr(name, '/')) {
		len = str_has_prefix(name, SYNTH_SYSTEM "/");
		if (len == 0) {
			synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
			return -EINVAL;
		}
		name += len;
	}

	len = name - raw_command;

	ret = check_command(raw_command + len);
	if (ret) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return ret;
	}

	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	ret = __create_synth_event(name, fields);

	kfree(name);

	return ret;
}
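
/*
 * Example of the dynamic_events command form parsed above (sketch,
 * assuming tracefs is mounted at /sys/kernel/tracing); the optional
 * "synthetic/" group prefix is also accepted, so the two commands
 * below are equivalent:
 *
 *	# echo 's:wakeup_lat u64 lat; pid_t pid' >> /sys/kernel/tracing/dynamic_events
 *	# echo 's:synthetic/wakeup_lat u64 lat; pid_t pid' >> /sys/kernel/tracing/dynamic_events
 */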

static int synth_event_release(struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);
	int ret;

	if (event->ref)
		return -EBUSY;

	if (trace_event_dyn_busy(&event->call))
		return -EBUSY;

	ret = unregister_synth_event(event);
	if (ret)
		return ret;

	dyn_event_remove(ev);
	free_synth_event(event);
	return 0;
}

static int __synth_event_show(struct seq_file *m, struct synth_event *event)
{
	struct synth_field *field;
	unsigned int i;
	char *type, *t;

	seq_printf(m, "%s\t", event->name);

	for (i = 0; i < event->n_fields; i++) {
		field = event->fields[i];

		type = field->type;
		t = strstr(type, "__data_loc");
		if (t) { /* __data_loc belongs in format but not event desc */
			t += sizeof("__data_loc");
			type = t;
		}

		/* parameter values */
		seq_printf(m, "%s %s%s", type, field->name,
			   i == event->n_fields - 1 ? "" : "; ");
	}

	seq_putc(m, '\n');

	return 0;
}

static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	seq_printf(m, "s:%s/", event->class.system);

	return __synth_event_show(m, event);
}

static int synth_events_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_synth_event(ev))
		return 0;

	return __synth_event_show(m, to_synth_event(ev));
}

static const struct seq_operations synth_events_seq_op = {
	.start = dyn_event_seq_start,
	.next = dyn_event_seq_next,
	.stop = dyn_event_seq_stop,
	.show = synth_events_seq_show,
};

static int synth_events_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&synth_event_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &synth_events_seq_op);
}

static ssize_t synth_events_write(struct file *file,
				  const char __user *buffer,
				  size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_synth_event);
}

static const struct file_operations synth_events_fops = {
	.open = synth_events_open,
	.write = synth_events_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
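
/*
 * Sketch of the user-space interface backed by these fops (assuming
 * tracefs is mounted at /sys/kernel/tracing): the 'synthetic_events'
 * file takes the same field syntax as dynamic_events but without the
 * "s:" prefix, and reading it lists the registered synthetic events:
 *
 *	# echo 'wakeup_lat u64 lat; pid_t pid' > /sys/kernel/tracing/synthetic_events
 *	# cat /sys/kernel/tracing/synthetic_events
 *	wakeup_lat	u64 lat; pid_t pid
 */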

/*
 * Register dynevent at core_initcall. This allows the kernel to set up
 * kprobe events in postcore_initcall without tracefs.
 */
static __init int trace_events_synth_init_early(void)
{
	int err = 0;

	err = dyn_event_register(&synth_event_ops);
	if (err)
		pr_warn("Could not register synth_event_ops\n");

	return err;
}
core_initcall(trace_events_synth_init_early);

static __init int trace_events_synth_init(void)
{
	struct dentry *entry = NULL;
	int err = 0;

	err = tracing_init_dentry();
	if (err)
		goto err;

	entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
				    NULL, NULL, &synth_events_fops);
	if (!entry) {
		err = -ENODEV;
		goto err;
	}

	return err;
 err:
	pr_warn("Could not create tracefs 'synthetic_events' entry\n");

	return err;
}

fs_initcall(trace_events_synth_init);