xref: /linux/kernel/trace/trace_events_synth.c (revision 0526b56cbc3c489642bd6a5fe4b718dea7ef0ee8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_synth - synthetic trace events
4  *
5  * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16 
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 #include "trace_probe.h"
21 #include "trace_probe_kernel.h"
22 
23 #include "trace_synth.h"
24 
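/*
 * The ERRORS list below is an "X-macro": each C(a, b) entry is expanded
 * twice, first with C() defined to emit the SYNTH_ERR_##a enum value and
 * then with C() defined to emit the error string, so the constants in
 * 'enum { ERRORS }' and the strings in err_text[] always stay in sync.
 */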
25 #undef ERRORS
26 #define ERRORS	\
27 	C(BAD_NAME,		"Illegal name"),		\
28 	C(INVALID_CMD,		"Command must be of the form: <name> field[;field] ..."),\
29 	C(INVALID_DYN_CMD,	"Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
30 	C(EVENT_EXISTS,		"Event already exists"),	\
31 	C(TOO_MANY_FIELDS,	"Too many fields"),		\
32 	C(INCOMPLETE_TYPE,	"Incomplete type"),		\
33 	C(INVALID_TYPE,		"Invalid type"),		\
34 	C(INVALID_FIELD,        "Invalid field"),		\
35 	C(INVALID_ARRAY_SPEC,	"Invalid array specification"),
36 
37 #undef C
38 #define C(a, b)		SYNTH_ERR_##a
39 
40 enum { ERRORS };
41 
42 #undef C
43 #define C(a, b)		b
44 
45 static const char *err_text[] = { ERRORS };
46 
47 static DEFINE_MUTEX(lastcmd_mutex);
48 static char *last_cmd;
49 
50 static int errpos(const char *str)
51 {
52 	int ret = 0;
53 
54 	mutex_lock(&lastcmd_mutex);
55 	if (!str || !last_cmd)
56 		goto out;
57 
58 	ret = err_pos(last_cmd, str);
59  out:
60 	mutex_unlock(&lastcmd_mutex);
61 	return ret;
62 }
63 
64 static void last_cmd_set(const char *str)
65 {
66 	if (!str)
67 		return;
68 
69 	mutex_lock(&lastcmd_mutex);
70 	kfree(last_cmd);
71 	last_cmd = kstrdup(str, GFP_KERNEL);
72 	mutex_unlock(&lastcmd_mutex);
73 }
74 
75 static void synth_err(u8 err_type, u16 err_pos)
76 {
77 	mutex_lock(&lastcmd_mutex);
78 	if (!last_cmd)
79 		goto out;
80 
81 	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
82 			err_type, err_pos);
83  out:
84 	mutex_unlock(&lastcmd_mutex);
85 }
86 
87 static int create_synth_event(const char *raw_command);
88 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
89 static int synth_event_release(struct dyn_event *ev);
90 static bool synth_event_is_busy(struct dyn_event *ev);
91 static bool synth_event_match(const char *system, const char *event,
92 			int argc, const char **argv, struct dyn_event *ev);
93 
94 static struct dyn_event_operations synth_event_ops = {
95 	.create = create_synth_event,
96 	.show = synth_event_show,
97 	.is_busy = synth_event_is_busy,
98 	.free = synth_event_release,
99 	.match = synth_event_match,
100 };
101 
102 static bool is_synth_event(struct dyn_event *ev)
103 {
104 	return ev->ops == &synth_event_ops;
105 }
106 
107 static struct synth_event *to_synth_event(struct dyn_event *ev)
108 {
109 	return container_of(ev, struct synth_event, devent);
110 }
111 
112 static bool synth_event_is_busy(struct dyn_event *ev)
113 {
114 	struct synth_event *event = to_synth_event(ev);
115 
116 	return event->ref != 0;
117 }
118 
119 static bool synth_event_match(const char *system, const char *event,
120 			int argc, const char **argv, struct dyn_event *ev)
121 {
122 	struct synth_event *sev = to_synth_event(ev);
123 
124 	return strcmp(sev->name, event) == 0 &&
125 		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
126 }
127 
128 struct synth_trace_event {
129 	struct trace_entry	ent;
130 	u64			fields[];
131 };
132 
133 static int synth_event_define_fields(struct trace_event_call *call)
134 {
135 	struct synth_trace_event trace;
136 	int offset = offsetof(typeof(trace), fields);
137 	struct synth_event *event = call->data;
138 	unsigned int i, size, n_u64;
139 	char *name, *type;
140 	bool is_signed;
141 	int ret = 0;
142 
143 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
144 		size = event->fields[i]->size;
145 		is_signed = event->fields[i]->is_signed;
146 		type = event->fields[i]->type;
147 		name = event->fields[i]->name;
148 		ret = trace_define_field(call, type, name, offset, size,
149 					 is_signed, FILTER_OTHER);
150 		if (ret)
151 			break;
152 
153 		event->fields[i]->offset = n_u64;
154 
155 		if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
156 			offset += STR_VAR_LEN_MAX;
157 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
158 		} else {
159 			offset += sizeof(u64);
160 			n_u64++;
161 		}
162 	}
163 
164 	event->n_u64 = n_u64;
165 
166 	return ret;
167 }
168 
169 static bool synth_field_signed(char *type)
170 {
171 	if (str_has_prefix(type, "u"))
172 		return false;
173 	if (strcmp(type, "gfp_t") == 0)
174 		return false;
175 
176 	return true;
177 }
178 
179 static int synth_field_is_string(char *type)
180 {
181 	if (strstr(type, "char[") != NULL)
182 		return true;
183 
184 	return false;
185 }
186 
187 static int synth_field_is_stack(char *type)
188 {
189 	if (strstr(type, "long[") != NULL)
190 		return true;
191 
192 	return false;
193 }
194 
195 static int synth_field_string_size(char *type)
196 {
197 	char buf[4], *end, *start;
198 	unsigned int len;
199 	int size, err;
200 
201 	start = strstr(type, "char[");
202 	if (start == NULL)
203 		return -EINVAL;
204 	start += sizeof("char[") - 1;
205 
206 	end = strchr(type, ']');
207 	if (!end || end < start || type + strlen(type) > end + 1)
208 		return -EINVAL;
209 
210 	len = end - start;
211 	if (len > 3)
212 		return -EINVAL;
213 
214 	if (len == 0)
215 		return 0; /* variable-length string */
216 
217 	strncpy(buf, start, len);
218 	buf[len] = '\0';
219 
220 	err = kstrtouint(buf, 0, &size);
221 	if (err)
222 		return err;
223 
224 	if (size > STR_VAR_LEN_MAX)
225 		return -EINVAL;
226 
227 	return size;
228 }
229 
230 static int synth_field_size(char *type)
231 {
232 	int size = 0;
233 
234 	if (strcmp(type, "s64") == 0)
235 		size = sizeof(s64);
236 	else if (strcmp(type, "u64") == 0)
237 		size = sizeof(u64);
238 	else if (strcmp(type, "s32") == 0)
239 		size = sizeof(s32);
240 	else if (strcmp(type, "u32") == 0)
241 		size = sizeof(u32);
242 	else if (strcmp(type, "s16") == 0)
243 		size = sizeof(s16);
244 	else if (strcmp(type, "u16") == 0)
245 		size = sizeof(u16);
246 	else if (strcmp(type, "s8") == 0)
247 		size = sizeof(s8);
248 	else if (strcmp(type, "u8") == 0)
249 		size = sizeof(u8);
250 	else if (strcmp(type, "char") == 0)
251 		size = sizeof(char);
252 	else if (strcmp(type, "unsigned char") == 0)
253 		size = sizeof(unsigned char);
254 	else if (strcmp(type, "int") == 0)
255 		size = sizeof(int);
256 	else if (strcmp(type, "unsigned int") == 0)
257 		size = sizeof(unsigned int);
258 	else if (strcmp(type, "long") == 0)
259 		size = sizeof(long);
260 	else if (strcmp(type, "unsigned long") == 0)
261 		size = sizeof(unsigned long);
262 	else if (strcmp(type, "bool") == 0)
263 		size = sizeof(bool);
264 	else if (strcmp(type, "pid_t") == 0)
265 		size = sizeof(pid_t);
266 	else if (strcmp(type, "gfp_t") == 0)
267 		size = sizeof(gfp_t);
268 	else if (synth_field_is_string(type))
269 		size = synth_field_string_size(type);
270 	else if (synth_field_is_stack(type))
271 		size = 0;
272 
273 	return size;
274 }
275 
276 static const char *synth_field_fmt(char *type)
277 {
278 	const char *fmt = "%llu";
279 
280 	if (strcmp(type, "s64") == 0)
281 		fmt = "%lld";
282 	else if (strcmp(type, "u64") == 0)
283 		fmt = "%llu";
284 	else if (strcmp(type, "s32") == 0)
285 		fmt = "%d";
286 	else if (strcmp(type, "u32") == 0)
287 		fmt = "%u";
288 	else if (strcmp(type, "s16") == 0)
289 		fmt = "%d";
290 	else if (strcmp(type, "u16") == 0)
291 		fmt = "%u";
292 	else if (strcmp(type, "s8") == 0)
293 		fmt = "%d";
294 	else if (strcmp(type, "u8") == 0)
295 		fmt = "%u";
296 	else if (strcmp(type, "char") == 0)
297 		fmt = "%d";
298 	else if (strcmp(type, "unsigned char") == 0)
299 		fmt = "%u";
300 	else if (strcmp(type, "int") == 0)
301 		fmt = "%d";
302 	else if (strcmp(type, "unsigned int") == 0)
303 		fmt = "%u";
304 	else if (strcmp(type, "long") == 0)
305 		fmt = "%ld";
306 	else if (strcmp(type, "unsigned long") == 0)
307 		fmt = "%lu";
308 	else if (strcmp(type, "bool") == 0)
309 		fmt = "%d";
310 	else if (strcmp(type, "pid_t") == 0)
311 		fmt = "%d";
312 	else if (strcmp(type, "gfp_t") == 0)
313 		fmt = "%x";
314 	else if (synth_field_is_string(type))
315 		fmt = "%.*s";
316 	else if (synth_field_is_stack(type))
317 		fmt = "%s";
318 
319 	return fmt;
320 }
321 
322 static void print_synth_event_num_val(struct trace_seq *s,
323 				      char *print_fmt, char *name,
324 				      int size, u64 val, char *space)
325 {
326 	switch (size) {
327 	case 1:
328 		trace_seq_printf(s, print_fmt, name, (u8)val, space);
329 		break;
330 
331 	case 2:
332 		trace_seq_printf(s, print_fmt, name, (u16)val, space);
333 		break;
334 
335 	case 4:
336 		trace_seq_printf(s, print_fmt, name, (u32)val, space);
337 		break;
338 
339 	default:
340 		trace_seq_printf(s, print_fmt, name, val, space);
341 		break;
342 	}
343 }
344 
345 static enum print_line_t print_synth_event(struct trace_iterator *iter,
346 					   int flags,
347 					   struct trace_event *event)
348 {
349 	struct trace_array *tr = iter->tr;
350 	struct trace_seq *s = &iter->seq;
351 	struct synth_trace_event *entry;
352 	struct synth_event *se;
353 	unsigned int i, n_u64;
354 	char print_fmt[32];
355 	const char *fmt;
356 
357 	entry = (struct synth_trace_event *)iter->ent;
358 	se = container_of(event, struct synth_event, call.event);
359 
360 	trace_seq_printf(s, "%s: ", se->name);
361 
362 	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
363 		if (trace_seq_has_overflowed(s))
364 			goto end;
365 
366 		fmt = synth_field_fmt(se->fields[i]->type);
367 
368 		/* parameter types */
369 		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
370 			trace_seq_printf(s, "%s ", fmt);
371 
372 		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
373 
374 		/* parameter values */
375 		if (se->fields[i]->is_string) {
376 			if (se->fields[i]->is_dynamic) {
377 				u32 offset, data_offset;
378 				char *str_field;
379 
380 				offset = (u32)entry->fields[n_u64];
381 				data_offset = offset & 0xffff;
382 
383 				str_field = (char *)entry + data_offset;
384 
385 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
386 						 STR_VAR_LEN_MAX,
387 						 str_field,
388 						 i == se->n_fields - 1 ? "" : " ");
389 				n_u64++;
390 			} else {
391 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
392 						 STR_VAR_LEN_MAX,
393 						 (char *)&entry->fields[n_u64],
394 						 i == se->n_fields - 1 ? "" : " ");
395 				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
396 			}
397 		} else if (se->fields[i]->is_stack) {
398 			u32 offset, data_offset, len;
399 			unsigned long *p, *end;
400 
401 			offset = (u32)entry->fields[n_u64];
402 			data_offset = offset & 0xffff;
403 			len = offset >> 16;
404 
405 			p = (void *)entry + data_offset;
406 			end = (void *)p + len - (sizeof(long) - 1);
407 
408 			trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name);
409 
410 			for (; *p && p < end; p++)
411 				trace_seq_printf(s, "=> %pS\n", (void *)*p);
412 			n_u64++;
413 
414 		} else {
415 			struct trace_print_flags __flags[] = {
416 			    __def_gfpflag_names, {-1, NULL} };
417 			char *space = (i == se->n_fields - 1 ? "" : " ");
418 
419 			print_synth_event_num_val(s, print_fmt,
420 						  se->fields[i]->name,
421 						  se->fields[i]->size,
422 						  entry->fields[n_u64],
423 						  space);
424 
425 			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
426 				trace_seq_puts(s, " (");
427 				trace_print_flags_seq(s, "|",
428 						      entry->fields[n_u64],
429 						      __flags);
430 				trace_seq_putc(s, ')');
431 			}
432 			n_u64++;
433 		}
434 	}
435 end:
436 	trace_seq_putc(s, '\n');
437 
438 	return trace_handle_return(s);
439 }
440 
441 static struct trace_event_functions synth_event_funcs = {
442 	.trace		= print_synth_event
443 };
444 
445 static unsigned int trace_string(struct synth_trace_event *entry,
446 				 struct synth_event *event,
447 				 char *str_val,
448 				 bool is_dynamic,
449 				 unsigned int data_size,
450 				 unsigned int *n_u64)
451 {
452 	unsigned int len = 0;
453 	char *str_field;
454 	int ret;
455 
456 	if (is_dynamic) {
457 		u32 data_offset;
458 
459 		data_offset = struct_size(entry, fields, event->n_u64);
460 		data_offset += data_size;
461 
462 		len = fetch_store_strlen((unsigned long)str_val);
463 
464 		data_offset |= len << 16;
465 		*(u32 *)&entry->fields[*n_u64] = data_offset;
466 
467 		ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);
468 
469 		(*n_u64)++;
470 	} else {
471 		str_field = (char *)&entry->fields[*n_u64];
472 
473 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
474 		if ((unsigned long)str_val < TASK_SIZE)
475 			ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX);
476 		else
477 #endif
478 			ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
479 
480 		if (ret < 0)
481 			strcpy(str_field, FAULT_STRING);
482 
483 		(*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
484 	}
485 
486 	return len;
487 }
488 
489 static unsigned int trace_stack(struct synth_trace_event *entry,
490 				 struct synth_event *event,
491 				 long *stack,
492 				 unsigned int data_size,
493 				 unsigned int *n_u64)
494 {
495 	unsigned int len;
496 	u32 data_offset;
497 	void *data_loc;
498 
499 	data_offset = struct_size(entry, fields, event->n_u64);
500 	data_offset += data_size;
501 
502 	for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) {
503 		if (!stack[len])
504 			break;
505 	}
506 
507 	/* Include the zero'd element if it fits */
508 	if (len < HIST_STACKTRACE_DEPTH)
509 		len++;
510 
511 	len *= sizeof(long);
512 
513 	/* Find the dynamic section to copy the stack into. */
514 	data_loc = (void *)entry + data_offset;
515 	memcpy(data_loc, stack, len);
516 
517 	/* Fill in the field that holds the offset/len combo */
518 	data_offset |= len << 16;
519 	*(u32 *)&entry->fields[*n_u64] = data_offset;
520 
521 	(*n_u64)++;
522 
523 	return len;
524 }
525 
526 static notrace void trace_event_raw_event_synth(void *__data,
527 						u64 *var_ref_vals,
528 						unsigned int *var_ref_idx)
529 {
530 	unsigned int i, n_u64, val_idx, len, data_size = 0;
531 	struct trace_event_file *trace_file = __data;
532 	struct synth_trace_event *entry;
533 	struct trace_event_buffer fbuffer;
534 	struct trace_buffer *buffer;
535 	struct synth_event *event;
536 	int fields_size = 0;
537 
538 	event = trace_file->event_call->data;
539 
540 	if (trace_trigger_soft_disabled(trace_file))
541 		return;
542 
543 	fields_size = event->n_u64 * sizeof(u64);
544 
545 	for (i = 0; i < event->n_dynamic_fields; i++) {
546 		unsigned int field_pos = event->dynamic_fields[i]->field_pos;
547 		char *str_val;
548 
549 		val_idx = var_ref_idx[field_pos];
550 		str_val = (char *)(long)var_ref_vals[val_idx];
551 
552 		if (event->dynamic_fields[i]->is_stack) {
553 			len = *((unsigned long *)str_val);
554 			len *= sizeof(unsigned long);
555 		} else {
556 			len = fetch_store_strlen((unsigned long)str_val);
557 		}
558 
559 		fields_size += len;
560 	}
561 
562 	/*
563 	 * Avoid ring buffer recursion detection, as this event
564 	 * is being performed within another event.
565 	 */
566 	buffer = trace_file->tr->array_buffer.buffer;
567 	ring_buffer_nest_start(buffer);
568 
569 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
570 					   sizeof(*entry) + fields_size);
571 	if (!entry)
572 		goto out;
573 
574 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
575 		val_idx = var_ref_idx[i];
576 		if (event->fields[i]->is_string) {
577 			char *str_val = (char *)(long)var_ref_vals[val_idx];
578 
579 			len = trace_string(entry, event, str_val,
580 					   event->fields[i]->is_dynamic,
581 					   data_size, &n_u64);
582 			data_size += len; /* only dynamic string increments */
583 		} else if (event->fields[i]->is_stack) {
584 			long *stack = (long *)(long)var_ref_vals[val_idx];
585 
586 			len = trace_stack(entry, event, stack,
587 					   data_size, &n_u64);
588 			data_size += len;
589 		} else {
590 			struct synth_field *field = event->fields[i];
591 			u64 val = var_ref_vals[val_idx];
592 
593 			switch (field->size) {
594 			case 1:
595 				*(u8 *)&entry->fields[n_u64] = (u8)val;
596 				break;
597 
598 			case 2:
599 				*(u16 *)&entry->fields[n_u64] = (u16)val;
600 				break;
601 
602 			case 4:
603 				*(u32 *)&entry->fields[n_u64] = (u32)val;
604 				break;
605 
606 			default:
607 				entry->fields[n_u64] = val;
608 				break;
609 			}
610 			n_u64++;
611 		}
612 	}
613 
614 	trace_event_buffer_commit(&fbuffer);
615 out:
616 	ring_buffer_nest_end(buffer);
617 }
618 
619 static void free_synth_event_print_fmt(struct trace_event_call *call)
620 {
621 	if (call) {
622 		kfree(call->print_fmt);
623 		call->print_fmt = NULL;
624 	}
625 }
626 
627 static int __set_synth_event_print_fmt(struct synth_event *event,
628 				       char *buf, int len)
629 {
630 	const char *fmt;
631 	int pos = 0;
632 	int i;
633 
634 	/* When len=0, we just calculate the needed length */
635 #define LEN_OR_ZERO (len ? len - pos : 0)
636 
637 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
638 	for (i = 0; i < event->n_fields; i++) {
639 		fmt = synth_field_fmt(event->fields[i]->type);
640 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
641 				event->fields[i]->name, fmt,
642 				i == event->n_fields - 1 ? "" : ", ");
643 	}
644 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
645 
646 	for (i = 0; i < event->n_fields; i++) {
647 		if (event->fields[i]->is_string &&
648 		    event->fields[i]->is_dynamic)
649 			pos += snprintf(buf + pos, LEN_OR_ZERO,
650 				", __get_str(%s)", event->fields[i]->name);
651 		else if (event->fields[i]->is_stack)
652 			pos += snprintf(buf + pos, LEN_OR_ZERO,
653 				", __get_stacktrace(%s)", event->fields[i]->name);
654 		else
655 			pos += snprintf(buf + pos, LEN_OR_ZERO,
656 					", REC->%s", event->fields[i]->name);
657 	}
658 
659 #undef LEN_OR_ZERO
660 
661 	/* return the length of print_fmt */
662 	return pos;
663 }
664 
665 static int set_synth_event_print_fmt(struct trace_event_call *call)
666 {
667 	struct synth_event *event = call->data;
668 	char *print_fmt;
669 	int len;
670 
671 	/* First: called with 0 length to calculate the needed length */
672 	len = __set_synth_event_print_fmt(event, NULL, 0);
673 
674 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
675 	if (!print_fmt)
676 		return -ENOMEM;
677 
678 	/* Second: actually write the @print_fmt */
679 	__set_synth_event_print_fmt(event, print_fmt, len + 1);
680 	call->print_fmt = print_fmt;
681 
682 	return 0;
683 }
684 
685 static void free_synth_field(struct synth_field *field)
686 {
687 	kfree(field->type);
688 	kfree(field->name);
689 	kfree(field);
690 }
691 
692 static int check_field_version(const char *prefix, const char *field_type,
693 			       const char *field_name)
694 {
695 	/*
696 	 * For backward compatibility, the old synthetic event command
697 	 * format did not require semicolons, and in order to not
698 	 * break user space, that old format must still work. If a new
699 	 * feature is added, then the format that uses the new feature
700 	 * will be required to have semicolons, as nothing that uses
701 	 * the old format would be using the new, yet to be created,
702 	 * feature. When a new feature is added, this will detect it,
703 	 * and return a number greater than 1, and require the format
704 	 * to use semicolons.
705 	 */
706 	return 1;
707 }
708 
709 static struct synth_field *parse_synth_field(int argc, char **argv,
710 					     int *consumed, int *field_version)
711 {
712 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
713 	struct synth_field *field;
714 	int len, ret = -ENOMEM;
715 	struct seq_buf s;
716 	ssize_t size;
717 
718 	if (!strcmp(field_type, "unsigned")) {
719 		if (argc < 3) {
720 			synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
721 			return ERR_PTR(-EINVAL);
722 		}
723 		prefix = "unsigned ";
724 		field_type = argv[1];
725 		field_name = argv[2];
726 		*consumed += 3;
727 	} else {
728 		field_name = argv[1];
729 		*consumed += 2;
730 	}
731 
732 	if (!field_name) {
733 		synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
734 		return ERR_PTR(-EINVAL);
735 	}
736 
737 	*field_version = check_field_version(prefix, field_type, field_name);
738 
739 	field = kzalloc(sizeof(*field), GFP_KERNEL);
740 	if (!field)
741 		return ERR_PTR(-ENOMEM);
742 
743 	len = strlen(field_name);
744 	array = strchr(field_name, '[');
745 	if (array)
746 		len -= strlen(array);
747 
748 	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
749 	if (!field->name)
750 		goto free;
751 
752 	if (!is_good_name(field->name)) {
753 		synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
754 		ret = -EINVAL;
755 		goto free;
756 	}
757 
758 	len = strlen(field_type) + 1;
759 
760 	if (array)
761 		len += strlen(array);
762 
763 	if (prefix)
764 		len += strlen(prefix);
765 
766 	field->type = kzalloc(len, GFP_KERNEL);
767 	if (!field->type)
768 		goto free;
769 
770 	seq_buf_init(&s, field->type, len);
771 	if (prefix)
772 		seq_buf_puts(&s, prefix);
773 	seq_buf_puts(&s, field_type);
774 	if (array)
775 		seq_buf_puts(&s, array);
776 	if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
777 		goto free;
778 
779 	s.buffer[s.len] = '\0';
780 
781 	size = synth_field_size(field->type);
782 	if (size < 0) {
783 		if (array)
784 			synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
785 		else
786 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
787 		ret = -EINVAL;
788 		goto free;
789 	} else if (size == 0) {
790 		if (synth_field_is_string(field->type) ||
791 		    synth_field_is_stack(field->type)) {
792 			char *type;
793 
794 			len = sizeof("__data_loc ") + strlen(field->type) + 1;
795 			type = kzalloc(len, GFP_KERNEL);
796 			if (!type)
797 				goto free;
798 
799 			seq_buf_init(&s, type, len);
800 			seq_buf_puts(&s, "__data_loc ");
801 			seq_buf_puts(&s, field->type);
802 
803 			if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
804 				goto free;
805 			s.buffer[s.len] = '\0';
806 
807 			kfree(field->type);
808 			field->type = type;
809 
810 			field->is_dynamic = true;
811 			size = sizeof(u64);
812 		} else {
813 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
814 			ret = -EINVAL;
815 			goto free;
816 		}
817 	}
818 	field->size = size;
819 
820 	if (synth_field_is_string(field->type))
821 		field->is_string = true;
822 	else if (synth_field_is_stack(field->type))
823 		field->is_stack = true;
824 
825 	field->is_signed = synth_field_signed(field->type);
826  out:
827 	return field;
828  free:
829 	free_synth_field(field);
830 	field = ERR_PTR(ret);
831 	goto out;
832 }
833 
834 static void free_synth_tracepoint(struct tracepoint *tp)
835 {
836 	if (!tp)
837 		return;
838 
839 	kfree(tp->name);
840 	kfree(tp);
841 }
842 
843 static struct tracepoint *alloc_synth_tracepoint(char *name)
844 {
845 	struct tracepoint *tp;
846 
847 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
848 	if (!tp)
849 		return ERR_PTR(-ENOMEM);
850 
851 	tp->name = kstrdup(name, GFP_KERNEL);
852 	if (!tp->name) {
853 		kfree(tp);
854 		return ERR_PTR(-ENOMEM);
855 	}
856 
857 	return tp;
858 }
859 
860 struct synth_event *find_synth_event(const char *name)
861 {
862 	struct dyn_event *pos;
863 	struct synth_event *event;
864 
865 	for_each_dyn_event(pos) {
866 		if (!is_synth_event(pos))
867 			continue;
868 		event = to_synth_event(pos);
869 		if (strcmp(event->name, name) == 0)
870 			return event;
871 	}
872 
873 	return NULL;
874 }
875 
876 static struct trace_event_fields synth_event_fields_array[] = {
877 	{ .type = TRACE_FUNCTION_TYPE,
878 	  .define_fields = synth_event_define_fields },
879 	{}
880 };
881 
882 static int register_synth_event(struct synth_event *event)
883 {
884 	struct trace_event_call *call = &event->call;
885 	int ret = 0;
886 
887 	event->call.class = &event->class;
888 	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
889 	if (!event->class.system) {
890 		ret = -ENOMEM;
891 		goto out;
892 	}
893 
894 	event->tp = alloc_synth_tracepoint(event->name);
895 	if (IS_ERR(event->tp)) {
896 		ret = PTR_ERR(event->tp);
897 		event->tp = NULL;
898 		goto out;
899 	}
900 
901 	INIT_LIST_HEAD(&call->class->fields);
902 	call->event.funcs = &synth_event_funcs;
903 	call->class->fields_array = synth_event_fields_array;
904 
905 	ret = register_trace_event(&call->event);
906 	if (!ret) {
907 		ret = -ENODEV;
908 		goto out;
909 	}
910 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
911 	call->class->reg = trace_event_reg;
912 	call->class->probe = trace_event_raw_event_synth;
913 	call->data = event;
914 	call->tp = event->tp;
915 
916 	ret = trace_add_event_call(call);
917 	if (ret) {
918 		pr_warn("Failed to register synthetic event: %s\n",
919 			trace_event_name(call));
920 		goto err;
921 	}
922 
923 	ret = set_synth_event_print_fmt(call);
924 	/* unregister_trace_event() will be called inside */
925 	if (ret < 0)
926 		trace_remove_event_call(call);
927  out:
928 	return ret;
929  err:
930 	unregister_trace_event(&call->event);
931 	goto out;
932 }
933 
934 static int unregister_synth_event(struct synth_event *event)
935 {
936 	struct trace_event_call *call = &event->call;
937 	int ret;
938 
939 	ret = trace_remove_event_call(call);
940 
941 	return ret;
942 }
943 
944 static void free_synth_event(struct synth_event *event)
945 {
946 	unsigned int i;
947 
948 	if (!event)
949 		return;
950 
951 	for (i = 0; i < event->n_fields; i++)
952 		free_synth_field(event->fields[i]);
953 
954 	kfree(event->fields);
955 	kfree(event->dynamic_fields);
956 	kfree(event->name);
957 	kfree(event->class.system);
958 	free_synth_tracepoint(event->tp);
959 	free_synth_event_print_fmt(&event->call);
960 	kfree(event);
961 }
962 
963 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
964 					     struct synth_field **fields)
965 {
966 	unsigned int i, j, n_dynamic_fields = 0;
967 	struct synth_event *event;
968 
969 	event = kzalloc(sizeof(*event), GFP_KERNEL);
970 	if (!event) {
971 		event = ERR_PTR(-ENOMEM);
972 		goto out;
973 	}
974 
975 	event->name = kstrdup(name, GFP_KERNEL);
976 	if (!event->name) {
977 		kfree(event);
978 		event = ERR_PTR(-ENOMEM);
979 		goto out;
980 	}
981 
982 	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
983 	if (!event->fields) {
984 		free_synth_event(event);
985 		event = ERR_PTR(-ENOMEM);
986 		goto out;
987 	}
988 
989 	for (i = 0; i < n_fields; i++)
990 		if (fields[i]->is_dynamic)
991 			n_dynamic_fields++;
992 
993 	if (n_dynamic_fields) {
994 		event->dynamic_fields = kcalloc(n_dynamic_fields,
995 						sizeof(*event->dynamic_fields),
996 						GFP_KERNEL);
997 		if (!event->dynamic_fields) {
998 			free_synth_event(event);
999 			event = ERR_PTR(-ENOMEM);
1000 			goto out;
1001 		}
1002 	}
1003 
1004 	dyn_event_init(&event->devent, &synth_event_ops);
1005 
1006 	for (i = 0, j = 0; i < n_fields; i++) {
1007 		fields[i]->field_pos = i;
1008 		event->fields[i] = fields[i];
1009 
1010 		if (fields[i]->is_dynamic)
1011 			event->dynamic_fields[j++] = fields[i];
1012 	}
1013 	event->n_dynamic_fields = j;
1014 	event->n_fields = n_fields;
1015  out:
1016 	return event;
1017 }
1018 
1019 static int synth_event_check_arg_fn(void *data)
1020 {
1021 	struct dynevent_arg_pair *arg_pair = data;
1022 	int size;
1023 
1024 	size = synth_field_size((char *)arg_pair->lhs);
1025 	if (size == 0) {
1026 		if (strstr((char *)arg_pair->lhs, "["))
1027 			return 0;
1028 	}
1029 
1030 	return size ? 0 : -EINVAL;
1031 }
1032 
1033 /**
1034  * synth_event_add_field - Add a new field to a synthetic event cmd
1035  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1036  * @type: The type of the new field to add
1037  * @name: The name of the new field to add
1038  *
1039  * Add a new field to a synthetic event cmd object.  Field ordering is in
1040  * the same order the fields are added.
1041  *
1042  * See synth_field_size() for available types. If field_name contains
1043  * [n] the field is considered to be an array.
1044  *
1045  * Return: 0 if successful, error otherwise.
1046  */
1047 int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
1048 			  const char *name)
1049 {
1050 	struct dynevent_arg_pair arg_pair;
1051 	int ret;
1052 
1053 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1054 		return -EINVAL;
1055 
1056 	if (!type || !name)
1057 		return -EINVAL;
1058 
1059 	dynevent_arg_pair_init(&arg_pair, 0, ';');
1060 
1061 	arg_pair.lhs = type;
1062 	arg_pair.rhs = name;
1063 
1064 	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
1065 	if (ret)
1066 		return ret;
1067 
1068 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1069 		ret = -EINVAL;
1070 
1071 	return ret;
1072 }
1073 EXPORT_SYMBOL_GPL(synth_event_add_field);
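
/*
 * Example sketch (names made up for illustration): synth_event_add_field()
 * is normally called between synth_event_gen_cmd_start() (or
 * synth_event_gen_cmd_array_start()) and synth_event_gen_cmd_end(), to
 * append further fields to the pending command.
 */
static int __maybe_unused example_add_lat_fields(struct dynevent_cmd *cmd)
{
	int ret;

	/* Append one more "type name" field pair to the pending command */
	ret = synth_event_add_field(cmd, "u64", "example_lat");
	if (ret)
		return ret;

	return synth_event_add_field(cmd, "pid_t", "example_pid");
}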
1074 
1075 /**
1076  * synth_event_add_field_str - Add a new field to a synthetic event cmd
1077  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1078  * @type_name: The type and name of the new field to add, as a single string
1079  *
1080  * Add a new field to a synthetic event cmd object, as a single
1081  * string.  The @type_name string is expected to be of the form 'type
1082  * name', to which a ';' will be appended.  No sanity checking is done -
1083  * what's passed in is assumed to already be well-formed.  Field
1084  * ordering is in the same order the fields are added.
1085  *
1086  * See synth_field_size() for available types. If field_name contains
1087  * [n] the field is considered to be an array.
1088  *
1089  * Return: 0 if successful, error otherwise.
1090  */
1091 int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
1092 {
1093 	struct dynevent_arg arg;
1094 	int ret;
1095 
1096 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1097 		return -EINVAL;
1098 
1099 	if (!type_name)
1100 		return -EINVAL;
1101 
1102 	dynevent_arg_init(&arg, ';');
1103 
1104 	arg.str = type_name;
1105 
1106 	ret = dynevent_arg_add(cmd, &arg, NULL);
1107 	if (ret)
1108 		return ret;
1109 
1110 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1111 		ret = -EINVAL;
1112 
1113 	return ret;
1114 }
1115 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
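
/*
 * Example sketch (field made up for illustration): the single-string form,
 * roughly equivalent to synth_event_add_field(cmd, "char[16]", "example_comm").
 */
static int __maybe_unused example_add_comm_field(struct dynevent_cmd *cmd)
{
	return synth_event_add_field_str(cmd, "char[16] example_comm");
}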
1116 
1117 /**
1118  * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1119  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1120  * @fields: An array of type/name field descriptions
1121  * @n_fields: The number of field descriptions contained in the fields array
1122  *
1123  * Add a new set of fields to a synthetic event cmd object.  The event
1124  * fields that will be defined for the event should be passed in as an
1125  * array of struct synth_field_desc, and the number of elements in the
1126  * array passed in as n_fields.  Field ordering will retain the
1127  * ordering given in the fields array.
1128  *
1129  * See synth_field_size() for available types. If field_name contains
1130  * [n] the field is considered to be an array.
1131  *
1132  * Return: 0 if successful, error otherwise.
1133  */
1134 int synth_event_add_fields(struct dynevent_cmd *cmd,
1135 			   struct synth_field_desc *fields,
1136 			   unsigned int n_fields)
1137 {
1138 	unsigned int i;
1139 	int ret = 0;
1140 
1141 	for (i = 0; i < n_fields; i++) {
1142 		if (fields[i].type == NULL || fields[i].name == NULL) {
1143 			ret = -EINVAL;
1144 			break;
1145 		}
1146 
1147 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1148 		if (ret)
1149 			break;
1150 	}
1151 
1152 	return ret;
1153 }
1154 EXPORT_SYMBOL_GPL(synth_event_add_fields);
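
/*
 * Example sketch (descriptions made up for illustration): appending a
 * whole array of field descriptions to a pending command at once.
 */
static struct synth_field_desc example_extra_fields[] __maybe_unused = {
	{ .type = "unsigned int",	.name = "example_cpu" },
	{ .type = "char[64]",		.name = "example_msg" },
};

static int __maybe_unused example_add_extra_fields(struct dynevent_cmd *cmd)
{
	return synth_event_add_fields(cmd, example_extra_fields,
				      ARRAY_SIZE(example_extra_fields));
}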
1155 
1156 /**
1157  * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1158  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1159  * @name: The name of the synthetic event
1160  * @mod: The module creating the event, NULL if not created from a module
1161  * @args: Variable number of arg (pairs), one pair for each field
1162  * @args: Variable number of arg pairs, one pair for each field
1163  * NOTE: Users normally won't want to call this function directly, but
1164  * rather use the synth_event_gen_cmd_start() wrapper, which
1165  * automatically adds a NULL to the end of the arg list.  If this
1166  * function is used directly, make sure the last arg in the variable
1167  * arg list is NULL.
1168  *
1169  * Generate a synthetic event command to be executed by
1170  * synth_event_gen_cmd_end().  This function can be used to generate
1171  * the complete command or only the first part of it; in the latter
1172  * case, synth_event_add_field(), synth_event_add_field_str(), or
1173  * synth_event_add_fields() can be used to add more fields following
1174  * this.
1175  *
1176  * There should be an even number of variable args, each pair consisting
1177  * of a type followed by a field name.
1178  *
1179  * See synth_field_size() for available types. If field_name contains
1180  * [n] the field is considered to be an array.
1181  *
1182  * Return: 0 if successful, error otherwise.
1183  */
1184 int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1185 				struct module *mod, ...)
1186 {
1187 	struct dynevent_arg arg;
1188 	va_list args;
1189 	int ret;
1190 
1191 	cmd->event_name = name;
1192 	cmd->private_data = mod;
1193 
1194 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1195 		return -EINVAL;
1196 
1197 	dynevent_arg_init(&arg, 0);
1198 	arg.str = name;
1199 	ret = dynevent_arg_add(cmd, &arg, NULL);
1200 	if (ret)
1201 		return ret;
1202 
1203 	va_start(args, mod);
1204 	for (;;) {
1205 		const char *type, *name;
1206 
1207 		type = va_arg(args, const char *);
1208 		if (!type)
1209 			break;
1210 		name = va_arg(args, const char *);
1211 		if (!name)
1212 			break;
1213 
1214 		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1215 			ret = -EINVAL;
1216 			break;
1217 		}
1218 
1219 		ret = synth_event_add_field(cmd, type, name);
1220 		if (ret)
1221 			break;
1222 	}
1223 	va_end(args);
1224 
1225 	return ret;
1226 }
1227 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
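
/*
 * Example sketch of the whole command-generation flow, loosely modeled on
 * kernel/trace/synth_event_gen_test.c.  The event name and fields are made
 * up for illustration; synth_event_gen_cmd_start() is the wrapper macro
 * that appends the required terminating NULL to the argument list.
 */
static int __maybe_unused example_gen_cmd(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	/* Start the command with the event name and the first field pairs */
	ret = synth_event_gen_cmd_start(&cmd, "example_synth", THIS_MODULE,
					"pid_t", "pid",
					"u64", "lat_ns");
	if (ret)
		goto out;

	/* More fields can still be appended before the command is run */
	ret = synth_event_add_field(&cmd, "char[16]", "comm");
	if (ret)
		goto out;

	/* Execute the command, actually creating the synthetic event */
	ret = synth_event_gen_cmd_end(&cmd);
out:
	kfree(buf);
	return ret;
}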
1228 
1229 /**
1230  * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1231  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1232  * @name: The name of the synthetic event
 * @mod: The module creating the event, NULL if not created from a module
1233  * @fields: An array of type/name field descriptions
1234  * @n_fields: The number of field descriptions contained in the fields array
1235  *
1236  * Generate a synthetic event command to be executed by
1237  * synth_event_gen_cmd_end().  This function can be used to generate
1238  * the complete command or only the first part of it; in the latter
1239  * case, synth_event_add_field(), synth_event_add_field_str(), or
1240  * synth_event_add_fields() can be used to add more fields following
1241  * this.
1242  *
1243  * The event fields that will be defined for the event should be
1244  * passed in as an array of struct synth_field_desc, and the number of
1245  * elements in the array passed in as n_fields.  Field ordering will
1246  * retain the ordering given in the fields array.
1247  *
1248  * See synth_field_size() for available types. If field_name contains
1249  * [n] the field is considered to be an array.
1250  *
1251  * Return: 0 if successful, error otherwise.
1252  */
1253 int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1254 				    struct module *mod,
1255 				    struct synth_field_desc *fields,
1256 				    unsigned int n_fields)
1257 {
1258 	struct dynevent_arg arg;
1259 	unsigned int i;
1260 	int ret = 0;
1261 
1262 	cmd->event_name = name;
1263 	cmd->private_data = mod;
1264 
1265 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1266 		return -EINVAL;
1267 
1268 	if (n_fields > SYNTH_FIELDS_MAX)
1269 		return -EINVAL;
1270 
1271 	dynevent_arg_init(&arg, 0);
1272 	arg.str = name;
1273 	ret = dynevent_arg_add(cmd, &arg, NULL);
1274 	if (ret)
1275 		return ret;
1276 
1277 	for (i = 0; i < n_fields; i++) {
1278 		if (fields[i].type == NULL || fields[i].name == NULL)
1279 			return -EINVAL;
1280 
1281 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1282 		if (ret)
1283 			break;
1284 	}
1285 
1286 	return ret;
1287 }
1288 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
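
/*
 * Example sketch of the array-based variant: same flow as above, but the
 * initial fields come from a synth_field_desc array.  Names are made up
 * for illustration.
 */
static struct synth_field_desc example_array_fields[] __maybe_unused = {
	{ .type = "pid_t",	.name = "pid" },
	{ .type = "u64",	.name = "lat_ns" },
	{ .type = "char[16]",	.name = "comm" },
};

static int __maybe_unused example_gen_cmd_array(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	ret = synth_event_gen_cmd_array_start(&cmd, "example_synth_array",
					      THIS_MODULE,
					      example_array_fields,
					      ARRAY_SIZE(example_array_fields));
	if (!ret)
		ret = synth_event_gen_cmd_end(&cmd);

	kfree(buf);
	return ret;
}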
1289 
1290 static int __create_synth_event(const char *name, const char *raw_fields)
1291 {
1292 	char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
1293 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1294 	int consumed, cmd_version = 1, n_fields_this_loop;
1295 	int i, argc, n_fields = 0, ret = 0;
1296 	struct synth_event *event = NULL;
1297 
1298 	/*
1299 	 * Argument syntax:
1300 	 *  - Add synthetic event: <event_name> field[;field] ...
1301 	 *  - Remove synthetic event: !<event_name> field[;field] ...
1302 	 *      where 'field' = type field_name
1303 	 */
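
	/*
	 * For example, a (hypothetical) creation command could look like:
	 *   wakeup_latency u64 lat; pid_t pid; char[16] comm
	 */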
1304 
1305 	if (name[0] == '\0') {
1306 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1307 		return -EINVAL;
1308 	}
1309 
1310 	if (!is_good_name(name)) {
1311 		synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
1312 		return -EINVAL;
1313 	}
1314 
1315 	mutex_lock(&event_mutex);
1316 
1317 	event = find_synth_event(name);
1318 	if (event) {
1319 		synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
1320 		ret = -EEXIST;
1321 		goto err;
1322 	}
1323 
1324 	tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
1325 	if (!tmp_fields) {
1326 		ret = -ENOMEM;
1327 		goto err;
1328 	}
1329 
1330 	while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
1331 		argv = argv_split(GFP_KERNEL, field_str, &argc);
1332 		if (!argv) {
1333 			ret = -ENOMEM;
1334 			goto err;
1335 		}
1336 
1337 		if (!argc) {
1338 			argv_free(argv);
1339 			continue;
1340 		}
1341 
1342 		n_fields_this_loop = 0;
1343 		consumed = 0;
1344 		while (argc > consumed) {
1345 			int field_version;
1346 
1347 			field = parse_synth_field(argc - consumed,
1348 						  argv + consumed, &consumed,
1349 						  &field_version);
1350 			if (IS_ERR(field)) {
1351 				ret = PTR_ERR(field);
1352 				goto err_free_arg;
1353 			}
1354 
1355 			/*
1356 			 * Track the highest version of any field we
1357 			 * found in the command.
1358 			 */
1359 			if (field_version > cmd_version)
1360 				cmd_version = field_version;
1361 
1362 			/*
1363 			 * Now sort out what is and isn't valid for
1364 			 * each supported version.
1365 			 *
1366 			 * If we see more than 1 field per loop, it
1367 			 * means we have multiple fields between
1368 			 * semicolons, and that's something we no
1369 			 * longer support in a version 2 or greater
1370 			 * command.
1371 			 */
1372 			if (cmd_version > 1 && n_fields_this_loop >= 1) {
1373 				synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
1374 				ret = -EINVAL;
1375 				goto err_free_arg;
1376 			}
1377 
1378 			if (n_fields == SYNTH_FIELDS_MAX) {
1379 				synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
1380 				ret = -EINVAL;
1381 				goto err_free_arg;
1382 			}
1383 			fields[n_fields++] = field;
1384 
1385 			n_fields_this_loop++;
1386 		}
1387 		argv_free(argv);
1388 
1389 		if (consumed < argc) {
1390 			synth_err(SYNTH_ERR_INVALID_CMD, 0);
1391 			ret = -EINVAL;
1392 			goto err;
1393 		}
1394 
1395 	}
1396 
1397 	if (n_fields == 0) {
1398 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1399 		ret = -EINVAL;
1400 		goto err;
1401 	}
1402 
1403 	event = alloc_synth_event(name, n_fields, fields);
1404 	if (IS_ERR(event)) {
1405 		ret = PTR_ERR(event);
1406 		event = NULL;
1407 		goto err;
1408 	}
1409 	ret = register_synth_event(event);
1410 	if (!ret)
1411 		dyn_event_add(&event->devent, &event->call);
1412 	else
1413 		free_synth_event(event);
1414  out:
1415 	mutex_unlock(&event_mutex);
1416 
1417 	kfree(saved_fields);
1418 
1419 	return ret;
1420  err_free_arg:
1421 	argv_free(argv);
1422  err:
1423 	for (i = 0; i < n_fields; i++)
1424 		free_synth_field(fields[i]);
1425 
1426 	goto out;
1427 }
1428 
1429 /**
1430  * synth_event_create - Create a new synthetic event
1431  * @name: The name of the new synthetic event
1432  * @fields: An array of type/name field descriptions
1433  * @n_fields: The number of field descriptions contained in the fields array
1434  * @mod: The module creating the event, NULL if not created from a module
1435  *
1436  * Create a new synthetic event with the given name under the
1437  * trace/events/synthetic/ directory.  The event fields that will be
1438  * defined for the event should be passed in as an array of struct
1439  * synth_field_desc, and the number of elements in the array passed in as
1440  * n_fields. Field ordering will retain the ordering given in the
1441  * fields array.
1442  *
1443  * If the new synthetic event is being created from a module, the mod
1444  * param must be non-NULL.  This will ensure that the trace buffer
1445  * won't contain unreadable events.
1446  *
1447  * The new synth event should be deleted using the synth_event_delete()
1448  * function.  The new synthetic event can be generated from modules or
1449  * other kernel code using synth_event_trace() and related functions.
1450  *
1451  * Return: 0 if successful, error otherwise.
1452  */
1453 int synth_event_create(const char *name, struct synth_field_desc *fields,
1454 		       unsigned int n_fields, struct module *mod)
1455 {
1456 	struct dynevent_cmd cmd;
1457 	char *buf;
1458 	int ret;
1459 
1460 	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1461 	if (!buf)
1462 		return -ENOMEM;
1463 
1464 	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1465 
1466 	ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1467 					      fields, n_fields);
1468 	if (ret)
1469 		goto out;
1470 
1471 	ret = synth_event_gen_cmd_end(&cmd);
1472  out:
1473 	kfree(buf);
1474 
1475 	return ret;
1476 }
1477 EXPORT_SYMBOL_GPL(synth_event_create);
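
/*
 * Example sketch (names made up for illustration): creating a synthetic
 * event in one call from a field description array, then removing it
 * again with synth_event_delete().
 */
static struct synth_field_desc example_create_fields[] __maybe_unused = {
	{ .type = "u64",	.name = "delta_ns" },
	{ .type = "pid_t",	.name = "pid" },
};

static int __maybe_unused example_create_and_delete(void)
{
	int ret;

	ret = synth_event_create("example_oneshot", example_create_fields,
				 ARRAY_SIZE(example_create_fields),
				 THIS_MODULE);
	if (ret)
		return ret;

	/* ... generate instances with synth_event_trace() and friends ... */

	return synth_event_delete("example_oneshot");
}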
1478 
1479 static int destroy_synth_event(struct synth_event *se)
1480 {
1481 	int ret;
1482 
1483 	if (se->ref)
1484 		return -EBUSY;
1485 
1486 	if (trace_event_dyn_busy(&se->call))
1487 		return -EBUSY;
1488 
1489 	ret = unregister_synth_event(se);
1490 	if (!ret) {
1491 		dyn_event_remove(&se->devent);
1492 		free_synth_event(se);
1493 	}
1494 
1495 	return ret;
1496 }
1497 
1498 /**
1499  * synth_event_delete - Delete a synthetic event
1500  * @event_name: The name of the synthetic event to delete
1501  *
1502  * Delete a synthetic event that was created with synth_event_create().
1503  *
1504  * Return: 0 if successful, error otherwise.
1505  */
1506 int synth_event_delete(const char *event_name)
1507 {
1508 	struct synth_event *se = NULL;
1509 	struct module *mod = NULL;
1510 	int ret = -ENOENT;
1511 
1512 	mutex_lock(&event_mutex);
1513 	se = find_synth_event(event_name);
1514 	if (se) {
1515 		mod = se->mod;
1516 		ret = destroy_synth_event(se);
1517 	}
1518 	mutex_unlock(&event_mutex);
1519 
1520 	if (mod) {
1521 		/*
1522 		 * It is safest to reset the ring buffer if the module
1523 		 * being unloaded registered any events that were
1524 		 * used. The only worry is if a new module gets
1525 		 * loaded, and takes on the same id as the events of
1526 		 * this module. When printing out the buffer, traced
1527 		 * events left over from this module may be passed to
1528 		 * the new module events and unexpected results may
1529 		 * occur.
1530 		 */
1531 		tracing_reset_all_online_cpus();
1532 	}
1533 
1534 	return ret;
1535 }
1536 EXPORT_SYMBOL_GPL(synth_event_delete);
1537 
1538 static int check_command(const char *raw_command)
1539 {
1540 	char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1541 	int argc, ret = 0;
1542 
1543 	cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1544 	if (!cmd)
1545 		return -ENOMEM;
1546 
1547 	name_and_field = strsep(&cmd, ";");
1548 	if (!name_and_field) {
1549 		ret = -EINVAL;
1550 		goto free;
1551 	}
1552 
1553 	if (name_and_field[0] == '!')
1554 		goto free;
1555 
1556 	argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1557 	if (!argv) {
1558 		ret = -ENOMEM;
1559 		goto free;
1560 	}
1561 	argv_free(argv);
1562 
1563 	if (argc < 3)
1564 		ret = -EINVAL;
1565 free:
1566 	kfree(saved_cmd);
1567 
1568 	return ret;
1569 }
1570 
1571 static int create_or_delete_synth_event(const char *raw_command)
1572 {
1573 	char *name = NULL, *fields, *p;
1574 	int ret = 0;
1575 
1576 	raw_command = skip_spaces(raw_command);
1577 	if (raw_command[0] == '\0')
1578 		return ret;
1579 
1580 	last_cmd_set(raw_command);
1581 
1582 	ret = check_command(raw_command);
1583 	if (ret) {
1584 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1585 		return ret;
1586 	}
1587 
1588 	p = strpbrk(raw_command, " \t");
1589 	if (!p && raw_command[0] != '!') {
1590 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1591 		ret = -EINVAL;
1592 		goto free;
1593 	}
1594 
1595 	name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
1596 	if (!name)
1597 		return -ENOMEM;
1598 
1599 	if (name[0] == '!') {
1600 		ret = synth_event_delete(name + 1);
1601 		goto free;
1602 	}
1603 
1604 	fields = skip_spaces(p);
1605 
1606 	ret = __create_synth_event(name, fields);
1607 free:
1608 	kfree(name);
1609 
1610 	return ret;
1611 }
1612 
1613 static int synth_event_run_command(struct dynevent_cmd *cmd)
1614 {
1615 	struct synth_event *se;
1616 	int ret;
1617 
1618 	ret = create_or_delete_synth_event(cmd->seq.buffer);
1619 	if (ret)
1620 		return ret;
1621 
1622 	se = find_synth_event(cmd->event_name);
1623 	if (WARN_ON(!se))
1624 		return -ENOENT;
1625 
1626 	se->mod = cmd->private_data;
1627 
1628 	return ret;
1629 }
1630 
1631 /**
1632  * synth_event_cmd_init - Initialize a synthetic event command object
1633  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1634  * @buf: A pointer to the buffer used to build the command
1635  * @maxlen: The length of the buffer passed in @buf
1636  *
1637  * Initialize a synthetic event command object.  Use this before
1638  * calling any of the other dynevent_cmd functions.
1639  */
1640 void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1641 {
1642 	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1643 			  synth_event_run_command);
1644 }
1645 EXPORT_SYMBOL_GPL(synth_event_cmd_init);
1646 
1647 static inline int
1648 __synth_event_trace_init(struct trace_event_file *file,
1649 			 struct synth_event_trace_state *trace_state)
1650 {
1651 	int ret = 0;
1652 
1653 	memset(trace_state, '\0', sizeof(*trace_state));
1654 
1655 	/*
1656 	 * Normal event tracing doesn't get called at all unless the
1657 	 * ENABLED bit is set (which attaches the probe thus allowing
1658 	 * this code to be called, etc).  Because this is called
1659 	 * directly by the user, we don't have that but we still need
1660 	 * to honor not logging when disabled.  For the iterated
1661 	 * trace case, we save the enabled state upon start and just
1662 	 * ignore the following data calls.
1663 	 */
1664 	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1665 	    trace_trigger_soft_disabled(file)) {
1666 		trace_state->disabled = true;
1667 		ret = -ENOENT;
1668 		goto out;
1669 	}
1670 
1671 	trace_state->event = file->event_call->data;
1672 out:
1673 	return ret;
1674 }
1675 
1676 static inline int
1677 __synth_event_trace_start(struct trace_event_file *file,
1678 			  struct synth_event_trace_state *trace_state,
1679 			  int dynamic_fields_size)
1680 {
1681 	int entry_size, fields_size = 0;
1682 	int ret = 0;
1683 
1684 	fields_size = trace_state->event->n_u64 * sizeof(u64);
1685 	fields_size += dynamic_fields_size;
1686 
1687 	/*
1688 	 * Avoid ring buffer recursion detection, as this event
1689 	 * is being performed within another event.
1690 	 */
1691 	trace_state->buffer = file->tr->array_buffer.buffer;
1692 	ring_buffer_nest_start(trace_state->buffer);
1693 
1694 	entry_size = sizeof(*trace_state->entry) + fields_size;
1695 	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1696 							file,
1697 							entry_size);
1698 	if (!trace_state->entry) {
1699 		ring_buffer_nest_end(trace_state->buffer);
1700 		ret = -EINVAL;
1701 	}
1702 
1703 	return ret;
1704 }
1705 
1706 static inline void
1707 __synth_event_trace_end(struct synth_event_trace_state *trace_state)
1708 {
1709 	trace_event_buffer_commit(&trace_state->fbuffer);
1710 
1711 	ring_buffer_nest_end(trace_state->buffer);
1712 }
1713 
1714 /**
1715  * synth_event_trace - Trace a synthetic event
1716  * @file: The trace_event_file representing the synthetic event
1717  * @n_vals: The number of values that follow in the variable argument list
1718  * @args: Variable number of args containing the event values
1719  *
1720  * Trace a synthetic event using the values passed in the variable
1721  * argument list.
1722  *
1723  * The argument list should be a list of 'n_vals' u64 values.  The number
1724  * of vals must match the number of fields in the synthetic event, and
1725  * must be in the same order as the synthetic event fields.
1726  *
1727  * All vals should be cast to u64, and string vals are just pointers
1728  * to strings, cast to u64.  Strings will be copied into space
1729  * reserved in the event for the string, using these pointers.
1730  *
1731  * Return: 0 on success, err otherwise.
1732  */
1733 int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1734 {
1735 	unsigned int i, n_u64, len, data_size = 0;
1736 	struct synth_event_trace_state state;
1737 	va_list args;
1738 	int ret;
1739 
1740 	ret = __synth_event_trace_init(file, &state);
1741 	if (ret) {
1742 		if (ret == -ENOENT)
1743 			ret = 0; /* just disabled, not really an error */
1744 		return ret;
1745 	}
1746 
1747 	if (state.event->n_dynamic_fields) {
1748 		va_start(args, n_vals);
1749 
1750 		for (i = 0; i < state.event->n_fields; i++) {
1751 			u64 val = va_arg(args, u64);
1752 
1753 			if (state.event->fields[i]->is_string &&
1754 			    state.event->fields[i]->is_dynamic) {
1755 				char *str_val = (char *)(long)val;
1756 
1757 				data_size += strlen(str_val) + 1;
1758 			}
1759 		}
1760 
1761 		va_end(args);
1762 	}
1763 
1764 	ret = __synth_event_trace_start(file, &state, data_size);
1765 	if (ret)
1766 		return ret;
1767 
1768 	if (n_vals != state.event->n_fields) {
1769 		ret = -EINVAL;
1770 		goto out;
1771 	}
1772 
1773 	data_size = 0;
1774 
1775 	va_start(args, n_vals);
1776 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1777 		u64 val;
1778 
1779 		val = va_arg(args, u64);
1780 
1781 		if (state.event->fields[i]->is_string) {
1782 			char *str_val = (char *)(long)val;
1783 
1784 			len = trace_string(state.entry, state.event, str_val,
1785 					   state.event->fields[i]->is_dynamic,
1786 					   data_size, &n_u64);
1787 			data_size += len; /* only dynamic string increments */
1788 		} else {
1789 			struct synth_field *field = state.event->fields[i];
1790 
1791 			switch (field->size) {
1792 			case 1:
1793 				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
1794 				break;
1795 
1796 			case 2:
1797 				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
1798 				break;
1799 
1800 			case 4:
1801 				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
1802 				break;
1803 
1804 			default:
1805 				state.entry->fields[n_u64] = val;
1806 				break;
1807 			}
1808 			n_u64++;
1809 		}
1810 	}
1811 	va_end(args);
1812 out:
1813 	__synth_event_trace_end(&state);
1814 
1815 	return ret;
1816 }
1817 EXPORT_SYMBOL_GPL(synth_event_trace);
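
/*
 * Example sketch (event and values made up for illustration): generating
 * one instance of a previously created synthetic event with the vararg
 * interface.  The values must match the event's fields in number and
 * order, and string values are passed as pointers cast to u64.
 */
static int __maybe_unused example_trace_varargs(void)
{
	struct trace_event_file *file;
	int ret;

	file = trace_get_event_file(NULL, "synthetic", "example_synth");
	if (IS_ERR(file))
		return PTR_ERR(file);

	ret = synth_event_trace(file, 3,		/* number of values */
				(u64)777,		/* pid */
				(u64)12345,		/* lat_ns */
				(u64)(long)"bash");	/* comm */

	trace_put_event_file(file);

	return ret;
}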
1818 
1819 /**
1820  * synth_event_trace_array - Trace a synthetic event from an array
1821  * @file: The trace_event_file representing the synthetic event
1822  * @vals: Array of values
1823  * @n_vals: The number of values in vals
1824  *
1825  * Trace a synthetic event using the values passed in as 'vals'.
1826  *
1827  * The 'vals' array is just an array of 'n_vals' u64.  The number of
1828  * vals must match the number of fields in the synthetic event, and
1829  * must be in the same order as the synthetic event fields.
1830  *
1831  * All vals should be cast to u64, and string vals are just pointers
1832  * to strings, cast to u64.  Strings will be copied into space
1833  * reserved in the event for the string, using these pointers.
1834  *
1835  * Return: 0 on success, err otherwise.
1836  */
1837 int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1838 			    unsigned int n_vals)
1839 {
1840 	unsigned int i, n_u64, field_pos, len, data_size = 0;
1841 	struct synth_event_trace_state state;
1842 	char *str_val;
1843 	int ret;
1844 
1845 	ret = __synth_event_trace_init(file, &state);
1846 	if (ret) {
1847 		if (ret == -ENOENT)
1848 			ret = 0; /* just disabled, not really an error */
1849 		return ret;
1850 	}
1851 
1852 	if (state.event->n_dynamic_fields) {
1853 		for (i = 0; i < state.event->n_dynamic_fields; i++) {
1854 			field_pos = state.event->dynamic_fields[i]->field_pos;
1855 			str_val = (char *)(long)vals[field_pos];
1856 			len = strlen(str_val) + 1;
1857 			data_size += len;
1858 		}
1859 	}
1860 
1861 	ret = __synth_event_trace_start(file, &state, data_size);
1862 	if (ret)
1863 		return ret;
1864 
1865 	if (n_vals != state.event->n_fields) {
1866 		ret = -EINVAL;
1867 		goto out;
1868 	}
1869 
1870 	data_size = 0;
1871 
1872 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1873 		if (state.event->fields[i]->is_string) {
1874 			char *str_val = (char *)(long)vals[i];
1875 
1876 			len = trace_string(state.entry, state.event, str_val,
1877 					   state.event->fields[i]->is_dynamic,
1878 					   data_size, &n_u64);
1879 			data_size += len; /* only dynamic string increments */
1880 		} else {
1881 			struct synth_field *field = state.event->fields[i];
1882 			u64 val = vals[i];
1883 
1884 			switch (field->size) {
1885 			case 1:
1886 				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
1887 				break;
1888 
1889 			case 2:
1890 				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
1891 				break;
1892 
1893 			case 4:
1894 				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
1895 				break;
1896 
1897 			default:
1898 				state.entry->fields[n_u64] = val;
1899 				break;
1900 			}
1901 			n_u64++;
1902 		}
1903 	}
1904 out:
1905 	__synth_event_trace_end(&state);
1906 
1907 	return ret;
1908 }
1909 EXPORT_SYMBOL_GPL(synth_event_trace_array);
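
/*
 * Example sketch: the same trace as above, but with the values supplied
 * in an array.  Field order and count must match the event definition;
 * the values are made up for illustration.
 */
static int __maybe_unused example_trace_array(struct trace_event_file *file)
{
	u64 vals[3];

	vals[0] = 777;			/* pid */
	vals[1] = 12345;		/* lat_ns */
	vals[2] = (u64)(long)"bash";	/* comm */

	return synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
}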
1910 
1911 /**
1912  * synth_event_trace_start - Start piecewise synthetic event trace
1913  * @file: The trace_event_file representing the synthetic event
1914  * @trace_state: A pointer to object tracking the piecewise trace state
1915  *
1916  * Start the trace of a synthetic event field-by-field rather than all
1917  * at once.
1918  *
1919  * This function 'opens' an event trace, which means space is reserved
1920  * for the event in the trace buffer, after which the event's
1921  * individual field values can be set through either
1922  * synth_event_add_next_val() or synth_event_add_val().
1923  *
1924  * A pointer to a trace_state object is passed in, which will keep
1925  * track of the current event trace state until the event trace is
1926  * closed (and the event finally traced) using
1927  * synth_event_trace_end().
1928  *
1929  * Note that synth_event_trace_end() must be called after all values
1930  * have been added for each event trace, regardless of whether adding
1931  * all field values succeeded or not.
1932  *
1933  * Note also that for a given event trace, all fields must be added
1934  * using either synth_event_add_next_val() or synth_event_add_val()
1935  * but not both together or interleaved.
1936  *
1937  * Return: 0 on success, err otherwise.
1938  */
1939 int synth_event_trace_start(struct trace_event_file *file,
1940 			    struct synth_event_trace_state *trace_state)
1941 {
1942 	int ret;
1943 
1944 	if (!trace_state)
1945 		return -EINVAL;
1946 
1947 	ret = __synth_event_trace_init(file, trace_state);
1948 	if (ret) {
1949 		if (ret == -ENOENT)
1950 			ret = 0; /* just disabled, not really an error */
1951 		return ret;
1952 	}
1953 
1954 	if (trace_state->event->n_dynamic_fields)
1955 		return -ENOTSUPP;
1956 
1957 	ret = __synth_event_trace_start(file, trace_state, 0);
1958 
1959 	return ret;
1960 }
1961 EXPORT_SYMBOL_GPL(synth_event_trace_start);
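
/*
 * Example sketch of the piecewise interface (field names made up for
 * illustration): open the trace, set fields by name with
 * synth_event_add_val(), then always close it with synth_event_trace_end().
 * Events with dynamic fields cannot use this interface.
 */
static int __maybe_unused example_trace_piecewise(struct trace_event_file *file)
{
	struct synth_event_trace_state state;
	int ret;

	ret = synth_event_trace_start(file, &state);
	if (ret)
		return ret;

	ret = synth_event_add_val("pid", 777, &state);
	if (!ret)
		ret = synth_event_add_val("lat_ns", 12345, &state);

	/* Must be called once the trace has been started, even on error */
	synth_event_trace_end(&state);

	return ret;
}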
1962 
1963 static int __synth_event_add_val(const char *field_name, u64 val,
1964 				 struct synth_event_trace_state *trace_state)
1965 {
1966 	struct synth_field *field = NULL;
1967 	struct synth_trace_event *entry;
1968 	struct synth_event *event;
1969 	int i, ret = 0;
1970 
1971 	if (!trace_state) {
1972 		ret = -EINVAL;
1973 		goto out;
1974 	}
1975 
1976 	/* can't mix add_next_synth_val() with add_synth_val() */
1977 	if (field_name) {
1978 		if (trace_state->add_next) {
1979 			ret = -EINVAL;
1980 			goto out;
1981 		}
1982 		trace_state->add_name = true;
1983 	} else {
1984 		if (trace_state->add_name) {
1985 			ret = -EINVAL;
1986 			goto out;
1987 		}
1988 		trace_state->add_next = true;
1989 	}
1990 
1991 	if (trace_state->disabled)
1992 		goto out;
1993 
1994 	event = trace_state->event;
1995 	if (trace_state->add_name) {
1996 		for (i = 0; i < event->n_fields; i++) {
1997 			field = event->fields[i];
1998 			if (strcmp(field->name, field_name) == 0)
1999 				break;
2000 		}
2001 		if (!field) {
2002 			ret = -EINVAL;
2003 			goto out;
2004 		}
2005 	} else {
2006 		if (trace_state->cur_field >= event->n_fields) {
2007 			ret = -EINVAL;
2008 			goto out;
2009 		}
2010 		field = event->fields[trace_state->cur_field++];
2011 	}
2012 
2013 	entry = trace_state->entry;
2014 	if (field->is_string) {
2015 		char *str_val = (char *)(long)val;
2016 		char *str_field;
2017 
2018 		if (field->is_dynamic) { /* add_val can't do dynamic strings */
2019 			ret = -EINVAL;
2020 			goto out;
2021 		}
2022 
2023 		if (!str_val) {
2024 			ret = -EINVAL;
2025 			goto out;
2026 		}
2027 
2028 		str_field = (char *)&entry->fields[field->offset];
2029 		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
2030 	} else {
2031 		switch (field->size) {
2032 		case 1:
2033 			*(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
2034 			break;
2035 
2036 		case 2:
2037 			*(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
2038 			break;
2039 
2040 		case 4:
2041 			*(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
2042 			break;
2043 
2044 		default:
2045 			trace_state->entry->fields[field->offset] = val;
2046 			break;
2047 		}
2048 	}
2049  out:
2050 	return ret;
2051 }
2052 
2053 /**
2054  * synth_event_add_next_val - Add the next field's value to an open synth trace
2055  * @val: The value to set the next field to
2056  * @trace_state: A pointer to object tracking the piecewise trace state
2057  *
2058  * Set the value of the next field in an event that's been opened by
2059  * synth_event_trace_start().
2060  *
2061  * The val param should be the value cast to u64.  If the value points
2062  * to a string, the val param should be a char * cast to u64.
2063  *
2064  * This function assumes all the fields in an event are to be set one
2065  * after another - successive calls to this function are made, one for
2066  * each field, in the order of the fields in the event, until all
2067  * fields have been set.  If you'd rather set each field individually
2068  * without regard to ordering, synth_event_add_val() can be used
2069  * instead.
2070  *
2071  * Note however that synth_event_add_next_val() and
2072  * synth_event_add_val() can't be intermixed for a given event trace -
2073  * one or the other but not both can be used at the same time.
2074  *
2075  * Note also that synth_event_trace_end() must be called after all
2076  * values have been added for each event trace, regardless of whether
2077  * adding all field values succeeded or not.
2078  *
2079  * Return: 0 on success, err otherwise.
2080  */
2081 int synth_event_add_next_val(u64 val,
2082 			     struct synth_event_trace_state *trace_state)
2083 {
2084 	return __synth_event_add_val(NULL, val, trace_state);
2085 }
2086 EXPORT_SYMBOL_GPL(synth_event_add_next_val);
2087 
2088 /**
2089  * synth_event_add_val - Add a named field's value to an open synth trace
2090  * @field_name: The name of the synthetic event field value to set
2091  * @val: The value to set the named field to
2092  * @trace_state: A pointer to object tracking the piecewise trace state
2093  *
2094  * Set the value of the named field in an event that's been opened by
2095  * synth_event_trace_start().
2096  *
2097  * The val param should be the value cast to u64.  If the value points
2098  * to a string, the val param should be a char * cast to u64.
2099  *
2100  * This function looks up the field name, and if found, sets the field
2101  * to the specified value.  This lookup makes this function more
2102  * expensive than synth_event_add_next_val(), so use that or the
2103  * none-piecewise synth_event_trace() instead if efficiency is more
2104  * non-piecewise synth_event_trace() instead if efficiency is more
2105  *
2106  * Note however that synth_event_add_next_val() and
2107  * synth_event_add_val() can't be intermixed for a given event trace -
2108  * one or the other but not both can be used at the same time.
2109  *
2110  * Note also that synth_event_trace_end() must be called after all
2111  * values have been added for each event trace, regardless of whether
2112  * adding all field values succeeded or not.
2113  *
2114  * Return: 0 on success, err otherwise.
2115  */
2116 int synth_event_add_val(const char *field_name, u64 val,
2117 			struct synth_event_trace_state *trace_state)
2118 {
2119 	return __synth_event_add_val(field_name, val, trace_state);
2120 }
2121 EXPORT_SYMBOL_GPL(synth_event_add_val);
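
/*
 * Illustrative sketch of the named-field variant, again assuming the
 * hypothetical "block_lat" event: fields may be set in any order, but
 * synth_event_add_val() cannot be mixed with synth_event_add_next_val()
 * within the same event trace:
 *
 *	ret = synth_event_trace_start(file, &trace_state);
 *	if (ret)
 *		return ret;
 *
 *	ret = synth_event_add_val("delta", 1000, &trace_state);
 *	if (!ret)
 *		ret = synth_event_add_val("pid", current->pid, &trace_state);
 *	if (!ret)
 *		ret = synth_event_add_val("comm", (u64)(long)"cat", &trace_state);
 *
 *	synth_event_trace_end(&trace_state);
 */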
2122 
2123 /**
2124  * synth_event_trace_end - End piecewise synthetic event trace
2125  * @trace_state: A pointer to object tracking the piecewise trace state
2126  *
2127  * End the trace of a synthetic event opened by
2128  * synth_event_trace__start().
2129  * synth_event_trace_start().
2130  * This function 'closes' an event trace: it commits the reserved event
2131  * and cleans up other loose ends.
2132  *
2133  * A pointer to a trace_state object is passed in, which will keep
2134  * track of the current event trace state opened with
2135  * synth_event_trace_start().
2136  *
2137  * Note that this function must be called after all values have been
2138  * added for each event trace, regardless of whether adding all field
2139  * values succeeded or not.
2140  *
2141  * Return: 0 on success, err otherwise.
2142  */
2143 int synth_event_trace_end(struct synth_event_trace_state *trace_state)
2144 {
2145 	if (!trace_state)
2146 		return -EINVAL;
2147 
2148 	__synth_event_trace_end(trace_state);
2149 
2150 	return 0;
2151 }
2152 EXPORT_SYMBOL_GPL(synth_event_trace_end);
2153 
2154 static int create_synth_event(const char *raw_command)
2155 {
2156 	char *fields, *p;
2157 	const char *name;
2158 	int len, ret = 0;
2159 
2160 	raw_command = skip_spaces(raw_command);
2161 	if (raw_command[0] == '\0')
2162 		return ret;
2163 
2164 	last_cmd_set(raw_command);
2165 
2166 	name = raw_command;
2167 
2168 	/* Don't try to process if not our system */
2169 	if (name[0] != 's' || name[1] != ':')
2170 		return -ECANCELED;
2171 	name += 2;
2172 
2173 	p = strpbrk(raw_command, " \t");
2174 	if (!p) {
2175 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
2176 		return -EINVAL;
2177 	}
2178 
2179 	fields = skip_spaces(p);
2180 
2181 	/* This interface accepts group name prefix */
2182 	if (strchr(name, '/')) {
2183 		len = str_has_prefix(name, SYNTH_SYSTEM "/");
2184 		if (len == 0) {
2185 			synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
2186 			return -EINVAL;
2187 		}
2188 		name += len;
2189 	}
2190 
2191 	len = name - raw_command;
2192 
2193 	ret = check_command(raw_command + len);
2194 	if (ret) {
2195 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
2196 		return ret;
2197 	}
2198 
2199 	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
2200 	if (!name)
2201 		return -ENOMEM;
2202 
2203 	ret = __create_synth_event(name, fields);
2204 
2205 	kfree(name);
2206 
2207 	return ret;
2208 }
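
/*
 * For reference, a command accepted by this parser (via the tracefs
 * dynamic_events interface) looks like the following; the event name and
 * fields are illustrative only:
 *
 *	s:synthetic/block_lat pid_t pid; u64 delta; char comm[16]
 *
 * The "synthetic/" group prefix is optional, as handled above.
 */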
2209 
2210 static int synth_event_release(struct dyn_event *ev)
2211 {
2212 	struct synth_event *event = to_synth_event(ev);
2213 	int ret;
2214 
2215 	if (event->ref)
2216 		return -EBUSY;
2217 
2218 	if (trace_event_dyn_busy(&event->call))
2219 		return -EBUSY;
2220 
2221 	ret = unregister_synth_event(event);
2222 	if (ret)
2223 		return ret;
2224 
2225 	dyn_event_remove(ev);
2226 	free_synth_event(event);
2227 	return 0;
2228 }
2229 
2230 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
2231 {
2232 	struct synth_field *field;
2233 	unsigned int i;
2234 	char *type, *t;
2235 
2236 	seq_printf(m, "%s\t", event->name);
2237 
2238 	for (i = 0; i < event->n_fields; i++) {
2239 		field = event->fields[i];
2240 
2241 		type = field->type;
2242 		t = strstr(type, "__data_loc");
2243 		if (t) { /* __data_loc belongs in format but not event desc */
2244 			t += sizeof("__data_loc");
2245 			type = t;
2246 		}
2247 
2248 		/* parameter values */
2249 		seq_printf(m, "%s %s%s", type, field->name,
2250 			   i == event->n_fields - 1 ? "" : "; ");
2251 	}
2252 
2253 	seq_putc(m, '\n');
2254 
2255 	return 0;
2256 }
2257 
2258 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
2259 {
2260 	struct synth_event *event = to_synth_event(ev);
2261 
2262 	seq_printf(m, "s:%s/", event->class.system);
2263 
2264 	return __synth_event_show(m, event);
2265 }
2266 
2267 static int synth_events_seq_show(struct seq_file *m, void *v)
2268 {
2269 	struct dyn_event *ev = v;
2270 
2271 	if (!is_synth_event(ev))
2272 		return 0;
2273 
2274 	return __synth_event_show(m, to_synth_event(ev));
2275 }
2276 
2277 static const struct seq_operations synth_events_seq_op = {
2278 	.start	= dyn_event_seq_start,
2279 	.next	= dyn_event_seq_next,
2280 	.stop	= dyn_event_seq_stop,
2281 	.show	= synth_events_seq_show,
2282 };
2283 
2284 static int synth_events_open(struct inode *inode, struct file *file)
2285 {
2286 	int ret;
2287 
2288 	ret = security_locked_down(LOCKDOWN_TRACEFS);
2289 	if (ret)
2290 		return ret;
2291 
2292 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
2293 		ret = dyn_events_release_all(&synth_event_ops);
2294 		if (ret < 0)
2295 			return ret;
2296 	}
2297 
2298 	return seq_open(file, &synth_events_seq_op);
2299 }
2300 
2301 static ssize_t synth_events_write(struct file *file,
2302 				  const char __user *buffer,
2303 				  size_t count, loff_t *ppos)
2304 {
2305 	return trace_parse_run_command(file, buffer, count, ppos,
2306 				       create_or_delete_synth_event);
2307 }
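
/*
 * Unlike the dynamic_events interface handled by create_synth_event(),
 * commands written to the synthetic_events file omit the "s:" prefix,
 * e.g. (names illustrative):
 *
 *	block_lat pid_t pid; u64 delta; char comm[16]
 *
 * and a leading '!' removes an existing event of that name.
 */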
2308 
2309 static const struct file_operations synth_events_fops = {
2310 	.open           = synth_events_open,
2311 	.write		= synth_events_write,
2312 	.read           = seq_read,
2313 	.llseek         = seq_lseek,
2314 	.release        = seq_release,
2315 };
2316 
2317 /*
2318  * Register dynevent at core_initcall. This allows the kernel to set up
2319  * kprobe events in postcore_initcall without tracefs.
2320  */
2321 static __init int trace_events_synth_init_early(void)
2322 {
2323 	int err = 0;
2324 
2325 	err = dyn_event_register(&synth_event_ops);
2326 	if (err)
2327 		pr_warn("Could not register synth_event_ops\n");
2328 
2329 	return err;
2330 }
2331 core_initcall(trace_events_synth_init_early);
2332 
2333 static __init int trace_events_synth_init(void)
2334 {
2335 	struct dentry *entry = NULL;
2336 	int err = 0;
2337 	err = tracing_init_dentry();
2338 	if (err)
2339 		goto err;
2340 
2341 	entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
2342 				    NULL, NULL, &synth_events_fops);
2343 	if (!entry) {
2344 		err = -ENODEV;
2345 		goto err;
2346 	}
2347 
2348 	return err;
2349  err:
2350 	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
2351 
2352 	return err;
2353 }
2354 
2355 fs_initcall(trace_events_synth_init);
2356