xref: /linux/kernel/trace/trace_events_synth.c (revision 96378b2088faea68f1fb05ea6b9a566fc569a44c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_synth - synthetic trace events
4  *
5  * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16 
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 
21 #include "trace_synth.h"
22 
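/*
 * The error strings and their SYNTH_ERR_* enum values are generated from
 * the same ERRORS list below: C() is first defined to expand to the enum
 * name and then redefined to expand to the message string, so the two
 * always stay in sync (e.g. SYNTH_ERR_BAD_NAME indexes "Illegal name"
 * in err_text[]).
 */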
23 #undef ERRORS
24 #define ERRORS	\
25 	C(BAD_NAME,		"Illegal name"),		\
26 	C(CMD_INCOMPLETE,	"Incomplete command"),		\
27 	C(EVENT_EXISTS,		"Event already exists"),	\
28 	C(TOO_MANY_FIELDS,	"Too many fields"),		\
29 	C(INCOMPLETE_TYPE,	"Incomplete type"),		\
30 	C(INVALID_TYPE,		"Invalid type"),		\
31 	C(INVALID_FIELD,	"Invalid field"),		\
32 	C(CMD_TOO_LONG,		"Command too long"),
33 
34 #undef C
35 #define C(a, b)		SYNTH_ERR_##a
36 
37 enum { ERRORS };
38 
39 #undef C
40 #define C(a, b)		b
41 
42 static const char *err_text[] = { ERRORS };
43 
44 static char last_cmd[MAX_FILTER_STR_VAL];
45 
46 static int errpos(const char *str)
47 {
48 	return err_pos(last_cmd, str);
49 }
50 
51 static void last_cmd_set(char *str)
52 {
53 	if (!str)
54 		return;
55 
56 	strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);
57 }
58 
59 static void synth_err(u8 err_type, u8 err_pos)
60 {
61 	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
62 			err_type, err_pos);
63 }
64 
65 static int create_synth_event(int argc, const char **argv);
66 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
67 static int synth_event_release(struct dyn_event *ev);
68 static bool synth_event_is_busy(struct dyn_event *ev);
69 static bool synth_event_match(const char *system, const char *event,
70 			int argc, const char **argv, struct dyn_event *ev);
71 
72 static struct dyn_event_operations synth_event_ops = {
73 	.create = create_synth_event,
74 	.show = synth_event_show,
75 	.is_busy = synth_event_is_busy,
76 	.free = synth_event_release,
77 	.match = synth_event_match,
78 };
79 
80 static bool is_synth_event(struct dyn_event *ev)
81 {
82 	return ev->ops == &synth_event_ops;
83 }
84 
85 static struct synth_event *to_synth_event(struct dyn_event *ev)
86 {
87 	return container_of(ev, struct synth_event, devent);
88 }
89 
90 static bool synth_event_is_busy(struct dyn_event *ev)
91 {
92 	struct synth_event *event = to_synth_event(ev);
93 
94 	return event->ref != 0;
95 }
96 
97 static bool synth_event_match(const char *system, const char *event,
98 			int argc, const char **argv, struct dyn_event *ev)
99 {
100 	struct synth_event *sev = to_synth_event(ev);
101 
102 	return strcmp(sev->name, event) == 0 &&
103 		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
104 }
105 
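/*
 * On-buffer layout of a synthetic event: each numeric field occupies one
 * u64 slot in fields[].  A statically sized string occupies
 * STR_VAR_LEN_MAX bytes (STR_VAR_LEN_MAX / sizeof(u64) slots), while a
 * dynamic string occupies a single slot holding a packed u32 (data
 * offset from the start of the entry in the low 16 bits, length in the
 * high 16 bits); the string data itself is appended after fields[].
 */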
106 struct synth_trace_event {
107 	struct trace_entry	ent;
108 	u64			fields[];
109 };
110 
111 static int synth_event_define_fields(struct trace_event_call *call)
112 {
113 	struct synth_trace_event trace;
114 	int offset = offsetof(typeof(trace), fields);
115 	struct synth_event *event = call->data;
116 	unsigned int i, size, n_u64;
117 	char *name, *type;
118 	bool is_signed;
119 	int ret = 0;
120 
121 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
122 		size = event->fields[i]->size;
123 		is_signed = event->fields[i]->is_signed;
124 		type = event->fields[i]->type;
125 		name = event->fields[i]->name;
126 		ret = trace_define_field(call, type, name, offset, size,
127 					 is_signed, FILTER_OTHER);
128 		if (ret)
129 			break;
130 
131 		event->fields[i]->offset = n_u64;
132 
133 		if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
134 			offset += STR_VAR_LEN_MAX;
135 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
136 		} else {
137 			offset += sizeof(u64);
138 			n_u64++;
139 		}
140 	}
141 
142 	event->n_u64 = n_u64;
143 
144 	return ret;
145 }
146 
147 static bool synth_field_signed(char *type)
148 {
149 	if (str_has_prefix(type, "u"))
150 		return false;
151 	if (strcmp(type, "gfp_t") == 0)
152 		return false;
153 
154 	return true;
155 }
156 
157 static int synth_field_is_string(char *type)
158 {
159 	if (strstr(type, "char[") != NULL)
160 		return true;
161 
162 	return false;
163 }
164 
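/*
 * Parse the length out of a string field type: "char[16]" yields 16,
 * "char[]" yields 0 to request a dynamically sized string, and anything
 * else (no brackets, more than three digits, or a length larger than
 * STR_VAR_LEN_MAX) is rejected with -EINVAL.
 */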
165 static int synth_field_string_size(char *type)
166 {
167 	char buf[4], *end, *start;
168 	unsigned int len;
169 	int size, err;
170 
171 	start = strstr(type, "char[");
172 	if (start == NULL)
173 		return -EINVAL;
174 	start += sizeof("char[") - 1;
175 
176 	end = strchr(type, ']');
177 	if (!end || end < start)
178 		return -EINVAL;
179 
180 	len = end - start;
181 	if (len > 3)
182 		return -EINVAL;
183 
184 	if (len == 0)
185 		return 0; /* variable-length string */
186 
187 	strncpy(buf, start, len);
188 	buf[len] = '\0';
189 
190 	err = kstrtouint(buf, 0, &size);
191 	if (err)
192 		return err;
193 
194 	if (size > STR_VAR_LEN_MAX)
195 		return -EINVAL;
196 
197 	return size;
198 }
199 
200 static int synth_field_size(char *type)
201 {
202 	int size = 0;
203 
204 	if (strcmp(type, "s64") == 0)
205 		size = sizeof(s64);
206 	else if (strcmp(type, "u64") == 0)
207 		size = sizeof(u64);
208 	else if (strcmp(type, "s32") == 0)
209 		size = sizeof(s32);
210 	else if (strcmp(type, "u32") == 0)
211 		size = sizeof(u32);
212 	else if (strcmp(type, "s16") == 0)
213 		size = sizeof(s16);
214 	else if (strcmp(type, "u16") == 0)
215 		size = sizeof(u16);
216 	else if (strcmp(type, "s8") == 0)
217 		size = sizeof(s8);
218 	else if (strcmp(type, "u8") == 0)
219 		size = sizeof(u8);
220 	else if (strcmp(type, "char") == 0)
221 		size = sizeof(char);
222 	else if (strcmp(type, "unsigned char") == 0)
223 		size = sizeof(unsigned char);
224 	else if (strcmp(type, "int") == 0)
225 		size = sizeof(int);
226 	else if (strcmp(type, "unsigned int") == 0)
227 		size = sizeof(unsigned int);
228 	else if (strcmp(type, "long") == 0)
229 		size = sizeof(long);
230 	else if (strcmp(type, "unsigned long") == 0)
231 		size = sizeof(unsigned long);
232 	else if (strcmp(type, "pid_t") == 0)
233 		size = sizeof(pid_t);
234 	else if (strcmp(type, "gfp_t") == 0)
235 		size = sizeof(gfp_t);
236 	else if (synth_field_is_string(type))
237 		size = synth_field_string_size(type);
238 
239 	return size;
240 }
241 
242 static const char *synth_field_fmt(char *type)
243 {
244 	const char *fmt = "%llu";
245 
246 	if (strcmp(type, "s64") == 0)
247 		fmt = "%lld";
248 	else if (strcmp(type, "u64") == 0)
249 		fmt = "%llu";
250 	else if (strcmp(type, "s32") == 0)
251 		fmt = "%d";
252 	else if (strcmp(type, "u32") == 0)
253 		fmt = "%u";
254 	else if (strcmp(type, "s16") == 0)
255 		fmt = "%d";
256 	else if (strcmp(type, "u16") == 0)
257 		fmt = "%u";
258 	else if (strcmp(type, "s8") == 0)
259 		fmt = "%d";
260 	else if (strcmp(type, "u8") == 0)
261 		fmt = "%u";
262 	else if (strcmp(type, "char") == 0)
263 		fmt = "%d";
264 	else if (strcmp(type, "unsigned char") == 0)
265 		fmt = "%u";
266 	else if (strcmp(type, "int") == 0)
267 		fmt = "%d";
268 	else if (strcmp(type, "unsigned int") == 0)
269 		fmt = "%u";
270 	else if (strcmp(type, "long") == 0)
271 		fmt = "%ld";
272 	else if (strcmp(type, "unsigned long") == 0)
273 		fmt = "%lu";
274 	else if (strcmp(type, "pid_t") == 0)
275 		fmt = "%d";
276 	else if (strcmp(type, "gfp_t") == 0)
277 		fmt = "%x";
278 	else if (synth_field_is_string(type))
279 		fmt = "%.*s";
280 
281 	return fmt;
282 }
283 
284 static void print_synth_event_num_val(struct trace_seq *s,
285 				      char *print_fmt, char *name,
286 				      int size, u64 val, char *space)
287 {
288 	switch (size) {
289 	case 1:
290 		trace_seq_printf(s, print_fmt, name, (u8)val, space);
291 		break;
292 
293 	case 2:
294 		trace_seq_printf(s, print_fmt, name, (u16)val, space);
295 		break;
296 
297 	case 4:
298 		trace_seq_printf(s, print_fmt, name, (u32)val, space);
299 		break;
300 
301 	default:
302 		trace_seq_printf(s, print_fmt, name, val, space);
303 		break;
304 	}
305 }
306 
307 static enum print_line_t print_synth_event(struct trace_iterator *iter,
308 					   int flags,
309 					   struct trace_event *event)
310 {
311 	struct trace_array *tr = iter->tr;
312 	struct trace_seq *s = &iter->seq;
313 	struct synth_trace_event *entry;
314 	struct synth_event *se;
315 	unsigned int i, n_u64;
316 	char print_fmt[32];
317 	const char *fmt;
318 
319 	entry = (struct synth_trace_event *)iter->ent;
320 	se = container_of(event, struct synth_event, call.event);
321 
322 	trace_seq_printf(s, "%s: ", se->name);
323 
324 	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
325 		if (trace_seq_has_overflowed(s))
326 			goto end;
327 
328 		fmt = synth_field_fmt(se->fields[i]->type);
329 
330 		/* parameter types */
331 		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
332 			trace_seq_printf(s, "%s ", fmt);
333 
334 		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
335 
336 		/* parameter values */
337 		if (se->fields[i]->is_string) {
338 			if (se->fields[i]->is_dynamic) {
339 				u32 offset, data_offset;
340 				char *str_field;
341 
342 				offset = (u32)entry->fields[n_u64];
343 				data_offset = offset & 0xffff;
344 
345 				str_field = (char *)entry + data_offset;
346 
347 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
348 						 STR_VAR_LEN_MAX,
349 						 str_field,
350 						 i == se->n_fields - 1 ? "" : " ");
351 				n_u64++;
352 			} else {
353 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
354 						 STR_VAR_LEN_MAX,
355 						 (char *)&entry->fields[n_u64],
356 						 i == se->n_fields - 1 ? "" : " ");
357 				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
358 			}
359 		} else {
360 			struct trace_print_flags __flags[] = {
361 			    __def_gfpflag_names, {-1, NULL} };
362 			char *space = (i == se->n_fields - 1 ? "" : " ");
363 
364 			print_synth_event_num_val(s, print_fmt,
365 						  se->fields[i]->name,
366 						  se->fields[i]->size,
367 						  entry->fields[n_u64],
368 						  space);
369 
370 			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
371 				trace_seq_puts(s, " (");
372 				trace_print_flags_seq(s, "|",
373 						      entry->fields[n_u64],
374 						      __flags);
375 				trace_seq_putc(s, ')');
376 			}
377 			n_u64++;
378 		}
379 	}
380 end:
381 	trace_seq_putc(s, '\n');
382 
383 	return trace_handle_return(s);
384 }
385 
386 static struct trace_event_functions synth_event_funcs = {
387 	.trace		= print_synth_event
388 };
389 
390 static unsigned int trace_string(struct synth_trace_event *entry,
391 				 struct synth_event *event,
392 				 char *str_val,
393 				 bool is_dynamic,
394 				 unsigned int data_size,
395 				 unsigned int *n_u64)
396 {
397 	unsigned int len = 0;
398 	char *str_field;
399 
400 	if (is_dynamic) {
401 		u32 data_offset;
402 
403 		data_offset = offsetof(typeof(*entry), fields);
404 		data_offset += event->n_u64 * sizeof(u64);
405 		data_offset += data_size;
406 
407 		str_field = (char *)entry + data_offset;
408 
409 		len = strlen(str_val) + 1;
410 		strscpy(str_field, str_val, len);
411 
412 		data_offset |= len << 16;
413 		*(u32 *)&entry->fields[*n_u64] = data_offset;
414 
415 		(*n_u64)++;
416 	} else {
417 		str_field = (char *)&entry->fields[*n_u64];
418 
419 		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
420 		(*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
421 	}
422 
423 	return len;
424 }
425 
426 static notrace void trace_event_raw_event_synth(void *__data,
427 						u64 *var_ref_vals,
428 						unsigned int *var_ref_idx)
429 {
430 	unsigned int i, n_u64, val_idx, len, data_size = 0;
431 	struct trace_event_file *trace_file = __data;
432 	struct synth_trace_event *entry;
433 	struct trace_event_buffer fbuffer;
434 	struct trace_buffer *buffer;
435 	struct synth_event *event;
436 	int fields_size = 0;
437 
438 	event = trace_file->event_call->data;
439 
440 	if (trace_trigger_soft_disabled(trace_file))
441 		return;
442 
443 	fields_size = event->n_u64 * sizeof(u64);
444 
445 	for (i = 0; i < event->n_dynamic_fields; i++) {
446 		unsigned int field_pos = event->dynamic_fields[i]->field_pos;
447 		char *str_val;
448 
449 		val_idx = var_ref_idx[field_pos];
450 		str_val = (char *)(long)var_ref_vals[val_idx];
451 
452 		len = strlen(str_val) + 1;
453 
454 		fields_size += len;
455 	}
456 
457 	/*
458 	 * Avoid ring buffer recursion detection, as this event
459 	 * is being performed within another event.
460 	 */
461 	buffer = trace_file->tr->array_buffer.buffer;
462 	ring_buffer_nest_start(buffer);
463 
464 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
465 					   sizeof(*entry) + fields_size);
466 	if (!entry)
467 		goto out;
468 
469 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
470 		val_idx = var_ref_idx[i];
471 		if (event->fields[i]->is_string) {
472 			char *str_val = (char *)(long)var_ref_vals[val_idx];
473 
474 			len = trace_string(entry, event, str_val,
475 					   event->fields[i]->is_dynamic,
476 					   data_size, &n_u64);
477 			data_size += len; /* only dynamic strings increment data_size */
478 		} else {
479 			struct synth_field *field = event->fields[i];
480 			u64 val = var_ref_vals[val_idx];
481 
482 			switch (field->size) {
483 			case 1:
484 				*(u8 *)&entry->fields[n_u64] = (u8)val;
485 				break;
486 
487 			case 2:
488 				*(u16 *)&entry->fields[n_u64] = (u16)val;
489 				break;
490 
491 			case 4:
492 				*(u32 *)&entry->fields[n_u64] = (u32)val;
493 				break;
494 
495 			default:
496 				entry->fields[n_u64] = val;
497 				break;
498 			}
499 			n_u64++;
500 		}
501 	}
502 
503 	trace_event_buffer_commit(&fbuffer);
504 out:
505 	ring_buffer_nest_end(buffer);
506 }
507 
508 static void free_synth_event_print_fmt(struct trace_event_call *call)
509 {
510 	if (call) {
511 		kfree(call->print_fmt);
512 		call->print_fmt = NULL;
513 	}
514 }
515 
516 static int __set_synth_event_print_fmt(struct synth_event *event,
517 				       char *buf, int len)
518 {
519 	const char *fmt;
520 	int pos = 0;
521 	int i;
522 
523 	/* When len=0, we just calculate the needed length */
524 #define LEN_OR_ZERO (len ? len - pos : 0)
525 
526 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
527 	for (i = 0; i < event->n_fields; i++) {
528 		fmt = synth_field_fmt(event->fields[i]->type);
529 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
530 				event->fields[i]->name, fmt,
531 				i == event->n_fields - 1 ? "" : ", ");
532 	}
533 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
534 
535 	for (i = 0; i < event->n_fields; i++) {
536 		if (event->fields[i]->is_string &&
537 		    event->fields[i]->is_dynamic)
538 			pos += snprintf(buf + pos, LEN_OR_ZERO,
539 				", __get_str(%s)", event->fields[i]->name);
540 		else
541 			pos += snprintf(buf + pos, LEN_OR_ZERO,
542 					", REC->%s", event->fields[i]->name);
543 	}
544 
545 #undef LEN_OR_ZERO
546 
547 	/* return the length of print_fmt */
548 	return pos;
549 }
550 
551 static int set_synth_event_print_fmt(struct trace_event_call *call)
552 {
553 	struct synth_event *event = call->data;
554 	char *print_fmt;
555 	int len;
556 
557 	/* First: called with 0 length to calculate the needed length */
558 	len = __set_synth_event_print_fmt(event, NULL, 0);
559 
560 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
561 	if (!print_fmt)
562 		return -ENOMEM;
563 
564 	/* Second: actually write the @print_fmt */
565 	__set_synth_event_print_fmt(event, print_fmt, len + 1);
566 	call->print_fmt = print_fmt;
567 
568 	return 0;
569 }
570 
571 static void free_synth_field(struct synth_field *field)
572 {
573 	kfree(field->type);
574 	kfree(field->name);
575 	kfree(field);
576 }
577 
578 static struct synth_field *parse_synth_field(int argc, const char **argv,
579 					     int *consumed)
580 {
581 	struct synth_field *field;
582 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
583 	int len, ret = 0;
584 	ssize_t size;
585 
586 	if (field_type[0] == ';')
587 		field_type++;
588 
589 	if (!strcmp(field_type, "unsigned")) {
590 		if (argc < 3) {
591 			synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
592 			return ERR_PTR(-EINVAL);
593 		}
594 		prefix = "unsigned ";
595 		field_type = argv[1];
596 		field_name = argv[2];
597 		*consumed = 3;
598 	} else {
599 		field_name = argv[1];
600 		*consumed = 2;
601 	}
602 
603 	field = kzalloc(sizeof(*field), GFP_KERNEL);
604 	if (!field)
605 		return ERR_PTR(-ENOMEM);
606 
607 	len = strlen(field_name);
608 	array = strchr(field_name, '[');
609 	if (array)
610 		len -= strlen(array);
611 	else if (field_name[len - 1] == ';')
612 		len--;
613 
614 	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
615 	if (!field->name) {
616 		ret = -ENOMEM;
617 		goto free;
618 	}
619 	if (!is_good_name(field->name)) {
620 		synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
621 		ret = -EINVAL;
622 		goto free;
623 	}
624 
625 	if (field_type[0] == ';')
626 		field_type++;
627 	len = strlen(field_type) + 1;
628 	if (array)
629 		len += strlen(array);
630 	if (prefix)
631 		len += strlen(prefix);
632 
633 	field->type = kzalloc(len, GFP_KERNEL);
634 	if (!field->type) {
635 		ret = -ENOMEM;
636 		goto free;
637 	}
638 	if (prefix)
639 		strcat(field->type, prefix);
640 	strcat(field->type, field_type);
641 	if (array) {
642 		strcat(field->type, array);
643 		if (field->type[len - 1] == ';')
644 			field->type[len - 1] = '\0';
645 	}
646 
647 	size = synth_field_size(field->type);
648 	if (size < 0) {
649 		synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
650 		ret = -EINVAL;
651 		goto free;
652 	} else if (size == 0) {
653 		if (synth_field_is_string(field->type)) {
654 			char *type;
655 
656 			type = kzalloc(sizeof("__data_loc ") + strlen(field->type) + 1, GFP_KERNEL);
657 			if (!type) {
658 				ret = -ENOMEM;
659 				goto free;
660 			}
661 
662 			strcat(type, "__data_loc ");
663 			strcat(type, field->type);
664 			kfree(field->type);
665 			field->type = type;
666 
667 			field->is_dynamic = true;
668 			size = sizeof(u64);
669 		} else {
670 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
671 			ret = -EINVAL;
672 			goto free;
673 		}
674 	}
675 	field->size = size;
676 
677 	if (synth_field_is_string(field->type))
678 		field->is_string = true;
679 
680 	field->is_signed = synth_field_signed(field->type);
681  out:
682 	return field;
683  free:
684 	free_synth_field(field);
685 	field = ERR_PTR(ret);
686 	goto out;
687 }
688 
689 static void free_synth_tracepoint(struct tracepoint *tp)
690 {
691 	if (!tp)
692 		return;
693 
694 	kfree(tp->name);
695 	kfree(tp);
696 }
697 
698 static struct tracepoint *alloc_synth_tracepoint(char *name)
699 {
700 	struct tracepoint *tp;
701 
702 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
703 	if (!tp)
704 		return ERR_PTR(-ENOMEM);
705 
706 	tp->name = kstrdup(name, GFP_KERNEL);
707 	if (!tp->name) {
708 		kfree(tp);
709 		return ERR_PTR(-ENOMEM);
710 	}
711 
712 	return tp;
713 }
714 
715 struct synth_event *find_synth_event(const char *name)
716 {
717 	struct dyn_event *pos;
718 	struct synth_event *event;
719 
720 	for_each_dyn_event(pos) {
721 		if (!is_synth_event(pos))
722 			continue;
723 		event = to_synth_event(pos);
724 		if (strcmp(event->name, name) == 0)
725 			return event;
726 	}
727 
728 	return NULL;
729 }
730 
731 static struct trace_event_fields synth_event_fields_array[] = {
732 	{ .type = TRACE_FUNCTION_TYPE,
733 	  .define_fields = synth_event_define_fields },
734 	{}
735 };
736 
737 static int register_synth_event(struct synth_event *event)
738 {
739 	struct trace_event_call *call = &event->call;
740 	int ret = 0;
741 
742 	event->call.class = &event->class;
743 	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
744 	if (!event->class.system) {
745 		ret = -ENOMEM;
746 		goto out;
747 	}
748 
749 	event->tp = alloc_synth_tracepoint(event->name);
750 	if (IS_ERR(event->tp)) {
751 		ret = PTR_ERR(event->tp);
752 		event->tp = NULL;
753 		goto out;
754 	}
755 
756 	INIT_LIST_HEAD(&call->class->fields);
757 	call->event.funcs = &synth_event_funcs;
758 	call->class->fields_array = synth_event_fields_array;
759 
760 	ret = register_trace_event(&call->event);
761 	if (!ret) {
762 		ret = -ENODEV;
763 		goto out;
764 	}
765 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
766 	call->class->reg = trace_event_reg;
767 	call->class->probe = trace_event_raw_event_synth;
768 	call->data = event;
769 	call->tp = event->tp;
770 
771 	ret = trace_add_event_call(call);
772 	if (ret) {
773 		pr_warn("Failed to register synthetic event: %s\n",
774 			trace_event_name(call));
775 		goto err;
776 	}
777 
778 	ret = set_synth_event_print_fmt(call);
779 	if (ret < 0) {
780 		trace_remove_event_call(call);
781 		goto err;
782 	}
783  out:
784 	return ret;
785  err:
786 	unregister_trace_event(&call->event);
787 	goto out;
788 }
789 
790 static int unregister_synth_event(struct synth_event *event)
791 {
792 	struct trace_event_call *call = &event->call;
793 	int ret;
794 
795 	ret = trace_remove_event_call(call);
796 
797 	return ret;
798 }
799 
800 static void free_synth_event(struct synth_event *event)
801 {
802 	unsigned int i;
803 
804 	if (!event)
805 		return;
806 
807 	for (i = 0; i < event->n_fields; i++)
808 		free_synth_field(event->fields[i]);
809 
810 	kfree(event->fields);
811 	kfree(event->dynamic_fields);
812 	kfree(event->name);
813 	kfree(event->class.system);
814 	free_synth_tracepoint(event->tp);
815 	free_synth_event_print_fmt(&event->call);
816 	kfree(event);
817 }
818 
819 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
820 					     struct synth_field **fields)
821 {
822 	unsigned int i, j, n_dynamic_fields = 0;
823 	struct synth_event *event;
824 
825 	event = kzalloc(sizeof(*event), GFP_KERNEL);
826 	if (!event) {
827 		event = ERR_PTR(-ENOMEM);
828 		goto out;
829 	}
830 
831 	event->name = kstrdup(name, GFP_KERNEL);
832 	if (!event->name) {
833 		kfree(event);
834 		event = ERR_PTR(-ENOMEM);
835 		goto out;
836 	}
837 
838 	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
839 	if (!event->fields) {
840 		free_synth_event(event);
841 		event = ERR_PTR(-ENOMEM);
842 		goto out;
843 	}
844 
845 	for (i = 0; i < n_fields; i++)
846 		if (fields[i]->is_dynamic)
847 			n_dynamic_fields++;
848 
849 	if (n_dynamic_fields) {
850 		event->dynamic_fields = kcalloc(n_dynamic_fields,
851 						sizeof(*event->dynamic_fields),
852 						GFP_KERNEL);
853 		if (!event->dynamic_fields) {
854 			free_synth_event(event);
855 			event = ERR_PTR(-ENOMEM);
856 			goto out;
857 		}
858 	}
859 
860 	dyn_event_init(&event->devent, &synth_event_ops);
861 
862 	for (i = 0, j = 0; i < n_fields; i++) {
863 		event->fields[i] = fields[i];
864 
865 		if (fields[i]->is_dynamic) {
866 			event->dynamic_fields[j] = fields[i];
867 			event->dynamic_fields[j]->field_pos = i;
868 			j++;
869 			event->n_dynamic_fields++;
870 		}
871 	}
872 	event->n_fields = n_fields;
873  out:
874 	return event;
875 }
876 
877 static int synth_event_check_arg_fn(void *data)
878 {
879 	struct dynevent_arg_pair *arg_pair = data;
880 	int size;
881 
882 	size = synth_field_size((char *)arg_pair->lhs);
883 	if (size == 0) {
884 		if (strstr((char *)arg_pair->lhs, "["))
885 			return 0;
886 	}
887 
888 	return size ? 0 : -EINVAL;
889 }
890 
891 /**
892  * synth_event_add_field - Add a new field to a synthetic event cmd
893  * @cmd: A pointer to the dynevent_cmd struct representing the new event
894  * @type: The type of the new field to add
895  * @name: The name of the new field to add
896  *
897  * Add a new field to a synthetic event cmd object.  Field ordering is in
898  * the same order the fields are added.
899  *
900  * See synth_field_size() for available types. If field_name contains
901  * [n] the field is considered to be an array.
902  *
903  * Return: 0 if successful, error otherwise.
904  */
905 int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
906 			  const char *name)
907 {
908 	struct dynevent_arg_pair arg_pair;
909 	int ret;
910 
911 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
912 		return -EINVAL;
913 
914 	if (!type || !name)
915 		return -EINVAL;
916 
917 	dynevent_arg_pair_init(&arg_pair, 0, ';');
918 
919 	arg_pair.lhs = type;
920 	arg_pair.rhs = name;
921 
922 	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
923 	if (ret)
924 		return ret;
925 
926 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
927 		ret = -EINVAL;
928 
929 	return ret;
930 }
931 EXPORT_SYMBOL_GPL(synth_event_add_field);
932 
933 /**
934  * synth_event_add_field_str - Add a new field to a synthetic event cmd
935  * @cmd: A pointer to the dynevent_cmd struct representing the new event
936  * @type_name: The type and name of the new field to add, as a single string
937  *
938  * Add a new field to a synthetic event cmd object, as a single
939  * string.  The @type_name string is expected to be of the form 'type
940  * name', to which a ';' will be appended.  No sanity checking is done -
941  * what's passed in is assumed to already be well-formed.  Field
942  * ordering is in the same order the fields are added.
943  *
944  * See synth_field_size() for available types. If field_name contains
945  * [n] the field is considered to be an array.
946  *
947  * Return: 0 if successful, error otherwise.
948  */
949 int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
950 {
951 	struct dynevent_arg arg;
952 	int ret;
953 
954 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
955 		return -EINVAL;
956 
957 	if (!type_name)
958 		return -EINVAL;
959 
960 	dynevent_arg_init(&arg, ';');
961 
962 	arg.str = type_name;
963 
964 	ret = dynevent_arg_add(cmd, &arg, NULL);
965 	if (ret)
966 		return ret;
967 
968 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
969 		ret = -EINVAL;
970 
971 	return ret;
972 }
973 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
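
/*
 * A minimal usage sketch (hypothetical event and field names): build a
 * creation command field by field with synth_event_add_field() and
 * synth_event_add_field_str(), then execute it.  synth_event_cmd_init(),
 * synth_event_gen_cmd_start() and synth_event_gen_cmd_end() are declared
 * in <linux/trace_events.h>.
 */
static int __maybe_unused synth_field_by_field_example(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	/* Start the command with just the event name; fields follow */
	ret = synth_event_gen_cmd_start(&cmd, "example_latency", THIS_MODULE);
	if (ret)
		goto out;

	/* Add a field as a separate type/name pair ... */
	ret = synth_event_add_field(&cmd, "u64", "lat_ns");
	if (ret)
		goto out;

	/* ... or as a single preformatted 'type name' string */
	ret = synth_event_add_field_str(&cmd, "pid_t pid");
	if (ret)
		goto out;

	/* Run the accumulated command, actually creating the event */
	ret = synth_event_gen_cmd_end(&cmd);
out:
	kfree(buf);
	return ret;
}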
974 
975 /**
976  * synth_event_add_fields - Add multiple fields to a synthetic event cmd
977  * @cmd: A pointer to the dynevent_cmd struct representing the new event
978  * @fields: An array of type/name field descriptions
979  * @n_fields: The number of field descriptions contained in the fields array
980  *
981  * Add a new set of fields to a synthetic event cmd object.  The event
982  * fields that will be defined for the event should be passed in as an
983  * array of struct synth_field_desc, and the number of elements in the
984  * array passed in as n_fields.  Field ordering will retain the
985  * ordering given in the fields array.
986  *
987  * See synth_field_size() for available types. If field_name contains
988  * [n] the field is considered to be an array.
989  *
990  * Return: 0 if successful, error otherwise.
991  */
992 int synth_event_add_fields(struct dynevent_cmd *cmd,
993 			   struct synth_field_desc *fields,
994 			   unsigned int n_fields)
995 {
996 	unsigned int i;
997 	int ret = 0;
998 
999 	for (i = 0; i < n_fields; i++) {
1000 		if (fields[i].type == NULL || fields[i].name == NULL) {
1001 			ret = -EINVAL;
1002 			break;
1003 		}
1004 
1005 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1006 		if (ret)
1007 			break;
1008 	}
1009 
1010 	return ret;
1011 }
1012 EXPORT_SYMBOL_GPL(synth_event_add_fields);
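
/*
 * The same idea, sketched with synth_event_add_fields(): the fields are
 * described up front in a struct synth_field_desc array (declared in
 * <linux/trace_events.h>) and added in one call.  @cmd is assumed to
 * have already been started with synth_event_gen_cmd_start().
 */
static int __maybe_unused synth_add_fields_example(struct dynevent_cmd *cmd)
{
	static struct synth_field_desc fields[] = {
		{ .type = "u64",	.name = "lat_ns" },
		{ .type = "pid_t",	.name = "pid" },
	};

	return synth_event_add_fields(cmd, fields, ARRAY_SIZE(fields));
}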
1013 
1014 /**
1015  * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1016  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1017  * @name: The name of the synthetic event
1018  * @mod: The module creating the event, NULL if not created from a module
1019  * @args: Variable number of arg (pairs), one pair for each field
1020  *
1021  * NOTE: Users normally won't want to call this function directly, but
1022  * rather use the synth_event_gen_cmd_start() wrapper, which
1023  * automatically adds a NULL to the end of the arg list.  If this
1024  * function is used directly, make sure the last arg in the variable
1025  * arg list is NULL.
1026  *
1027  * Generate a synthetic event command to be executed by
1028  * synth_event_gen_cmd_end().  This function can be used to generate
1029  * the complete command or only the first part of it; in the latter
1030  * case, synth_event_add_field(), synth_event_add_field_str(), or
1031  * synth_event_add_fields() can be used to add more fields following
1032  * this.
1033  *
1034  * There should be an even number of variable args, each pair consisting
1035  * of a type followed by a field name.
1036  *
1037  * See synth_field_size() for available types. If field_name contains
1038  * [n] the field is considered to be an array.
1039  *
1040  * Return: 0 if successful, error otherwise.
1041  */
1042 int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1043 				struct module *mod, ...)
1044 {
1045 	struct dynevent_arg arg;
1046 	va_list args;
1047 	int ret;
1048 
1049 	cmd->event_name = name;
1050 	cmd->private_data = mod;
1051 
1052 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1053 		return -EINVAL;
1054 
1055 	dynevent_arg_init(&arg, 0);
1056 	arg.str = name;
1057 	ret = dynevent_arg_add(cmd, &arg, NULL);
1058 	if (ret)
1059 		return ret;
1060 
1061 	va_start(args, mod);
1062 	for (;;) {
1063 		const char *type, *name;
1064 
1065 		type = va_arg(args, const char *);
1066 		if (!type)
1067 			break;
1068 		name = va_arg(args, const char *);
1069 		if (!name)
1070 			break;
1071 
1072 		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1073 			ret = -EINVAL;
1074 			break;
1075 		}
1076 
1077 		ret = synth_event_add_field(cmd, type, name);
1078 		if (ret)
1079 			break;
1080 	}
1081 	va_end(args);
1082 
1083 	return ret;
1084 }
1085 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
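
/*
 * A usage sketch (hypothetical names) of the varargs wrapper: generate
 * the complete creation command in one synth_event_gen_cmd_start() call,
 * passing type/name pairs as the variable arguments, then execute it.
 */
static int __maybe_unused synth_gen_cmd_example(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	/* Event name, owning module, then type/name pairs */
	ret = synth_event_gen_cmd_start(&cmd, "example_wakeup", THIS_MODULE,
					"pid_t", "pid",
					"u64", "delay_ns",
					"char[16]", "comm");
	if (!ret)
		ret = synth_event_gen_cmd_end(&cmd);

	kfree(buf);
	return ret;
}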
1086 
1087 /**
1088  * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1089  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1090  * @name: The name of the synthetic event
 * @mod: The module creating the event, NULL if not created from a module
1091  * @fields: An array of type/name field descriptions
1092  * @n_fields: The number of field descriptions contained in the fields array
1093  *
1094  * Generate a synthetic event command to be executed by
1095  * synth_event_gen_cmd_end().  This function can be used to generate
1096  * the complete command or only the first part of it; in the latter
1097  * case, synth_event_add_field(), synth_event_add_field_str(), or
1098  * synth_event_add_fields() can be used to add more fields following
1099  * this.
1100  *
1101  * The event fields that will be defined for the event should be
1102  * passed in as an array of struct synth_field_desc, and the number of
1103  * elements in the array passed in as n_fields.  Field ordering will
1104  * retain the ordering given in the fields array.
1105  *
1106  * See synth_field_size() for available types. If field_name contains
1107  * [n] the field is considered to be an array.
1108  *
1109  * Return: 0 if successful, error otherwise.
1110  */
1111 int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1112 				    struct module *mod,
1113 				    struct synth_field_desc *fields,
1114 				    unsigned int n_fields)
1115 {
1116 	struct dynevent_arg arg;
1117 	unsigned int i;
1118 	int ret = 0;
1119 
1120 	cmd->event_name = name;
1121 	cmd->private_data = mod;
1122 
1123 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1124 		return -EINVAL;
1125 
1126 	if (n_fields > SYNTH_FIELDS_MAX)
1127 		return -EINVAL;
1128 
1129 	dynevent_arg_init(&arg, 0);
1130 	arg.str = name;
1131 	ret = dynevent_arg_add(cmd, &arg, NULL);
1132 	if (ret)
1133 		return ret;
1134 
1135 	for (i = 0; i < n_fields; i++) {
1136 		if (fields[i].type == NULL || fields[i].name == NULL)
1137 			return -EINVAL;
1138 
1139 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1140 		if (ret)
1141 			break;
1142 	}
1143 
1144 	return ret;
1145 }
1146 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
1147 
1148 static int save_cmdstr(int argc, const char *name, const char **argv)
1149 {
1150 	struct seq_buf s;
1151 	char *buf;
1152 	int i;
1153 
1154 	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1155 	if (!buf)
1156 		return -ENOMEM;
1157 
1158 	seq_buf_init(&s, buf, MAX_DYNEVENT_CMD_LEN);
1159 
1160 	seq_buf_puts(&s, name);
1161 
1162 	for (i = 0; i < argc; i++) {
1163 		seq_buf_putc(&s, ' ');
1164 		seq_buf_puts(&s, argv[i]);
1165 	}
1166 
1167 	if (!seq_buf_buffer_left(&s)) {
1168 		synth_err(SYNTH_ERR_CMD_TOO_LONG, 0);
1169 		kfree(buf);
1170 		return -EINVAL;
1171 	}
1172 	buf[s.len] = 0;
1173 	last_cmd_set(buf);
1174 
1175 	kfree(buf);
1176 	return 0;
1177 }
1178 
1179 static int __create_synth_event(int argc, const char *name, const char **argv)
1180 {
1181 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1182 	struct synth_event *event = NULL;
1183 	int i, consumed = 0, n_fields = 0, ret = 0;
1184 
1185 	ret = save_cmdstr(argc, name, argv);
1186 	if (ret)
1187 		return ret;
1188 
1189 	/*
1190 	 * Argument syntax:
1191 	 *  - Add synthetic event: <event_name> field[;field] ...
1192 	 *  - Remove synthetic event: !<event_name> field[;field] ...
1193 	 *      where 'field' = type field_name
1194 	 */
1195 
1196 	if (name[0] == '\0' || argc < 1) {
1197 		synth_err(SYNTH_ERR_CMD_INCOMPLETE, 0);
1198 		return -EINVAL;
1199 	}
1200 
1201 	mutex_lock(&event_mutex);
1202 
1203 	if (!is_good_name(name)) {
1204 		synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
1205 		ret = -EINVAL;
1206 		goto out;
1207 	}
1208 
1209 	event = find_synth_event(name);
1210 	if (event) {
1211 		synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
1212 		ret = -EEXIST;
1213 		goto out;
1214 	}
1215 
1216 	for (i = 0; i < argc - 1; i++) {
1217 		if (strcmp(argv[i], ";") == 0)
1218 			continue;
1219 		if (n_fields == SYNTH_FIELDS_MAX) {
1220 			synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
1221 			ret = -EINVAL;
1222 			goto err;
1223 		}
1224 
1225 		field = parse_synth_field(argc - i, &argv[i], &consumed);
1226 		if (IS_ERR(field)) {
1227 			ret = PTR_ERR(field);
1228 			goto err;
1229 		}
1230 		fields[n_fields++] = field;
1231 		i += consumed - 1;
1232 	}
1233 
1234 	if (i < argc && strcmp(argv[i], ";") != 0) {
1235 		synth_err(SYNTH_ERR_INVALID_FIELD, errpos(argv[i]));
1236 		ret = -EINVAL;
1237 		goto err;
1238 	}
1239 
1240 	event = alloc_synth_event(name, n_fields, fields);
1241 	if (IS_ERR(event)) {
1242 		ret = PTR_ERR(event);
1243 		event = NULL;
1244 		goto err;
1245 	}
1246 	ret = register_synth_event(event);
1247 	if (!ret)
1248 		dyn_event_add(&event->devent);
1249 	else
1250 		free_synth_event(event);
1251  out:
1252 	mutex_unlock(&event_mutex);
1253 
1254 	return ret;
1255  err:
1256 	for (i = 0; i < n_fields; i++)
1257 		free_synth_field(fields[i]);
1258 
1259 	goto out;
1260 }
1261 
1262 /**
1263  * synth_event_create - Create a new synthetic event
1264  * @name: The name of the new synthetic event
1265  * @fields: An array of type/name field descriptions
1266  * @n_fields: The number of field descriptions contained in the fields array
1267  * @mod: The module creating the event, NULL if not created from a module
1268  *
1269  * Create a new synthetic event with the given name under the
1270  * trace/events/synthetic/ directory.  The event fields that will be
1271  * defined for the event should be passed in as an array of struct
1272  * synth_field_desc, and the number of elements in the array passed in as
1273  * n_fields. Field ordering will retain the ordering given in the
1274  * fields array.
1275  *
1276  * If the new synthetic event is being created from a module, the mod
1277  * param must be non-NULL.  This will ensure that the trace buffer
1278  * won't contain unreadable events.
1279  *
1280  * The new synth event should be deleted using synth_event_delete()
1281  * function.  The new synthetic event can be generated from modules or
1282  * other kernel code using synth_event_trace() and related functions.
1283  *
1284  * Return: 0 if successful, error otherwise.
1285  */
1286 int synth_event_create(const char *name, struct synth_field_desc *fields,
1287 		       unsigned int n_fields, struct module *mod)
1288 {
1289 	struct dynevent_cmd cmd;
1290 	char *buf;
1291 	int ret;
1292 
1293 	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1294 	if (!buf)
1295 		return -ENOMEM;
1296 
1297 	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1298 
1299 	ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1300 					      fields, n_fields);
1301 	if (ret)
1302 		goto out;
1303 
1304 	ret = synth_event_gen_cmd_end(&cmd);
1305  out:
1306 	kfree(buf);
1307 
1308 	return ret;
1309 }
1310 EXPORT_SYMBOL_GPL(synth_event_create);
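
/*
 * A minimal sketch (hypothetical names) of the all-in-one interface:
 * describe the fields in an array, create the event with a single
 * synth_event_create() call, and remove it again with
 * synth_event_delete() when done.
 */
static int __maybe_unused synth_create_example(void)
{
	static struct synth_field_desc fields[] = {
		{ .type = "u64",	.name = "delta_ns" },
		{ .type = "pid_t",	.name = "pid" },
		{ .type = "char[]",	.name = "comm" },	/* dynamic string */
	};
	int ret;

	/* Appears as trace/events/synthetic/example_delta */
	ret = synth_event_create("example_delta", fields, ARRAY_SIZE(fields),
				 THIS_MODULE);
	if (ret)
		return ret;

	/* ... generate instances with synth_event_trace() and friends ... */

	return synth_event_delete("example_delta");
}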
1311 
1312 static int destroy_synth_event(struct synth_event *se)
1313 {
1314 	int ret;
1315 
1316 	if (se->ref)
1317 		ret = -EBUSY;
1318 	else {
1319 		ret = unregister_synth_event(se);
1320 		if (!ret) {
1321 			dyn_event_remove(&se->devent);
1322 			free_synth_event(se);
1323 		}
1324 	}
1325 
1326 	return ret;
1327 }
1328 
1329 /**
1330  * synth_event_delete - Delete a synthetic event
1331  * @event_name: The name of the synthetic event to delete
1332  *
1333  * Delete a synthetic event that was created with synth_event_create().
1334  *
1335  * Return: 0 if successful, error otherwise.
1336  */
1337 int synth_event_delete(const char *event_name)
1338 {
1339 	struct synth_event *se = NULL;
1340 	struct module *mod = NULL;
1341 	int ret = -ENOENT;
1342 
1343 	mutex_lock(&event_mutex);
1344 	se = find_synth_event(event_name);
1345 	if (se) {
1346 		mod = se->mod;
1347 		ret = destroy_synth_event(se);
1348 	}
1349 	mutex_unlock(&event_mutex);
1350 
1351 	if (mod) {
1352 		mutex_lock(&trace_types_lock);
1353 		/*
1354 		 * It is safest to reset the ring buffer if the module
1355 		 * being unloaded registered any events that were
1356 		 * used. The only worry is if a new module gets
1357 		 * loaded, and takes on the same id as the events of
1358 		 * this module. When printing out the buffer, traced
1359 		 * events left over from this module may be passed to
1360 		 * the new module events and unexpected results may
1361 		 * occur.
1362 		 */
1363 		tracing_reset_all_online_cpus();
1364 		mutex_unlock(&trace_types_lock);
1365 	}
1366 
1367 	return ret;
1368 }
1369 EXPORT_SYMBOL_GPL(synth_event_delete);
1370 
1371 static int create_or_delete_synth_event(int argc, char **argv)
1372 {
1373 	const char *name = argv[0];
1374 	int ret;
1375 
1376 	/* trace_run_command() ensures argc != 0 */
1377 	if (name[0] == '!') {
1378 		ret = synth_event_delete(name + 1);
1379 		return ret;
1380 	}
1381 
1382 	ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
1383 	return ret == -ECANCELED ? -EINVAL : ret;
1384 }
1385 
1386 static int synth_event_run_command(struct dynevent_cmd *cmd)
1387 {
1388 	struct synth_event *se;
1389 	int ret;
1390 
1391 	ret = trace_run_command(cmd->seq.buffer, create_or_delete_synth_event);
1392 	if (ret)
1393 		return ret;
1394 
1395 	se = find_synth_event(cmd->event_name);
1396 	if (WARN_ON(!se))
1397 		return -ENOENT;
1398 
1399 	se->mod = cmd->private_data;
1400 
1401 	return ret;
1402 }
1403 
1404 /**
1405  * synth_event_cmd_init - Initialize a synthetic event command object
1406  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1407  * @buf: A pointer to the buffer used to build the command
1408  * @maxlen: The length of the buffer passed in @buf
1409  *
1410  * Initialize a synthetic event command object.  Use this before
1411  * calling any of the other dynevent_cmd functions.
1412  */
1413 void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1414 {
1415 	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1416 			  synth_event_run_command);
1417 }
1418 EXPORT_SYMBOL_GPL(synth_event_cmd_init);
1419 
1420 static inline int
1421 __synth_event_trace_init(struct trace_event_file *file,
1422 			 struct synth_event_trace_state *trace_state)
1423 {
1424 	int ret = 0;
1425 
1426 	memset(trace_state, '\0', sizeof(*trace_state));
1427 
1428 	/*
1429 	 * Normal event tracing doesn't get called at all unless the
1430 	 * ENABLED bit is set (which attaches the probe thus allowing
1431 	 * this code to be called, etc).  Because this is called
1432 	 * directly by the user, we don't have that but we still need
1433 	 * to honor not logging when disabled.  For the iterated
1434  * trace case, we save the enabled state upon start and just
1435 	 * ignore the following data calls.
1436 	 */
1437 	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1438 	    trace_trigger_soft_disabled(file)) {
1439 		trace_state->disabled = true;
1440 		ret = -ENOENT;
1441 		goto out;
1442 	}
1443 
1444 	trace_state->event = file->event_call->data;
1445 out:
1446 	return ret;
1447 }
1448 
1449 static inline int
1450 __synth_event_trace_start(struct trace_event_file *file,
1451 			  struct synth_event_trace_state *trace_state,
1452 			  int dynamic_fields_size)
1453 {
1454 	int entry_size, fields_size = 0;
1455 	int ret = 0;
1456 
1457 	fields_size = trace_state->event->n_u64 * sizeof(u64);
1458 	fields_size += dynamic_fields_size;
1459 
1460 	/*
1461 	 * Avoid ring buffer recursion detection, as this event
1462 	 * is being performed within another event.
1463 	 */
1464 	trace_state->buffer = file->tr->array_buffer.buffer;
1465 	ring_buffer_nest_start(trace_state->buffer);
1466 
1467 	entry_size = sizeof(*trace_state->entry) + fields_size;
1468 	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1469 							file,
1470 							entry_size);
1471 	if (!trace_state->entry) {
1472 		ring_buffer_nest_end(trace_state->buffer);
1473 		ret = -EINVAL;
1474 	}
1475 
1476 	return ret;
1477 }
1478 
1479 static inline void
1480 __synth_event_trace_end(struct synth_event_trace_state *trace_state)
1481 {
1482 	trace_event_buffer_commit(&trace_state->fbuffer);
1483 
1484 	ring_buffer_nest_end(trace_state->buffer);
1485 }
1486 
1487 /**
1488  * synth_event_trace - Trace a synthetic event
1489  * @file: The trace_event_file representing the synthetic event
1490  * @n_vals: The number of values in vals
1491  * @args: Variable number of args containing the event values
1492  *
1493  * Trace a synthetic event using the values passed in the variable
1494  * argument list.
1495  *
1496  * The argument list should be a list of 'n_vals' u64 values.  The number
1497  * of vals must match the number of fields in the synthetic event, and
1498  * must be in the same order as the synthetic event fields.
1499  *
1500  * All vals should be cast to u64, and string vals are just pointers
1501  * to strings, cast to u64.  Strings will be copied into space
1502  * reserved in the event for the string, using these pointers.
1503  *
1504  * Return: 0 on success, err otherwise.
1505  */
1506 int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1507 {
1508 	unsigned int i, n_u64, len, data_size = 0;
1509 	struct synth_event_trace_state state;
1510 	va_list args;
1511 	int ret;
1512 
1513 	ret = __synth_event_trace_init(file, &state);
1514 	if (ret) {
1515 		if (ret == -ENOENT)
1516 			ret = 0; /* just disabled, not really an error */
1517 		return ret;
1518 	}
1519 
1520 	if (state.event->n_dynamic_fields) {
1521 		va_start(args, n_vals);
1522 
1523 		for (i = 0; i < state.event->n_fields; i++) {
1524 			u64 val = va_arg(args, u64);
1525 
1526 			if (state.event->fields[i]->is_string &&
1527 			    state.event->fields[i]->is_dynamic) {
1528 				char *str_val = (char *)(long)val;
1529 
1530 				data_size += strlen(str_val) + 1;
1531 			}
1532 		}
1533 
1534 		va_end(args);
1535 	}
1536 
1537 	ret = __synth_event_trace_start(file, &state, data_size);
1538 	if (ret)
1539 		return ret;
1540 
1541 	if (n_vals != state.event->n_fields) {
1542 		ret = -EINVAL;
1543 		goto out;
1544 	}
1545 
1546 	data_size = 0;
1547 
1548 	va_start(args, n_vals);
1549 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1550 		u64 val;
1551 
1552 		val = va_arg(args, u64);
1553 
1554 		if (state.event->fields[i]->is_string) {
1555 			char *str_val = (char *)(long)val;
1556 
1557 			len = trace_string(state.entry, state.event, str_val,
1558 					   state.event->fields[i]->is_dynamic,
1559 					   data_size, &n_u64);
1560 			data_size += len; /* only dynamic strings increment data_size */
1561 		} else {
1562 			struct synth_field *field = state.event->fields[i];
1563 
1564 			switch (field->size) {
1565 			case 1:
1566 				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
1567 				break;
1568 
1569 			case 2:
1570 				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
1571 				break;
1572 
1573 			case 4:
1574 				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
1575 				break;
1576 
1577 			default:
1578 				state.entry->fields[n_u64] = val;
1579 				break;
1580 			}
1581 			n_u64++;
1582 		}
1583 	}
1584 	va_end(args);
1585 out:
1586 	__synth_event_trace_end(&state);
1587 
1588 	return ret;
1589 }
1590 EXPORT_SYMBOL_GPL(synth_event_trace);
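
/*
 * A usage sketch for the event created in the sketch above: one u64 per
 * field, in field order, with string values passed as pointers cast to
 * u64.  The trace_event_file would typically be looked up once with
 * trace_get_event_file(NULL, "synthetic", "example_delta") and released
 * with trace_put_event_file(), both declared in <linux/trace_events.h>.
 */
static int __maybe_unused synth_trace_varargs_example(struct trace_event_file *file)
{
	return synth_event_trace(file, 3,		/* number of values */
				 (u64)1000,		/* delta_ns */
				 (u64)1234,		/* pid */
				 (u64)(long)"example");	/* comm (string) */
}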
1591 
1592 /**
1593  * synth_event_trace_array - Trace a synthetic event from an array
1594  * @file: The trace_event_file representing the synthetic event
1595  * @vals: Array of values
1596  * @n_vals: The number of values in vals
1597  *
1598  * Trace a synthetic event using the values passed in as 'vals'.
1599  *
1600  * The 'vals' array is just an array of 'n_vals' u64.  The number of
1601  * vals must match the number of fields in the synthetic event, and
1602  * must be in the same order as the synthetic event fields.
1603  *
1604  * All vals should be cast to u64, and string vals are just pointers
1605  * to strings, cast to u64.  Strings will be copied into space
1606  * reserved in the event for the string, using these pointers.
1607  *
1608  * Return: 0 on success, err otherwise.
1609  */
1610 int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1611 			    unsigned int n_vals)
1612 {
1613 	unsigned int i, n_u64, field_pos, len, data_size = 0;
1614 	struct synth_event_trace_state state;
1615 	char *str_val;
1616 	int ret;
1617 
1618 	ret = __synth_event_trace_init(file, &state);
1619 	if (ret) {
1620 		if (ret == -ENOENT)
1621 			ret = 0; /* just disabled, not really an error */
1622 		return ret;
1623 	}
1624 
1625 	if (state.event->n_dynamic_fields) {
1626 		for (i = 0; i < state.event->n_dynamic_fields; i++) {
1627 			field_pos = state.event->dynamic_fields[i]->field_pos;
1628 			str_val = (char *)(long)vals[field_pos];
1629 			len = strlen(str_val) + 1;
1630 			data_size += len;
1631 		}
1632 	}
1633 
1634 	ret = __synth_event_trace_start(file, &state, data_size);
1635 	if (ret)
1636 		return ret;
1637 
1638 	if (n_vals != state.event->n_fields) {
1639 		ret = -EINVAL;
1640 		goto out;
1641 	}
1642 
1643 	data_size = 0;
1644 
1645 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1646 		if (state.event->fields[i]->is_string) {
1647 			char *str_val = (char *)(long)vals[i];
1648 
1649 			len = trace_string(state.entry, state.event, str_val,
1650 					   state.event->fields[i]->is_dynamic,
1651 					   data_size, &n_u64);
1652 			data_size += len; /* only dynamic strings increment data_size */
1653 		} else {
1654 			struct synth_field *field = state.event->fields[i];
1655 			u64 val = vals[i];
1656 
1657 			switch (field->size) {
1658 			case 1:
1659 				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
1660 				break;
1661 
1662 			case 2:
1663 				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
1664 				break;
1665 
1666 			case 4:
1667 				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
1668 				break;
1669 
1670 			default:
1671 				state.entry->fields[n_u64] = val;
1672 				break;
1673 			}
1674 			n_u64++;
1675 		}
1676 	}
1677 out:
1678 	__synth_event_trace_end(&state);
1679 
1680 	return ret;
1681 }
1682 EXPORT_SYMBOL_GPL(synth_event_trace_array);
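
/*
 * The same trace, sketched with the array variant: all field values are
 * packed into a u64 array in field order (names hypothetical).
 */
static int __maybe_unused synth_trace_array_example(struct trace_event_file *file)
{
	u64 vals[3];

	vals[0] = 2000;				/* delta_ns */
	vals[1] = 1234;				/* pid */
	vals[2] = (u64)(long)"example";		/* comm (string pointer) */

	return synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
}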
1683 
1684 /**
1685  * synth_event_trace_start - Start piecewise synthetic event trace
1686  * @file: The trace_event_file representing the synthetic event
1687  * @trace_state: A pointer to object tracking the piecewise trace state
1688  *
1689  * Start the trace of a synthetic event field-by-field rather than all
1690  * at once.
1691  *
1692  * This function 'opens' an event trace, which means space is reserved
1693  * for the event in the trace buffer, after which the event's
1694  * individual field values can be set through either
1695  * synth_event_add_next_val() or synth_event_add_val().
1696  *
1697  * A pointer to a trace_state object is passed in, which will keep
1698  * track of the current event trace state until the event trace is
1699  * closed (and the event finally traced) using
1700  * synth_event_trace_end().
1701  *
1702  * Note that synth_event_trace_end() must be called after all values
1703  * have been added for each event trace, regardless of whether adding
1704  * all field values succeeded or not.
1705  *
1706  * Note also that for a given event trace, all fields must be added
1707  * using either synth_event_add_next_val() or synth_event_add_val()
1708  * but not both together or interleaved.
1709  *
1710  * Return: 0 on success, err otherwise.
1711  */
1712 int synth_event_trace_start(struct trace_event_file *file,
1713 			    struct synth_event_trace_state *trace_state)
1714 {
1715 	int ret;
1716 
1717 	if (!trace_state)
1718 		return -EINVAL;
1719 
1720 	ret = __synth_event_trace_init(file, trace_state);
1721 	if (ret) {
1722 		if (ret == -ENOENT)
1723 			ret = 0; /* just disabled, not really an error */
1724 		return ret;
1725 	}
1726 
1727 	if (trace_state->event->n_dynamic_fields)
1728 		return -ENOTSUPP;
1729 
1730 	ret = __synth_event_trace_start(file, trace_state, 0);
1731 
1732 	return ret;
1733 }
1734 EXPORT_SYMBOL_GPL(synth_event_trace_start);
1735 
1736 static int __synth_event_add_val(const char *field_name, u64 val,
1737 				 struct synth_event_trace_state *trace_state)
1738 {
1739 	struct synth_field *field = NULL;
1740 	struct synth_trace_event *entry;
1741 	struct synth_event *event;
1742 	int i, ret = 0;
1743 
1744 	if (!trace_state) {
1745 		ret = -EINVAL;
1746 		goto out;
1747 	}
1748 
1749 	/* can't mix synth_event_add_next_val() with synth_event_add_val() */
1750 	if (field_name) {
1751 		if (trace_state->add_next) {
1752 			ret = -EINVAL;
1753 			goto out;
1754 		}
1755 		trace_state->add_name = true;
1756 	} else {
1757 		if (trace_state->add_name) {
1758 			ret = -EINVAL;
1759 			goto out;
1760 		}
1761 		trace_state->add_next = true;
1762 	}
1763 
1764 	if (trace_state->disabled)
1765 		goto out;
1766 
1767 	event = trace_state->event;
1768 	if (trace_state->add_name) {
1769 		for (i = 0; i < event->n_fields; i++) {
1770 			field = event->fields[i];
1771 			if (strcmp(field->name, field_name) == 0)
1772 				break;
1773 		}
1774 		if (!field) {
1775 			ret = -EINVAL;
1776 			goto out;
1777 		}
1778 	} else {
1779 		if (trace_state->cur_field >= event->n_fields) {
1780 			ret = -EINVAL;
1781 			goto out;
1782 		}
1783 		field = event->fields[trace_state->cur_field++];
1784 	}
1785 
1786 	entry = trace_state->entry;
1787 	if (field->is_string) {
1788 		char *str_val = (char *)(long)val;
1789 		char *str_field;
1790 
1791 		if (field->is_dynamic) { /* add_val can't do dynamic strings */
1792 			ret = -EINVAL;
1793 			goto out;
1794 		}
1795 
1796 		if (!str_val) {
1797 			ret = -EINVAL;
1798 			goto out;
1799 		}
1800 
1801 		str_field = (char *)&entry->fields[field->offset];
1802 		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
1803 	} else {
1804 		switch (field->size) {
1805 		case 1:
1806 			*(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
1807 			break;
1808 
1809 		case 2:
1810 			*(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
1811 			break;
1812 
1813 		case 4:
1814 			*(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
1815 			break;
1816 
1817 		default:
1818 			trace_state->entry->fields[field->offset] = val;
1819 			break;
1820 		}
1821 	}
1822  out:
1823 	return ret;
1824 }
1825 
1826 /**
1827  * synth_event_add_next_val - Add the next field's value to an open synth trace
1828  * @val: The value to set the next field to
1829  * @trace_state: A pointer to object tracking the piecewise trace state
1830  *
1831  * Set the value of the next field in an event that's been opened by
1832  * synth_event_trace_start().
1833  *
1834  * The val param should be the value cast to u64.  If the value points
1835  * to a string, the val param should be a char * cast to u64.
1836  *
1837  * This function assumes all the fields in an event are to be set one
1838  * after another - successive calls to this function are made, one for
1839  * each field, in the order of the fields in the event, until all
1840  * fields have been set.  If you'd rather set each field individually
1841  * without regard to ordering, synth_event_add_val() can be used
1842  * instead.
1843  *
1844  * Note however that synth_event_add_next_val() and
1845  * synth_event_add_val() can't be intermixed for a given event trace -
1846  * one or the other but not both can be used at the same time.
1847  *
1848  * Note also that synth_event_trace_end() must be called after all
1849  * values have been added for each event trace, regardless of whether
1850  * adding all field values succeeded or not.
1851  *
1852  * Return: 0 on success, err otherwise.
1853  */
1854 int synth_event_add_next_val(u64 val,
1855 			     struct synth_event_trace_state *trace_state)
1856 {
1857 	return __synth_event_add_val(NULL, val, trace_state);
1858 }
1859 EXPORT_SYMBOL_GPL(synth_event_add_next_val);
1860 
1861 /**
1862  * synth_event_add_val - Add a named field's value to an open synth trace
1863  * @field_name: The name of the synthetic event field value to set
1864  * @val: The value to set the next field to
1865  * @trace_state: A pointer to object tracking the piecewise trace state
1866  *
1867  * Set the value of the named field in an event that's been opened by
1868  * synth_event_trace_start().
1869  *
1870  * The val param should be the value cast to u64.  If the value points
1871  * to a string, the val param should be a char * cast to u64.
1872  *
1873  * This function looks up the field name, and if found, sets the field
1874  * to the specified value.  This lookup makes this function more
1875  * expensive than synth_event_add_next_val(), so use that or the
1876  * non-piecewise synth_event_trace() instead if efficiency is more
1877  * important.
1878  *
1879  * Note however that synth_event_add_next_val() and
1880  * synth_event_add_val() can't be intermixed for a given event trace -
1881  * one or the other but not both can be used at the same time.
1882  *
1883  * Note also that synth_event_trace_end() must be called after all
1884  * values have been added for each event trace, regardless of whether
1885  * adding all field values succeeded or not.
1886  *
1887  * Return: 0 on success, err otherwise.
1888  */
1889 int synth_event_add_val(const char *field_name, u64 val,
1890 			struct synth_event_trace_state *trace_state)
1891 {
1892 	return __synth_event_add_val(field_name, val, trace_state);
1893 }
1894 EXPORT_SYMBOL_GPL(synth_event_add_val);
1895 
1896 /**
1897  * synth_event_trace_end - End piecewise synthetic event trace
1898  * @trace_state: A pointer to object tracking the piecewise trace state
1899  *
1900  * End the trace of a synthetic event opened by
1901  * synth_event_trace_start().
1902  *
1903  * This function 'closes' an event trace, which basically means that
1904  * it commits the reserved event and cleans up other loose ends.
1905  *
1906  * A pointer to a trace_state object is passed in, which will keep
1907  * track of the current event trace state opened with
1908  * synth_event_trace_start().
1909  *
1910  * Note that this function must be called after all values have been
1911  * added for each event trace, regardless of whether adding all field
1912  * values succeeded or not.
1913  *
1914  * Return: 0 on success, err otherwise.
1915  */
1916 int synth_event_trace_end(struct synth_event_trace_state *trace_state)
1917 {
1918 	if (!trace_state)
1919 		return -EINVAL;
1920 
1921 	__synth_event_trace_end(trace_state);
1922 
1923 	return 0;
1924 }
1925 EXPORT_SYMBOL_GPL(synth_event_trace_end);
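
/*
 * A sketch of the piecewise interface, for an event without dynamic
 * fields (those are rejected with -ENOTSUPP) and with hypothetical field
 * names: open the trace, set each field value, then commit.
 * synth_event_trace_end() must be called even if adding a value fails.
 */
static int __maybe_unused synth_trace_piecewise_example(struct trace_event_file *file)
{
	struct synth_event_trace_state state;
	int ret;

	ret = synth_event_trace_start(file, &state);
	if (ret)
		return ret;

	/*
	 * Set fields in order with synth_event_add_next_val(), or by name
	 * with synth_event_add_val(); the two styles can't be mixed within
	 * a single trace.
	 */
	ret = synth_event_add_next_val(3000, &state);	/* delta_ns */
	if (ret)
		goto out;

	ret = synth_event_add_next_val(1234, &state);	/* pid */
out:
	/* Always close the trace, even on error */
	synth_event_trace_end(&state);

	return ret;
}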
1926 
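/*
 * dyn_event callback handling the 's:' prefix of the tracefs
 * dynamic_events interface, e.g. (illustrative):
 *
 *   echo 's:example_delta u64 delta_ns; pid_t pid' >> dynamic_events
 *
 * An optional "synthetic/" group prefix is accepted; any other group
 * is rejected.
 */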
1927 static int create_synth_event(int argc, const char **argv)
1928 {
1929 	const char *name = argv[0];
1930 	int len;
1931 
1932 	if (name[0] != 's' || name[1] != ':')
1933 		return -ECANCELED;
1934 	name += 2;
1935 
1936 	/* This interface accepts group name prefix */
1937 	if (strchr(name, '/')) {
1938 		len = str_has_prefix(name, SYNTH_SYSTEM "/");
1939 		if (len == 0)
1940 			return -EINVAL;
1941 		name += len;
1942 	}
1943 	return __create_synth_event(argc - 1, name, argv + 1);
1944 }
1945 
1946 static int synth_event_release(struct dyn_event *ev)
1947 {
1948 	struct synth_event *event = to_synth_event(ev);
1949 	int ret;
1950 
1951 	if (event->ref)
1952 		return -EBUSY;
1953 
1954 	ret = unregister_synth_event(event);
1955 	if (ret)
1956 		return ret;
1957 
1958 	dyn_event_remove(ev);
1959 	free_synth_event(event);
1960 	return 0;
1961 }
1962 
1963 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
1964 {
1965 	struct synth_field *field;
1966 	unsigned int i;
1967 	char *type, *t;
1968 
1969 	seq_printf(m, "%s\t", event->name);
1970 
1971 	for (i = 0; i < event->n_fields; i++) {
1972 		field = event->fields[i];
1973 
1974 		type = field->type;
1975 		t = strstr(type, "__data_loc");
1976 		if (t) { /* __data_loc belongs in format but not event desc */
1977 			t += sizeof("__data_loc");
1978 			type = t;
1979 		}
1980 
1981 		/* parameter values */
1982 		seq_printf(m, "%s %s%s", type, field->name,
1983 			   i == event->n_fields - 1 ? "" : "; ");
1984 	}
1985 
1986 	seq_putc(m, '\n');
1987 
1988 	return 0;
1989 }
1990 
1991 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
1992 {
1993 	struct synth_event *event = to_synth_event(ev);
1994 
1995 	seq_printf(m, "s:%s/", event->class.system);
1996 
1997 	return __synth_event_show(m, event);
1998 }
1999 
2000 static int synth_events_seq_show(struct seq_file *m, void *v)
2001 {
2002 	struct dyn_event *ev = v;
2003 
2004 	if (!is_synth_event(ev))
2005 		return 0;
2006 
2007 	return __synth_event_show(m, to_synth_event(ev));
2008 }
2009 
2010 static const struct seq_operations synth_events_seq_op = {
2011 	.start	= dyn_event_seq_start,
2012 	.next	= dyn_event_seq_next,
2013 	.stop	= dyn_event_seq_stop,
2014 	.show	= synth_events_seq_show,
2015 };
2016 
2017 static int synth_events_open(struct inode *inode, struct file *file)
2018 {
2019 	int ret;
2020 
2021 	ret = security_locked_down(LOCKDOWN_TRACEFS);
2022 	if (ret)
2023 		return ret;
2024 
2025 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
2026 		ret = dyn_events_release_all(&synth_event_ops);
2027 		if (ret < 0)
2028 			return ret;
2029 	}
2030 
2031 	return seq_open(file, &synth_events_seq_op);
2032 }
2033 
2034 static ssize_t synth_events_write(struct file *file,
2035 				  const char __user *buffer,
2036 				  size_t count, loff_t *ppos)
2037 {
2038 	return trace_parse_run_command(file, buffer, count, ppos,
2039 				       create_or_delete_synth_event);
2040 }
2041 
2042 static const struct file_operations synth_events_fops = {
2043 	.open           = synth_events_open,
2044 	.write		= synth_events_write,
2045 	.read           = seq_read,
2046 	.llseek         = seq_lseek,
2047 	.release        = seq_release,
2048 };
2049 
2050 /*
2051  * Register the dynevent ops at core_initcall. This allows the kernel to
2052  * set up synthetic events in postcore_initcall without tracefs.
2053  */
2054 static __init int trace_events_synth_init_early(void)
2055 {
2056 	int err = 0;
2057 
2058 	err = dyn_event_register(&synth_event_ops);
2059 	if (err)
2060 		pr_warn("Could not register synth_event_ops\n");
2061 
2062 	return err;
2063 }
2064 core_initcall(trace_events_synth_init_early);
2065 
2066 static __init int trace_events_synth_init(void)
2067 {
2068 	struct dentry *entry = NULL;
2069 	int err = 0;
2070 	err = tracing_init_dentry();
2071 	if (err)
2072 		goto err;
2073 
2074 	entry = tracefs_create_file("synthetic_events", 0644, NULL,
2075 				    NULL, &synth_events_fops);
2076 	if (!entry) {
2077 		err = -ENODEV;
2078 		goto err;
2079 	}
2080 
2081 	return err;
2082  err:
2083 	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
2084 
2085 	return err;
2086 }
2087 
2088 fs_initcall(trace_events_synth_init);
2089