xref: /linux/kernel/trace/trace_events_synth.c (revision 79790b6818e96c58fe2bffee1b418c16e64e7b80)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_synth - synthetic trace events
4  *
5  * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16 
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 #include "trace_probe.h"
21 #include "trace_probe_kernel.h"
22 
23 #include "trace_synth.h"
24 
25 #undef ERRORS
26 #define ERRORS	\
27 	C(BAD_NAME,		"Illegal name"),		\
28 	C(INVALID_CMD,		"Command must be of the form: <name> field[;field] ..."),\
29 	C(INVALID_DYN_CMD,	"Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
30 	C(EVENT_EXISTS,		"Event already exists"),	\
31 	C(TOO_MANY_FIELDS,	"Too many fields"),		\
32 	C(INCOMPLETE_TYPE,	"Incomplete type"),		\
33 	C(INVALID_TYPE,		"Invalid type"),		\
34 	C(INVALID_FIELD,        "Invalid field"),		\
35 	C(INVALID_ARRAY_SPEC,	"Invalid array specification"),
36 
37 #undef C
38 #define C(a, b)		SYNTH_ERR_##a
39 
40 enum { ERRORS };
41 
42 #undef C
43 #define C(a, b)		b
44 
45 static const char *err_text[] = { ERRORS };
46 
47 static DEFINE_MUTEX(lastcmd_mutex);
48 static char *last_cmd;
49 
50 static int errpos(const char *str)
51 {
52 	int ret = 0;
53 
54 	mutex_lock(&lastcmd_mutex);
55 	if (!str || !last_cmd)
56 		goto out;
57 
58 	ret = err_pos(last_cmd, str);
59  out:
60 	mutex_unlock(&lastcmd_mutex);
61 	return ret;
62 }
63 
64 static void last_cmd_set(const char *str)
65 {
66 	if (!str)
67 		return;
68 
69 	mutex_lock(&lastcmd_mutex);
70 	kfree(last_cmd);
71 	last_cmd = kstrdup(str, GFP_KERNEL);
72 	mutex_unlock(&lastcmd_mutex);
73 }
74 
75 static void synth_err(u8 err_type, u16 err_pos)
76 {
77 	mutex_lock(&lastcmd_mutex);
78 	if (!last_cmd)
79 		goto out;
80 
81 	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
82 			err_type, err_pos);
83  out:
84 	mutex_unlock(&lastcmd_mutex);
85 }
86 
87 static int create_synth_event(const char *raw_command);
88 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
89 static int synth_event_release(struct dyn_event *ev);
90 static bool synth_event_is_busy(struct dyn_event *ev);
91 static bool synth_event_match(const char *system, const char *event,
92 			int argc, const char **argv, struct dyn_event *ev);
93 
94 static struct dyn_event_operations synth_event_ops = {
95 	.create = create_synth_event,
96 	.show = synth_event_show,
97 	.is_busy = synth_event_is_busy,
98 	.free = synth_event_release,
99 	.match = synth_event_match,
100 };
101 
102 static bool is_synth_event(struct dyn_event *ev)
103 {
104 	return ev->ops == &synth_event_ops;
105 }
106 
107 static struct synth_event *to_synth_event(struct dyn_event *ev)
108 {
109 	return container_of(ev, struct synth_event, devent);
110 }
111 
112 static bool synth_event_is_busy(struct dyn_event *ev)
113 {
114 	struct synth_event *event = to_synth_event(ev);
115 
116 	return event->ref != 0;
117 }
118 
119 static bool synth_event_match(const char *system, const char *event,
120 			int argc, const char **argv, struct dyn_event *ev)
121 {
122 	struct synth_event *sev = to_synth_event(ev);
123 
124 	return strcmp(sev->name, event) == 0 &&
125 		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
126 }
127 
128 struct synth_trace_event {
129 	struct trace_entry	ent;
130 	union trace_synth_field	fields[];
131 };
132 
133 static int synth_event_define_fields(struct trace_event_call *call)
134 {
135 	struct synth_trace_event trace;
136 	int offset = offsetof(typeof(trace), fields);
137 	struct synth_event *event = call->data;
138 	unsigned int i, size, n_u64;
139 	char *name, *type;
140 	bool is_signed;
141 	int ret = 0;
142 
143 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
144 		size = event->fields[i]->size;
145 		is_signed = event->fields[i]->is_signed;
146 		type = event->fields[i]->type;
147 		name = event->fields[i]->name;
148 		ret = trace_define_field(call, type, name, offset, size,
149 					 is_signed, FILTER_OTHER);
150 		if (ret)
151 			break;
152 
153 		event->fields[i]->offset = n_u64;
154 
155 		if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
156 			offset += STR_VAR_LEN_MAX;
157 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
158 		} else {
159 			offset += sizeof(u64);
160 			n_u64++;
161 		}
162 	}
163 
164 	event->n_u64 = n_u64;
165 
166 	return ret;
167 }
168 
169 static bool synth_field_signed(char *type)
170 {
171 	if (str_has_prefix(type, "u"))
172 		return false;
173 	if (strcmp(type, "gfp_t") == 0)
174 		return false;
175 
176 	return true;
177 }
178 
179 static int synth_field_is_string(char *type)
180 {
181 	if (strstr(type, "char[") != NULL)
182 		return true;
183 
184 	return false;
185 }
186 
187 static int synth_field_is_stack(char *type)
188 {
189 	if (strstr(type, "long[") != NULL)
190 		return true;
191 
192 	return false;
193 }
194 
195 static int synth_field_string_size(char *type)
196 {
197 	char buf[4], *end, *start;
198 	unsigned int len;
199 	int size, err;
200 
201 	start = strstr(type, "char[");
202 	if (start == NULL)
203 		return -EINVAL;
204 	start += sizeof("char[") - 1;
205 
206 	end = strchr(type, ']');
207 	if (!end || end < start || type + strlen(type) > end + 1)
208 		return -EINVAL;
209 
210 	len = end - start;
211 	if (len > 3)
212 		return -EINVAL;
213 
214 	if (len == 0)
215 		return 0; /* variable-length string */
216 
217 	strncpy(buf, start, len);
218 	buf[len] = '\0';
219 
220 	err = kstrtouint(buf, 0, &size);
221 	if (err)
222 		return err;
223 
224 	if (size > STR_VAR_LEN_MAX)
225 		return -EINVAL;
226 
227 	return size;
228 }
229 
230 static int synth_field_size(char *type)
231 {
232 	int size = 0;
233 
234 	if (strcmp(type, "s64") == 0)
235 		size = sizeof(s64);
236 	else if (strcmp(type, "u64") == 0)
237 		size = sizeof(u64);
238 	else if (strcmp(type, "s32") == 0)
239 		size = sizeof(s32);
240 	else if (strcmp(type, "u32") == 0)
241 		size = sizeof(u32);
242 	else if (strcmp(type, "s16") == 0)
243 		size = sizeof(s16);
244 	else if (strcmp(type, "u16") == 0)
245 		size = sizeof(u16);
246 	else if (strcmp(type, "s8") == 0)
247 		size = sizeof(s8);
248 	else if (strcmp(type, "u8") == 0)
249 		size = sizeof(u8);
250 	else if (strcmp(type, "char") == 0)
251 		size = sizeof(char);
252 	else if (strcmp(type, "unsigned char") == 0)
253 		size = sizeof(unsigned char);
254 	else if (strcmp(type, "int") == 0)
255 		size = sizeof(int);
256 	else if (strcmp(type, "unsigned int") == 0)
257 		size = sizeof(unsigned int);
258 	else if (strcmp(type, "long") == 0)
259 		size = sizeof(long);
260 	else if (strcmp(type, "unsigned long") == 0)
261 		size = sizeof(unsigned long);
262 	else if (strcmp(type, "bool") == 0)
263 		size = sizeof(bool);
264 	else if (strcmp(type, "pid_t") == 0)
265 		size = sizeof(pid_t);
266 	else if (strcmp(type, "gfp_t") == 0)
267 		size = sizeof(gfp_t);
268 	else if (synth_field_is_string(type))
269 		size = synth_field_string_size(type);
270 	else if (synth_field_is_stack(type))
271 		size = 0;
272 
273 	return size;
274 }
275 
276 static const char *synth_field_fmt(char *type)
277 {
278 	const char *fmt = "%llu";
279 
280 	if (strcmp(type, "s64") == 0)
281 		fmt = "%lld";
282 	else if (strcmp(type, "u64") == 0)
283 		fmt = "%llu";
284 	else if (strcmp(type, "s32") == 0)
285 		fmt = "%d";
286 	else if (strcmp(type, "u32") == 0)
287 		fmt = "%u";
288 	else if (strcmp(type, "s16") == 0)
289 		fmt = "%d";
290 	else if (strcmp(type, "u16") == 0)
291 		fmt = "%u";
292 	else if (strcmp(type, "s8") == 0)
293 		fmt = "%d";
294 	else if (strcmp(type, "u8") == 0)
295 		fmt = "%u";
296 	else if (strcmp(type, "char") == 0)
297 		fmt = "%d";
298 	else if (strcmp(type, "unsigned char") == 0)
299 		fmt = "%u";
300 	else if (strcmp(type, "int") == 0)
301 		fmt = "%d";
302 	else if (strcmp(type, "unsigned int") == 0)
303 		fmt = "%u";
304 	else if (strcmp(type, "long") == 0)
305 		fmt = "%ld";
306 	else if (strcmp(type, "unsigned long") == 0)
307 		fmt = "%lu";
308 	else if (strcmp(type, "bool") == 0)
309 		fmt = "%d";
310 	else if (strcmp(type, "pid_t") == 0)
311 		fmt = "%d";
312 	else if (strcmp(type, "gfp_t") == 0)
313 		fmt = "%x";
314 	else if (synth_field_is_string(type))
315 		fmt = "%.*s";
316 	else if (synth_field_is_stack(type))
317 		fmt = "%s";
318 
319 	return fmt;
320 }
321 
322 static void print_synth_event_num_val(struct trace_seq *s,
323 				      char *print_fmt, char *name,
324 				      int size, union trace_synth_field *val, char *space)
325 {
326 	switch (size) {
327 	case 1:
328 		trace_seq_printf(s, print_fmt, name, val->as_u8, space);
329 		break;
330 
331 	case 2:
332 		trace_seq_printf(s, print_fmt, name, val->as_u16, space);
333 		break;
334 
335 	case 4:
336 		trace_seq_printf(s, print_fmt, name, val->as_u32, space);
337 		break;
338 
339 	default:
340 		trace_seq_printf(s, print_fmt, name, val->as_u64, space);
341 		break;
342 	}
343 }
344 
345 static enum print_line_t print_synth_event(struct trace_iterator *iter,
346 					   int flags,
347 					   struct trace_event *event)
348 {
349 	struct trace_array *tr = iter->tr;
350 	struct trace_seq *s = &iter->seq;
351 	struct synth_trace_event *entry;
352 	struct synth_event *se;
353 	unsigned int i, j, n_u64;
354 	char print_fmt[32];
355 	const char *fmt;
356 
357 	entry = (struct synth_trace_event *)iter->ent;
358 	se = container_of(event, struct synth_event, call.event);
359 
360 	trace_seq_printf(s, "%s: ", se->name);
361 
362 	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
363 		if (trace_seq_has_overflowed(s))
364 			goto end;
365 
366 		fmt = synth_field_fmt(se->fields[i]->type);
367 
368 		/* parameter types */
369 		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
370 			trace_seq_printf(s, "%s ", fmt);
371 
372 		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
373 
374 		/* parameter values */
375 		if (se->fields[i]->is_string) {
376 			if (se->fields[i]->is_dynamic) {
377 				union trace_synth_field *data = &entry->fields[n_u64];
378 
379 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
380 						 STR_VAR_LEN_MAX,
381 						 (char *)entry + data->as_dynamic.offset,
382 						 i == se->n_fields - 1 ? "" : " ");
383 				n_u64++;
384 			} else {
385 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
386 						 STR_VAR_LEN_MAX,
387 						 (char *)&entry->fields[n_u64].as_u64,
388 						 i == se->n_fields - 1 ? "" : " ");
389 				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
390 			}
391 		} else if (se->fields[i]->is_stack) {
392 			union trace_synth_field *data = &entry->fields[n_u64];
393 			unsigned long *p = (void *)entry + data->as_dynamic.offset;
394 
395 			trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name);
396 			for (j = 1; j < data->as_dynamic.len / sizeof(long); j++)
397 				trace_seq_printf(s, "=> %pS\n", (void *)p[j]);
398 			n_u64++;
399 		} else {
400 			struct trace_print_flags __flags[] = {
401 			    __def_gfpflag_names, {-1, NULL} };
402 			char *space = (i == se->n_fields - 1 ? "" : " ");
403 
404 			print_synth_event_num_val(s, print_fmt,
405 						  se->fields[i]->name,
406 						  se->fields[i]->size,
407 						  &entry->fields[n_u64],
408 						  space);
409 
410 			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
411 				trace_seq_puts(s, " (");
412 				trace_print_flags_seq(s, "|",
413 						      entry->fields[n_u64].as_u64,
414 						      __flags);
415 				trace_seq_putc(s, ')');
416 			}
417 			n_u64++;
418 		}
419 	}
420 end:
421 	trace_seq_putc(s, '\n');
422 
423 	return trace_handle_return(s);
424 }
425 
426 static struct trace_event_functions synth_event_funcs = {
427 	.trace		= print_synth_event
428 };
429 
430 static unsigned int trace_string(struct synth_trace_event *entry,
431 				 struct synth_event *event,
432 				 char *str_val,
433 				 bool is_dynamic,
434 				 unsigned int data_size,
435 				 unsigned int *n_u64)
436 {
437 	unsigned int len = 0;
438 	char *str_field;
439 	int ret;
440 
441 	if (is_dynamic) {
442 		union trace_synth_field *data = &entry->fields[*n_u64];
443 
444 		len = fetch_store_strlen((unsigned long)str_val);
445 		data->as_dynamic.offset = struct_size(entry, fields, event->n_u64) + data_size;
446 		data->as_dynamic.len = len;
447 
448 		ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);
449 
450 		(*n_u64)++;
451 	} else {
452 		str_field = (char *)&entry->fields[*n_u64].as_u64;
453 
454 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
455 		if ((unsigned long)str_val < TASK_SIZE)
456 			ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
457 		else
458 #endif
459 			ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
460 
461 		if (ret < 0)
462 			strcpy(str_field, FAULT_STRING);
463 
464 		(*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
465 	}
466 
467 	return len;
468 }
469 
470 static unsigned int trace_stack(struct synth_trace_event *entry,
471 				 struct synth_event *event,
472 				 long *stack,
473 				 unsigned int data_size,
474 				 unsigned int *n_u64)
475 {
476 	union trace_synth_field *data = &entry->fields[*n_u64];
477 	unsigned int len;
478 	u32 data_offset;
479 	void *data_loc;
480 
481 	data_offset = struct_size(entry, fields, event->n_u64);
482 	data_offset += data_size;
483 
484 	for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) {
485 		if (!stack[len])
486 			break;
487 	}
488 
489 	len *= sizeof(long);
490 
491 	/* Find the dynamic section to copy the stack into. */
492 	data_loc = (void *)entry + data_offset;
493 	memcpy(data_loc, stack, len);
494 
495 	/* Fill in the field that holds the offset/len combo */
496 
497 	data->as_dynamic.offset = data_offset;
498 	data->as_dynamic.len = len;
499 
500 	(*n_u64)++;
501 
502 	return len;
503 }
504 
505 static notrace void trace_event_raw_event_synth(void *__data,
506 						u64 *var_ref_vals,
507 						unsigned int *var_ref_idx)
508 {
509 	unsigned int i, n_u64, val_idx, len, data_size = 0;
510 	struct trace_event_file *trace_file = __data;
511 	struct synth_trace_event *entry;
512 	struct trace_event_buffer fbuffer;
513 	struct trace_buffer *buffer;
514 	struct synth_event *event;
515 	int fields_size = 0;
516 
517 	event = trace_file->event_call->data;
518 
519 	if (trace_trigger_soft_disabled(trace_file))
520 		return;
521 
522 	fields_size = event->n_u64 * sizeof(u64);
523 
524 	for (i = 0; i < event->n_dynamic_fields; i++) {
525 		unsigned int field_pos = event->dynamic_fields[i]->field_pos;
526 		char *str_val;
527 
528 		val_idx = var_ref_idx[field_pos];
529 		str_val = (char *)(long)var_ref_vals[val_idx];
530 
531 		if (event->dynamic_fields[i]->is_stack) {
532 			/* reserve one extra element for size */
533 			len = *((unsigned long *)str_val) + 1;
534 			len *= sizeof(unsigned long);
535 		} else {
536 			len = fetch_store_strlen((unsigned long)str_val);
537 		}
538 
539 		fields_size += len;
540 	}
541 
542 	/*
543 	 * Avoid ring buffer recursion detection, as this event
544 	 * is being performed within another event.
545 	 */
546 	buffer = trace_file->tr->array_buffer.buffer;
547 	ring_buffer_nest_start(buffer);
548 
549 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
550 					   sizeof(*entry) + fields_size);
551 	if (!entry)
552 		goto out;
553 
554 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
555 		val_idx = var_ref_idx[i];
556 		if (event->fields[i]->is_string) {
557 			char *str_val = (char *)(long)var_ref_vals[val_idx];
558 
559 			len = trace_string(entry, event, str_val,
560 					   event->fields[i]->is_dynamic,
561 					   data_size, &n_u64);
562 			data_size += len; /* only dynamic string increments */
563 		} else if (event->fields[i]->is_stack) {
564 			long *stack = (long *)(long)var_ref_vals[val_idx];
565 
566 			len = trace_stack(entry, event, stack,
567 					   data_size, &n_u64);
568 			data_size += len;
569 		} else {
570 			struct synth_field *field = event->fields[i];
571 			u64 val = var_ref_vals[val_idx];
572 
573 			switch (field->size) {
574 			case 1:
575 				entry->fields[n_u64].as_u8 = (u8)val;
576 				break;
577 
578 			case 2:
579 				entry->fields[n_u64].as_u16 = (u16)val;
580 				break;
581 
582 			case 4:
583 				entry->fields[n_u64].as_u32 = (u32)val;
584 				break;
585 
586 			default:
587 				entry->fields[n_u64].as_u64 = val;
588 				break;
589 			}
590 			n_u64++;
591 		}
592 	}
593 
594 	trace_event_buffer_commit(&fbuffer);
595 out:
596 	ring_buffer_nest_end(buffer);
597 }
598 
599 static void free_synth_event_print_fmt(struct trace_event_call *call)
600 {
601 	if (call) {
602 		kfree(call->print_fmt);
603 		call->print_fmt = NULL;
604 	}
605 }
606 
607 static int __set_synth_event_print_fmt(struct synth_event *event,
608 				       char *buf, int len)
609 {
610 	const char *fmt;
611 	int pos = 0;
612 	int i;
613 
614 	/* When len=0, we just calculate the needed length */
615 #define LEN_OR_ZERO (len ? len - pos : 0)
616 
617 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
618 	for (i = 0; i < event->n_fields; i++) {
619 		fmt = synth_field_fmt(event->fields[i]->type);
620 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
621 				event->fields[i]->name, fmt,
622 				i == event->n_fields - 1 ? "" : ", ");
623 	}
624 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
625 
626 	for (i = 0; i < event->n_fields; i++) {
627 		if (event->fields[i]->is_string &&
628 		    event->fields[i]->is_dynamic)
629 			pos += snprintf(buf + pos, LEN_OR_ZERO,
630 				", __get_str(%s)", event->fields[i]->name);
631 		else if (event->fields[i]->is_stack)
632 			pos += snprintf(buf + pos, LEN_OR_ZERO,
633 				", __get_stacktrace(%s)", event->fields[i]->name);
634 		else
635 			pos += snprintf(buf + pos, LEN_OR_ZERO,
636 					", REC->%s", event->fields[i]->name);
637 	}
638 
639 #undef LEN_OR_ZERO
640 
641 	/* return the length of print_fmt */
642 	return pos;
643 }
644 
645 static int set_synth_event_print_fmt(struct trace_event_call *call)
646 {
647 	struct synth_event *event = call->data;
648 	char *print_fmt;
649 	int len;
650 
651 	/* First: called with 0 length to calculate the needed length */
652 	len = __set_synth_event_print_fmt(event, NULL, 0);
653 
654 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
655 	if (!print_fmt)
656 		return -ENOMEM;
657 
658 	/* Second: actually write the @print_fmt */
659 	__set_synth_event_print_fmt(event, print_fmt, len + 1);
660 	call->print_fmt = print_fmt;
661 
662 	return 0;
663 }
664 
665 static void free_synth_field(struct synth_field *field)
666 {
667 	kfree(field->type);
668 	kfree(field->name);
669 	kfree(field);
670 }
671 
672 static int check_field_version(const char *prefix, const char *field_type,
673 			       const char *field_name)
674 {
675 	/*
676 	 * For backward compatibility, the old synthetic event command
677 	 * format did not require semicolons, and in order to not
678 	 * break user space, that old format must still work. If a new
679 	 * feature is added, then the format that uses the new feature
680 	 * will be required to have semicolons, as nothing that uses
681 	 * the old format would be using the new, yet to be created,
682 	 * feature. When a new feature is added, this will detect it,
683 	 * and return a number greater than 1, and require the format
684 	 * to use semicolons.
685 	 */
686 	return 1;
687 }
688 
689 static struct synth_field *parse_synth_field(int argc, char **argv,
690 					     int *consumed, int *field_version)
691 {
692 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
693 	struct synth_field *field;
694 	int len, ret = -ENOMEM;
695 	struct seq_buf s;
696 	ssize_t size;
697 
698 	if (!strcmp(field_type, "unsigned")) {
699 		if (argc < 3) {
700 			synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
701 			return ERR_PTR(-EINVAL);
702 		}
703 		prefix = "unsigned ";
704 		field_type = argv[1];
705 		field_name = argv[2];
706 		*consumed += 3;
707 	} else {
708 		field_name = argv[1];
709 		*consumed += 2;
710 	}
711 
712 	if (!field_name) {
713 		synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
714 		return ERR_PTR(-EINVAL);
715 	}
716 
717 	*field_version = check_field_version(prefix, field_type, field_name);
718 
719 	field = kzalloc(sizeof(*field), GFP_KERNEL);
720 	if (!field)
721 		return ERR_PTR(-ENOMEM);
722 
723 	len = strlen(field_name);
724 	array = strchr(field_name, '[');
725 	if (array)
726 		len -= strlen(array);
727 
728 	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
729 	if (!field->name)
730 		goto free;
731 
732 	if (!is_good_name(field->name)) {
733 		synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
734 		ret = -EINVAL;
735 		goto free;
736 	}
737 
738 	len = strlen(field_type) + 1;
739 
740 	if (array)
741 		len += strlen(array);
742 
743 	if (prefix)
744 		len += strlen(prefix);
745 
746 	field->type = kzalloc(len, GFP_KERNEL);
747 	if (!field->type)
748 		goto free;
749 
750 	seq_buf_init(&s, field->type, len);
751 	if (prefix)
752 		seq_buf_puts(&s, prefix);
753 	seq_buf_puts(&s, field_type);
754 	if (array)
755 		seq_buf_puts(&s, array);
756 	if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
757 		goto free;
758 
759 	s.buffer[s.len] = '\0';
760 
761 	size = synth_field_size(field->type);
762 	if (size < 0) {
763 		if (array)
764 			synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
765 		else
766 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
767 		ret = -EINVAL;
768 		goto free;
769 	} else if (size == 0) {
770 		if (synth_field_is_string(field->type) ||
771 		    synth_field_is_stack(field->type)) {
772 			char *type;
773 
774 			len = sizeof("__data_loc ") + strlen(field->type) + 1;
775 			type = kzalloc(len, GFP_KERNEL);
776 			if (!type)
777 				goto free;
778 
779 			seq_buf_init(&s, type, len);
780 			seq_buf_puts(&s, "__data_loc ");
781 			seq_buf_puts(&s, field->type);
782 
783 			if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
784 				goto free;
785 			s.buffer[s.len] = '\0';
786 
787 			kfree(field->type);
788 			field->type = type;
789 
790 			field->is_dynamic = true;
791 			size = sizeof(u64);
792 		} else {
793 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
794 			ret = -EINVAL;
795 			goto free;
796 		}
797 	}
798 	field->size = size;
799 
800 	if (synth_field_is_string(field->type))
801 		field->is_string = true;
802 	else if (synth_field_is_stack(field->type))
803 		field->is_stack = true;
804 
805 	field->is_signed = synth_field_signed(field->type);
806  out:
807 	return field;
808  free:
809 	free_synth_field(field);
810 	field = ERR_PTR(ret);
811 	goto out;
812 }
813 
814 static void free_synth_tracepoint(struct tracepoint *tp)
815 {
816 	if (!tp)
817 		return;
818 
819 	kfree(tp->name);
820 	kfree(tp);
821 }
822 
823 static struct tracepoint *alloc_synth_tracepoint(char *name)
824 {
825 	struct tracepoint *tp;
826 
827 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
828 	if (!tp)
829 		return ERR_PTR(-ENOMEM);
830 
831 	tp->name = kstrdup(name, GFP_KERNEL);
832 	if (!tp->name) {
833 		kfree(tp);
834 		return ERR_PTR(-ENOMEM);
835 	}
836 
837 	return tp;
838 }
839 
840 struct synth_event *find_synth_event(const char *name)
841 {
842 	struct dyn_event *pos;
843 	struct synth_event *event;
844 
845 	for_each_dyn_event(pos) {
846 		if (!is_synth_event(pos))
847 			continue;
848 		event = to_synth_event(pos);
849 		if (strcmp(event->name, name) == 0)
850 			return event;
851 	}
852 
853 	return NULL;
854 }
855 
856 static struct trace_event_fields synth_event_fields_array[] = {
857 	{ .type = TRACE_FUNCTION_TYPE,
858 	  .define_fields = synth_event_define_fields },
859 	{}
860 };
861 
862 static int register_synth_event(struct synth_event *event)
863 {
864 	struct trace_event_call *call = &event->call;
865 	int ret = 0;
866 
867 	event->call.class = &event->class;
868 	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
869 	if (!event->class.system) {
870 		ret = -ENOMEM;
871 		goto out;
872 	}
873 
874 	event->tp = alloc_synth_tracepoint(event->name);
875 	if (IS_ERR(event->tp)) {
876 		ret = PTR_ERR(event->tp);
877 		event->tp = NULL;
878 		goto out;
879 	}
880 
881 	INIT_LIST_HEAD(&call->class->fields);
882 	call->event.funcs = &synth_event_funcs;
883 	call->class->fields_array = synth_event_fields_array;
884 
885 	ret = register_trace_event(&call->event);
886 	if (!ret) {
887 		ret = -ENODEV;
888 		goto out;
889 	}
890 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
891 	call->class->reg = trace_event_reg;
892 	call->class->probe = trace_event_raw_event_synth;
893 	call->data = event;
894 	call->tp = event->tp;
895 
896 	ret = trace_add_event_call(call);
897 	if (ret) {
898 		pr_warn("Failed to register synthetic event: %s\n",
899 			trace_event_name(call));
900 		goto err;
901 	}
902 
903 	ret = set_synth_event_print_fmt(call);
904 	/* unregister_trace_event() will be called inside */
905 	if (ret < 0)
906 		trace_remove_event_call(call);
907  out:
908 	return ret;
909  err:
910 	unregister_trace_event(&call->event);
911 	goto out;
912 }
913 
914 static int unregister_synth_event(struct synth_event *event)
915 {
916 	struct trace_event_call *call = &event->call;
917 	int ret;
918 
919 	ret = trace_remove_event_call(call);
920 
921 	return ret;
922 }
923 
924 static void free_synth_event(struct synth_event *event)
925 {
926 	unsigned int i;
927 
928 	if (!event)
929 		return;
930 
931 	for (i = 0; i < event->n_fields; i++)
932 		free_synth_field(event->fields[i]);
933 
934 	kfree(event->fields);
935 	kfree(event->dynamic_fields);
936 	kfree(event->name);
937 	kfree(event->class.system);
938 	free_synth_tracepoint(event->tp);
939 	free_synth_event_print_fmt(&event->call);
940 	kfree(event);
941 }
942 
943 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
944 					     struct synth_field **fields)
945 {
946 	unsigned int i, j, n_dynamic_fields = 0;
947 	struct synth_event *event;
948 
949 	event = kzalloc(sizeof(*event), GFP_KERNEL);
950 	if (!event) {
951 		event = ERR_PTR(-ENOMEM);
952 		goto out;
953 	}
954 
955 	event->name = kstrdup(name, GFP_KERNEL);
956 	if (!event->name) {
957 		kfree(event);
958 		event = ERR_PTR(-ENOMEM);
959 		goto out;
960 	}
961 
962 	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
963 	if (!event->fields) {
964 		free_synth_event(event);
965 		event = ERR_PTR(-ENOMEM);
966 		goto out;
967 	}
968 
969 	for (i = 0; i < n_fields; i++)
970 		if (fields[i]->is_dynamic)
971 			n_dynamic_fields++;
972 
973 	if (n_dynamic_fields) {
974 		event->dynamic_fields = kcalloc(n_dynamic_fields,
975 						sizeof(*event->dynamic_fields),
976 						GFP_KERNEL);
977 		if (!event->dynamic_fields) {
978 			free_synth_event(event);
979 			event = ERR_PTR(-ENOMEM);
980 			goto out;
981 		}
982 	}
983 
984 	dyn_event_init(&event->devent, &synth_event_ops);
985 
986 	for (i = 0, j = 0; i < n_fields; i++) {
987 		fields[i]->field_pos = i;
988 		event->fields[i] = fields[i];
989 
990 		if (fields[i]->is_dynamic)
991 			event->dynamic_fields[j++] = fields[i];
992 	}
993 	event->n_dynamic_fields = j;
994 	event->n_fields = n_fields;
995  out:
996 	return event;
997 }
998 
999 static int synth_event_check_arg_fn(void *data)
1000 {
1001 	struct dynevent_arg_pair *arg_pair = data;
1002 	int size;
1003 
1004 	size = synth_field_size((char *)arg_pair->lhs);
1005 	if (size == 0) {
1006 		if (strstr((char *)arg_pair->lhs, "["))
1007 			return 0;
1008 	}
1009 
1010 	return size ? 0 : -EINVAL;
1011 }
1012 
1013 /**
1014  * synth_event_add_field - Add a new field to a synthetic event cmd
1015  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1016  * @type: The type of the new field to add
1017  * @name: The name of the new field to add
1018  *
1019  * Add a new field to a synthetic event cmd object.  Field ordering is in
1020  * the same order the fields are added.
1021  *
1022  * See synth_field_size() for available types. If field_name contains
1023  * [n] the field is considered to be an array.
1024  *
1025  * Return: 0 if successful, error otherwise.
1026  */
1027 int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
1028 			  const char *name)
1029 {
1030 	struct dynevent_arg_pair arg_pair;
1031 	int ret;
1032 
1033 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1034 		return -EINVAL;
1035 
1036 	if (!type || !name)
1037 		return -EINVAL;
1038 
1039 	dynevent_arg_pair_init(&arg_pair, 0, ';');
1040 
1041 	arg_pair.lhs = type;
1042 	arg_pair.rhs = name;
1043 
1044 	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
1045 	if (ret)
1046 		return ret;
1047 
1048 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1049 		ret = -EINVAL;
1050 
1051 	return ret;
1052 }
1053 EXPORT_SYMBOL_GPL(synth_event_add_field);
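/*
 * Illustrative sketch, not part of this file: building a synthetic event one
 * field at a time with synth_event_add_field().  The helper below, the
 * "example_field_add" event name and its field are assumptions for the
 * example only; the dynevent_cmd API itself comes from <linux/trace_events.h>.
 */
static int example_add_field_usage(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	/* Start the command with no fields, then append one "u64 ts_ns" */
	ret = synth_event_gen_cmd_start(&cmd, "example_field_add", THIS_MODULE);
	if (!ret)
		ret = synth_event_add_field(&cmd, "u64", "ts_ns");
	if (!ret)
		ret = synth_event_gen_cmd_end(&cmd);

	kfree(buf);
	return ret;
}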
1054 
1055 /**
1056  * synth_event_add_field_str - Add a new field to a synthetic event cmd
1057  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1058  * @type_name: The type and name of the new field to add, as a single string
1059  *
1060  * Add a new field to a synthetic event cmd object, as a single
1061  * string.  The @type_name string is expected to be of the form 'type
1062  * name', to which a ';' will be appended.  No sanity checking is done -
1063  * what's passed in is assumed to already be well-formed.  Field
1064  * ordering is in the same order the fields are added.
1065  *
1066  * See synth_field_size() for available types. If field_name contains
1067  * [n] the field is considered to be an array.
1068  *
1069  * Return: 0 if successful, error otherwise.
1070  */
1071 int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
1072 {
1073 	struct dynevent_arg arg;
1074 	int ret;
1075 
1076 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1077 		return -EINVAL;
1078 
1079 	if (!type_name)
1080 		return -EINVAL;
1081 
1082 	dynevent_arg_init(&arg, ';');
1083 
1084 	arg.str = type_name;
1085 
1086 	ret = dynevent_arg_add(cmd, &arg, NULL);
1087 	if (ret)
1088 		return ret;
1089 
1090 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1091 		ret = -EINVAL;
1092 
1093 	return ret;
1094 }
1095 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
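/*
 * Illustrative sketch, not part of this file: same idea as the previous
 * sketch, but the type and name are passed as a single pre-formed string.
 * The field below is an assumption for the example only.
 */
static int example_add_field_str_usage(struct dynevent_cmd *cmd)
{
	/* Equivalent to synth_event_add_field(cmd, "char[16]", "next_comm") */
	return synth_event_add_field_str(cmd, "char[16] next_comm");
}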
1096 
1097 /**
1098  * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1099  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1100  * @fields: An array of type/name field descriptions
1101  * @n_fields: The number of field descriptions contained in the fields array
1102  *
1103  * Add a new set of fields to a synthetic event cmd object.  The event
1104  * fields that will be defined for the event should be passed in as an
1105  * array of struct synth_field_desc, and the number of elements in the
1106  * array passed in as n_fields.  Field ordering will retain the
1107  * ordering given in the fields array.
1108  *
1109  * See synth_field_size() for available types. If field_name contains
1110  * [n] the field is considered to be an array.
1111  *
1112  * Return: 0 if successful, error otherwise.
1113  */
1114 int synth_event_add_fields(struct dynevent_cmd *cmd,
1115 			   struct synth_field_desc *fields,
1116 			   unsigned int n_fields)
1117 {
1118 	unsigned int i;
1119 	int ret = 0;
1120 
1121 	for (i = 0; i < n_fields; i++) {
1122 		if (fields[i].type == NULL || fields[i].name == NULL) {
1123 			ret = -EINVAL;
1124 			break;
1125 		}
1126 
1127 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1128 		if (ret)
1129 			break;
1130 	}
1131 
1132 	return ret;
1133 }
1134 EXPORT_SYMBOL_GPL(synth_event_add_fields);
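/*
 * Illustrative sketch, not part of this file: appending several fields in one
 * call from a synth_field_desc array.  The field descriptions and helper are
 * assumptions for the example only; @cmd is assumed to have been started as
 * in the earlier sketches.
 */
static struct synth_field_desc example_extra_fields[] = {
	{ .type = "unsigned int",	.name = "cpu" },
	{ .type = "char[64]",		.name = "msg" },
};

static int example_add_fields_usage(struct dynevent_cmd *cmd)
{
	return synth_event_add_fields(cmd, example_extra_fields,
				      ARRAY_SIZE(example_extra_fields));
}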
1135 
1136 /**
1137  * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1138  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1139  * @name: The name of the synthetic event
1140  * @mod: The module creating the event, NULL if not created from a module
1141  * @...: Variable number of arg (pairs), one pair for each field
1142  *
1143  * NOTE: Users normally won't want to call this function directly, but
1144  * rather use the synth_event_gen_cmd_start() wrapper, which
1145  * automatically adds a NULL to the end of the arg list.  If this
1146  * function is used directly, make sure the last arg in the variable
1147  * arg list is NULL.
1148  *
1149  * Generate a synthetic event command to be executed by
1150  * synth_event_gen_cmd_end().  This function can be used to generate
1151  * the complete command or only the first part of it; in the latter
1152  * case, synth_event_add_field(), synth_event_add_field_str(), or
1153  * synth_event_add_fields() can be used to add more fields following
1154  * this.
1155  *
1156  * There should be an even number of variable args, each pair consisting
1157  * of a type followed by a field name.
1158  *
1159  * See synth_field_size() for available types. If field_name contains
1160  * [n] the field is considered to be an array.
1161  *
1162  * Return: 0 if successful, error otherwise.
1163  */
1164 int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1165 				struct module *mod, ...)
1166 {
1167 	struct dynevent_arg arg;
1168 	va_list args;
1169 	int ret;
1170 
1171 	cmd->event_name = name;
1172 	cmd->private_data = mod;
1173 
1174 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1175 		return -EINVAL;
1176 
1177 	dynevent_arg_init(&arg, 0);
1178 	arg.str = name;
1179 	ret = dynevent_arg_add(cmd, &arg, NULL);
1180 	if (ret)
1181 		return ret;
1182 
1183 	va_start(args, mod);
1184 	for (;;) {
1185 		const char *type, *name;
1186 
1187 		type = va_arg(args, const char *);
1188 		if (!type)
1189 			break;
1190 		name = va_arg(args, const char *);
1191 		if (!name)
1192 			break;
1193 
1194 		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1195 			ret = -EINVAL;
1196 			break;
1197 		}
1198 
1199 		ret = synth_event_add_field(cmd, type, name);
1200 		if (ret)
1201 			break;
1202 	}
1203 	va_end(args);
1204 
1205 	return ret;
1206 }
1207 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
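/*
 * Illustrative sketch, not part of this file: generating and executing a
 * complete command through the synth_event_gen_cmd_start() wrapper, which
 * NULL-terminates the type/name pairs.  The event name and fields are
 * assumptions for the example only.
 */
static int example_gen_cmd_usage(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	ret = synth_event_gen_cmd_start(&cmd, "example_gen_synth", THIS_MODULE,
					"pid_t", "next_pid",
					"u64", "ts_ns");
	if (!ret)
		ret = synth_event_gen_cmd_end(&cmd);	/* runs the command */

	kfree(buf);
	return ret;
}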
1208 
1209 /**
1210  * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1211  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1212  * @name: The name of the synthetic event
1213  * @mod: The module creating the event, NULL if not created from a module
1214  * @fields: An array of type/name field descriptions
1215  * @n_fields: The number of field descriptions contained in the fields array
1216  *
1217  * Generate a synthetic event command to be executed by
1218  * synth_event_gen_cmd_end().  This function can be used to generate
1219  * the complete command or only the first part of it; in the latter
1220  * case, synth_event_add_field(), synth_event_add_field_str(), or
1221  * synth_event_add_fields() can be used to add more fields following
1222  * this.
1223  *
1224  * The event fields that will be defined for the event should be
1225  * passed in as an array of struct synth_field_desc, and the number of
1226  * elements in the array passed in as n_fields.  Field ordering will
1227  * retain the ordering given in the fields array.
1228  *
1229  * See synth_field_size() for available types. If field_name contains
1230  * [n] the field is considered to be an array.
1231  *
1232  * Return: 0 if successful, error otherwise.
1233  */
1234 int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1235 				    struct module *mod,
1236 				    struct synth_field_desc *fields,
1237 				    unsigned int n_fields)
1238 {
1239 	struct dynevent_arg arg;
1240 	unsigned int i;
1241 	int ret = 0;
1242 
1243 	cmd->event_name = name;
1244 	cmd->private_data = mod;
1245 
1246 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1247 		return -EINVAL;
1248 
1249 	if (n_fields > SYNTH_FIELDS_MAX)
1250 		return -EINVAL;
1251 
1252 	dynevent_arg_init(&arg, 0);
1253 	arg.str = name;
1254 	ret = dynevent_arg_add(cmd, &arg, NULL);
1255 	if (ret)
1256 		return ret;
1257 
1258 	for (i = 0; i < n_fields; i++) {
1259 		if (fields[i].type == NULL || fields[i].name == NULL)
1260 			return -EINVAL;
1261 
1262 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1263 		if (ret)
1264 			break;
1265 	}
1266 
1267 	return ret;
1268 }
1269 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
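/*
 * Illustrative sketch, not part of this file: the array-based variant of the
 * sketch above.  The fields and event name are assumptions for the example
 * only; @cmd is assumed to have been set up with synth_event_cmd_init().
 */
static struct synth_field_desc example_array_fields[] = {
	{ .type = "pid_t",	.name = "next_pid" },
	{ .type = "char[16]",	.name = "next_comm" },
};

static int example_gen_cmd_array_usage(struct dynevent_cmd *cmd)
{
	int ret;

	ret = synth_event_gen_cmd_array_start(cmd, "example_array_synth",
					      THIS_MODULE, example_array_fields,
					      ARRAY_SIZE(example_array_fields));
	if (!ret)
		ret = synth_event_gen_cmd_end(cmd);

	return ret;
}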
1270 
1271 static int __create_synth_event(const char *name, const char *raw_fields)
1272 {
1273 	char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
1274 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1275 	int consumed, cmd_version = 1, n_fields_this_loop;
1276 	int i, argc, n_fields = 0, ret = 0;
1277 	struct synth_event *event = NULL;
1278 
1279 	/*
1280 	 * Argument syntax:
1281 	 *  - Add synthetic event: <event_name> field[;field] ...
1282 	 *  - Remove synthetic event: !<event_name> field[;field] ...
1283 	 *      where 'field' = type field_name
1284 	 */
1285 
1286 	if (name[0] == '\0') {
1287 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1288 		return -EINVAL;
1289 	}
1290 
1291 	if (!is_good_name(name)) {
1292 		synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
1293 		return -EINVAL;
1294 	}
1295 
1296 	mutex_lock(&event_mutex);
1297 
1298 	event = find_synth_event(name);
1299 	if (event) {
1300 		synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
1301 		ret = -EEXIST;
1302 		goto err;
1303 	}
1304 
1305 	tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
1306 	if (!tmp_fields) {
1307 		ret = -ENOMEM;
1308 		goto err;
1309 	}
1310 
1311 	while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
1312 		argv = argv_split(GFP_KERNEL, field_str, &argc);
1313 		if (!argv) {
1314 			ret = -ENOMEM;
1315 			goto err;
1316 		}
1317 
1318 		if (!argc) {
1319 			argv_free(argv);
1320 			continue;
1321 		}
1322 
1323 		n_fields_this_loop = 0;
1324 		consumed = 0;
1325 		while (argc > consumed) {
1326 			int field_version;
1327 
1328 			field = parse_synth_field(argc - consumed,
1329 						  argv + consumed, &consumed,
1330 						  &field_version);
1331 			if (IS_ERR(field)) {
1332 				ret = PTR_ERR(field);
1333 				goto err_free_arg;
1334 			}
1335 
1336 			/*
1337 			 * Track the highest version of any field we
1338 			 * found in the command.
1339 			 */
1340 			if (field_version > cmd_version)
1341 				cmd_version = field_version;
1342 
1343 			/*
1344 			 * Now sort out what is and isn't valid for
1345 			 * each supported version.
1346 			 *
1347 			 * If we see more than 1 field per loop, it
1348 			 * means we have multiple fields between
1349 			 * semicolons, and that's something we no
1350 			 * longer support in a version 2 or greater
1351 			 * command.
1352 			 */
1353 			if (cmd_version > 1 && n_fields_this_loop >= 1) {
1354 				synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
1355 				ret = -EINVAL;
1356 				goto err_free_arg;
1357 			}
1358 
1359 			if (n_fields == SYNTH_FIELDS_MAX) {
1360 				synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
1361 				ret = -EINVAL;
1362 				goto err_free_arg;
1363 			}
1364 			fields[n_fields++] = field;
1365 
1366 			n_fields_this_loop++;
1367 		}
1368 		argv_free(argv);
1369 
1370 		if (consumed < argc) {
1371 			synth_err(SYNTH_ERR_INVALID_CMD, 0);
1372 			ret = -EINVAL;
1373 			goto err;
1374 		}
1375 
1376 	}
1377 
1378 	if (n_fields == 0) {
1379 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1380 		ret = -EINVAL;
1381 		goto err;
1382 	}
1383 
1384 	event = alloc_synth_event(name, n_fields, fields);
1385 	if (IS_ERR(event)) {
1386 		ret = PTR_ERR(event);
1387 		event = NULL;
1388 		goto err;
1389 	}
1390 	ret = register_synth_event(event);
1391 	if (!ret)
1392 		dyn_event_add(&event->devent, &event->call);
1393 	else
1394 		free_synth_event(event);
1395  out:
1396 	mutex_unlock(&event_mutex);
1397 
1398 	kfree(saved_fields);
1399 
1400 	return ret;
1401  err_free_arg:
1402 	argv_free(argv);
1403  err:
1404 	for (i = 0; i < n_fields; i++)
1405 		free_synth_field(fields[i]);
1406 
1407 	goto out;
1408 }
1409 
1410 /**
1411  * synth_event_create - Create a new synthetic event
1412  * @name: The name of the new synthetic event
1413  * @fields: An array of type/name field descriptions
1414  * @n_fields: The number of field descriptions contained in the fields array
1415  * @mod: The module creating the event, NULL if not created from a module
1416  *
1417  * Create a new synthetic event with the given name under the
1418  * trace/events/synthetic/ directory.  The event fields that will be
1419  * defined for the event should be passed in as an array of struct
1420  * synth_field_desc, and the number of elements in the array passed in as
1421  * n_fields. Field ordering will retain the ordering given in the
1422  * fields array.
1423  *
1424  * If the new synthetic event is being created from a module, the mod
1425  * param must be non-NULL.  This will ensure that the trace buffer
1426  * won't contain unreadable events.
1427  *
1428  * The new synth event should be deleted using the synth_event_delete()
1429  * function.  The new synthetic event can be generated from modules or
1430  * other kernel code using synth_event_trace() and related functions.
1431  *
1432  * Return: 0 if successful, error otherwise.
1433  */
1434 int synth_event_create(const char *name, struct synth_field_desc *fields,
1435 		       unsigned int n_fields, struct module *mod)
1436 {
1437 	struct dynevent_cmd cmd;
1438 	char *buf;
1439 	int ret;
1440 
1441 	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1442 	if (!buf)
1443 		return -ENOMEM;
1444 
1445 	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1446 
1447 	ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1448 					      fields, n_fields);
1449 	if (ret)
1450 		goto out;
1451 
1452 	ret = synth_event_gen_cmd_end(&cmd);
1453  out:
1454 	kfree(buf);
1455 
1456 	return ret;
1457 }
1458 EXPORT_SYMBOL_GPL(synth_event_create);
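/*
 * Illustrative sketch, not part of this file: the one-shot helper that wraps
 * the command generation above, typically called from module init.  The
 * "example_sched" event and its fields are assumptions for the example only.
 */
static struct synth_field_desc example_sched_fields[] = {
	{ .type = "pid_t",	.name = "next_pid" },
	{ .type = "char[16]",	.name = "next_comm" },
	{ .type = "u64",	.name = "ts_ns" },
};

static int example_create_usage(void)
{
	return synth_event_create("example_sched", example_sched_fields,
				  ARRAY_SIZE(example_sched_fields), THIS_MODULE);
}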
1459 
1460 static int destroy_synth_event(struct synth_event *se)
1461 {
1462 	int ret;
1463 
1464 	if (se->ref)
1465 		return -EBUSY;
1466 
1467 	if (trace_event_dyn_busy(&se->call))
1468 		return -EBUSY;
1469 
1470 	ret = unregister_synth_event(se);
1471 	if (!ret) {
1472 		dyn_event_remove(&se->devent);
1473 		free_synth_event(se);
1474 	}
1475 
1476 	return ret;
1477 }
1478 
1479 /**
1480  * synth_event_delete - Delete a synthetic event
1481  * @event_name: The name of the synthetic event to delete
1482  *
1483  * Delete a synthetic event that was created with synth_event_create().
1484  *
1485  * Return: 0 if successful, error otherwise.
1486  */
1487 int synth_event_delete(const char *event_name)
1488 {
1489 	struct synth_event *se = NULL;
1490 	struct module *mod = NULL;
1491 	int ret = -ENOENT;
1492 
1493 	mutex_lock(&event_mutex);
1494 	se = find_synth_event(event_name);
1495 	if (se) {
1496 		mod = se->mod;
1497 		ret = destroy_synth_event(se);
1498 	}
1499 	mutex_unlock(&event_mutex);
1500 
1501 	if (mod) {
1502 		/*
1503 		 * It is safest to reset the ring buffer if the module
1504 		 * being unloaded registered any events that were
1505 		 * used. The only worry is if a new module gets
1506 		 * loaded, and takes on the same id as the events of
1507 		 * this module. When printing out the buffer, traced
1508 		 * events left over from this module may be passed to
1509 		 * the new module events and unexpected results may
1510 		 * occur.
1511 		 */
1512 		tracing_reset_all_online_cpus();
1513 	}
1514 
1515 	return ret;
1516 }
1517 EXPORT_SYMBOL_GPL(synth_event_delete);
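/*
 * Illustrative sketch, not part of this file: tearing down the hypothetical
 * "example_sched" event created above, typically from module exit.
 */
static int example_delete_usage(void)
{
	/* Fails with -EBUSY while the event is still referenced */
	return synth_event_delete("example_sched");
}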
1518 
1519 static int check_command(const char *raw_command)
1520 {
1521 	char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1522 	int argc, ret = 0;
1523 
1524 	cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1525 	if (!cmd)
1526 		return -ENOMEM;
1527 
1528 	name_and_field = strsep(&cmd, ";");
1529 	if (!name_and_field) {
1530 		ret = -EINVAL;
1531 		goto free;
1532 	}
1533 
1534 	if (name_and_field[0] == '!')
1535 		goto free;
1536 
1537 	argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1538 	if (!argv) {
1539 		ret = -ENOMEM;
1540 		goto free;
1541 	}
1542 	argv_free(argv);
1543 
1544 	if (argc < 3)
1545 		ret = -EINVAL;
1546 free:
1547 	kfree(saved_cmd);
1548 
1549 	return ret;
1550 }
1551 
1552 static int create_or_delete_synth_event(const char *raw_command)
1553 {
1554 	char *name = NULL, *fields, *p;
1555 	int ret = 0;
1556 
1557 	raw_command = skip_spaces(raw_command);
1558 	if (raw_command[0] == '\0')
1559 		return ret;
1560 
1561 	last_cmd_set(raw_command);
1562 
1563 	ret = check_command(raw_command);
1564 	if (ret) {
1565 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1566 		return ret;
1567 	}
1568 
1569 	p = strpbrk(raw_command, " \t");
1570 	if (!p && raw_command[0] != '!') {
1571 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1572 		ret = -EINVAL;
1573 		goto free;
1574 	}
1575 
1576 	name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
1577 	if (!name)
1578 		return -ENOMEM;
1579 
1580 	if (name[0] == '!') {
1581 		ret = synth_event_delete(name + 1);
1582 		goto free;
1583 	}
1584 
1585 	fields = skip_spaces(p);
1586 
1587 	ret = __create_synth_event(name, fields);
1588 free:
1589 	kfree(name);
1590 
1591 	return ret;
1592 }
1593 
1594 static int synth_event_run_command(struct dynevent_cmd *cmd)
1595 {
1596 	struct synth_event *se;
1597 	int ret;
1598 
1599 	ret = create_or_delete_synth_event(cmd->seq.buffer);
1600 	if (ret)
1601 		return ret;
1602 
1603 	se = find_synth_event(cmd->event_name);
1604 	if (WARN_ON(!se))
1605 		return -ENOENT;
1606 
1607 	se->mod = cmd->private_data;
1608 
1609 	return ret;
1610 }
1611 
1612 /**
1613  * synth_event_cmd_init - Initialize a synthetic event command object
1614  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1615  * @buf: A pointer to the buffer used to build the command
1616  * @maxlen: The length of the buffer passed in @buf
1617  *
1618  * Initialize a synthetic event command object.  Use this before
1619  * calling any of the other dynevent_cmd functions.
1620  */
1621 void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1622 {
1623 	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1624 			  synth_event_run_command);
1625 }
1626 EXPORT_SYMBOL_GPL(synth_event_cmd_init);
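/*
 * Illustrative sketch, not part of this file: the usual pairing of a command
 * buffer with synth_event_cmd_init() before any other dynevent_cmd calls, as
 * used in the earlier sketches.  The helper and buffer handling are
 * assumptions for the example only.
 */
static int example_cmd_init_usage(struct dynevent_cmd *cmd, char **bufp)
{
	char *buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(cmd, buf, MAX_DYNEVENT_CMD_LEN);
	*bufp = buf;	/* caller frees it after synth_event_gen_cmd_end() */

	return 0;
}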
1627 
1628 static inline int
1629 __synth_event_trace_init(struct trace_event_file *file,
1630 			 struct synth_event_trace_state *trace_state)
1631 {
1632 	int ret = 0;
1633 
1634 	memset(trace_state, '\0', sizeof(*trace_state));
1635 
1636 	/*
1637 	 * Normal event tracing doesn't get called at all unless the
1638 	 * ENABLED bit is set (which attaches the probe thus allowing
1639 	 * this code to be called, etc).  Because this is called
1640 	 * directly by the user, we don't have that but we still need
1641 	 * to honor not logging when disabled.  For the iterated
1642 	 * trace case, we save the enabled state upon start and just
1643 	 * ignore the following data calls.
1644 	 */
1645 	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1646 	    trace_trigger_soft_disabled(file)) {
1647 		trace_state->disabled = true;
1648 		ret = -ENOENT;
1649 		goto out;
1650 	}
1651 
1652 	trace_state->event = file->event_call->data;
1653 out:
1654 	return ret;
1655 }
1656 
1657 static inline int
1658 __synth_event_trace_start(struct trace_event_file *file,
1659 			  struct synth_event_trace_state *trace_state,
1660 			  int dynamic_fields_size)
1661 {
1662 	int entry_size, fields_size = 0;
1663 	int ret = 0;
1664 
1665 	fields_size = trace_state->event->n_u64 * sizeof(u64);
1666 	fields_size += dynamic_fields_size;
1667 
1668 	/*
1669 	 * Avoid ring buffer recursion detection, as this event
1670 	 * is being performed within another event.
1671 	 */
1672 	trace_state->buffer = file->tr->array_buffer.buffer;
1673 	ring_buffer_nest_start(trace_state->buffer);
1674 
1675 	entry_size = sizeof(*trace_state->entry) + fields_size;
1676 	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1677 							file,
1678 							entry_size);
1679 	if (!trace_state->entry) {
1680 		ring_buffer_nest_end(trace_state->buffer);
1681 		ret = -EINVAL;
1682 	}
1683 
1684 	return ret;
1685 }
1686 
1687 static inline void
1688 __synth_event_trace_end(struct synth_event_trace_state *trace_state)
1689 {
1690 	trace_event_buffer_commit(&trace_state->fbuffer);
1691 
1692 	ring_buffer_nest_end(trace_state->buffer);
1693 }
1694 
1695 /**
1696  * synth_event_trace - Trace a synthetic event
1697  * @file: The trace_event_file representing the synthetic event
1698  * @n_vals: The number of values in vals
1699  * @...: Variable number of args containing the event values
1700  *
1701  * Trace a synthetic event using the values passed in the variable
1702  * argument list.
1703  *
1704  * The argument list should be a list of 'n_vals' u64 values.  The number
1705  * of vals must match the number of fields in the synthetic event, and
1706  * must be in the same order as the synthetic event fields.
1707  *
1708  * All vals should be cast to u64, and string vals are just pointers
1709  * to strings, cast to u64.  Strings will be copied into space
1710  * reserved in the event for the string, using these pointers.
1711  *
1712  * Return: 0 on success, err otherwise.
1713  */
1714 int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1715 {
1716 	unsigned int i, n_u64, len, data_size = 0;
1717 	struct synth_event_trace_state state;
1718 	va_list args;
1719 	int ret;
1720 
1721 	ret = __synth_event_trace_init(file, &state);
1722 	if (ret) {
1723 		if (ret == -ENOENT)
1724 			ret = 0; /* just disabled, not really an error */
1725 		return ret;
1726 	}
1727 
1728 	if (state.event->n_dynamic_fields) {
1729 		va_start(args, n_vals);
1730 
1731 		for (i = 0; i < state.event->n_fields; i++) {
1732 			u64 val = va_arg(args, u64);
1733 
1734 			if (state.event->fields[i]->is_string &&
1735 			    state.event->fields[i]->is_dynamic) {
1736 				char *str_val = (char *)(long)val;
1737 
1738 				data_size += strlen(str_val) + 1;
1739 			}
1740 		}
1741 
1742 		va_end(args);
1743 	}
1744 
1745 	ret = __synth_event_trace_start(file, &state, data_size);
1746 	if (ret)
1747 		return ret;
1748 
1749 	if (n_vals != state.event->n_fields) {
1750 		ret = -EINVAL;
1751 		goto out;
1752 	}
1753 
1754 	data_size = 0;
1755 
1756 	va_start(args, n_vals);
1757 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1758 		u64 val;
1759 
1760 		val = va_arg(args, u64);
1761 
1762 		if (state.event->fields[i]->is_string) {
1763 			char *str_val = (char *)(long)val;
1764 
1765 			len = trace_string(state.entry, state.event, str_val,
1766 					   state.event->fields[i]->is_dynamic,
1767 					   data_size, &n_u64);
1768 			data_size += len; /* only dynamic string increments */
1769 		} else {
1770 			struct synth_field *field = state.event->fields[i];
1771 
1772 			switch (field->size) {
1773 			case 1:
1774 				state.entry->fields[n_u64].as_u8 = (u8)val;
1775 				break;
1776 
1777 			case 2:
1778 				state.entry->fields[n_u64].as_u16 = (u16)val;
1779 				break;
1780 
1781 			case 4:
1782 				state.entry->fields[n_u64].as_u32 = (u32)val;
1783 				break;
1784 
1785 			default:
1786 				state.entry->fields[n_u64].as_u64 = val;
1787 				break;
1788 			}
1789 			n_u64++;
1790 		}
1791 	}
1792 	va_end(args);
1793 out:
1794 	__synth_event_trace_end(&state);
1795 
1796 	return ret;
1797 }
1798 EXPORT_SYMBOL_GPL(synth_event_trace);
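/*
 * Illustrative sketch, not part of this file: generating one instance of the
 * hypothetical "example_sched" event from the synth_event_create() sketch,
 * one u64 per field in field order, with string values passed as pointers
 * cast to u64.  The lookup names and values are assumptions for the example
 * only.
 */
static int example_trace_usage(void)
{
	struct trace_event_file *file;
	int ret;

	file = trace_get_event_file(NULL, "synthetic", "example_sched");
	if (IS_ERR(file))
		return PTR_ERR(file);

	ret = synth_event_trace(file, 3,		/* number of values */
				(u64)777,		/* next_pid */
				(u64)(long)"hula hoops",/* next_comm */
				(u64)1000000);		/* ts_ns */

	trace_put_event_file(file);
	return ret;
}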
1799 
1800 /**
1801  * synth_event_trace_array - Trace a synthetic event from an array
1802  * @file: The trace_event_file representing the synthetic event
1803  * @vals: Array of values
1804  * @n_vals: The number of values in vals
1805  *
1806  * Trace a synthetic event using the values passed in as 'vals'.
1807  *
1808  * The 'vals' array is just an array of 'n_vals' u64.  The number of
1809  * vals must match the number of fields in the synthetic event, and
1810  * must be in the same order as the synthetic event fields.
1811  *
1812  * All vals should be cast to u64, and string vals are just pointers
1813  * to strings, cast to u64.  Strings will be copied into space
1814  * reserved in the event for the string, using these pointers.
1815  *
1816  * Return: 0 on success, err otherwise.
1817  */
1818 int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1819 			    unsigned int n_vals)
1820 {
1821 	unsigned int i, n_u64, field_pos, len, data_size = 0;
1822 	struct synth_event_trace_state state;
1823 	char *str_val;
1824 	int ret;
1825 
1826 	ret = __synth_event_trace_init(file, &state);
1827 	if (ret) {
1828 		if (ret == -ENOENT)
1829 			ret = 0; /* just disabled, not really an error */
1830 		return ret;
1831 	}
1832 
1833 	if (state.event->n_dynamic_fields) {
1834 		for (i = 0; i < state.event->n_dynamic_fields; i++) {
1835 			field_pos = state.event->dynamic_fields[i]->field_pos;
1836 			str_val = (char *)(long)vals[field_pos];
1837 			len = strlen(str_val) + 1;
1838 			data_size += len;
1839 		}
1840 	}
1841 
1842 	ret = __synth_event_trace_start(file, &state, data_size);
1843 	if (ret)
1844 		return ret;
1845 
1846 	if (n_vals != state.event->n_fields) {
1847 		ret = -EINVAL;
1848 		goto out;
1849 	}
1850 
1851 	data_size = 0;
1852 
1853 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1854 		if (state.event->fields[i]->is_string) {
1855 			char *str_val = (char *)(long)vals[i];
1856 
1857 			len = trace_string(state.entry, state.event, str_val,
1858 					   state.event->fields[i]->is_dynamic,
1859 					   data_size, &n_u64);
1860 			data_size += len; /* only dynamic strings increment data_size */
1861 		} else {
1862 			struct synth_field *field = state.event->fields[i];
1863 			u64 val = vals[i];
1864 
1865 			switch (field->size) {
1866 			case 1:
1867 				state.entry->fields[n_u64].as_u8 = (u8)val;
1868 				break;
1869 
1870 			case 2:
1871 				state.entry->fields[n_u64].as_u16 = (u16)val;
1872 				break;
1873 
1874 			case 4:
1875 				state.entry->fields[n_u64].as_u32 = (u32)val;
1876 				break;
1877 
1878 			default:
1879 				state.entry->fields[n_u64].as_u64 = val;
1880 				break;
1881 			}
1882 			n_u64++;
1883 		}
1884 	}
1885 out:
1886 	__synth_event_trace_end(&state);
1887 
1888 	return ret;
1889 }
1890 EXPORT_SYMBOL_GPL(synth_event_trace_array);
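
/*
 * Illustrative sketch, not part of this file: tracing the same
 * hypothetical two-field event via synth_event_trace_array().  The
 * vals[] layout must follow the event's field order; string fields
 * would be passed as char pointers cast to u64.
 *
 *	u64 vals[2];
 *
 *	vals[0] = lat;			// u64 lat
 *	vals[1] = current->pid;		// pid_t pid
 *	ret = synth_event_trace_array(latency_file, vals, ARRAY_SIZE(vals));
 */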
1891 
1892 /**
1893  * synth_event_trace_start - Start piecewise synthetic event trace
1894  * @file: The trace_event_file representing the synthetic event
1895  * @trace_state: A pointer to object tracking the piecewise trace state
1896  *
1897  * Start the trace of a synthetic event field-by-field rather than all
1898  * at once.
1899  *
1900  * This function 'opens' an event trace, which means space is reserved
1901  * for the event in the trace buffer, after which the event's
1902  * individual field values can be set through either
1903  * synth_event_add_next_val() or synth_event_add_val().
1904  *
1905  * A pointer to a trace_state object is passed in, which will keep
1906  * track of the current event trace state until the event trace is
1907  * closed (and the event finally traced) using
1908  * synth_event_trace_end().
1909  *
1910  * Note that synth_event_trace_end() must be called after all values
1911  * have been added for each event trace, regardless of whether adding
1912  * all field values succeeded or not.
1913  *
1914  * Note also that for a given event trace, all fields must be added
1915  * using either synth_event_add_next_val() or synth_event_add_val()
1916  * but not both together or interleaved.
1917  *
1918  * Return: 0 on success, err otherwise.
1919  */
1920 int synth_event_trace_start(struct trace_event_file *file,
1921 			    struct synth_event_trace_state *trace_state)
1922 {
1923 	int ret;
1924 
1925 	if (!trace_state)
1926 		return -EINVAL;
1927 
1928 	ret = __synth_event_trace_init(file, trace_state);
1929 	if (ret) {
1930 		if (ret == -ENOENT)
1931 			ret = 0; /* just disabled, not really an error */
1932 		return ret;
1933 	}
1934 
1935 	if (trace_state->event->n_dynamic_fields)
1936 		return -ENOTSUPP;
1937 
1938 	ret = __synth_event_trace_start(file, trace_state, 0);
1939 
1940 	return ret;
1941 }
1942 EXPORT_SYMBOL_GPL(synth_event_trace_start);
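
/*
 * Illustrative sketch, not part of this file: the piecewise open/add/close
 * sequence described above, again using the hypothetical two-field event.
 * Values are added in field order with synth_event_add_next_val() (defined
 * below), and synth_event_trace_end() is called whether or not the adds
 * succeed.
 *
 *	struct synth_event_trace_state state;
 *
 *	ret = synth_event_trace_start(latency_file, &state);
 *	if (ret)
 *		return ret;
 *
 *	ret = synth_event_add_next_val(lat, &state);		// u64 lat
 *	if (!ret)
 *		ret = synth_event_add_next_val(current->pid, &state);
 *
 *	synth_event_trace_end(&state);
 */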
1943 
1944 static int __synth_event_add_val(const char *field_name, u64 val,
1945 				 struct synth_event_trace_state *trace_state)
1946 {
1947 	struct synth_field *field = NULL;
1948 	struct synth_trace_event *entry;
1949 	struct synth_event *event;
1950 	int i, ret = 0;
1951 
1952 	if (!trace_state) {
1953 		ret = -EINVAL;
1954 		goto out;
1955 	}
1956 
1957 	/* can't mix synth_event_add_next_val() with synth_event_add_val() */
1958 	if (field_name) {
1959 		if (trace_state->add_next) {
1960 			ret = -EINVAL;
1961 			goto out;
1962 		}
1963 		trace_state->add_name = true;
1964 	} else {
1965 		if (trace_state->add_name) {
1966 			ret = -EINVAL;
1967 			goto out;
1968 		}
1969 		trace_state->add_next = true;
1970 	}
1971 
1972 	if (trace_state->disabled)
1973 		goto out;
1974 
1975 	event = trace_state->event;
1976 	if (trace_state->add_name) {
1977 		for (i = 0; i < event->n_fields; i++) {
1978 			field = event->fields[i];
1979 			if (strcmp(field->name, field_name) == 0)
1980 				break;
1981 		}
1982 		if (!field) {
1983 			ret = -EINVAL;
1984 			goto out;
1985 		}
1986 	} else {
1987 		if (trace_state->cur_field >= event->n_fields) {
1988 			ret = -EINVAL;
1989 			goto out;
1990 		}
1991 		field = event->fields[trace_state->cur_field++];
1992 	}
1993 
1994 	entry = trace_state->entry;
1995 	if (field->is_string) {
1996 		char *str_val = (char *)(long)val;
1997 		char *str_field;
1998 
1999 		if (field->is_dynamic) { /* add_val can't do dynamic strings */
2000 			ret = -EINVAL;
2001 			goto out;
2002 		}
2003 
2004 		if (!str_val) {
2005 			ret = -EINVAL;
2006 			goto out;
2007 		}
2008 
2009 		str_field = (char *)&entry->fields[field->offset];
2010 		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
2011 	} else {
2012 		switch (field->size) {
2013 		case 1:
2014 			trace_state->entry->fields[field->offset].as_u8 = (u8)val;
2015 			break;
2016 
2017 		case 2:
2018 			trace_state->entry->fields[field->offset].as_u16 = (u16)val;
2019 			break;
2020 
2021 		case 4:
2022 			trace_state->entry->fields[field->offset].as_u32 = (u32)val;
2023 			break;
2024 
2025 		default:
2026 			trace_state->entry->fields[field->offset].as_u64 = val;
2027 			break;
2028 		}
2029 	}
2030  out:
2031 	return ret;
2032 }
2033 
2034 /**
2035  * synth_event_add_next_val - Add the next field's value to an open synth trace
2036  * @val: The value to set the next field to
2037  * @trace_state: A pointer to object tracking the piecewise trace state
2038  *
2039  * Set the value of the next field in an event that's been opened by
2040  * synth_event_trace_start().
2041  *
2042  * The val param should be the value cast to u64.  If the value points
2043  * to a string, the val param should be a char * cast to u64.
2044  *
2045  * This function assumes all the fields in an event are to be set one
2046  * after another - successive calls to this function are made, one for
2047  * each field, in the order of the fields in the event, until all
2048  * fields have been set.  If you'd rather set each field individually
2049  * without regard to ordering, synth_event_add_val() can be used
2050  * instead.
2051  *
2052  * Note however that synth_event_add_next_val() and
2053  * synth_event_add_val() can't be intermixed for a given event trace -
2054  * one or the other but not both can be used at the same time.
2055  *
2056  * Note also that synth_event_trace_end() must be called after all
2057  * values have been added for each event trace, regardless of whether
2058  * adding all field values succeeded or not.
2059  *
2060  * Return: 0 on success, err otherwise.
2061  */
2062 int synth_event_add_next_val(u64 val,
2063 			     struct synth_event_trace_state *trace_state)
2064 {
2065 	return __synth_event_add_val(NULL, val, trace_state);
2066 }
2067 EXPORT_SYMBOL_GPL(synth_event_add_next_val);
2068 
2069 /**
2070  * synth_event_add_val - Add a named field's value to an open synth trace
2071  * @field_name: The name of the synthetic event field value to set
2072  * @val: The value to set the named field to
2073  * @trace_state: A pointer to object tracking the piecewise trace state
2074  *
2075  * Set the value of the named field in an event that's been opened by
2076  * synth_event_trace_start().
2077  *
2078  * The val param should be the value cast to u64.  If the value points
2079  * to a string, the val param should be a char * cast to u64.
2080  *
2081  * This function looks up the field name, and if found, sets the field
2082  * to the specified value.  This lookup makes this function more
2083  * expensive than synth_event_add_next_val(), so use that or the
2084  * non-piecewise synth_event_trace() instead if efficiency is more
2085  * important.
2086  *
2087  * Note however that synth_event_add_next_val() and
2088  * synth_event_add_val() can't be intermixed for a given event trace -
2089  * one or the other but not both can be used at the same time.
2090  *
2091  * Note also that synth_event_trace_end() must be called after all
2092  * values have been added for each event trace, regardless of whether
2093  * adding all field values succeeded or not.
2094  *
2095  * Return: 0 on success, err otherwise.
2096  */
2097 int synth_event_add_val(const char *field_name, u64 val,
2098 			struct synth_event_trace_state *trace_state)
2099 {
2100 	return __synth_event_add_val(field_name, val, trace_state);
2101 }
2102 EXPORT_SYMBOL_GPL(synth_event_add_val);
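
/*
 * Illustrative sketch, not part of this file: the named-field variant,
 * which may set fields in any order but must not be mixed with
 * synth_event_add_next_val() within one event trace.  Field names follow
 * the hypothetical event used in the sketches above.
 *
 *	ret = synth_event_add_val("pid", current->pid, &state);
 *	if (!ret)
 *		ret = synth_event_add_val("lat", lat, &state);
 *
 *	synth_event_trace_end(&state);
 */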
2103 
2104 /**
2105  * synth_event_trace_end - End piecewise synthetic event trace
2106  * @trace_state: A pointer to object tracking the piecewise trace state
2107  *
2108  * End the trace of a synthetic event opened by
2109  * synth_event_trace_start().
2110  *
2111  * This function 'closes' an event trace, which means it commits the
2112  * reserved event to the trace buffer and cleans up other loose ends.
2113  *
2114  * A pointer to a trace_state object is passed in, which will keep
2115  * track of the current event trace state opened with
2116  * synth_event_trace_start().
2117  *
2118  * Note that this function must be called after all values have been
2119  * added for each event trace, regardless of whether adding all field
2120  * values succeeded or not.
2121  *
2122  * Return: 0 on success, err otherwise.
2123  */
2124 int synth_event_trace_end(struct synth_event_trace_state *trace_state)
2125 {
2126 	if (!trace_state)
2127 		return -EINVAL;
2128 
2129 	__synth_event_trace_end(trace_state);
2130 
2131 	return 0;
2132 }
2133 EXPORT_SYMBOL_GPL(synth_event_trace_end);
2134 
2135 static int create_synth_event(const char *raw_command)
2136 {
2137 	char *fields, *p;
2138 	const char *name;
2139 	int len, ret = 0;
2140 
2141 	raw_command = skip_spaces(raw_command);
2142 	if (raw_command[0] == '\0')
2143 		return ret;
2144 
2145 	last_cmd_set(raw_command);
2146 
2147 	name = raw_command;
2148 
2149 	/* Don't try to process if not our system */
2150 	if (name[0] != 's' || name[1] != ':')
2151 		return -ECANCELED;
2152 	name += 2;
2153 
2154 	p = strpbrk(raw_command, " \t");
2155 	if (!p) {
2156 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
2157 		return -EINVAL;
2158 	}
2159 
2160 	fields = skip_spaces(p);
2161 
2162 	/* This interface accepts a group name prefix */
2163 	if (strchr(name, '/')) {
2164 		len = str_has_prefix(name, SYNTH_SYSTEM "/");
2165 		if (len == 0) {
2166 			synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
2167 			return -EINVAL;
2168 		}
2169 		name += len;
2170 	}
2171 
2172 	len = name - raw_command;
2173 
2174 	ret = check_command(raw_command + len);
2175 	if (ret) {
2176 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
2177 		return ret;
2178 	}
2179 
2180 	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
2181 	if (!name)
2182 		return -ENOMEM;
2183 
2184 	ret = __create_synth_event(name, fields);
2185 
2186 	kfree(name);
2187 
2188 	return ret;
2189 }
2190 
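/*
 * Illustrative sketch, not part of this file: the dynamic_events command
 * form parsed above.  Writing a line like the following (hypothetical
 * event name and fields) to <tracefs>/dynamic_events creates a synthetic
 * event; the optional group prefix, if given, must be "synthetic":
 *
 *	s:wakeup_latency u64 lat; pid_t pid
 *	s:synthetic/wakeup_latency u64 lat; pid_t pid
 *
 * Per the SYNTH_ERR_INVALID_DYN_CMD text, a '-' prefix instead of 's'
 * removes an existing event.
 */
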
2191 static int synth_event_release(struct dyn_event *ev)
2192 {
2193 	struct synth_event *event = to_synth_event(ev);
2194 	int ret;
2195 
2196 	if (event->ref)
2197 		return -EBUSY;
2198 
2199 	if (trace_event_dyn_busy(&event->call))
2200 		return -EBUSY;
2201 
2202 	ret = unregister_synth_event(event);
2203 	if (ret)
2204 		return ret;
2205 
2206 	dyn_event_remove(ev);
2207 	free_synth_event(event);
2208 	return 0;
2209 }
2210 
2211 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
2212 {
2213 	struct synth_field *field;
2214 	unsigned int i;
2215 	char *type, *t;
2216 
2217 	seq_printf(m, "%s\t", event->name);
2218 
2219 	for (i = 0; i < event->n_fields; i++) {
2220 		field = event->fields[i];
2221 
2222 		type = field->type;
2223 		t = strstr(type, "__data_loc");
2224 		if (t) { /* __data_loc belongs in format but not event desc */
2225 			t += sizeof("__data_loc");
2226 			type = t;
2227 		}
2228 
2229 		/* parameter values */
2230 		seq_printf(m, "%s %s%s", type, field->name,
2231 			   i == event->n_fields - 1 ? "" : "; ");
2232 	}
2233 
2234 	seq_putc(m, '\n');
2235 
2236 	return 0;
2237 }
2238 
2239 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
2240 {
2241 	struct synth_event *event = to_synth_event(ev);
2242 
2243 	seq_printf(m, "s:%s/", event->class.system);
2244 
2245 	return __synth_event_show(m, event);
2246 }
2247 
2248 static int synth_events_seq_show(struct seq_file *m, void *v)
2249 {
2250 	struct dyn_event *ev = v;
2251 
2252 	if (!is_synth_event(ev))
2253 		return 0;
2254 
2255 	return __synth_event_show(m, to_synth_event(ev));
2256 }
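
/*
 * Illustrative sketch, not part of this file: for a hypothetical event
 * "wakeup_latency" with fields "u64 lat; pid_t pid", the dynamic_events
 * view (synth_event_show() above) would print roughly:
 *
 *	s:synthetic/wakeup_latency	u64 lat; pid_t pid
 *
 * while the synthetic_events file (synth_events_seq_show()) omits the
 * "s:synthetic/" prefix.  For dynamic string fields, the "__data_loc"
 * marker is stripped from the displayed type.
 */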
2257 
2258 static const struct seq_operations synth_events_seq_op = {
2259 	.start	= dyn_event_seq_start,
2260 	.next	= dyn_event_seq_next,
2261 	.stop	= dyn_event_seq_stop,
2262 	.show	= synth_events_seq_show,
2263 };
2264 
2265 static int synth_events_open(struct inode *inode, struct file *file)
2266 {
2267 	int ret;
2268 
2269 	ret = security_locked_down(LOCKDOWN_TRACEFS);
2270 	if (ret)
2271 		return ret;
2272 
2273 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
2274 		ret = dyn_events_release_all(&synth_event_ops);
2275 		if (ret < 0)
2276 			return ret;
2277 	}
2278 
2279 	return seq_open(file, &synth_events_seq_op);
2280 }
2281 
2282 static ssize_t synth_events_write(struct file *file,
2283 				  const char __user *buffer,
2284 				  size_t count, loff_t *ppos)
2285 {
2286 	return trace_parse_run_command(file, buffer, count, ppos,
2287 				       create_or_delete_synth_event);
2288 }
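
/*
 * Illustrative sketch, not part of this file: commands written to the
 * tracefs 'synthetic_events' file take the field list without the "s:"
 * prefix, e.g. (hypothetical event):
 *
 *	# echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events
 *
 * and an existing event is removed by echoing its name prefixed with '!'.
 */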
2289 
2290 static const struct file_operations synth_events_fops = {
2291 	.open           = synth_events_open,
2292 	.write		= synth_events_write,
2293 	.read           = seq_read,
2294 	.llseek         = seq_lseek,
2295 	.release        = seq_release,
2296 };
2297 
2298 /*
2299  * Register dynevent at core_initcall. This allows the kernel to set up
2300  * kprobe events in postcore_initcall without tracefs.
2301  */
2302 static __init int trace_events_synth_init_early(void)
2303 {
2304 	int err = 0;
2305 
2306 	err = dyn_event_register(&synth_event_ops);
2307 	if (err)
2308 		pr_warn("Could not register synth_event_ops\n");
2309 
2310 	return err;
2311 }
2312 core_initcall(trace_events_synth_init_early);
2313 
2314 static __init int trace_events_synth_init(void)
2315 {
2316 	struct dentry *entry = NULL;
2317 	int err = 0;
2318 	err = tracing_init_dentry();
2319 	if (err)
2320 		goto err;
2321 
2322 	entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
2323 				    NULL, NULL, &synth_events_fops);
2324 	if (!entry) {
2325 		err = -ENODEV;
2326 		goto err;
2327 	}
2328 
2329 	return err;
2330  err:
2331 	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
2332 
2333 	return err;
2334 }
2335 
2336 fs_initcall(trace_events_synth_init);
2337