xref: /linux/kernel/trace/trace_printk.c (revision 473e470f16f98569d59adc11c4a318780fb68fe9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace binary printk
4  *
5  * Copyright (C) 2008 Lai Jiangshan <laijs@cn.fujitsu.com>
6  *
7  */
8 #include <linux/seq_file.h>
9 #include <linux/security.h>
10 #include <linux/uaccess.h>
11 #include <linux/kernel.h>
12 #include <linux/ftrace.h>
13 #include <linux/string.h>
14 #include <linux/module.h>
15 #include <linux/mutex.h>
16 #include <linux/ctype.h>
17 #include <linux/list.h>
18 #include <linux/slab.h>
19 
20 #include "trace.h"
21 
22 #ifdef CONFIG_MODULES
23 
/*
 * modules trace_printk()'s formats are autosaved in struct trace_bprintk_fmt
 * which are queued on trace_bprintk_fmt_list.
 */
static LIST_HEAD(trace_bprintk_fmt_list);

/* serialize accesses to trace_bprintk_fmt_list */
static DEFINE_MUTEX(btrace_mutex);

/*
 * One saved module format string. Entries are added when a module comes
 * up and are kept for the life of the kernel so the pointers recorded in
 * the ring buffer stay valid after the module is gone.
 */
struct trace_bprintk_fmt {
	struct list_head list;	/* link into trace_bprintk_fmt_list */
	const char *fmt;	/* kmalloc'ed copy of the module's format string */
};
37 
38 static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
39 {
40 	struct trace_bprintk_fmt *pos;
41 
42 	if (!fmt)
43 		return ERR_PTR(-EINVAL);
44 
45 	list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
46 		if (!strcmp(pos->fmt, fmt))
47 			return pos;
48 	}
49 	return NULL;
50 }
51 
/*
 * Copy a coming module's trace_printk() format strings into the global
 * list and redirect the module's section entries to those copies, so the
 * pointers recorded in the ring buffer remain valid after module unload.
 * On allocation failure the module's entry is NULLed instead.
 */
static
void hold_module_trace_bprintk_format(const char **start, const char **end)
{
	const char **iter;
	char *fmt;

	/* allocate the trace_printk per cpu buffers */
	if (start != end)
		trace_printk_init_buffers();

	mutex_lock(&btrace_mutex);
	for (iter = start; iter < end; iter++) {
		struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
		if (tb_fmt) {
			/* Already saved (or *iter was NULL: leave it NULL) */
			if (!IS_ERR(tb_fmt))
				*iter = tb_fmt->fmt;
			continue;
		}

		fmt = NULL;
		tb_fmt = kmalloc_obj(*tb_fmt);
		if (tb_fmt) {
			fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL);
			if (fmt) {
				list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list);
				strcpy(fmt, *iter);
				tb_fmt->fmt = fmt;
			} else
				kfree(tb_fmt);
		}
		/* Point the module's entry at our copy (or NULL on failure) */
		*iter = fmt;

	}
	mutex_unlock(&btrace_mutex);
}
87 
88 static int module_trace_bprintk_format_notify(struct notifier_block *self,
89 		unsigned long val, void *data)
90 {
91 	struct module *mod = data;
92 	if (mod->num_trace_bprintk_fmt) {
93 		const char **start = mod->trace_bprintk_fmt_start;
94 		const char **end = start + mod->num_trace_bprintk_fmt;
95 
96 		if (val == MODULE_STATE_COMING)
97 			hold_module_trace_bprintk_format(start, end);
98 	}
99 	return NOTIFY_OK;
100 }
101 
102 /*
103  * The debugfs/tracing/printk_formats file maps the addresses with
104  * the ASCII formats that are used in the bprintk events in the
105  * buffer. For userspace tools to be able to decode the events from
106  * the buffer, they need to be able to map the address with the format.
107  *
108  * The addresses of the bprintk formats are in their own section
109  * __trace_printk_fmt. But for modules we copy them into a link list.
110  * The code to print the formats and their addresses passes around the
111  * address of the fmt string. If the fmt address passed into the seq
112  * functions is within the kernel core __trace_printk_fmt section, then
113  * it simply uses the next pointer in the list.
114  *
115  * When the fmt pointer is outside the kernel core __trace_printk_fmt
116  * section, then we need to read the link list pointers. The trick is
117  * we pass the address of the string to the seq function just like
118  * we do for the kernel core formats. To get back the structure that
119  * holds the format, we simply use container_of() and then go to the
120  * next format in the list.
121  */
/*
 * Return the address of the fmt pointer for the module format at *pos,
 * or NULL when the list is exhausted. @start_index is the number of
 * core-kernel entries that precede the module list.
 */
static const char **
find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos)
{
	struct trace_bprintk_fmt *mod_fmt;

	if (list_empty(&trace_bprintk_fmt_list))
		return NULL;

	/*
	 * v will point to the address of the fmt record from t_next
	 * v will be NULL from t_start.
	 * If this is the first pointer or called from start
	 * then we need to walk the list.
	 */
	if (!v || start_index == *pos) {
		struct trace_bprintk_fmt *p;

		/* search the module list */
		list_for_each_entry(p, &trace_bprintk_fmt_list, list) {
			if (start_index == *pos)
				return &p->fmt;
			start_index++;
		}
		/* pos > index */
		return NULL;
	}

	/*
	 * v points to the address of the fmt field in the mod list
	 * structure that holds the module print format.
	 */
	mod_fmt = container_of(v, typeof(*mod_fmt), fmt);
	if (mod_fmt->list.next == &trace_bprintk_fmt_list)
		return NULL;

	/* Advance to the next record and hand back its fmt field */
	mod_fmt = container_of(mod_fmt->list.next, typeof(*mod_fmt), list);

	return &mod_fmt->fmt;
}
161 
/* Hold the module-format list stable while the seq_file walks it. */
static void format_mod_start(void)
{
	mutex_lock(&btrace_mutex);
}

/* Release the lock taken by format_mod_start(). */
static void format_mod_stop(void)
{
	mutex_unlock(&btrace_mutex);
}
171 
#else /* !CONFIG_MODULES */
/*
 * Without module support there are never module formats to track.
 * The notifier is only referenced at boot, so __init is safe here.
 */
__init static int
module_trace_bprintk_format_notify(struct notifier_block *self,
		unsigned long val, void *data)
{
	return NOTIFY_OK;
}
/* No module list to walk: there is never a "next" module format. */
static inline const char **
find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos)
{
	return NULL;
}
static inline void format_mod_start(void) { }
static inline void format_mod_stop(void) { }
#endif /* CONFIG_MODULES */
187 
/* Master switch: when false, all trace_printk() style calls become no-ops. */
static bool __read_mostly trace_printk_enabled = true;

/* Enable or disable trace_printk() output at run time. */
void trace_printk_control(bool enabled)
{
	trace_printk_enabled = enabled;
}
194 
/*
 * Registered from early_initcall below. __initdata_or_module: without
 * CONFIG_MODULES no notification can ever arrive after init.
 */
__initdata_or_module static
struct notifier_block module_trace_bprintk_format_nb = {
	.notifier_call = module_trace_bprintk_format_notify,
};
199 
200 __printf(2, 3)
201 int __trace_bprintk(unsigned long ip, const char *fmt, ...)
202 {
203 	int ret;
204 	va_list ap;
205 
206 	if (unlikely(!fmt))
207 		return 0;
208 
209 	if (!trace_printk_enabled)
210 		return 0;
211 
212 	va_start(ap, fmt);
213 	ret = trace_vbprintk(ip, fmt, ap);
214 	va_end(ap);
215 	return ret;
216 }
217 EXPORT_SYMBOL_GPL(__trace_bprintk);
218 
219 int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap)
220 {
221 	if (unlikely(!fmt))
222 		return 0;
223 
224 	if (!trace_printk_enabled)
225 		return 0;
226 
227 	return trace_vbprintk(ip, fmt, ap);
228 }
229 EXPORT_SYMBOL_GPL(__ftrace_vbprintk);
230 
231 int __trace_printk(unsigned long ip, const char *fmt, ...)
232 {
233 	int ret;
234 	va_list ap;
235 
236 	if (!trace_printk_enabled)
237 		return 0;
238 
239 	va_start(ap, fmt);
240 	ret = trace_vprintk(ip, fmt, ap);
241 	va_end(ap);
242 	return ret;
243 }
244 EXPORT_SYMBOL_GPL(__trace_printk);
245 
246 int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
247 {
248 	if (!trace_printk_enabled)
249 		return 0;
250 
251 	return trace_vprintk(ip, fmt, ap);
252 }
253 EXPORT_SYMBOL_GPL(__ftrace_vprintk);
254 
255 bool trace_is_tracepoint_string(const char *str)
256 {
257 	const char **ptr = __start___tracepoint_str;
258 
259 	for (ptr = __start___tracepoint_str; ptr < __stop___tracepoint_str; ptr++) {
260 		if (str == *ptr)
261 			return true;
262 	}
263 	return false;
264 }
265 
/*
 * Map a seq_file position onto the address of a format pointer. The
 * namespace is: core __trace_printk_fmt entries first, then core
 * __tracepoint_str entries, then the module format list.
 */
static const char **find_next(void *v, loff_t *pos)
{
	const char **fmt = v;
	int start_index;
	int last_index;

	/* Number of core __trace_printk_fmt entries */
	start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;

	if (*pos < start_index)
		return __start___trace_bprintk_fmt + *pos;

	/*
	 * The __tracepoint_str section is treated the same as the
	 * __trace_printk_fmt section. The difference is that the
	 * __trace_printk_fmt section should only be used by trace_printk()
	 * in a debugging environment, as if anything exists in that section
	 * the trace_prink() helper buffers are allocated, which would just
	 * waste space in a production environment.
	 *
	 * The __tracepoint_str sections on the other hand are used by
	 * tracepoints which need to map pointers to their strings to
	 * the ASCII text for userspace.
	 */
	last_index = start_index;
	start_index = __stop___tracepoint_str - __start___tracepoint_str;

	if (*pos < last_index + start_index)
		return __start___tracepoint_str + (*pos - last_index);

	/* Past both core sections: continue into the module list */
	start_index += last_index;
	return find_next_mod_format(start_index, v, fmt, pos);
}
298 
/* seq_file start: lock the module-format list and find the entry at *pos. */
static void *
t_start(struct seq_file *m, loff_t *pos)
{
	format_mod_start();
	return find_next(NULL, pos);
}

/* seq_file next: advance the position and locate the following format. */
static void *t_next(struct seq_file *m, void * v, loff_t *pos)
{
	(*pos)++;
	return find_next(v, pos);
}
311 
/*
 * Show one printk_formats entry: the format's address followed by its
 * C-escaped, double-quoted string. @v is the address of the fmt pointer
 * (core section entry or module list record).
 */
static int t_show(struct seq_file *m, void *v)
{
	const char **fmt = v;
	const char *str = *fmt;
	int i;

	/* A module format may have been NULLed on allocation failure */
	if (!*fmt)
		return 0;

	seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);

	/*
	 * Tabs, new lines, quotes and backslashes need to be converted
	 * so userspace tools can parse the quoted string back.
	 */
	for (i = 0; str[i]; i++) {
		switch (str[i]) {
		case '\n':
			seq_puts(m, "\\n");
			break;
		case '\t':
			seq_puts(m, "\\t");
			break;
		case '\\':
			/* Double the backslash so the escape survives */
			seq_puts(m, "\\\\");
			break;
		case '"':
			seq_puts(m, "\\\"");
			break;
		default:
			seq_putc(m, str[i]);
		}
	}
	seq_puts(m, "\"\n");

	return 0;
}
348 
/* seq_file stop: drop the lock taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	format_mod_stop();
}

/* Iterator for the printk_formats seq_file */
static const struct seq_operations show_format_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};
360 
361 static int
362 ftrace_formats_open(struct inode *inode, struct file *file)
363 {
364 	int ret;
365 
366 	ret = security_locked_down(LOCKDOWN_TRACEFS);
367 	if (ret)
368 		return ret;
369 
370 	return seq_open(file, &show_format_seq_ops);
371 }
372 
/* File operations for tracefs printk_formats */
static const struct file_operations ftrace_formats_fops = {
	.open = ftrace_formats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* True when it is safe to record binary (pointer-based) printk events. */
static __always_inline bool printk_binsafe(struct trace_array *tr)
{
	/*
	 * The binary format of traceprintk can cause a crash if used
	 * by a buffer from another boot. Force the use of the
	 * non binary version of trace_printk if the trace_printk
	 * buffer is a boot mapped ring buffer.
	 */
	return !(tr->flags & TRACE_ARRAY_FL_BOOT);
}
390 
/*
 * Write @size bytes of @str as a TRACE_PRINT event into @tr's buffer,
 * appending a newline if the string does not already end with one.
 * Returns @size on success, 0 when disabled or reservation fails.
 */
int __trace_array_puts(struct trace_array *tr, unsigned long ip,
		       const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned int trace_ctx;
	int alloc;

	if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
		return 0;

	/* Don't pollute the global array while selftests run */
	if (unlikely(tracing_selftest_running &&
		     (tr->flags & TRACE_ARRAY_FL_GLOBAL)))
		return 0;

	if (unlikely(tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	trace_ctx = tracing_gen_ctx();
	buffer = tr->array_buffer.buffer;
	guard(ring_buffer_nest)(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    trace_ctx);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_array_puts);
437 
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 *
 * Returns the number of bytes written (see __trace_array_puts()).
 */
int __trace_puts(unsigned long ip, const char *str)
{
	return __trace_array_puts(printk_trace, ip, str, strlen(str));
}
EXPORT_SYMBOL_GPL(__trace_puts);
448 
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 *
 * Records only the pointer to @str (binary event). Falls back to the
 * string-copying __trace_puts() when the buffer is boot-mapped and
 * pointers from this boot would be meaningless.
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct trace_array *tr = READ_ONCE(printk_trace);
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned int trace_ctx;
	int size = sizeof(struct bputs_entry);

	if (!printk_binsafe(tr))
		return __trace_puts(ip, str);

	if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	trace_ctx = tracing_gen_ctx();
	buffer = tr->array_buffer.buffer;

	guard(ring_buffer_nest)(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    trace_ctx);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->str			= str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
491 
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	int nesting;			/* how many contexts currently use this CPU's buffers */
	char buffer[4][TRACE_BUF_SIZE];	/* one buffer per nesting level (4 matches the check in get_trace_buf) */
};

/* Per-CPU scratch buffers for trace_printk(); NULL until first allocated. */
static struct trace_buffer_struct __percpu *trace_percpu_buffer;
499 
/*
 * This allows for lockless recording.  If we're nested too deeply, then
 * this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);

	/* Not allocated yet, or all four nesting levels are in use */
	if (!trace_percpu_buffer || buffer->nesting >= 4)
		return NULL;

	buffer->nesting++;

	/* Interrupts must see nesting incremented before we use the buffer */
	barrier();
	return &buffer->buffer[buffer->nesting - 1][0];
}

/* Release the buffer claimed by the matching get_trace_buf(). */
static void put_trace_buf(void)
{
	/* Don't let the decrement of nesting leak before this */
	barrier();
	this_cpu_dec(trace_percpu_buffer->nesting);
}
524 
525 static int alloc_percpu_trace_buffer(void)
526 {
527 	struct trace_buffer_struct __percpu *buffers;
528 
529 	if (trace_percpu_buffer)
530 		return 0;
531 
532 	buffers = alloc_percpu(struct trace_buffer_struct);
533 	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
534 		return -ENOMEM;
535 
536 	trace_percpu_buffer = buffers;
537 	return 0;
538 }
539 
/* Non-zero once the trace_printk buffers have been successfully set up. */
static int buffers_allocated;

/*
 * Allocate trace_printk() resources: per-CPU scratch buffers, expanded
 * ring buffers, and cmdline recording. Idempotent; loudly warns because
 * trace_printk() should never ship in production kernels.
 */
void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("\n");
	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	if (tracing_update_buffers(NULL) < 0)
		pr_err("Failed to expand tracing buffers for trace_printk() calls\n");
	else
		buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here.
	 */
	if (system_state == SYSTEM_RUNNING)
		tracing_start_cmdline_record();
}
EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
581 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
582 
/* Start recording task comms, but only if trace_printk is in use. */
void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}
590 
591 void trace_printk_start_stop_comm(int enabled)
592 {
593 	if (!buffers_allocated)
594 		return;
595 
596 	if (enabled)
597 		tracing_start_cmdline_record();
598 	else
599 		tracing_stop_cmdline_record();
600 }
601 
/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip:    The address of the caller
 * @fmt:   The string format to write to the buffer
 * @args:  Arguments for @fmt
 *
 * Stores @fmt as a pointer plus vbin_printf()-packed arguments. Falls
 * back to the ASCII trace_vprintk() when binary events are unsafe
 * (boot-mapped buffer). Returns the number of words written, or 0.
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct trace_array *tr = READ_ONCE(printk_trace);
	struct bprint_entry *entry;
	unsigned int trace_ctx;
	char *tbuffer;
	int len = 0, size;

	if (!printk_binsafe(tr))
		return trace_vprintk(ip, fmt, args);

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	trace_ctx = tracing_gen_ctx();
	guard(preempt_notrace)();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	/* Pack the varargs into the per-CPU scratch buffer */
	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out_put;

	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->array_buffer.buffer;
	scoped_guard(ring_buffer_nest, buffer) {
		event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
						    trace_ctx);
		if (!event)
			goto out_put;
		entry = ring_buffer_event_data(event);
		entry->ip			= ip;
		entry->fmt			= fmt;

		memcpy(entry->buf, tbuffer, sizeof(u32) * len);
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
	}
out_put:
	put_trace_buf();

out_nobuffer:
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
664 EXPORT_SYMBOL_GPL(trace_vbprintk);
665 
/*
 * Format @fmt/@args into a per-CPU scratch buffer and record the result
 * as an ASCII TRACE_PRINT event in @buffer. Returns bytes written, or 0.
 */
static __printf(3, 0)
int __trace_array_vprintk(struct trace_buffer *buffer,
			  unsigned long ip, const char *fmt, va_list args)
{
	struct ring_buffer_event *event;
	int len = 0, size;
	struct print_entry *entry;
	unsigned int trace_ctx;
	char *tbuffer;

	if (unlikely(tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	trace_ctx = tracing_gen_ctx();
	guard(preempt_notrace)();


	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	/* vscnprintf bounds and NUL-terminates the output */
	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	size = sizeof(*entry) + len + 1;
	scoped_guard(ring_buffer_nest, buffer) {
		event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
						    trace_ctx);
		if (!event)
			goto out;
		entry = ring_buffer_event_data(event);
		entry->ip = ip;

		/* len + 1 copies the trailing NUL as well */
		memcpy(&entry->buf, tbuffer, len + 1);
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
	}
out:
	put_trace_buf();

out_nobuffer:
	unpause_graph_tracing();

	return len;
}
715 
/*
 * va_list entry point for writing into a specific trace_array; skips
 * the global array while selftests run to keep their results clean.
 */
int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	if (tracing_selftest_running && (tr->flags & TRACE_ARRAY_FL_GLOBAL))
		return 0;

	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
}
724 
725 /**
726  * trace_array_printk - Print a message to a specific instance
727  * @tr: The instance trace_array descriptor
728  * @ip: The instruction pointer that this is called from.
729  * @fmt: The format to print (printf format)
730  *
731  * If a subsystem sets up its own instance, they have the right to
732  * printk strings into their tracing instance buffer using this
733  * function. Note, this function will not write into the top level
734  * buffer (use trace_printk() for that), as writing into the top level
735  * buffer should only have events that can be individually disabled.
736  * trace_printk() is only used for debugging a kernel, and should not
737  * be ever incorporated in normal use.
738  *
739  * trace_array_printk() can be used, as it will not add noise to the
740  * top level tracing buffer.
741  *
742  * Note, trace_array_init_printk() must be called on @tr before this
743  * can be used.
744  */
745 int trace_array_printk(struct trace_array *tr,
746 		       unsigned long ip, const char *fmt, ...)
747 {
748 	int ret;
749 	va_list ap;
750 
751 	if (!tr)
752 		return -ENOENT;
753 
754 	/* This is only allowed for created instances */
755 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
756 		return 0;
757 
758 	if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
759 		return 0;
760 
761 	va_start(ap, fmt);
762 	ret = trace_array_vprintk(tr, ip, fmt, ap);
763 	va_end(ap);
764 	return ret;
765 }
766 EXPORT_SYMBOL_GPL(trace_array_printk);
767 
768 /**
769  * trace_array_init_printk - Initialize buffers for trace_array_printk()
770  * @tr: The trace array to initialize the buffers for
771  *
772  * As trace_array_printk() only writes into instances, they are OK to
773  * have in the kernel (unlike trace_printk()). This needs to be called
774  * before trace_array_printk() can be used on a trace_array.
775  */
776 int trace_array_init_printk(struct trace_array *tr)
777 {
778 	if (!tr)
779 		return -ENOENT;
780 
781 	/* This is only allowed for created instances */
782 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
783 		return -EINVAL;
784 
785 	return alloc_percpu_trace_buffer();
786 }
787 EXPORT_SYMBOL_GPL(trace_array_init_printk);
788 
789 int trace_array_printk_buf(struct trace_buffer *buffer,
790 			   unsigned long ip, const char *fmt, ...)
791 {
792 	int ret;
793 	va_list ap;
794 
795 	if (!(printk_trace->trace_flags & TRACE_ITER(PRINTK)))
796 		return 0;
797 
798 	va_start(ap, fmt);
799 	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
800 	va_end(ap);
801 	return ret;
802 }
803 
/* ASCII trace_printk() backend: write into the printk_trace array. */
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(printk_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
809 
/* Create the tracefs printk_formats file once tracefs is available. */
static __init int init_trace_printk_function_export(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("printk_formats", TRACE_MODE_READ, NULL,
				    NULL, &ftrace_formats_fops);

	return 0;
}

fs_initcall(init_trace_printk_function_export);
825 
/* Register the module notifier early so no module format is missed. */
static __init int init_trace_printk(void)
{
	return register_module_notifier(&module_trace_bprintk_format_nb);
}

early_initcall(init_trace_printk);
832