xref: /linux/kernel/trace/trace_printk.c (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
// SPDX-License-Identifier: GPL-2.0
/*
 * trace binary printk
 *
 * Copyright (C) 2008 Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 */
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include "trace.h"

#ifdef CONFIG_MODULES

/*
 * The trace_printk() formats of loaded modules are saved automatically
 * in struct trace_bprintk_fmt entries, which are queued on
 * trace_bprintk_fmt_list.
 */
static LIST_HEAD(trace_bprintk_fmt_list);

/* serialize accesses to trace_bprintk_fmt_list */
static DEFINE_MUTEX(btrace_mutex);

struct trace_bprintk_fmt {
	struct list_head list;
	const char *fmt;
};

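/*
 * Find a previously saved copy of @fmt on trace_bprintk_fmt_list.
 * Returns the matching entry, NULL if @fmt has not been saved yet, or
 * ERR_PTR(-EINVAL) if a module supplied a NULL format. Caller must
 * hold btrace_mutex.
 */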
static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
{
	struct trace_bprintk_fmt *pos;

	if (!fmt)
		return ERR_PTR(-EINVAL);

	list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
		if (!strcmp(pos->fmt, fmt))
			return pos;
	}
	return NULL;
}

static
void hold_module_trace_bprintk_format(const char **start, const char **end)
{
	const char **iter;
	char *fmt;

	/* allocate the trace_printk per cpu buffers */
	if (start != end)
		trace_printk_init_buffers();

	mutex_lock(&btrace_mutex);
	for (iter = start; iter < end; iter++) {
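		/*
		 * If this format was already saved, e.g. by another module
		 * using the identical string, reuse the saved copy so that
		 * the pointer stays valid after this module is unloaded.
		 */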
		struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
		if (tb_fmt) {
			if (!IS_ERR(tb_fmt))
				*iter = tb_fmt->fmt;
			continue;
		}

		fmt = NULL;
		tb_fmt = kmalloc(sizeof(*tb_fmt), GFP_KERNEL);
		if (tb_fmt) {
			fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL);
			if (fmt) {
				list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list);
				strcpy(fmt, *iter);
				tb_fmt->fmt = fmt;
			} else
				kfree(tb_fmt);
		}
		*iter = fmt;
	}
	mutex_unlock(&btrace_mutex);
}

static int module_trace_bprintk_format_notify(struct notifier_block *self,
		unsigned long val, void *data)
{
	struct module *mod = data;
	if (mod->num_trace_bprintk_fmt) {
		const char **start = mod->trace_bprintk_fmt_start;
		const char **end = start + mod->num_trace_bprintk_fmt;

		if (val == MODULE_STATE_COMING)
			hold_module_trace_bprintk_format(start, end);
	}
	return NOTIFY_OK;
}

/*
 * The debugfs/tracing/printk_formats file maps the addresses to
 * the ASCII formats that are used in the bprintk events in the
 * buffer. For userspace tools to be able to decode the events from
 * the buffer, they need to be able to map each address to its format.
 *
 * The addresses of the bprintk formats are in their own section
 * __trace_printk_fmt. But for modules we copy them into a linked list.
 * The code to print the formats and their addresses passes around the
 * address of the fmt string. If the fmt address passed into the seq
 * functions is within the kernel core __trace_printk_fmt section, then
 * it simply moves to the next pointer in that section's array.
 *
 * When the fmt pointer is outside the kernel core __trace_printk_fmt
 * section, then we need to follow the linked list pointers. The trick
 * is that we pass the address of the string to the seq function just
 * like we do for the kernel core formats. To get back the structure
 * that holds the format, we simply use container_of() and then go to
 * the next format in the list.
 */
static const char **
find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos)
{
	struct trace_bprintk_fmt *mod_fmt;

	if (list_empty(&trace_bprintk_fmt_list))
		return NULL;

	/*
	 * v will point to the address of the fmt record from t_next
	 * v will be NULL from t_start.
	 * If this is the first pointer or called from start
	 * then we need to walk the list.
	 */
	if (!v || start_index == *pos) {
		struct trace_bprintk_fmt *p;

		/* search the module list */
		list_for_each_entry(p, &trace_bprintk_fmt_list, list) {
			if (start_index == *pos)
				return &p->fmt;
			start_index++;
		}
		/* pos > index */
		return NULL;
	}

	/*
	 * v points to the address of the fmt field in the mod list
	 * structure that holds the module print format.
	 */
	mod_fmt = container_of(v, typeof(*mod_fmt), fmt);
	if (mod_fmt->list.next == &trace_bprintk_fmt_list)
		return NULL;

	mod_fmt = container_of(mod_fmt->list.next, typeof(*mod_fmt), list);

	return &mod_fmt->fmt;
}

static void format_mod_start(void)
{
	mutex_lock(&btrace_mutex);
}

static void format_mod_stop(void)
{
	mutex_unlock(&btrace_mutex);
}

#else /* !CONFIG_MODULES */
__init static int
module_trace_bprintk_format_notify(struct notifier_block *self,
		unsigned long val, void *data)
{
	return NOTIFY_OK;
}
static inline const char **
find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos)
{
	return NULL;
}
static inline void format_mod_start(void) { }
static inline void format_mod_stop(void) { }
#endif /* CONFIG_MODULES */

static bool __read_mostly trace_printk_enabled = true;

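/*
 * Kill switch for the trace_printk() family: when false, every
 * __trace_printk()/__trace_bprintk() variant below returns 0 without
 * recording anything.
 */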
void trace_printk_control(bool enabled)
{
	trace_printk_enabled = enabled;
}

__initdata_or_module static
struct notifier_block module_trace_bprintk_format_nb = {
	.notifier_call = module_trace_bprintk_format_notify,
};

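/*
 * Binary entry point: the trace_printk() macro routes compile-time
 * constant formats here (the format string itself lives in the
 * __trace_printk_fmt section), so only the raw arguments are recorded
 * by trace_vbprintk() and the format is applied later, at read time.
 */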
int __trace_bprintk(unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (unlikely(!fmt))
		return 0;

	if (!trace_printk_enabled)
		return 0;

	va_start(ap, fmt);
	ret = trace_vbprintk(ip, fmt, ap);
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bprintk);

int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap)
{
	if (unlikely(!fmt))
		return 0;

	if (!trace_printk_enabled)
		return 0;

	return trace_vbprintk(ip, fmt, ap);
}
EXPORT_SYMBOL_GPL(__ftrace_vbprintk);

int __trace_printk(unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!trace_printk_enabled)
		return 0;

	va_start(ap, fmt);
	ret = trace_vprintk(ip, fmt, ap);
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_printk);

int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
{
	if (!trace_printk_enabled)
		return 0;

	return trace_vprintk(ip, fmt, ap);
}
EXPORT_SYMBOL_GPL(__ftrace_vprintk);

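/*
 * Returns true only if @str is the actual pointer saved in the
 * __tracepoint_str section; the comparison is by address, not content.
 */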
bool trace_is_tracepoint_string(const char *str)
{
	const char **ptr;

	for (ptr = __start___tracepoint_str; ptr < __stop___tracepoint_str; ptr++) {
		if (str == *ptr)
			return true;
	}
	return false;
}

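/*
 * Map the seq_file position *pos onto the three sources of formats, in
 * order: the core kernel __trace_bprintk_fmt section, then the
 * __tracepoint_str section, and finally the list of formats saved from
 * modules. Returns a pointer to the fmt pointer, or NULL once *pos
 * runs past the last format.
 */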
static const char **find_next(void *v, loff_t *pos)
{
	const char **fmt = v;
	int start_index;
	int last_index;

	start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;

	if (*pos < start_index)
		return __start___trace_bprintk_fmt + *pos;

	/*
	 * The __tracepoint_str section is treated the same as the
	 * __trace_printk_fmt section. The difference is that the
	 * __trace_printk_fmt section should only be used by trace_printk()
	 * in a debugging environment, as if anything exists in that section
	 * the trace_printk() helper buffers are allocated, which would just
	 * waste space in a production environment.
	 *
	 * The __tracepoint_str sections on the other hand are used by
	 * tracepoints which need to map pointers to their strings to
	 * the ASCII text for userspace.
	 */
	last_index = start_index;
	start_index = __stop___tracepoint_str - __start___tracepoint_str;

	if (*pos < last_index + start_index)
		return __start___tracepoint_str + (*pos - last_index);

	start_index += last_index;
	return find_next_mod_format(start_index, v, fmt, pos);
}

static void *
t_start(struct seq_file *m, loff_t *pos)
{
	format_mod_start();
	return find_next(NULL, pos);
}

static void *t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return find_next(v, pos);
}

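/*
 * Emit one "<address> : \"<format>\"" line of printk_formats, e.g.
 * (with an illustrative address):
 *
 *	0xffffffffa0001234 : "read %d bytes\n"
 */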
static int t_show(struct seq_file *m, void *v)
{
	const char **fmt = v;
	const char *str = *fmt;
	int i;

	if (!*fmt)
		return 0;

	seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);

	/*
	 * Tabs and new lines need to be converted.
	 */
	for (i = 0; str[i]; i++) {
		switch (str[i]) {
		case '\n':
			seq_puts(m, "\\n");
			break;
		case '\t':
			seq_puts(m, "\\t");
			break;
		case '\\':
			seq_puts(m, "\\\\");
			break;
		case '"':
			seq_puts(m, "\\\"");
			break;
		default:
			seq_putc(m, str[i]);
		}
	}
	seq_puts(m, "\"\n");

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	format_mod_stop();
}

static const struct seq_operations show_format_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static int
ftrace_formats_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &show_format_seq_ops);
}

static const struct file_operations ftrace_formats_fops = {
	.open = ftrace_formats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static __always_inline bool printk_binsafe(struct trace_array *tr)
{
	/*
	 * The binary format of trace_printk() can cause a crash if used
	 * with a buffer from another boot, as the saved format pointers
	 * would no longer be valid. Force the non-binary version of
	 * trace_printk() if the trace_printk buffer is a boot mapped
	 * ring buffer.
	 */
	return !(tr->flags & TRACE_ARRAY_FL_BOOT);
}

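/*
 * Record @str (@size bytes, not counting the terminating NUL) in @tr's
 * ring buffer as a TRACE_PRINT event, appending a newline if @str does
 * not already end with one. Returns @size, or 0 if nothing was written.
 */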
int __trace_array_puts(struct trace_array *tr, unsigned long ip,
		       const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned int trace_ctx;
	int alloc;

	if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
		return 0;

	if (unlikely(tracing_selftest_running &&
		     (tr->flags & TRACE_ARRAY_FL_GLOBAL)))
		return 0;

	if (unlikely(tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	trace_ctx = tracing_gen_ctx();
	buffer = tr->array_buffer.buffer;
	guard(ring_buffer_nest)(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    trace_ctx);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_array_puts);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 */
int __trace_puts(unsigned long ip, const char *str)
{
	return __trace_array_puts(printk_trace, ip, str, strlen(str));
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string whose pointer gets written to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct trace_array *tr = READ_ONCE(printk_trace);
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned int trace_ctx;
	int size = sizeof(struct bputs_entry);

	if (!printk_binsafe(tr))
		return __trace_puts(ip, str);

	if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	trace_ctx = tracing_gen_ctx();
	buffer = tr->array_buffer.buffer;

	guard(ring_buffer_nest)(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    trace_ctx);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->str			= str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

/* created for use with alloc_percpu */
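/*
 * Four buffers, one per nesting level: a trace_printk() can be
 * interrupted by another one from softirq, irq or NMI context on the
 * same CPU, so up to four buffers may be in use at once.
 */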
struct trace_buffer_struct {
	int nesting;
	char buffer[4][TRACE_BUF_SIZE];
};

static struct trace_buffer_struct __percpu *trace_percpu_buffer;

/*
 * This allows for lockless recording.  If we're nested too deeply, then
 * this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);

	if (!trace_percpu_buffer || buffer->nesting >= 4)
		return NULL;

	buffer->nesting++;

	/* Interrupts must see nesting incremented before we use the buffer */
	barrier();
	return &buffer->buffer[buffer->nesting - 1][0];
}

static void put_trace_buf(void)
{
	/* Don't let the decrement of nesting leak before this */
	barrier();
	this_cpu_dec(trace_percpu_buffer->nesting);
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct __percpu *buffers;

	if (trace_percpu_buffer)
		return 0;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
		return -ENOMEM;

	trace_percpu_buffer = buffers;
	return 0;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("\n");
	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	if (tracing_update_buffers(NULL) < 0)
		pr_err("Failed to expand tracing buffers for trace_printk() calls\n");
	else
		buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here.
	 */
	if (system_state == SYSTEM_RUNNING)
		tracing_start_cmdline_record();
}
EXPORT_SYMBOL_GPL(trace_printk_init_buffers);

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}

/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip:    The address of the caller
 * @fmt:   The string format to write to the buffer
 * @args:  Arguments for @fmt
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct trace_array *tr = READ_ONCE(printk_trace);
	struct bprint_entry *entry;
	unsigned int trace_ctx;
	char *tbuffer;
	int len = 0, size;

	if (!printk_binsafe(tr))
		return trace_vprintk(ip, fmt, args);

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	trace_ctx = tracing_gen_ctx();
	guard(preempt_notrace)();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out_put;

	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->array_buffer.buffer;
	scoped_guard(ring_buffer_nest, buffer) {
		event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
						    trace_ctx);
		if (!event)
			goto out_put;
		entry = ring_buffer_event_data(event);
		entry->ip			= ip;
		entry->fmt			= fmt;

		memcpy(entry->buf, tbuffer, sizeof(u32) * len);
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
	}
out_put:
	put_trace_buf();

out_nobuffer:
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);

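/*
 * Non-binary core shared by trace_vprintk() and trace_array_vprintk():
 * the message is fully formatted into a per-CPU scratch buffer and the
 * resulting string is copied into @buffer.
 */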
static __printf(3, 0)
int __trace_array_vprintk(struct trace_buffer *buffer,
			  unsigned long ip, const char *fmt, va_list args)
{
	struct ring_buffer_event *event;
	int len = 0, size;
	struct print_entry *entry;
	unsigned int trace_ctx;
	char *tbuffer;

	if (unlikely(tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	trace_ctx = tracing_gen_ctx();
	guard(preempt_notrace)();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	size = sizeof(*entry) + len + 1;
	scoped_guard(ring_buffer_nest, buffer) {
		event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
						    trace_ctx);
		if (!event)
			goto out;
		entry = ring_buffer_event_data(event);
		entry->ip = ip;

		memcpy(&entry->buf, tbuffer, len + 1);
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
	}
out:
	put_trace_buf();

out_nobuffer:
	unpause_graph_tracing();

	return len;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	if (tracing_selftest_running && (tr->flags & TRACE_ARRAY_FL_GLOBAL))
		return 0;

	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
}

/**
 * trace_array_printk - Print a message to a specific instance
 * @tr: The instance trace_array descriptor
 * @ip: The instruction pointer that this is called from.
 * @fmt: The format to print (printf format)
 *
 * If a subsystem sets up its own instance, it may write printk strings
 * into its tracing instance buffer using this function. Note, this
 * function will not write into the top level buffer (use trace_printk()
 * for that), as the top level buffer should only contain events that
 * can be individually disabled. trace_printk() is only for debugging a
 * kernel and should never be incorporated into normal use.
 *
 * trace_array_printk() can be used instead, as it does not add noise to
 * the top level tracing buffer.
 *
 * Note, trace_array_init_printk() must be called on @tr before this
 * can be used.
 */
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!tr)
		return -ENOENT;

	/* This is only allowed for created instances */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(trace_array_printk);

/**
 * trace_array_init_printk - Initialize buffers for trace_array_printk()
 * @tr: The trace array to initialize the buffers for
 *
 * As trace_array_printk() only writes into instances, calls to it are
 * OK to have in the kernel (unlike trace_printk()). This needs to be
 * called before trace_array_printk() can be used on a trace_array.
 */
int trace_array_init_printk(struct trace_array *tr)
{
	if (!tr)
		return -ENOENT;

	/* This is only allowed for created instances */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return -EINVAL;

	return alloc_percpu_trace_buffer();
}
EXPORT_SYMBOL_GPL(trace_array_init_printk);

int trace_array_printk_buf(struct trace_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(printk_trace->trace_flags & TRACE_ITER(PRINTK)))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(printk_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

static __init int init_trace_printk_function_export(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("printk_formats", TRACE_MODE_READ, NULL,
				    NULL, &ftrace_formats_fops);

	return 0;
}

fs_initcall(init_trace_printk_function_export);

static __init int init_trace_printk(void)
{
	return register_module_notifier(&module_trace_bprintk_format_nb);
}

early_initcall(init_trace_printk);
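
/*
 * Usage sketch (not part of this file): with tracing configured in,
 *
 *	trace_printk("read %d bytes\n", ret);	// constant fmt -> __trace_bprintk()
 *	trace_puts("hello\n");			// constant str -> __trace_bputs()
 *
 * and the output is read from tracefs (e.g. /sys/kernel/tracing/trace),
 * while printk_formats maps the recorded format addresses back to the
 * strings themselves.
 */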