xref: /linux/kernel/printk/printk.c (revision 6f7e6393d1ce636bb7ec77a7fe7b77458fddf701)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/kernel/printk.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *
7  * Modified to make sys_syslog() more flexible: added commands to
8  * return the last 4k of kernel messages, regardless of whether
9  * they've been read or not.  Added option to suppress kernel printk's
10  * to the console.  Added hook for sending the console messages
11  * elsewhere, in preparation for a serial line console (someday).
12  * Ted Ts'o, 2/11/93.
13  * Modified for sysctl support, 1/8/97, Chris Horn.
14  * Fixed SMP synchronization, 08/08/99, Manfred Spraul
15  *     manfred@colorfullife.com
16  * Rewrote bits to get rid of console_lock
17  *	01Mar01 Andrew Morton
18  */
19 
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 
22 #include <linux/kernel.h>
23 #include <linux/mm.h>
24 #include <linux/tty.h>
25 #include <linux/tty_driver.h>
26 #include <linux/console.h>
27 #include <linux/init.h>
28 #include <linux/jiffies.h>
29 #include <linux/nmi.h>
30 #include <linux/module.h>
31 #include <linux/moduleparam.h>
32 #include <linux/delay.h>
33 #include <linux/smp.h>
34 #include <linux/security.h>
35 #include <linux/memblock.h>
36 #include <linux/syscalls.h>
37 #include <linux/syscore_ops.h>
38 #include <linux/vmcore_info.h>
39 #include <linux/ratelimit.h>
40 #include <linux/kmsg_dump.h>
41 #include <linux/syslog.h>
42 #include <linux/cpu.h>
43 #include <linux/rculist.h>
44 #include <linux/poll.h>
45 #include <linux/irq_work.h>
46 #include <linux/ctype.h>
47 #include <linux/uio.h>
48 #include <linux/sched/clock.h>
49 #include <linux/sched/debug.h>
50 #include <linux/sched/task_stack.h>
51 #include <linux/panic.h>
52 
53 #include <linux/uaccess.h>
54 #include <asm/sections.h>
55 
56 #include <trace/events/initcall.h>
57 #define CREATE_TRACE_POINTS
58 #include <trace/events/printk.h>
59 
60 #include "printk_ringbuffer.h"
61 #include "console_cmdline.h"
62 #include "braille.h"
63 #include "internal.h"
64 
65 int console_printk[4] = {
66 	CONSOLE_LOGLEVEL_DEFAULT,	/* console_loglevel */
67 	MESSAGE_LOGLEVEL_DEFAULT,	/* default_message_loglevel */
68 	CONSOLE_LOGLEVEL_MIN,		/* minimum_console_loglevel */
69 	CONSOLE_LOGLEVEL_DEFAULT,	/* default_console_loglevel */
70 };
71 EXPORT_SYMBOL_GPL(console_printk);
72 
73 atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
74 EXPORT_SYMBOL(ignore_console_lock_warning);
75 
76 EXPORT_TRACEPOINT_SYMBOL_GPL(console);
77 
78 /*
79  * Low-level drivers may need this to know whether they can schedule
80  * in their unblank() callback or not. So let's export it.
81  */
82 int oops_in_progress;
83 EXPORT_SYMBOL(oops_in_progress);
84 
85 /*
86  * console_mutex protects console_list updates and console->flags updates.
87  * The flags are synchronized only for consoles that are registered, i.e.
88  * accessible via the console list.
89  */
90 static DEFINE_MUTEX(console_mutex);
91 
92 /*
93  * console_sem protects updates to console->seq
94  * and also provides serialization for console printing.
95  */
96 static DEFINE_SEMAPHORE(console_sem, 1);
97 HLIST_HEAD(console_list);
98 EXPORT_SYMBOL_GPL(console_list);
99 DEFINE_STATIC_SRCU(console_srcu);
100 
101 /*
102  * The system may need to suppress printk messages under certain
103  * circumstances, e.g. after a kernel panic.
104  */
105 int __read_mostly suppress_printk;
106 
107 #ifdef CONFIG_LOCKDEP
108 static struct lockdep_map console_lock_dep_map = {
109 	.name = "console_lock"
110 };
111 
112 void lockdep_assert_console_list_lock_held(void)
113 {
114 	lockdep_assert_held(&console_mutex);
115 }
116 EXPORT_SYMBOL(lockdep_assert_console_list_lock_held);
117 #endif
118 
119 #ifdef CONFIG_DEBUG_LOCK_ALLOC
120 bool console_srcu_read_lock_is_held(void)
121 {
122 	return srcu_read_lock_held(&console_srcu);
123 }
124 EXPORT_SYMBOL(console_srcu_read_lock_is_held);
125 #endif
126 
127 enum devkmsg_log_bits {
128 	__DEVKMSG_LOG_BIT_ON = 0,
129 	__DEVKMSG_LOG_BIT_OFF,
130 	__DEVKMSG_LOG_BIT_LOCK,
131 };
132 
133 enum devkmsg_log_masks {
134 	DEVKMSG_LOG_MASK_ON             = BIT(__DEVKMSG_LOG_BIT_ON),
135 	DEVKMSG_LOG_MASK_OFF            = BIT(__DEVKMSG_LOG_BIT_OFF),
136 	DEVKMSG_LOG_MASK_LOCK           = BIT(__DEVKMSG_LOG_BIT_LOCK),
137 };
138 
139 /* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
140 #define DEVKMSG_LOG_MASK_DEFAULT	0
141 
142 static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
143 
144 static int __control_devkmsg(char *str)
145 {
146 	size_t len;
147 
148 	if (!str)
149 		return -EINVAL;
150 
151 	len = str_has_prefix(str, "on");
152 	if (len) {
153 		devkmsg_log = DEVKMSG_LOG_MASK_ON;
154 		return len;
155 	}
156 
157 	len = str_has_prefix(str, "off");
158 	if (len) {
159 		devkmsg_log = DEVKMSG_LOG_MASK_OFF;
160 		return len;
161 	}
162 
163 	len = str_has_prefix(str, "ratelimit");
164 	if (len) {
165 		devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
166 		return len;
167 	}
168 
169 	return -EINVAL;
170 }
171 
172 static int __init control_devkmsg(char *str)
173 {
174 	if (__control_devkmsg(str) < 0) {
175 		pr_warn("printk.devkmsg: bad option string '%s'\n", str);
176 		return 1;
177 	}
178 
179 	/*
180 	 * Set sysctl string accordingly:
181 	 */
182 	if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
183 		strscpy(devkmsg_log_str, "on");
184 	else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
185 		strscpy(devkmsg_log_str, "off");
186 	/* else "ratelimit" which is set by default. */
187 
188 	/*
189 	 * Sysctl cannot change it anymore. The kernel command line setting of
190 	 * this parameter is to force the setting to be permanent throughout the
191 	 * runtime of the system. This is a precautionary measure against userspace
192 	 * trying to be a smarta** and attempting to change it up on us.
193 	 */
194 	devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;
195 
196 	return 1;
197 }
198 __setup("printk.devkmsg=", control_devkmsg);
199 
200 char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
201 #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
202 int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
203 			      void *buffer, size_t *lenp, loff_t *ppos)
204 {
205 	char old_str[DEVKMSG_STR_MAX_SIZE];
206 	unsigned int old;
207 	int err;
208 
209 	if (write) {
210 		if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
211 			return -EINVAL;
212 
213 		old = devkmsg_log;
214 		strscpy(old_str, devkmsg_log_str);
215 	}
216 
217 	err = proc_dostring(table, write, buffer, lenp, ppos);
218 	if (err)
219 		return err;
220 
221 	if (write) {
222 		err = __control_devkmsg(devkmsg_log_str);
223 
224 		/*
225 		 * Do not accept an unknown string OR a known string with
226 		 * trailing crap...
227 		 */
228 		if (err < 0 || (err + 1 != *lenp)) {
229 
230 			/* ... and restore old setting. */
231 			devkmsg_log = old;
232 			strscpy(devkmsg_log_str, old_str);
233 
234 			return -EINVAL;
235 		}
236 	}
237 
238 	return 0;
239 }
240 #endif /* CONFIG_PRINTK && CONFIG_SYSCTL */
241 
242 /**
243  * console_list_lock - Lock the console list
244  *
245  * For console list or console->flags updates
246  */
247 void console_list_lock(void)
248 	__acquires(&console_mutex)
249 {
250 	/*
251 	 * In unregister_console() and console_force_preferred_locked(),
252 	 * synchronize_srcu() is called with the console_list_lock held.
253 	 * Therefore the console_list_lock must not be taken while the
254 	 * srcu read lock is held.
255 	 *
256 	 * Detecting if this context is really in the read-side critical
257 	 * section is only possible if the appropriate debug options are
258 	 * enabled.
259 	 */
260 	WARN_ON_ONCE(debug_lockdep_rcu_enabled() &&
261 		     srcu_read_lock_held(&console_srcu));
262 
263 	mutex_lock(&console_mutex);
264 }
265 EXPORT_SYMBOL(console_list_lock);
266 
267 /**
268  * console_list_unlock - Unlock the console list
269  *
270  * Counterpart to console_list_lock()
271  */
272 void console_list_unlock(void)
273 	__releases(&console_mutex)
274 {
275 	mutex_unlock(&console_mutex);
276 }
277 EXPORT_SYMBOL(console_list_unlock);
278 
279 /**
280  * console_srcu_read_lock - Register a new reader for the
281  *	SRCU-protected console list
282  *
283  * Use for_each_console_srcu() to iterate the console list
284  *
285  * Context: Any context.
286  * Return: A cookie to pass to console_srcu_read_unlock().
287  */
288 int console_srcu_read_lock(void)
289 	__acquires(&console_srcu)
290 {
291 	return srcu_read_lock_nmisafe(&console_srcu);
292 }
293 EXPORT_SYMBOL(console_srcu_read_lock);
294 
295 /**
296  * console_srcu_read_unlock - Unregister an old reader from
297  *	the SRCU-protected console list
298  * @cookie: cookie returned from console_srcu_read_lock()
299  *
300  * Counterpart to console_srcu_read_lock()
301  */
302 void console_srcu_read_unlock(int cookie)
303 	__releases(&console_srcu)
304 {
305 	srcu_read_unlock_nmisafe(&console_srcu, cookie);
306 }
307 EXPORT_SYMBOL(console_srcu_read_unlock);
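/*
 * A typical reader (sketch): take the SRCU read lock, walk the list with
 * for_each_console_srcu(), and read console flags via
 * console_srcu_read_flags():
 *
 *	struct console *con;
 *	int cookie;
 *
 *	cookie = console_srcu_read_lock();
 *	for_each_console_srcu(con) {
 *		if (!(console_srcu_read_flags(con) & CON_ENABLED))
 *			continue;
 *		// use @con
 *	}
 *	console_srcu_read_unlock(cookie);
 */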
308 
309 /*
310  * Helper macros to handle lockdep when locking/unlocking console_sem. We use
311  * macros instead of functions so that _RET_IP_ contains useful information.
312  */
313 #define down_console_sem() do { \
314 	down(&console_sem);\
315 	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
316 } while (0)
317 
318 static int __down_trylock_console_sem(unsigned long ip)
319 {
320 	int lock_failed;
321 	unsigned long flags;
322 
323 	/*
324 	 * Here and in __up_console_sem() we need to be in safe mode,
325 	 * because a spin dump/WARN/etc. from under the console lock will
326 	 * deadlock in printk()->down_trylock_console_sem() otherwise.
327 	 */
328 	printk_safe_enter_irqsave(flags);
329 	lock_failed = down_trylock(&console_sem);
330 	printk_safe_exit_irqrestore(flags);
331 
332 	if (lock_failed)
333 		return 1;
334 	mutex_acquire(&console_lock_dep_map, 0, 1, ip);
335 	return 0;
336 }
337 #define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)
338 
339 static void __up_console_sem(unsigned long ip)
340 {
341 	unsigned long flags;
342 
343 	mutex_release(&console_lock_dep_map, ip);
344 
345 	printk_safe_enter_irqsave(flags);
346 	up(&console_sem);
347 	printk_safe_exit_irqrestore(flags);
348 }
349 #define up_console_sem() __up_console_sem(_RET_IP_)
350 
351 /*
352  * This is used for debugging the mess that is the VT code by
353  * keeping track of whether we have the console semaphore held. It's
354  * definitely not the perfect debug tool (we don't know if _WE_
355  * hold it and are racing), but it helps track those weird code
356  * paths in the console code where we end up in places I want
357  * locked without the console semaphore held.
358  */
359 static int console_locked;
360 
361 /*
362  *	Array of consoles built from command line options (console=)
363  */
364 
365 #define MAX_CMDLINECONSOLES 8
366 
367 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
368 
369 static int preferred_console = -1;
370 int console_set_on_cmdline;
371 EXPORT_SYMBOL(console_set_on_cmdline);
372 
373 /* Flag: console code may call schedule() */
374 static int console_may_schedule;
375 
376 enum con_msg_format_flags {
377 	MSG_FORMAT_DEFAULT	= 0,
378 	MSG_FORMAT_SYSLOG	= (1 << 0),
379 };
380 
381 static int console_msg_format = MSG_FORMAT_DEFAULT;
382 
383 /*
384  * The printk log buffer consists of a sequenced collection of records, each
385  * containing variable length message text. Every record also contains its
386  * own meta-data (@info).
387  *
388  * Every record meta-data carries the timestamp in nanoseconds, as well as
389  * the standard userspace syslog level and syslog facility. The usual kernel
390  * messages use LOG_KERN; userspace-injected messages always carry a matching
391  * syslog facility, by default LOG_USER. The origin of every message can be
392  * reliably determined that way.
393  *
394  * The human readable log message of a record is available in @text, the
395  * length of the message text in @text_len. The stored message is not
396  * terminated.
397  *
398  * Optionally, a record can carry a dictionary of properties (key/value
399  * pairs), to provide userspace with a machine-readable message context.
400  *
401  * Examples for well-defined, commonly used property names are:
402  *   DEVICE=b12:8               device identifier
403  *                                b12:8         block dev_t
404  *                                c127:3        char dev_t
405  *                                n8            netdev ifindex
406  *                                +sound:card0  subsystem:devname
407  *   SUBSYSTEM=pci              driver-core subsystem name
408  *
409  * Valid characters in property names are [a-zA-Z0-9.-_]. Property names
410  * and values are terminated by a '\0' character.
411  *
412  * Example of record values:
413  *   record.text_buf                = "it's a line" (unterminated)
414  *   record.info.seq                = 56
415  *   record.info.ts_nsec            = 36863
416  *   record.info.text_len           = 11
417  *   record.info.facility           = 0 (LOG_KERN)
418  *   record.info.flags              = 0
419  *   record.info.level              = 3 (LOG_ERR)
420  *   record.info.caller_id          = 299 (task 299)
421  *   record.info.dev_info.subsystem = "pci" (terminated)
422  *   record.info.dev_info.device    = "+pci:0000:00:01.0" (terminated)
423  *
424  * The 'struct printk_info' buffer must never be directly exported to
425  * userspace, it is a kernel-private implementation detail that might
426  * need to be changed in the future, when the requirements change.
427  *
428  * /dev/kmsg exports the structured data in the following line format:
429  *   "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
430  *
431  * Users of the export format should ignore possible additional values
432  * separated by ',', and find the message after the ';' character.
433  *
434  * The optional key/value pairs are attached as continuation lines starting
435  * with a space character and terminated by a newline. All possible
436  * non-prinatable characters are escaped in the "\xff" notation.
437  */
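/*
 * Concretely, a /dev/kmsg reader might see:
 *
 *   6,339,5140900,-;NET: Registered protocol family 10
 *    SUBSYSTEM=net
 *
 * i.e. facility 0 (LOG_KERN) and level 6 packed as "6", sequence number
 * 339, a 5140900 us timestamp, no continuation flag, the message text
 * after ';', and one dictionary line (values here are illustrative).
 */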
438 
439 /* syslog_lock protects syslog_* variables and write access to clear_seq. */
440 static DEFINE_MUTEX(syslog_lock);
441 
442 /*
443  * Specifies if a legacy console is registered. If legacy consoles are
444  * present, it is necessary to perform the console lock/unlock dance
445  * whenever console flushing should occur.
446  */
447 bool have_legacy_console;
448 
449 /*
450  * Specifies if an nbcon console is registered. If nbcon consoles are present,
451  * synchronous printing of legacy consoles will not occur during panic until
452  * the backtrace has been stored to the ringbuffer.
453  */
454 bool have_nbcon_console;
455 
456 /*
457  * Specifies if a boot console is registered. If boot consoles are present,
458  * nbcon consoles cannot print simultaneously and must be synchronized by
459  * the console lock. This is because boot consoles and nbcon consoles may
460  * have mapped the same hardware.
461  */
462 bool have_boot_console;
463 
464 /* See printk_legacy_allow_panic_sync() for details. */
465 bool legacy_allow_panic_sync;
466 
467 /* Avoid using irq_work when suspending. */
468 bool console_irqwork_blocked;
469 
470 #ifdef CONFIG_PRINTK
471 DECLARE_WAIT_QUEUE_HEAD(log_wait);
472 static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
473 /* All 3 protected by @syslog_lock. */
474 /* the next printk record to read by syslog(READ) or /proc/kmsg */
475 static u64 syslog_seq;
476 static size_t syslog_partial;
477 static bool syslog_time;
478 
479 /* True when _all_ printer threads are available for printing. */
480 bool printk_kthreads_running;
481 
482 struct latched_seq {
483 	seqcount_latch_t	latch;
484 	u64			val[2];
485 };
486 
487 /*
488  * The next printk record to read after the last 'clear' command. There are
489  * two copies (updated with seqcount_latch) so that reads can locklessly
490  * access a valid value. Writers are synchronized by @syslog_lock.
491  */
492 static struct latched_seq clear_seq = {
493 	.latch		= SEQCNT_LATCH_ZERO(clear_seq.latch),
494 	.val[0]		= 0,
495 	.val[1]		= 0,
496 };
497 
498 #define LOG_LEVEL(v)		((v) & 0x07)
499 #define LOG_FACILITY(v)		((v) >> 3 & 0xff)
500 
501 /* record buffer */
502 #define LOG_ALIGN __alignof__(unsigned long)
503 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
504 #define LOG_BUF_LEN_MAX ((u32)1 << 31)
505 static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
506 static char *log_buf = __log_buf;
507 static u32 log_buf_len = __LOG_BUF_LEN;
508 
509 /*
510  * Define the average message size. This only affects the number of
511  * descriptors that will be available. Underestimating is better than
512  * overestimating (too many available descriptors is better than not enough).
513  */
514 #define PRB_AVGBITS 5	/* 32 character average length */
515 
516 #if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS
517 #error CONFIG_LOG_BUF_SHIFT value too small.
518 #endif
519 _DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
520 		 PRB_AVGBITS, &__log_buf[0]);
521 
522 static struct printk_ringbuffer printk_rb_dynamic;
523 
524 struct printk_ringbuffer *prb = &printk_rb_static;
525 
526 /*
527  * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
528  * per_cpu_areas are initialised. This variable is set to true when
529  * it's safe to access per-CPU data.
530  */
531 static bool __printk_percpu_data_ready __ro_after_init;
532 
533 bool printk_percpu_data_ready(void)
534 {
535 	return __printk_percpu_data_ready;
536 }
537 
538 /* Must be called under syslog_lock. */
539 static void latched_seq_write(struct latched_seq *ls, u64 val)
540 {
541 	write_seqcount_latch_begin(&ls->latch);
542 	ls->val[0] = val;
543 	write_seqcount_latch(&ls->latch);
544 	ls->val[1] = val;
545 	write_seqcount_latch_end(&ls->latch);
546 }
547 
548 /* Can be called from any context. */
549 static u64 latched_seq_read_nolock(struct latched_seq *ls)
550 {
551 	unsigned int seq;
552 	unsigned int idx;
553 	u64 val;
554 
555 	do {
556 		seq = read_seqcount_latch(&ls->latch);
557 		idx = seq & 0x1;
558 		val = ls->val[idx];
559 	} while (read_seqcount_latch_retry(&ls->latch, seq));
560 
561 	return val;
562 }
563 
564 /* Return log buffer address */
565 char *log_buf_addr_get(void)
566 {
567 	return log_buf;
568 }
569 
570 /* Return log buffer size */
571 u32 log_buf_len_get(void)
572 {
573 	return log_buf_len;
574 }
575 
576 /*
577  * Define how much of the log buffer we could take at maximum. The value
578  * must be greater than two. Note that only half of the buffer is available
579  * when the index points to the middle.
580  */
581 #define MAX_LOG_TAKE_PART 4
582 static const char trunc_msg[] = "<truncated>";
583 
584 static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
585 {
586 	/*
587 	 * The message should not take the whole buffer. Otherwise, it might
588 	 * get removed too soon.
589 	 */
590 	u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
591 
592 	if (*text_len > max_text_len)
593 		*text_len = max_text_len;
594 
595 	/* enable the warning message (if there is room) */
596 	*trunc_msg_len = strlen(trunc_msg);
597 	if (*text_len >= *trunc_msg_len)
598 		*text_len -= *trunc_msg_len;
599 	else
600 		*trunc_msg_len = 0;
601 }
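/*
 * Worked example: with a 64 KiB log buffer, max_text_len is 16 KiB.
 * A 20 KiB message is first clamped to 16 KiB and then shortened by
 * strlen("<truncated>") bytes so the marker still fits in the record.
 */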
602 
603 int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
604 
605 static int syslog_action_restricted(int type)
606 {
607 	if (dmesg_restrict)
608 		return 1;
609 	/*
610 	 * Unless restricted, we allow "read all" and "get buffer size"
611 	 * for everybody.
612 	 */
613 	return type != SYSLOG_ACTION_READ_ALL &&
614 	       type != SYSLOG_ACTION_SIZE_BUFFER;
615 }
616 
617 static int check_syslog_permissions(int type, int source)
618 {
619 	/*
620 	 * If this is from /proc/kmsg and we've already opened it, then we've
621 	 * already done the capabilities checks at open time.
622 	 */
623 	if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
624 		goto ok;
625 
626 	if (syslog_action_restricted(type)) {
627 		if (capable(CAP_SYSLOG))
628 			goto ok;
629 		return -EPERM;
630 	}
631 ok:
632 	return security_syslog(type);
633 }
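/*
 * For example, with kernel.dmesg_restrict set, a plain "dmesg"
 * invocation (SYSLOG_ACTION_READ_ALL) from an unprivileged process
 * fails with -EPERM unless the caller has CAP_SYSLOG.
 */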
634 
635 static void append_char(char **pp, char *e, char c)
636 {
637 	if (*pp < e)
638 		*(*pp)++ = c;
639 }
640 
641 static ssize_t info_print_ext_header(char *buf, size_t size,
642 				     struct printk_info *info)
643 {
644 	u64 ts_usec = info->ts_nsec;
645 	char caller[20];
646 #ifdef CONFIG_PRINTK_CALLER
647 	u32 id = info->caller_id;
648 
649 	snprintf(caller, sizeof(caller), ",caller=%c%u",
650 		 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
651 #else
652 	caller[0] = '\0';
653 #endif
654 
655 	do_div(ts_usec, 1000);
656 
657 	return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
658 			 (info->facility << 3) | info->level, info->seq,
659 			 ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller);
660 }
661 
662 static ssize_t msg_add_ext_text(char *buf, size_t size,
663 				const char *text, size_t text_len,
664 				unsigned char endc)
665 {
666 	char *p = buf, *e = buf + size;
667 	size_t i;
668 
669 	/* escape non-printable characters */
670 	for (i = 0; i < text_len; i++) {
671 		unsigned char c = text[i];
672 
673 		if (c < ' ' || c >= 127 || c == '\\')
674 			p += scnprintf(p, e - p, "\\x%02x", c);
675 		else
676 			append_char(&p, e, c);
677 	}
678 	append_char(&p, e, endc);
679 
680 	return p - buf;
681 }
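/*
 * Example: the text "a\tb" is emitted as "a\x09b" because the tab
 * (0x09) falls below ' '; a literal backslash would likewise become
 * "\x5c".
 */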
682 
683 static ssize_t msg_add_dict_text(char *buf, size_t size,
684 				 const char *key, const char *val)
685 {
686 	size_t val_len = strlen(val);
687 	ssize_t len;
688 
689 	if (!val_len)
690 		return 0;
691 
692 	len = msg_add_ext_text(buf, size, "", 0, ' ');	/* dict prefix */
693 	len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '=');
694 	len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n');
695 
696 	return len;
697 }
698 
699 static ssize_t msg_print_ext_body(char *buf, size_t size,
700 				  char *text, size_t text_len,
701 				  struct dev_printk_info *dev_info)
702 {
703 	ssize_t len;
704 
705 	len = msg_add_ext_text(buf, size, text, text_len, '\n');
706 
707 	if (!dev_info)
708 		goto out;
709 
710 	len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM",
711 				 dev_info->subsystem);
712 	len += msg_add_dict_text(buf + len, size - len, "DEVICE",
713 				 dev_info->device);
714 out:
715 	return len;
716 }
717 
718 /* /dev/kmsg - userspace message inject/listen interface */
719 struct devkmsg_user {
720 	atomic64_t seq;
721 	struct ratelimit_state rs;
722 	struct mutex lock;
723 	struct printk_buffers pbufs;
724 };
725 
726 static __printf(3, 4) __cold
727 int devkmsg_emit(int facility, int level, const char *fmt, ...)
728 {
729 	va_list args;
730 	int r;
731 
732 	va_start(args, fmt);
733 	r = vprintk_emit(facility, level, NULL, fmt, args);
734 	va_end(args);
735 
736 	return r;
737 }
738 
739 static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
740 {
741 	char *buf, *line;
742 	int level = default_message_loglevel;
743 	int facility = 1;	/* LOG_USER */
744 	struct file *file = iocb->ki_filp;
745 	struct devkmsg_user *user = file->private_data;
746 	size_t len = iov_iter_count(from);
747 	ssize_t ret = len;
748 
749 	if (len > PRINTKRB_RECORD_MAX)
750 		return -EINVAL;
751 
752 	/* Ignore when user logging is disabled. */
753 	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
754 		return len;
755 
756 	/* Ratelimit when not explicitly enabled. */
757 	if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
758 		if (!___ratelimit(&user->rs, current->comm))
759 			return ret;
760 	}
761 
762 	buf = kmalloc(len+1, GFP_KERNEL);
763 	if (buf == NULL)
764 		return -ENOMEM;
765 
766 	buf[len] = '\0';
767 	if (!copy_from_iter_full(buf, len, from)) {
768 		kfree(buf);
769 		return -EFAULT;
770 	}
771 
772 	/*
773 	 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
774 	 * the decimal value represents a 32-bit number; the lower 3 bits
775 	 * are the log level, the rest the log facility.
776 	 *
777 	 * If no prefix or no userspace facility is specified, we
778 	 * enforce LOG_USER, to be able to reliably distinguish
779 	 * kernel-generated messages from userspace-injected ones.
780 	 */
781 	line = buf;
782 	if (line[0] == '<') {
783 		char *endp = NULL;
784 		unsigned int u;
785 
786 		u = simple_strtoul(line + 1, &endp, 10);
787 		if (endp && endp[0] == '>') {
788 			level = LOG_LEVEL(u);
789 			if (LOG_FACILITY(u) != 0)
790 				facility = LOG_FACILITY(u);
791 			endp++;
792 			line = endp;
793 		}
794 	}
795 
796 	devkmsg_emit(facility, level, "%s", line);
797 	kfree(buf);
798 	return ret;
799 }
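/*
 * Example: from userspace,
 *
 *	printf '<30>myapp: started\n' > /dev/kmsg
 *
 * stores a record with facility 3 (LOG_DAEMON) and level 6, whereas a
 * prefix-less write gets facility LOG_USER and the default message
 * loglevel.
 */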
800 
801 static ssize_t devkmsg_read(struct file *file, char __user *buf,
802 			    size_t count, loff_t *ppos)
803 {
804 	struct devkmsg_user *user = file->private_data;
805 	char *outbuf = &user->pbufs.outbuf[0];
806 	struct printk_message pmsg = {
807 		.pbufs = &user->pbufs,
808 	};
809 	ssize_t ret;
810 
811 	ret = mutex_lock_interruptible(&user->lock);
812 	if (ret)
813 		return ret;
814 
815 	if (!printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, false)) {
816 		if (file->f_flags & O_NONBLOCK) {
817 			ret = -EAGAIN;
818 			goto out;
819 		}
820 
821 		/*
822 		 * Guarantee this task is visible on the waitqueue before
823 		 * checking the wake condition.
824 		 *
825 		 * The full memory barrier within set_current_state() of
826 		 * prepare_to_wait_event() pairs with the full memory barrier
827 		 * within wq_has_sleeper().
828 		 *
829 		 * This pairs with __wake_up_klogd:A.
830 		 */
831 		ret = wait_event_interruptible(log_wait,
832 				printk_get_next_message(&pmsg, atomic64_read(&user->seq), true,
833 							false)); /* LMM(devkmsg_read:A) */
834 		if (ret)
835 			goto out;
836 	}
837 
838 	if (pmsg.dropped) {
839 		/* our last seen message is gone, return error and reset */
840 		atomic64_set(&user->seq, pmsg.seq);
841 		ret = -EPIPE;
842 		goto out;
843 	}
844 
845 	atomic64_set(&user->seq, pmsg.seq + 1);
846 
847 	if (pmsg.outbuf_len > count) {
848 		ret = -EINVAL;
849 		goto out;
850 	}
851 
852 	if (copy_to_user(buf, outbuf, pmsg.outbuf_len)) {
853 		ret = -EFAULT;
854 		goto out;
855 	}
856 	ret = pmsg.outbuf_len;
857 out:
858 	mutex_unlock(&user->lock);
859 	return ret;
860 }
861 
862 /*
863  * Be careful when modifying this function!!!
864  *
865  * Only a few operations are supported because the device works only with
866  * entire variable-length messages (records). Non-standard values are
867  * returned in the other cases and it has been this way for quite some time.
868  * User space applications might depend on this behavior.
869  */
870 static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
871 {
872 	struct devkmsg_user *user = file->private_data;
873 	loff_t ret = 0;
874 
875 	if (offset)
876 		return -ESPIPE;
877 
878 	switch (whence) {
879 	case SEEK_SET:
880 		/* the first record */
881 		atomic64_set(&user->seq, prb_first_valid_seq(prb));
882 		break;
883 	case SEEK_DATA:
884 		/*
885 		 * The first record after the last SYSLOG_ACTION_CLEAR,
886 		 * as issued by 'dmesg -c'. Reading /dev/kmsg itself
887 		 * changes no global state, and does not clear anything.
888 		 */
889 		atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
890 		break;
891 	case SEEK_END:
892 		/* after the last record */
893 		atomic64_set(&user->seq, prb_next_seq(prb));
894 		break;
895 	default:
896 		ret = -EINVAL;
897 	}
898 	return ret;
899 }
900 
901 static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
902 {
903 	struct devkmsg_user *user = file->private_data;
904 	struct printk_info info;
905 	__poll_t ret = 0;
906 
907 	poll_wait(file, &log_wait, wait);
908 
909 	if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
910 		/* return error when data has vanished underneath us */
911 		if (info.seq != atomic64_read(&user->seq))
912 			ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
913 		else
914 			ret = EPOLLIN|EPOLLRDNORM;
915 	}
916 
917 	return ret;
918 }
919 
920 static int devkmsg_open(struct inode *inode, struct file *file)
921 {
922 	struct devkmsg_user *user;
923 	int err;
924 
925 	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
926 		return -EPERM;
927 
928 	/* write-only does not need any file context */
929 	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
930 		err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
931 					       SYSLOG_FROM_READER);
932 		if (err)
933 			return err;
934 	}
935 
936 	user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
937 	if (!user)
938 		return -ENOMEM;
939 
940 	ratelimit_default_init(&user->rs);
941 	ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);
942 
943 	mutex_init(&user->lock);
944 
945 	atomic64_set(&user->seq, prb_first_valid_seq(prb));
946 
947 	file->private_data = user;
948 	return 0;
949 }
950 
951 static int devkmsg_release(struct inode *inode, struct file *file)
952 {
953 	struct devkmsg_user *user = file->private_data;
954 
955 	ratelimit_state_exit(&user->rs);
956 
957 	mutex_destroy(&user->lock);
958 	kvfree(user);
959 	return 0;
960 }
961 
962 const struct file_operations kmsg_fops = {
963 	.open = devkmsg_open,
964 	.read = devkmsg_read,
965 	.write_iter = devkmsg_write,
966 	.llseek = devkmsg_llseek,
967 	.poll = devkmsg_poll,
968 	.release = devkmsg_release,
969 };
970 
971 #ifdef CONFIG_VMCORE_INFO
972 /*
973  * This appends the listed symbols to /proc/vmcore
974  *
975  * /proc/vmcore is used by various utilities, like crash and makedumpfile, to
976  * obtain access to symbols that are otherwise very difficult to locate.  These
977  * symbols are specifically used so that utilities can access and extract the
978  * dmesg log from a vmcore file after a crash.
979  */
980 void log_buf_vmcoreinfo_setup(void)
981 {
982 	struct dev_printk_info *dev_info = NULL;
983 
984 	VMCOREINFO_SYMBOL(prb);
985 	VMCOREINFO_SYMBOL(printk_rb_static);
986 	VMCOREINFO_SYMBOL(clear_seq);
987 
988 	/*
989 	 * Export struct size and field offsets. User space tools can
990 	 * parse it and detect any changes to structure down the line.
991 	 */
992 
993 	VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
994 	VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
995 	VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
996 	VMCOREINFO_OFFSET(printk_ringbuffer, fail);
997 
998 	VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
999 	VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
1000 	VMCOREINFO_OFFSET(prb_desc_ring, descs);
1001 	VMCOREINFO_OFFSET(prb_desc_ring, infos);
1002 	VMCOREINFO_OFFSET(prb_desc_ring, head_id);
1003 	VMCOREINFO_OFFSET(prb_desc_ring, tail_id);
1004 
1005 	VMCOREINFO_STRUCT_SIZE(prb_desc);
1006 	VMCOREINFO_OFFSET(prb_desc, state_var);
1007 	VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);
1008 
1009 	VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
1010 	VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
1011 	VMCOREINFO_OFFSET(prb_data_blk_lpos, next);
1012 
1013 	VMCOREINFO_STRUCT_SIZE(printk_info);
1014 	VMCOREINFO_OFFSET(printk_info, seq);
1015 	VMCOREINFO_OFFSET(printk_info, ts_nsec);
1016 	VMCOREINFO_OFFSET(printk_info, text_len);
1017 	VMCOREINFO_OFFSET(printk_info, caller_id);
1018 	VMCOREINFO_OFFSET(printk_info, dev_info);
1019 
1020 	VMCOREINFO_STRUCT_SIZE(dev_printk_info);
1021 	VMCOREINFO_OFFSET(dev_printk_info, subsystem);
1022 	VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem));
1023 	VMCOREINFO_OFFSET(dev_printk_info, device);
1024 	VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device));
1025 
1026 	VMCOREINFO_STRUCT_SIZE(prb_data_ring);
1027 	VMCOREINFO_OFFSET(prb_data_ring, size_bits);
1028 	VMCOREINFO_OFFSET(prb_data_ring, data);
1029 	VMCOREINFO_OFFSET(prb_data_ring, head_lpos);
1030 	VMCOREINFO_OFFSET(prb_data_ring, tail_lpos);
1031 
1032 	VMCOREINFO_SIZE(atomic_long_t);
1033 	VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);
1034 
1035 	VMCOREINFO_STRUCT_SIZE(latched_seq);
1036 	VMCOREINFO_OFFSET(latched_seq, val);
1037 }
1038 #endif
1039 
1040 /* requested log_buf_len from kernel cmdline */
1041 static unsigned long __initdata new_log_buf_len;
1042 
1043 /* we scale the ring buffer in powers of 2 */
1044 static void __init log_buf_len_update(u64 size)
1045 {
1046 	if (size > (u64)LOG_BUF_LEN_MAX) {
1047 		size = (u64)LOG_BUF_LEN_MAX;
1048 		pr_err("log_buf over 2G is not supported.\n");
1049 	}
1050 
1051 	if (size)
1052 		size = roundup_pow_of_two(size);
1053 	if (size > log_buf_len)
1054 		new_log_buf_len = (unsigned long)size;
1055 }
1056 
1057 /* save requested log_buf_len since it's too early to process it */
1058 static int __init log_buf_len_setup(char *str)
1059 {
1060 	u64 size;
1061 
1062 	if (!str)
1063 		return -EINVAL;
1064 
1065 	size = memparse(str, &str);
1066 
1067 	log_buf_len_update(size);
1068 
1069 	return 0;
1070 }
1071 early_param("log_buf_len", log_buf_len_setup);
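/*
 * Example: "log_buf_len=1M" requests a 1 MiB buffer; a value that is
 * not a power of two, such as "log_buf_len=96K", is rounded up by
 * roundup_pow_of_two() (here to 128 KiB). Values not larger than the
 * compile-time __LOG_BUF_LEN are ignored.
 */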
1072 
1073 #ifdef CONFIG_SMP
1074 #define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)
1075 
1076 static void __init log_buf_add_cpu(void)
1077 {
1078 	unsigned int cpu_extra;
1079 
1080 	/*
1081 	 * archs should set up cpu_possible_bits properly with
1082 	 * set_cpu_possible() after setup_arch(), but just in
1083 	 * case let's ensure this is valid.
1084 	 */
1085 	if (num_possible_cpus() == 1)
1086 		return;
1087 
1088 	cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;
1089 
1090 	/* by default this only continues through for systems with > 64 CPUs */
1091 	if (cpu_extra <= __LOG_BUF_LEN / 2)
1092 		return;
1093 
1094 	pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
1095 		__LOG_CPU_MAX_BUF_LEN);
1096 	pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
1097 		cpu_extra);
1098 	pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);
1099 
1100 	log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
1101 }
1102 #else /* !CONFIG_SMP */
1103 static inline void log_buf_add_cpu(void) {}
1104 #endif /* CONFIG_SMP */
1105 
1106 static void __init set_percpu_data_ready(void)
1107 {
1108 	__printk_percpu_data_ready = true;
1109 }
1110 
1111 static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
1112 				     struct printk_record *r)
1113 {
1114 	struct prb_reserved_entry e;
1115 	struct printk_record dest_r;
1116 
1117 	prb_rec_init_wr(&dest_r, r->info->text_len);
1118 
1119 	if (!prb_reserve(&e, rb, &dest_r))
1120 		return 0;
1121 
1122 	memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len);
1123 	dest_r.info->text_len = r->info->text_len;
1124 	dest_r.info->facility = r->info->facility;
1125 	dest_r.info->level = r->info->level;
1126 	dest_r.info->flags = r->info->flags;
1127 	dest_r.info->ts_nsec = r->info->ts_nsec;
1128 	dest_r.info->caller_id = r->info->caller_id;
1129 	memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info));
1130 
1131 	prb_final_commit(&e);
1132 
1133 	return prb_record_text_space(&e);
1134 }
1135 
1136 static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata;
1137 
1138 static void print_log_buf_usage_stats(void)
1139 {
1140 	unsigned int descs_count = log_buf_len >> PRB_AVGBITS;
1141 	size_t meta_data_size;
1142 
1143 	meta_data_size = descs_count * (sizeof(struct prb_desc) + sizeof(struct printk_info));
1144 
1145 	pr_info("log buffer data + meta data: %u + %zu = %zu bytes\n",
1146 		log_buf_len, meta_data_size, log_buf_len + meta_data_size);
1147 }
1148 
1149 void __init setup_log_buf(int early)
1150 {
1151 	struct printk_info *new_infos;
1152 	unsigned int new_descs_count;
1153 	struct prb_desc *new_descs;
1154 	struct printk_info info;
1155 	struct printk_record r;
1156 	unsigned int text_size;
1157 	size_t new_descs_size;
1158 	size_t new_infos_size;
1159 	unsigned long flags;
1160 	char *new_log_buf;
1161 	unsigned int free;
1162 	u64 seq;
1163 
1164 	/*
1165 	 * Some archs call setup_log_buf() multiple times - the first call is
1166 	 * very early, e.g. from setup_arch(), and the second comes when
1167 	 * percpu areas are initialised.
1168 	 */
1169 	if (!early)
1170 		set_percpu_data_ready();
1171 
1172 	if (log_buf != __log_buf)
1173 		return;
1174 
1175 	if (!early && !new_log_buf_len)
1176 		log_buf_add_cpu();
1177 
1178 	if (!new_log_buf_len) {
1179 		/* Show the memory stats only once. */
1180 		if (!early)
1181 			goto out;
1182 
1183 		return;
1184 	}
1185 
1186 	new_descs_count = new_log_buf_len >> PRB_AVGBITS;
1187 	if (new_descs_count == 0) {
1188 		pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
1189 		goto out;
1190 	}
1191 
1192 	new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
1193 	if (unlikely(!new_log_buf)) {
1194 		pr_err("log_buf_len: %lu text bytes not available\n",
1195 		       new_log_buf_len);
1196 		goto out;
1197 	}
1198 
1199 	new_descs_size = new_descs_count * sizeof(struct prb_desc);
1200 	new_descs = memblock_alloc(new_descs_size, LOG_ALIGN);
1201 	if (unlikely(!new_descs)) {
1202 		pr_err("log_buf_len: %zu desc bytes not available\n",
1203 		       new_descs_size);
1204 		goto err_free_log_buf;
1205 	}
1206 
1207 	new_infos_size = new_descs_count * sizeof(struct printk_info);
1208 	new_infos = memblock_alloc(new_infos_size, LOG_ALIGN);
1209 	if (unlikely(!new_infos)) {
1210 		pr_err("log_buf_len: %zu info bytes not available\n",
1211 		       new_infos_size);
1212 		goto err_free_descs;
1213 	}
1214 
1215 	prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf));
1216 
1217 	prb_init(&printk_rb_dynamic,
1218 		 new_log_buf, ilog2(new_log_buf_len),
1219 		 new_descs, ilog2(new_descs_count),
1220 		 new_infos);
1221 
1222 	local_irq_save(flags);
1223 
1224 	log_buf_len = new_log_buf_len;
1225 	log_buf = new_log_buf;
1226 	new_log_buf_len = 0;
1227 
1228 	free = __LOG_BUF_LEN;
1229 	prb_for_each_record(0, &printk_rb_static, seq, &r) {
1230 		text_size = add_to_rb(&printk_rb_dynamic, &r);
1231 		if (text_size > free)
1232 			free = 0;
1233 		else
1234 			free -= text_size;
1235 	}
1236 
1237 	prb = &printk_rb_dynamic;
1238 
1239 	local_irq_restore(flags);
1240 
1241 	/*
1242 	 * Copy any remaining messages that might have appeared from
1243 	 * NMI context after copying but before switching to the
1244 	 * dynamic buffer.
1245 	 */
1246 	prb_for_each_record(seq, &printk_rb_static, seq, &r) {
1247 		text_size = add_to_rb(&printk_rb_dynamic, &r);
1248 		if (text_size > free)
1249 			free = 0;
1250 		else
1251 			free -= text_size;
1252 	}
1253 
1254 	if (seq != prb_next_seq(&printk_rb_static)) {
1255 		pr_err("dropped %llu messages\n",
1256 		       prb_next_seq(&printk_rb_static) - seq);
1257 	}
1258 
1259 	print_log_buf_usage_stats();
1260 	pr_info("early log buf free: %u(%u%%)\n",
1261 		free, (free * 100) / __LOG_BUF_LEN);
1262 	return;
1263 
1264 err_free_descs:
1265 	memblock_free(new_descs, new_descs_size);
1266 err_free_log_buf:
1267 	memblock_free(new_log_buf, new_log_buf_len);
1268 out:
1269 	print_log_buf_usage_stats();
1270 }
1271 
1272 static bool __read_mostly ignore_loglevel;
1273 
1274 static int __init ignore_loglevel_setup(char *str)
1275 {
1276 	ignore_loglevel = true;
1277 	pr_info("debug: ignoring loglevel setting.\n");
1278 
1279 	return 0;
1280 }
1281 
1282 early_param("ignore_loglevel", ignore_loglevel_setup);
1283 module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
1284 MODULE_PARM_DESC(ignore_loglevel,
1285 		 "ignore loglevel setting (prints all kernel messages to the console)");
1286 
1287 static bool suppress_message_printing(int level)
1288 {
1289 	return (level >= console_loglevel && !ignore_loglevel);
1290 }
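/*
 * Example: with console_loglevel == 7, a KERN_DEBUG (level 7) message
 * is suppressed while KERN_INFO (level 6) is printed; booting with
 * "ignore_loglevel" lets both through.
 */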
1291 
1292 #ifdef CONFIG_BOOT_PRINTK_DELAY
1293 
1294 static int boot_delay; /* msecs delay after each printk during bootup */
1295 static unsigned long long loops_per_msec;	/* based on boot_delay */
1296 
1297 static int __init boot_delay_setup(char *str)
1298 {
1299 	unsigned long lpj;
1300 
1301 	lpj = preset_lpj ? preset_lpj : 1000000;	/* some guess */
1302 	loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
1303 
1304 	get_option(&str, &boot_delay);
1305 	if (boot_delay > 10 * 1000)
1306 		boot_delay = 0;
1307 
1308 	pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
1309 		"HZ: %d, loops_per_msec: %llu\n",
1310 		boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
1311 	return 0;
1312 }
1313 early_param("boot_delay", boot_delay_setup);
1314 
1315 static void boot_delay_msec(int level)
1316 {
1317 	unsigned long long k;
1318 	unsigned long timeout;
1319 	bool suppress = !is_printk_force_console() &&
1320 			suppress_message_printing(level);
1321 
1322 	if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING) || suppress)
1323 		return;
1324 
1325 	k = (unsigned long long)loops_per_msec * boot_delay;
1326 
1327 	timeout = jiffies + msecs_to_jiffies(boot_delay);
1328 	while (k) {
1329 		k--;
1330 		cpu_relax();
1331 		/*
1332 		 * use (volatile) jiffies to prevent
1333 		 * compiler reduction; loop termination via jiffies
1334 		 * is secondary and may or may not happen.
1335 		 */
1336 		if (time_after(jiffies, timeout))
1337 			break;
1338 		touch_nmi_watchdog();
1339 	}
1340 }
1341 #else
1342 static inline void boot_delay_msec(int level)
1343 {
1344 }
1345 #endif
1346 
1347 static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
1348 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
1349 
1350 static size_t print_syslog(unsigned int level, char *buf)
1351 {
1352 	return sprintf(buf, "<%u>", level);
1353 }
1354 
1355 static size_t print_time(u64 ts, char *buf)
1356 {
1357 	unsigned long rem_nsec = do_div(ts, 1000000000);
1358 
1359 	return sprintf(buf, "[%5lu.%06lu]",
1360 		       (unsigned long)ts, rem_nsec / 1000);
1361 }
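/*
 * Example: ts = 5140900123 ns is printed as "[    5.140900]" - five
 * whole seconds, with the 140900123 ns remainder reduced to
 * microseconds.
 */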
1362 
1363 #ifdef CONFIG_PRINTK_CALLER
1364 static size_t print_caller(u32 id, char *buf)
1365 {
1366 	char caller[12];
1367 
1368 	snprintf(caller, sizeof(caller), "%c%u",
1369 		 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
1370 	return sprintf(buf, "[%6s]", caller);
1371 }
1372 #else
1373 #define print_caller(id, buf) 0
1374 #endif
1375 
1376 static size_t info_print_prefix(const struct printk_info *info, bool syslog,
1377 				bool time, char *buf)
1378 {
1379 	size_t len = 0;
1380 
1381 	if (syslog)
1382 		len = print_syslog((info->facility << 3) | info->level, buf);
1383 
1384 	if (time)
1385 		len += print_time(info->ts_nsec, buf + len);
1386 
1387 	len += print_caller(info->caller_id, buf + len);
1388 
1389 	if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) {
1390 		buf[len++] = ' ';
1391 		buf[len] = '\0';
1392 	}
1393 
1394 	return len;
1395 }
1396 
1397 /*
1398  * Prepare the record for printing. The text is shifted within the given
1399  * buffer to avoid a need for another one. The following operations are
1400  * done:
1401  *
1402  *   - Add prefix for each line.
1403  *   - Drop truncated lines that no longer fit into the buffer.
1404  *   - Add the trailing newline that has been removed in vprintk_store().
1405  *   - Add a string terminator.
1406  *
1407  * Since the produced string is always terminated, the maximum possible
1408  * return value is @r->text_buf_size - 1.
1409  *
1410  * Return: The length of the updated/prepared text, including the added
1411  * prefixes and the newline. The terminator is not counted. The dropped
1412  * line(s) are not counted.
1413  */
1414 static size_t record_print_text(struct printk_record *r, bool syslog,
1415 				bool time)
1416 {
1417 	size_t text_len = r->info->text_len;
1418 	size_t buf_size = r->text_buf_size;
1419 	char *text = r->text_buf;
1420 	char prefix[PRINTK_PREFIX_MAX];
1421 	bool truncated = false;
1422 	size_t prefix_len;
1423 	size_t line_len;
1424 	size_t len = 0;
1425 	char *next;
1426 
1427 	/*
1428 	 * If the message was truncated because the buffer was not large
1429 	 * enough, treat the available text as if it were the full text.
1430 	 */
1431 	if (text_len > buf_size)
1432 		text_len = buf_size;
1433 
1434 	prefix_len = info_print_prefix(r->info, syslog, time, prefix);
1435 
1436 	/*
1437 	 * @text_len: bytes of unprocessed text
1438 	 * @line_len: bytes of current line _without_ newline
1439 	 * @text:     pointer to beginning of current line
1440 	 * @len:      number of bytes prepared in r->text_buf
1441 	 */
1442 	for (;;) {
1443 		next = memchr(text, '\n', text_len);
1444 		if (next) {
1445 			line_len = next - text;
1446 		} else {
1447 			/* Drop truncated line(s). */
1448 			if (truncated)
1449 				break;
1450 			line_len = text_len;
1451 		}
1452 
1453 		/*
1454 		 * Truncate the text if there is not enough space to add the
1455 		 * prefix and a trailing newline and a terminator.
1456 		 */
1457 		if (len + prefix_len + text_len + 1 + 1 > buf_size) {
1458 			/* Drop even the current line if no space. */
1459 			if (len + prefix_len + line_len + 1 + 1 > buf_size)
1460 				break;
1461 
1462 			text_len = buf_size - len - prefix_len - 1 - 1;
1463 			truncated = true;
1464 		}
1465 
1466 		memmove(text + prefix_len, text, text_len);
1467 		memcpy(text, prefix, prefix_len);
1468 
1469 		/*
1470 		 * Increment the prepared length to include the text and
1471 		 * prefix that were just moved+copied. Also increment for the
1472 		 * newline at the end of this line. If this is the last line,
1473 		 * there is no newline, but it will be added immediately below.
1474 		 */
1475 		len += prefix_len + line_len + 1;
1476 		if (text_len == line_len) {
1477 			/*
1478 			 * This is the last line. Add the trailing newline
1479 			 * removed in vprintk_store().
1480 			 */
1481 			text[prefix_len + line_len] = '\n';
1482 			break;
1483 		}
1484 
1485 		/*
1486 		 * Advance beyond the added prefix and the related line with
1487 		 * its newline.
1488 		 */
1489 		text += prefix_len + line_len + 1;
1490 
1491 		/*
1492 		 * The remaining text has only decreased by the line with its
1493 		 * newline.
1494 		 *
1495 		 * Note that @text_len can become zero. It happens when @text
1496 		 * ended with a newline (either due to truncation or the
1497 		 * original string ending with "\n\n"). The loop is correctly
1498 		 * repeated and (if not truncated) an empty line with a prefix
1499 		 * will be prepared.
1500 		 */
1501 		text_len -= line_len + 1;
1502 	}
1503 
1504 	/*
1505 	 * If a buffer was provided, it will be terminated. Space for the
1506 	 * string terminator is guaranteed to be available. The terminator is
1507 	 * not counted in the return value.
1508 	 */
1509 	if (buf_size > 0)
1510 		r->text_buf[len] = 0;
1511 
1512 	return len;
1513 }
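/*
 * Example: the stored text "one\ntwo" with the prefix "[    5.140900] "
 * is rewritten in place as:
 *
 *	[    5.140900] one
 *	[    5.140900] two
 *
 * and the returned length counts both prefixes, both lines and both
 * newlines, but not the added terminator.
 */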
1514 
1515 static size_t get_record_print_text_size(struct printk_info *info,
1516 					 unsigned int line_count,
1517 					 bool syslog, bool time)
1518 {
1519 	char prefix[PRINTK_PREFIX_MAX];
1520 	size_t prefix_len;
1521 
1522 	prefix_len = info_print_prefix(info, syslog, time, prefix);
1523 
1524 	/*
1525 	 * Each line will be preceded with a prefix. The intermediate
1526 	 * newlines are already within the text, but a final trailing
1527 	 * newline will be added.
1528 	 */
1529 	return ((prefix_len * line_count) + info->text_len + 1);
1530 }
1531 
1532 /*
1533  * Beginning with @start_seq, find the first record where it and all following
1534  * records up to (but not including) @max_seq fit into @size.
1535  *
1536  * @max_seq is simply an upper bound and does not need to exist. If the caller
1537  * does not require an upper bound, -1 can be used for @max_seq.
1538  */
1539 static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
1540 				  bool syslog, bool time)
1541 {
1542 	struct printk_info info;
1543 	unsigned int line_count;
1544 	size_t len = 0;
1545 	u64 seq;
1546 
1547 	/* Determine the size of the records up to @max_seq. */
1548 	prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
1549 		if (info.seq >= max_seq)
1550 			break;
1551 		len += get_record_print_text_size(&info, line_count, syslog, time);
1552 	}
1553 
1554 	/*
1555 	 * Adjust the upper bound for the next loop to avoid subtracting
1556 	 * lengths that were never added.
1557 	 */
1558 	if (seq < max_seq)
1559 		max_seq = seq;
1560 
1561 	/*
1562 	 * Move first record forward until length fits into the buffer. Ignore
1563 	 * newest messages that were not counted in the above cycle. Messages
1564 	 * might appear and get lost in the meantime. This is a best effort
1565 	 * that prevents an infinite loop that could occur with a retry.
1566 	 */
1567 	prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
1568 		if (len <= size || info.seq >= max_seq)
1569 			break;
1570 		len -= get_record_print_text_size(&info, line_count, syslog, time);
1571 	}
1572 
1573 	return seq;
1574 }
1575 
1576 /* The caller is responsible for making sure @size is greater than 0. */
1577 static int syslog_print(char __user *buf, int size)
1578 {
1579 	struct printk_info info;
1580 	struct printk_record r;
1581 	char *text;
1582 	int len = 0;
1583 	u64 seq;
1584 
1585 	text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
1586 	if (!text)
1587 		return -ENOMEM;
1588 
1589 	prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
1590 
1591 	mutex_lock(&syslog_lock);
1592 
1593 	/*
1594 	 * Wait for the @syslog_seq record to be available. @syslog_seq may
1595 	 * change while waiting.
1596 	 */
1597 	do {
1598 		seq = syslog_seq;
1599 
1600 		mutex_unlock(&syslog_lock);
1601 		/*
1602 		 * Guarantee this task is visible on the waitqueue before
1603 		 * checking the wake condition.
1604 		 *
1605 		 * The full memory barrier within set_current_state() of
1606 		 * prepare_to_wait_event() pairs with the full memory barrier
1607 		 * within wq_has_sleeper().
1608 		 *
1609 		 * This pairs with __wake_up_klogd:A.
1610 		 */
1611 		len = wait_event_interruptible(log_wait,
1612 				prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */
1613 		mutex_lock(&syslog_lock);
1614 
1615 		if (len)
1616 			goto out;
1617 	} while (syslog_seq != seq);
1618 
1619 	/*
1620 	 * Copy records that fit into the buffer. The above cycle makes sure
1621 	 * that the first record is always available.
1622 	 */
1623 	do {
1624 		size_t n;
1625 		size_t skip;
1626 		int err;
1627 
1628 		if (!prb_read_valid(prb, syslog_seq, &r))
1629 			break;
1630 
1631 		if (r.info->seq != syslog_seq) {
1632 			/* message is gone, move to next valid one */
1633 			syslog_seq = r.info->seq;
1634 			syslog_partial = 0;
1635 		}
1636 
1637 		/*
1638 		 * To keep reading/counting of a partial line consistent,
1639 		 * use the printk_time value as of the beginning of the line.
1640 		 */
1641 		if (!syslog_partial)
1642 			syslog_time = printk_time;
1643 
1644 		skip = syslog_partial;
1645 		n = record_print_text(&r, true, syslog_time);
1646 		if (n - syslog_partial <= size) {
1647 			/* message fits into buffer, move forward */
1648 			syslog_seq = r.info->seq + 1;
1649 			n -= syslog_partial;
1650 			syslog_partial = 0;
1651 		} else if (!len) {
1652 			/* partial read(), remember position */
1653 			n = size;
1654 			syslog_partial += n;
1655 		} else
1656 			n = 0;
1657 
1658 		if (!n)
1659 			break;
1660 
1661 		mutex_unlock(&syslog_lock);
1662 		err = copy_to_user(buf, text + skip, n);
1663 		mutex_lock(&syslog_lock);
1664 
1665 		if (err) {
1666 			if (!len)
1667 				len = -EFAULT;
1668 			break;
1669 		}
1670 
1671 		len += n;
1672 		size -= n;
1673 		buf += n;
1674 	} while (size);
1675 out:
1676 	mutex_unlock(&syslog_lock);
1677 	kfree(text);
1678 	return len;
1679 }
1680 
1681 static int syslog_print_all(char __user *buf, int size, bool clear)
1682 {
1683 	struct printk_info info;
1684 	struct printk_record r;
1685 	char *text;
1686 	int len = 0;
1687 	u64 seq;
1688 	bool time;
1689 
1690 	text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
1691 	if (!text)
1692 		return -ENOMEM;
1693 
1694 	time = printk_time;
1695 	/*
1696 	 * Find first record that fits, including all following records,
1697 	 * into the user-provided buffer for this dump.
1698 	 */
1699 	seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1,
1700 				     size, true, time);
1701 
1702 	prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
1703 
1704 	prb_for_each_record(seq, prb, seq, &r) {
1705 		int textlen;
1706 
1707 		textlen = record_print_text(&r, true, time);
1708 
1709 		if (len + textlen > size) {
1710 			seq--;
1711 			break;
1712 		}
1713 
1714 		if (copy_to_user(buf + len, text, textlen))
1715 			len = -EFAULT;
1716 		else
1717 			len += textlen;
1718 
1719 		if (len < 0)
1720 			break;
1721 	}
1722 
1723 	if (clear) {
1724 		mutex_lock(&syslog_lock);
1725 		latched_seq_write(&clear_seq, seq);
1726 		mutex_unlock(&syslog_lock);
1727 	}
1728 
1729 	kfree(text);
1730 	return len;
1731 }
1732 
1733 static void syslog_clear(void)
1734 {
1735 	mutex_lock(&syslog_lock);
1736 	latched_seq_write(&clear_seq, prb_next_seq(prb));
1737 	mutex_unlock(&syslog_lock);
1738 }
1739 
1740 int do_syslog(int type, char __user *buf, int len, int source)
1741 {
1742 	struct printk_info info;
1743 	bool clear = false;
1744 	static int saved_console_loglevel = LOGLEVEL_DEFAULT;
1745 	int error;
1746 
1747 	error = check_syslog_permissions(type, source);
1748 	if (error)
1749 		return error;
1750 
1751 	switch (type) {
1752 	case SYSLOG_ACTION_CLOSE:	/* Close log */
1753 		break;
1754 	case SYSLOG_ACTION_OPEN:	/* Open log */
1755 		break;
1756 	case SYSLOG_ACTION_READ:	/* Read from log */
1757 		if (!buf || len < 0)
1758 			return -EINVAL;
1759 		if (!len)
1760 			return 0;
1761 		if (!access_ok(buf, len))
1762 			return -EFAULT;
1763 		error = syslog_print(buf, len);
1764 		break;
1765 	/* Read/clear last kernel messages */
1766 	case SYSLOG_ACTION_READ_CLEAR:
1767 		clear = true;
1768 		fallthrough;
1769 	/* Read last kernel messages */
1770 	case SYSLOG_ACTION_READ_ALL:
1771 		if (!buf || len < 0)
1772 			return -EINVAL;
1773 		if (!len)
1774 			return 0;
1775 		if (!access_ok(buf, len))
1776 			return -EFAULT;
1777 		error = syslog_print_all(buf, len, clear);
1778 		break;
1779 	/* Clear ring buffer */
1780 	case SYSLOG_ACTION_CLEAR:
1781 		syslog_clear();
1782 		break;
1783 	/* Disable logging to console */
1784 	case SYSLOG_ACTION_CONSOLE_OFF:
1785 		if (saved_console_loglevel == LOGLEVEL_DEFAULT)
1786 			saved_console_loglevel = console_loglevel;
1787 		console_loglevel = minimum_console_loglevel;
1788 		break;
1789 	/* Enable logging to console */
1790 	case SYSLOG_ACTION_CONSOLE_ON:
1791 		if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
1792 			console_loglevel = saved_console_loglevel;
1793 			saved_console_loglevel = LOGLEVEL_DEFAULT;
1794 		}
1795 		break;
1796 	/* Set level of messages printed to console */
1797 	case SYSLOG_ACTION_CONSOLE_LEVEL:
1798 		if (len < 1 || len > 8)
1799 			return -EINVAL;
1800 		if (len < minimum_console_loglevel)
1801 			len = minimum_console_loglevel;
1802 		console_loglevel = len;
1803 		/* Implicitly re-enable logging to console */
1804 		saved_console_loglevel = LOGLEVEL_DEFAULT;
1805 		break;
1806 	/* Number of chars in the log buffer */
1807 	case SYSLOG_ACTION_SIZE_UNREAD:
1808 		mutex_lock(&syslog_lock);
1809 		if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
1810 			/* No unread messages. */
1811 			mutex_unlock(&syslog_lock);
1812 			return 0;
1813 		}
1814 		if (info.seq != syslog_seq) {
1815 			/* messages are gone, move to first one */
1816 			syslog_seq = info.seq;
1817 			syslog_partial = 0;
1818 		}
1819 		if (source == SYSLOG_FROM_PROC) {
1820 			/*
1821 			 * Short-cut for poll("/proc/kmsg") which simply checks
1822 			 * for pending data, not the size; return the count of
1823 			 * records, not the length.
1824 			 */
1825 			error = prb_next_seq(prb) - syslog_seq;
1826 		} else {
1827 			bool time = syslog_partial ? syslog_time : printk_time;
1828 			unsigned int line_count;
1829 			u64 seq;
1830 
1831 			prb_for_each_info(syslog_seq, prb, seq, &info,
1832 					  &line_count) {
1833 				error += get_record_print_text_size(&info, line_count,
1834 								    true, time);
1835 				time = printk_time;
1836 			}
1837 			error -= syslog_partial;
1838 		}
1839 		mutex_unlock(&syslog_lock);
1840 		break;
1841 	/* Size of the log buffer */
1842 	case SYSLOG_ACTION_SIZE_BUFFER:
1843 		error = log_buf_len;
1844 		break;
1845 	default:
1846 		error = -EINVAL;
1847 		break;
1848 	}
1849 
1850 	return error;
1851 }
1852 
1853 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
1854 {
1855 	return do_syslog(type, buf, len, SYSLOG_FROM_READER);
1856 }
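/*
 * Userspace typically reaches this through glibc's klogctl(), e.g. a
 * sketch of querying the buffer size:
 *
 *	int n = klogctl(10, NULL, 0);	// SYSLOG_ACTION_SIZE_BUFFER
 *
 * which returns log_buf_len without needing a destination buffer.
 */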
1857 
1858 /*
1859  * Special console_lock variants that help to reduce the risk of soft-lockups.
1860  * They allow passing console_lock to another printk() call using a busy wait.
1861  */
1862 
1863 #ifdef CONFIG_LOCKDEP
1864 static struct lockdep_map console_owner_dep_map = {
1865 	.name = "console_owner"
1866 };
1867 #endif
1868 
1869 static DEFINE_RAW_SPINLOCK(console_owner_lock);
1870 static struct task_struct *console_owner;
1871 static bool console_waiter;
1872 
1873 /**
1874  * console_lock_spinning_enable - mark beginning of code where another
1875  *	thread might safely busy wait
1876  *
1877  * This basically converts console_lock into a spinlock. It marks
1878  * the section where the console_lock owner cannot sleep, because
1879  * there may be a waiter spinning (like a spinlock). The owner must
1880  * also be ready to hand over the lock at the end of the section.
1881  */
1882 void console_lock_spinning_enable(void)
1883 {
1884 	/*
1885 	 * Do not use spinning in panic(). The panic CPU wants to keep the lock.
1886 	 * Non-panic CPUs abandon the flush anyway.
1887 	 *
1888 	 * Just keep the lockdep annotation. The panic-CPU should avoid
1889 	 * taking console_owner_lock because it might cause a deadlock.
1890 	 * This looks like the easiest way to prevent false lockdep
1891 	 * reports without handling the races in a lockless way.
1892 	 */
1893 	if (panic_in_progress())
1894 		goto lockdep;
1895 
1896 	raw_spin_lock(&console_owner_lock);
1897 	console_owner = current;
1898 	raw_spin_unlock(&console_owner_lock);
1899 
1900 lockdep:
1901 	/* The waiter may spin on us after setting console_owner */
1902 	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
1903 }
1904 
1905 /**
1906  * console_lock_spinning_disable_and_check - mark end of code where another
1907  *	thread was able to busy wait and check if there is a waiter
1908  * @cookie: cookie returned from console_srcu_read_lock()
1909  *
1910  * This is called at the end of the section where spinning is allowed.
1911  * It has two functions. First, it is a signal that it is no longer
1912  * safe to start busy waiting for the lock. Second, it checks if
1913  * there is a busy waiter and passes the lock rights to it.
1914  *
1915  * Important: Callers lose both the console_lock and the SRCU read lock if
1916  *	there was a busy waiter. They must not touch items synchronized by
1917  *	console_lock or SRCU read lock in this case.
1918  *
1919  * Return: 1 if the lock rights were passed, 0 otherwise.
1920  */
1921 int console_lock_spinning_disable_and_check(int cookie)
1922 {
1923 	int waiter;
1924 
1925 	/*
1926 	 * Ignore spinning waiters during panic() because they might get stopped
1927 	 * or blocked at any time.
1928 	 *
1929 	 * It is safe because nobody is allowed to start spinning during panic
1930 	 * in the first place. If there has been a waiter then non-panic CPUs
1931 	 * might stay spinning. They would get stopped anyway. The panic context
1932 	 * will never start spinning and an interrupted spin on panic CPU will
1933 	 * never continue.
1934 	 */
1935 	if (panic_in_progress()) {
1936 		/* Keep lockdep happy. */
1937 		spin_release(&console_owner_dep_map, _THIS_IP_);
1938 		return 0;
1939 	}
1940 
1941 	raw_spin_lock(&console_owner_lock);
1942 	waiter = READ_ONCE(console_waiter);
1943 	console_owner = NULL;
1944 	raw_spin_unlock(&console_owner_lock);
1945 
1946 	if (!waiter) {
1947 		spin_release(&console_owner_dep_map, _THIS_IP_);
1948 		return 0;
1949 	}
1950 
1951 	/* The waiter is now free to continue */
1952 	WRITE_ONCE(console_waiter, false);
1953 
1954 	spin_release(&console_owner_dep_map, _THIS_IP_);
1955 
1956 	/*
1957 	 * Preserve lockdep lock ordering. Release the SRCU read lock before
1958 	 * releasing the console_lock.
1959 	 */
1960 	console_srcu_read_unlock(cookie);
1961 
1962 	/*
1963 	 * Hand off console_lock to waiter. The waiter will perform
1964 	 * the up(). After this, the waiter is the console_lock owner.
1965 	 */
1966 	mutex_release(&console_lock_dep_map, _THIS_IP_);
1967 	return 1;
1968 }
1969 
1970 /**
1971  * console_trylock_spinning - try to get console_lock by busy waiting
1972  *
1973  * This allows busy waiting for the console_lock when the current
1974  * owner is running in specially marked sections. It means that
1975  * the current owner is running and cannot reschedule until it
1976  * is ready to lose the lock.
1977  *
1978  * Return: 1 if we got the lock, 0 otherwise
1979  */
1980 static int console_trylock_spinning(void)
1981 {
1982 	struct task_struct *owner = NULL;
1983 	bool waiter;
1984 	bool spin = false;
1985 	unsigned long flags;
1986 
1987 	if (console_trylock())
1988 		return 1;
1989 
1990 	/*
1991 	 * It's unsafe to spin once a panic has begun. If we are the
1992 	 * panic CPU, we may have already halted the owner of the
1993 	 * console_sem. If we are not the panic CPU, then we should
1994 	 * avoid taking console_sem, so the panic CPU has a better
1995 	 * chance of cleanly acquiring it later.
1996 	 */
1997 	if (panic_in_progress())
1998 		return 0;
1999 
2000 	printk_safe_enter_irqsave(flags);
2001 
2002 	raw_spin_lock(&console_owner_lock);
2003 	owner = READ_ONCE(console_owner);
2004 	waiter = READ_ONCE(console_waiter);
2005 	if (!waiter && owner && owner != current) {
2006 		WRITE_ONCE(console_waiter, true);
2007 		spin = true;
2008 	}
2009 	raw_spin_unlock(&console_owner_lock);
2010 
2011 	/*
2012 	 * If there is an active printk() writing to the
2013 	 * consoles, instead of having it write our data too,
2014 	 * see if we can offload that load from the active
2015 	 * printer, and do some printing ourselves.
2016 	 * Go into a spin only if there isn't already a waiter
2017 	 * spinning, and there is an active printer, and
2018 	 * that active printer isn't us (recursive printk?).
2019 	 */
2020 	if (!spin) {
2021 		printk_safe_exit_irqrestore(flags);
2022 		return 0;
2023 	}
2024 
2025 	/* We spin waiting for the owner to release us */
2026 	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
2027 	/* Owner will clear console_waiter on hand off */
2028 	while (READ_ONCE(console_waiter))
2029 		cpu_relax();
2030 	spin_release(&console_owner_dep_map, _THIS_IP_);
2031 
2032 	printk_safe_exit_irqrestore(flags);
2033 	/*
2034 	 * The owner passed the console lock to us.
2035 	 * Since we did not spin on console lock, annotate
2036 	 * this as a trylock. Otherwise lockdep will
2037 	 * complain.
2038 	 */
2039 	mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
2040 
2041 	/*
2042 	 * Update @console_may_schedule for trylock because the previous
2043 	 * owner may have been schedulable.
2044 	 */
2045 	console_may_schedule = 0;
2046 
2047 	return 1;
2048 }
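
/*
 * The hand-over protocol above, as a rough timeline (illustrative):
 *
 *	owner (console_unlock)			waiter (printk)
 *	----------------------			---------------
 *	console_lock_spinning_enable()
 *	con->write(...)				console_trylock_spinning()
 *						  console_waiter = true
 *						  spins on console_waiter
 *	console_lock_spinning_disable_and_check()
 *	  sees the waiter, clears
 *	  console_waiter		--->	  spin ends; this context is
 *						  now the console_lock owner
 *						  and will eventually up()
 */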
2049 
2050 /*
2051  * Recursion is tracked separately on each CPU. If NMIs are supported, an
2052  * additional NMI context per CPU is also separately tracked. Until per-CPU
2053  * data is available, a separate "early tracking" is performed.
2054  */
2055 static DEFINE_PER_CPU(u8, printk_count);
2056 static u8 printk_count_early;
2057 #ifdef CONFIG_HAVE_NMI
2058 static DEFINE_PER_CPU(u8, printk_count_nmi);
2059 static u8 printk_count_nmi_early;
2060 #endif
2061 
2062 /*
2063  * Recursion is limited to keep the output sane. printk() should not require
2064  * more than 1 level of recursion (allowing, for example, printk() to trigger
2065  * a WARN), but a higher value is used in case some printk-internal errors
2066  * exist, such as the ringbuffer validation checks failing.
2067  */
2068 #define PRINTK_MAX_RECURSION 3
2069 
2070 /*
2071  * Return a pointer to the dedicated counter for the CPU+context of the
2072  * caller.
2073  */
2074 static u8 *__printk_recursion_counter(void)
2075 {
2076 #ifdef CONFIG_HAVE_NMI
2077 	if (in_nmi()) {
2078 		if (printk_percpu_data_ready())
2079 			return this_cpu_ptr(&printk_count_nmi);
2080 		return &printk_count_nmi_early;
2081 	}
2082 #endif
2083 	if (printk_percpu_data_ready())
2084 		return this_cpu_ptr(&printk_count);
2085 	return &printk_count_early;
2086 }
2087 
2088 /*
2089  * Enter recursion tracking. Interrupts are disabled to simplify tracking.
2090  * The caller must check the boolean return value to see if the recursion is
2091  * allowed. On failure, interrupts are not disabled.
2092  *
2093  * @recursion_ptr must be a variable of type (u8 *) and is the same variable
2094  * that is passed to printk_exit_irqrestore().
2095  */
2096 #define printk_enter_irqsave(recursion_ptr, flags)	\
2097 ({							\
2098 	bool success = true;				\
2099 							\
2100 	typecheck(u8 *, recursion_ptr);			\
2101 	local_irq_save(flags);				\
2102 	(recursion_ptr) = __printk_recursion_counter();	\
2103 	if (*(recursion_ptr) > PRINTK_MAX_RECURSION) {	\
2104 		local_irq_restore(flags);		\
2105 		success = false;			\
2106 	} else {					\
2107 		(*(recursion_ptr))++;			\
2108 	}						\
2109 	success;					\
2110 })
2111 
2112 /* Exit recursion tracking, restoring interrupts. */
2113 #define printk_exit_irqrestore(recursion_ptr, flags)	\
2114 	do {						\
2115 		typecheck(u8 *, recursion_ptr);		\
2116 		(*(recursion_ptr))--;			\
2117 		local_irq_restore(flags);		\
2118 	} while (0)
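
/*
 * The two macros above must be used as a strict pair in the same
 * function. Minimal usage sketch (mirroring vprintk_store() below):
 *
 *	u8 *recursion_ptr;
 *	unsigned long irqflags;
 *
 *	if (!printk_enter_irqsave(recursion_ptr, irqflags))
 *		return 0;	// recursion limit exceeded, drop message
 *	// ... record the message with interrupts disabled ...
 *	printk_exit_irqrestore(recursion_ptr, irqflags);
 */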
2119 
2120 int printk_delay_msec __read_mostly;
2121 
2122 static inline void printk_delay(int level)
2123 {
2124 	boot_delay_msec(level);
2125 
2126 	if (unlikely(printk_delay_msec)) {
2127 		int m = printk_delay_msec;
2128 
2129 		while (m--) {
2130 			mdelay(1);
2131 			touch_nmi_watchdog();
2132 		}
2133 	}
2134 }
2135 
2136 static inline u32 printk_caller_id(void)
2137 {
2138 	return in_task() ? task_pid_nr(current) :
2139 		0x80000000 + smp_processor_id();
2140 }
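
/*
 * For example: a printk() from a task with PID 42 yields a caller_id
 * of 42, while a printk() from interrupt context on CPU 3 yields
 * 0x80000003. The top bit distinguishes CPU ids from task pids.
 */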
2141 
2142 /**
2143  * printk_parse_prefix - Parse level and control flags.
2144  *
2145  * @text:     The terminated text message.
2146  * @level:    A pointer to the current level value, will be updated.
2147  * @flags:    A pointer to the current printk_info flags, will be updated.
2148  *
2149  * @level may be NULL if the caller is not interested in the parsed value.
2150  * Otherwise the variable pointed to by @level must be set to
2151  * LOGLEVEL_DEFAULT in order to be updated with the parsed value.
2152  *
2153  * @flags may be NULL if the caller is not interested in the parsed value.
2154  * Otherwise the variable pointed to by @flags will be OR'd with the parsed
2155  * value.
2156  *
2157  * Return: The length of the parsed level and control flags.
2158  */
2159 u16 printk_parse_prefix(const char *text, int *level,
2160 			enum printk_info_flags *flags)
2161 {
2162 	u16 prefix_len = 0;
2163 	int kern_level;
2164 
2165 	while (*text) {
2166 		kern_level = printk_get_level(text);
2167 		if (!kern_level)
2168 			break;
2169 
2170 		switch (kern_level) {
2171 		case '0' ... '7':
2172 			if (level && *level == LOGLEVEL_DEFAULT)
2173 				*level = kern_level - '0';
2174 			break;
2175 		case 'c':	/* KERN_CONT */
2176 			if (flags)
2177 				*flags |= LOG_CONT;
2178 		}
2179 
2180 		prefix_len += 2;
2181 		text += 2;
2182 	}
2183 
2184 	return prefix_len;
2185 }
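
/*
 * For example, the bytes "\001" "4" "\001" "c" "Hello" (i.e.
 * KERN_WARNING KERN_CONT "Hello") carry two 2-byte prefixes:
 * printk_parse_prefix() sets *level to 4 (if it was LOGLEVEL_DEFAULT),
 * OR's LOG_CONT into *flags, and returns 4.
 */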
2186 
2187 __printf(5, 0)
2188 static u16 printk_sprint(char *text, u16 size, int facility,
2189 			 enum printk_info_flags *flags, const char *fmt,
2190 			 va_list args)
2191 {
2192 	u16 text_len;
2193 
2194 	text_len = vscnprintf(text, size, fmt, args);
2195 
2196 	/* Mark and strip a trailing newline. */
2197 	if (text_len && text[text_len - 1] == '\n') {
2198 		text_len--;
2199 		*flags |= LOG_NEWLINE;
2200 	}
2201 
2202 	/* Strip log level and control flags. */
2203 	if (facility == 0) {
2204 		u16 prefix_len;
2205 
2206 		prefix_len = printk_parse_prefix(text, NULL, NULL);
2207 		if (prefix_len) {
2208 			text_len -= prefix_len;
2209 			memmove(text, text + prefix_len, text_len);
2210 		}
2211 	}
2212 
2213 	trace_console(text, text_len);
2214 
2215 	return text_len;
2216 }
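
/*
 * For example, with facility == 0 and fmt == KERN_WARNING "Hi\n", the
 * buffer first receives "\0014Hi\n". The trailing newline is stripped
 * (setting LOG_NEWLINE), the 2-byte level prefix is stripped, and the
 * function returns 2 with text holding "Hi".
 */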
2217 
2218 __printf(4, 0)
2219 int vprintk_store(int facility, int level,
2220 		  const struct dev_printk_info *dev_info,
2221 		  const char *fmt, va_list args)
2222 {
2223 	struct prb_reserved_entry e;
2224 	enum printk_info_flags flags = 0;
2225 	struct printk_record r;
2226 	unsigned long irqflags;
2227 	u16 trunc_msg_len = 0;
2228 	char prefix_buf[8];
2229 	u8 *recursion_ptr;
2230 	u16 reserve_size;
2231 	va_list args2;
2232 	u32 caller_id;
2233 	u16 text_len;
2234 	int ret = 0;
2235 	u64 ts_nsec;
2236 
2237 	if (!printk_enter_irqsave(recursion_ptr, irqflags))
2238 		return 0;
2239 
2240 	/*
2241 	 * Since the duration of printk() can vary depending on the message
2242 	 * and state of the ringbuffer, grab the timestamp now so that it is
2243 	 * close to the call of printk(). This provides a more deterministic
2244 	 * timestamp with respect to the caller.
2245 	 */
2246 	ts_nsec = local_clock();
2247 
2248 	caller_id = printk_caller_id();
2249 
2250 	/*
2251 	 * The sprintf needs to come first since the syslog prefix might be
2252 	 * passed in as a parameter. An extra byte must be reserved so that
2253 	 * later the vscnprintf() into the reserved buffer has room for the
2254 	 * terminating '\0', which is not counted by vsnprintf().
2255 	 */
2256 	va_copy(args2, args);
2257 	reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1;
2258 	va_end(args2);
2259 
2260 	if (reserve_size > PRINTKRB_RECORD_MAX)
2261 		reserve_size = PRINTKRB_RECORD_MAX;
2262 
2263 	/* Extract log level or control flags. */
2264 	if (facility == 0)
2265 		printk_parse_prefix(&prefix_buf[0], &level, &flags);
2266 
2267 	if (level == LOGLEVEL_DEFAULT)
2268 		level = default_message_loglevel;
2269 
2270 	if (dev_info)
2271 		flags |= LOG_NEWLINE;
2272 
2273 	if (is_printk_force_console())
2274 		flags |= LOG_FORCE_CON;
2275 
2276 	if (flags & LOG_CONT) {
2277 		prb_rec_init_wr(&r, reserve_size);
2278 		if (prb_reserve_in_last(&e, prb, &r, caller_id, PRINTKRB_RECORD_MAX)) {
2279 			text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
2280 						 facility, &flags, fmt, args);
2281 			r.info->text_len += text_len;
2282 
2283 			if (flags & LOG_FORCE_CON)
2284 				r.info->flags |= LOG_FORCE_CON;
2285 
2286 			if (flags & LOG_NEWLINE) {
2287 				r.info->flags |= LOG_NEWLINE;
2288 				prb_final_commit(&e);
2289 			} else {
2290 				prb_commit(&e);
2291 			}
2292 
2293 			ret = text_len;
2294 			goto out;
2295 		}
2296 	}
2297 
2298 	/*
2299 	 * Explicitly initialize the record before every prb_reserve() call.
2300 	 * prb_reserve_in_last() and prb_reserve() purposely invalidate the
2301 	 * structure when they fail.
2302 	 */
2303 	prb_rec_init_wr(&r, reserve_size);
2304 	if (!prb_reserve(&e, prb, &r)) {
2305 		/* truncate the message if it is too long for empty buffer */
2306 		truncate_msg(&reserve_size, &trunc_msg_len);
2307 
2308 		prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
2309 		if (!prb_reserve(&e, prb, &r))
2310 			goto out;
2311 	}
2312 
2313 	/* fill message */
2314 	text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args);
2315 	if (trunc_msg_len)
2316 		memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
2317 	r.info->text_len = text_len + trunc_msg_len;
2318 	r.info->facility = facility;
2319 	r.info->level = level & 7;
2320 	r.info->flags = flags & 0x1f;
2321 	r.info->ts_nsec = ts_nsec;
2322 	r.info->caller_id = caller_id;
2323 	if (dev_info)
2324 		memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
2325 
2326 	/* A message without a trailing newline can be continued. */
2327 	if (!(flags & LOG_NEWLINE))
2328 		prb_commit(&e);
2329 	else
2330 		prb_final_commit(&e);
2331 
2332 	ret = text_len + trunc_msg_len;
2333 out:
2334 	printk_exit_irqrestore(recursion_ptr, irqflags);
2335 	return ret;
2336 }
2337 
2338 /*
2339  * This acts as a one-way switch to allow legacy consoles to print from
2340  * the printk() caller context on a panic CPU. It also attempts to flush
2341  * the legacy consoles in this context.
2342  */
2343 void printk_legacy_allow_panic_sync(void)
2344 {
2345 	struct console_flush_type ft;
2346 
2347 	legacy_allow_panic_sync = true;
2348 
2349 	printk_get_console_flush_type(&ft);
2350 	if (ft.legacy_direct) {
2351 		if (console_trylock())
2352 			console_unlock();
2353 	}
2354 }
2355 
2356 bool __read_mostly debug_non_panic_cpus;
2357 
2358 #ifdef CONFIG_PRINTK_CALLER
2359 static int __init debug_non_panic_cpus_setup(char *str)
2360 {
2361 	debug_non_panic_cpus = true;
2362 	pr_info("allow messages from non-panic CPUs in panic()\n");
2363 
2364 	return 0;
2365 }
2366 early_param("debug_non_panic_cpus", debug_non_panic_cpus_setup);
2367 module_param(debug_non_panic_cpus, bool, 0644);
2368 MODULE_PARM_DESC(debug_non_panic_cpus,
2369 		 "allow messages from non-panic CPUs in panic()");
2370 #endif
2371 
2372 asmlinkage int vprintk_emit(int facility, int level,
2373 			    const struct dev_printk_info *dev_info,
2374 			    const char *fmt, va_list args)
2375 {
2376 	struct console_flush_type ft;
2377 	int printed_len;
2378 
2379 	/* Suppress unimportant messages after panic happens */
2380 	if (unlikely(suppress_printk))
2381 		return 0;
2382 
2383 	/*
2384 	 * The messages on the panic CPU are the most important. If
2385 	 * non-panic CPUs are generating any messages, they will be
2386 	 * silently dropped.
2387 	 */
2388 	if (panic_on_other_cpu() &&
2389 	    !debug_non_panic_cpus &&
2390 	    !panic_triggering_all_cpu_backtrace)
2391 		return 0;
2392 
2393 	printk_get_console_flush_type(&ft);
2394 
2395 	/* If called from the scheduler, we can not call up(). */
2396 	if (level == LOGLEVEL_SCHED) {
2397 		level = LOGLEVEL_DEFAULT;
2398 		ft.legacy_offload |= ft.legacy_direct && !console_irqwork_blocked;
2399 		ft.legacy_direct = false;
2400 	}
2401 
2402 	printk_delay(level);
2403 
2404 	printed_len = vprintk_store(facility, level, dev_info, fmt, args);
2405 
2406 	if (ft.nbcon_atomic)
2407 		nbcon_atomic_flush_pending();
2408 
2409 	if (ft.nbcon_offload)
2410 		nbcon_kthreads_wake();
2411 
2412 	if (ft.legacy_direct) {
2413 		/*
2414 		 * The caller may be holding system-critical or
2415 		 * timing-sensitive locks. Disable preemption during
2416 		 * printing of all remaining records to all consoles so that
2417 		 * this context can return as soon as possible. Hopefully
2418 		 * another printk() caller will take over the printing.
2419 		 */
2420 		preempt_disable();
2421 		/*
2422 		 * Try to acquire and then immediately release the console
2423 		 * semaphore. The release will print out buffers. With the
2424 		 * spinning variant, this context tries to take over the
2425 		 * printing from another printing context.
2426 		 */
2427 		if (console_trylock_spinning())
2428 			console_unlock();
2429 		preempt_enable();
2430 	}
2431 
2432 	if (ft.legacy_offload)
2433 		defer_console_output();
2434 	else if (!console_irqwork_blocked)
2435 		wake_up_klogd();
2436 
2437 	return printed_len;
2438 }
2439 EXPORT_SYMBOL(vprintk_emit);
2440 
2441 int vprintk_default(const char *fmt, va_list args)
2442 {
2443 	return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
2444 }
2445 EXPORT_SYMBOL_GPL(vprintk_default);
2446 
2447 asmlinkage __visible int _printk(const char *fmt, ...)
2448 {
2449 	va_list args;
2450 	int r;
2451 
2452 	va_start(args, fmt);
2453 	r = vprintk(fmt, args);
2454 	va_end(args);
2455 
2456 	return r;
2457 }
2458 EXPORT_SYMBOL(_printk);
2459 
2460 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
2461 
2462 #else /* CONFIG_PRINTK */
2463 
2464 #define printk_time		false
2465 
2466 #define prb_read_valid(rb, seq, r)	false
2467 #define prb_first_valid_seq(rb)		0
2468 #define prb_next_seq(rb)		0
2469 
2470 static u64 syslog_seq;
2471 
2472 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
2473 
2474 #endif /* CONFIG_PRINTK */
2475 
2476 #ifdef CONFIG_EARLY_PRINTK
2477 struct console *early_console;
2478 
2479 asmlinkage __visible void early_printk(const char *fmt, ...)
2480 {
2481 	va_list ap;
2482 	char buf[512];
2483 	int n;
2484 
2485 	if (!early_console)
2486 		return;
2487 
2488 	va_start(ap, fmt);
2489 	n = vscnprintf(buf, sizeof(buf), fmt, ap);
2490 	va_end(ap);
2491 
2492 	early_console->write(early_console, buf, n);
2493 }
2494 #endif
2495 
2496 static void set_user_specified(struct console_cmdline *c, bool user_specified)
2497 {
2498 	if (!user_specified)
2499 		return;
2500 
2501 	/*
2502 	 * @c console was defined by the user on the command line.
2503 	 * Do not clear it when the same console is later added again by SPCR or the device tree.
2504 	 */
2505 	c->user_specified = true;
2506 	/* At least one console defined by the user on the command line. */
2507 	console_set_on_cmdline = 1;
2508 }
2509 
2510 static int __add_preferred_console(const char *name, const short idx,
2511 				   const char *devname, char *options,
2512 				   char *brl_options, bool user_specified)
2513 {
2514 	struct console_cmdline *c;
2515 	int i;
2516 
2517 	if (!name && !devname)
2518 		return -EINVAL;
2519 
2520 	/*
2521 	 * We use a signed short index for struct console so that device drivers
2522 	 * can indicate a not yet assigned index or port. However, a negative index
2523 	 * value is not valid when the console name and index are defined on
2524 	 * the command line.
2525 	 */
2526 	if (name && idx < 0)
2527 		return -EINVAL;
2528 
2529 	/*
2530 	 *	See if this tty is not yet registered, and
2531 	 *	if we have a slot free.
2532 	 */
2533 	for (i = 0, c = console_cmdline;
2534 	     i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
2535 	     i++, c++) {
2536 		if ((name && strcmp(c->name, name) == 0 && c->index == idx) ||
2537 		    (devname && strcmp(c->devname, devname) == 0)) {
2538 			if (!brl_options)
2539 				preferred_console = i;
2540 			set_user_specified(c, user_specified);
2541 			return 0;
2542 		}
2543 	}
2544 	if (i == MAX_CMDLINECONSOLES)
2545 		return -E2BIG;
2546 	if (!brl_options)
2547 		preferred_console = i;
2548 	if (name)
2549 		strscpy(c->name, name);
2550 	if (devname)
2551 		strscpy(c->devname, devname);
2552 	c->options = options;
2553 	set_user_specified(c, user_specified);
2554 	braille_set_options(c, brl_options);
2555 
2556 	c->index = idx;
2557 	return 0;
2558 }
2559 
2560 static int __init console_msg_format_setup(char *str)
2561 {
2562 	if (!strcmp(str, "syslog"))
2563 		console_msg_format = MSG_FORMAT_SYSLOG;
2564 	if (!strcmp(str, "default"))
2565 		console_msg_format = MSG_FORMAT_DEFAULT;
2566 	return 1;
2567 }
2568 __setup("console_msg_format=", console_msg_format_setup);
2569 
2570 /*
2571  * Set up a console.  Called via do_early_param() in init/main.c
2572  * for each "console=" parameter in the boot command line.
2573  */
2574 static int __init console_setup(char *str)
2575 {
2576 	static_assert(sizeof(console_cmdline[0].devname) >= sizeof(console_cmdline[0].name) + 4);
2577 	char buf[sizeof(console_cmdline[0].devname)];
2578 	char *brl_options = NULL;
2579 	char *ttyname = NULL;
2580 	char *devname = NULL;
2581 	char *options;
2582 	char *s;
2583 	int idx;
2584 
2585 	/*
2586 	 * console="" or console=null have been suggested as a way to
2587 	 * disable console output. Use ttynull, which was created
2588 	 * for exactly this purpose.
2589 	 */
2590 	if (str[0] == 0 || strcmp(str, "null") == 0) {
2591 		__add_preferred_console("ttynull", 0, NULL, NULL, NULL, true);
2592 		return 1;
2593 	}
2594 
2595 	if (_braille_console_setup(&str, &brl_options))
2596 		return 1;
2597 
2598 	/* For a DEVNAME:0.0 style console the character device is unknown early */
2599 	if (strchr(str, ':'))
2600 		devname = buf;
2601 	else
2602 		ttyname = buf;
2603 
2604 	/*
2605 	 * Decode str into name, index, options.
2606 	 */
2607 	if (ttyname && isdigit(str[0]))
2608 		scnprintf(buf, sizeof(buf), "ttyS%s", str);
2609 	else
2610 		strscpy(buf, str);
2611 
2612 	options = strchr(str, ',');
2613 	if (options)
2614 		*(options++) = 0;
2615 
2616 #ifdef __sparc__
2617 	if (!strcmp(str, "ttya"))
2618 		strscpy(buf, "ttyS0");
2619 	if (!strcmp(str, "ttyb"))
2620 		strscpy(buf, "ttyS1");
2621 #endif
2622 
2623 	for (s = buf; *s; s++)
2624 		if ((ttyname && isdigit(*s)) || *s == ',')
2625 			break;
2626 
2627 	/* @idx will get defined when devname matches. */
2628 	if (devname)
2629 		idx = -1;
2630 	else
2631 		idx = simple_strtoul(s, NULL, 10);
2632 
2633 	*s = 0;
2634 
2635 	__add_preferred_console(ttyname, idx, devname, options, brl_options, true);
2636 	return 1;
2637 }
2638 __setup("console=", console_setup);
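
/*
 * Illustrative parses of the above (derived from the code, not
 * exhaustive):
 *
 *	console=ttyS0,115200n8	-> name "ttyS", index 0, options "115200n8"
 *	console=1,9600		-> shorthand for "ttyS1", options "9600"
 *	console=DEVNAME:0.0	-> devname form; index stays -1 until a
 *				   driver matches the device name later
 */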
2639 
2640 /**
2641  * add_preferred_console - add a device to the list of preferred consoles.
2642  * @name: device name
2643  * @idx: device index
2644  * @options: options for this console
2645  *
2646  * The last preferred console added will be used for kernel messages
2647  * and stdin/out/err for init.  Normally this is used by console_setup
2648  * above to handle user-supplied console arguments; however it can also
2649  * be used by arch-specific code either to override the user or more
2650  * commonly to provide a default console (i.e. from PROM variables) when
2651  * the user has not supplied one.
2652  */
2653 int add_preferred_console(const char *name, const short idx, char *options)
2654 {
2655 	return __add_preferred_console(name, idx, NULL, options, NULL, false);
2656 }
2657 
2658 /**
2659  * match_devname_and_update_preferred_console - Update a preferred console
2660  *	when matching devname is found.
2661  * @devname: DEVNAME:0.0 style device name
2662  * @name: Name of the corresponding console driver, e.g. "ttyS"
2663  * @idx: Console index, e.g. port number.
2664  *
2665  * The function checks whether a device with the given @devname is
2666  * preferred via the console=DEVNAME:0.0 command line option.
2667  * It fills the missing console driver name and console index
2668  * so that a later register_console() call could find (match)
2669  * and enable this device.
2670  *
2671  * It might be used when a driver subsystem initializes particular
2672  * devices with already known DEVNAME:0.0 style names and can
2673  * predict which console driver name and index this device
2674  * would later be associated with.
2675  *
2676  * Return: 0 on success, negative error code on failure.
2677  */
2678 int match_devname_and_update_preferred_console(const char *devname,
2679 					       const char *name,
2680 					       const short idx)
2681 {
2682 	struct console_cmdline *c = console_cmdline;
2683 	int i;
2684 
2685 	if (!devname || !strlen(devname) || !name || !strlen(name) || idx < 0)
2686 		return -EINVAL;
2687 
2688 	for (i = 0; i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
2689 	     i++, c++) {
2690 		if (!strcmp(devname, c->devname)) {
2691 			pr_info("associate the preferred console \"%s\" with \"%s%d\"\n",
2692 				devname, name, idx);
2693 			strscpy(c->name, name);
2694 			c->index = idx;
2695 			return 0;
2696 		}
2697 	}
2698 
2699 	return -ENOENT;
2700 }
2701 EXPORT_SYMBOL_GPL(match_devname_and_update_preferred_console);
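
/*
 * Hypothetical usage sketch: a serial driver that knows the device
 * behind "serial0:0.0" will register as "ttyS" index 0 could call:
 *
 *	match_devname_and_update_preferred_console("serial0:0.0", "ttyS", 0);
 *
 * so that a console=serial0:0.0 option from the command line gets
 * matched and enabled by the later register_console() call.
 */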
2702 
2703 bool console_suspend_enabled = true;
2704 EXPORT_SYMBOL(console_suspend_enabled);
2705 
2706 static int __init console_suspend_disable(char *str)
2707 {
2708 	console_suspend_enabled = false;
2709 	return 1;
2710 }
2711 __setup("no_console_suspend", console_suspend_disable);
2712 module_param_named(console_suspend, console_suspend_enabled,
2713 		bool, S_IRUGO | S_IWUSR);
2714 MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
2715 	" and hibernate operations");
2716 
2717 static bool printk_console_no_auto_verbose;
2718 
2719 void console_verbose(void)
2720 {
2721 	if (console_loglevel && !printk_console_no_auto_verbose)
2722 		console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
2723 }
2724 EXPORT_SYMBOL_GPL(console_verbose);
2725 
2726 module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644);
2727 MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc");
2728 
2729 /**
2730  * console_suspend_all - suspend the console subsystem
2731  *
2732  * This disables printk() while we go into suspend states
2733  */
2734 void console_suspend_all(void)
2735 {
2736 	struct console *con;
2737 
2738 	if (console_suspend_enabled)
2739 		pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
2740 
2741 	/*
2742 	 * Flush any console backlog and then avoid queueing irq_work until
2743 	 * console_resume_all(). Until then deferred printing is no longer
2744 	 * triggered, NBCON consoles transition to atomic flushing, and
2745 	 * klogd waiters are not woken.
2746 	 */
2747 	pr_flush(1000, true);
2748 	console_irqwork_blocked = true;
2749 
2750 	if (!console_suspend_enabled)
2751 		return;
2752 
2753 	console_list_lock();
2754 	for_each_console(con)
2755 		console_srcu_write_flags(con, con->flags | CON_SUSPENDED);
2756 	console_list_unlock();
2757 
2758 	/*
2759 	 * Ensure that all SRCU list walks have completed. All printing
2760 	 * contexts must be able to see that they are suspended so that it
2761 	 * is guaranteed that all printing has stopped when this function
2762 	 * completes.
2763 	 */
2764 	synchronize_srcu(&console_srcu);
2765 }
2766 
2767 void console_resume_all(void)
2768 {
2769 	struct console_flush_type ft;
2770 	struct console *con;
2771 
2772 	/*
2773 	 * Allow queueing irq_work. After restoring console state, deferred
2774 	 * printing and any klogd waiters need to be triggered in case there
2775 	 * is now a console backlog.
2776 	 */
2777 	console_irqwork_blocked = false;
2778 
2779 	if (console_suspend_enabled) {
2780 		console_list_lock();
2781 		for_each_console(con)
2782 			console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
2783 		console_list_unlock();
2784 
2785 		/*
2786 		 * Ensure that all SRCU list walks have completed. All printing
2787 		 * contexts must be able to see they are no longer suspended so
2788 		 * that they are guaranteed to wake up and resume printing.
2789 		 */
2790 		synchronize_srcu(&console_srcu);
2791 	}
2792 
2793 	printk_get_console_flush_type(&ft);
2794 	if (ft.nbcon_offload)
2795 		nbcon_kthreads_wake();
2796 	if (ft.legacy_offload)
2797 		defer_console_output();
2798 	else
2799 		wake_up_klogd();
2800 
2801 	pr_flush(1000, true);
2802 }
2803 
2804 /**
2805  * console_cpu_notify - print deferred console messages after CPU hotplug
2806  * @cpu: unused
2807  *
2808  * If printk() is called from a CPU that is not online yet, the messages
2809  * will be printed on the console only if there are CON_ANYTIME consoles.
2810  * This function is called when a new CPU comes online (or fails to come
2811  * up) or goes offline.
2812  */
2813 static int console_cpu_notify(unsigned int cpu)
2814 {
2815 	struct console_flush_type ft;
2816 
2817 	if (!cpuhp_tasks_frozen) {
2818 		printk_get_console_flush_type(&ft);
2819 		if (ft.nbcon_atomic)
2820 			nbcon_atomic_flush_pending();
2821 		if (ft.legacy_direct) {
2822 			if (console_trylock())
2823 				console_unlock();
2824 		}
2825 	}
2826 	return 0;
2827 }
2828 
2829 /**
2830  * console_lock - block the console subsystem from printing
2831  *
2832  * Acquires a lock which guarantees that no consoles will
2833  * be in or enter their write() callback.
2834  *
2835  * Can sleep, returns nothing.
2836  */
2837 void console_lock(void)
2838 {
2839 	might_sleep();
2840 
2841 	/* On panic, the console_lock must be left to the panic cpu. */
2842 	while (panic_on_other_cpu())
2843 		msleep(1000);
2844 
2845 	down_console_sem();
2846 	console_locked = 1;
2847 	console_may_schedule = 1;
2848 }
2849 EXPORT_SYMBOL(console_lock);
2850 
2851 /**
2852  * console_trylock - try to block the console subsystem from printing
2853  *
2854  * Try to acquire a lock which guarantees that no consoles will
2855  * be in or enter their write() callback.
2856  *
2857  * returns 1 on success, and 0 on failure to acquire the lock.
2858  */
2859 int console_trylock(void)
2860 {
2861 	/* On panic, the console_lock must be left to the panic cpu. */
2862 	if (panic_on_other_cpu())
2863 		return 0;
2864 	if (down_trylock_console_sem())
2865 		return 0;
2866 	console_locked = 1;
2867 	console_may_schedule = 0;
2868 	return 1;
2869 }
2870 EXPORT_SYMBOL(console_trylock);
2871 
2872 int is_console_locked(void)
2873 {
2874 	return console_locked;
2875 }
2876 EXPORT_SYMBOL(is_console_locked);
2877 
2878 static void __console_unlock(void)
2879 {
2880 	console_locked = 0;
2881 	up_console_sem();
2882 }
2883 
2884 #ifdef CONFIG_PRINTK
2885 
2886 /*
2887  * Prepend the message in @pmsg->pbufs->outbuf. This is achieved by shifting
2888  * the existing message over and inserting the scratchbuf message.
2889  *
2890  * @pmsg is the original printk message.
2891  * @fmt is the printf format of the message which will prepend the existing one.
2892  *
2893  * If there is not enough space in @pmsg->pbufs->outbuf, the existing
2894  * message text will be sufficiently truncated.
2895  *
2896  * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
2897  */
2898 __printf(2, 3)
2899 static void console_prepend_message(struct printk_message *pmsg, const char *fmt, ...)
2900 {
2901 	struct printk_buffers *pbufs = pmsg->pbufs;
2902 	const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
2903 	const size_t outbuf_sz = sizeof(pbufs->outbuf);
2904 	char *scratchbuf = &pbufs->scratchbuf[0];
2905 	char *outbuf = &pbufs->outbuf[0];
2906 	va_list args;
2907 	size_t len;
2908 
2909 	va_start(args, fmt);
2910 	len = vscnprintf(scratchbuf, scratchbuf_sz, fmt, args);
2911 	va_end(args);
2912 
2913 	/*
2914 	 * Make sure outbuf is sufficiently large before prepending.
2915 	 * Keep at least the prefix when the message must be truncated.
2916 	 * It is a rather theoretical problem when someone tries to
2917 	 * use a minimalist buffer.
2918 	 */
2919 	if (WARN_ON_ONCE(len + PRINTK_PREFIX_MAX >= outbuf_sz))
2920 		return;
2921 
2922 	if (pmsg->outbuf_len + len >= outbuf_sz) {
2923 		/* Truncate the message, but keep it terminated. */
2924 		pmsg->outbuf_len = outbuf_sz - (len + 1);
2925 		outbuf[pmsg->outbuf_len] = 0;
2926 	}
2927 
2928 	memmove(outbuf + len, outbuf, pmsg->outbuf_len + 1);
2929 	memcpy(outbuf, scratchbuf, len);
2930 	pmsg->outbuf_len += len;
2931 }
2932 
2933 /*
2934  * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message".
2935  * @pmsg->outbuf_len is updated appropriately.
2936  *
2937  * @pmsg is the printk message to prepend.
2938  *
2939  * @dropped is the dropped count to report in the dropped message.
2940  */
2941 void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
2942 {
2943 	console_prepend_message(pmsg, "** %lu printk messages dropped **\n", dropped);
2944 }
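
/*
 * For example, if @pmsg->pbufs->outbuf holds "foo" and @dropped is 5,
 * the call above rewrites it (illustratively) to:
 *
 *	"** 5 printk messages dropped **\nfoo"
 *
 * and grows @pmsg->outbuf_len by the length of the inserted prefix.
 */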
2945 
2946 /*
2947  * Prepend the message in @pmsg->pbufs->outbuf with a "replay message".
2948  * @pmsg->outbuf_len is updated appropriately.
2949  *
2950  * @pmsg is the printk message to prepend.
2951  */
2952 void console_prepend_replay(struct printk_message *pmsg)
2953 {
2954 	console_prepend_message(pmsg, "** replaying previous printk message **\n");
2955 }
2956 
2957 /*
2958  * Read and format the specified record (or a later record if the specified
2959  * record is not available).
2960  *
2961  * @pmsg will contain the formatted result. @pmsg->pbufs must point to a
2962  * struct printk_buffers.
2963  *
2964  * @seq is the record to read and format. If it is not available, the next
2965  * valid record is read.
2966  *
2967  * @is_extended specifies if the message should be formatted for extended
2968  * console output.
2969  *
2970  * @may_suppress specifies if records may be skipped based on loglevel.
2971  *
2972  * Returns false if no record is available. Otherwise true and all fields
2973  * of @pmsg are valid. (See the documentation of struct printk_message
2974  * for information about the @pmsg fields.)
2975  */
2976 bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
2977 			     bool is_extended, bool may_suppress)
2978 {
2979 	struct printk_buffers *pbufs = pmsg->pbufs;
2980 	const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
2981 	const size_t outbuf_sz = sizeof(pbufs->outbuf);
2982 	char *scratchbuf = &pbufs->scratchbuf[0];
2983 	char *outbuf = &pbufs->outbuf[0];
2984 	struct printk_info info;
2985 	struct printk_record r;
2986 	size_t len = 0;
2987 	bool force_con;
2988 
2989 	/*
2990 	 * Formatting extended messages requires a separate buffer, so use the
2991 	 * scratch buffer to read in the ringbuffer text.
2992 	 *
2993 	 * Formatting normal messages is done in-place, so read the ringbuffer
2994 	 * text directly into the output buffer.
2995 	 */
2996 	if (is_extended)
2997 		prb_rec_init_rd(&r, &info, scratchbuf, scratchbuf_sz);
2998 	else
2999 		prb_rec_init_rd(&r, &info, outbuf, outbuf_sz);
3000 
3001 	if (!prb_read_valid(prb, seq, &r))
3002 		return false;
3003 
3004 	pmsg->seq = r.info->seq;
3005 	pmsg->dropped = r.info->seq - seq;
3006 	force_con = r.info->flags & LOG_FORCE_CON;
3007 
3008 	/*
3009 	 * Skip records that are not forced to be printed on consoles and that
3010 	 * have a level above the console loglevel.
3011 	 */
3012 	if (!force_con && may_suppress && suppress_message_printing(r.info->level))
3013 		goto out;
3014 
3015 	if (is_extended) {
3016 		len = info_print_ext_header(outbuf, outbuf_sz, r.info);
3017 		len += msg_print_ext_body(outbuf + len, outbuf_sz - len,
3018 					  &r.text_buf[0], r.info->text_len, &r.info->dev_info);
3019 	} else {
3020 		len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
3021 	}
3022 out:
3023 	pmsg->outbuf_len = len;
3024 	return true;
3025 }
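
/*
 * With @is_extended the record is formatted like a /dev/kmsg line,
 * e.g. (illustrative values only):
 *
 *	6,1234,5678901,-;Hello world
 *
 * i.e. "<facility << 3 | level>,<seq>,<ts_usec>,<cont-flag>;<text>".
 * The normal format is the familiar "[    5.678901] Hello world".
 */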
3026 
3027 /*
3028  * The legacy console always acquires a spinlock_t from its printing
3029  * callback. This violates lock nesting if the caller acquired an always
3030  * spinning lock (raw_spinlock_t) while invoking printk(). This is not a
3031  * problem on PREEMPT_RT because legacy consoles always print from a
3032  * dedicated thread and never from within printk(). Therefore we tell
3033  * lockdep that a sleeping spin lock (spinlock_t) is valid here.
3034  */
3035 #ifdef CONFIG_PREEMPT_RT
3036 static inline void printk_legacy_allow_spinlock_enter(void) { }
3037 static inline void printk_legacy_allow_spinlock_exit(void) { }
3038 #else
3039 static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_CONFIG);
3040 
3041 static inline void printk_legacy_allow_spinlock_enter(void)
3042 {
3043 	lock_map_acquire_try(&printk_legacy_map);
3044 }
3045 
3046 static inline void printk_legacy_allow_spinlock_exit(void)
3047 {
3048 	lock_map_release(&printk_legacy_map);
3049 }
3050 #endif /* CONFIG_PREEMPT_RT */
3051 
3052 /*
3053  * Used as the printk buffers for non-panic, serialized console printing.
3054  * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
3055  * Its usage requires the console_lock held.
3056  */
3057 struct printk_buffers printk_shared_pbufs;
3058 
3059 /*
3060  * Print one record for the given console. The record printed is whatever
3061  * record is the next available record for the given console.
3062  *
3063  * @handover will be set to true if a printk waiter has taken over the
3064  * console_lock, in which case the caller is no longer holding both the
3065  * console_lock and the SRCU read lock. Otherwise it is set to false.
3066  *
3067  * @cookie is the cookie from the SRCU read lock.
3068  *
3069  * Returns false if the given console has no next record to print, otherwise
3070  * true.
3071  *
3072  * Requires the console_lock and the SRCU read lock.
3073  */
3074 static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
3075 {
3076 	bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
3077 	char *outbuf = &printk_shared_pbufs.outbuf[0];
3078 	struct printk_message pmsg = {
3079 		.pbufs = &printk_shared_pbufs,
3080 	};
3081 	unsigned long flags;
3082 
3083 	*handover = false;
3084 
3085 	if (!printk_get_next_message(&pmsg, con->seq, is_extended, true))
3086 		return false;
3087 
3088 	con->dropped += pmsg.dropped;
3089 
3090 	/* Skip messages of formatted length 0. */
3091 	if (pmsg.outbuf_len == 0) {
3092 		con->seq = pmsg.seq + 1;
3093 		goto skip;
3094 	}
3095 
3096 	if (con->dropped && !is_extended) {
3097 		console_prepend_dropped(&pmsg, con->dropped);
3098 		con->dropped = 0;
3099 	}
3100 
3101 	/* Write everything out to the hardware. */
3102 
3103 	if (force_legacy_kthread() && !panic_in_progress()) {
3104 		/*
3105 		 * With forced threading this function is in a task context
3106 		 * (either legacy kthread or get_init_console_seq()). There
3107 		 * is no need for concern about printk reentrance, handovers,
3108 		 * or lockdep complaints.
3109 		 */
3110 
3111 		con->write(con, outbuf, pmsg.outbuf_len);
3112 		con->seq = pmsg.seq + 1;
3113 	} else {
3114 		/*
3115 		 * While actively printing out messages, if another printk()
3116 		 * were to occur on another CPU, it may wait for this one to
3117 		 * finish. This task cannot be preempted if there is a
3118 		 * waiter waiting to take over.
3119 		 *
3120 		 * Interrupts are disabled because the hand over to a waiter
3121 		 * must not be interrupted until the hand over is completed
3122 		 * (@console_waiter is cleared).
3123 		 */
3124 		printk_safe_enter_irqsave(flags);
3125 		console_lock_spinning_enable();
3126 
3127 		/* Do not trace print latency. */
3128 		stop_critical_timings();
3129 
3130 		printk_legacy_allow_spinlock_enter();
3131 		con->write(con, outbuf, pmsg.outbuf_len);
3132 		printk_legacy_allow_spinlock_exit();
3133 
3134 		start_critical_timings();
3135 
3136 		con->seq = pmsg.seq + 1;
3137 
3138 		*handover = console_lock_spinning_disable_and_check(cookie);
3139 		printk_safe_exit_irqrestore(flags);
3140 	}
3141 skip:
3142 	return true;
3143 }
3144 
3145 #else
3146 
3147 static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
3148 {
3149 	*handover = false;
3150 	return false;
3151 }
3152 
3153 static inline void printk_kthreads_check_locked(void) { }
3154 
3155 #endif /* CONFIG_PRINTK */
3156 
3157 
3158 /*
3159  * Print out one record for each console.
3160  *
3161  * @do_cond_resched is set by the caller. It can be true only in schedulable
3162  * context.
3163  *
3164  * @next_seq is set to the sequence number after the last available record.
3165  * The value is valid only when all usable consoles were flushed, i.e.
3166  * when the function returns true (it could do the job) and the @try_again
3167  * parameter is set to false; see below.
3168  *
3169  * @handover will be set to true if a printk waiter has taken over the
3170  * console_lock, in which case the caller is no longer holding the
3171  * console_lock. Otherwise it is set to false.
3172  *
3173  * @try_again will be set to true when it still makes sense to call this
3174  * function again: the function could do the job (see the return value)
3175  * and some consoles still made progress.
3176  *
3177  * Returns true when the function could do the job: some consoles are usable,
3178  * there was no takeover, and no panic_on_other_cpu().
3179  *
3180  * Requires the console_lock.
3181  */
3182 static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *handover,
3183 				     bool *try_again)
3184 {
3185 	struct console_flush_type ft;
3186 	bool any_usable = false;
3187 	struct console *con;
3188 	int cookie;
3189 
3190 	*try_again = false;
3191 
3192 	printk_get_console_flush_type(&ft);
3193 
3194 	cookie = console_srcu_read_lock();
3195 	for_each_console_srcu(con) {
3196 		short flags = console_srcu_read_flags(con);
3197 		u64 printk_seq;
3198 		bool progress;
3199 
3200 		/*
3201 		 * console_flush_one_record() is only responsible for
3202 		 * nbcon consoles when the nbcon consoles cannot print via
3203 		 * their atomic or threaded flushing.
3204 		 */
3205 		if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
3206 			continue;
3207 
3208 		if (!console_is_usable(con, flags, !do_cond_resched))
3209 			continue;
3210 		any_usable = true;
3211 
3212 		if (flags & CON_NBCON) {
3213 			progress = nbcon_legacy_emit_next_record(con, handover, cookie,
3214 								 !do_cond_resched);
3215 			printk_seq = nbcon_seq_read(con);
3216 		} else {
3217 			progress = console_emit_next_record(con, handover, cookie);
3218 			printk_seq = con->seq;
3219 		}
3220 
3221 		/*
3222 		 * If a handover has occurred, the SRCU read lock
3223 		 * is already released.
3224 		 */
3225 		if (*handover)
3226 			goto fail;
3227 
3228 		/* Track the next of the highest seq flushed. */
3229 		if (printk_seq > *next_seq)
3230 			*next_seq = printk_seq;
3231 
3232 		if (!progress)
3233 			continue;
3234 
3235 		/*
3236 		 * A usable console made progress. There might still be
3237 		 * pending messages.
3238 		 */
3239 		*try_again = true;
3240 
3241 		/* Allow panic_cpu to take over the consoles safely. */
3242 		if (panic_on_other_cpu())
3243 			goto fail_srcu;
3244 
3245 		if (do_cond_resched)
3246 			cond_resched();
3247 	}
3248 	console_srcu_read_unlock(cookie);
3249 
3250 	return any_usable;
3251 
3252 fail_srcu:
3253 	console_srcu_read_unlock(cookie);
3254 fail:
3255 	*try_again = false;
3256 	return false;
3257 }
3258 
3259 /*
3260  * Print out all remaining records to all consoles.
3261  *
3262  * @do_cond_resched is set by the caller. It can be true only in schedulable
3263  * context.
3264  *
3265  * @next_seq is set to the sequence number after the last available record.
3266  * The value is valid only when this function returns true. It means that all
3267  * usable consoles are completely flushed.
3268  *
3269  * @handover will be set to true if a printk waiter has taken over the
3270  * console_lock, in which case the caller is no longer holding the
3271  * console_lock. Otherwise it is set to false.
3272  *
3273  * Returns true when there was at least one usable console and all messages
3274  * were flushed to all usable consoles. A returned false informs the caller
3275  * that everything was not flushed (either there were no usable consoles or
3276  * another context has taken over printing or it is a panic situation and this
3277  * is not the panic CPU). Regardless of the reason, the caller should assume it
3278  * is not useful to immediately try again.
3279  *
3280  * Requires the console_lock.
3281  */
3282 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
3283 {
3284 	bool try_again;
3285 	bool ret;
3286 
3287 	*next_seq = 0;
3288 	*handover = false;
3289 
3290 	do {
3291 		ret = console_flush_one_record(do_cond_resched, next_seq,
3292 					       handover, &try_again);
3293 	} while (try_again);
3294 
3295 	return ret;
3296 }
3297 
3298 static void __console_flush_and_unlock(void)
3299 {
3300 	bool do_cond_resched;
3301 	bool handover;
3302 	bool flushed;
3303 	u64 next_seq;
3304 
3305 	/*
3306 	 * Console drivers are called with interrupts disabled, so
3307 	 * @console_may_schedule should be cleared before; however, we may
3308 	 * end up dumping a lot of lines, for example, if called from
3309 	 * console registration path, and should invoke cond_resched()
3310 	 * between lines if allowable.  Not doing so can cause a very long
3311 	 * scheduling stall on a slow console leading to RCU stall and
3312 	 * softlockup warnings which exacerbate the issue with more
3313 	 * messages practically incapacitating the system. Therefore, create
3314 	 * a local to use for the printing loop.
3315 	 */
3316 	do_cond_resched = console_may_schedule;
3317 
3318 	do {
3319 		console_may_schedule = 0;
3320 
3321 		flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
3322 		if (!handover)
3323 			__console_unlock();
3324 
3325 		/*
3326 		 * Abort if there was a failure to flush all messages to all
3327 		 * usable consoles. Either it is not possible to flush (in
3328 		 * which case it would be an infinite loop of retrying) or
3329 		 * another context has taken over printing.
3330 		 */
3331 		if (!flushed)
3332 			break;
3333 
3334 		/*
3335 		 * Some context may have added new records after
3336 		 * console_flush_all() but before unlocking the console.
3337 		 * Re-check if there is a new record to flush. If the trylock
3338 		 * fails, another context is already handling the printing.
3339 		 */
3340 	} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
3341 }
3342 
3343 /**
3344  * console_unlock - unblock the legacy console subsystem from printing
3345  *
3346  * Releases the console_lock which the caller holds to block printing of
3347  * the legacy console subsystem.
3348  *
3349  * While the console_lock was held, console output may have been buffered
3350  * by printk(). If this is the case, console_unlock() emits the output on
3351  * legacy consoles prior to releasing the lock.
3352  *
3353  * console_unlock() may be called from any context.
3354  */
3355 void console_unlock(void)
3356 {
3357 	struct console_flush_type ft;
3358 
3359 	printk_get_console_flush_type(&ft);
3360 	if (ft.legacy_direct)
3361 		__console_flush_and_unlock();
3362 	else
3363 		__console_unlock();
3364 }
3365 EXPORT_SYMBOL(console_unlock);
3366 
3367 /**
3368  * console_conditional_schedule - yield the CPU if required
3369  *
3370  * If the console code is currently allowed to sleep, and
3371  * if this CPU should yield the CPU to another task, do
3372  * so here.
3373  *
3374  * Must be called while holding the console_lock.
3375  */
3376 void __sched console_conditional_schedule(void)
3377 {
3378 	if (console_may_schedule)
3379 		cond_resched();
3380 }
3381 EXPORT_SYMBOL(console_conditional_schedule);
3382 
3383 void console_unblank(void)
3384 {
3385 	bool found_unblank = false;
3386 	struct console *c;
3387 	int cookie;
3388 
3389 	/*
3390 	 * First check if there are any consoles implementing the unblank()
3391 	 * callback. If not, there is no reason to continue and take the
3392 	 * console lock, which in particular can be dangerous if
3393 	 * @oops_in_progress is set.
3394 	 */
3395 	cookie = console_srcu_read_lock();
3396 	for_each_console_srcu(c) {
3397 		if (!console_is_usable(c, console_srcu_read_flags(c), true))
3398 			continue;
3399 
3400 		if (c->unblank) {
3401 			found_unblank = true;
3402 			break;
3403 		}
3404 	}
3405 	console_srcu_read_unlock(cookie);
3406 	if (!found_unblank)
3407 		return;
3408 
3409 	/*
3410 	 * Stop console printing because the unblank() callback may
3411 	 * assume the console is not within its write() callback.
3412 	 *
3413 	 * If @oops_in_progress is set, this may be an atomic context.
3414 	 * In that case, attempt a trylock as best-effort.
3415 	 */
3416 	if (oops_in_progress) {
3417 		/* Semaphores are not NMI-safe. */
3418 		if (in_nmi())
3419 			return;
3420 
3421 		/*
3422 		 * Attempting to trylock the console lock can deadlock
3423 		 * if another CPU was stopped while modifying the
3424 		 * semaphore. "Hope and pray" that this is not the
3425 		 * current situation.
3426 		 */
3427 		if (down_trylock_console_sem() != 0)
3428 			return;
3429 	} else
3430 		console_lock();
3431 
3432 	console_locked = 1;
3433 	console_may_schedule = 0;
3434 
3435 	cookie = console_srcu_read_lock();
3436 	for_each_console_srcu(c) {
3437 		if (!console_is_usable(c, console_srcu_read_flags(c), true))
3438 			continue;
3439 
3440 		if (c->unblank)
3441 			c->unblank();
3442 	}
3443 	console_srcu_read_unlock(cookie);
3444 
3445 	console_unlock();
3446 
3447 	if (!oops_in_progress)
3448 		pr_flush(1000, true);
3449 }
3450 
3451 /*
3452  * Rewind all consoles to the oldest available record.
3453  *
3454  * IMPORTANT: The function is safe only when called under
3455  *            console_lock(). It is not enforced because
3456  *            it is used as a best effort in panic().
3457  */
3458 static void __console_rewind_all(void)
3459 {
3460 	struct console *c;
3461 	short flags;
3462 	int cookie;
3463 	u64 seq;
3464 
3465 	seq = prb_first_valid_seq(prb);
3466 
3467 	cookie = console_srcu_read_lock();
3468 	for_each_console_srcu(c) {
3469 		flags = console_srcu_read_flags(c);
3470 
3471 		if (flags & CON_NBCON) {
3472 			nbcon_seq_force(c, seq);
3473 		} else {
3474 			/*
3475 			 * This assignment is safe only when called under
3476 			 * console_lock(). On panic, legacy consoles are
3477 			 * only best effort.
3478 			 */
3479 			c->seq = seq;
3480 		}
3481 	}
3482 	console_srcu_read_unlock(cookie);
3483 }
3484 
3485 /**
3486  * console_flush_on_panic - flush console content on panic
3487  * @mode: flush all messages in buffer or just the pending ones
3488  *
3489  * Immediately output all pending messages no matter what.
3490  */
3491 void console_flush_on_panic(enum con_flush_mode mode)
3492 {
3493 	struct console_flush_type ft;
3494 	bool handover;
3495 	u64 next_seq;
3496 
3497 	/*
3498 	 * Ignore the console lock and flush out the messages. Attempting a
3499 	 * trylock would not be useful because:
3500 	 *
3501 	 *   - if it is contended, it must be ignored anyway
3502 	 *   - console_lock() and console_trylock() block and fail
3503 	 *     respectively in panic for non-panic CPUs
3504 	 *   - semaphores are not NMI-safe
3505 	 */
3506 
3507 	/*
3508 	 * If another context is holding the console lock,
3509 	 * @console_may_schedule might be set. Clear it so that
3510 	 * this context does not call cond_resched() while flushing.
3511 	 */
3512 	console_may_schedule = 0;
3513 
3514 	if (mode == CONSOLE_REPLAY_ALL)
3515 		__console_rewind_all();
3516 
3517 	printk_get_console_flush_type(&ft);
3518 	if (ft.nbcon_atomic)
3519 		nbcon_atomic_flush_pending();
3520 
3521 	/* Flush legacy consoles once allowed, even when dangerous. */
3522 	if (legacy_allow_panic_sync)
3523 		console_flush_all(false, &next_seq, &handover);
3524 }
3525 
3526 /*
3527  * Return the console tty driver structure and its associated index
3528  */
3529 struct tty_driver *console_device(int *index)
3530 {
3531 	struct console *c;
3532 	struct tty_driver *driver = NULL;
3533 	int cookie;
3534 
3535 	/*
3536 	 * Take console_lock to serialize device() callback with
3537 	 * other console operations. For example, fg_console is
3538 	 * modified under console_lock when switching vt.
3539 	 */
3540 	console_lock();
3541 
3542 	cookie = console_srcu_read_lock();
3543 	for_each_console_srcu(c) {
3544 		if (!c->device)
3545 			continue;
3546 		driver = c->device(c, index);
3547 		if (driver)
3548 			break;
3549 	}
3550 	console_srcu_read_unlock(cookie);
3551 
3552 	console_unlock();
3553 	return driver;
3554 }
3555 
3556 /*
3557  * Prevent further output on the passed console device so that (for example)
3558  * serial drivers can suspend console output before suspending a port, and can
3559  * re-enable output afterwards.
3560  */
3561 void console_suspend(struct console *console)
3562 {
3563 	__pr_flush(console, 1000, true);
3564 	console_list_lock();
3565 	console_srcu_write_flags(console, console->flags & ~CON_ENABLED);
3566 	console_list_unlock();
3567 
3568 	/*
3569 	 * Ensure that all SRCU list walks have completed. All contexts must
3570 	 * be able to see that this console is disabled so that (for example)
3571 	 * the caller can suspend the port without risk of another context
3572 	 * using the port.
3573 	 */
3574 	synchronize_srcu(&console_srcu);
3575 }
3576 EXPORT_SYMBOL(console_suspend);
3577 
3578 void console_resume(struct console *console)
3579 {
3580 	struct console_flush_type ft;
3581 	bool is_nbcon;
3582 
3583 	console_list_lock();
3584 	console_srcu_write_flags(console, console->flags | CON_ENABLED);
3585 	is_nbcon = console->flags & CON_NBCON;
3586 	console_list_unlock();
3587 
3588 	/*
3589 	 * Ensure that all SRCU list walks have completed. The related
3590 	 * printing context must be able to see it is enabled so that
3591 	 * it is guaranteed to wake up and resume printing.
3592 	 */
3593 	synchronize_srcu(&console_srcu);
3594 
3595 	printk_get_console_flush_type(&ft);
3596 	if (is_nbcon && ft.nbcon_offload)
3597 		nbcon_kthread_wake(console);
3598 	else if (ft.legacy_offload)
3599 		defer_console_output();
3600 
3601 	__pr_flush(console, 1000, true);
3602 }
3603 EXPORT_SYMBOL(console_resume);
3604 
3605 #ifdef CONFIG_PRINTK
3606 static int unregister_console_locked(struct console *console);
3607 
3608 /* True when system boot is far enough to create printer threads. */
3609 bool printk_kthreads_ready __ro_after_init;
3610 
3611 static struct task_struct *printk_legacy_kthread;
3612 
3613 static bool legacy_kthread_should_wakeup(void)
3614 {
3615 	struct console_flush_type ft;
3616 	struct console *con;
3617 	bool ret = false;
3618 	int cookie;
3619 
3620 	if (kthread_should_stop())
3621 		return true;
3622 
3623 	printk_get_console_flush_type(&ft);
3624 
3625 	cookie = console_srcu_read_lock();
3626 	for_each_console_srcu(con) {
3627 		short flags = console_srcu_read_flags(con);
3628 		u64 printk_seq;
3629 
3630 		/*
3631 		 * The legacy printer thread is only responsible for nbcon
3632 		 * consoles when the nbcon consoles cannot print via their
3633 		 * atomic or threaded flushing.
3634 		 */
3635 		if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
3636 			continue;
3637 
3638 		if (!console_is_usable(con, flags, false))
3639 			continue;
3640 
3641 		if (flags & CON_NBCON) {
3642 			printk_seq = nbcon_seq_read(con);
3643 		} else {
3644 			/*
3645 			 * It is safe to read @seq because only this
3646 			 * thread context updates @seq.
3647 			 */
3648 			printk_seq = con->seq;
3649 		}
3650 
3651 		if (prb_read_valid(prb, printk_seq, NULL)) {
3652 			ret = true;
3653 			break;
3654 		}
3655 	}
3656 	console_srcu_read_unlock(cookie);
3657 
3658 	return ret;
3659 }
3660 
3661 static int legacy_kthread_func(void *unused)
3662 {
3663 	bool try_again;
3664 
3665 wait_for_event:
3666 	wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());
3667 
3668 	do {
3669 		bool handover = false;
3670 		u64 next_seq = 0;
3671 
3672 		if (kthread_should_stop())
3673 			return 0;
3674 
3675 		console_lock();
3676 		console_flush_one_record(true, &next_seq, &handover, &try_again);
3677 		if (!handover)
3678 			__console_unlock();
3679 
3680 	} while (try_again);
3681 
3682 	goto wait_for_event;
3683 }
3684 
3685 static bool legacy_kthread_create(void)
3686 {
3687 	struct task_struct *kt;
3688 
3689 	lockdep_assert_console_list_lock_held();
3690 
3691 	kt = kthread_run(legacy_kthread_func, NULL, "pr/legacy");
3692 	if (WARN_ON(IS_ERR(kt))) {
3693 		pr_err("failed to start legacy printing thread\n");
3694 		return false;
3695 	}
3696 
3697 	printk_legacy_kthread = kt;
3698 
3699 	/*
3700 	 * It is important that console printing threads are scheduled
3701 	 * shortly after a printk call and with generous runtime budgets.
3702 	 */
3703 	sched_set_normal(printk_legacy_kthread, -20);
3704 
3705 	return true;
3706 }
3707 
3708 /**
3709  * printk_kthreads_shutdown - shutdown all threaded printers
3710  * @data: syscore context
3711  *
3712  * On system shutdown all threaded printers are stopped. This allows printk
3713  * to transition back to atomic printing, thus providing a robust mechanism
3714  * for the final shutdown/reboot messages to be output.
3715  */
3716 static void printk_kthreads_shutdown(void *data)
3717 {
3718 	struct console *con;
3719 
3720 	console_list_lock();
3721 	if (printk_kthreads_running) {
3722 		printk_kthreads_running = false;
3723 
3724 		for_each_console(con) {
3725 			if (con->flags & CON_NBCON)
3726 				nbcon_kthread_stop(con);
3727 		}
3728 
3729 		/*
3730 		 * The threads may have been stopped while printing a
3731 		 * backlog. Flush any records left over.
3732 		 */
3733 		nbcon_atomic_flush_pending();
3734 	}
3735 	console_list_unlock();
3736 }
3737 
3738 static const struct syscore_ops printk_syscore_ops = {
3739 	.shutdown = printk_kthreads_shutdown,
3740 };
3741 
3742 static struct syscore printk_syscore = {
3743 	.ops = &printk_syscore_ops,
3744 };
3745 
3746 /*
3747  * If appropriate, start nbcon kthreads and set @printk_kthreads_running.
3748  * If any kthreads fail to start, those consoles are unregistered.
3749  *
3750  * Must be called under console_list_lock().
3751  */
3752 static void printk_kthreads_check_locked(void)
3753 {
3754 	struct hlist_node *tmp;
3755 	struct console *con;
3756 
3757 	lockdep_assert_console_list_lock_held();
3758 
3759 	if (!printk_kthreads_ready)
3760 		return;
3761 
3762 	/* Start or stop the legacy kthread when needed. */
3763 	if (have_legacy_console || have_boot_console) {
3764 		if (!printk_legacy_kthread &&
3765 		    force_legacy_kthread() &&
3766 		    !legacy_kthread_create()) {
3767 			/*
3768 			 * All legacy consoles must be unregistered. If there
3769 			 * are any nbcon consoles, they will set up their own
3770 			 * kthread.
3771 			 */
3772 			hlist_for_each_entry_safe(con, tmp, &console_list, node) {
3773 				if (con->flags & CON_NBCON)
3774 					continue;
3775 
3776 				unregister_console_locked(con);
3777 			}
3778 		}
3779 	} else if (printk_legacy_kthread) {
3780 		kthread_stop(printk_legacy_kthread);
3781 		printk_legacy_kthread = NULL;
3782 	}
3783 
3784 	/*
3785 	 * Printer threads cannot be started as long as any boot console is
3786 	 * registered because there is no way to synchronize the hardware
3787 	 * registers between boot console code and regular console code.
3788 	 * It can only be known that there will be no new boot consoles when
3789 	 * an nbcon console is registered.
3790 	 */
3791 	if (have_boot_console || !have_nbcon_console) {
3792 		/* Clear flag in case all nbcon consoles have been unregistered. */
3793 		printk_kthreads_running = false;
3794 		return;
3795 	}
3796 
3797 	if (printk_kthreads_running)
3798 		return;
3799 
3800 	hlist_for_each_entry_safe(con, tmp, &console_list, node) {
3801 		if (!(con->flags & CON_NBCON))
3802 			continue;
3803 
3804 		if (!nbcon_kthread_create(con))
3805 			unregister_console_locked(con);
3806 	}
3807 
3808 	printk_kthreads_running = true;
3809 }
3810 
3811 static int __init printk_set_kthreads_ready(void)
3812 {
3813 	register_syscore(&printk_syscore);
3814 
3815 	console_list_lock();
3816 	printk_kthreads_ready = true;
3817 	printk_kthreads_check_locked();
3818 	console_list_unlock();
3819 
3820 	return 0;
3821 }
3822 early_initcall(printk_set_kthreads_ready);
3823 #endif /* CONFIG_PRINTK */
3824 
3825 static int __read_mostly keep_bootcon;
3826 
3827 static int __init keep_bootcon_setup(char *str)
3828 {
3829 	keep_bootcon = 1;
3830 	pr_info("debug: skip boot console de-registration.\n");
3831 
3832 	return 0;
3833 }
3834 
3835 early_param("keep_bootcon", keep_bootcon_setup);
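
/*
 * Editor's note (hypothetical boot setup): keeping an early serial
 * bootconsole alive is useful when debugging a broken handover to the
 * real console, e.g. on the kernel command line:
 *
 *	console=ttyS0,115200 earlycon=uart8250,io,0x3f8 keep_bootcon
 */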
3836 
3837 static int console_call_setup(struct console *newcon, char *options)
3838 {
3839 	int err;
3840 
3841 	if (!newcon->setup)
3842 		return 0;
3843 
3844 	/* Synchronize with possible boot console. */
3845 	console_lock();
3846 	err = newcon->setup(newcon, options);
3847 	console_unlock();
3848 
3849 	return err;
3850 }
3851 
3852 /*
3853  * This is called by register_console() to try to match
3854  * the newly registered console with any of the ones selected
3855  * by either the command line or add_preferred_console() and
3856  * setup/enable it.
3857  *
3858  * Care needs to be taken with consoles that are statically
3859  * enabled, such as netconsole.
3860  */
3861 static int try_enable_preferred_console(struct console *newcon,
3862 					bool user_specified)
3863 {
3864 	struct console_cmdline *c;
3865 	int i, err;
3866 
3867 	for (i = 0, c = console_cmdline;
3868 	     i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
3869 	     i++, c++) {
3870 		/* Console not yet initialized? */
3871 		if (!c->name[0])
3872 			continue;
3873 		if (c->user_specified != user_specified)
3874 			continue;
3875 		if (!newcon->match ||
3876 		    newcon->match(newcon, c->name, c->index, c->options) != 0) {
3877 			/* default matching */
3878 			BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
3879 			if (strcmp(c->name, newcon->name) != 0)
3880 				continue;
3881 			if (newcon->index >= 0 &&
3882 			    newcon->index != c->index)
3883 				continue;
3884 			if (newcon->index < 0)
3885 				newcon->index = c->index;
3886 
3887 			if (_braille_register_console(newcon, c))
3888 				return 0;
3889 
3890 			err = console_call_setup(newcon, c->options);
3891 			if (err)
3892 				return err;
3893 		}
3894 		newcon->flags |= CON_ENABLED;
3895 		if (i == preferred_console)
3896 			newcon->flags |= CON_CONSDEV;
3897 		return 0;
3898 	}
3899 
3900 	/*
3901 	 * Some consoles, such as pstore and netconsole, can be enabled even
3902 	 * without matching. Accept the pre-enabled consoles only when match()
3903 	 * and setup() have had a chance to be called.
3904 	 */
3905 	if (newcon->flags & CON_ENABLED && c->user_specified == user_specified)
3906 		return 0;
3907 
3908 	return -ENOENT;
3909 }
3910 
3911 /* Try to enable the console unconditionally */
3912 static void try_enable_default_console(struct console *newcon)
3913 {
3914 	if (newcon->index < 0)
3915 		newcon->index = 0;
3916 
3917 	if (console_call_setup(newcon, NULL) != 0)
3918 		return;
3919 
3920 	newcon->flags |= CON_ENABLED;
3921 
3922 	if (newcon->device)
3923 		newcon->flags |= CON_CONSDEV;
3924 }
3925 
3926 /* Return the starting sequence number for a newly registered console. */
3927 static u64 get_init_console_seq(struct console *newcon, bool bootcon_registered)
3928 {
3929 	struct console *con;
3930 	bool handover;
3931 	u64 init_seq;
3932 
3933 	if (newcon->flags & (CON_PRINTBUFFER | CON_BOOT)) {
3934 		/* Get a consistent copy of @syslog_seq. */
3935 		mutex_lock(&syslog_lock);
3936 		init_seq = syslog_seq;
3937 		mutex_unlock(&syslog_lock);
3938 	} else {
3939 		/* Begin with next message added to ringbuffer. */
3940 		init_seq = prb_next_seq(prb);
3941 
3942 		/*
3943 		 * If any enabled boot consoles are due to be unregistered
3944 		 * shortly, some may not be caught up and may be the same
3945 		 * device as @newcon. Since it is not known which boot console
3946 		 * is the same device, flush all consoles and, if necessary,
3947 		 * start with the message of the enabled boot console that is
3948 		 * the furthest behind.
3949 		 */
3950 		if (bootcon_registered && !keep_bootcon) {
3951 			/*
3952 			 * Hold the console_lock to stop console printing and
3953 			 * guarantee safe access to console->seq.
3954 			 */
3955 			console_lock();
3956 
3957 			/*
3958 			 * Flush all consoles and set the console to start at
3959 			 * the next unprinted sequence number.
3960 			 */
3961 			if (!console_flush_all(true, &init_seq, &handover)) {
3962 				/*
3963 				 * Flushing failed. Just choose the lowest
3964 				 * sequence of the enabled boot consoles.
3965 				 */
3966 
3967 				/*
3968 				 * If there was a handover, this context no
3969 				 * longer holds the console_lock.
3970 				 */
3971 				if (handover)
3972 					console_lock();
3973 
3974 				init_seq = prb_next_seq(prb);
3975 				for_each_console(con) {
3976 					u64 seq;
3977 
3978 					if (!(con->flags & CON_BOOT) ||
3979 					    !(con->flags & CON_ENABLED)) {
3980 						continue;
3981 					}
3982 
3983 					if (con->flags & CON_NBCON)
3984 						seq = nbcon_seq_read(con);
3985 					else
3986 						seq = con->seq;
3987 
3988 					if (seq < init_seq)
3989 						init_seq = seq;
3990 				}
3991 			}
3992 
3993 			console_unlock();
3994 		}
3995 	}
3996 
3997 	return init_seq;
3998 }
3999 
4000 #define console_first()				\
4001 	hlist_entry(console_list.first, struct console, node)
4002 
4003 static int unregister_console_locked(struct console *console);
4004 
4005 /*
4006  * The console driver calls this routine during kernel initialization
4007  * to register the console printing procedure with printk() and to
4008  * print any messages that were printed by the kernel before the
4009  * console driver was initialized.
4010  *
4011  * This can happen pretty early during the boot process (because of
4012  * early_printk) - sometimes before setup_arch() completes - so be careful
4013  * about which kernel features are used; they may not be initialised yet.
4014  *
4015  * There are two types of consoles - bootconsoles (early_printk) and
4016  * "real" consoles (everything which is not a bootconsole) which are
4017  * handled differently.
4018  *  - Any number of bootconsoles can be registered at any time.
4019  *  - As soon as a "real" console is registered, all bootconsoles
4020  *    will be unregistered automatically.
4021  *  - Once a "real" console is registered, any attempt to register a
4022  *    bootconsole will be rejected.
4023  */
4024 void register_console(struct console *newcon)
4025 {
4026 	bool use_device_lock = (newcon->flags & CON_NBCON) && newcon->write_atomic;
4027 	bool bootcon_registered = false;
4028 	bool realcon_registered = false;
4029 	struct console *con;
4030 	unsigned long flags;
4031 	u64 init_seq;
4032 	int err;
4033 
4034 	console_list_lock();
4035 
4036 	for_each_console(con) {
4037 		if (WARN(con == newcon, "console '%s%d' already registered\n",
4038 					 con->name, con->index)) {
4039 			goto unlock;
4040 		}
4041 
4042 		if (con->flags & CON_BOOT)
4043 			bootcon_registered = true;
4044 		else
4045 			realcon_registered = true;
4046 	}
4047 
4048 	/* Do not register boot consoles when there already is a real one. */
4049 	if ((newcon->flags & CON_BOOT) && realcon_registered) {
4050 		pr_info("Too late to register bootconsole %s%d\n",
4051 			newcon->name, newcon->index);
4052 		goto unlock;
4053 	}
4054 
4055 	if (newcon->flags & CON_NBCON) {
4056 		/*
4057 		 * Ensure the nbcon console buffers can be allocated
4058 		 * before modifying any global data.
4059 		 */
4060 		if (!nbcon_alloc(newcon))
4061 			goto unlock;
4062 	}
4063 
4064 	/*
4065 	 * See if we want to enable this console driver by default.
4066 	 *
4067 	 * Not when a console is preferred by the command line, device
4068 	 * tree, or SPCR.
4069 	 *
4070 	 * The first real console with tty binding (driver) wins. More
4071 	 * consoles might get enabled before the right one is found.
4072 	 *
4073 	 * Note that a console with tty binding will have CON_CONSDEV
4074 	 * flag set and will be first in the list.
4075 	 */
4076 	if (preferred_console < 0) {
4077 		if (hlist_empty(&console_list) || !console_first()->device ||
4078 		    console_first()->flags & CON_BOOT) {
4079 			try_enable_default_console(newcon);
4080 		}
4081 	}
4082 
4083 	/* See if this console matches one we selected on the command line */
4084 	err = try_enable_preferred_console(newcon, true);
4085 
4086 	/* If not, try to match against the platform default(s) */
4087 	if (err == -ENOENT)
4088 		err = try_enable_preferred_console(newcon, false);
4089 
4090 	/* printk() messages are not printed to the Braille console. */
4091 	if (err || newcon->flags & CON_BRL) {
4092 		if (newcon->flags & CON_NBCON)
4093 			nbcon_free(newcon);
4094 		goto unlock;
4095 	}
4096 
4097 	/*
4098 	 * If we have a bootconsole and are switching to a real console,
4099 	 * don't print everything out again, since when the boot console and
4100 	 * the real console are the same physical device, it's annoying to
4101 	 * see the beginning boot messages twice.
4102 	 */
4103 	if (bootcon_registered &&
4104 	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
4105 		newcon->flags &= ~CON_PRINTBUFFER;
4106 	}
4107 
4108 	newcon->dropped = 0;
4109 	init_seq = get_init_console_seq(newcon, bootcon_registered);
4110 
4111 	if (newcon->flags & CON_NBCON) {
4112 		have_nbcon_console = true;
4113 		nbcon_seq_force(newcon, init_seq);
4114 	} else {
4115 		have_legacy_console = true;
4116 		newcon->seq = init_seq;
4117 	}
4118 
4119 	if (newcon->flags & CON_BOOT)
4120 		have_boot_console = true;
4121 
4122 	/*
4123 	 * If another context is actively using the hardware of this new
4124 	 * console, it will not be aware of the nbcon synchronization. There
4125 	 * is a risk that the two contexts could access the hardware
4126 	 * simultaneously if this new console is used for atomic printing
4127 	 * while the other context is still using the hardware.
4128 	 *
4129 	 * Use the driver synchronization to ensure that the hardware is not
4130 	 * in use while this new console transitions to being registered.
4131 	 */
4132 	if (use_device_lock)
4133 		newcon->device_lock(newcon, &flags);
4134 
4135 	/*
4136 	 * Put this console in the list - keep the
4137 	 * preferred driver at the head of the list.
4138 	 */
4139 	if (hlist_empty(&console_list)) {
4140 		/* Ensure CON_CONSDEV is always set for the head. */
4141 		newcon->flags |= CON_CONSDEV;
4142 		hlist_add_head_rcu(&newcon->node, &console_list);
4143 
4144 	} else if (newcon->flags & CON_CONSDEV) {
4145 		/* Only the new head can have CON_CONSDEV set. */
4146 		console_srcu_write_flags(console_first(), console_first()->flags & ~CON_CONSDEV);
4147 		hlist_add_head_rcu(&newcon->node, &console_list);
4148 
4149 	} else {
4150 		hlist_add_behind_rcu(&newcon->node, console_list.first);
4151 	}
4152 
4153 	/*
4154 	 * No need to synchronize SRCU here! The caller does not rely
4155 	 * on all contexts being able to see the new console before
4156 	 * register_console() completes.
4157 	 */
4158 
4159 	/* This new console is now registered. */
4160 	if (use_device_lock)
4161 		newcon->device_unlock(newcon, flags);
4162 
4163 	console_sysfs_notify();
4164 
4165 	/*
4166 	 * By unregistering the bootconsoles after we enable the real console,
4167 	 * we get the "console xxx enabled" message on all the consoles -
4168 	 * boot consoles, real consoles, etc. This ensures that end users
4169 	 * know there might be something in the kernel's log buffer that
4170 	 * went to the bootconsole (which they do not see on the real console).
4171 	 */
4172 	con_printk(KERN_INFO, newcon, "enabled\n");
4173 	if (bootcon_registered &&
4174 	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
4175 	    !keep_bootcon) {
4176 		struct hlist_node *tmp;
4177 
4178 		hlist_for_each_entry_safe(con, tmp, &console_list, node) {
4179 			if (con->flags & CON_BOOT)
4180 				unregister_console_locked(con);
4181 		}
4182 	}
4183 
4184 	/* Changed console list, may require printer threads to start/stop. */
4185 	printk_kthreads_check_locked();
4186 unlock:
4187 	console_list_unlock();
4188 }
4189 EXPORT_SYMBOL(register_console);
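
/*
 * Editor's sketch (hypothetical driver; all "my_*" names are assumed): a
 * minimal legacy console. Only .write is required for output; CON_PRINTBUFFER
 * requests a replay of the ringbuffer, and .index = -1 lets the command
 * line select the instance:
 *
 *	static void my_write(struct console *con, const char *s, unsigned int n)
 *	{
 *		my_hw_emit(s, n);
 *	}
 *
 *	static struct console my_console = {
 *		.name	= "mycon",
 *		.write	= my_write,
 *		.flags	= CON_PRINTBUFFER,
 *		.index	= -1,
 *	};
 *
 *	register_console(&my_console);
 */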
4190 
4191 /* Must be called under console_list_lock(). */
4192 static int unregister_console_locked(struct console *console)
4193 {
4194 	bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
4195 	bool found_legacy_con = false;
4196 	bool found_nbcon_con = false;
4197 	bool found_boot_con = false;
4198 	unsigned long flags;
4199 	struct console *c;
4200 	int res;
4201 
4202 	lockdep_assert_console_list_lock_held();
4203 
4204 	con_printk(KERN_INFO, console, "disabled\n");
4205 
4206 	res = _braille_unregister_console(console);
4207 	if (res < 0)
4208 		return res;
4209 	if (res > 0)
4210 		return 0;
4211 
4212 	if (!console_is_registered_locked(console))
4213 		res = -ENODEV;
4214 	else if (console_is_usable(console, console->flags, true))
4215 		__pr_flush(console, 1000, true);
4216 
4217 	/* Disable it unconditionally */
4218 	console_srcu_write_flags(console, console->flags & ~CON_ENABLED);
4219 
4220 	if (res < 0)
4221 		return res;
4222 
4223 	/*
4224 	 * Use the driver synchronization to ensure that the hardware is not
4225 	 * in use while this console transitions to being unregistered.
4226 	 */
4227 	if (use_device_lock)
4228 		console->device_lock(console, &flags);
4229 
4230 	hlist_del_init_rcu(&console->node);
4231 
4232 	if (use_device_lock)
4233 		console->device_unlock(console, flags);
4234 
4235 	/*
4236 	 * <HISTORICAL>
4237 	 * If this isn't the last console and it has CON_CONSDEV set, we
4238 	 * need to set it on the next preferred console.
4239 	 * </HISTORICAL>
4240 	 *
4241 	 * The above makes no sense as there is no guarantee that the next
4242 	 * console has any device attached. Oh well....
4243 	 */
4244 	if (!hlist_empty(&console_list) && console->flags & CON_CONSDEV)
4245 		console_srcu_write_flags(console_first(), console_first()->flags | CON_CONSDEV);
4246 
4247 	/*
4248 	 * Ensure that all SRCU list walks have completed. No context may
4249 	 * still see this console in the list, so that any
4250 	 * exit/cleanup routines can be performed safely.
4251 	 */
4252 	synchronize_srcu(&console_srcu);
4253 
4254 	/*
4255 	 * With this console gone, the global flags tracking registered
4256 	 * console types may have changed. Update them.
4257 	 */
4258 	for_each_console(c) {
4259 		if (c->flags & CON_BOOT)
4260 			found_boot_con = true;
4261 
4262 		if (c->flags & CON_NBCON)
4263 			found_nbcon_con = true;
4264 		else
4265 			found_legacy_con = true;
4266 	}
4267 	if (!found_boot_con)
4268 		have_boot_console = found_boot_con;
4269 	if (!found_legacy_con)
4270 		have_legacy_console = found_legacy_con;
4271 	if (!found_nbcon_con)
4272 		have_nbcon_console = found_nbcon_con;
4273 
4274 	/* @have_nbcon_console must be updated before calling nbcon_free(). */
4275 	if (console->flags & CON_NBCON)
4276 		nbcon_free(console);
4277 
4278 	console_sysfs_notify();
4279 
4280 	if (console->exit)
4281 		res = console->exit(console);
4282 
4283 	/* Changed console list, may require printer threads to start/stop. */
4284 	printk_kthreads_check_locked();
4285 
4286 	return res;
4287 }
4288 
4289 int unregister_console(struct console *console)
4290 {
4291 	int res;
4292 
4293 	console_list_lock();
4294 	res = unregister_console_locked(console);
4295 	console_list_unlock();
4296 	return res;
4297 }
4298 EXPORT_SYMBOL(unregister_console);
4299 
4300 /**
4301  * console_force_preferred_locked - force a registered console preferred
4302  * @con: The registered console to force preferred.
4303  *
4304  * Must be called under console_list_lock().
4305  */
4306 void console_force_preferred_locked(struct console *con)
4307 {
4308 	struct console *cur_pref_con;
4309 
4310 	if (!console_is_registered_locked(con))
4311 		return;
4312 
4313 	cur_pref_con = console_first();
4314 
4315 	/* Already preferred? */
4316 	if (cur_pref_con == con)
4317 		return;
4318 
4319 	/*
4320 	 * Delete, but do not re-initialize the entry. This allows the console
4321 	 * to continue to appear registered (via any hlist_unhashed_lockless()
4322 	 * checks), even though it was briefly removed from the console list.
4323 	 */
4324 	hlist_del_rcu(&con->node);
4325 
4326 	/*
4327 	 * Ensure that all SRCU list walks have completed so that the console
4328 	 * can be added to the beginning of the console list and its forward
4329 	 * list pointer can be re-initialized.
4330 	 */
4331 	synchronize_srcu(&console_srcu);
4332 
4333 	con->flags |= CON_CONSDEV;
4334 	WARN_ON(!con->device);
4335 
4336 	/* Only the new head can have CON_CONSDEV set. */
4337 	console_srcu_write_flags(cur_pref_con, cur_pref_con->flags & ~CON_CONSDEV);
4338 	hlist_add_head_rcu(&con->node, &console_list);
4339 }
4340 EXPORT_SYMBOL(console_force_preferred_locked);
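
/*
 * Editor's sketch (hypothetical caller): as the _locked suffix implies,
 * the console list lock must be held around the call:
 *
 *	console_list_lock();
 *	console_force_preferred_locked(&my_console);	// assumed console
 *	console_list_unlock();
 */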
4341 
4342 /*
4343  * Initialize the console device. This is called *early*, so
4344  * we can't necessarily depend on lots of kernel help here.
4345  * Just do some early initializations, and do the complex setup
4346  * later.
4347  */
4348 void __init console_init(void)
4349 {
4350 	int ret;
4351 	initcall_t call;
4352 	initcall_entry_t *ce;
4353 
4354 #ifdef CONFIG_NULL_TTY_DEFAULT_CONSOLE
4355 	if (!console_set_on_cmdline)
4356 		add_preferred_console("ttynull", 0, NULL);
4357 #endif
4358 
4359 	/* Setup the default TTY line discipline. */
4360 	n_tty_init();
4361 
4362 	/*
4363 	 * set up the console device so that later boot sequences can
4364 	 * inform about problems, etc.
4365 	 */
4366 	ce = __con_initcall_start;
4367 	trace_initcall_level("console");
4368 	while (ce < __con_initcall_end) {
4369 		call = initcall_from_entry(ce);
4370 		trace_initcall_start(call);
4371 		ret = call();
4372 		trace_initcall_finish(call, ret);
4373 		ce++;
4374 	}
4375 }
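
/*
 * Editor's note: the entries iterated above are emitted by the
 * console_initcall() macro. A driver sketch (assumed names):
 *
 *	static int __init my_console_init(void)
 *	{
 *		register_console(&my_console);
 *		return 0;
 *	}
 *	console_initcall(my_console_init);
 */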
4376 
4377 /*
4378  * Some boot consoles access data that is in the init section and which will
4379  * be discarded after the initcalls have been run. To make sure that no code
4380  * will access this data, unregister the boot consoles in a late initcall.
4381  *
4382  * If for some reason, such as deferred probe or the driver being a loadable
4383  * module, the real console hasn't registered yet at this point, there will
4384  * be a brief interval in which no messages are logged to the console, which
4385  * makes it difficult to diagnose problems that occur during this time.
4386  *
4387  * To mitigate this problem somewhat, only unregister consoles whose memory
4388  * intersects with the init section. Note that all other boot consoles will
4389  * get unregistered when the real preferred console is registered.
4390  */
4391 static int __init printk_late_init(void)
4392 {
4393 	struct hlist_node *tmp;
4394 	struct console *con;
4395 	int ret;
4396 
4397 	console_list_lock();
4398 	hlist_for_each_entry_safe(con, tmp, &console_list, node) {
4399 		if (!(con->flags & CON_BOOT))
4400 			continue;
4401 
4402 		/* Check addresses that might be used for enabled consoles. */
4403 		if (init_section_intersects(con, sizeof(*con)) ||
4404 		    init_section_contains(con->write, 0) ||
4405 		    init_section_contains(con->read, 0) ||
4406 		    init_section_contains(con->device, 0) ||
4407 		    init_section_contains(con->unblank, 0) ||
4408 		    init_section_contains(con->data, 0)) {
4409 			/*
4410 			 * Please, consider moving the reported consoles out
4411 			 * Please consider moving the reported consoles out
4412 			 */
4413 			pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n",
4414 				con->name, con->index);
4415 			unregister_console_locked(con);
4416 		}
4417 	}
4418 	console_list_unlock();
4419 
4420 	ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
4421 					console_cpu_notify);
4422 	WARN_ON(ret < 0);
4423 	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
4424 					console_cpu_notify, NULL);
4425 	WARN_ON(ret < 0);
4426 	printk_sysctl_init();
4427 	return 0;
4428 }
4429 late_initcall(printk_late_init);
4430 
4431 #if defined CONFIG_PRINTK
4432 /* If @con is specified, only wait for that console. Otherwise wait for all. */
4433 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
4434 {
4435 	unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms);
4436 	unsigned long remaining_jiffies = timeout_jiffies;
4437 	struct console_flush_type ft;
4438 	struct console *c;
4439 	u64 last_diff = 0;
4440 	u64 printk_seq;
4441 	short flags;
4442 	int cookie;
4443 	u64 diff;
4444 	u64 seq;
4445 
4446 	/* Sorry, pr_flush() will not work this early. */
4447 	if (system_state < SYSTEM_SCHEDULING)
4448 		return false;
4449 
4450 	might_sleep();
4451 
4452 	seq = prb_next_reserve_seq(prb);
4453 
4454 	/* Flush the consoles so that records up to @seq are printed. */
4455 	printk_get_console_flush_type(&ft);
4456 	if (ft.nbcon_atomic)
4457 		nbcon_atomic_flush_pending();
4458 	if (ft.legacy_direct) {
4459 		console_lock();
4460 		console_unlock();
4461 	}
4462 
4463 	for (;;) {
4464 		unsigned long begin_jiffies;
4465 		unsigned long slept_jiffies;
4466 
4467 		diff = 0;
4468 
4469 		/*
4470 		 * Hold the console_lock to guarantee safe access to
4471 		 * console->seq. Releasing console_lock flushes more
4472 		 * records in case @seq is still not printed on all
4473 		 * usable consoles.
4474 		 *
4475 		 * Holding the console_lock is not necessary if there
4476 		 * are no legacy or boot consoles. However, such a
4477 		 * console could register at any time. Always hold the
4478 		 * console_lock as a precaution rather than
4479 		 * synchronizing against register_console().
4480 		 */
4481 		console_lock();
4482 
4483 		cookie = console_srcu_read_lock();
4484 		for_each_console_srcu(c) {
4485 			if (con && con != c)
4486 				continue;
4487 
4488 			flags = console_srcu_read_flags(c);
4489 
4490 			/*
4491 			 * If consoles are not usable, it cannot be expected
4492 			 * that they make forward progress, so only increment
4493 			 * @diff for usable consoles.
4494 			 */
4495 			if (!console_is_usable(c, flags, true) &&
4496 			    !console_is_usable(c, flags, false)) {
4497 				continue;
4498 			}
4499 
4500 			if (flags & CON_NBCON) {
4501 				printk_seq = nbcon_seq_read(c);
4502 			} else {
4503 				printk_seq = c->seq;
4504 			}
4505 
4506 			if (printk_seq < seq)
4507 				diff += seq - printk_seq;
4508 		}
4509 		console_srcu_read_unlock(cookie);
4510 
4511 		if (diff != last_diff && reset_on_progress)
4512 			remaining_jiffies = timeout_jiffies;
4513 
4514 		console_unlock();
4515 
4516 		/* Note: @diff is 0 if there are no usable consoles. */
4517 		if (diff == 0 || remaining_jiffies == 0)
4518 			break;
4519 
4520 		/* msleep(1) might sleep much longer. Check time by jiffies. */
4521 		begin_jiffies = jiffies;
4522 		msleep(1);
4523 		slept_jiffies = jiffies - begin_jiffies;
4524 
4525 		remaining_jiffies -= min(slept_jiffies, remaining_jiffies);
4526 
4527 		last_diff = diff;
4528 	}
4529 
4530 	return (diff == 0);
4531 }
4532 
4533 /**
4534  * pr_flush() - Wait for printing threads to catch up.
4535  *
4536  * @timeout_ms:        The maximum time (in ms) to wait.
4537  * @reset_on_progress: Reset the timeout if forward progress is seen.
4538  *
4539  * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
4540  * represents infinite waiting.
4541  *
4542  * If @reset_on_progress is true, the timeout will be reset whenever any
4543  * printer has been seen to make some forward progress.
4544  *
4545  * Context: Process context. May sleep while acquiring console lock.
4546  * Return: true if all usable printers are caught up.
4547  */
4548 bool pr_flush(int timeout_ms, bool reset_on_progress)
4549 {
4550 	return __pr_flush(NULL, timeout_ms, reset_on_progress);
4551 }
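
/*
 * Editor's sketch: a typical call site gives slow consoles a bounded
 * chance to catch up before a state transition:
 *
 *	pr_emerg("hardware watchdog fired, rebooting\n");
 *	pr_flush(1000, true);	// wait up to 1s, resetting on progress
 */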
4552 
4553 /*
4554  * Delayed printk version, for scheduler-internal messages:
4555  */
4556 #define PRINTK_PENDING_WAKEUP	0x01
4557 #define PRINTK_PENDING_OUTPUT	0x02
4558 
4559 static DEFINE_PER_CPU(int, printk_pending);
4560 
4561 static void wake_up_klogd_work_func(struct irq_work *irq_work)
4562 {
4563 	int pending = this_cpu_xchg(printk_pending, 0);
4564 
4565 	if (pending & PRINTK_PENDING_OUTPUT) {
4566 		if (force_legacy_kthread()) {
4567 			if (printk_legacy_kthread)
4568 				wake_up_interruptible(&legacy_wait);
4569 		} else {
4570 			if (console_trylock())
4571 				console_unlock();
4572 		}
4573 	}
4574 
4575 	if (pending & PRINTK_PENDING_WAKEUP)
4576 		wake_up_interruptible(&log_wait);
4577 }
4578 
4579 static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
4580 	IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);
4581 
4582 static void __wake_up_klogd(int val)
4583 {
4584 	if (!printk_percpu_data_ready())
4585 		return;
4586 
4587 	/*
4588 	 * It is not allowed to call this function when console irq_work
4589 	 * is blocked.
4590 	 */
4591 	if (WARN_ON_ONCE(console_irqwork_blocked))
4592 		return;
4593 
4594 	preempt_disable();
4595 	/*
4596 	 * Guarantee any new records can be seen by tasks preparing to wait
4597 	 * before this context checks if the wait queue is empty.
4598 	 *
4599 	 * The full memory barrier within wq_has_sleeper() pairs with the full
4600 	 * memory barrier within set_current_state() of
4601 	 * prepare_to_wait_event(), which is called after ___wait_event() adds
4602 	 * the waiter but before it has checked the wait condition.
4603 	 *
4604 	 * This pairs with devkmsg_read:A and syslog_print:A.
4605 	 */
4606 	if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
4607 	    (val & PRINTK_PENDING_OUTPUT)) {
4608 		this_cpu_or(printk_pending, val);
4609 		irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
4610 	}
4611 	preempt_enable();
4612 }
4613 
4614 /**
4615  * wake_up_klogd - Wake kernel logging daemon
4616  *
4617  * Use this function when new records have been added to the ringbuffer
4618  * and the console printing of those records has already occurred or is
4619  * known to be handled by some other context. This function will only
4620  * wake the logging daemon.
4621  *
4622  * Context: Any context.
4623  */
4624 void wake_up_klogd(void)
4625 {
4626 	__wake_up_klogd(PRINTK_PENDING_WAKEUP);
4627 }
4628 
4629 /**
4630  * defer_console_output - Wake kernel logging daemon and trigger
4631  *	console printing in a deferred context
4632  *
4633  * Use this function when new records have been added to the ringbuffer
4634  * and this context is responsible for printing those records to the
4635  * consoles, but is not allowed to do the console printing itself.
4636  * Trigger an irq_work context to perform the console printing. This
4637  * function also wakes the logging daemon.
4638  *
4639  * Context: Any context.
4640  */
4641 void defer_console_output(void)
4642 {
4643 	/*
4644 	 * New messages may have been added directly to the ringbuffer
4645 	 * using vprintk_store(), so wake any waiters as well.
4646 	 */
4647 	__wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
4648 }
4649 
4650 /**
4651  * printk_trigger_flush - Attempt to flush printk buffer to consoles.
4652  *
4653  * If possible, flush the printk buffer to all consoles in the caller's
4654  * context. If offloading is available, trigger deferred printing.
4655  *
4656  * This is best effort. Depending on the system state, console states,
4657  * and caller context, no actual flushing may result from this call.
4658  */
4659 void printk_trigger_flush(void)
4660 {
4661 	struct console_flush_type ft;
4662 
4663 	printk_get_console_flush_type(&ft);
4664 	if (ft.nbcon_atomic)
4665 		nbcon_atomic_flush_pending();
4666 	if (ft.nbcon_offload)
4667 		nbcon_kthreads_wake();
4668 	if (ft.legacy_direct) {
4669 		if (console_trylock())
4670 			console_unlock();
4671 	}
4672 	if (ft.legacy_offload)
4673 		defer_console_output();
4674 }
4675 
4676 int vprintk_deferred(const char *fmt, va_list args)
4677 {
4678 	return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
4679 }
4680 
4681 int _printk_deferred(const char *fmt, ...)
4682 {
4683 	va_list args;
4684 	int r;
4685 
4686 	va_start(args, fmt);
4687 	r = vprintk_deferred(fmt, args);
4688 	va_end(args);
4689 
4690 	return r;
4691 }
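
/*
 * Editor's sketch: callers use the printk_deferred() wrapper from
 * <linux/printk.h> in contexts where taking console locks is unsafe,
 * e.g. deep inside the scheduler:
 *
 *	printk_deferred(KERN_WARNING "sched: clock skew detected\n");
 */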
4692 
4693 /*
4694  * printk rate limiting, lifted from the networking subsystem.
4695  *
4696  * This enforces a rate limit: not more than 10 kernel messages
4697  * every 5s to make a denial-of-service attack impossible.
4698  */
4699 DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
4700 
4701 int __printk_ratelimit(const char *func)
4702 {
4703 	return ___ratelimit(&printk_ratelimit_state, func);
4704 }
4705 EXPORT_SYMBOL(__printk_ratelimit);
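
/*
 * Editor's sketch: callers use the printk_ratelimit() wrapper, which
 * passes __func__ for the "callbacks suppressed" report:
 *
 *	if (printk_ratelimit())
 *		pr_warn("dropping malformed packet\n");
 */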
4706 
4707 /**
4708  * printk_timed_ratelimit - caller-controlled printk ratelimiting
4709  * @caller_jiffies: pointer to caller's state
4710  * @interval_msecs: minimum interval between prints
4711  *
4712  * printk_timed_ratelimit() returns true if more than @interval_msecs
4713  * milliseconds have elapsed since the last time printk_timed_ratelimit()
4714  * returned true.
4715  */
4716 bool printk_timed_ratelimit(unsigned long *caller_jiffies,
4717 			unsigned int interval_msecs)
4718 {
4719 	unsigned long elapsed = jiffies - *caller_jiffies;
4720 
4721 	if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
4722 		return false;
4723 
4724 	*caller_jiffies = jiffies;
4725 	return true;
4726 }
4727 EXPORT_SYMBOL(printk_timed_ratelimit);
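
/*
 * Editor's sketch: the caller owns the state, so one static variable
 * per call site:
 *
 *	static unsigned long last_msg;
 *
 *	if (printk_timed_ratelimit(&last_msg, 5000))	// at most every 5s
 *		pr_info("still waiting for firmware\n");
 */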
4728 
4729 static DEFINE_SPINLOCK(dump_list_lock);
4730 static LIST_HEAD(dump_list);
4731 
4732 /**
4733  * kmsg_dump_register - register a kernel log dumper.
4734  * @dumper: pointer to the kmsg_dumper structure
4735  *
4736  * Adds a kernel log dumper to the system. The dump callback in the
4737  * structure must be set and will be called when the kernel oopses or
4738  * panics. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
4739  */
4740 int kmsg_dump_register(struct kmsg_dumper *dumper)
4741 {
4742 	unsigned long flags;
4743 	int err = -EBUSY;
4744 
4745 	/* The dump callback needs to be set */
4746 	if (!dumper->dump)
4747 		return -EINVAL;
4748 
4749 	spin_lock_irqsave(&dump_list_lock, flags);
4750 	/* Don't allow registering multiple times */
4751 	if (!dumper->registered) {
4752 		dumper->registered = 1;
4753 		list_add_tail_rcu(&dumper->list, &dump_list);
4754 		err = 0;
4755 	}
4756 	spin_unlock_irqrestore(&dump_list_lock, flags);
4757 
4758 	return err;
4759 }
4760 EXPORT_SYMBOL_GPL(kmsg_dump_register);
4761 
4762 /**
4763  * kmsg_dump_unregister - unregister a kmsg dumper.
4764  * @dumper: pointer to the kmsg_dumper structure
4765  *
4766  * Removes a kernel log dumper from the system. Returns zero on success and
4767  * %-EINVAL otherwise.
4768  */
4769 int kmsg_dump_unregister(struct kmsg_dumper *dumper)
4770 {
4771 	unsigned long flags;
4772 	int err = -EINVAL;
4773 
4774 	spin_lock_irqsave(&dump_list_lock, flags);
4775 	if (dumper->registered) {
4776 		dumper->registered = 0;
4777 		list_del_rcu(&dumper->list);
4778 		err = 0;
4779 	}
4780 	spin_unlock_irqrestore(&dump_list_lock, flags);
4781 	synchronize_rcu();
4782 
4783 	return err;
4784 }
4785 EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
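
/*
 * Editor's sketch (hypothetical dumper; "my_*" names are assumed):
 * registering a dumper and walking the log line by line from its
 * callback:
 *
 *	static void my_dump(struct kmsg_dumper *dumper,
 *			    struct kmsg_dump_detail *detail)
 *	{
 *		struct kmsg_dump_iter iter;
 *		char line[256];
 *		size_t len;
 *
 *		kmsg_dump_rewind(&iter);
 *		while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
 *			my_emit(line, len);	// oldest record first
 *	}
 *
 *	static struct kmsg_dumper my_dumper = {
 *		.dump		= my_dump,
 *		.max_reason	= KMSG_DUMP_PANIC,
 *	};
 *
 *	kmsg_dump_register(&my_dumper);
 */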
4786 
4787 static bool always_kmsg_dump;
4788 module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
4789 
4790 const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
4791 {
4792 	switch (reason) {
4793 	case KMSG_DUMP_PANIC:
4794 		return "Panic";
4795 	case KMSG_DUMP_OOPS:
4796 		return "Oops";
4797 	case KMSG_DUMP_EMERG:
4798 		return "Emergency";
4799 	case KMSG_DUMP_SHUTDOWN:
4800 		return "Shutdown";
4801 	default:
4802 		return "Unknown";
4803 	}
4804 }
4805 EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
4806 
4807 /**
4808  * kmsg_dump_desc - dump kernel log to kernel message dumpers.
4809  * @reason: the reason (oops, panic etc) for dumping
4810  * @desc: a short string to describe what caused the panic or oops. Can be NULL
4811  * if no additional description is available.
4812  *
4813  * Call each of the registered dumper's dump() callback, which can
4814  * retrieve the kmsg records with kmsg_dump_get_line() or
4815  * kmsg_dump_get_buffer().
4816  */
4817 void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
4818 {
4819 	struct kmsg_dumper *dumper;
4820 	struct kmsg_dump_detail detail = {
4821 		.reason = reason,
4822 		.description = desc};
4823 
4824 	rcu_read_lock();
4825 	list_for_each_entry_rcu(dumper, &dump_list, list) {
4826 		enum kmsg_dump_reason max_reason = dumper->max_reason;
4827 
4828 		/*
4829 		 * If client has not provided a specific max_reason, default
4830 		 * to KMSG_DUMP_OOPS, unless always_kmsg_dump was set.
4831 		 */
4832 		if (max_reason == KMSG_DUMP_UNDEF) {
4833 			max_reason = always_kmsg_dump ? KMSG_DUMP_MAX :
4834 							KMSG_DUMP_OOPS;
4835 		}
4836 		if (reason > max_reason)
4837 			continue;
4838 
4839 		/* invoke dumper which will iterate over records */
4840 		dumper->dump(dumper, &detail);
4841 	}
4842 	rcu_read_unlock();
4843 }
4844 
4845 /**
4846  * kmsg_dump_get_line - retrieve one kmsg log line
4847  * @iter: kmsg dump iterator
4848  * @syslog: include the "<4>" prefixes
4849  * @line: buffer to copy the line to
4850  * @size: maximum size of the buffer
4851  * @len: length of line placed into buffer
4852  *
4853  * Start at the beginning of the kmsg buffer, with the oldest kmsg
4854  * record, and copy one record into the provided buffer.
4855  *
4856  * Consecutive calls will return the next available record moving
4857  * towards the end of the buffer with the youngest messages.
4858  *
4859  * A return value of %false indicates that there are no more records to
4860  * read.
4861  */
4862 bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
4863 			char *line, size_t size, size_t *len)
4864 {
4865 	u64 min_seq = latched_seq_read_nolock(&clear_seq);
4866 	struct printk_info info;
4867 	unsigned int line_count;
4868 	struct printk_record r;
4869 	size_t l = 0;
4870 	bool ret = false;
4871 
4872 	if (iter->cur_seq < min_seq)
4873 		iter->cur_seq = min_seq;
4874 
4875 	prb_rec_init_rd(&r, &info, line, size);
4876 
4877 	/* Read text or count text lines? */
4878 	if (line) {
4879 		if (!prb_read_valid(prb, iter->cur_seq, &r))
4880 			goto out;
4881 		l = record_print_text(&r, syslog, printk_time);
4882 	} else {
4883 		if (!prb_read_valid_info(prb, iter->cur_seq,
4884 					 &info, &line_count)) {
4885 			goto out;
4886 		}
4887 		l = get_record_print_text_size(&info, line_count, syslog,
4888 					       printk_time);
4889 
4890 	}
4891 
4892 	iter->cur_seq = r.info->seq + 1;
4893 	ret = true;
4894 out:
4895 	if (len)
4896 		*len = l;
4897 	return ret;
4898 }
4899 EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
4900 
4901 /**
4902  * kmsg_dump_get_buffer - copy kmsg log lines
4903  * @iter: kmsg dump iterator
4904  * @syslog: include the "<4>" prefixes
4905  * @buf: buffer to copy the lines to
4906  * @size: maximum size of the buffer
4907  * @len_out: length of line placed into buffer
4908  *
4909  * Start at the end of the kmsg buffer and fill the provided buffer
4910  * with as many of the *youngest* kmsg records that fit into it.
4911  * If the buffer is large enough, all available kmsg records will be
4912  * copied with a single call.
4913  *
4914  * Consecutive calls will fill the buffer with the next block of
4915  * available older records, not including the earlier retrieved ones.
4916  *
4917  * A return value of %false indicates that there are no more records to
4918  * read.
4919  */
4920 bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
4921 			  char *buf, size_t size, size_t *len_out)
4922 {
4923 	u64 min_seq = latched_seq_read_nolock(&clear_seq);
4924 	struct printk_info info;
4925 	struct printk_record r;
4926 	u64 seq;
4927 	u64 next_seq;
4928 	size_t len = 0;
4929 	bool ret = false;
4930 	bool time = printk_time;
4931 
4932 	if (!buf || !size)
4933 		goto out;
4934 
4935 	if (iter->cur_seq < min_seq)
4936 		iter->cur_seq = min_seq;
4937 
4938 	if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
4939 		if (info.seq != iter->cur_seq) {
4940 			/* messages are gone, move to first available one */
4941 			iter->cur_seq = info.seq;
4942 		}
4943 	}
4944 
4945 	/* last entry */
4946 	if (iter->cur_seq >= iter->next_seq)
4947 		goto out;
4948 
4949 	/*
4950 	 * Find first record that fits, including all following records,
4951 	 * into the user-provided buffer for this dump. Pass in size-1
4952 	 * because this function (by way of record_print_text()) will
4953 	 * not write more than size-1 bytes of text into @buf.
4954 	 */
4955 	seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
4956 				     size - 1, syslog, time);
4957 
4958 	/*
4959 	 * Next kmsg_dump_get_buffer() invocation will dump block of
4960 	 * older records stored right before this one.
4961 	 */
4962 	next_seq = seq;
4963 
4964 	prb_rec_init_rd(&r, &info, buf, size);
4965 
4966 	prb_for_each_record(seq, prb, seq, &r) {
4967 		if (r.info->seq >= iter->next_seq)
4968 			break;
4969 
4970 		len += record_print_text(&r, syslog, time);
4971 
4972 		/* Adjust record to store to remaining buffer space. */
4973 		prb_rec_init_rd(&r, &info, buf + len, size - len);
4974 	}
4975 
4976 	iter->next_seq = next_seq;
4977 	ret = true;
4978 out:
4979 	if (len_out)
4980 		*len_out = len;
4981 	return ret;
4982 }
4983 EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
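
/*
 * Editor's sketch: buffer mode walks backwards in blocks, youngest
 * block first; each successive call returns the block of older records
 * preceding the previous one (write_block() is an assumed sink):
 *
 *	struct kmsg_dump_iter iter;
 *	static char buf[4096];
 *	size_t len;
 *
 *	kmsg_dump_rewind(&iter);
 *	while (kmsg_dump_get_buffer(&iter, false, buf, sizeof(buf), &len))
 *		write_block(buf, len);
 */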
4984 
4985 /**
4986  * kmsg_dump_rewind - reset the iterator
4987  * @iter: kmsg dump iterator
4988  *
4989  * Reset the dumper's iterator so that kmsg_dump_get_line() and
4990  * kmsg_dump_get_buffer() can be called again and used multiple
4991  * times within the same dumper.dump() callback.
4992  */
4993 void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
4994 {
4995 	iter->cur_seq = latched_seq_read_nolock(&clear_seq);
4996 	iter->next_seq = prb_next_seq(prb);
4997 }
4998 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
4999 
5000 /**
5001  * console_try_replay_all - try to replay kernel log on consoles
5002  *
5003  * Try to obtain the console lock and replay all available
5004  * records in the printk buffer on the consoles.
5005  * Does nothing if the lock cannot be obtained.
5006  *
5007  * Context: Any, except for NMI.
5008  */
5009 void console_try_replay_all(void)
5010 {
5011 	struct console_flush_type ft;
5012 
5013 	printk_get_console_flush_type(&ft);
5014 	if (console_trylock()) {
5015 		__console_rewind_all();
5016 		if (ft.nbcon_atomic)
5017 			nbcon_atomic_flush_pending();
5018 		if (ft.nbcon_offload)
5019 			nbcon_kthreads_wake();
5020 		if (ft.legacy_offload)
5021 			defer_console_output();
5022 		/* Consoles are flushed as part of console_unlock(). */
5023 		console_unlock();
5024 	}
5025 }
5026 #endif
5027 
5028 #ifdef CONFIG_SMP
5029 static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
5030 static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
5031 
5032 bool is_printk_cpu_sync_owner(void)
5033 {
5034 	return (atomic_read(&printk_cpu_sync_owner) == raw_smp_processor_id());
5035 }
5036 
5037 /**
5038  * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
5039  *                            spinning lock is not owned by any CPU.
5040  *
5041  * Context: Any context.
5042  */
5043 void __printk_cpu_sync_wait(void)
5044 {
5045 	do {
5046 		cpu_relax();
5047 	} while (atomic_read(&printk_cpu_sync_owner) != -1);
5048 }
5049 EXPORT_SYMBOL(__printk_cpu_sync_wait);
5050 
5051 /**
5052  * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
5053  *                               spinning lock.
5054  *
5055  * If no processor has the lock, the calling processor takes the lock and
5056  * becomes the owner. If the calling processor is already the owner of the
5057  * lock, this function succeeds immediately.
5058  *
5059  * Context: Any context. Expects interrupts to be disabled.
5060  * Return: 1 on success, otherwise 0.
5061  */
5062 int __printk_cpu_sync_try_get(void)
5063 {
5064 	int cpu;
5065 	int old;
5066 
5067 	cpu = smp_processor_id();
5068 
5069 	/*
5070 	 * Guarantee loads and stores from this CPU when it is the lock owner
5071 	 * are _not_ visible to the previous lock owner. This pairs with
5072 	 * __printk_cpu_sync_put:B.
5073 	 *
5074 	 * Memory barrier involvement:
5075 	 *
5076 	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
5077 	 * then __printk_cpu_sync_put:A can never read from
5078 	 * __printk_cpu_sync_try_get:B.
5079 	 *
5080 	 * Relies on:
5081 	 *
5082 	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
5083 	 * of the previous CPU
5084 	 *    matching
5085 	 * ACQUIRE from __printk_cpu_sync_try_get:A to
5086 	 * __printk_cpu_sync_try_get:B of this CPU
5087 	 */
5088 	old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
5089 				     cpu); /* LMM(__printk_cpu_sync_try_get:A) */
5090 	if (old == -1) {
5091 		/*
5092 		 * This CPU is now the owner and begins loading/storing
5093 		 * data: LMM(__printk_cpu_sync_try_get:B)
5094 		 */
5095 		return 1;
5096 
5097 	} else if (old == cpu) {
5098 		/* This CPU is already the owner. */
5099 		atomic_inc(&printk_cpu_sync_nested);
5100 		return 1;
5101 	}
5102 
5103 	return 0;
5104 }
5105 EXPORT_SYMBOL(__printk_cpu_sync_try_get);
5106 
5107 /**
5108  * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
5109  *
5110  * The calling processor must be the owner of the lock.
5111  *
5112  * Context: Any context. Expects interrupts to be disabled.
5113  */
5114 void __printk_cpu_sync_put(void)
5115 {
5116 	if (atomic_read(&printk_cpu_sync_nested)) {
5117 		atomic_dec(&printk_cpu_sync_nested);
5118 		return;
5119 	}
5120 
5121 	/*
5122 	 * This CPU is finished loading/storing data:
5123 	 * LMM(__printk_cpu_sync_put:A)
5124 	 */
5125 
5126 	/*
5127 	 * Guarantee loads and stores from this CPU when it was the
5128 	 * lock owner are visible to the next lock owner. This pairs
5129 	 * with __printk_cpu_sync_try_get:A.
5130 	 *
5131 	 * Memory barrier involvement:
5132 	 *
5133 	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
5134 	 * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
5135 	 *
5136 	 * Relies on:
5137 	 *
5138 	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
5139 	 * of this CPU
5140 	 *    matching
5141 	 * ACQUIRE from __printk_cpu_sync_try_get:A to
5142 	 * __printk_cpu_sync_try_get:B of the next CPU
5143 	 */
5144 	atomic_set_release(&printk_cpu_sync_owner,
5145 			   -1); /* LMM(__printk_cpu_sync_put:B) */
5146 }
5147 EXPORT_SYMBOL(__printk_cpu_sync_put);
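
/*
 * Editor's sketch: callers normally use the wrapper macros from
 * <linux/printk.h>, which disable interrupts and spin via
 * __printk_cpu_sync_wait() until ownership is acquired:
 *
 *	unsigned long flags;
 *
 *	printk_cpu_sync_get_irqsave(flags);
 *	// dump state that must not interleave with other CPUs
 *	printk_cpu_sync_put_irqrestore(flags);
 */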
5148 #endif /* CONFIG_SMP */
5149