1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/kernel/printk.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * Modified to make sys_syslog() more flexible: added commands to
8 * return the last 4k of kernel messages, regardless of whether
9 * they've been read or not. Added option to suppress kernel printk's
10 * to the console. Added hook for sending the console messages
11 * elsewhere, in preparation for a serial line console (someday).
12 * Ted Ts'o, 2/11/93.
13 * Modified for sysctl support, 1/8/97, Chris Horn.
14 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
15 * manfred@colorfullife.com
16 * Rewrote bits to get rid of console_lock
17 * 01Mar01 Andrew Morton
18 */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/kernel.h>
23 #include <linux/mm.h>
24 #include <linux/tty.h>
25 #include <linux/tty_driver.h>
26 #include <linux/console.h>
27 #include <linux/init.h>
28 #include <linux/jiffies.h>
29 #include <linux/nmi.h>
30 #include <linux/module.h>
31 #include <linux/moduleparam.h>
32 #include <linux/delay.h>
33 #include <linux/smp.h>
34 #include <linux/security.h>
35 #include <linux/memblock.h>
36 #include <linux/syscalls.h>
37 #include <linux/syscore_ops.h>
38 #include <linux/vmcore_info.h>
39 #include <linux/ratelimit.h>
40 #include <linux/kmsg_dump.h>
41 #include <linux/syslog.h>
42 #include <linux/cpu.h>
43 #include <linux/rculist.h>
44 #include <linux/poll.h>
45 #include <linux/irq_work.h>
46 #include <linux/ctype.h>
47 #include <linux/uio.h>
48 #include <linux/sched/clock.h>
49 #include <linux/sched/debug.h>
50 #include <linux/sched/task_stack.h>
51 #include <linux/panic.h>
52
53 #include <linux/uaccess.h>
54 #include <asm/sections.h>
55
56 #include <trace/events/initcall.h>
57 #define CREATE_TRACE_POINTS
58 #include <trace/events/printk.h>
59
60 #include "printk_ringbuffer.h"
61 #include "console_cmdline.h"
62 #include "braille.h"
63 #include "internal.h"
64
65 int console_printk[4] = {
66 CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */
67 MESSAGE_LOGLEVEL_DEFAULT, /* default_message_loglevel */
68 CONSOLE_LOGLEVEL_MIN, /* minimum_console_loglevel */
69 CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */
70 };
71 EXPORT_SYMBOL_GPL(console_printk);
72
73 atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
74 EXPORT_SYMBOL(ignore_console_lock_warning);
75
76 EXPORT_TRACEPOINT_SYMBOL_GPL(console);
77
78 /*
79 * Low level drivers may need this to know whether they can schedule in
80 * their unblank() callback or not. So let's export it.
81 */
82 int oops_in_progress;
83 EXPORT_SYMBOL(oops_in_progress);
84
85 /*
86 * console_mutex protects console_list updates and console->flags updates.
87 * The flags are synchronized only for consoles that are registered, i.e.
88 * accessible via the console list.
89 */
90 static DEFINE_MUTEX(console_mutex);
91
92 /*
93 * console_sem protects updates to console->seq
94 * and also provides serialization for console printing.
95 */
96 static DEFINE_SEMAPHORE(console_sem, 1);
97 HLIST_HEAD(console_list);
98 EXPORT_SYMBOL_GPL(console_list);
99 DEFINE_STATIC_SRCU(console_srcu);
100
101 /*
102 * The system may need to suppress printk messages under certain
103 * circumstances, e.g. after a kernel panic.
104 */
105 int __read_mostly suppress_printk;
106
107 #ifdef CONFIG_LOCKDEP
108 static struct lockdep_map console_lock_dep_map = {
109 .name = "console_lock"
110 };
111
112 void lockdep_assert_console_list_lock_held(void)
113 {
114 lockdep_assert_held(&console_mutex);
115 }
116 EXPORT_SYMBOL(lockdep_assert_console_list_lock_held);
117 #endif
118
119 #ifdef CONFIG_DEBUG_LOCK_ALLOC
120 bool console_srcu_read_lock_is_held(void)
121 {
122 return srcu_read_lock_held(&console_srcu);
123 }
124 EXPORT_SYMBOL(console_srcu_read_lock_is_held);
125 #endif
126
127 enum devkmsg_log_bits {
128 __DEVKMSG_LOG_BIT_ON = 0,
129 __DEVKMSG_LOG_BIT_OFF,
130 __DEVKMSG_LOG_BIT_LOCK,
131 };
132
133 enum devkmsg_log_masks {
134 DEVKMSG_LOG_MASK_ON = BIT(__DEVKMSG_LOG_BIT_ON),
135 DEVKMSG_LOG_MASK_OFF = BIT(__DEVKMSG_LOG_BIT_OFF),
136 DEVKMSG_LOG_MASK_LOCK = BIT(__DEVKMSG_LOG_BIT_LOCK),
137 };
138
139 /* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
140 #define DEVKMSG_LOG_MASK_DEFAULT 0
141
142 static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
143
144 static int __control_devkmsg(char *str)
145 {
146 size_t len;
147
148 if (!str)
149 return -EINVAL;
150
151 len = str_has_prefix(str, "on");
152 if (len) {
153 devkmsg_log = DEVKMSG_LOG_MASK_ON;
154 return len;
155 }
156
157 len = str_has_prefix(str, "off");
158 if (len) {
159 devkmsg_log = DEVKMSG_LOG_MASK_OFF;
160 return len;
161 }
162
163 len = str_has_prefix(str, "ratelimit");
164 if (len) {
165 devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
166 return len;
167 }
168
169 return -EINVAL;
170 }
171
172 static int __init control_devkmsg(char *str)
173 {
174 if (__control_devkmsg(str) < 0) {
175 pr_warn("printk.devkmsg: bad option string '%s'\n", str);
176 return 1;
177 }
178
179 /*
180 * Set sysctl string accordingly:
181 */
182 if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
183 strscpy(devkmsg_log_str, "on");
184 else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
185 strscpy(devkmsg_log_str, "off");
186 /* else "ratelimit" which is set by default. */
187
188 /*
189 * Sysctl cannot change it anymore. The kernel command line setting of
190 * this parameter is to force the setting to be permanent throughout the
191 * runtime of the system. This is a precautionary measure against
192 * userspace trying to be clever and changing it behind our back.
193 */
194 devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;
195
196 return 1;
197 }
198 __setup("printk.devkmsg=", control_devkmsg);
199
200 char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
201 #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
202 int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
203 void *buffer, size_t *lenp, loff_t *ppos)
204 {
205 char old_str[DEVKMSG_STR_MAX_SIZE];
206 unsigned int old;
207 int err;
208
209 if (write) {
210 if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
211 return -EINVAL;
212
213 old = devkmsg_log;
214 strscpy(old_str, devkmsg_log_str);
215 }
216
217 err = proc_dostring(table, write, buffer, lenp, ppos);
218 if (err)
219 return err;
220
221 if (write) {
222 err = __control_devkmsg(devkmsg_log_str);
223
224 /*
225 * Do not accept an unknown string OR a known string with
226 * trailing garbage...
227 */
228 if (err < 0 || (err + 1 != *lenp)) {
229
230 /* ... and restore old setting. */
231 devkmsg_log = old;
232 strscpy(devkmsg_log_str, old_str);
233
234 return -EINVAL;
235 }
236 }
237
238 return 0;
239 }
240 #endif /* CONFIG_PRINTK && CONFIG_SYSCTL */
241
242 /**
243 * console_list_lock - Lock the console list
244 *
245 * For console list or console->flags updates
246 */
247 void console_list_lock(void)
248 {
249 /*
250 * In unregister_console() and console_force_preferred_locked(),
251 * synchronize_srcu() is called with the console_list_lock held.
252 * Therefore it is not allowed that the console_list_lock is taken
253 * with the srcu_lock held.
254 *
255 * Detecting if this context is really in the read-side critical
256 * section is only possible if the appropriate debug options are
257 * enabled.
258 */
259 WARN_ON_ONCE(debug_lockdep_rcu_enabled() &&
260 srcu_read_lock_held(&console_srcu));
261
262 mutex_lock(&console_mutex);
263 }
264 EXPORT_SYMBOL(console_list_lock);
265
266 /**
267 * console_list_unlock - Unlock the console list
268 *
269 * Counterpart to console_list_lock()
270 */
271 void console_list_unlock(void)
272 {
273 mutex_unlock(&console_mutex);
274 }
275 EXPORT_SYMBOL(console_list_unlock);
276
277 /**
278 * console_srcu_read_lock - Register a new reader for the
279 * SRCU-protected console list
280 *
281 * Use for_each_console_srcu() to iterate the console list
282 *
283 * Context: Any context.
284 * Return: A cookie to pass to console_srcu_read_unlock().
285 */
286 int console_srcu_read_lock(void)
287 __acquires(&console_srcu)
288 {
289 return srcu_read_lock_nmisafe(&console_srcu);
290 }
291 EXPORT_SYMBOL(console_srcu_read_lock);
292
293 /**
294 * console_srcu_read_unlock - Unregister an old reader from
295 * the SRCU-protected console list
296 * @cookie: cookie returned from console_srcu_read_lock()
297 *
298 * Counterpart to console_srcu_read_lock()
299 */
300 void console_srcu_read_unlock(int cookie)
301 __releases(&console_srcu)
302 {
303 srcu_read_unlock_nmisafe(&console_srcu, cookie);
304 }
305 EXPORT_SYMBOL(console_srcu_read_unlock);
306
307 /*
308 * Helper macros to handle lockdep when locking/unlocking console_sem. We use
309 * macros instead of functions so that _RET_IP_ contains useful information.
310 */
311 #define down_console_sem() do { \
312 down(&console_sem);\
313 mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
314 } while (0)
315
316 static int __down_trylock_console_sem(unsigned long ip)
317 {
318 int lock_failed;
319 unsigned long flags;
320
321 /*
322 * Here and in __up_console_sem() we need to be in safe mode,
323 * because spindump/WARN/etc from under console->lock will
324 * deadlock in printk()->down_trylock_console_sem() otherwise.
325 */
326 printk_safe_enter_irqsave(flags);
327 lock_failed = down_trylock(&console_sem);
328 printk_safe_exit_irqrestore(flags);
329
330 if (lock_failed)
331 return 1;
332 mutex_acquire(&console_lock_dep_map, 0, 1, ip);
333 return 0;
334 }
335 #define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)
336
337 static void __up_console_sem(unsigned long ip)
338 {
339 unsigned long flags;
340
341 mutex_release(&console_lock_dep_map, ip);
342
343 printk_safe_enter_irqsave(flags);
344 up(&console_sem);
345 printk_safe_exit_irqrestore(flags);
346 }
347 #define up_console_sem() __up_console_sem(_RET_IP_)
348
349 /*
350 * This is used for debugging the mess that is the VT code by
351 * keeping track of whether we have the console semaphore held. It's
352 * definitely not the perfect debug tool (we don't know if _WE_
353 * hold it and are racing, but it helps tracking those weird code
354 * paths in the console code where we end up in places I want
355 * locked without the console semaphore held).
356 */
357 static int console_locked;
358
359 /*
360 * Array of consoles built from command line options (console=)
361 */
362
363 #define MAX_CMDLINECONSOLES 8
364
365 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
366
367 static int preferred_console = -1;
368 int console_set_on_cmdline;
369 EXPORT_SYMBOL(console_set_on_cmdline);
370
371 /* Flag: console code may call schedule() */
372 static int console_may_schedule;
373
374 enum con_msg_format_flags {
375 MSG_FORMAT_DEFAULT = 0,
376 MSG_FORMAT_SYSLOG = (1 << 0),
377 };
378
379 static int console_msg_format = MSG_FORMAT_DEFAULT;
380
381 /*
382 * The printk log buffer consists of a sequenced collection of records, each
383 * containing variable length message text. Every record also contains its
384 * own meta-data (@info).
385 *
386 * Every record meta-data carries the timestamp in microseconds, as well as
387 * the standard userspace syslog level and syslog facility. The usual kernel
388 * messages use LOG_KERN; userspace-injected messages always carry a matching
389 * syslog facility, by default LOG_USER. The origin of every message can be
390 * reliably determined that way.
391 *
392 * The human readable log message of a record is available in @text, the
393 * length of the message text in @text_len. The stored message is not
394 * terminated.
395 *
396 * Optionally, a record can carry a dictionary of properties (key/value
397 * pairs), to provide userspace with a machine-readable message context.
398 *
399 * Examples for well-defined, commonly used property names are:
400 * DEVICE=b12:8 device identifier
401 * b12:8 block dev_t
402 * c127:3 char dev_t
403 * n8 netdev ifindex
404 * +sound:card0 subsystem:devname
405 * SUBSYSTEM=pci driver-core subsystem name
406 *
407 * Valid characters in property names are [a-zA-Z0-9.-_]. Property names
408 * and values are terminated by a '\0' character.
409 *
410 * Example of record values:
411 * record.text_buf = "it's a line" (unterminated)
412 * record.info.seq = 56
413 * record.info.ts_nsec = 36863
414 * record.info.text_len = 11
415 * record.info.facility = 0 (LOG_KERN)
416 * record.info.flags = 0
417 * record.info.level = 3 (LOG_ERR)
418 * record.info.caller_id = 299 (task 299)
419 * record.info.dev_info.subsystem = "pci" (terminated)
420 * record.info.dev_info.device = "+pci:0000:00:01.0" (terminated)
421 *
422 * The 'struct printk_info' buffer must never be directly exported to
423 * userspace, it is a kernel-private implementation detail that might
424 * need to be changed in the future, when the requirements change.
425 *
426 * /dev/kmsg exports the structured data in the following line format:
427 * "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
428 *
429 * Users of the export format should ignore possible additional values
430 * separated by ',', and find the message after the ';' character.
431 *
432 * The optional key/value pairs are attached as continuation lines starting
433 * with a space character and terminated by a newline. All possible
434 * non-printable characters are escaped in the "\xff" notation.
435 */
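/*
 * Editor's sketch (illustrative, not kernel code): a minimal userspace
 * parser for the /dev/kmsg line format documented above. All names in
 * this example are assumptions.
 */
#if 0	/* userspace example */
#include <stdio.h>
#include <string.h>

static int parse_kmsg_line(const char *line)
{
	unsigned int prival;		/* (facility << 3) | level */
	unsigned long long seq, ts_usec;
	const char *msg;

	if (sscanf(line, "%u,%llu,%llu", &prival, &seq, &ts_usec) != 3)
		return -1;

	/* Ignore any additional ','-separated values; text follows ';'. */
	msg = strchr(line, ';');
	if (!msg)
		return -1;
	msg++;

	printf("facility=%u level=%u seq=%llu ts=%llu.%06llu msg=%s",
	       prival >> 3, prival & 7, seq,
	       ts_usec / 1000000, ts_usec % 1000000, msg);
	return 0;
}
#endif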
436
437 /* syslog_lock protects syslog_* variables and write access to clear_seq. */
438 static DEFINE_MUTEX(syslog_lock);
439
440 /*
441 * Specifies if a legacy console is registered. If legacy consoles are
442 * present, it is necessary to perform the console lock/unlock dance
443 * whenever console flushing should occur.
444 */
445 bool have_legacy_console;
446
447 /*
448 * Specifies if an nbcon console is registered. If nbcon consoles are present,
449 * synchronous printing of legacy consoles will not occur during panic until
450 * the backtrace has been stored to the ringbuffer.
451 */
452 bool have_nbcon_console;
453
454 /*
455 * Specifies if a boot console is registered. If boot consoles are present,
456 * nbcon consoles cannot print simultaneously and must be synchronized by
457 * the console lock. This is because boot consoles and nbcon consoles may
458 * have mapped the same hardware.
459 */
460 bool have_boot_console;
461
462 /* See printk_legacy_allow_panic_sync() for details. */
463 bool legacy_allow_panic_sync;
464
465 /* Avoid using irq_work when suspending. */
466 bool console_irqwork_blocked;
467
468 #ifdef CONFIG_PRINTK
469 DECLARE_WAIT_QUEUE_HEAD(log_wait);
470 static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
471 /* All 3 protected by @syslog_lock. */
472 /* the next printk record to read by syslog(READ) or /proc/kmsg */
473 static u64 syslog_seq;
474 static size_t syslog_partial;
475 static bool syslog_time;
476
477 /* True when _all_ printer threads are available for printing. */
478 bool printk_kthreads_running;
479
480 struct latched_seq {
481 seqcount_latch_t latch;
482 u64 val[2];
483 };
484
485 /*
486 * The next printk record to read after the last 'clear' command. There are
487 * two copies (updated with seqcount_latch) so that reads can locklessly
488 * access a valid value. Writers are synchronized by @syslog_lock.
489 */
490 static struct latched_seq clear_seq = {
491 .latch = SEQCNT_LATCH_ZERO(clear_seq.latch),
492 .val[0] = 0,
493 .val[1] = 0,
494 };
495
496 #define LOG_LEVEL(v) ((v) & 0x07)
497 #define LOG_FACILITY(v) ((v) >> 3 & 0xff)
498
499 /* record buffer */
500 #define LOG_ALIGN __alignof__(unsigned long)
501 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
502 #define LOG_BUF_LEN_MAX ((u32)1 << 31)
503 static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
504 static char *log_buf = __log_buf;
505 static u32 log_buf_len = __LOG_BUF_LEN;
506
507 /*
508 * Define the average message size. This only affects the number of
509 * descriptors that will be available. Underestimating is better than
510 * overestimating (too many available descriptors is better than not enough).
511 */
512 #define PRB_AVGBITS 5 /* 32 character average length */
513
514 #if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS
515 #error CONFIG_LOG_BUF_SHIFT value too small.
516 #endif
517 _DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
518 PRB_AVGBITS, &__log_buf[0]);
519
520 static struct printk_ringbuffer printk_rb_dynamic;
521
522 struct printk_ringbuffer *prb = &printk_rb_static;
523
524 /*
525 * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
526 * per_cpu_areas are initialised. This variable is set to true when
527 * it's safe to access per-CPU data.
528 */
529 static bool __printk_percpu_data_ready __ro_after_init;
530
531 bool printk_percpu_data_ready(void)
532 {
533 return __printk_percpu_data_ready;
534 }
535
536 /* Must be called under syslog_lock. */
537 static void latched_seq_write(struct latched_seq *ls, u64 val)
538 {
539 write_seqcount_latch_begin(&ls->latch);
540 ls->val[0] = val;
541 write_seqcount_latch(&ls->latch);
542 ls->val[1] = val;
543 write_seqcount_latch_end(&ls->latch);
544 }
545
546 /* Can be called from any context. */
547 static u64 latched_seq_read_nolock(struct latched_seq *ls)
548 {
549 unsigned int seq;
550 unsigned int idx;
551 u64 val;
552
553 do {
554 seq = read_seqcount_latch(&ls->latch);
555 idx = seq & 0x1;
556 val = ls->val[idx];
557 } while (read_seqcount_latch_retry(&ls->latch, seq));
558
559 return val;
560 }
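/*
 * Usage sketch (illustrative): writers serialize on @syslog_lock, while
 * readers may run locklessly in any context, e.g.:
 *
 *	mutex_lock(&syslog_lock);
 *	latched_seq_write(&clear_seq, prb_next_seq(prb));
 *	mutex_unlock(&syslog_lock);
 *
 *	seq = latched_seq_read_nolock(&clear_seq);	// any context
 */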
561
562 /* Return log buffer address */
563 char *log_buf_addr_get(void)
564 {
565 return log_buf;
566 }
567
568 /* Return log buffer size */
569 u32 log_buf_len_get(void)
570 {
571 return log_buf_len;
572 }
573
574 /*
575 * Define how much of the log buffer we could take at maximum. The value
576 * must be greater than two. Note that only half of the buffer is available
577 * when the index points to the middle.
578 */
579 #define MAX_LOG_TAKE_PART 4
580 static const char trunc_msg[] = "<truncated>";
581
582 static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
583 {
584 /*
585 * The message should not take the whole buffer. Otherwise, it might
586 * get removed too soon.
587 */
588 u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
589
590 if (*text_len > max_text_len)
591 *text_len = max_text_len;
592
593 /* enable the warning message (if there is room) */
594 *trunc_msg_len = strlen(trunc_msg);
595 if (*text_len >= *trunc_msg_len)
596 *text_len -= *trunc_msg_len;
597 else
598 *trunc_msg_len = 0;
599 }
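/*
 * Worked example (illustrative): with, say, CONFIG_LOG_BUF_SHIFT=17,
 * log_buf_len is 128 KiB and max_text_len is 128 KiB / 4 = 32 KiB. A
 * 40 KiB message would be cut to 32 KiB and then shortened by
 * strlen("<truncated>") bytes so the marker can be appended within the
 * same budget.
 */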
600
601 int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
602
603 static int syslog_action_restricted(int type)
604 {
605 if (dmesg_restrict)
606 return 1;
607 /*
608 * Unless restricted, we allow "read all" and "get buffer size"
609 * for everybody.
610 */
611 return type != SYSLOG_ACTION_READ_ALL &&
612 type != SYSLOG_ACTION_SIZE_BUFFER;
613 }
614
615 static int check_syslog_permissions(int type, int source)
616 {
617 /*
618 * If this is from /proc/kmsg and we've already opened it, then we've
619 * already done the capabilities checks at open time.
620 */
621 if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
622 goto ok;
623
624 if (syslog_action_restricted(type)) {
625 if (capable(CAP_SYSLOG))
626 goto ok;
627 return -EPERM;
628 }
629 ok:
630 return security_syslog(type);
631 }
632
633 static void append_char(char **pp, char *e, char c)
634 {
635 if (*pp < e)
636 *(*pp)++ = c;
637 }
638
639 static ssize_t info_print_ext_header(char *buf, size_t size,
640 struct printk_info *info)
641 {
642 u64 ts_usec = info->ts_nsec;
643 char caller[20];
644 #ifdef CONFIG_PRINTK_CALLER
645 u32 id = info->caller_id;
646
647 snprintf(caller, sizeof(caller), ",caller=%c%u",
648 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
649 #else
650 caller[0] = '\0';
651 #endif
652
653 do_div(ts_usec, 1000);
654
655 return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
656 (info->facility << 3) | info->level, info->seq,
657 ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller);
658 }
659
660 static ssize_t msg_add_ext_text(char *buf, size_t size,
661 const char *text, size_t text_len,
662 unsigned char endc)
663 {
664 char *p = buf, *e = buf + size;
665 size_t i;
666
667 /* escape non-printable characters */
668 for (i = 0; i < text_len; i++) {
669 unsigned char c = text[i];
670
671 if (c < ' ' || c >= 127 || c == '\\')
672 p += scnprintf(p, e - p, "\\x%02x", c);
673 else
674 append_char(&p, e, c);
675 }
676 append_char(&p, e, endc);
677
678 return p - buf;
679 }
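/*
 * Example (illustrative): msg_add_ext_text(buf, size, "a\nb", 3, '\n')
 * stores "a\\x0ab\n": the embedded newline is escaped as "\x0a" while
 * the requested terminating character @endc is appended verbatim.
 */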
680
681 static ssize_t msg_add_dict_text(char *buf, size_t size,
682 const char *key, const char *val)
683 {
684 size_t val_len = strlen(val);
685 ssize_t len;
686
687 if (!val_len)
688 return 0;
689
690 len = msg_add_ext_text(buf, size, "", 0, ' '); /* dict prefix */
691 len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '=');
692 len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n');
693
694 return len;
695 }
696
697 static ssize_t msg_print_ext_body(char *buf, size_t size,
698 char *text, size_t text_len,
699 struct dev_printk_info *dev_info)
700 {
701 ssize_t len;
702
703 len = msg_add_ext_text(buf, size, text, text_len, '\n');
704
705 if (!dev_info)
706 goto out;
707
708 len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM",
709 dev_info->subsystem);
710 len += msg_add_dict_text(buf + len, size - len, "DEVICE",
711 dev_info->device);
712 out:
713 return len;
714 }
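/*
 * Example (illustrative) of a complete record as seen by a /dev/kmsg
 * reader, with the dictionary attached as continuation lines:
 *
 *	6,339,5140900,-;NET: Registered protocol family 10
 *	 SUBSYSTEM=net
 *	 DEVICE=n8
 */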
715
716 /* /dev/kmsg - userspace message inject/listen interface */
717 struct devkmsg_user {
718 atomic64_t seq;
719 struct ratelimit_state rs;
720 struct mutex lock;
721 struct printk_buffers pbufs;
722 };
723
724 static __printf(3, 4) __cold
725 int devkmsg_emit(int facility, int level, const char *fmt, ...)
726 {
727 va_list args;
728 int r;
729
730 va_start(args, fmt);
731 r = vprintk_emit(facility, level, NULL, fmt, args);
732 va_end(args);
733
734 return r;
735 }
736
737 static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
738 {
739 char *buf, *line;
740 int level = default_message_loglevel;
741 int facility = 1; /* LOG_USER */
742 struct file *file = iocb->ki_filp;
743 struct devkmsg_user *user = file->private_data;
744 size_t len = iov_iter_count(from);
745 ssize_t ret = len;
746
747 if (len > PRINTKRB_RECORD_MAX)
748 return -EINVAL;
749
750 /* Ignore when user logging is disabled. */
751 if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
752 return len;
753
754 /* Ratelimit when not explicitly enabled. */
755 if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
756 if (!___ratelimit(&user->rs, current->comm))
757 return ret;
758 }
759
760 buf = kmalloc(len+1, GFP_KERNEL);
761 if (buf == NULL)
762 return -ENOMEM;
763
764 buf[len] = '\0';
765 if (!copy_from_iter_full(buf, len, from)) {
766 kfree(buf);
767 return -EFAULT;
768 }
769
770 /*
771 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace,
772 * the decimal value represents a 32-bit quantity: the lower 3 bits are
773 * the log level, the rest is the log facility.
774 *
775 * If no prefix or no userspace facility is specified, we
776 * enforce LOG_USER, to be able to reliably distinguish
777 * kernel-generated messages from userspace-injected ones.
778 */
779 line = buf;
780 if (line[0] == '<') {
781 char *endp = NULL;
782 unsigned int u;
783
784 u = simple_strtoul(line + 1, &endp, 10);
785 if (endp && endp[0] == '>') {
786 level = LOG_LEVEL(u);
787 if (LOG_FACILITY(u) != 0)
788 facility = LOG_FACILITY(u);
789 endp++;
790 line = endp;
791 }
792 }
793
794 devkmsg_emit(facility, level, "%s", line);
795 kfree(buf);
796 return ret;
797 }
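/*
 * Userspace sketch (illustrative): injecting a message with an explicit
 * syslog prefix. "<30>" encodes facility 3 (daemon) and level 6 (info),
 * since 30 == (3 << 3) | 6.
 */
#if 0	/* userspace example */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void inject_example(void)
{
	static const char msg[] = "<30>hello from userspace\n";
	int fd = open("/dev/kmsg", O_WRONLY);

	if (fd >= 0) {
		write(fd, msg, strlen(msg));
		close(fd);
	}
}
#endif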
798
799 static ssize_t devkmsg_read(struct file *file, char __user *buf,
800 size_t count, loff_t *ppos)
801 {
802 struct devkmsg_user *user = file->private_data;
803 char *outbuf = &user->pbufs.outbuf[0];
804 struct printk_message pmsg = {
805 .pbufs = &user->pbufs,
806 };
807 ssize_t ret;
808
809 ret = mutex_lock_interruptible(&user->lock);
810 if (ret)
811 return ret;
812
813 if (!printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, false)) {
814 if (file->f_flags & O_NONBLOCK) {
815 ret = -EAGAIN;
816 goto out;
817 }
818
819 /*
820 * Guarantee this task is visible on the waitqueue before
821 * checking the wake condition.
822 *
823 * The full memory barrier within set_current_state() of
824 * prepare_to_wait_event() pairs with the full memory barrier
825 * within wq_has_sleeper().
826 *
827 * This pairs with __wake_up_klogd:A.
828 */
829 ret = wait_event_interruptible(log_wait,
830 printk_get_next_message(&pmsg, atomic64_read(&user->seq), true,
831 false)); /* LMM(devkmsg_read:A) */
832 if (ret)
833 goto out;
834 }
835
836 if (pmsg.dropped) {
837 /* our last seen message is gone, return error and reset */
838 atomic64_set(&user->seq, pmsg.seq);
839 ret = -EPIPE;
840 goto out;
841 }
842
843 atomic64_set(&user->seq, pmsg.seq + 1);
844
845 if (pmsg.outbuf_len > count) {
846 ret = -EINVAL;
847 goto out;
848 }
849
850 if (copy_to_user(buf, outbuf, pmsg.outbuf_len)) {
851 ret = -EFAULT;
852 goto out;
853 }
854 ret = pmsg.outbuf_len;
855 out:
856 mutex_unlock(&user->lock);
857 return ret;
858 }
859
860 /*
861 * Be careful when modifying this function!!!
862 *
863 * Only a few operations are supported because the device works only with
864 * entire variable-length messages (records). Non-standard values are
865 * returned in the other cases, and it has been this way for quite some time.
866 * User space applications might depend on this behavior.
867 */
868 static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
869 {
870 struct devkmsg_user *user = file->private_data;
871 loff_t ret = 0;
872
873 if (offset)
874 return -ESPIPE;
875
876 switch (whence) {
877 case SEEK_SET:
878 /* the first record */
879 atomic64_set(&user->seq, prb_first_valid_seq(prb));
880 break;
881 case SEEK_DATA:
882 /*
883 * The first record after the last SYSLOG_ACTION_CLEAR,
884 * as issued by 'dmesg -c'. Reading /dev/kmsg itself
885 * changes no global state, and does not clear anything.
886 */
887 atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
888 break;
889 case SEEK_END:
890 /* after the last record */
891 atomic64_set(&user->seq, prb_next_seq(prb));
892 break;
893 default:
894 ret = -EINVAL;
895 }
896 return ret;
897 }
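/*
 * Usage sketch (illustrative): a reader that only wants records newer
 * than the last SYSLOG_ACTION_CLEAR can position itself with
 *
 *	lseek(fd, 0, SEEK_DATA);
 *
 * where @offset must be 0, otherwise -ESPIPE is returned.
 */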
898
899 static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
900 {
901 struct devkmsg_user *user = file->private_data;
902 struct printk_info info;
903 __poll_t ret = 0;
904
905 poll_wait(file, &log_wait, wait);
906
907 if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
908 /* return error when data has vanished underneath us */
909 if (info.seq != atomic64_read(&user->seq))
910 ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
911 else
912 ret = EPOLLIN|EPOLLRDNORM;
913 }
914
915 return ret;
916 }
917
918 static int devkmsg_open(struct inode *inode, struct file *file)
919 {
920 struct devkmsg_user *user;
921 int err;
922
923 if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
924 return -EPERM;
925
926 /* write-only does not need any file context */
927 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
928 err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
929 SYSLOG_FROM_READER);
930 if (err)
931 return err;
932 }
933
934 user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
935 if (!user)
936 return -ENOMEM;
937
938 ratelimit_default_init(&user->rs);
939 ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);
940
941 mutex_init(&user->lock);
942
943 atomic64_set(&user->seq, prb_first_valid_seq(prb));
944
945 file->private_data = user;
946 return 0;
947 }
948
949 static int devkmsg_release(struct inode *inode, struct file *file)
950 {
951 struct devkmsg_user *user = file->private_data;
952
953 ratelimit_state_exit(&user->rs);
954
955 mutex_destroy(&user->lock);
956 kvfree(user);
957 return 0;
958 }
959
960 const struct file_operations kmsg_fops = {
961 .open = devkmsg_open,
962 .read = devkmsg_read,
963 .write_iter = devkmsg_write,
964 .llseek = devkmsg_llseek,
965 .poll = devkmsg_poll,
966 .release = devkmsg_release,
967 };
968
969 #ifdef CONFIG_VMCORE_INFO
970 /*
971 * This appends the listed symbols to /proc/vmcore
972 *
973 * /proc/vmcore is used by various utilities, like crash and makedumpfile to
974 * obtain access to symbols that are otherwise very difficult to locate. These
975 * symbols are specifically used so that utilities can access and extract the
976 * dmesg log from a vmcore file after a crash.
977 */
978 void log_buf_vmcoreinfo_setup(void)
979 {
980 struct dev_printk_info *dev_info = NULL;
981
982 VMCOREINFO_SYMBOL(prb);
983 VMCOREINFO_SYMBOL(printk_rb_static);
984 VMCOREINFO_SYMBOL(clear_seq);
985
986 /*
987 * Export struct size and field offsets. User space tools can
988 * parse it and detect any changes to the structures down the line.
989 */
990
991 VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
992 VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
993 VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
994 VMCOREINFO_OFFSET(printk_ringbuffer, fail);
995
996 VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
997 VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
998 VMCOREINFO_OFFSET(prb_desc_ring, descs);
999 VMCOREINFO_OFFSET(prb_desc_ring, infos);
1000 VMCOREINFO_OFFSET(prb_desc_ring, head_id);
1001 VMCOREINFO_OFFSET(prb_desc_ring, tail_id);
1002
1003 VMCOREINFO_STRUCT_SIZE(prb_desc);
1004 VMCOREINFO_OFFSET(prb_desc, state_var);
1005 VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);
1006
1007 VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
1008 VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
1009 VMCOREINFO_OFFSET(prb_data_blk_lpos, next);
1010
1011 VMCOREINFO_STRUCT_SIZE(printk_info);
1012 VMCOREINFO_OFFSET(printk_info, seq);
1013 VMCOREINFO_OFFSET(printk_info, ts_nsec);
1014 VMCOREINFO_OFFSET(printk_info, text_len);
1015 VMCOREINFO_OFFSET(printk_info, caller_id);
1016 VMCOREINFO_OFFSET(printk_info, dev_info);
1017
1018 VMCOREINFO_STRUCT_SIZE(dev_printk_info);
1019 VMCOREINFO_OFFSET(dev_printk_info, subsystem);
1020 VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem));
1021 VMCOREINFO_OFFSET(dev_printk_info, device);
1022 VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device));
1023
1024 VMCOREINFO_STRUCT_SIZE(prb_data_ring);
1025 VMCOREINFO_OFFSET(prb_data_ring, size_bits);
1026 VMCOREINFO_OFFSET(prb_data_ring, data);
1027 VMCOREINFO_OFFSET(prb_data_ring, head_lpos);
1028 VMCOREINFO_OFFSET(prb_data_ring, tail_lpos);
1029
1030 VMCOREINFO_SIZE(atomic_long_t);
1031 VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);
1032
1033 VMCOREINFO_STRUCT_SIZE(latched_seq);
1034 VMCOREINFO_OFFSET(latched_seq, val);
1035 }
1036 #endif
1037
1038 /* requested log_buf_len from kernel cmdline */
1039 static unsigned long __initdata new_log_buf_len;
1040
1041 /* we scale the ring buffer by powers of 2 */
1042 static void __init log_buf_len_update(u64 size)
1043 {
1044 if (size > (u64)LOG_BUF_LEN_MAX) {
1045 size = (u64)LOG_BUF_LEN_MAX;
1046 pr_err("log_buf over 2G is not supported.\n");
1047 }
1048
1049 if (size)
1050 size = roundup_pow_of_two(size);
1051 if (size > log_buf_len)
1052 new_log_buf_len = (unsigned long)size;
1053 }
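/*
 * Worked example (illustrative): booting with "log_buf_len=300K" rounds
 * the request up to the next power of two, so new_log_buf_len becomes
 * 512 KiB. Requests above 2 GiB are capped at LOG_BUF_LEN_MAX.
 */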
1054
1055 /* save requested log_buf_len since it's too early to process it */
1056 static int __init log_buf_len_setup(char *str)
1057 {
1058 u64 size;
1059
1060 if (!str)
1061 return -EINVAL;
1062
1063 size = memparse(str, &str);
1064
1065 log_buf_len_update(size);
1066
1067 return 0;
1068 }
1069 early_param("log_buf_len", log_buf_len_setup);
1070
1071 #ifdef CONFIG_SMP
1072 #define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)
1073
1074 static void __init log_buf_add_cpu(void)
1075 {
1076 unsigned int cpu_extra;
1077
1078 /*
1079 * archs should set up cpu_possible_bits properly with
1080 * set_cpu_possible() after setup_arch() but just in
1081 * case let's ensure this is valid.
1082 */
1083 if (num_possible_cpus() == 1)
1084 return;
1085
1086 cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;
1087
1088 /* by default this will only continue through for systems with more than 64 CPUs */
1089 if (cpu_extra <= __LOG_BUF_LEN / 2)
1090 return;
1091
1092 pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
1093 __LOG_CPU_MAX_BUF_LEN);
1094 pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
1095 cpu_extra);
1096 pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);
1097
1098 log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
1099 }
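/*
 * Worked example (illustrative): with CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
 * (4 KiB per CPU) and 128 possible CPUs, cpu_extra = 127 * 4 KiB = 508 KiB.
 * That exceeds half of the default 128 KiB static buffer, so the requested
 * size becomes 508 KiB + 128 KiB, which log_buf_len_update() rounds up
 * to 1 MiB.
 */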
1100 #else /* !CONFIG_SMP */
1101 static inline void log_buf_add_cpu(void) {}
1102 #endif /* CONFIG_SMP */
1103
1104 static void __init set_percpu_data_ready(void)
1105 {
1106 __printk_percpu_data_ready = true;
1107 }
1108
1109 static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
1110 struct printk_record *r)
1111 {
1112 struct prb_reserved_entry e;
1113 struct printk_record dest_r;
1114
1115 prb_rec_init_wr(&dest_r, r->info->text_len);
1116
1117 if (!prb_reserve(&e, rb, &dest_r))
1118 return 0;
1119
1120 memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len);
1121 dest_r.info->text_len = r->info->text_len;
1122 dest_r.info->facility = r->info->facility;
1123 dest_r.info->level = r->info->level;
1124 dest_r.info->flags = r->info->flags;
1125 dest_r.info->ts_nsec = r->info->ts_nsec;
1126 dest_r.info->caller_id = r->info->caller_id;
1127 memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info));
1128
1129 prb_final_commit(&e);
1130
1131 return prb_record_text_space(&e);
1132 }
1133
1134 static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata;
1135
1136 static void print_log_buf_usage_stats(void)
1137 {
1138 unsigned int descs_count = log_buf_len >> PRB_AVGBITS;
1139 size_t meta_data_size;
1140
1141 meta_data_size = descs_count * (sizeof(struct prb_desc) + sizeof(struct printk_info));
1142
1143 pr_info("log buffer data + meta data: %u + %zu = %zu bytes\n",
1144 log_buf_len, meta_data_size, log_buf_len + meta_data_size);
1145 }
1146
1147 void __init setup_log_buf(int early)
1148 {
1149 struct printk_info *new_infos;
1150 unsigned int new_descs_count;
1151 struct prb_desc *new_descs;
1152 struct printk_info info;
1153 struct printk_record r;
1154 unsigned int text_size;
1155 size_t new_descs_size;
1156 size_t new_infos_size;
1157 unsigned long flags;
1158 char *new_log_buf;
1159 unsigned int free;
1160 u64 seq;
1161
1162 /*
1163 * Some archs call setup_log_buf() multiple times: first very early,
1164 * e.g. from setup_arch(), and a second time once the percpu areas
1165 * are initialised.
1166 */
1167 if (!early)
1168 set_percpu_data_ready();
1169
1170 if (log_buf != __log_buf)
1171 return;
1172
1173 if (!early && !new_log_buf_len)
1174 log_buf_add_cpu();
1175
1176 if (!new_log_buf_len) {
1177 /* Show the memory stats only once. */
1178 if (!early)
1179 goto out;
1180
1181 return;
1182 }
1183
1184 new_descs_count = new_log_buf_len >> PRB_AVGBITS;
1185 if (new_descs_count == 0) {
1186 pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
1187 goto out;
1188 }
1189
1190 new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
1191 if (unlikely(!new_log_buf)) {
1192 pr_err("log_buf_len: %lu text bytes not available\n",
1193 new_log_buf_len);
1194 goto out;
1195 }
1196
1197 new_descs_size = new_descs_count * sizeof(struct prb_desc);
1198 new_descs = memblock_alloc(new_descs_size, LOG_ALIGN);
1199 if (unlikely(!new_descs)) {
1200 pr_err("log_buf_len: %zu desc bytes not available\n",
1201 new_descs_size);
1202 goto err_free_log_buf;
1203 }
1204
1205 new_infos_size = new_descs_count * sizeof(struct printk_info);
1206 new_infos = memblock_alloc(new_infos_size, LOG_ALIGN);
1207 if (unlikely(!new_infos)) {
1208 pr_err("log_buf_len: %zu info bytes not available\n",
1209 new_infos_size);
1210 goto err_free_descs;
1211 }
1212
1213 prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf));
1214
1215 prb_init(&printk_rb_dynamic,
1216 new_log_buf, ilog2(new_log_buf_len),
1217 new_descs, ilog2(new_descs_count),
1218 new_infos);
1219
1220 local_irq_save(flags);
1221
1222 log_buf_len = new_log_buf_len;
1223 log_buf = new_log_buf;
1224 new_log_buf_len = 0;
1225
1226 free = __LOG_BUF_LEN;
1227 prb_for_each_record(0, &printk_rb_static, seq, &r) {
1228 text_size = add_to_rb(&printk_rb_dynamic, &r);
1229 if (text_size > free)
1230 free = 0;
1231 else
1232 free -= text_size;
1233 }
1234
1235 prb = &printk_rb_dynamic;
1236
1237 local_irq_restore(flags);
1238
1239 /*
1240 * Copy any remaining messages that might have appeared from
1241 * NMI context after copying but before switching to the
1242 * dynamic buffer.
1243 */
1244 prb_for_each_record(seq, &printk_rb_static, seq, &r) {
1245 text_size = add_to_rb(&printk_rb_dynamic, &r);
1246 if (text_size > free)
1247 free = 0;
1248 else
1249 free -= text_size;
1250 }
1251
1252 if (seq != prb_next_seq(&printk_rb_static)) {
1253 pr_err("dropped %llu messages\n",
1254 prb_next_seq(&printk_rb_static) - seq);
1255 }
1256
1257 print_log_buf_usage_stats();
1258 pr_info("early log buf free: %u(%u%%)\n",
1259 free, (free * 100) / __LOG_BUF_LEN);
1260 return;
1261
1262 err_free_descs:
1263 memblock_free(new_descs, new_descs_size);
1264 err_free_log_buf:
1265 memblock_free(new_log_buf, new_log_buf_len);
1266 out:
1267 print_log_buf_usage_stats();
1268 }
1269
1270 static bool __read_mostly ignore_loglevel;
1271
1272 static int __init ignore_loglevel_setup(char *str)
1273 {
1274 ignore_loglevel = true;
1275 pr_info("debug: ignoring loglevel setting.\n");
1276
1277 return 0;
1278 }
1279
1280 early_param("ignore_loglevel", ignore_loglevel_setup);
1281 module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
1282 MODULE_PARM_DESC(ignore_loglevel,
1283 "ignore loglevel setting (prints all kernel messages to the console)");
1284
1285 static bool suppress_message_printing(int level)
1286 {
1287 return (level >= console_loglevel && !ignore_loglevel);
1288 }
1289
1290 #ifdef CONFIG_BOOT_PRINTK_DELAY
1291
1292 static int boot_delay; /* msecs delay after each printk during bootup */
1293 static unsigned long long loops_per_msec; /* based on boot_delay */
1294
1295 static int __init boot_delay_setup(char *str)
1296 {
1297 unsigned long lpj;
1298
1299 lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
1300 loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
1301
1302 get_option(&str, &boot_delay);
1303 if (boot_delay > 10 * 1000)
1304 boot_delay = 0;
1305
1306 pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
1307 "HZ: %d, loops_per_msec: %llu\n",
1308 boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
1309 return 0;
1310 }
1311 early_param("boot_delay", boot_delay_setup);
1312
1313 static void boot_delay_msec(int level)
1314 {
1315 unsigned long long k;
1316 unsigned long timeout;
1317 bool suppress = !is_printk_force_console() &&
1318 suppress_message_printing(level);
1319
1320 if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING) || suppress)
1321 return;
1322
1323 k = (unsigned long long)loops_per_msec * boot_delay;
1324
1325 timeout = jiffies + msecs_to_jiffies(boot_delay);
1326 while (k) {
1327 k--;
1328 cpu_relax();
1329 /*
1330 * read the (volatile) jiffies to keep the compiler
1331 * from optimizing the loop away; loop termination via
1332 * jiffies is secondary and may or may not happen.
1333 */
1334 if (time_after(jiffies, timeout))
1335 break;
1336 touch_nmi_watchdog();
1337 }
1338 }
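/*
 * Worked example (illustrative): with preset_lpj unset, lpj defaults to
 * 1000000; at HZ=250 that gives loops_per_msec = 1000000 / 1000 * 250 =
 * 250000. "boot_delay=100" then spins up to 100 * 250000 = 25000000
 * cpu_relax() iterations per message, bounded by the 100 ms jiffies
 * timeout.
 */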
1339 #else
1340 static inline void boot_delay_msec(int level)
1341 {
1342 }
1343 #endif
1344
1345 static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
1346 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
1347
1348 static size_t print_syslog(unsigned int level, char *buf)
1349 {
1350 return sprintf(buf, "<%u>", level);
1351 }
1352
1353 static size_t print_time(u64 ts, char *buf)
1354 {
1355 unsigned long rem_nsec = do_div(ts, 1000000000);
1356
1357 return sprintf(buf, "[%5lu.%06lu]",
1358 (unsigned long)ts, rem_nsec / 1000);
1359 }
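/*
 * Example (illustrative): ts = 5123456789 ns is printed as
 * "[    5.123456]". do_div() leaves the whole seconds in @ts and returns
 * the nanosecond remainder, which is reduced to microseconds for output.
 */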
1360
1361 #ifdef CONFIG_PRINTK_CALLER
1362 static size_t print_caller(u32 id, char *buf)
1363 {
1364 char caller[12];
1365
1366 snprintf(caller, sizeof(caller), "%c%u",
1367 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
1368 return sprintf(buf, "[%6s]", caller);
1369 }
1370 #else
1371 #define print_caller(id, buf) 0
1372 #endif
1373
1374 static size_t info_print_prefix(const struct printk_info *info, bool syslog,
1375 bool time, char *buf)
1376 {
1377 size_t len = 0;
1378
1379 if (syslog)
1380 len = print_syslog((info->facility << 3) | info->level, buf);
1381
1382 if (time)
1383 len += print_time(info->ts_nsec, buf + len);
1384
1385 len += print_caller(info->caller_id, buf + len);
1386
1387 if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) {
1388 buf[len++] = ' ';
1389 buf[len] = '\0';
1390 }
1391
1392 return len;
1393 }
1394
1395 /*
1396 * Prepare the record for printing. The text is shifted within the given
1397 * buffer to avoid a need for another one. The following operations are
1398 * done:
1399 *
1400 * - Add prefix for each line.
1401 * - Drop truncated lines that no longer fit into the buffer.
1402 * - Add the trailing newline that has been removed in vprintk_store().
1403 * - Add a string terminator.
1404 *
1405 * Since the produced string is always terminated, the maximum possible
1406 * return value is @r->text_buf_size - 1;
1407 *
1408 * Return: The length of the updated/prepared text, including the added
1409 * prefixes and the newline. The terminator is not counted. The dropped
1410 * line(s) are not counted.
1411 */
1412 static size_t record_print_text(struct printk_record *r, bool syslog,
1413 bool time)
1414 {
1415 size_t text_len = r->info->text_len;
1416 size_t buf_size = r->text_buf_size;
1417 char *text = r->text_buf;
1418 char prefix[PRINTK_PREFIX_MAX];
1419 bool truncated = false;
1420 size_t prefix_len;
1421 size_t line_len;
1422 size_t len = 0;
1423 char *next;
1424
1425 /*
1426 * If the message was truncated because the buffer was not large
1427 * enough, treat the available text as if it were the full text.
1428 */
1429 if (text_len > buf_size)
1430 text_len = buf_size;
1431
1432 prefix_len = info_print_prefix(r->info, syslog, time, prefix);
1433
1434 /*
1435 * @text_len: bytes of unprocessed text
1436 * @line_len: bytes of current line _without_ newline
1437 * @text: pointer to beginning of current line
1438 * @len: number of bytes prepared in r->text_buf
1439 */
1440 for (;;) {
1441 next = memchr(text, '\n', text_len);
1442 if (next) {
1443 line_len = next - text;
1444 } else {
1445 /* Drop truncated line(s). */
1446 if (truncated)
1447 break;
1448 line_len = text_len;
1449 }
1450
1451 /*
1452 * Truncate the text if there is not enough space to add the
1453 * prefix and a trailing newline and a terminator.
1454 */
1455 if (len + prefix_len + text_len + 1 + 1 > buf_size) {
1456 /* Drop even the current line if no space. */
1457 if (len + prefix_len + line_len + 1 + 1 > buf_size)
1458 break;
1459
1460 text_len = buf_size - len - prefix_len - 1 - 1;
1461 truncated = true;
1462 }
1463
1464 memmove(text + prefix_len, text, text_len);
1465 memcpy(text, prefix, prefix_len);
1466
1467 /*
1468 * Increment the prepared length to include the text and
1469 * prefix that were just moved+copied. Also increment for the
1470 * newline at the end of this line. If this is the last line,
1471 * there is no newline, but it will be added immediately below.
1472 */
1473 len += prefix_len + line_len + 1;
1474 if (text_len == line_len) {
1475 /*
1476 * This is the last line. Add the trailing newline
1477 * removed in vprintk_store().
1478 */
1479 text[prefix_len + line_len] = '\n';
1480 break;
1481 }
1482
1483 /*
1484 * Advance beyond the added prefix and the related line with
1485 * its newline.
1486 */
1487 text += prefix_len + line_len + 1;
1488
1489 /*
1490 * The remaining text has only decreased by the line with its
1491 * newline.
1492 *
1493 * Note that @text_len can become zero. It happens when @text
1494 * ended with a newline (either due to truncation or the
1495 * original string ending with "\n\n"). The loop is correctly
1496 * repeated and (if not truncated) an empty line with a prefix
1497 * will be prepared.
1498 */
1499 text_len -= line_len + 1;
1500 }
1501
1502 /*
1503 * If a buffer was provided, it will be terminated. Space for the
1504 * string terminator is guaranteed to be available. The terminator is
1505 * not counted in the return value.
1506 */
1507 if (buf_size > 0)
1508 r->text_buf[len] = 0;
1509
1510 return len;
1511 }
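/*
 * Example (illustrative): a record holding "first\nsecond" with the
 * prefix "[    5.123456] " is rewritten in place to
 *
 *	"[    5.123456] first\n[    5.123456] second\n"
 *
 * and the returned length covers both prefixed lines including the added
 * trailing newline, but not the '\0' terminator.
 */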
1512
1513 static size_t get_record_print_text_size(struct printk_info *info,
1514 unsigned int line_count,
1515 bool syslog, bool time)
1516 {
1517 char prefix[PRINTK_PREFIX_MAX];
1518 size_t prefix_len;
1519
1520 prefix_len = info_print_prefix(info, syslog, time, prefix);
1521
1522 /*
1523 * Each line will be preceded with a prefix. The intermediate
1524 * newlines are already within the text, but a final trailing
1525 * newline will be added.
1526 */
1527 return ((prefix_len * line_count) + info->text_len + 1);
1528 }
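/*
 * Worked example (illustrative): a two-line record "ab\ncd" (text_len = 5)
 * with a 15-byte prefix is estimated as 15 * 2 + 5 + 1 = 36 bytes: one
 * prefix per line, the text itself, and the final trailing newline.
 */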
1529
1530 /*
1531 * Beginning with @start_seq, find the first record where it and all following
1532 * records up to (but not including) @max_seq fit into @size.
1533 *
1534 * @max_seq is simply an upper bound and does not need to exist. If the caller
1535 * does not require an upper bound, -1 can be used for @max_seq.
1536 */
1537 static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
1538 bool syslog, bool time)
1539 {
1540 struct printk_info info;
1541 unsigned int line_count;
1542 size_t len = 0;
1543 u64 seq;
1544
1545 /* Determine the size of the records up to @max_seq. */
1546 prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
1547 if (info.seq >= max_seq)
1548 break;
1549 len += get_record_print_text_size(&info, line_count, syslog, time);
1550 }
1551
1552 /*
1553 * Adjust the upper bound for the next loop to avoid subtracting
1554 * lengths that were never added.
1555 */
1556 if (seq < max_seq)
1557 max_seq = seq;
1558
1559 /*
1560 * Move first record forward until length fits into the buffer. Ignore
1561 * newest messages that were not counted in the above cycle. Messages
1562 * might appear and get lost in the meantime. This is a best effort
1563 * that prevents an infinite loop that could occur with a retry.
1564 */
1565 prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
1566 if (len <= size || info.seq >= max_seq)
1567 break;
1568 len -= get_record_print_text_size(&info, line_count, syslog, time);
1569 }
1570
1571 return seq;
1572 }
1573
1574 /* The caller is responsible for making sure @size is greater than 0. */
1575 static int syslog_print(char __user *buf, int size)
1576 {
1577 struct printk_info info;
1578 struct printk_record r;
1579 char *text;
1580 int len = 0;
1581 u64 seq;
1582
1583 text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
1584 if (!text)
1585 return -ENOMEM;
1586
1587 prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
1588
1589 mutex_lock(&syslog_lock);
1590
1591 /*
1592 * Wait for the @syslog_seq record to be available. @syslog_seq may
1593 * change while waiting.
1594 */
1595 do {
1596 seq = syslog_seq;
1597
1598 mutex_unlock(&syslog_lock);
1599 /*
1600 * Guarantee this task is visible on the waitqueue before
1601 * checking the wake condition.
1602 *
1603 * The full memory barrier within set_current_state() of
1604 * prepare_to_wait_event() pairs with the full memory barrier
1605 * within wq_has_sleeper().
1606 *
1607 * This pairs with __wake_up_klogd:A.
1608 */
1609 len = wait_event_interruptible(log_wait,
1610 prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */
1611 mutex_lock(&syslog_lock);
1612
1613 if (len)
1614 goto out;
1615 } while (syslog_seq != seq);
1616
1617 /*
1618 * Copy records that fit into the buffer. The above cycle makes sure
1619 * that the first record is always available.
1620 */
1621 do {
1622 size_t n;
1623 size_t skip;
1624 int err;
1625
1626 if (!prb_read_valid(prb, syslog_seq, &r))
1627 break;
1628
1629 if (r.info->seq != syslog_seq) {
1630 /* message is gone, move to next valid one */
1631 syslog_seq = r.info->seq;
1632 syslog_partial = 0;
1633 }
1634
1635 /*
1636 * To keep the reading/counting of a partial line consistent,
1637 * use the printk_time value as of the beginning of the line.
1638 */
1639 if (!syslog_partial)
1640 syslog_time = printk_time;
1641
1642 skip = syslog_partial;
1643 n = record_print_text(&r, true, syslog_time);
1644 if (n - syslog_partial <= size) {
1645 /* message fits into buffer, move forward */
1646 syslog_seq = r.info->seq + 1;
1647 n -= syslog_partial;
1648 syslog_partial = 0;
1649 } else if (!len) {
1650 /* partial read(), remember position */
1651 n = size;
1652 syslog_partial += n;
1653 } else
1654 n = 0;
1655
1656 if (!n)
1657 break;
1658
1659 mutex_unlock(&syslog_lock);
1660 err = copy_to_user(buf, text + skip, n);
1661 mutex_lock(&syslog_lock);
1662
1663 if (err) {
1664 if (!len)
1665 len = -EFAULT;
1666 break;
1667 }
1668
1669 len += n;
1670 size -= n;
1671 buf += n;
1672 } while (size);
1673 out:
1674 mutex_unlock(&syslog_lock);
1675 kfree(text);
1676 return len;
1677 }
1678
1679 static int syslog_print_all(char __user *buf, int size, bool clear)
1680 {
1681 struct printk_info info;
1682 struct printk_record r;
1683 char *text;
1684 int len = 0;
1685 u64 seq;
1686 bool time;
1687
1688 text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
1689 if (!text)
1690 return -ENOMEM;
1691
1692 time = printk_time;
1693 /*
1694 * Find first record that fits, including all following records,
1695 * into the user-provided buffer for this dump.
1696 */
1697 seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1,
1698 size, true, time);
1699
1700 prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
1701
1702 prb_for_each_record(seq, prb, seq, &r) {
1703 int textlen;
1704
1705 textlen = record_print_text(&r, true, time);
1706
1707 if (len + textlen > size) {
1708 seq--;
1709 break;
1710 }
1711
1712 if (copy_to_user(buf + len, text, textlen))
1713 len = -EFAULT;
1714 else
1715 len += textlen;
1716
1717 if (len < 0)
1718 break;
1719 }
1720
1721 if (clear) {
1722 mutex_lock(&syslog_lock);
1723 latched_seq_write(&clear_seq, seq);
1724 mutex_unlock(&syslog_lock);
1725 }
1726
1727 kfree(text);
1728 return len;
1729 }
1730
1731 static void syslog_clear(void)
1732 {
1733 mutex_lock(&syslog_lock);
1734 latched_seq_write(&clear_seq, prb_next_seq(prb));
1735 mutex_unlock(&syslog_lock);
1736 }
1737
1738 int do_syslog(int type, char __user *buf, int len, int source)
1739 {
1740 struct printk_info info;
1741 bool clear = false;
1742 static int saved_console_loglevel = LOGLEVEL_DEFAULT;
1743 int error;
1744
1745 error = check_syslog_permissions(type, source);
1746 if (error)
1747 return error;
1748
1749 switch (type) {
1750 case SYSLOG_ACTION_CLOSE: /* Close log */
1751 break;
1752 case SYSLOG_ACTION_OPEN: /* Open log */
1753 break;
1754 case SYSLOG_ACTION_READ: /* Read from log */
1755 if (!buf || len < 0)
1756 return -EINVAL;
1757 if (!len)
1758 return 0;
1759 if (!access_ok(buf, len))
1760 return -EFAULT;
1761 error = syslog_print(buf, len);
1762 break;
1763 /* Read/clear last kernel messages */
1764 case SYSLOG_ACTION_READ_CLEAR:
1765 clear = true;
1766 fallthrough;
1767 /* Read last kernel messages */
1768 case SYSLOG_ACTION_READ_ALL:
1769 if (!buf || len < 0)
1770 return -EINVAL;
1771 if (!len)
1772 return 0;
1773 if (!access_ok(buf, len))
1774 return -EFAULT;
1775 error = syslog_print_all(buf, len, clear);
1776 break;
1777 /* Clear ring buffer */
1778 case SYSLOG_ACTION_CLEAR:
1779 syslog_clear();
1780 break;
1781 /* Disable logging to console */
1782 case SYSLOG_ACTION_CONSOLE_OFF:
1783 if (saved_console_loglevel == LOGLEVEL_DEFAULT)
1784 saved_console_loglevel = console_loglevel;
1785 console_loglevel = minimum_console_loglevel;
1786 break;
1787 /* Enable logging to console */
1788 case SYSLOG_ACTION_CONSOLE_ON:
1789 if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
1790 console_loglevel = saved_console_loglevel;
1791 saved_console_loglevel = LOGLEVEL_DEFAULT;
1792 }
1793 break;
1794 /* Set level of messages printed to console */
1795 case SYSLOG_ACTION_CONSOLE_LEVEL:
1796 if (len < 1 || len > 8)
1797 return -EINVAL;
1798 if (len < minimum_console_loglevel)
1799 len = minimum_console_loglevel;
1800 console_loglevel = len;
1801 /* Implicitly re-enable logging to console */
1802 saved_console_loglevel = LOGLEVEL_DEFAULT;
1803 break;
1804 /* Number of chars in the log buffer */
1805 case SYSLOG_ACTION_SIZE_UNREAD:
1806 mutex_lock(&syslog_lock);
1807 if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
1808 /* No unread messages. */
1809 mutex_unlock(&syslog_lock);
1810 return 0;
1811 }
1812 if (info.seq != syslog_seq) {
1813 /* messages are gone, move to first one */
1814 syslog_seq = info.seq;
1815 syslog_partial = 0;
1816 }
1817 if (source == SYSLOG_FROM_PROC) {
1818 /*
1819 * Short-cut for poll(/"proc/kmsg") which simply checks
1820 * for pending data, not the size; return the count of
1821 * records, not the length.
1822 */
1823 error = prb_next_seq(prb) - syslog_seq;
1824 } else {
1825 bool time = syslog_partial ? syslog_time : printk_time;
1826 unsigned int line_count;
1827 u64 seq;
1828
1829 prb_for_each_info(syslog_seq, prb, seq, &info,
1830 &line_count) {
1831 error += get_record_print_text_size(&info, line_count,
1832 true, time);
1833 time = printk_time;
1834 }
1835 error -= syslog_partial;
1836 }
1837 mutex_unlock(&syslog_lock);
1838 break;
1839 /* Size of the log buffer */
1840 case SYSLOG_ACTION_SIZE_BUFFER:
1841 error = log_buf_len;
1842 break;
1843 default:
1844 error = -EINVAL;
1845 break;
1846 }
1847
1848 return error;
1849 }
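/*
 * Userspace sketch (illustrative): do_syslog() is usually reached through
 * glibc's klogctl() wrapper. Reading the whole log without consuming it:
 */
#if 0	/* userspace example */
#include <stdlib.h>
#include <sys/klog.h>

static int dump_dmesg(char **bufp)
{
	/* 10 == SYSLOG_ACTION_SIZE_BUFFER, 3 == SYSLOG_ACTION_READ_ALL */
	int len = klogctl(10, NULL, 0);

	if (len <= 0)
		return len;
	*bufp = malloc(len);
	if (!*bufp)
		return -1;
	return klogctl(3, *bufp, len);
}
#endif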
1850
1851 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
1852 {
1853 return do_syslog(type, buf, len, SYSLOG_FROM_READER);
1854 }
1855
1856 /*
1857 * Special console_lock variants that help to reduce the risk of soft-lockups.
1858 * They allow passing console_lock to another printk() call using a busy wait.
1859 */
1860
1861 #ifdef CONFIG_LOCKDEP
1862 static struct lockdep_map console_owner_dep_map = {
1863 .name = "console_owner"
1864 };
1865 #endif
1866
1867 static DEFINE_RAW_SPINLOCK(console_owner_lock);
1868 static struct task_struct *console_owner;
1869 static bool console_waiter;
1870
1871 /**
1872 * console_lock_spinning_enable - mark beginning of code where another
1873 * thread might safely busy wait
1874 *
1875 * This basically converts console_lock into a spinlock. This marks
1876 * the section where the console_lock owner cannot sleep, because
1877 * there may be a waiter spinning (like a spinlock). Also it must be
1878 * ready to hand over the lock at the end of the section.
1879 */
console_lock_spinning_enable(void)1880 void console_lock_spinning_enable(void)
1881 {
1882 /*
1883 * Do not use spinning in panic(). The panic CPU wants to keep the lock.
1884 * Non-panic CPUs abandon the flush anyway.
1885 *
1886 * Just keep the lockdep annotation. The panic-CPU should avoid
1887 * taking console_owner_lock because it might cause a deadlock.
1888 * This looks like the easiest way how to prevent false lockdep
1889 * reports without handling races a lockless way.
1890 */
1891 if (panic_in_progress())
1892 goto lockdep;
1893
1894 raw_spin_lock(&console_owner_lock);
1895 console_owner = current;
1896 raw_spin_unlock(&console_owner_lock);
1897
1898 lockdep:
1899 /* The waiter may spin on us after setting console_owner */
1900 spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
1901 }
1902
1903 /**
1904 * console_lock_spinning_disable_and_check - mark end of code where another
1905 * thread was able to busy wait and check if there is a waiter
1906 * @cookie: cookie returned from console_srcu_read_lock()
1907 *
1908 * This is called at the end of the section where spinning is allowed.
1909 * It has two functions. First, it is a signal that it is no longer
1910 * safe to start busy waiting for the lock. Second, it checks if
1911 * there is a busy waiter and passes the lock rights to her.
1912 *
1913 * Important: Callers lose both the console_lock and the SRCU read lock if
1914 * there was a busy waiter. They must not touch items synchronized by
1915 * console_lock or SRCU read lock in this case.
1916 *
1917 * Return: 1 if the lock rights were passed, 0 otherwise.
1918 */
console_lock_spinning_disable_and_check(int cookie)1919 int console_lock_spinning_disable_and_check(int cookie)
1920 {
1921 int waiter;
1922
1923 /*
1924 * Ignore spinning waiters during panic() because they might get stopped
1925 * or blocked at any time,
1926 *
1927 * It is safe because nobody is allowed to start spinning during panic
1928 * in the first place. If there has been a waiter then non panic CPUs
1929 * might stay spinning. They would get stopped anyway. The panic context
1930 * will never start spinning and an interrupted spin on panic CPU will
1931 * never continue.
1932 */
1933 if (panic_in_progress()) {
1934 /* Keep lockdep happy. */
1935 spin_release(&console_owner_dep_map, _THIS_IP_);
1936 return 0;
1937 }
1938
1939 raw_spin_lock(&console_owner_lock);
1940 waiter = READ_ONCE(console_waiter);
1941 console_owner = NULL;
1942 raw_spin_unlock(&console_owner_lock);
1943
1944 if (!waiter) {
1945 spin_release(&console_owner_dep_map, _THIS_IP_);
1946 return 0;
1947 }
1948
1949 /* The waiter is now free to continue */
1950 WRITE_ONCE(console_waiter, false);
1951
1952 spin_release(&console_owner_dep_map, _THIS_IP_);
1953
1954 /*
1955 * Preserve lockdep lock ordering. Release the SRCU read lock before
1956 * releasing the console_lock.
1957 */
1958 console_srcu_read_unlock(cookie);
1959
1960 /*
1961 * Hand off console_lock to waiter. The waiter will perform
1962 * the up(). After this, the waiter is the console_lock owner.
1963 */
1964 mutex_release(&console_lock_dep_map, _THIS_IP_);
1965 return 1;
1966 }
1967
1968 /**
1969 * console_trylock_spinning - try to get console_lock by busy waiting
1970 *
1971 * This allows to busy wait for the console_lock when the current
1972 * owner is running in specially marked sections. It means that
1973 * the current owner is running and cannot reschedule until it
1974 * is ready to lose the lock.
1975 *
1976 * Return: 1 if we got the lock, 0 othrewise
1977 */
console_trylock_spinning(void)1978 static int console_trylock_spinning(void)
1979 {
1980 struct task_struct *owner = NULL;
1981 bool waiter;
1982 bool spin = false;
1983 unsigned long flags;
1984
1985 if (console_trylock())
1986 return 1;
1987
1988 /*
1989 * It's unsafe to spin once a panic has begun. If we are the
1990 * panic CPU, we may have already halted the owner of the
1991 * console_sem. If we are not the panic CPU, then we should
1992 * avoid taking console_sem, so the panic CPU has a better
1993 * chance of cleanly acquiring it later.
1994 */
1995 if (panic_in_progress())
1996 return 0;
1997
1998 printk_safe_enter_irqsave(flags);
1999
2000 raw_spin_lock(&console_owner_lock);
2001 owner = READ_ONCE(console_owner);
2002 waiter = READ_ONCE(console_waiter);
2003 if (!waiter && owner && owner != current) {
2004 WRITE_ONCE(console_waiter, true);
2005 spin = true;
2006 }
2007 raw_spin_unlock(&console_owner_lock);
2008
2009 /*
2010 * If there is an active printk() writing to the
2011 * consoles, instead of having it write our data too,
2012 * see if we can offload that load from the active
2013 * printer, and do some printing ourselves.
2014 * Go into a spin only if there isn't already a waiter
2015 * spinning, and there is an active printer, and
2016 * that active printer isn't us (recursive printk?).
2017 */
2018 if (!spin) {
2019 printk_safe_exit_irqrestore(flags);
2020 return 0;
2021 }
2022
2023 /* We spin waiting for the owner to release us */
2024 spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
2025 /* Owner will clear console_waiter on hand off */
2026 while (READ_ONCE(console_waiter))
2027 cpu_relax();
2028 spin_release(&console_owner_dep_map, _THIS_IP_);
2029
2030 printk_safe_exit_irqrestore(flags);
2031 /*
2032 * The owner passed the console lock to us.
2033 * Since we did not spin on console lock, annotate
2034 * this as a trylock. Otherwise lockdep will
2035 * complain.
2036 */
2037 mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
2038
2039 /*
2040 * Update @console_may_schedule for trylock because the previous
2041 * owner may have been schedulable.
2042 */
2043 console_may_schedule = 0;
2044
2045 return 1;
2046 }
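
/*
 * Putting the three helpers above together, the hand-over protocol between
 * the current console_lock owner and a spinning waiter looks roughly like
 * this (illustrative sketch, not additional kernel code):
 *
 *	owner					waiter
 *	-----					------
 *	console_lock_spinning_enable()
 *	con->write(...)				console_trylock_spinning()
 *						  sets console_waiter = true
 *						  spins on console_waiter
 *	console_lock_spinning_disable_and_check()
 *	  sees console_waiter, clears it
 *	  returns 1 (lock rights passed)	  stops spinning; now owns
 *						  console_lock and performs
 *						  the eventual console_unlock()
 */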

/*
 * Recursion is tracked separately on each CPU. If NMIs are supported, an
 * additional NMI context per CPU is also separately tracked. Until per-CPU
 * data is available, a separate "early tracking" is performed.
 */
static DEFINE_PER_CPU(u8, printk_count);
static u8 printk_count_early;
#ifdef CONFIG_HAVE_NMI
static DEFINE_PER_CPU(u8, printk_count_nmi);
static u8 printk_count_nmi_early;
#endif

/*
 * Recursion is limited to keep the output sane. printk() should not require
 * more than 1 level of recursion (allowing, for example, printk() to trigger
 * a WARN), but a higher value is used in case some printk-internal errors
 * exist, such as the ringbuffer validation checks failing.
 */
#define PRINTK_MAX_RECURSION 3

/*
 * Return a pointer to the dedicated counter for the CPU+context of the
 * caller.
 */
static u8 *__printk_recursion_counter(void)
{
#ifdef CONFIG_HAVE_NMI
	if (in_nmi()) {
		if (printk_percpu_data_ready())
			return this_cpu_ptr(&printk_count_nmi);
		return &printk_count_nmi_early;
	}
#endif
	if (printk_percpu_data_ready())
		return this_cpu_ptr(&printk_count);
	return &printk_count_early;
}

/*
 * Enter recursion tracking. Interrupts are disabled to simplify tracking.
 * The caller must check the boolean return value to see if the recursion is
 * allowed. On failure, interrupts are not disabled.
 *
 * @recursion_ptr must be a variable of type (u8 *) and is the same variable
 * that is passed to printk_exit_irqrestore().
 */
#define printk_enter_irqsave(recursion_ptr, flags)	\
({							\
	bool success = true;				\
							\
	typecheck(u8 *, recursion_ptr);			\
	local_irq_save(flags);				\
	(recursion_ptr) = __printk_recursion_counter();	\
	if (*(recursion_ptr) > PRINTK_MAX_RECURSION) {	\
		local_irq_restore(flags);		\
		success = false;			\
	} else {					\
		(*(recursion_ptr))++;			\
	}						\
	success;					\
})

/* Exit recursion tracking, restoring interrupts. */
#define printk_exit_irqrestore(recursion_ptr, flags)	\
	do {						\
		typecheck(u8 *, recursion_ptr);		\
		(*(recursion_ptr))--;			\
		local_irq_restore(flags);		\
	} while (0)
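
/*
 * Typical usage of the tracking macros above (this mirrors what
 * vprintk_store() below does):
 *
 *	u8 *recursion_ptr;
 *	unsigned long irqflags;
 *
 *	if (!printk_enter_irqsave(recursion_ptr, irqflags))
 *		return 0;
 *	... work that might recurse into printk() ...
 *	printk_exit_irqrestore(recursion_ptr, irqflags);
 */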

int printk_delay_msec __read_mostly;

static inline void printk_delay(int level)
{
	boot_delay_msec(level);

	if (unlikely(printk_delay_msec)) {
		int m = printk_delay_msec;

		while (m--) {
			mdelay(1);
			touch_nmi_watchdog();
		}
	}
}

static inline u32 printk_caller_id(void)
{
	return in_task() ? task_pid_nr(current) :
		0x80000000 + smp_processor_id();
}
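
/*
 * For example: a printk() from a task with PID 42 records a caller_id of
 * 42, while a printk() from interrupt or NMI context on CPU 3 records
 * 0x80000003. The top bit thus distinguishes CPU ids from PIDs.
 */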

/**
 * printk_parse_prefix - Parse level and control flags.
 *
 * @text: The terminated text message.
 * @level: A pointer to the current level value, will be updated.
 * @flags: A pointer to the current printk_info flags, will be updated.
 *
 * @level may be NULL if the caller is not interested in the parsed value.
 * Otherwise the variable pointed to by @level must be set to
 * LOGLEVEL_DEFAULT in order to be updated with the parsed value.
 *
 * @flags may be NULL if the caller is not interested in the parsed value.
 * Otherwise the variable pointed to by @flags will be OR'd with the parsed
 * value.
 *
 * Return: The length of the parsed level and control flags.
 */
u16 printk_parse_prefix(const char *text, int *level,
			enum printk_info_flags *flags)
{
	u16 prefix_len = 0;
	int kern_level;

	while (*text) {
		kern_level = printk_get_level(text);
		if (!kern_level)
			break;

		switch (kern_level) {
		case '0' ... '7':
			if (level && *level == LOGLEVEL_DEFAULT)
				*level = kern_level - '0';
			break;
		case 'c':	/* KERN_CONT */
			if (flags)
				*flags |= LOG_CONT;
		}

		prefix_len += 2;
		text += 2;
	}

	return prefix_len;
}
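
/*
 * Illustrative example: with text = KERN_WARNING KERN_CONT "hi", i.e.
 * "\0014\001chi", printk_parse_prefix() sets *level to 4, ORs LOG_CONT
 * into *flags and returns 4 (two 2-byte prefixes), so the message body
 * starts at text + 4.
 */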

__printf(5, 0)
static u16 printk_sprint(char *text, u16 size, int facility,
			 enum printk_info_flags *flags, const char *fmt,
			 va_list args)
{
	u16 text_len;

	text_len = vscnprintf(text, size, fmt, args);

	/* Mark and strip a trailing newline. */
	if (text_len && text[text_len - 1] == '\n') {
		text_len--;
		*flags |= LOG_NEWLINE;
	}

	/* Strip log level and control flags. */
	if (facility == 0) {
		u16 prefix_len;

		prefix_len = printk_parse_prefix(text, NULL, NULL);
		if (prefix_len) {
			text_len -= prefix_len;
			memmove(text, text + prefix_len, text_len);
		}
	}

	trace_console(text, text_len);

	return text_len;
}

__printf(4, 0)
int vprintk_store(int facility, int level,
		  const struct dev_printk_info *dev_info,
		  const char *fmt, va_list args)
{
	struct prb_reserved_entry e;
	enum printk_info_flags flags = 0;
	struct printk_record r;
	unsigned long irqflags;
	u16 trunc_msg_len = 0;
	char prefix_buf[8];
	u8 *recursion_ptr;
	u16 reserve_size;
	va_list args2;
	u32 caller_id;
	u16 text_len;
	int ret = 0;
	u64 ts_nsec;

	if (!printk_enter_irqsave(recursion_ptr, irqflags))
		return 0;

	/*
	 * Since the duration of printk() can vary depending on the message
	 * and state of the ringbuffer, grab the timestamp now so that it is
	 * close to the call of printk(). This provides a more deterministic
	 * timestamp with respect to the caller.
	 */
	ts_nsec = local_clock();

	caller_id = printk_caller_id();

	/*
	 * The sprintf needs to come first since the syslog prefix might be
	 * passed in as a parameter. An extra byte must be reserved so that
	 * later the vscnprintf() into the reserved buffer has room for the
	 * terminating '\0', which is not counted by vsnprintf().
	 */
	va_copy(args2, args);
	reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1;
	va_end(args2);

	if (reserve_size > PRINTKRB_RECORD_MAX)
		reserve_size = PRINTKRB_RECORD_MAX;

	/* Extract log level or control flags. */
	if (facility == 0)
		printk_parse_prefix(&prefix_buf[0], &level, &flags);

	if (level == LOGLEVEL_DEFAULT)
		level = default_message_loglevel;

	if (dev_info)
		flags |= LOG_NEWLINE;

	if (is_printk_force_console())
		flags |= LOG_FORCE_CON;

	if (flags & LOG_CONT) {
		prb_rec_init_wr(&r, reserve_size);
		if (prb_reserve_in_last(&e, prb, &r, caller_id, PRINTKRB_RECORD_MAX)) {
			text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
						 facility, &flags, fmt, args);
			r.info->text_len += text_len;

			if (flags & LOG_FORCE_CON)
				r.info->flags |= LOG_FORCE_CON;

			if (flags & LOG_NEWLINE) {
				r.info->flags |= LOG_NEWLINE;
				prb_final_commit(&e);
			} else {
				prb_commit(&e);
			}

			ret = text_len;
			goto out;
		}
	}

	/*
	 * Explicitly initialize the record before every prb_reserve() call.
	 * prb_reserve_in_last() and prb_reserve() purposely invalidate the
	 * structure when they fail.
	 */
	prb_rec_init_wr(&r, reserve_size);
	if (!prb_reserve(&e, prb, &r)) {
		/* truncate the message if it is too long for empty buffer */
		truncate_msg(&reserve_size, &trunc_msg_len);

		prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
		if (!prb_reserve(&e, prb, &r))
			goto out;
	}

	/* fill message */
	text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args);
	if (trunc_msg_len)
		memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
	r.info->text_len = text_len + trunc_msg_len;
	r.info->facility = facility;
	r.info->level = level & 7;
	r.info->flags = flags & 0x1f;
	r.info->ts_nsec = ts_nsec;
	r.info->caller_id = caller_id;
	if (dev_info)
		memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));

	/* A message without a trailing newline can be continued. */
	if (!(flags & LOG_NEWLINE))
		prb_commit(&e);
	else
		prb_final_commit(&e);

	ret = text_len + trunc_msg_len;
out:
	printk_exit_irqrestore(recursion_ptr, irqflags);
	return ret;
}

/*
 * This acts as a one-way switch to allow legacy consoles to print from
 * the printk() caller context on a panic CPU. It also attempts to flush
 * the legacy consoles in this context.
 */
void printk_legacy_allow_panic_sync(void)
{
	struct console_flush_type ft;

	legacy_allow_panic_sync = true;

	printk_get_console_flush_type(&ft);
	if (ft.legacy_direct) {
		if (console_trylock())
			console_unlock();
	}
}

bool __read_mostly debug_non_panic_cpus;

#ifdef CONFIG_PRINTK_CALLER
static int __init debug_non_panic_cpus_setup(char *str)
{
	debug_non_panic_cpus = true;
	pr_info("allow messages from non-panic CPUs in panic()\n");

	return 0;
}
early_param("debug_non_panic_cpus", debug_non_panic_cpus_setup);
module_param(debug_non_panic_cpus, bool, 0644);
MODULE_PARM_DESC(debug_non_panic_cpus,
		 "allow messages from non-panic CPUs in panic()");
#endif

asmlinkage int vprintk_emit(int facility, int level,
			    const struct dev_printk_info *dev_info,
			    const char *fmt, va_list args)
{
	struct console_flush_type ft;
	int printed_len;

	/* Suppress unimportant messages after panic happens */
	if (unlikely(suppress_printk))
		return 0;

	/*
	 * The messages on the panic CPU are the most important. If
	 * non-panic CPUs are generating any messages, they will be
	 * silently dropped.
	 */
	if (panic_on_other_cpu() &&
	    !debug_non_panic_cpus &&
	    !panic_triggering_all_cpu_backtrace)
		return 0;

	printk_get_console_flush_type(&ft);

	/* If called from the scheduler, we cannot call up(). */
	if (level == LOGLEVEL_SCHED) {
		level = LOGLEVEL_DEFAULT;
		ft.legacy_offload |= ft.legacy_direct && !console_irqwork_blocked;
		ft.legacy_direct = false;
	}

	printk_delay(level);

	printed_len = vprintk_store(facility, level, dev_info, fmt, args);

	if (ft.nbcon_atomic)
		nbcon_atomic_flush_pending();

	if (ft.nbcon_offload)
		nbcon_kthreads_wake();

	if (ft.legacy_direct) {
		/*
		 * The caller may be holding system-critical or
		 * timing-sensitive locks. Disable preemption during
		 * printing of all remaining records to all consoles so that
		 * this context can return as soon as possible. Hopefully
		 * another printk() caller will take over the printing.
		 */
		preempt_disable();
		/*
		 * Try to acquire and then immediately release the console
		 * semaphore. The release will print out buffers. With the
		 * spinning variant, this context tries to take over the
		 * printing from another printing context.
		 */
		if (console_trylock_spinning())
			console_unlock();
		preempt_enable();
	}

	if (ft.legacy_offload)
		defer_console_output();
	else if (!console_irqwork_blocked)
		wake_up_klogd();

	return printed_len;
}
EXPORT_SYMBOL(vprintk_emit);

int vprintk_default(const char *fmt, va_list args)
{
	return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
}
EXPORT_SYMBOL_GPL(vprintk_default);

asmlinkage __visible int _printk(const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vprintk(fmt, args);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(_printk);

static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);

#else /* CONFIG_PRINTK */

#define printk_time		false

#define prb_read_valid(rb, seq, r)	false
#define prb_first_valid_seq(rb)		0
#define prb_next_seq(rb)		0

static u64 syslog_seq;

static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }

#endif /* CONFIG_PRINTK */

#ifdef CONFIG_EARLY_PRINTK
struct console *early_console;

asmlinkage __visible void early_printk(const char *fmt, ...)
{
	va_list ap;
	char buf[512];
	int n;

	if (!early_console)
		return;

	va_start(ap, fmt);
	n = vscnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	early_console->write(early_console, buf, n);
}
#endif
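
/*
 * Note: @early_console is set up by architecture code, typically in
 * response to an "earlyprintk=" boot parameter (for example
 * "earlyprintk=serial" on x86). The accepted syntax is
 * architecture-specific.
 */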

static void set_user_specified(struct console_cmdline *c, bool user_specified)
{
	if (!user_specified)
		return;

	/*
	 * @c console was defined by the user on the command line.
	 * Do not clear it when the console is also added via SPCR or
	 * the device tree.
	 */
	c->user_specified = true;
	/* At least one console defined by the user on the command line. */
	console_set_on_cmdline = 1;
}

static int __add_preferred_console(const char *name, const short idx,
				   const char *devname, char *options,
				   char *brl_options, bool user_specified)
{
	struct console_cmdline *c;
	int i;

	if (!name && !devname)
		return -EINVAL;

	/*
	 * We use a signed short index for struct console for device drivers to
	 * indicate a not yet assigned index or port. However, a negative index
	 * value is not valid when the console name and index are defined on
	 * the command line.
	 */
	if (name && idx < 0)
		return -EINVAL;

	/*
	 * See if this tty is not yet registered, and
	 * if we have a slot free.
	 */
	for (i = 0, c = console_cmdline;
	     i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
	     i++, c++) {
		if ((name && strcmp(c->name, name) == 0 && c->index == idx) ||
		    (devname && strcmp(c->devname, devname) == 0)) {
			if (!brl_options)
				preferred_console = i;
			set_user_specified(c, user_specified);
			return 0;
		}
	}
	if (i == MAX_CMDLINECONSOLES)
		return -E2BIG;
	if (!brl_options)
		preferred_console = i;
	if (name)
		strscpy(c->name, name);
	if (devname)
		strscpy(c->devname, devname);
	c->options = options;
	set_user_specified(c, user_specified);
	braille_set_options(c, brl_options);

	c->index = idx;
	return 0;
}

static int __init console_msg_format_setup(char *str)
{
	if (!strcmp(str, "syslog"))
		console_msg_format = MSG_FORMAT_SYSLOG;
	if (!strcmp(str, "default"))
		console_msg_format = MSG_FORMAT_DEFAULT;
	return 1;
}
__setup("console_msg_format=", console_msg_format_setup);

/*
 * Set up a console. Called via do_early_param() in init/main.c
 * for each "console=" parameter in the boot command line.
 */
static int __init console_setup(char *str)
{
	static_assert(sizeof(console_cmdline[0].devname) >= sizeof(console_cmdline[0].name) + 4);
	char buf[sizeof(console_cmdline[0].devname)];
	char *brl_options = NULL;
	char *ttyname = NULL;
	char *devname = NULL;
	char *options;
	char *s;
	int idx;

	/*
	 * console="" or console=null have been suggested as a way to
	 * disable console output. Use ttynull that has been created
	 * for exactly this purpose.
	 */
	if (str[0] == 0 || strcmp(str, "null") == 0) {
		__add_preferred_console("ttynull", 0, NULL, NULL, NULL, true);
		return 1;
	}

	if (_braille_console_setup(&str, &brl_options))
		return 1;

	/* For a DEVNAME:0.0 style console the character device is unknown early */
	if (strchr(str, ':'))
		devname = buf;
	else
		ttyname = buf;

	/*
	 * Decode str into name, index, options.
	 */
	if (ttyname && isdigit(str[0]))
		scnprintf(buf, sizeof(buf), "ttyS%s", str);
	else
		strscpy(buf, str);

	options = strchr(str, ',');
	if (options)
		*(options++) = 0;

#ifdef __sparc__
	if (!strcmp(str, "ttya"))
		strscpy(buf, "ttyS0");
	if (!strcmp(str, "ttyb"))
		strscpy(buf, "ttyS1");
#endif

	for (s = buf; *s; s++)
		if ((ttyname && isdigit(*s)) || *s == ',')
			break;

	/* @idx will get defined when devname matches. */
	if (devname)
		idx = -1;
	else
		idx = simple_strtoul(s, NULL, 10);

	*s = 0;

	__add_preferred_console(ttyname, idx, devname, options, brl_options, true);
	return 1;
}
__setup("console=", console_setup);
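
/*
 * A few example "console=" values accepted by the parser above
 * (illustrative only):
 *
 *	console=ttyS0,115200	driver name, index and options
 *	console=1,9600		bare digits are treated as "ttyS1,9600"
 *	console=DEVNAME:0.0	DEVNAME:0.0 style; driver name and index are
 *				filled in later, see
 *				match_devname_and_update_preferred_console()
 *	console=null		route output to ttynull (disable output)
 */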

/**
 * add_preferred_console - add a device to the list of preferred consoles.
 * @name: device name
 * @idx: device index
 * @options: options for this console
 *
 * The last preferred console added will be used for kernel messages
 * and stdin/out/err for init. Normally this is used by console_setup
 * above to handle user-supplied console arguments; however it can also
 * be used by arch-specific code either to override the user or more
 * commonly to provide a default console (i.e. from PROM variables) when
 * the user has not supplied one.
 */
int add_preferred_console(const char *name, const short idx, char *options)
{
	return __add_preferred_console(name, idx, NULL, options, NULL, false);
}

/**
 * match_devname_and_update_preferred_console - Update a preferred console
 *	when a matching devname is found.
 * @devname: DEVNAME:0.0 style device name
 * @name: Name of the corresponding console driver, e.g. "ttyS"
 * @idx: Console index, e.g. port number.
 *
 * The function checks whether a device with the given @devname is
 * preferred via the console=DEVNAME:0.0 command line option.
 * It fills in the missing console driver name and console index
 * so that a later register_console() call could find (match)
 * and enable this device.
 *
 * It might be used when a driver subsystem initializes particular
 * devices with already known DEVNAME:0.0 style names. And it
 * could predict which console driver name and index this device
 * would later get associated with.
 *
 * Return: 0 on success, negative error code on failure.
 */
int match_devname_and_update_preferred_console(const char *devname,
					       const char *name,
					       const short idx)
{
	struct console_cmdline *c = console_cmdline;
	int i;

	if (!devname || !strlen(devname) || !name || !strlen(name) || idx < 0)
		return -EINVAL;

	for (i = 0; i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
	     i++, c++) {
		if (!strcmp(devname, c->devname)) {
			pr_info("associate the preferred console \"%s\" with \"%s%d\"\n",
				devname, name, idx);
			strscpy(c->name, name);
			c->index = idx;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(match_devname_and_update_preferred_console);
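
/*
 * Hypothetical caller sketch (names are illustrative only): a serial
 * subsystem that knows a port's DEVNAME:0.0 style name and can already
 * predict its future driver name and index might do:
 *
 *	ret = match_devname_and_update_preferred_console("serial8250:0.0",
 *							 "ttyS", 0);
 *	if (ret && ret != -ENOENT)
 *		pr_warn("failed to update preferred console: %d\n", ret);
 */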

bool console_suspend_enabled = true;
EXPORT_SYMBOL(console_suspend_enabled);

static int __init console_suspend_disable(char *str)
{
	console_suspend_enabled = false;
	return 1;
}
__setup("no_console_suspend", console_suspend_disable);
module_param_named(console_suspend, console_suspend_enabled,
		   bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
		 " and hibernate operations");

static bool printk_console_no_auto_verbose;

void console_verbose(void)
{
	if (console_loglevel && !printk_console_no_auto_verbose)
		console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
}
EXPORT_SYMBOL_GPL(console_verbose);

module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644);
MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc");

/**
 * console_suspend_all - suspend the console subsystem
 *
 * This disables printk() while we go into suspend states
 */
void console_suspend_all(void)
{
	struct console *con;

	if (console_suspend_enabled)
		pr_info("Suspending console(s) (use no_console_suspend to debug)\n");

	/*
	 * Flush any console backlog and then avoid queueing irq_work until
	 * console_resume_all(). Until then deferred printing is no longer
	 * triggered, NBCON consoles transition to atomic flushing, and
	 * any klogd waiters are not triggered.
	 */
	pr_flush(1000, true);
	console_irqwork_blocked = true;

	if (!console_suspend_enabled)
		return;

	console_list_lock();
	for_each_console(con)
		console_srcu_write_flags(con, con->flags | CON_SUSPENDED);
	console_list_unlock();

	/*
	 * Ensure that all SRCU list walks have completed. All printing
	 * contexts must be able to see that they are suspended so that it
	 * is guaranteed that all printing has stopped when this function
	 * completes.
	 */
	synchronize_srcu(&console_srcu);
}

void console_resume_all(void)
{
	struct console_flush_type ft;
	struct console *con;

	/*
	 * Allow queueing irq_work. After restoring console state, deferred
	 * printing and any klogd waiters need to be triggered in case there
	 * is now a console backlog.
	 */
	console_irqwork_blocked = false;

	if (console_suspend_enabled) {
		console_list_lock();
		for_each_console(con)
			console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
		console_list_unlock();

		/*
		 * Ensure that all SRCU list walks have completed. All printing
		 * contexts must be able to see they are no longer suspended so
		 * that they are guaranteed to wake up and resume printing.
		 */
		synchronize_srcu(&console_srcu);
	}

	printk_get_console_flush_type(&ft);
	if (ft.nbcon_offload)
		nbcon_kthreads_wake();
	if (ft.legacy_offload)
		defer_console_output();
	else
		wake_up_klogd();

	pr_flush(1000, true);
}

/**
 * console_cpu_notify - print deferred console messages after CPU hotplug
 * @cpu: unused
 *
 * If printk() is called from a CPU that is not online yet, the messages
 * will be printed on the console only if there are CON_ANYTIME consoles.
 * This function is called when a new CPU comes online (or fails to come
 * up) or goes offline.
 */
static int console_cpu_notify(unsigned int cpu)
{
	struct console_flush_type ft;

	if (!cpuhp_tasks_frozen) {
		printk_get_console_flush_type(&ft);
		if (ft.nbcon_atomic)
			nbcon_atomic_flush_pending();
		if (ft.legacy_direct) {
			if (console_trylock())
				console_unlock();
		}
	}
	return 0;
}

/**
 * console_lock - block the console subsystem from printing
 *
 * Acquires a lock which guarantees that no consoles will
 * be in or enter their write() callback.
 *
 * Can sleep, returns nothing.
 */
void console_lock(void)
{
	might_sleep();

	/* On panic, the console_lock must be left to the panic cpu. */
	while (panic_on_other_cpu())
		msleep(1000);

	down_console_sem();
	console_locked = 1;
	console_may_schedule = 1;
}
EXPORT_SYMBOL(console_lock);

/**
 * console_trylock - try to block the console subsystem from printing
 *
 * Try to acquire a lock which guarantees that no consoles will
 * be in or enter their write() callback.
 *
 * returns 1 on success, and 0 on failure to acquire the lock.
 */
int console_trylock(void)
{
	/* On panic, the console_lock must be left to the panic cpu. */
	if (panic_on_other_cpu())
		return 0;
	if (down_trylock_console_sem())
		return 0;
	console_locked = 1;
	console_may_schedule = 0;
	return 1;
}
EXPORT_SYMBOL(console_trylock);

int is_console_locked(void)
{
	return console_locked;
}
EXPORT_SYMBOL(is_console_locked);

static void __console_unlock(void)
{
	console_locked = 0;
	up_console_sem();
}

#ifdef CONFIG_PRINTK

/*
 * Prepend the message in @pmsg->pbufs->outbuf. This is achieved by shifting
 * the existing message over and inserting the scratchbuf message.
 *
 * @pmsg is the original printk message.
 * @fmt is the printf format of the message which will prepend the existing one.
 *
 * If there is not enough space in @pmsg->pbufs->outbuf, the existing
 * message text will be sufficiently truncated.
 *
 * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
 */
__printf(2, 3)
static void console_prepend_message(struct printk_message *pmsg, const char *fmt, ...)
{
	struct printk_buffers *pbufs = pmsg->pbufs;
	const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
	const size_t outbuf_sz = sizeof(pbufs->outbuf);
	char *scratchbuf = &pbufs->scratchbuf[0];
	char *outbuf = &pbufs->outbuf[0];
	va_list args;
	size_t len;

	va_start(args, fmt);
	len = vscnprintf(scratchbuf, scratchbuf_sz, fmt, args);
	va_end(args);

	/*
	 * Make sure outbuf is sufficiently large before prepending.
	 * Keep at least the prefix when the message must be truncated.
	 * It is a rather theoretical problem when someone tries to
	 * use a minimalist buffer.
	 */
	if (WARN_ON_ONCE(len + PRINTK_PREFIX_MAX >= outbuf_sz))
		return;

	if (pmsg->outbuf_len + len >= outbuf_sz) {
		/* Truncate the message, but keep it terminated. */
		pmsg->outbuf_len = outbuf_sz - (len + 1);
		outbuf[pmsg->outbuf_len] = 0;
	}

	memmove(outbuf + len, outbuf, pmsg->outbuf_len + 1);
	memcpy(outbuf, scratchbuf, len);
	pmsg->outbuf_len += len;
}

/*
 * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message".
 * @pmsg->outbuf_len is updated appropriately.
 *
 * @pmsg is the printk message to prepend.
 *
 * @dropped is the dropped count to report in the dropped message.
 */
void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
{
	console_prepend_message(pmsg, "** %lu printk messages dropped **\n", dropped);
}

/*
 * Prepend the message in @pmsg->pbufs->outbuf with a "replay message".
 * @pmsg->outbuf_len is updated appropriately.
 *
 * @pmsg is the printk message to prepend.
 */
void console_prepend_replay(struct printk_message *pmsg)
{
	console_prepend_message(pmsg, "** replaying previous printk message **\n");
}

/*
 * Read and format the specified record (or a later record if the specified
 * record is not available).
 *
 * @pmsg will contain the formatted result. @pmsg->pbufs must point to a
 * struct printk_buffers.
 *
 * @seq is the record to read and format. If it is not available, the next
 * valid record is read.
 *
 * @is_extended specifies if the message should be formatted for extended
 * console output.
 *
 * @may_suppress specifies if records may be skipped based on loglevel.
 *
 * Returns false if no record is available. Otherwise true and all fields
 * of @pmsg are valid. (See the documentation of struct printk_message
 * for information about the @pmsg fields.)
 */
bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
			     bool is_extended, bool may_suppress)
{
	struct printk_buffers *pbufs = pmsg->pbufs;
	const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
	const size_t outbuf_sz = sizeof(pbufs->outbuf);
	char *scratchbuf = &pbufs->scratchbuf[0];
	char *outbuf = &pbufs->outbuf[0];
	struct printk_info info;
	struct printk_record r;
	size_t len = 0;
	bool force_con;

	/*
	 * Formatting extended messages requires a separate buffer, so use the
	 * scratch buffer to read in the ringbuffer text.
	 *
	 * Formatting normal messages is done in-place, so read the ringbuffer
	 * text directly into the output buffer.
	 */
	if (is_extended)
		prb_rec_init_rd(&r, &info, scratchbuf, scratchbuf_sz);
	else
		prb_rec_init_rd(&r, &info, outbuf, outbuf_sz);

	if (!prb_read_valid(prb, seq, &r))
		return false;

	pmsg->seq = r.info->seq;
	pmsg->dropped = r.info->seq - seq;
	force_con = r.info->flags & LOG_FORCE_CON;

	/*
	 * Skip records that are not forced to be printed on consoles and
	 * that have a level above the console loglevel.
	 */
	if (!force_con && may_suppress && suppress_message_printing(r.info->level))
		goto out;

	if (is_extended) {
		len = info_print_ext_header(outbuf, outbuf_sz, r.info);
		len += msg_print_ext_body(outbuf + len, outbuf_sz - len,
					  &r.text_buf[0], r.info->text_len, &r.info->dev_info);
	} else {
		len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
	}
out:
	pmsg->outbuf_len = len;
	return true;
}

/*
 * The legacy console always acquires a spinlock_t from its printing
 * callback. This violates lock nesting if the caller acquired an always
 * spinning lock (raw_spinlock_t) while invoking printk(). This is not a
 * problem on PREEMPT_RT because legacy consoles always print from a
 * dedicated thread and never from within printk(). Therefore we tell
 * lockdep that a sleeping spin lock (spinlock_t) is valid here.
 */
#ifdef CONFIG_PREEMPT_RT
static inline void printk_legacy_allow_spinlock_enter(void) { }
static inline void printk_legacy_allow_spinlock_exit(void) { }
#else
static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_CONFIG);

static inline void printk_legacy_allow_spinlock_enter(void)
{
	lock_map_acquire_try(&printk_legacy_map);
}

static inline void printk_legacy_allow_spinlock_exit(void)
{
	lock_map_release(&printk_legacy_map);
}
#endif /* CONFIG_PREEMPT_RT */

/*
 * Used as the printk buffers for non-panic, serialized console printing.
 * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
 * Its usage requires the console_lock held.
 */
struct printk_buffers printk_shared_pbufs;

/*
 * Print one record for the given console. The record printed is whatever
 * record is the next available record for the given console.
 *
 * @handover will be set to true if a printk waiter has taken over the
 * console_lock, in which case the caller is no longer holding both the
 * console_lock and the SRCU read lock. Otherwise it is set to false.
 *
 * @cookie is the cookie from the SRCU read lock.
 *
 * Returns false if the given console has no next record to print, otherwise
 * true.
 *
 * Requires the console_lock and the SRCU read lock.
 */
static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
{
	bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
	char *outbuf = &printk_shared_pbufs.outbuf[0];
	struct printk_message pmsg = {
		.pbufs = &printk_shared_pbufs,
	};
	unsigned long flags;

	*handover = false;

	if (!printk_get_next_message(&pmsg, con->seq, is_extended, true))
		return false;

	con->dropped += pmsg.dropped;

	/* Skip messages of formatted length 0. */
	if (pmsg.outbuf_len == 0) {
		con->seq = pmsg.seq + 1;
		goto skip;
	}

	if (con->dropped && !is_extended) {
		console_prepend_dropped(&pmsg, con->dropped);
		con->dropped = 0;
	}

	/* Write everything out to the hardware. */

	if (force_legacy_kthread() && !panic_in_progress()) {
		/*
		 * With forced threading this function is in a task context
		 * (either legacy kthread or get_init_console_seq()). There
		 * is no need for concern about printk reentrance, handovers,
		 * or lockdep complaints.
		 */

		con->write(con, outbuf, pmsg.outbuf_len);
		con->seq = pmsg.seq + 1;
	} else {
		/*
		 * While actively printing out messages, if another printk()
		 * were to occur on another CPU, it may wait for this one to
		 * finish. This task cannot be preempted if there is a
		 * waiter waiting to take over.
		 *
		 * Interrupts are disabled because the hand over to a waiter
		 * must not be interrupted until the hand over is completed
		 * (@console_waiter is cleared).
		 */
		printk_safe_enter_irqsave(flags);
		console_lock_spinning_enable();

		/* Do not trace print latency. */
		stop_critical_timings();

		printk_legacy_allow_spinlock_enter();
		con->write(con, outbuf, pmsg.outbuf_len);
		printk_legacy_allow_spinlock_exit();

		start_critical_timings();

		con->seq = pmsg.seq + 1;

		*handover = console_lock_spinning_disable_and_check(cookie);
		printk_safe_exit_irqrestore(flags);
	}
skip:
	return true;
}

#else

static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
{
	*handover = false;
	return false;
}

static inline void printk_kthreads_check_locked(void) { }

#endif /* CONFIG_PRINTK */

/*
 * Print out one record for each console.
 *
 * @do_cond_resched is set by the caller. It can be true only in schedulable
 * context.
 *
 * @next_seq is set to the sequence number after the last available record.
 * The value is valid only when all usable consoles were flushed, i.e. when
 * the function returns true (it could do the job) and @try_again is set to
 * false; see below.
 *
 * @handover will be set to true if a printk waiter has taken over the
 * console_lock, in which case the caller is no longer holding the
 * console_lock. Otherwise it is set to false.
 *
 * @try_again will be set to true when it still makes sense to call this
 * function again: the function could do the job (see the return value)
 * and some consoles are still making progress.
 *
 * Returns true when the function could do the job. Some consoles are usable,
 * and there was no takeover and no panic_on_other_cpu().
 *
 * Requires the console_lock.
 */
static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *handover,
				     bool *try_again)
{
	struct console_flush_type ft;
	bool any_usable = false;
	struct console *con;
	int cookie;

	*try_again = false;

	printk_get_console_flush_type(&ft);

	cookie = console_srcu_read_lock();
	for_each_console_srcu(con) {
		short flags = console_srcu_read_flags(con);
		u64 printk_seq;
		bool progress;

		/*
		 * console_flush_one_record() is only responsible for
		 * nbcon consoles when the nbcon consoles cannot print via
		 * their atomic or threaded flushing.
		 */
		if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
			continue;

		if (!console_is_usable(con, flags, !do_cond_resched))
			continue;
		any_usable = true;

		if (flags & CON_NBCON) {
			progress = nbcon_legacy_emit_next_record(con, handover, cookie,
								 !do_cond_resched);
			printk_seq = nbcon_seq_read(con);
		} else {
			progress = console_emit_next_record(con, handover, cookie);
			printk_seq = con->seq;
		}

		/*
		 * If a handover has occurred, the SRCU read lock
		 * is already released.
		 */
		if (*handover)
			goto fail;

		/* Track the next of the highest seq flushed. */
		if (printk_seq > *next_seq)
			*next_seq = printk_seq;

		if (!progress)
			continue;

		/*
		 * A usable console made progress. There might still be
		 * pending messages.
		 */
		*try_again = true;

		/* Allow panic_cpu to take over the consoles safely. */
		if (panic_on_other_cpu())
			goto fail_srcu;

		if (do_cond_resched)
			cond_resched();
	}
	console_srcu_read_unlock(cookie);

	return any_usable;

fail_srcu:
	console_srcu_read_unlock(cookie);
fail:
	*try_again = false;
	return false;
}

/*
 * Print out all remaining records to all consoles.
 *
 * @do_cond_resched is set by the caller. It can be true only in schedulable
 * context.
 *
 * @next_seq is set to the sequence number after the last available record.
 * The value is valid only when this function returns true. It means that all
 * usable consoles are completely flushed.
 *
 * @handover will be set to true if a printk waiter has taken over the
 * console_lock, in which case the caller is no longer holding the
 * console_lock. Otherwise it is set to false.
 *
 * Returns true when there was at least one usable console and all messages
 * were flushed to all usable consoles. A returned false informs the caller
 * that everything was not flushed (either there were no usable consoles or
 * another context has taken over printing or it is a panic situation and this
 * is not the panic CPU). Regardless of the reason, the caller should assume
 * it is not useful to immediately try again.
 *
 * Requires the console_lock.
 */
static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
{
	bool try_again;
	bool ret;

	*next_seq = 0;
	*handover = false;

	do {
		ret = console_flush_one_record(do_cond_resched, next_seq,
					       handover, &try_again);
	} while (try_again);

	return ret;
}

static void __console_flush_and_unlock(void)
{
	bool do_cond_resched;
	bool handover;
	bool flushed;
	u64 next_seq;

	/*
	 * Console drivers are called with interrupts disabled, so
	 * @console_may_schedule should be cleared before; however, we may
	 * end up dumping a lot of lines, for example, if called from
	 * console registration path, and should invoke cond_resched()
	 * between lines if allowable. Not doing so can cause a very long
	 * scheduling stall on a slow console leading to RCU stall and
	 * softlockup warnings which exacerbate the issue with more
	 * messages practically incapacitating the system. Therefore, create
	 * a local to use for the printing loop.
	 */
	do_cond_resched = console_may_schedule;

	do {
		console_may_schedule = 0;

		flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
		if (!handover)
			__console_unlock();

		/*
		 * Abort if there was a failure to flush all messages to all
		 * usable consoles. Either it is not possible to flush (in
		 * which case it would be an infinite loop of retrying) or
		 * another context has taken over printing.
		 */
		if (!flushed)
			break;

		/*
		 * Some context may have added new records after
		 * console_flush_all() but before unlocking the console.
		 * Re-check if there is a new record to flush. If the trylock
		 * fails, another context is already handling the printing.
		 */
	} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
}

/**
 * console_unlock - unblock the legacy console subsystem from printing
 *
 * Releases the console_lock which the caller holds to block printing of
 * the legacy console subsystem.
 *
 * While the console_lock was held, console output may have been buffered
 * by printk(). If this is the case, console_unlock() emits the output on
 * legacy consoles prior to releasing the lock.
 *
 * console_unlock() may be called from any context.
 */
void console_unlock(void)
{
	struct console_flush_type ft;

	printk_get_console_flush_type(&ft);
	if (ft.legacy_direct)
		__console_flush_and_unlock();
	else
		__console_unlock();
}
EXPORT_SYMBOL(console_unlock);

/**
 * console_conditional_schedule - yield the CPU if required
 *
 * If the console code is currently allowed to sleep, and
 * if this CPU should yield the CPU to another task, do
 * so here.
 *
 * Must be called while holding the console_lock.
 */
void __sched console_conditional_schedule(void)
{
	if (console_may_schedule)
		cond_resched();
}
EXPORT_SYMBOL(console_conditional_schedule);

void console_unblank(void)
{
	bool found_unblank = false;
	struct console *c;
	int cookie;

	/*
	 * First check if there are any consoles implementing the unblank()
	 * callback. If not, there is no reason to continue and take the
	 * console lock, which in particular can be dangerous if
	 * @oops_in_progress is set.
	 */
	cookie = console_srcu_read_lock();
	for_each_console_srcu(c) {
		if (!console_is_usable(c, console_srcu_read_flags(c), true))
			continue;

		if (c->unblank) {
			found_unblank = true;
			break;
		}
	}
	console_srcu_read_unlock(cookie);
	if (!found_unblank)
		return;

	/*
	 * Stop console printing because the unblank() callback may
	 * assume the console is not within its write() callback.
	 *
	 * If @oops_in_progress is set, this may be an atomic context.
	 * In that case, attempt a trylock as best-effort.
	 */
	if (oops_in_progress) {
		/* Semaphores are not NMI-safe. */
		if (in_nmi())
			return;

		/*
		 * Attempting to trylock the console lock can deadlock
		 * if another CPU was stopped while modifying the
		 * semaphore. "Hope and pray" that this is not the
		 * current situation.
		 */
		if (down_trylock_console_sem() != 0)
			return;
	} else
		console_lock();

	console_locked = 1;
	console_may_schedule = 0;

	cookie = console_srcu_read_lock();
	for_each_console_srcu(c) {
		if (!console_is_usable(c, console_srcu_read_flags(c), true))
			continue;

		if (c->unblank)
			c->unblank();
	}
	console_srcu_read_unlock(cookie);

	console_unlock();

	if (!oops_in_progress)
		pr_flush(1000, true);
}

/*
 * Rewind all consoles to the oldest available record.
 *
 * IMPORTANT: The function is safe only when called under
 *	      console_lock(). It is not enforced because
 *	      it is used as a best effort in panic().
 */
static void __console_rewind_all(void)
{
	struct console *c;
	short flags;
	int cookie;
	u64 seq;

	seq = prb_first_valid_seq(prb);

	cookie = console_srcu_read_lock();
	for_each_console_srcu(c) {
		flags = console_srcu_read_flags(c);

		if (flags & CON_NBCON) {
			nbcon_seq_force(c, seq);
		} else {
			/*
			 * This assignment is safe only when called under
			 * console_lock(). On panic, legacy consoles are
			 * only best effort.
			 */
			c->seq = seq;
		}
	}
	console_srcu_read_unlock(cookie);
}

/**
 * console_flush_on_panic - flush console content on panic
 * @mode: flush all messages in buffer or just the pending ones
 *
 * Immediately output all pending messages no matter what.
 */
void console_flush_on_panic(enum con_flush_mode mode)
{
	struct console_flush_type ft;
	bool handover;
	u64 next_seq;

	/*
	 * Ignore the console lock and flush out the messages. Attempting a
	 * trylock would not be useful because:
	 *
	 *   - if it is contended, it must be ignored anyway
	 *   - console_lock() and console_trylock() block and fail
	 *     respectively in panic for non-panic CPUs
	 *   - semaphores are not NMI-safe
	 */

	/*
	 * If another context is holding the console lock,
	 * @console_may_schedule might be set. Clear it so that
	 * this context does not call cond_resched() while flushing.
	 */
	console_may_schedule = 0;

	if (mode == CONSOLE_REPLAY_ALL)
		__console_rewind_all();

	printk_get_console_flush_type(&ft);
	if (ft.nbcon_atomic)
		nbcon_atomic_flush_pending();

	/* Flush legacy consoles once allowed, even when dangerous. */
	if (legacy_allow_panic_sync)
		console_flush_all(false, &next_seq, &handover);
}

/*
 * Return the console tty driver structure and its associated index
 */
struct tty_driver *console_device(int *index)
{
	struct console *c;
	struct tty_driver *driver = NULL;
	int cookie;

	/*
	 * Take console_lock to serialize device() callback with
	 * other console operations. For example, fg_console is
	 * modified under console_lock when switching vt.
	 */
	console_lock();

	cookie = console_srcu_read_lock();
	for_each_console_srcu(c) {
		if (!c->device)
			continue;
		driver = c->device(c, index);
		if (driver)
			break;
	}
	console_srcu_read_unlock(cookie);

	console_unlock();
	return driver;
}

/*
 * Prevent further output on the passed console device so that (for example)
 * serial drivers can suspend console output before suspending a port, and can
 * re-enable output afterwards.
 */
void console_suspend(struct console *console)
{
	__pr_flush(console, 1000, true);
	console_list_lock();
	console_srcu_write_flags(console, console->flags & ~CON_ENABLED);
	console_list_unlock();

	/*
	 * Ensure that all SRCU list walks have completed. All contexts must
	 * be able to see that this console is disabled so that (for example)
	 * the caller can suspend the port without risk of another context
	 * using the port.
	 */
	synchronize_srcu(&console_srcu);
}
EXPORT_SYMBOL(console_suspend);

void console_resume(struct console *console)
{
	struct console_flush_type ft;
	bool is_nbcon;

	console_list_lock();
	console_srcu_write_flags(console, console->flags | CON_ENABLED);
	is_nbcon = console->flags & CON_NBCON;
	console_list_unlock();

	/*
	 * Ensure that all SRCU list walks have completed. The related
	 * printing context must be able to see it is enabled so that
	 * it is guaranteed to wake up and resume printing.
	 */
	synchronize_srcu(&console_srcu);

	printk_get_console_flush_type(&ft);
	if (is_nbcon && ft.nbcon_offload)
		nbcon_kthread_wake(console);
	else if (ft.legacy_offload)
		defer_console_output();

	__pr_flush(console, 1000, true);
}
EXPORT_SYMBOL(console_resume);
3602
3603 #ifdef CONFIG_PRINTK
3604 static int unregister_console_locked(struct console *console);
3605
3606 /* True when system boot is far enough to create printer threads. */
3607 bool printk_kthreads_ready __ro_after_init;

static struct task_struct *printk_legacy_kthread;

static bool legacy_kthread_should_wakeup(void)
{
	struct console_flush_type ft;
	struct console *con;
	bool ret = false;
	int cookie;

	if (kthread_should_stop())
		return true;

	printk_get_console_flush_type(&ft);

	cookie = console_srcu_read_lock();
	for_each_console_srcu(con) {
		short flags = console_srcu_read_flags(con);
		u64 printk_seq;

		/*
		 * The legacy printer thread is only responsible for nbcon
		 * consoles when the nbcon consoles cannot print via their
		 * atomic or threaded flushing.
		 */
		if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
			continue;

		if (!console_is_usable(con, flags, false))
			continue;

		if (flags & CON_NBCON) {
			printk_seq = nbcon_seq_read(con);
		} else {
			/*
			 * It is safe to read @seq because only this
			 * thread context updates @seq.
			 */
			printk_seq = con->seq;
		}

		if (prb_read_valid(prb, printk_seq, NULL)) {
			ret = true;
			break;
		}
	}
	console_srcu_read_unlock(cookie);

	return ret;
}

static int legacy_kthread_func(void *unused)
{
	bool try_again;

wait_for_event:
	wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());

	do {
		bool handover = false;
		u64 next_seq = 0;

		if (kthread_should_stop())
			return 0;

		console_lock();
		console_flush_one_record(true, &next_seq, &handover, &try_again);
		if (!handover)
			__console_unlock();

	} while (try_again);

	goto wait_for_event;
}

static bool legacy_kthread_create(void)
{
	struct task_struct *kt;

	lockdep_assert_console_list_lock_held();

	kt = kthread_run(legacy_kthread_func, NULL, "pr/legacy");
	if (WARN_ON(IS_ERR(kt))) {
		pr_err("failed to start legacy printing thread\n");
		return false;
	}

	printk_legacy_kthread = kt;

	/*
	 * It is important that console printing threads are scheduled
	 * shortly after a printk call and with generous runtime budgets.
	 */
	sched_set_normal(printk_legacy_kthread, -20);

	return true;
}

/**
 * printk_kthreads_shutdown - shutdown all threaded printers
 *
 * On system shutdown all threaded printers are stopped. This allows printk
 * to transition back to atomic printing, thus providing a robust mechanism
 * for the final shutdown/reboot messages to be output.
 */
static void printk_kthreads_shutdown(void)
{
	struct console *con;

	console_list_lock();
	if (printk_kthreads_running) {
		printk_kthreads_running = false;

		for_each_console(con) {
			if (con->flags & CON_NBCON)
				nbcon_kthread_stop(con);
		}

		/*
		 * The threads may have been stopped while printing a
		 * backlog. Flush any records left over.
		 */
		nbcon_atomic_flush_pending();
	}
	console_list_unlock();
}

static struct syscore_ops printk_syscore_ops = {
	.shutdown = printk_kthreads_shutdown,
};

/*
 * If appropriate, start nbcon kthreads and set @printk_kthreads_running.
 * If any kthreads fail to start, those consoles are unregistered.
 *
 * Must be called under console_list_lock().
 */
static void printk_kthreads_check_locked(void)
{
	struct hlist_node *tmp;
	struct console *con;

	lockdep_assert_console_list_lock_held();

	if (!printk_kthreads_ready)
		return;

	/* Start or stop the legacy kthread when needed. */
	if (have_legacy_console || have_boot_console) {
		if (!printk_legacy_kthread &&
		    force_legacy_kthread() &&
		    !legacy_kthread_create()) {
			/*
			 * All legacy consoles must be unregistered. If there
			 * are any nbcon consoles, they will set up their own
			 * kthread.
			 */
			hlist_for_each_entry_safe(con, tmp, &console_list, node) {
				if (con->flags & CON_NBCON)
					continue;

				unregister_console_locked(con);
			}
		}
	} else if (printk_legacy_kthread) {
		kthread_stop(printk_legacy_kthread);
		printk_legacy_kthread = NULL;
	}

	/*
	 * Printer threads cannot be started as long as any boot console is
	 * registered because there is no way to synchronize the hardware
	 * registers between boot console code and regular console code.
	 * It can only be known that there will be no new boot consoles when
	 * an nbcon console is registered.
	 */
	if (have_boot_console || !have_nbcon_console) {
		/* Clear flag in case all nbcon consoles unregistered. */
		printk_kthreads_running = false;
		return;
	}

	if (printk_kthreads_running)
		return;

	hlist_for_each_entry_safe(con, tmp, &console_list, node) {
		if (!(con->flags & CON_NBCON))
			continue;

		if (!nbcon_kthread_create(con))
			unregister_console_locked(con);
	}

	printk_kthreads_running = true;
}

static int __init printk_set_kthreads_ready(void)
{
	register_syscore_ops(&printk_syscore_ops);

	console_list_lock();
	printk_kthreads_ready = true;
	printk_kthreads_check_locked();
	console_list_unlock();

	return 0;
}
early_initcall(printk_set_kthreads_ready);
#endif /* CONFIG_PRINTK */

static int __read_mostly keep_bootcon;

static int __init keep_bootcon_setup(char *str)
{
	keep_bootcon = 1;
	pr_info("debug: skip boot console de-registration.\n");

	return 0;
}

early_param("keep_bootcon", keep_bootcon_setup);
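
/*
 * Example: booting with "keep_bootcon" on the kernel command line leaves
 * boot consoles registered even after a real console comes up, which can
 * help debug console registration problems:
 *
 *	keep_bootcon console=ttyS0,115200
 */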

static int console_call_setup(struct console *newcon, char *options)
{
	int err;

	if (!newcon->setup)
		return 0;

	/* Synchronize with possible boot console. */
	console_lock();
	err = newcon->setup(newcon, options);
	console_unlock();

	return err;
}

/*
 * This is called by register_console() to try to match
 * the newly registered console with any of the ones selected
 * by either the command line or add_preferred_console() and
 * setup/enable it.
 *
 * Care needs to be taken with consoles that are statically
 * enabled, such as netconsole.
 */
static int try_enable_preferred_console(struct console *newcon,
					bool user_specified)
{
	struct console_cmdline *c;
	int i, err;

	for (i = 0, c = console_cmdline;
	     i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
	     i++, c++) {
		/* Console not yet initialized? */
		if (!c->name[0])
			continue;
		if (c->user_specified != user_specified)
			continue;
		if (!newcon->match ||
		    newcon->match(newcon, c->name, c->index, c->options) != 0) {
			/* default matching */
			BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
			if (strcmp(c->name, newcon->name) != 0)
				continue;
			if (newcon->index >= 0 &&
			    newcon->index != c->index)
				continue;
			if (newcon->index < 0)
				newcon->index = c->index;

			if (_braille_register_console(newcon, c))
				return 0;

			err = console_call_setup(newcon, c->options);
			if (err)
				return err;
		}
		newcon->flags |= CON_ENABLED;
		if (i == preferred_console)
			newcon->flags |= CON_CONSDEV;
		return 0;
	}

	/*
	 * Some consoles, such as pstore and netconsole, can be enabled even
	 * without matching. Accept the pre-enabled consoles only when match()
	 * and setup() had a chance to be called.
	 */
	if (newcon->flags & CON_ENABLED && c->user_specified == user_specified)
		return 0;

	return -ENOENT;
}
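
/*
 * Example: "console=ttyS0,115200" on the command line is parsed into a
 * console_cmdline entry with name "ttyS", index 0 and options "115200".
 * A registering console named "ttyS" with a matching (or unset) index is
 * then set up with those options and enabled by the loop above.
 */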

/* Try to enable the console unconditionally */
static void try_enable_default_console(struct console *newcon)
{
	if (newcon->index < 0)
		newcon->index = 0;

	if (console_call_setup(newcon, NULL) != 0)
		return;

	newcon->flags |= CON_ENABLED;

	if (newcon->device)
		newcon->flags |= CON_CONSDEV;
}

/* Return the starting sequence number for a newly registered console. */
static u64 get_init_console_seq(struct console *newcon, bool bootcon_registered)
{
	struct console *con;
	bool handover;
	u64 init_seq;

	if (newcon->flags & (CON_PRINTBUFFER | CON_BOOT)) {
		/* Get a consistent copy of @syslog_seq. */
		mutex_lock(&syslog_lock);
		init_seq = syslog_seq;
		mutex_unlock(&syslog_lock);
	} else {
		/* Begin with next message added to ringbuffer. */
		init_seq = prb_next_seq(prb);

		/*
		 * If any enabled boot consoles are due to be unregistered
		 * shortly, some may not be caught up and may be the same
		 * device as @newcon. Since it is not known which boot console
		 * is the same device, flush all consoles and, if necessary,
		 * start with the message of the enabled boot console that is
		 * the furthest behind.
		 */
		if (bootcon_registered && !keep_bootcon) {
			/*
			 * Hold the console_lock to stop console printing and
			 * guarantee safe access to console->seq.
			 */
			console_lock();

			/*
			 * Flush all consoles and set the console to start at
			 * the next unprinted sequence number.
			 */
			if (!console_flush_all(true, &init_seq, &handover)) {
				/*
				 * Flushing failed. Just choose the lowest
				 * sequence of the enabled boot consoles.
				 */

				/*
				 * If there was a handover, this context no
				 * longer holds the console_lock.
				 */
				if (handover)
					console_lock();

				init_seq = prb_next_seq(prb);
				for_each_console(con) {
					u64 seq;

					if (!(con->flags & CON_BOOT) ||
					    !(con->flags & CON_ENABLED)) {
						continue;
					}

					if (con->flags & CON_NBCON)
						seq = nbcon_seq_read(con);
					else
						seq = con->seq;

					if (seq < init_seq)
						init_seq = seq;
				}
			}

			console_unlock();
		}
	}

	return init_seq;
}

#define console_first()			\
	hlist_entry(console_list.first, struct console, node)

static int unregister_console_locked(struct console *console);

/*
 * The console driver calls this routine during kernel initialization
 * to register the console printing procedure with printk() and to
 * print any messages that were printed by the kernel before the
 * console driver was initialized.
 *
 * This can happen pretty early during the boot process (because of
 * early_printk) - sometimes before setup_arch() completes - be careful
 * of what kernel features are used - they may not be initialised yet.
 *
 * There are two types of consoles - bootconsoles (early_printk) and
 * "real" consoles (everything which is not a bootconsole) which are
 * handled differently.
 *  - Any number of bootconsoles can be registered at any time.
 *  - As soon as a "real" console is registered, all bootconsoles
 *    will be unregistered automatically.
 *  - Once a "real" console is registered, any attempt to register a
 *    bootconsole will be rejected.
 */
void register_console(struct console *newcon)
{
	bool use_device_lock = (newcon->flags & CON_NBCON) && newcon->write_atomic;
	bool bootcon_registered = false;
	bool realcon_registered = false;
	struct console *con;
	unsigned long flags;
	u64 init_seq;
	int err;

	console_list_lock();

	for_each_console(con) {
		if (WARN(con == newcon, "console '%s%d' already registered\n",
			 con->name, con->index)) {
			goto unlock;
		}

		if (con->flags & CON_BOOT)
			bootcon_registered = true;
		else
			realcon_registered = true;
	}

	/* Do not register boot consoles when there already is a real one. */
	if ((newcon->flags & CON_BOOT) && realcon_registered) {
		pr_info("Too late to register bootconsole %s%d\n",
			newcon->name, newcon->index);
		goto unlock;
	}

	if (newcon->flags & CON_NBCON) {
		/*
		 * Ensure the nbcon console buffers can be allocated
		 * before modifying any global data.
		 */
		if (!nbcon_alloc(newcon))
			goto unlock;
	}

	/*
	 * See if we want to enable this console driver by default.
	 *
	 * Nope when a console is preferred by the command line, device
	 * tree, or SPCR.
	 *
	 * The first real console with tty binding (driver) wins. More
	 * consoles might get enabled before the right one is found.
	 *
	 * Note that a console with tty binding will have CON_CONSDEV
	 * flag set and will be first in the list.
	 */
	if (preferred_console < 0) {
		if (hlist_empty(&console_list) || !console_first()->device ||
		    console_first()->flags & CON_BOOT) {
			try_enable_default_console(newcon);
		}
	}

	/* See if this console matches one we selected on the command line */
	err = try_enable_preferred_console(newcon, true);

	/* If not, try to match against the platform default(s) */
	if (err == -ENOENT)
		err = try_enable_preferred_console(newcon, false);

	/* printk() messages are not printed to the Braille console. */
	if (err || newcon->flags & CON_BRL) {
		if (newcon->flags & CON_NBCON)
			nbcon_free(newcon);
		goto unlock;
	}

	/*
	 * If we have a bootconsole, and are switching to a real console,
	 * don't print everything out again, since when the boot console and
	 * the real console are the same physical device, it's annoying to
	 * see the beginning boot messages twice.
	 */
	if (bootcon_registered &&
	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
		newcon->flags &= ~CON_PRINTBUFFER;
	}

	newcon->dropped = 0;
	init_seq = get_init_console_seq(newcon, bootcon_registered);

	if (newcon->flags & CON_NBCON) {
		have_nbcon_console = true;
		nbcon_seq_force(newcon, init_seq);
	} else {
		have_legacy_console = true;
		newcon->seq = init_seq;
	}

	if (newcon->flags & CON_BOOT)
		have_boot_console = true;

	/*
	 * If another context is actively using the hardware of this new
	 * console, it will not be aware of the nbcon synchronization. There
	 * is a risk that two contexts could access the hardware
	 * simultaneously if this new console is used for atomic printing
	 * and the other context is still using the hardware.
	 *
	 * Use the driver synchronization to ensure that the hardware is not
	 * in use while this new console transitions to being registered.
	 */
	if (use_device_lock)
		newcon->device_lock(newcon, &flags);

	/*
	 * Put this console in the list - keep the
	 * preferred driver at the head of the list.
	 */
	if (hlist_empty(&console_list)) {
		/* Ensure CON_CONSDEV is always set for the head. */
		newcon->flags |= CON_CONSDEV;
		hlist_add_head_rcu(&newcon->node, &console_list);

	} else if (newcon->flags & CON_CONSDEV) {
		/* Only the new head can have CON_CONSDEV set. */
		console_srcu_write_flags(console_first(), console_first()->flags & ~CON_CONSDEV);
		hlist_add_head_rcu(&newcon->node, &console_list);

	} else {
		hlist_add_behind_rcu(&newcon->node, console_list.first);
	}

	/*
	 * No need to synchronize SRCU here! The caller does not rely
	 * on all contexts being able to see the new console before
	 * register_console() completes.
	 */

	/* This new console is now registered. */
	if (use_device_lock)
		newcon->device_unlock(newcon, flags);

	console_sysfs_notify();

	/*
	 * By unregistering the bootconsoles after we enable the real console
	 * we get the "console xxx enabled" message on all the consoles -
	 * boot consoles, real consoles, etc - this is to ensure that end
	 * users know there might be something in the kernel's log buffer that
	 * went to the bootconsole (that they do not see on the real console)
	 */
	con_printk(KERN_INFO, newcon, "enabled\n");
	if (bootcon_registered &&
	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
	    !keep_bootcon) {
		struct hlist_node *tmp;

		hlist_for_each_entry_safe(con, tmp, &console_list, node) {
			if (con->flags & CON_BOOT)
				unregister_console_locked(con);
		}
	}

	/* Changed console list, may require printer threads to start/stop. */
	printk_kthreads_check_locked();
unlock:
	console_list_unlock();
}
EXPORT_SYMBOL(register_console);
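
/*
 * Minimal registration sketch (all names are hypothetical, for
 * illustration only). A legacy console needs little more than a name
 * and a write() callback; unregister_console() below tears it down:
 *
 *	static void foo_console_write(struct console *con, const char *s,
 *				      unsigned int count)
 *	{
 *		... emit @count bytes from @s to the hardware ...
 *	}
 *
 *	static struct console foo_console = {
 *		.name	= "foo",
 *		.write	= foo_console_write,
 *		.flags	= CON_PRINTBUFFER,
 *		.index	= -1,
 *	};
 *
 *	register_console(&foo_console);
 */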

/* Must be called under console_list_lock(). */
static int unregister_console_locked(struct console *console)
{
	bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
	bool found_legacy_con = false;
	bool found_nbcon_con = false;
	bool found_boot_con = false;
	unsigned long flags;
	struct console *c;
	int res;

	lockdep_assert_console_list_lock_held();

	con_printk(KERN_INFO, console, "disabled\n");

	res = _braille_unregister_console(console);
	if (res < 0)
		return res;
	if (res > 0)
		return 0;

	if (!console_is_registered_locked(console))
		res = -ENODEV;
	else if (console_is_usable(console, console->flags, true))
		__pr_flush(console, 1000, true);

	/* Disable it unconditionally */
	console_srcu_write_flags(console, console->flags & ~CON_ENABLED);

	if (res < 0)
		return res;

	/*
	 * Use the driver synchronization to ensure that the hardware is not
	 * in use while this console transitions to being unregistered.
	 */
	if (use_device_lock)
		console->device_lock(console, &flags);

	hlist_del_init_rcu(&console->node);

	if (use_device_lock)
		console->device_unlock(console, flags);

	/*
	 * <HISTORICAL>
	 * If this isn't the last console and it has CON_CONSDEV set, we
	 * need to set it on the next preferred console.
	 * </HISTORICAL>
	 *
	 * The above makes no sense as there is no guarantee that the next
	 * console has any device attached. Oh well....
	 */
	if (!hlist_empty(&console_list) && console->flags & CON_CONSDEV)
		console_srcu_write_flags(console_first(), console_first()->flags | CON_CONSDEV);

	/*
	 * Ensure that all SRCU list walks have completed so that no context
	 * can still see this console in the list and any exit/cleanup
	 * routines can be performed safely.
	 */
	synchronize_srcu(&console_srcu);

	/*
	 * With this console gone, the global flags tracking registered
	 * console types may have changed. Update them.
	 */
	for_each_console(c) {
		if (c->flags & CON_BOOT)
			found_boot_con = true;

		if (c->flags & CON_NBCON)
			found_nbcon_con = true;
		else
			found_legacy_con = true;
	}
	if (!found_boot_con)
		have_boot_console = found_boot_con;
	if (!found_legacy_con)
		have_legacy_console = found_legacy_con;
	if (!found_nbcon_con)
		have_nbcon_console = found_nbcon_con;

	/* @have_nbcon_console must be updated before calling nbcon_free(). */
	if (console->flags & CON_NBCON)
		nbcon_free(console);

	console_sysfs_notify();

	if (console->exit)
		res = console->exit(console);

	/* Changed console list, may require printer threads to start/stop. */
	printk_kthreads_check_locked();

	return res;
}

int unregister_console(struct console *console)
{
	int res;

	console_list_lock();
	res = unregister_console_locked(console);
	console_list_unlock();
	return res;
}
EXPORT_SYMBOL(unregister_console);

/**
 * console_force_preferred_locked - force a registered console preferred
 * @con: The registered console to force preferred.
 *
 * Must be called under console_list_lock().
 */
void console_force_preferred_locked(struct console *con)
{
	struct console *cur_pref_con;

	if (!console_is_registered_locked(con))
		return;

	cur_pref_con = console_first();

	/* Already preferred? */
	if (cur_pref_con == con)
		return;

	/*
	 * Delete, but do not re-initialize the entry. This allows the console
	 * to continue to appear registered (via any hlist_unhashed_lockless()
	 * checks), even though it was briefly removed from the console list.
	 */
	hlist_del_rcu(&con->node);

	/*
	 * Ensure that all SRCU list walks have completed so that the console
	 * can be added to the beginning of the console list and its forward
	 * list pointer can be re-initialized.
	 */
	synchronize_srcu(&console_srcu);

	con->flags |= CON_CONSDEV;
	WARN_ON(!con->device);

	/* Only the new head can have CON_CONSDEV set. */
	console_srcu_write_flags(cur_pref_con, cur_pref_con->flags & ~CON_CONSDEV);
	hlist_add_head_rcu(&con->node, &console_list);
}
EXPORT_SYMBOL(console_force_preferred_locked);

/*
 * Initialize the console device. This is called *early*, so
 * we can't necessarily depend on lots of kernel help here.
 * Just do some early initializations, and do the complex setup
 * later.
 */
void __init console_init(void)
{
	int ret;
	initcall_t call;
	initcall_entry_t *ce;

#ifdef CONFIG_NULL_TTY_DEFAULT_CONSOLE
	if (!console_set_on_cmdline)
		add_preferred_console("ttynull", 0, NULL);
#endif

	/* Setup the default TTY line discipline. */
	n_tty_init();

	/*
	 * set up the console device so that later boot sequences can
	 * inform about problems etc..
	 */
	ce = __con_initcall_start;
	trace_initcall_level("console");
	while (ce < __con_initcall_end) {
		call = initcall_from_entry(ce);
		trace_initcall_start(call);
		ret = call();
		trace_initcall_finish(call, ret);
		ce++;
	}
}

/*
 * Some boot consoles access data that is in the init section and which will
 * be discarded after the initcalls have been run. To make sure that no code
 * will access this data, unregister the boot consoles in a late initcall.
 *
 * If for some reason, such as deferred probe or the driver being a loadable
 * module, the real console hasn't registered yet at this point, there will
 * be a brief interval in which no messages are logged to the console, which
 * makes it difficult to diagnose problems that occur during this time.
 *
 * To mitigate this problem somewhat, only unregister consoles whose memory
 * intersects with the init section. Note that all other boot consoles will
 * get unregistered when the real preferred console is registered.
 */
static int __init printk_late_init(void)
{
	struct hlist_node *tmp;
	struct console *con;
	int ret;

	console_list_lock();
	hlist_for_each_entry_safe(con, tmp, &console_list, node) {
		if (!(con->flags & CON_BOOT))
			continue;

		/* Check addresses that might be used for enabled consoles. */
		if (init_section_intersects(con, sizeof(*con)) ||
		    init_section_contains(con->write, 0) ||
		    init_section_contains(con->read, 0) ||
		    init_section_contains(con->device, 0) ||
		    init_section_contains(con->unblank, 0) ||
		    init_section_contains(con->data, 0)) {
			/*
			 * Please, consider moving the reported consoles out
			 * of the init section.
			 */
			pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n",
				con->name, con->index);
			unregister_console_locked(con);
		}
	}
	console_list_unlock();

	ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
					console_cpu_notify);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
					console_cpu_notify, NULL);
	WARN_ON(ret < 0);
	printk_sysctl_init();
	return 0;
}
late_initcall(printk_late_init);

#if defined CONFIG_PRINTK
/* If @con is specified, only wait for that console. Otherwise wait for all. */
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
{
	unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms);
	unsigned long remaining_jiffies = timeout_jiffies;
	struct console_flush_type ft;
	struct console *c;
	u64 last_diff = 0;
	u64 printk_seq;
	short flags;
	int cookie;
	u64 diff;
	u64 seq;

	/* Sorry, pr_flush() will not work this early. */
	if (system_state < SYSTEM_SCHEDULING)
		return false;

	might_sleep();

	seq = prb_next_reserve_seq(prb);

	/* Flush the consoles so that records up to @seq are printed. */
	printk_get_console_flush_type(&ft);
	if (ft.nbcon_atomic)
		nbcon_atomic_flush_pending();
	if (ft.legacy_direct) {
		console_lock();
		console_unlock();
	}

	for (;;) {
		unsigned long begin_jiffies;
		unsigned long slept_jiffies;

		diff = 0;

		/*
		 * Hold the console_lock to guarantee safe access to
		 * console->seq. Releasing console_lock flushes more
		 * records in case @seq is still not printed on all
		 * usable consoles.
		 *
		 * Holding the console_lock is not necessary if there
		 * are no legacy or boot consoles. However, such a
		 * console could register at any time. Always hold the
		 * console_lock as a precaution rather than
		 * synchronizing against register_console().
		 */
		console_lock();

		cookie = console_srcu_read_lock();
		for_each_console_srcu(c) {
			if (con && con != c)
				continue;

			flags = console_srcu_read_flags(c);

			/*
			 * If consoles are not usable, it cannot be expected
			 * that they make forward progress, so only increment
			 * @diff for usable consoles.
			 */
			if (!console_is_usable(c, flags, true) &&
			    !console_is_usable(c, flags, false)) {
				continue;
			}

			if (flags & CON_NBCON) {
				printk_seq = nbcon_seq_read(c);
			} else {
				printk_seq = c->seq;
			}

			if (printk_seq < seq)
				diff += seq - printk_seq;
		}
		console_srcu_read_unlock(cookie);

		if (diff != last_diff && reset_on_progress)
			remaining_jiffies = timeout_jiffies;

		console_unlock();

		/* Note: @diff is 0 if there are no usable consoles. */
		if (diff == 0 || remaining_jiffies == 0)
			break;

		/* msleep(1) might sleep much longer. Check time by jiffies. */
		begin_jiffies = jiffies;
		msleep(1);
		slept_jiffies = jiffies - begin_jiffies;

		remaining_jiffies -= min(slept_jiffies, remaining_jiffies);

		last_diff = diff;
	}

	return (diff == 0);
}

/**
 * pr_flush() - Wait for printing threads to catch up.
 *
 * @timeout_ms:        The maximum time (in ms) to wait.
 * @reset_on_progress: Reset the timeout if forward progress is seen.
 *
 * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
 * represents infinite waiting.
 *
 * If @reset_on_progress is true, the timeout will be reset whenever any
 * printer has been seen to make some forward progress.
 *
 * Context: Process context. May sleep while acquiring console lock.
 * Return: true if all usable printers are caught up.
 */
bool pr_flush(int timeout_ms, bool reset_on_progress)
{
	return __pr_flush(NULL, timeout_ms, reset_on_progress);
}
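
/*
 * Usage sketch: before powering down a transport, wait up to one second
 * for all usable consoles to print everything, restarting the timeout
 * whenever some progress is observed:
 *
 *	if (!pr_flush(1000, true))
 *		pr_warn("not all console messages were flushed\n");
 */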

/*
 * Delayed printk version, for scheduler-internal messages:
 */
#define PRINTK_PENDING_WAKEUP	0x01
#define PRINTK_PENDING_OUTPUT	0x02

static DEFINE_PER_CPU(int, printk_pending);

static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
	int pending = this_cpu_xchg(printk_pending, 0);

	if (pending & PRINTK_PENDING_OUTPUT) {
		if (force_legacy_kthread()) {
			if (printk_legacy_kthread)
				wake_up_interruptible(&legacy_wait);
		} else {
			if (console_trylock())
				console_unlock();
		}
	}

	if (pending & PRINTK_PENDING_WAKEUP)
		wake_up_interruptible(&log_wait);
}

static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
	IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);

static void __wake_up_klogd(int val)
{
	if (!printk_percpu_data_ready())
		return;

	/*
	 * It is not allowed to call this function when console irq_work
	 * is blocked.
	 */
	if (WARN_ON_ONCE(console_irqwork_blocked))
		return;

	preempt_disable();
	/*
	 * Guarantee any new records can be seen by tasks preparing to wait
	 * before this context checks if the wait queue is empty.
	 *
	 * The full memory barrier within wq_has_sleeper() pairs with the full
	 * memory barrier within set_current_state() of
	 * prepare_to_wait_event(), which is called after ___wait_event() adds
	 * the waiter but before it has checked the wait condition.
	 *
	 * This pairs with devkmsg_read:A and syslog_print:A.
	 */
	if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
	    (val & PRINTK_PENDING_OUTPUT)) {
		this_cpu_or(printk_pending, val);
		irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
	}
	preempt_enable();
}

/**
 * wake_up_klogd - Wake kernel logging daemon
 *
 * Use this function when new records have been added to the ringbuffer
 * and the console printing of those records has already occurred or is
 * known to be handled by some other context. This function will only
 * wake the logging daemon.
 *
 * Context: Any context.
 */
void wake_up_klogd(void)
{
	__wake_up_klogd(PRINTK_PENDING_WAKEUP);
}

/**
 * defer_console_output - Wake kernel logging daemon and trigger
 *	console printing in a deferred context
 *
 * Use this function when new records have been added to the ringbuffer
 * and this context is responsible for console printing those records, but
 * the current context is not allowed to perform the console printing.
 * Trigger an irq_work context to perform the console printing. This
 * function also wakes the logging daemon.
 *
 * Context: Any context.
 */
void defer_console_output(void)
{
	/*
	 * New messages may have been added directly to the ringbuffer
	 * using vprintk_store(), so wake any waiters as well.
	 */
	__wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
}

/**
 * printk_trigger_flush - Attempt to flush printk buffer to consoles.
 *
 * If possible, flush the printk buffer to all consoles in the caller's
 * context. If offloading is available, trigger deferred printing.
 *
 * This is best effort. Depending on the system state, console states,
 * and caller context, no actual flushing may result from this call.
 */
void printk_trigger_flush(void)
{
	struct console_flush_type ft;

	printk_get_console_flush_type(&ft);
	if (ft.nbcon_atomic)
		nbcon_atomic_flush_pending();
	if (ft.nbcon_offload)
		nbcon_kthreads_wake();
	if (ft.legacy_direct) {
		if (console_trylock())
			console_unlock();
	}
	if (ft.legacy_offload)
		defer_console_output();
}

int vprintk_deferred(const char *fmt, va_list args)
{
	return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
}

int _printk_deferred(const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vprintk_deferred(fmt, args);
	va_end(args);

	return r;
}
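
/*
 * Sketch: code holding scheduler locks must not print to consoles
 * directly. The printk_deferred() wrapper around _printk_deferred()
 * stores the record and defers the console printing:
 *
 *	printk_deferred(KERN_WARNING "sched: clock update skipped\n");
 */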

/*
 * printk rate limiting, lifted from the networking subsystem.
 *
 * This enforces a rate limit: not more than 10 kernel messages
 * every 5s to make a denial-of-service attack impossible.
 */
DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);

int __printk_ratelimit(const char *func)
{
	return ___ratelimit(&printk_ratelimit_state, func);
}
EXPORT_SYMBOL(__printk_ratelimit);
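
/*
 * Sketch: callers normally use the printk_ratelimit() wrapper, which
 * passes the calling function name for the "callbacks suppressed"
 * report:
 *
 *	if (printk_ratelimit())
 *		pr_warn("dropped %d packets\n", dropped);
 */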

/**
 * printk_timed_ratelimit - caller-controlled printk ratelimiting
 * @caller_jiffies: pointer to caller's state
 * @interval_msecs: minimum interval between prints
 *
 * printk_timed_ratelimit() returns true if more than @interval_msecs
 * milliseconds have elapsed since the last time printk_timed_ratelimit()
 * returned true.
 */
bool printk_timed_ratelimit(unsigned long *caller_jiffies,
			    unsigned int interval_msecs)
{
	unsigned long elapsed = jiffies - *caller_jiffies;

	if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
		return false;

	*caller_jiffies = jiffies;
	return true;
}
EXPORT_SYMBOL(printk_timed_ratelimit);
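
/*
 * Usage sketch: print at most once every five seconds, with the caller
 * providing the state word (@last_msg is a hypothetical name):
 *
 *	static unsigned long last_msg;
 *
 *	if (printk_timed_ratelimit(&last_msg, 5000))
 *		pr_warn("fifo overrun\n");
 */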

static DEFINE_SPINLOCK(dump_list_lock);
static LIST_HEAD(dump_list);

/**
 * kmsg_dump_register - register a kernel log dumper.
 * @dumper: pointer to the kmsg_dumper structure
 *
 * Adds a kernel log dumper to the system. The dump callback in the
 * structure will be called when the kernel oopses or panics and must be
 * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
 */
int kmsg_dump_register(struct kmsg_dumper *dumper)
{
	unsigned long flags;
	int err = -EBUSY;

	/* The dump callback needs to be set */
	if (!dumper->dump)
		return -EINVAL;

	spin_lock_irqsave(&dump_list_lock, flags);
	/* Don't allow registering multiple times */
	if (!dumper->registered) {
		dumper->registered = 1;
		list_add_tail_rcu(&dumper->list, &dump_list);
		err = 0;
	}
	spin_unlock_irqrestore(&dump_list_lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_register);
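
/*
 * Registration sketch (names are hypothetical). The dump() callback is
 * invoked from kmsg_dump_desc() on panic/oops/etc. and typically walks
 * the records with the iterator helpers further below:
 *
 *	static void foo_dump(struct kmsg_dumper *dumper,
 *			     struct kmsg_dump_detail *detail)
 *	{
 *		... retrieve records via kmsg_dump_get_line()/_buffer() ...
 *	}
 *
 *	static struct kmsg_dumper foo_dumper = {
 *		.dump	    = foo_dump,
 *		.max_reason = KMSG_DUMP_PANIC,
 *	};
 *
 *	kmsg_dump_register(&foo_dumper);
 */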

/**
 * kmsg_dump_unregister - unregister a kmsg dumper.
 * @dumper: pointer to the kmsg_dumper structure
 *
 * Removes a dump device from the system. Returns zero on success and
 * %-EINVAL otherwise.
 */
int kmsg_dump_unregister(struct kmsg_dumper *dumper)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&dump_list_lock, flags);
	if (dumper->registered) {
		dumper->registered = 0;
		list_del_rcu(&dumper->list);
		err = 0;
	}
	spin_unlock_irqrestore(&dump_list_lock, flags);
	synchronize_rcu();

	return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_unregister);

static bool always_kmsg_dump;
module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);

const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
{
	switch (reason) {
	case KMSG_DUMP_PANIC:
		return "Panic";
	case KMSG_DUMP_OOPS:
		return "Oops";
	case KMSG_DUMP_EMERG:
		return "Emergency";
	case KMSG_DUMP_SHUTDOWN:
		return "Shutdown";
	default:
		return "Unknown";
	}
}
EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);

/**
 * kmsg_dump_desc - dump kernel log to kernel message dumpers.
 * @reason: the reason (oops, panic etc) for dumping
 * @desc: a short string to describe what caused the panic or oops. Can be NULL
 *	if no additional description is available.
 *
 * Call each registered dumper's dump() callback, which can retrieve the
 * kmsg records with kmsg_dump_get_line() or kmsg_dump_get_buffer().
 */
void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
	struct kmsg_dumper *dumper;
	struct kmsg_dump_detail detail = {
		.reason = reason,
		.description = desc};

	rcu_read_lock();
	list_for_each_entry_rcu(dumper, &dump_list, list) {
		enum kmsg_dump_reason max_reason = dumper->max_reason;

		/*
		 * If client has not provided a specific max_reason, default
		 * to KMSG_DUMP_OOPS, unless always_kmsg_dump was set.
		 */
		if (max_reason == KMSG_DUMP_UNDEF) {
			max_reason = always_kmsg_dump ? KMSG_DUMP_MAX :
							KMSG_DUMP_OOPS;
		}
		if (reason > max_reason)
			continue;

		/* invoke dumper which will iterate over records */
		dumper->dump(dumper, &detail);
	}
	rcu_read_unlock();
}

/**
 * kmsg_dump_get_line - retrieve one kmsg log line
 * @iter: kmsg dump iterator
 * @syslog: include the "<4>" prefixes
 * @line: buffer to copy the line to
 * @size: maximum size of the buffer
 * @len: length of line placed into buffer
 *
 * Start at the beginning of the kmsg buffer, with the oldest kmsg
 * record, and copy one record into the provided buffer.
 *
 * Consecutive calls will return the next available record moving
 * towards the end of the buffer with the youngest messages.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
			char *line, size_t size, size_t *len)
{
	u64 min_seq = latched_seq_read_nolock(&clear_seq);
	struct printk_info info;
	unsigned int line_count;
	struct printk_record r;
	size_t l = 0;
	bool ret = false;

	if (iter->cur_seq < min_seq)
		iter->cur_seq = min_seq;

	prb_rec_init_rd(&r, &info, line, size);

	/* Read text or count text lines? */
	if (line) {
		if (!prb_read_valid(prb, iter->cur_seq, &r))
			goto out;
		l = record_print_text(&r, syslog, printk_time);
	} else {
		if (!prb_read_valid_info(prb, iter->cur_seq,
					 &info, &line_count)) {
			goto out;
		}
		l = get_record_print_text_size(&info, line_count, syslog,
					       printk_time);
	}

	iter->cur_seq = r.info->seq + 1;
	ret = true;
out:
	if (len)
		*len = l;
	return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
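
/*
 * Iteration sketch inside a dump() callback (buffer size chosen
 * arbitrarily): rewind the iterator, then pull one record at a time,
 * oldest first:
 *
 *	struct kmsg_dump_iter iter;
 *	char line[256];
 *	size_t len;
 *
 *	kmsg_dump_rewind(&iter);
 *	while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
 *		... persist @len bytes of @line ...
 */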

/**
 * kmsg_dump_get_buffer - copy kmsg log lines
 * @iter: kmsg dump iterator
 * @syslog: include the "<4>" prefixes
 * @buf: buffer to copy the lines to
 * @size: maximum size of the buffer
 * @len_out: length of the text placed into the buffer
 *
 * Start at the end of the kmsg buffer and fill the provided buffer
 * with as many of the *youngest* kmsg records as fit into it.
 * If the buffer is large enough, all available kmsg records will be
 * copied with a single call.
 *
 * Consecutive calls will fill the buffer with the next block of
 * available older records, not including the earlier retrieved ones.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
			  char *buf, size_t size, size_t *len_out)
{
	u64 min_seq = latched_seq_read_nolock(&clear_seq);
	struct printk_info info;
	struct printk_record r;
	u64 seq;
	u64 next_seq;
	size_t len = 0;
	bool ret = false;
	bool time = printk_time;

	if (!buf || !size)
		goto out;

	if (iter->cur_seq < min_seq)
		iter->cur_seq = min_seq;

	if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
		if (info.seq != iter->cur_seq) {
			/* messages are gone, move to first available one */
			iter->cur_seq = info.seq;
		}
	}

	/* last entry */
	if (iter->cur_seq >= iter->next_seq)
		goto out;

	/*
	 * Find first record that fits, including all following records,
	 * into the user-provided buffer for this dump. Pass in size-1
	 * because this function (by way of record_print_text()) will
	 * not write more than size-1 bytes of text into @buf.
	 */
	seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
				     size - 1, syslog, time);

	/*
	 * Next kmsg_dump_get_buffer() invocation will dump block of
	 * older records stored right before this one.
	 */
	next_seq = seq;

	prb_rec_init_rd(&r, &info, buf, size);

	prb_for_each_record(seq, prb, seq, &r) {
		if (r.info->seq >= iter->next_seq)
			break;

		len += record_print_text(&r, syslog, time);

		/* Adjust record to store to remaining buffer space. */
		prb_rec_init_rd(&r, &info, buf + len, size - len);
	}

	iter->next_seq = next_seq;
	ret = true;
out:
	if (len_out)
		*len_out = len;
	return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
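
/*
 * Sketch for the buffer variant: the first call copies the youngest
 * records that fit into @buf; each further call returns the next older
 * block until everything has been read:
 *
 *	kmsg_dump_rewind(&iter);
 *	while (kmsg_dump_get_buffer(&iter, false, buf, sizeof(buf), &len))
 *		... persist @len bytes of @buf ...
 */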

/**
 * kmsg_dump_rewind - reset the iterator
 * @iter: kmsg dump iterator
 *
 * Reset the dumper's iterator so that kmsg_dump_get_line() and
 * kmsg_dump_get_buffer() can be called again and used multiple
 * times within the same dumper.dump() callback.
 */
void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
{
	iter->cur_seq = latched_seq_read_nolock(&clear_seq);
	iter->next_seq = prb_next_seq(prb);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);

/**
 * console_try_replay_all - try to replay kernel log on consoles
 *
 * Try to obtain lock on console subsystem and replay all
 * available records in printk buffer on the consoles.
 * Does nothing if lock is not obtained.
 *
 * Context: Any, except for NMI.
 */
void console_try_replay_all(void)
{
	struct console_flush_type ft;

	printk_get_console_flush_type(&ft);
	if (console_trylock()) {
		__console_rewind_all();
		if (ft.nbcon_atomic)
			nbcon_atomic_flush_pending();
		if (ft.nbcon_offload)
			nbcon_kthreads_wake();
		if (ft.legacy_offload)
			defer_console_output();
		/* Consoles are flushed as part of console_unlock(). */
		console_unlock();
	}
}
#endif

#ifdef CONFIG_SMP
static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);

bool is_printk_cpu_sync_owner(void)
{
	return (atomic_read(&printk_cpu_sync_owner) == raw_smp_processor_id());
}

/**
 * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
 *                            spinning lock is not owned by any CPU.
 *
 * Context: Any context.
 */
void __printk_cpu_sync_wait(void)
{
	do {
		cpu_relax();
	} while (atomic_read(&printk_cpu_sync_owner) != -1);
}
EXPORT_SYMBOL(__printk_cpu_sync_wait);

/**
 * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
 *                               spinning lock.
 *
 * If no processor has the lock, the calling processor takes the lock and
 * becomes the owner. If the calling processor is already the owner of the
 * lock, this function succeeds immediately.
 *
 * Context: Any context. Expects interrupts to be disabled.
 * Return: 1 on success, otherwise 0.
 */
int __printk_cpu_sync_try_get(void)
{
	int cpu;
	int old;

	cpu = smp_processor_id();

	/*
	 * Guarantee loads and stores from this CPU when it is the lock owner
	 * are _not_ visible to the previous lock owner. This pairs with
	 * __printk_cpu_sync_put:B.
	 *
	 * Memory barrier involvement:
	 *
	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
	 * then __printk_cpu_sync_put:A can never read from
	 * __printk_cpu_sync_try_get:B.
	 *
	 * Relies on:
	 *
	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
	 * of the previous CPU
	 *    matching
	 * ACQUIRE from __printk_cpu_sync_try_get:A to
	 * __printk_cpu_sync_try_get:B of this CPU
	 */
	old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
				     cpu); /* LMM(__printk_cpu_sync_try_get:A) */
	if (old == -1) {
		/*
		 * This CPU is now the owner and begins loading/storing
		 * data: LMM(__printk_cpu_sync_try_get:B)
		 */
		return 1;

	} else if (old == cpu) {
		/* This CPU is already the owner. */
		atomic_inc(&printk_cpu_sync_nested);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(__printk_cpu_sync_try_get);

/**
 * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
 *
 * The calling processor must be the owner of the lock.
 *
 * Context: Any context. Expects interrupts to be disabled.
 */
void __printk_cpu_sync_put(void)
{
	if (atomic_read(&printk_cpu_sync_nested)) {
		atomic_dec(&printk_cpu_sync_nested);
		return;
	}

	/*
	 * This CPU is finished loading/storing data:
	 * LMM(__printk_cpu_sync_put:A)
	 */

	/*
	 * Guarantee loads and stores from this CPU when it was the
	 * lock owner are visible to the next lock owner. This pairs
	 * with __printk_cpu_sync_try_get:A.
	 *
	 * Memory barrier involvement:
	 *
	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
	 * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
	 *
	 * Relies on:
	 *
	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
	 * of this CPU
	 *    matching
	 * ACQUIRE from __printk_cpu_sync_try_get:A to
	 * __printk_cpu_sync_try_get:B of the next CPU
	 */
	atomic_set_release(&printk_cpu_sync_owner,
			   -1); /* LMM(__printk_cpu_sync_put:B) */
}
EXPORT_SYMBOL(__printk_cpu_sync_put);
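
/*
 * Typical usage is via the printk_cpu_sync_get_irqsave() and
 * printk_cpu_sync_put_irqrestore() wrapper macros from <linux/printk.h>,
 * which spin in __printk_cpu_sync_wait() until ownership is acquired:
 *
 *	unsigned long flags;
 *
 *	printk_cpu_sync_get_irqsave(flags);
 *	... emit a multi-line report without interleaving from other CPUs ...
 *	printk_cpu_sync_put_irqrestore(flags);
 */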
#endif /* CONFIG_SMP */