// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/printk.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Modified to make sys_syslog() more flexible: added commands to
 * return the last 4k of kernel messages, regardless of whether
 * they've been read or not.  Added option to suppress kernel printk's
 * to the console.  Added hook for sending the console messages
 * elsewhere, in preparation for a serial line console (someday).
 * Ted Ts'o, 2/11/93.
 * Modified for sysctl support, 1/8/97, Chris Horn.
 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
 *     manfred@colorfullife.com
 * Rewrote bits to get rid of console_lock
 *	01Mar01 Andrew Morton
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/syscore_ops.h>
#include <linux/vmcore_info.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
#include <linux/syslog.h>
#include <linux/cpu.h>
#include <linux/rculist.h>
#include <linux/poll.h>
#include <linux/irq_work.h>
#include <linux/ctype.h>
#include <linux/uio.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/panic.h>

#include <linux/uaccess.h>
#include <asm/sections.h>

#include <trace/events/initcall.h>
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>

#include "printk_ringbuffer.h"
#include "console_cmdline.h"
#include "braille.h"
#include "internal.h"

int console_printk[4] = {
	CONSOLE_LOGLEVEL_DEFAULT,	/* console_loglevel */
	MESSAGE_LOGLEVEL_DEFAULT,	/* default_message_loglevel */
	CONSOLE_LOGLEVEL_MIN,		/* minimum_console_loglevel */
	CONSOLE_LOGLEVEL_DEFAULT,	/* default_console_loglevel */
};
EXPORT_SYMBOL_GPL(console_printk);
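
/*
 * Illustration (not part of the original source): these four values are
 * exposed, in this order, via /proc/sys/kernel/printk. With common Kconfig
 * defaults reading that file shows something like "7 4 1 7"; writing a
 * single value, e.g. "echo 4 > /proc/sys/kernel/printk", updates only
 * console_loglevel so that just messages with level < 4 reach the console.
 */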

atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
EXPORT_SYMBOL(ignore_console_lock_warning);

EXPORT_TRACEPOINT_SYMBOL_GPL(console);

/*
 * Low level drivers may need that to know if they can schedule in
 * their unblank() callback or not. So let's export it.
 */
int oops_in_progress;
EXPORT_SYMBOL(oops_in_progress);

/*
 * console_mutex protects console_list updates and console->flags updates.
 * The flags are synchronized only for consoles that are registered, i.e.
 * accessible via the console list.
 */
static DEFINE_MUTEX(console_mutex);

/*
 * console_sem protects updates to console->seq
 * and also provides serialization for console printing.
 */
static DEFINE_SEMAPHORE(console_sem, 1);
HLIST_HEAD(console_list);
EXPORT_SYMBOL_GPL(console_list);
DEFINE_STATIC_SRCU(console_srcu);

/*
 * The system may need to suppress printk messages under certain
 * circumstances, e.g. after a kernel panic.
 */
int __read_mostly suppress_printk;

#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_lock_dep_map = {
	.name = "console_lock"
};

void lockdep_assert_console_list_lock_held(void)
{
	lockdep_assert_held(&console_mutex);
}
EXPORT_SYMBOL(lockdep_assert_console_list_lock_held);
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
bool console_srcu_read_lock_is_held(void)
{
	return srcu_read_lock_held(&console_srcu);
}
EXPORT_SYMBOL(console_srcu_read_lock_is_held);
#endif

enum devkmsg_log_bits {
	__DEVKMSG_LOG_BIT_ON = 0,
	__DEVKMSG_LOG_BIT_OFF,
	__DEVKMSG_LOG_BIT_LOCK,
};

enum devkmsg_log_masks {
	DEVKMSG_LOG_MASK_ON	= BIT(__DEVKMSG_LOG_BIT_ON),
	DEVKMSG_LOG_MASK_OFF	= BIT(__DEVKMSG_LOG_BIT_OFF),
	DEVKMSG_LOG_MASK_LOCK	= BIT(__DEVKMSG_LOG_BIT_LOCK),
};

/* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
#define DEVKMSG_LOG_MASK_DEFAULT	0

static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;

static int __control_devkmsg(char *str)
{
	size_t len;

	if (!str)
		return -EINVAL;

	len = str_has_prefix(str, "on");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_ON;
		return len;
	}

	len = str_has_prefix(str, "off");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_OFF;
		return len;
	}

	len = str_has_prefix(str, "ratelimit");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
		return len;
	}

	return -EINVAL;
}

static int __init control_devkmsg(char *str)
{
	if (__control_devkmsg(str) < 0) {
		pr_warn("printk.devkmsg: bad option string '%s'\n", str);
		return 1;
	}

	/*
	 * Set sysctl string accordingly:
	 */
	if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
		strscpy(devkmsg_log_str, "on");
	else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
		strscpy(devkmsg_log_str, "off");
	/* else "ratelimit" which is set by default. */

	/*
	 * Sysctl cannot change it anymore. The kernel command line setting of
	 * this parameter is to force the setting to be permanent throughout
	 * the runtime of the system. This is a precautionary measure against
	 * userspace trying to be a smarta** and attempting to change it up
	 * on us.
	 */
	devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;

	return 1;
}
__setup("printk.devkmsg=", control_devkmsg);
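
/*
 * Usage sketch (illustrative): booting with "printk.devkmsg=on" disables
 * ratelimiting of /dev/kmsg writers, "printk.devkmsg=off" ignores their
 * messages entirely, and "printk.devkmsg=ratelimit" keeps the default.
 * Because DEVKMSG_LOG_MASK_LOCK is set above, a value given on the command
 * line cannot be changed later via the kernel.printk_devkmsg sysctl.
 */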

char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{
	char old_str[DEVKMSG_STR_MAX_SIZE];
	unsigned int old;
	int err;

	if (write) {
		if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
			return -EINVAL;

		old = devkmsg_log;
		strscpy(old_str, devkmsg_log_str);
	}

	err = proc_dostring(table, write, buffer, lenp, ppos);
	if (err)
		return err;

	if (write) {
		err = __control_devkmsg(devkmsg_log_str);

		/*
		 * Do not accept an unknown string OR a known string with
		 * trailing crap...
		 */
		if (err < 0 || (err + 1 != *lenp)) {

			/* ... and restore old setting. */
			devkmsg_log = old;
			strscpy(devkmsg_log_str, old_str);

			return -EINVAL;
		}
	}

	return 0;
}
#endif /* CONFIG_PRINTK && CONFIG_SYSCTL */

/**
 * console_list_lock - Lock the console list
 *
 * For console list or console->flags updates
 */
void console_list_lock(void)
{
	/*
	 * In unregister_console() and console_force_preferred_locked(),
	 * synchronize_srcu() is called with the console_list_lock held.
	 * Therefore the console_list_lock must not be taken while the
	 * SRCU read lock is held.
	 *
	 * Detecting if this context is really in the read-side critical
	 * section is only possible if the appropriate debug options are
	 * enabled.
	 */
	WARN_ON_ONCE(debug_lockdep_rcu_enabled() &&
		     srcu_read_lock_held(&console_srcu));

	mutex_lock(&console_mutex);
}
EXPORT_SYMBOL(console_list_lock);

/**
 * console_list_unlock - Unlock the console list
 *
 * Counterpart to console_list_lock()
 */
void console_list_unlock(void)
{
	mutex_unlock(&console_mutex);
}
EXPORT_SYMBOL(console_list_unlock);

/**
 * console_srcu_read_lock - Register a new reader for the
 *	SRCU-protected console list
 *
 * Use for_each_console_srcu() to iterate the console list
 *
 * Context: Any context.
 * Return: A cookie to pass to console_srcu_read_unlock().
 */
int console_srcu_read_lock(void)
	__acquires(&console_srcu)
{
	return srcu_read_lock_nmisafe(&console_srcu);
}
EXPORT_SYMBOL(console_srcu_read_lock);

/**
 * console_srcu_read_unlock - Unregister an old reader from
 *	the SRCU-protected console list
 * @cookie: cookie returned from console_srcu_read_lock()
 *
 * Counterpart to console_srcu_read_lock()
 */
void console_srcu_read_unlock(int cookie)
	__releases(&console_srcu)
{
	srcu_read_unlock_nmisafe(&console_srcu, cookie);
}
EXPORT_SYMBOL(console_srcu_read_unlock);

/*
 * Helper macros to handle lockdep when locking/unlocking console_sem. We use
 * macros instead of functions so that _RET_IP_ contains useful information.
 */
#define down_console_sem() do { \
	down(&console_sem);\
	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
} while (0)

static int __down_trylock_console_sem(unsigned long ip)
{
	int lock_failed;
	unsigned long flags;

	/*
	 * Here and in __up_console_sem() we need to be in safe mode,
	 * because spindump/WARN/etc from under console->lock will
	 * deadlock in printk()->down_trylock_console_sem() otherwise.
	 */
	printk_safe_enter_irqsave(flags);
	lock_failed = down_trylock(&console_sem);
	printk_safe_exit_irqrestore(flags);

	if (lock_failed)
		return 1;
	mutex_acquire(&console_lock_dep_map, 0, 1, ip);
	return 0;
}
#define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)

static void __up_console_sem(unsigned long ip)
{
	unsigned long flags;

	mutex_release(&console_lock_dep_map, ip);

	printk_safe_enter_irqsave(flags);
	up(&console_sem);
	printk_safe_exit_irqrestore(flags);
}
#define up_console_sem() __up_console_sem(_RET_IP_)

/*
 * This is used for debugging the mess that is the VT code by
 * keeping track of whether we have the console semaphore held. It's
 * definitely not the perfect debug tool (we don't know if _WE_
 * hold it and are racing, but it helps tracking those weird code
 * paths in the console code where we end up in places I want
 * locked without the console semaphore held).
 */
static int console_locked;

/*
 *	Array of consoles built from command line options (console=)
 */

#define MAX_CMDLINECONSOLES 8

static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];

static int preferred_console = -1;
int console_set_on_cmdline;
EXPORT_SYMBOL(console_set_on_cmdline);

/* Flag: console code may call schedule() */
static int console_may_schedule;

enum con_msg_format_flags {
	MSG_FORMAT_DEFAULT	= 0,
	MSG_FORMAT_SYSLOG	= (1 << 0),
};

static int console_msg_format = MSG_FORMAT_DEFAULT;

/*
 * The printk log buffer consists of a sequenced collection of records, each
 * containing variable length message text. Every record also contains its
 * own meta-data (@info).
 *
 * Every record meta-data carries the timestamp in microseconds, as well as
 * the standard userspace syslog level and syslog facility. The usual kernel
 * messages use LOG_KERN; userspace-injected messages always carry a matching
 * syslog facility, by default LOG_USER. The origin of every message can be
 * reliably determined that way.
 *
 * The human readable log message of a record is available in @text, the
 * length of the message text in @text_len. The stored message is not
 * terminated.
 *
 * Optionally, a record can carry a dictionary of properties (key/value
 * pairs), to provide userspace with a machine-readable message context.
 *
 * Examples for well-defined, commonly used property names are:
 *   DEVICE=b12:8               device identifier
 *                                b12:8         block dev_t
 *                                c127:3        char dev_t
 *                                n8            netdev ifindex
 *                                +sound:card0  subsystem:devname
 *   SUBSYSTEM=pci              driver-core subsystem name
 *
 * Valid characters in property names are [a-zA-Z0-9.-_]. Property names
 * and values are terminated by a '\0' character.
 *
 * Example of record values:
 *   record.text_buf                = "it's a line" (unterminated)
 *   record.info.seq                = 56
 *   record.info.ts_nsec            = 36863
 *   record.info.text_len           = 11
 *   record.info.facility           = 0 (LOG_KERN)
 *   record.info.flags              = 0
 *   record.info.level              = 3 (LOG_ERR)
 *   record.info.caller_id          = 299 (task 299)
 *   record.info.dev_info.subsystem = "pci" (terminated)
 *   record.info.dev_info.device    = "+pci:0000:00:01.0" (terminated)
 *
 * The 'struct printk_info' buffer must never be directly exported to
 * userspace, it is a kernel-private implementation detail that might
 * need to be changed in the future, when the requirements change.
 *
 * /dev/kmsg exports the structured data in the following line format:
427 * "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
428 *
429 * Users of the export format should ignore possible additional values
430 * separated by ',', and find the message after the ';' character.
431 *
432 * The optional key/value pairs are attached as continuation lines starting
433 * with a space character and terminated by a newline. All possible
434 * non-prinatable characters are escaped in the "\xff" notation.
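 *
 * Example (illustrative, not from the original source) of one exported
 * record followed by its dictionary, as a /dev/kmsg reader would see it:
 *
 *   6,339,5140900,-;NET: Registered protocol family 10
 *    SUBSYSTEM=net
 *
 * i.e. facility 0 / level 6 (KERN_INFO), sequence number 339, timestamp
 * 5140900 microseconds, no continuation flag.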
 */

/* syslog_lock protects syslog_* variables and write access to clear_seq. */
static DEFINE_MUTEX(syslog_lock);

/*
 * Specifies if a legacy console is registered. If legacy consoles are
 * present, it is necessary to perform the console lock/unlock dance
 * whenever console flushing should occur.
 */
bool have_legacy_console;

/*
 * Specifies if an nbcon console is registered. If nbcon consoles are present,
 * synchronous printing of legacy consoles will not occur during panic until
 * the backtrace has been stored to the ringbuffer.
 */
bool have_nbcon_console;

/*
 * Specifies if a boot console is registered. If boot consoles are present,
 * nbcon consoles cannot print simultaneously and must be synchronized by
 * the console lock. This is because boot consoles and nbcon consoles may
 * have mapped the same hardware.
 */
bool have_boot_console;

/* See printk_legacy_allow_panic_sync() for details. */
bool legacy_allow_panic_sync;

#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static size_t syslog_partial;
static bool syslog_time;

/* True when _all_ printer threads are available for printing. */
bool printk_kthreads_running;

struct latched_seq {
	seqcount_latch_t	latch;
	u64			val[2];
};

/*
 * The next printk record to read after the last 'clear' command. There are
 * two copies (updated with seqcount_latch) so that reads can locklessly
 * access a valid value. Writers are synchronized by @syslog_lock.
 */
static struct latched_seq clear_seq = {
	.latch		= SEQCNT_LATCH_ZERO(clear_seq.latch),
	.val[0]		= 0,
	.val[1]		= 0,
};

#define LOG_LEVEL(v)		((v) & 0x07)
#define LOG_FACILITY(v)		((v) >> 3 & 0xff)

/* record buffer */
#define LOG_ALIGN __alignof__(unsigned long)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
#define LOG_BUF_LEN_MAX ((u32)1 << 31)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;

/*
 * Define the average message size. This only affects the number of
 * descriptors that will be available. Underestimating is better than
 * overestimating (too many available descriptors is better than not enough).
 */
#define PRB_AVGBITS 5	/* 32 character average length */

#if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS
#error CONFIG_LOG_BUF_SHIFT value too small.
#endif
_DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
		 PRB_AVGBITS, &__log_buf[0]);
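
/*
 * Worked example (illustrative): with CONFIG_LOG_BUF_SHIFT = 17 (a 128 KiB
 * text buffer) and PRB_AVGBITS = 5, the static ringbuffer above is created
 * with 17 - 5 = 12 descriptor-count bits, i.e. it can track 2^12 = 4096
 * records at once, assuming a 32-byte average message length.
 */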

static struct printk_ringbuffer printk_rb_dynamic;

struct printk_ringbuffer *prb = &printk_rb_static;

/*
 * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
 * per_cpu_areas are initialised. This variable is set to true when
 * it's safe to access per-CPU data.
 */
static bool __printk_percpu_data_ready __ro_after_init;

bool printk_percpu_data_ready(void)
{
	return __printk_percpu_data_ready;
}

/* Must be called under syslog_lock. */
static void latched_seq_write(struct latched_seq *ls, u64 val)
{
	write_seqcount_latch_begin(&ls->latch);
	ls->val[0] = val;
	write_seqcount_latch(&ls->latch);
	ls->val[1] = val;
	write_seqcount_latch_end(&ls->latch);
}

/* Can be called from any context. */
static u64 latched_seq_read_nolock(struct latched_seq *ls)
{
	unsigned int seq;
	unsigned int idx;
	u64 val;

	do {
		seq = read_seqcount_latch(&ls->latch);
		idx = seq & 0x1;
		val = ls->val[idx];
	} while (read_seqcount_latch_retry(&ls->latch, seq));

	return val;
}

/* Return log buffer address */
char *log_buf_addr_get(void)
{
	return log_buf;
}

/* Return log buffer size */
u32 log_buf_len_get(void)
{
	return log_buf_len;
}

/*
 * Define how much of the log buffer we could take at maximum. The value
 * must be greater than two. Note that only half of the buffer is available
 * when the index points to the middle.
 */
#define MAX_LOG_TAKE_PART 4
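
/*
 * Worked example (illustrative): with a 128 KiB log_buf_len, max_text_len
 * in truncate_msg() below becomes 128 KiB / 4 = 32 KiB, so no single record
 * may claim more than a quarter of the buffer.
 */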
static const char trunc_msg[] = "<truncated>";

static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
{
	/*
	 * The message should not take the whole buffer. Otherwise, it might
	 * get removed too soon.
	 */
	u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;

	if (*text_len > max_text_len)
		*text_len = max_text_len;

	/* enable the warning message (if there is room) */
	*trunc_msg_len = strlen(trunc_msg);
	if (*text_len >= *trunc_msg_len)
		*text_len -= *trunc_msg_len;
	else
		*trunc_msg_len = 0;
}

int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);

static int syslog_action_restricted(int type)
{
	if (dmesg_restrict)
		return 1;
	/*
	 * Unless restricted, we allow "read all" and "get buffer size"
	 * for everybody.
	 */
	return type != SYSLOG_ACTION_READ_ALL &&
	       type != SYSLOG_ACTION_SIZE_BUFFER;
}

static int check_syslog_permissions(int type, int source)
{
	/*
	 * If this is from /proc/kmsg and we've already opened it, then we've
	 * already done the capabilities checks at open time.
	 */
	if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
		goto ok;

	if (syslog_action_restricted(type)) {
		if (capable(CAP_SYSLOG))
			goto ok;
		return -EPERM;
	}
ok:
	return security_syslog(type);
}

static void append_char(char **pp, char *e, char c)
{
	if (*pp < e)
		*(*pp)++ = c;
}

static ssize_t info_print_ext_header(char *buf, size_t size,
				     struct printk_info *info)
{
	u64 ts_usec = info->ts_nsec;
	char caller[20];
#ifdef CONFIG_PRINTK_CALLER
	u32 id = info->caller_id;

	snprintf(caller, sizeof(caller), ",caller=%c%u",
		 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
#else
	caller[0] = '\0';
#endif

	do_div(ts_usec, 1000);

	return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
			 (info->facility << 3) | info->level, info->seq,
			 ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller);
}

static ssize_t msg_add_ext_text(char *buf, size_t size,
				const char *text, size_t text_len,
				unsigned char endc)
{
	char *p = buf, *e = buf + size;
	size_t i;

	/* escape non-printable characters */
	for (i = 0; i < text_len; i++) {
		unsigned char c = text[i];

		if (c < ' ' || c >= 127 || c == '\\')
			p += scnprintf(p, e - p, "\\x%02x", c);
		else
			append_char(&p, e, c);
	}
	append_char(&p, e, endc);

	return p - buf;
}
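
/*
 * Example (illustrative): for the 3-byte text "a", '\n', "b" and endc '\n',
 * msg_add_ext_text() emits the escaped form "a\x0ab" plus the trailing
 * newline, because 0x0a is below ' ' and is therefore escaped.
 */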

static ssize_t msg_add_dict_text(char *buf, size_t size,
				 const char *key, const char *val)
{
	size_t val_len = strlen(val);
	ssize_t len;

	if (!val_len)
		return 0;

	len = msg_add_ext_text(buf, size, "", 0, ' ');	/* dict prefix */
	len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '=');
	len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n');

	return len;
}

static ssize_t msg_print_ext_body(char *buf, size_t size,
				  char *text, size_t text_len,
				  struct dev_printk_info *dev_info)
{
	ssize_t len;

	len = msg_add_ext_text(buf, size, text, text_len, '\n');

	if (!dev_info)
		goto out;

	len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM",
				 dev_info->subsystem);
	len += msg_add_dict_text(buf + len, size - len, "DEVICE",
				 dev_info->device);
out:
	return len;
}

/* /dev/kmsg - userspace message inject/listen interface */
struct devkmsg_user {
	atomic64_t seq;
	struct ratelimit_state rs;
	struct mutex lock;
	struct printk_buffers pbufs;
};

static __printf(3, 4) __cold
int devkmsg_emit(int facility, int level, const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vprintk_emit(facility, level, NULL, fmt, args);
	va_end(args);

	return r;
}

static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
{
	char *buf, *line;
	int level = default_message_loglevel;
	int facility = 1;	/* LOG_USER */
	struct file *file = iocb->ki_filp;
	struct devkmsg_user *user = file->private_data;
	size_t len = iov_iter_count(from);
	ssize_t ret = len;

	if (len > PRINTKRB_RECORD_MAX)
		return -EINVAL;

	/* Ignore when user logging is disabled. */
	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
		return len;

	/* Ratelimit when not explicitly enabled. */
	if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
		if (!___ratelimit(&user->rs, current->comm))
			return ret;
	}

	buf = kmalloc(len+1, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	buf[len] = '\0';
	if (!copy_from_iter_full(buf, len, from)) {
		kfree(buf);
		return -EFAULT;
	}

	/*
	 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
	 * the decimal value represents a 32-bit quantity; the lower 3 bits
	 * are the log level, the rest is the log facility.
	 *
	 * If no prefix or no userspace facility is specified, we
	 * enforce LOG_USER, to be able to reliably distinguish
	 * kernel-generated messages from userspace-injected ones.
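	 *
	 * Example (illustrative): a write of "<12>example" is stored with
	 * level 4 (12 & 7) and facility 1 (12 >> 3, LOG_USER), while a plain
	 * "example" gets default_message_loglevel and LOG_USER.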
	 */
	line = buf;
	if (line[0] == '<') {
		char *endp = NULL;
		unsigned int u;

		u = simple_strtoul(line + 1, &endp, 10);
		if (endp && endp[0] == '>') {
			level = LOG_LEVEL(u);
			if (LOG_FACILITY(u) != 0)
				facility = LOG_FACILITY(u);
			endp++;
			line = endp;
		}
	}

	devkmsg_emit(facility, level, "%s", line);
	kfree(buf);
	return ret;
}

static ssize_t devkmsg_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct devkmsg_user *user = file->private_data;
	char *outbuf = &user->pbufs.outbuf[0];
	struct printk_message pmsg = {
		.pbufs = &user->pbufs,
	};
	ssize_t ret;

	ret = mutex_lock_interruptible(&user->lock);
	if (ret)
		return ret;

	if (!printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, false)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}

		/*
		 * Guarantee this task is visible on the waitqueue before
		 * checking the wake condition.
		 *
		 * The full memory barrier within set_current_state() of
		 * prepare_to_wait_event() pairs with the full memory barrier
		 * within wq_has_sleeper().
		 *
		 * This pairs with __wake_up_klogd:A.
		 */
		ret = wait_event_interruptible(log_wait,
				printk_get_next_message(&pmsg, atomic64_read(&user->seq), true,
							false)); /* LMM(devkmsg_read:A) */
		if (ret)
			goto out;
	}

	if (pmsg.dropped) {
		/* our last seen message is gone, return error and reset */
		atomic64_set(&user->seq, pmsg.seq);
		ret = -EPIPE;
		goto out;
	}

	atomic64_set(&user->seq, pmsg.seq + 1);

	if (pmsg.outbuf_len > count) {
		ret = -EINVAL;
		goto out;
	}

	if (copy_to_user(buf, outbuf, pmsg.outbuf_len)) {
		ret = -EFAULT;
		goto out;
	}
	ret = pmsg.outbuf_len;
out:
	mutex_unlock(&user->lock);
	return ret;
}

/*
 * Be careful when modifying this function!!!
 *
 * Only a few operations are supported because the device works only with
 * entire variable-length messages (records). Non-standard values are
 * returned in the other cases, and it has been this way for quite some time.
 * User space applications might depend on this behavior.
 */
static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
{
	struct devkmsg_user *user = file->private_data;
	loff_t ret = 0;

	if (offset)
		return -ESPIPE;

	switch (whence) {
	case SEEK_SET:
		/* the first record */
		atomic64_set(&user->seq, prb_first_valid_seq(prb));
		break;
	case SEEK_DATA:
		/*
		 * The first record after the last SYSLOG_ACTION_CLEAR,
		 * as issued by 'dmesg -c'. Reading /dev/kmsg itself
		 * changes no global state, and does not clear anything.
		 */
		atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
		break;
	case SEEK_END:
		/* after the last record */
		atomic64_set(&user->seq, prb_next_seq(prb));
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
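
/*
 * Usage sketch (illustrative, userspace): lseek(fd, 0, SEEK_SET) rewinds to
 * the oldest record, lseek(fd, 0, SEEK_DATA) to the first record after the
 * last SYSLOG_ACTION_CLEAR, and lseek(fd, 0, SEEK_END) past the newest
 * record; any non-zero offset fails with -ESPIPE.
 */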

static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
{
	struct devkmsg_user *user = file->private_data;
	struct printk_info info;
	__poll_t ret = 0;

	poll_wait(file, &log_wait, wait);

	if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
		/* return error when data has vanished underneath us */
		if (info.seq != atomic64_read(&user->seq))
			ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
		else
			ret = EPOLLIN|EPOLLRDNORM;
	}

	return ret;
}

static int devkmsg_open(struct inode *inode, struct file *file)
{
	struct devkmsg_user *user;
	int err;

	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
		return -EPERM;

	/* write-only does not need any file context */
	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
					       SYSLOG_FROM_READER);
		if (err)
			return err;
	}

	user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
	if (!user)
		return -ENOMEM;

	ratelimit_default_init(&user->rs);
	ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);

	mutex_init(&user->lock);

	atomic64_set(&user->seq, prb_first_valid_seq(prb));

	file->private_data = user;
	return 0;
}

static int devkmsg_release(struct inode *inode, struct file *file)
{
	struct devkmsg_user *user = file->private_data;

	ratelimit_state_exit(&user->rs);

	mutex_destroy(&user->lock);
	kvfree(user);
	return 0;
}

const struct file_operations kmsg_fops = {
	.open = devkmsg_open,
	.read = devkmsg_read,
	.write_iter = devkmsg_write,
	.llseek = devkmsg_llseek,
	.poll = devkmsg_poll,
	.release = devkmsg_release,
};

#ifdef CONFIG_VMCORE_INFO
/*
 * This appends the listed symbols to /proc/vmcore
 *
 * /proc/vmcore is used by various utilities, like crash and makedumpfile to
 * obtain access to symbols that are otherwise very difficult to locate. These
 * symbols are specifically used so that utilities can access and extract the
 * dmesg log from a vmcore file after a crash.
 */
void log_buf_vmcoreinfo_setup(void)
{
	struct dev_printk_info *dev_info = NULL;

	VMCOREINFO_SYMBOL(prb);
	VMCOREINFO_SYMBOL(printk_rb_static);
	VMCOREINFO_SYMBOL(clear_seq);

	/*
	 * Export struct size and field offsets. User space tools can
	 * parse it and detect any changes to structure down the line.
	 */

	VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
	VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
	VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
	VMCOREINFO_OFFSET(printk_ringbuffer, fail);

	VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
	VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
	VMCOREINFO_OFFSET(prb_desc_ring, descs);
	VMCOREINFO_OFFSET(prb_desc_ring, infos);
	VMCOREINFO_OFFSET(prb_desc_ring, head_id);
	VMCOREINFO_OFFSET(prb_desc_ring, tail_id);

	VMCOREINFO_STRUCT_SIZE(prb_desc);
	VMCOREINFO_OFFSET(prb_desc, state_var);
	VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);

	VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
	VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
	VMCOREINFO_OFFSET(prb_data_blk_lpos, next);

	VMCOREINFO_STRUCT_SIZE(printk_info);
	VMCOREINFO_OFFSET(printk_info, seq);
	VMCOREINFO_OFFSET(printk_info, ts_nsec);
	VMCOREINFO_OFFSET(printk_info, text_len);
	VMCOREINFO_OFFSET(printk_info, caller_id);
	VMCOREINFO_OFFSET(printk_info, dev_info);

	VMCOREINFO_STRUCT_SIZE(dev_printk_info);
	VMCOREINFO_OFFSET(dev_printk_info, subsystem);
	VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem));
	VMCOREINFO_OFFSET(dev_printk_info, device);
	VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device));

	VMCOREINFO_STRUCT_SIZE(prb_data_ring);
	VMCOREINFO_OFFSET(prb_data_ring, size_bits);
	VMCOREINFO_OFFSET(prb_data_ring, data);
	VMCOREINFO_OFFSET(prb_data_ring, head_lpos);
	VMCOREINFO_OFFSET(prb_data_ring, tail_lpos);

	VMCOREINFO_SIZE(atomic_long_t);
	VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);

	VMCOREINFO_STRUCT_SIZE(latched_seq);
	VMCOREINFO_OFFSET(latched_seq, val);
}
#endif

/* requested log_buf_len from kernel cmdline */
static unsigned long __initdata new_log_buf_len;

/* we practice scaling the ring buffer by powers of 2 */
static void __init log_buf_len_update(u64 size)
{
	if (size > (u64)LOG_BUF_LEN_MAX) {
		size = (u64)LOG_BUF_LEN_MAX;
		pr_err("log_buf over 2G is not supported.\n");
	}

	if (size)
		size = roundup_pow_of_two(size);
	if (size > log_buf_len)
		new_log_buf_len = (unsigned long)size;
}

/* save requested log_buf_len since it's too early to process it */
static int __init log_buf_len_setup(char *str)
{
	u64 size;

	if (!str)
		return -EINVAL;

	size = memparse(str, &str);

	log_buf_len_update(size);

	return 0;
}
early_param("log_buf_len", log_buf_len_setup);
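
/*
 * Usage sketch (illustrative): "log_buf_len=1M" on the kernel command line
 * requests a 2^20-byte buffer; a non-power-of-two request such as
 * "log_buf_len=100000" is rounded up by log_buf_len_update() to 131072.
 * A request is only recorded when it is larger than the compile-time size.
 */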

#ifdef CONFIG_SMP
#define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)

static void __init log_buf_add_cpu(void)
{
	unsigned int cpu_extra;

	/*
	 * archs should set up cpu_possible_bits properly with
	 * set_cpu_possible() after setup_arch() but just in
	 * case let's ensure this is valid.
	 */
	if (num_possible_cpus() == 1)
		return;

	cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;

	/* by default this will only continue through for large systems with > 64 CPUs */
	if (cpu_extra <= __LOG_BUF_LEN / 2)
		return;

	pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
		__LOG_CPU_MAX_BUF_LEN);
	pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
		cpu_extra);
	pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);

	log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
}
#else /* !CONFIG_SMP */
static inline void log_buf_add_cpu(void) {}
#endif /* CONFIG_SMP */

static void __init set_percpu_data_ready(void)
{
	__printk_percpu_data_ready = true;
}

static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
				     struct printk_record *r)
{
	struct prb_reserved_entry e;
	struct printk_record dest_r;

	prb_rec_init_wr(&dest_r, r->info->text_len);

	if (!prb_reserve(&e, rb, &dest_r))
		return 0;

	memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len);
	dest_r.info->text_len = r->info->text_len;
	dest_r.info->facility = r->info->facility;
	dest_r.info->level = r->info->level;
	dest_r.info->flags = r->info->flags;
	dest_r.info->ts_nsec = r->info->ts_nsec;
	dest_r.info->caller_id = r->info->caller_id;
	memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info));

	prb_final_commit(&e);

	return prb_record_text_space(&e);
}

static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata;

static void print_log_buf_usage_stats(void)
{
	unsigned int descs_count = log_buf_len >> PRB_AVGBITS;
	size_t meta_data_size;

	meta_data_size = descs_count * (sizeof(struct prb_desc) + sizeof(struct printk_info));

	pr_info("log buffer data + meta data: %u + %zu = %zu bytes\n",
		log_buf_len, meta_data_size, log_buf_len + meta_data_size);
}

void __init setup_log_buf(int early)
{
	struct printk_info *new_infos;
	unsigned int new_descs_count;
	struct prb_desc *new_descs;
	struct printk_info info;
	struct printk_record r;
	unsigned int text_size;
	size_t new_descs_size;
	size_t new_infos_size;
	unsigned long flags;
	char *new_log_buf;
	unsigned int free;
	u64 seq;

	/*
	 * Some archs call setup_log_buf() multiple times - first is very
	 * early, e.g. from setup_arch(), and second - when percpu_areas
	 * are initialised.
	 */
	if (!early)
		set_percpu_data_ready();

	if (log_buf != __log_buf)
		return;

	if (!early && !new_log_buf_len)
		log_buf_add_cpu();

	if (!new_log_buf_len) {
		/* Show the memory stats only once. */
		if (!early)
			goto out;

		return;
	}

	new_descs_count = new_log_buf_len >> PRB_AVGBITS;
	if (new_descs_count == 0) {
		pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
		goto out;
	}

	new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
	if (unlikely(!new_log_buf)) {
		pr_err("log_buf_len: %lu text bytes not available\n",
		       new_log_buf_len);
		goto out;
	}

	new_descs_size = new_descs_count * sizeof(struct prb_desc);
	new_descs = memblock_alloc(new_descs_size, LOG_ALIGN);
	if (unlikely(!new_descs)) {
		pr_err("log_buf_len: %zu desc bytes not available\n",
		       new_descs_size);
		goto err_free_log_buf;
	}

	new_infos_size = new_descs_count * sizeof(struct printk_info);
	new_infos = memblock_alloc(new_infos_size, LOG_ALIGN);
	if (unlikely(!new_infos)) {
		pr_err("log_buf_len: %zu info bytes not available\n",
		       new_infos_size);
		goto err_free_descs;
	}

	prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf));

	prb_init(&printk_rb_dynamic,
		 new_log_buf, ilog2(new_log_buf_len),
		 new_descs, ilog2(new_descs_count),
		 new_infos);

	local_irq_save(flags);

	log_buf_len = new_log_buf_len;
	log_buf = new_log_buf;
	new_log_buf_len = 0;

	free = __LOG_BUF_LEN;
	prb_for_each_record(0, &printk_rb_static, seq, &r) {
		text_size = add_to_rb(&printk_rb_dynamic, &r);
		if (text_size > free)
			free = 0;
		else
			free -= text_size;
	}

	prb = &printk_rb_dynamic;

	local_irq_restore(flags);

	/*
	 * Copy any remaining messages that might have appeared from
	 * NMI context after copying but before switching to the
	 * dynamic buffer.
	 */
	prb_for_each_record(seq, &printk_rb_static, seq, &r) {
		text_size = add_to_rb(&printk_rb_dynamic, &r);
		if (text_size > free)
			free = 0;
		else
			free -= text_size;
	}

	if (seq != prb_next_seq(&printk_rb_static)) {
		pr_err("dropped %llu messages\n",
		       prb_next_seq(&printk_rb_static) - seq);
	}

	print_log_buf_usage_stats();
	pr_info("early log buf free: %u(%u%%)\n",
		free, (free * 100) / __LOG_BUF_LEN);
	return;

err_free_descs:
	memblock_free(new_descs, new_descs_size);
err_free_log_buf:
	memblock_free(new_log_buf, new_log_buf_len);
out:
	print_log_buf_usage_stats();
}

static bool __read_mostly ignore_loglevel;

static int __init ignore_loglevel_setup(char *str)
{
	ignore_loglevel = true;
	pr_info("debug: ignoring loglevel setting.\n");

	return 0;
}

early_param("ignore_loglevel", ignore_loglevel_setup);
module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ignore_loglevel,
		 "ignore loglevel setting (prints all kernel messages to the console)");

static bool suppress_message_printing(int level)
{
	return (level >= console_loglevel && !ignore_loglevel);
}
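
/*
 * Worked example (illustrative): with console_loglevel at the common
 * default of 7, a level 7 (KERN_DEBUG) message satisfies
 * "level >= console_loglevel" and is suppressed, while level 6 (KERN_INFO)
 * and lower still reach the console, unless "ignore_loglevel" was given.
 */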

#ifdef CONFIG_BOOT_PRINTK_DELAY

static int boot_delay; /* msecs delay after each printk during bootup */
static unsigned long long loops_per_msec;	/* based on boot_delay */

static int __init boot_delay_setup(char *str)
{
	unsigned long lpj;

	lpj = preset_lpj ? preset_lpj : 1000000;	/* some guess */
	loops_per_msec = (unsigned long long)lpj / 1000 * HZ;

	get_option(&str, &boot_delay);
	if (boot_delay > 10 * 1000)
		boot_delay = 0;

	pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
		"HZ: %d, loops_per_msec: %llu\n",
		boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
	return 0;
}
early_param("boot_delay", boot_delay_setup);

static void boot_delay_msec(int level)
{
	unsigned long long k;
	unsigned long timeout;
	bool suppress = !is_printk_force_console() &&
			suppress_message_printing(level);

	if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING) || suppress)
		return;

	k = (unsigned long long)loops_per_msec * boot_delay;

	timeout = jiffies + msecs_to_jiffies(boot_delay);
	while (k) {
		k--;
		cpu_relax();
		/*
		 * use (volatile) jiffies to prevent
		 * compiler reduction; loop termination via jiffies
		 * is secondary and may or may not happen.
		 */
		if (time_after(jiffies, timeout))
			break;
		touch_nmi_watchdog();
	}
}
#else
static inline void boot_delay_msec(int level)
{
}
#endif

static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);

static size_t print_syslog(unsigned int level, char *buf)
{
	return sprintf(buf, "<%u>", level);
}

static size_t print_time(u64 ts, char *buf)
{
	unsigned long rem_nsec = do_div(ts, 1000000000);

	return sprintf(buf, "[%5lu.%06lu]",
		       (unsigned long)ts, rem_nsec / 1000);
}

#ifdef CONFIG_PRINTK_CALLER
static size_t print_caller(u32 id, char *buf)
{
	char caller[12];

	snprintf(caller, sizeof(caller), "%c%u",
		 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
	return sprintf(buf, "[%6s]", caller);
}
#else
#define print_caller(id, buf) 0
#endif
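
/*
 * Illustration of the prefix pieces built above (not from the original
 * source): print_time() renders e.g. 5123456789 ns as "[    5.123456]",
 * and with CONFIG_PRINTK_CALLER enabled print_caller() renders caller id
 * 299 as "[  T299]", or "[    C3]" for CPU 3 when not in task context.
 */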

static size_t info_print_prefix(const struct printk_info *info, bool syslog,
				bool time, char *buf)
{
	size_t len = 0;

	if (syslog)
		len = print_syslog((info->facility << 3) | info->level, buf);

	if (time)
		len += print_time(info->ts_nsec, buf + len);

	len += print_caller(info->caller_id, buf + len);

	if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) {
		buf[len++] = ' ';
		buf[len] = '\0';
	}

	return len;
}

/*
 * Prepare the record for printing. The text is shifted within the given
 * buffer to avoid a need for another one. The following operations are
 * done:
 *
 *   - Add prefix for each line.
 *   - Drop truncated lines that no longer fit into the buffer.
 *   - Add the trailing newline that has been removed in vprintk_store().
 *   - Add a string terminator.
 *
 * Since the produced string is always terminated, the maximum possible
 * return value is @r->text_buf_size - 1;
 *
 * Return: The length of the updated/prepared text, including the added
 * prefixes and the newline. The terminator is not counted. The dropped
 * line(s) are not counted.
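 *
 * Example (illustrative): with syslog and time prefixes enabled, the
 * two-line record "first\nsecond" is rewritten in place to
 *   "<6>[    5.123456] first\n<6>[    5.123456] second\n"
 * and the returned length counts everything except the terminator.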
 */
static size_t record_print_text(struct printk_record *r, bool syslog,
				bool time)
{
	size_t text_len = r->info->text_len;
	size_t buf_size = r->text_buf_size;
	char *text = r->text_buf;
	char prefix[PRINTK_PREFIX_MAX];
	bool truncated = false;
	size_t prefix_len;
	size_t line_len;
	size_t len = 0;
	char *next;

	/*
	 * If the message was truncated because the buffer was not large
	 * enough, treat the available text as if it were the full text.
	 */
	if (text_len > buf_size)
		text_len = buf_size;

	prefix_len = info_print_prefix(r->info, syslog, time, prefix);

	/*
	 * @text_len: bytes of unprocessed text
	 * @line_len: bytes of current line _without_ newline
	 * @text:     pointer to beginning of current line
	 * @len:      number of bytes prepared in r->text_buf
	 */
	for (;;) {
		next = memchr(text, '\n', text_len);
		if (next) {
			line_len = next - text;
		} else {
			/* Drop truncated line(s). */
			if (truncated)
				break;
			line_len = text_len;
		}

		/*
		 * Truncate the text if there is not enough space to add the
		 * prefix and a trailing newline and a terminator.
		 */
		if (len + prefix_len + text_len + 1 + 1 > buf_size) {
			/* Drop even the current line if no space. */
			if (len + prefix_len + line_len + 1 + 1 > buf_size)
				break;

			text_len = buf_size - len - prefix_len - 1 - 1;
			truncated = true;
		}

		memmove(text + prefix_len, text, text_len);
		memcpy(text, prefix, prefix_len);

		/*
		 * Increment the prepared length to include the text and
		 * prefix that were just moved+copied. Also increment for the
		 * newline at the end of this line. If this is the last line,
		 * there is no newline, but it will be added immediately below.
		 */
		len += prefix_len + line_len + 1;
		if (text_len == line_len) {
			/*
			 * This is the last line. Add the trailing newline
			 * removed in vprintk_store().
			 */
			text[prefix_len + line_len] = '\n';
			break;
		}

		/*
		 * Advance beyond the added prefix and the related line with
		 * its newline.
		 */
		text += prefix_len + line_len + 1;

		/*
		 * The remaining text has only decreased by the line with its
		 * newline.
		 *
		 * Note that @text_len can become zero. It happens when @text
		 * ended with a newline (either due to truncation or the
		 * original string ending with "\n\n"). The loop is correctly
		 * repeated and (if not truncated) an empty line with a prefix
		 * will be prepared.
		 */
		text_len -= line_len + 1;
	}

	/*
	 * If a buffer was provided, it will be terminated. Space for the
	 * string terminator is guaranteed to be available. The terminator is
	 * not counted in the return value.
	 */
	if (buf_size > 0)
		r->text_buf[len] = 0;

	return len;
}

static size_t get_record_print_text_size(struct printk_info *info,
					 unsigned int line_count,
					 bool syslog, bool time)
{
	char prefix[PRINTK_PREFIX_MAX];
	size_t prefix_len;

	prefix_len = info_print_prefix(info, syslog, time, prefix);

	/*
	 * Each line will be preceded with a prefix. The intermediate
	 * newlines are already within the text, but a final trailing
	 * newline will be added.
	 */
	return ((prefix_len * line_count) + info->text_len + 1);
}

/*
 * Beginning with @start_seq, find the first record where it and all following
 * records up to (but not including) @max_seq fit into @size.
 *
 * @max_seq is simply an upper bound and does not need to exist. If the caller
 * does not require an upper bound, -1 can be used for @max_seq.
 */
static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
				  bool syslog, bool time)
{
	struct printk_info info;
	unsigned int line_count;
	size_t len = 0;
	u64 seq;

	/* Determine the size of the records up to @max_seq. */
	prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
		if (info.seq >= max_seq)
			break;
		len += get_record_print_text_size(&info, line_count, syslog, time);
	}

	/*
	 * Adjust the upper bound for the next loop to avoid subtracting
	 * lengths that were never added.
	 */
	if (seq < max_seq)
		max_seq = seq;

	/*
	 * Move first record forward until length fits into the buffer. Ignore
	 * newest messages that were not counted in the above cycle. Messages
	 * might appear and get lost in the meantime. This is a best effort
	 * that prevents an infinite loop that could occur with a retry.
	 */
	prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
		if (len <= size || info.seq >= max_seq)
			break;
		len -= get_record_print_text_size(&info, line_count, syslog, time);
	}

	return seq;
}

/* The caller is responsible for making sure @size is greater than 0. */
static int syslog_print(char __user *buf, int size)
{
	struct printk_info info;
	struct printk_record r;
	char *text;
	int len = 0;
	u64 seq;

	text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
	if (!text)
		return -ENOMEM;

	prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);

	mutex_lock(&syslog_lock);

	/*
	 * Wait for the @syslog_seq record to be available. @syslog_seq may
	 * change while waiting.
	 */
	do {
		seq = syslog_seq;

		mutex_unlock(&syslog_lock);
		/*
		 * Guarantee this task is visible on the waitqueue before
		 * checking the wake condition.
		 *
		 * The full memory barrier within set_current_state() of
		 * prepare_to_wait_event() pairs with the full memory barrier
		 * within wq_has_sleeper().
		 *
		 * This pairs with __wake_up_klogd:A.
		 */
		len = wait_event_interruptible(log_wait,
				prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */
		mutex_lock(&syslog_lock);

		if (len)
			goto out;
	} while (syslog_seq != seq);

	/*
	 * Copy records that fit into the buffer. The above cycle makes sure
	 * that the first record is always available.
	 */
	do {
		size_t n;
		size_t skip;
		int err;

		if (!prb_read_valid(prb, syslog_seq, &r))
			break;

		if (r.info->seq != syslog_seq) {
			/* message is gone, move to next valid one */
			syslog_seq = r.info->seq;
			syslog_partial = 0;
		}

		/*
		 * To keep reading/counting partial line consistent,
		 * use printk_time value as of the beginning of a line.
		 */
		if (!syslog_partial)
			syslog_time = printk_time;

		skip = syslog_partial;
		n = record_print_text(&r, true, syslog_time);
		if (n - syslog_partial <= size) {
			/* message fits into buffer, move forward */
			syslog_seq = r.info->seq + 1;
			n -= syslog_partial;
			syslog_partial = 0;
		} else if (!len) {
			/* partial read(), remember position */
			n = size;
			syslog_partial += n;
		} else
			n = 0;

		if (!n)
			break;

		mutex_unlock(&syslog_lock);
		err = copy_to_user(buf, text + skip, n);
		mutex_lock(&syslog_lock);

		if (err) {
			if (!len)
				len = -EFAULT;
			break;
		}

		len += n;
		size -= n;
		buf += n;
	} while (size);
out:
	mutex_unlock(&syslog_lock);
	kfree(text);
	return len;
}

static int syslog_print_all(char __user *buf, int size, bool clear)
{
	struct printk_info info;
	struct printk_record r;
	char *text;
	int len = 0;
	u64 seq;
	bool time;

	text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
	if (!text)
		return -ENOMEM;

	time = printk_time;
	/*
	 * Find first record that fits, including all following records,
	 * into the user-provided buffer for this dump.
	 */
	seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1,
				     size, true, time);

	prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);

	prb_for_each_record(seq, prb, seq, &r) {
		int textlen;

		textlen = record_print_text(&r, true, time);

		if (len + textlen > size) {
			seq--;
			break;
		}

		if (copy_to_user(buf + len, text, textlen))
			len = -EFAULT;
		else
			len += textlen;

		if (len < 0)
			break;
	}

	if (clear) {
		mutex_lock(&syslog_lock);
		latched_seq_write(&clear_seq, seq);
		mutex_unlock(&syslog_lock);
	}

	kfree(text);
	return len;
}

static void syslog_clear(void)
{
	mutex_lock(&syslog_lock);
	latched_seq_write(&clear_seq, prb_next_seq(prb));
	mutex_unlock(&syslog_lock);
}

int do_syslog(int type, char __user *buf, int len, int source)
{
	struct printk_info info;
	bool clear = false;
	static int saved_console_loglevel = LOGLEVEL_DEFAULT;
	int error;

	error = check_syslog_permissions(type, source);
	if (error)
		return error;

	switch (type) {
	case SYSLOG_ACTION_CLOSE:	/* Close log */
		break;
	case SYSLOG_ACTION_OPEN:	/* Open log */
		break;
	case SYSLOG_ACTION_READ:	/* Read from log */
		if (!buf || len < 0)
			return -EINVAL;
		if (!len)
			return 0;
		if (!access_ok(buf, len))
			return -EFAULT;
		error = syslog_print(buf, len);
		break;
	/* Read/clear last kernel messages */
	case SYSLOG_ACTION_READ_CLEAR:
		clear = true;
		fallthrough;
	/* Read last kernel messages */
	case SYSLOG_ACTION_READ_ALL:
		if (!buf || len < 0)
			return -EINVAL;
		if (!len)
			return 0;
		if (!access_ok(buf, len))
			return -EFAULT;
		error = syslog_print_all(buf, len, clear);
		break;
	/* Clear ring buffer */
	case SYSLOG_ACTION_CLEAR:
		syslog_clear();
		break;
	/* Disable logging to console */
	case SYSLOG_ACTION_CONSOLE_OFF:
		if (saved_console_loglevel == LOGLEVEL_DEFAULT)
			saved_console_loglevel = console_loglevel;
		console_loglevel = minimum_console_loglevel;
		break;
	/* Enable logging to console */
	case SYSLOG_ACTION_CONSOLE_ON:
		if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
			console_loglevel = saved_console_loglevel;
			saved_console_loglevel = LOGLEVEL_DEFAULT;
		}
		break;
	/* Set level of messages printed to console */
	case SYSLOG_ACTION_CONSOLE_LEVEL:
		if (len < 1 || len > 8)
			return -EINVAL;
		if (len < minimum_console_loglevel)
			len = minimum_console_loglevel;
		console_loglevel = len;
		/* Implicitly re-enable logging to console */
		saved_console_loglevel = LOGLEVEL_DEFAULT;
		break;
	/* Number of chars in the log buffer */
	case SYSLOG_ACTION_SIZE_UNREAD:
		mutex_lock(&syslog_lock);
		if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
			/* No unread messages. */
			mutex_unlock(&syslog_lock);
			return 0;
		}
		if (info.seq != syslog_seq) {
			/* messages are gone, move to first one */
			syslog_seq = info.seq;
			syslog_partial = 0;
		}
		if (source == SYSLOG_FROM_PROC) {
			/*
			 * Short-cut for poll(/"proc/kmsg") which simply checks
			 * for pending data, not the size; return the count of
			 * records, not the length.
			 */
			error = prb_next_seq(prb) - syslog_seq;
		} else {
			bool time = syslog_partial ? syslog_time : printk_time;
			unsigned int line_count;
			u64 seq;

			prb_for_each_info(syslog_seq, prb, seq, &info,
					  &line_count) {
				error += get_record_print_text_size(&info, line_count,
								    true, time);
				time = printk_time;
			}
			error -= syslog_partial;
		}
		mutex_unlock(&syslog_lock);
		break;
	/* Size of the log buffer */
	case SYSLOG_ACTION_SIZE_BUFFER:
		error = log_buf_len;
		break;
	default:
		error = -EINVAL;
		break;
	}

	return error;
}

SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
{
	return do_syslog(type, buf, len, SYSLOG_FROM_READER);
}

/*
 * Special console_lock variants that help to reduce the risk of soft-lockups.
 * They allow the console_lock to be handed over to another printk() call
 * using a busy wait.
 */

#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_owner_dep_map = {
	.name = "console_owner"
};
#endif
1863
1864 static DEFINE_RAW_SPINLOCK(console_owner_lock);
1865 static struct task_struct *console_owner;
1866 static bool console_waiter;
1867
1868 /**
1869 * console_lock_spinning_enable - mark beginning of code where another
1870 * thread might safely busy wait
1871 *
1872 * This basically converts console_lock into a spinlock. It marks
1873 * the section where the console_lock owner cannot sleep, because
1874 * there may be a waiter spinning (like a spinlock). The owner must
1875 * also be ready to hand over the lock at the end of the section.
1876 */
1877 void console_lock_spinning_enable(void)
1878 {
1879 /*
1880 * Do not use spinning in panic(). The panic CPU wants to keep the lock.
1881 * Non-panic CPUs abandon the flush anyway.
1882 *
1883 * Just keep the lockdep annotation. The panic CPU should avoid
1884 * taking console_owner_lock because it might cause a deadlock.
1885 * This looks like the easiest way to prevent false lockdep
1886 * reports without handling the races in a lockless way.
1887 */
1888 if (panic_in_progress())
1889 goto lockdep;
1890
1891 raw_spin_lock(&console_owner_lock);
1892 console_owner = current;
1893 raw_spin_unlock(&console_owner_lock);
1894
1895 lockdep:
1896 /* The waiter may spin on us after setting console_owner */
1897 spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
1898 }
1899
1900 /**
1901 * console_lock_spinning_disable_and_check - mark end of code where another
1902 * thread was able to busy wait and check if there is a waiter
1903 * @cookie: cookie returned from console_srcu_read_lock()
1904 *
1905 * This is called at the end of the section where spinning is allowed.
1906 * It has two functions. First, it is a signal that it is no longer
1907 * safe to start busy waiting for the lock. Second, it checks if
1908 * there is a busy waiter and passes the lock rights to that waiter.
1909 *
1910 * Important: Callers lose both the console_lock and the SRCU read lock if
1911 * there was a busy waiter. They must not touch items synchronized by
1912 * console_lock or SRCU read lock in this case.
1913 *
1914 * Return: 1 if the lock rights were passed, 0 otherwise.
1915 */
1916 int console_lock_spinning_disable_and_check(int cookie)
1917 {
1918 int waiter;
1919
1920 /*
1921 * Ignore spinning waiters during panic() because they might get stopped
1922 * or blocked at any time.
1923 *
1924 * It is safe because nobody is allowed to start spinning during panic
1925 * in the first place. If there has been a waiter then non-panic CPUs
1926 * might stay spinning. They would get stopped anyway. The panic context
1927 * will never start spinning and an interrupted spin on the panic CPU will
1928 * never continue.
1929 */
1930 if (panic_in_progress()) {
1931 /* Keep lockdep happy. */
1932 spin_release(&console_owner_dep_map, _THIS_IP_);
1933 return 0;
1934 }
1935
1936 raw_spin_lock(&console_owner_lock);
1937 waiter = READ_ONCE(console_waiter);
1938 console_owner = NULL;
1939 raw_spin_unlock(&console_owner_lock);
1940
1941 if (!waiter) {
1942 spin_release(&console_owner_dep_map, _THIS_IP_);
1943 return 0;
1944 }
1945
1946 /* The waiter is now free to continue */
1947 WRITE_ONCE(console_waiter, false);
1948
1949 spin_release(&console_owner_dep_map, _THIS_IP_);
1950
1951 /*
1952 * Preserve lockdep lock ordering. Release the SRCU read lock before
1953 * releasing the console_lock.
1954 */
1955 console_srcu_read_unlock(cookie);
1956
1957 /*
1958 * Hand off console_lock to waiter. The waiter will perform
1959 * the up(). After this, the waiter is the console_lock owner.
1960 */
1961 mutex_release(&console_lock_dep_map, _THIS_IP_);
1962 return 1;
1963 }
1964
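/*
 * A minimal sketch (illustrative only) of how the two helpers above
 * bracket a non-sleeping output section on the owner side:
 *
 *	console_lock_spinning_enable();
 *	con->write(con, outbuf, len);
 *	if (console_lock_spinning_disable_and_check(cookie))
 *		return;
 *
 * The owner must not sleep between the two calls. When the check
 * returns 1, a waiter has taken over console_lock and the owner must
 * not touch anything synchronized by it. See console_emit_next_record()
 * below for the real call site.
 */
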
1965 /**
1966 * console_trylock_spinning - try to get console_lock by busy waiting
1967 *
1968 * This allows busy waiting for the console_lock when the current
1969 * owner is running in specially marked sections. It means that
1970 * the current owner is running and cannot reschedule until it
1971 * is ready to lose the lock.
1972 *
1973 * Return: 1 if we got the lock, 0 otherwise
1974 */
1975 static int console_trylock_spinning(void)
1976 {
1977 struct task_struct *owner = NULL;
1978 bool waiter;
1979 bool spin = false;
1980 unsigned long flags;
1981
1982 if (console_trylock())
1983 return 1;
1984
1985 /*
1986 * It's unsafe to spin once a panic has begun. If we are the
1987 * panic CPU, we may have already halted the owner of the
1988 * console_sem. If we are not the panic CPU, then we should
1989 * avoid taking console_sem, so the panic CPU has a better
1990 * chance of cleanly acquiring it later.
1991 */
1992 if (panic_in_progress())
1993 return 0;
1994
1995 printk_safe_enter_irqsave(flags);
1996
1997 raw_spin_lock(&console_owner_lock);
1998 owner = READ_ONCE(console_owner);
1999 waiter = READ_ONCE(console_waiter);
2000 if (!waiter && owner && owner != current) {
2001 WRITE_ONCE(console_waiter, true);
2002 spin = true;
2003 }
2004 raw_spin_unlock(&console_owner_lock);
2005
2006 /*
2007 * If there is an active printk() writing to the
2008 * consoles, instead of having it write our data too,
2009 * see if we can offload that load from the active
2010 * printer, and do some printing ourselves.
2011 * Go into a spin only if there isn't already a waiter
2012 * spinning, and there is an active printer, and
2013 * that active printer isn't us (recursive printk?).
2014 */
2015 if (!spin) {
2016 printk_safe_exit_irqrestore(flags);
2017 return 0;
2018 }
2019
2020 /* We spin waiting for the owner to release us */
2021 spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
2022 /* Owner will clear console_waiter on hand off */
2023 while (READ_ONCE(console_waiter))
2024 cpu_relax();
2025 spin_release(&console_owner_dep_map, _THIS_IP_);
2026
2027 printk_safe_exit_irqrestore(flags);
2028 /*
2029 * The owner passed the console lock to us.
2030 * Since we did not spin on console lock, annotate
2031 * this as a trylock. Otherwise lockdep will
2032 * complain.
2033 */
2034 mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
2035
2036 /*
2037 * Update @console_may_schedule for trylock because the previous
2038 * owner may have been schedulable.
2039 */
2040 console_may_schedule = 0;
2041
2042 return 1;
2043 }
2044
2045 /*
2046 * Recursion is tracked separately on each CPU. If NMIs are supported, an
2047 * additional NMI context per CPU is also separately tracked. Until per-CPU
2048 * data is available, a separate "early tracking" is performed.
2049 */
2050 static DEFINE_PER_CPU(u8, printk_count);
2051 static u8 printk_count_early;
2052 #ifdef CONFIG_HAVE_NMI
2053 static DEFINE_PER_CPU(u8, printk_count_nmi);
2054 static u8 printk_count_nmi_early;
2055 #endif
2056
2057 /*
2058 * Recursion is limited to keep the output sane. printk() should not require
2059 * more than 1 level of recursion (allowing, for example, printk() to trigger
2060 * a WARN), but a higher value is used in case some printk-internal errors
2061 * exist, such as the ringbuffer validation checks failing.
2062 */
2063 #define PRINTK_MAX_RECURSION 3
2064
2065 /*
2066 * Return a pointer to the dedicated counter for the CPU+context of the
2067 * caller.
2068 */
2069 static u8 *__printk_recursion_counter(void)
2070 {
2071 #ifdef CONFIG_HAVE_NMI
2072 if (in_nmi()) {
2073 if (printk_percpu_data_ready())
2074 return this_cpu_ptr(&printk_count_nmi);
2075 return &printk_count_nmi_early;
2076 }
2077 #endif
2078 if (printk_percpu_data_ready())
2079 return this_cpu_ptr(&printk_count);
2080 return &printk_count_early;
2081 }
2082
2083 /*
2084 * Enter recursion tracking. Interrupts are disabled to simplify tracking.
2085 * The caller must check the boolean return value to see if the recursion is
2086 * allowed. On failure, interrupts are not disabled.
2087 *
2088 * @recursion_ptr must be a variable of type (u8 *) and is the same variable
2089 * that is passed to printk_exit_irqrestore().
2090 */
2091 #define printk_enter_irqsave(recursion_ptr, flags) \
2092 ({ \
2093 bool success = true; \
2094 \
2095 typecheck(u8 *, recursion_ptr); \
2096 local_irq_save(flags); \
2097 (recursion_ptr) = __printk_recursion_counter(); \
2098 if (*(recursion_ptr) > PRINTK_MAX_RECURSION) { \
2099 local_irq_restore(flags); \
2100 success = false; \
2101 } else { \
2102 (*(recursion_ptr))++; \
2103 } \
2104 success; \
2105 })
2106
2107 /* Exit recursion tracking, restoring interrupts. */
2108 #define printk_exit_irqrestore(recursion_ptr, flags) \
2109 do { \
2110 typecheck(u8 *, recursion_ptr); \
2111 (*(recursion_ptr))--; \
2112 local_irq_restore(flags); \
2113 } while (0)
2114
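/*
 * Typical pairing of the two macros above (a sketch mirroring their use
 * in vprintk_store() below):
 *
 *	u8 *recursion_ptr;
 *	unsigned long flags;
 *
 *	if (!printk_enter_irqsave(recursion_ptr, flags))
 *		return 0;
 *	...
 *	printk_exit_irqrestore(recursion_ptr, flags);
 *
 * When the enter macro fails, the recursion limit was hit and interrupts
 * were left untouched, so the exit macro must not be called.
 */
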
2115 int printk_delay_msec __read_mostly;
2116
2117 static inline void printk_delay(int level)
2118 {
2119 boot_delay_msec(level);
2120
2121 if (unlikely(printk_delay_msec)) {
2122 int m = printk_delay_msec;
2123
2124 while (m--) {
2125 mdelay(1);
2126 touch_nmi_watchdog();
2127 }
2128 }
2129 }
2130
2131 static inline u32 printk_caller_id(void)
2132 {
2133 return in_task() ? task_pid_nr(current) :
2134 0x80000000 + smp_processor_id();
2135 }
2136
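/*
 * For example, a printk() from a task with PID 1234 records a
 * caller_id of 1234, while a printk() from interrupt or NMI context
 * on CPU 2 records a caller_id of 0x80000002.
 */
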
2137 /**
2138 * printk_parse_prefix - Parse level and control flags.
2139 *
2140 * @text: The terminated text message.
2141 * @level: A pointer to the current level value, will be updated.
2142 * @flags: A pointer to the current printk_info flags, will be updated.
2143 *
2144 * @level may be NULL if the caller is not interested in the parsed value.
2145 * Otherwise the variable pointed to by @level must be set to
2146 * LOGLEVEL_DEFAULT in order to be updated with the parsed value.
2147 *
2148 * @flags may be NULL if the caller is not interested in the parsed value.
2149 * Otherwise the variable pointed to by @flags will be OR'd with the parsed
2150 * value.
2151 *
2152 * Return: The length of the parsed level and control flags.
2153 */
2154 u16 printk_parse_prefix(const char *text, int *level,
2155 enum printk_info_flags *flags)
2156 {
2157 u16 prefix_len = 0;
2158 int kern_level;
2159
2160 while (*text) {
2161 kern_level = printk_get_level(text);
2162 if (!kern_level)
2163 break;
2164
2165 switch (kern_level) {
2166 case '0' ... '7':
2167 if (level && *level == LOGLEVEL_DEFAULT)
2168 *level = kern_level - '0';
2169 break;
2170 case 'c': /* KERN_CONT */
2171 if (flags)
2172 *flags |= LOG_CONT;
2173 }
2174
2175 prefix_len += 2;
2176 text += 2;
2177 }
2178
2179 return prefix_len;
2180 }
2181
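/*
 * Worked example: for a message built from KERN_ERR "Oops", i.e. the
 * bytes "\0013Oops", printk_parse_prefix() sets *level to 3 and returns
 * a prefix length of 2 (the SOH byte plus the level character). A
 * KERN_CONT prefix ("\001c") instead ORs LOG_CONT into *flags.
 */
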
2182 __printf(5, 0)
2183 static u16 printk_sprint(char *text, u16 size, int facility,
2184 enum printk_info_flags *flags, const char *fmt,
2185 va_list args)
2186 {
2187 u16 text_len;
2188
2189 text_len = vscnprintf(text, size, fmt, args);
2190
2191 /* Mark and strip a trailing newline. */
2192 if (text_len && text[text_len - 1] == '\n') {
2193 text_len--;
2194 *flags |= LOG_NEWLINE;
2195 }
2196
2197 /* Strip log level and control flags. */
2198 if (facility == 0) {
2199 u16 prefix_len;
2200
2201 prefix_len = printk_parse_prefix(text, NULL, NULL);
2202 if (prefix_len) {
2203 text_len -= prefix_len;
2204 memmove(text, text + prefix_len, text_len);
2205 }
2206 }
2207
2208 trace_console(text, text_len);
2209
2210 return text_len;
2211 }
2212
2213 __printf(4, 0)
2214 int vprintk_store(int facility, int level,
2215 const struct dev_printk_info *dev_info,
2216 const char *fmt, va_list args)
2217 {
2218 struct prb_reserved_entry e;
2219 enum printk_info_flags flags = 0;
2220 struct printk_record r;
2221 unsigned long irqflags;
2222 u16 trunc_msg_len = 0;
2223 char prefix_buf[8];
2224 u8 *recursion_ptr;
2225 u16 reserve_size;
2226 va_list args2;
2227 u32 caller_id;
2228 u16 text_len;
2229 int ret = 0;
2230 u64 ts_nsec;
2231
2232 if (!printk_enter_irqsave(recursion_ptr, irqflags))
2233 return 0;
2234
2235 /*
2236 * Since the duration of printk() can vary depending on the message
2237 * and state of the ringbuffer, grab the timestamp now so that it is
2238 * close to the call of printk(). This provides a more deterministic
2239 * timestamp with respect to the caller.
2240 */
2241 ts_nsec = local_clock();
2242
2243 caller_id = printk_caller_id();
2244
2245 /*
2246 * The sprintf needs to come first since the syslog prefix might be
2247 * passed in as a parameter. An extra byte must be reserved so that
2248 * later the vscnprintf() into the reserved buffer has room for the
2249 * terminating '\0', which is not counted by vsnprintf().
2250 */
2251 va_copy(args2, args);
2252 reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1;
2253 va_end(args2);
2254
2255 if (reserve_size > PRINTKRB_RECORD_MAX)
2256 reserve_size = PRINTKRB_RECORD_MAX;
2257
2258 /* Extract log level or control flags. */
2259 if (facility == 0)
2260 printk_parse_prefix(&prefix_buf[0], &level, &flags);
2261
2262 if (level == LOGLEVEL_DEFAULT)
2263 level = default_message_loglevel;
2264
2265 if (dev_info)
2266 flags |= LOG_NEWLINE;
2267
2268 if (is_printk_force_console())
2269 flags |= LOG_FORCE_CON;
2270
2271 if (flags & LOG_CONT) {
2272 prb_rec_init_wr(&r, reserve_size);
2273 if (prb_reserve_in_last(&e, prb, &r, caller_id, PRINTKRB_RECORD_MAX)) {
2274 text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
2275 facility, &flags, fmt, args);
2276 r.info->text_len += text_len;
2277
2278 if (flags & LOG_FORCE_CON)
2279 r.info->flags |= LOG_FORCE_CON;
2280
2281 if (flags & LOG_NEWLINE) {
2282 r.info->flags |= LOG_NEWLINE;
2283 prb_final_commit(&e);
2284 } else {
2285 prb_commit(&e);
2286 }
2287
2288 ret = text_len;
2289 goto out;
2290 }
2291 }
2292
2293 /*
2294 * Explicitly initialize the record before every prb_reserve() call.
2295 * prb_reserve_in_last() and prb_reserve() purposely invalidate the
2296 * structure when they fail.
2297 */
2298 prb_rec_init_wr(&r, reserve_size);
2299 if (!prb_reserve(&e, prb, &r)) {
2300 /* Truncate the message if it is too long for an empty buffer. */
2301 truncate_msg(&reserve_size, &trunc_msg_len);
2302
2303 prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
2304 if (!prb_reserve(&e, prb, &r))
2305 goto out;
2306 }
2307
2308 /* fill message */
2309 text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args);
2310 if (trunc_msg_len)
2311 memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
2312 r.info->text_len = text_len + trunc_msg_len;
2313 r.info->facility = facility;
2314 r.info->level = level & 7;
2315 r.info->flags = flags & 0x1f;
2316 r.info->ts_nsec = ts_nsec;
2317 r.info->caller_id = caller_id;
2318 if (dev_info)
2319 memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
2320
2321 /* A message without a trailing newline can be continued. */
2322 if (!(flags & LOG_NEWLINE))
2323 prb_commit(&e);
2324 else
2325 prb_final_commit(&e);
2326
2327 ret = text_len + trunc_msg_len;
2328 out:
2329 printk_exit_irqrestore(recursion_ptr, irqflags);
2330 return ret;
2331 }
2332
2333 /*
2334 * This acts as a one-way switch to allow legacy consoles to print from
2335 * the printk() caller context on a panic CPU. It also attempts to flush
2336 * the legacy consoles in this context.
2337 */
2338 void printk_legacy_allow_panic_sync(void)
2339 {
2340 struct console_flush_type ft;
2341
2342 legacy_allow_panic_sync = true;
2343
2344 printk_get_console_flush_type(&ft);
2345 if (ft.legacy_direct) {
2346 if (console_trylock())
2347 console_unlock();
2348 }
2349 }
2350
2351 bool __read_mostly debug_non_panic_cpus;
2352
2353 #ifdef CONFIG_PRINTK_CALLER
2354 static int __init debug_non_panic_cpus_setup(char *str)
2355 {
2356 debug_non_panic_cpus = true;
2357 pr_info("allow messages from non-panic CPUs in panic()\n");
2358
2359 return 0;
2360 }
2361 early_param("debug_non_panic_cpus", debug_non_panic_cpus_setup);
2362 module_param(debug_non_panic_cpus, bool, 0644);
2363 MODULE_PARM_DESC(debug_non_panic_cpus,
2364 "allow messages from non-panic CPUs in panic()");
2365 #endif
2366
2367 asmlinkage int vprintk_emit(int facility, int level,
2368 const struct dev_printk_info *dev_info,
2369 const char *fmt, va_list args)
2370 {
2371 struct console_flush_type ft;
2372 int printed_len;
2373
2374 /* Suppress unimportant messages after panic happens */
2375 if (unlikely(suppress_printk))
2376 return 0;
2377
2378 /*
2379 * The messages on the panic CPU are the most important. If
2380 * non-panic CPUs are generating any messages, those messages
2381 * will be silently dropped.
2382 */
2383 if (panic_on_other_cpu() &&
2384 !debug_non_panic_cpus &&
2385 !panic_triggering_all_cpu_backtrace)
2386 return 0;
2387
2388 printk_get_console_flush_type(&ft);
2389
2390 /* If called from the scheduler, we cannot call up(). */
2391 if (level == LOGLEVEL_SCHED) {
2392 level = LOGLEVEL_DEFAULT;
2393 ft.legacy_offload |= ft.legacy_direct;
2394 ft.legacy_direct = false;
2395 }
2396
2397 printk_delay(level);
2398
2399 printed_len = vprintk_store(facility, level, dev_info, fmt, args);
2400
2401 if (ft.nbcon_atomic)
2402 nbcon_atomic_flush_pending();
2403
2404 if (ft.nbcon_offload)
2405 nbcon_kthreads_wake();
2406
2407 if (ft.legacy_direct) {
2408 /*
2409 * The caller may be holding system-critical or
2410 * timing-sensitive locks. Disable preemption during
2411 * printing of all remaining records to all consoles so that
2412 * this context can return as soon as possible. Hopefully
2413 * another printk() caller will take over the printing.
2414 */
2415 preempt_disable();
2416 /*
2417 * Try to acquire and then immediately release the console
2418 * semaphore. The release will print out buffers. With the
2419 * spinning variant, this context tries to take over the
2420 * printing from another printing context.
2421 */
2422 if (console_trylock_spinning())
2423 console_unlock();
2424 preempt_enable();
2425 }
2426
2427 if (ft.legacy_offload)
2428 defer_console_output();
2429 else
2430 wake_up_klogd();
2431
2432 return printed_len;
2433 }
2434 EXPORT_SYMBOL(vprintk_emit);
2435
2436 int vprintk_default(const char *fmt, va_list args)
2437 {
2438 return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
2439 }
2440 EXPORT_SYMBOL_GPL(vprintk_default);
2441
2442 asmlinkage __visible int _printk(const char *fmt, ...)
2443 {
2444 va_list args;
2445 int r;
2446
2447 va_start(args, fmt);
2448 r = vprintk(fmt, args);
2449 va_end(args);
2450
2451 return r;
2452 }
2453 EXPORT_SYMBOL(_printk);
2454
2455 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
2456
2457 #else /* CONFIG_PRINTK */
2458
2459 #define printk_time false
2460
2461 #define prb_read_valid(rb, seq, r) false
2462 #define prb_first_valid_seq(rb) 0
2463 #define prb_next_seq(rb) 0
2464
2465 static u64 syslog_seq;
2466
2467 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
2468
2469 #endif /* CONFIG_PRINTK */
2470
2471 #ifdef CONFIG_EARLY_PRINTK
2472 struct console *early_console;
2473
2474 asmlinkage __visible void early_printk(const char *fmt, ...)
2475 {
2476 va_list ap;
2477 char buf[512];
2478 int n;
2479
2480 if (!early_console)
2481 return;
2482
2483 va_start(ap, fmt);
2484 n = vscnprintf(buf, sizeof(buf), fmt, ap);
2485 va_end(ap);
2486
2487 early_console->write(early_console, buf, n);
2488 }
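
/*
 * Usage sketch (illustrative): once architecture setup code has
 * assigned @early_console, messages can be emitted before any regular
 * console is registered:
 *
 *	early_printk("early boot on cpu %d\n", cpu);
 *
 * Until @early_console is set, the call is a no-op.
 */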
2489 #endif
2490
2491 static void set_user_specified(struct console_cmdline *c, bool user_specified)
2492 {
2493 if (!user_specified)
2494 return;
2495
2496 /*
2497 * Console @c was defined by the user on the command line.
2498 * Do not clear the flag when it is added again by SPCR or the device tree.
2499 */
2500 c->user_specified = true;
2501 /* At least one console defined by the user on the command line. */
2502 console_set_on_cmdline = 1;
2503 }
2504
2505 static int __add_preferred_console(const char *name, const short idx,
2506 const char *devname, char *options,
2507 char *brl_options, bool user_specified)
2508 {
2509 struct console_cmdline *c;
2510 int i;
2511
2512 if (!name && !devname)
2513 return -EINVAL;
2514
2515 /*
2516 * We use a signed short index for struct console for device drivers to
2517 * indicate a not-yet-assigned index or port. However, a negative index
2518 * value is not valid when the console name and index are defined on
2519 * the command line.
2520 */
2521 if (name && idx < 0)
2522 return -EINVAL;
2523
2524 /*
2525 * See if this tty is not yet registered, and
2526 * if we have a slot free.
2527 */
2528 for (i = 0, c = console_cmdline;
2529 i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
2530 i++, c++) {
2531 if ((name && strcmp(c->name, name) == 0 && c->index == idx) ||
2532 (devname && strcmp(c->devname, devname) == 0)) {
2533 if (!brl_options)
2534 preferred_console = i;
2535 set_user_specified(c, user_specified);
2536 return 0;
2537 }
2538 }
2539 if (i == MAX_CMDLINECONSOLES)
2540 return -E2BIG;
2541 if (!brl_options)
2542 preferred_console = i;
2543 if (name)
2544 strscpy(c->name, name);
2545 if (devname)
2546 strscpy(c->devname, devname);
2547 c->options = options;
2548 set_user_specified(c, user_specified);
2549 braille_set_options(c, brl_options);
2550
2551 c->index = idx;
2552 return 0;
2553 }
2554
2555 static int __init console_msg_format_setup(char *str)
2556 {
2557 if (!strcmp(str, "syslog"))
2558 console_msg_format = MSG_FORMAT_SYSLOG;
2559 if (!strcmp(str, "default"))
2560 console_msg_format = MSG_FORMAT_DEFAULT;
2561 return 1;
2562 }
2563 __setup("console_msg_format=", console_msg_format_setup);
2564
2565 /*
2566 * Set up a console. Called via do_early_param() in init/main.c
2567 * for each "console=" parameter in the boot command line.
2568 */
2569 static int __init console_setup(char *str)
2570 {
2571 static_assert(sizeof(console_cmdline[0].devname) >= sizeof(console_cmdline[0].name) + 4);
2572 char buf[sizeof(console_cmdline[0].devname)];
2573 char *brl_options = NULL;
2574 char *ttyname = NULL;
2575 char *devname = NULL;
2576 char *options;
2577 char *s;
2578 int idx;
2579
2580 /*
2581 * console="" or console=null have been suggested as a way to
2582 * disable console output. Use ttynull, which has been created
2583 * for exactly this purpose.
2584 */
2585 if (str[0] == 0 || strcmp(str, "null") == 0) {
2586 __add_preferred_console("ttynull", 0, NULL, NULL, NULL, true);
2587 return 1;
2588 }
2589
2590 if (_braille_console_setup(&str, &brl_options))
2591 return 1;
2592
2593 /* For a DEVNAME:0.0 style console the character device is unknown early */
2594 if (strchr(str, ':'))
2595 devname = buf;
2596 else
2597 ttyname = buf;
2598
2599 /*
2600 * Decode str into name, index, options.
2601 */
2602 if (ttyname && isdigit(str[0]))
2603 scnprintf(buf, sizeof(buf), "ttyS%s", str);
2604 else
2605 strscpy(buf, str);
2606
2607 options = strchr(str, ',');
2608 if (options)
2609 *(options++) = 0;
2610
2611 #ifdef __sparc__
2612 if (!strcmp(str, "ttya"))
2613 strscpy(buf, "ttyS0");
2614 if (!strcmp(str, "ttyb"))
2615 strscpy(buf, "ttyS1");
2616 #endif
2617
2618 for (s = buf; *s; s++)
2619 if ((ttyname && isdigit(*s)) || *s == ',')
2620 break;
2621
2622 /* @idx will get defined when devname matches. */
2623 if (devname)
2624 idx = -1;
2625 else
2626 idx = simple_strtoul(s, NULL, 10);
2627
2628 *s = 0;
2629
2630 __add_preferred_console(ttyname, idx, devname, options, brl_options, true);
2631 return 1;
2632 }
2633 __setup("console=", console_setup);
2634
2635 /**
2636 * add_preferred_console - add a device to the list of preferred consoles.
2637 * @name: device name
2638 * @idx: device index
2639 * @options: options for this console
2640 *
2641 * The last preferred console added will be used for kernel messages
2642 * and stdin/out/err for init. Normally this is used by console_setup
2643 * above to handle user-supplied console arguments; however it can also
2644 * be used by arch-specific code either to override the user or more
2645 * commonly to provide a default console (i.e. from PROM variables) when
2646 * the user has not supplied one.
2647 */
2648 int add_preferred_console(const char *name, const short idx, char *options)
2649 {
2650 return __add_preferred_console(name, idx, NULL, options, NULL, false);
2651 }
2652
2653 /**
2654 * match_devname_and_update_preferred_console - Update a preferred console
2655 * when matching devname is found.
2656 * @devname: DEVNAME:0.0 style device name
2657 * @name: Name of the corresponding console driver, e.g. "ttyS"
2658 * @idx: Console index, e.g. port number.
2659 *
2660 * The function checks whether a device with the given @devname is
2661 * preferred via the console=DEVNAME:0.0 command line option.
2662 * It fills the missing console driver name and console index
2663 * so that a later register_console() call could find (match)
2664 * and enable this device.
2665 *
2666 * It might be used when a driver subsystem initializes particular
2667 * devices with already known DEVNAME:0.0 style names and can
2668 * predict which console driver name and index this device
2669 * will later get associated with.
2670 *
2671 * Return: 0 on success, negative error code on failure.
2672 */
2673 int match_devname_and_update_preferred_console(const char *devname,
2674 const char *name,
2675 const short idx)
2676 {
2677 struct console_cmdline *c = console_cmdline;
2678 int i;
2679
2680 if (!devname || !strlen(devname) || !name || !strlen(name) || idx < 0)
2681 return -EINVAL;
2682
2683 for (i = 0; i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
2684 i++, c++) {
2685 if (!strcmp(devname, c->devname)) {
2686 pr_info("associate the preferred console \"%s\" with \"%s%d\"\n",
2687 devname, name, idx);
2688 strscpy(c->name, name);
2689 c->index = idx;
2690 return 0;
2691 }
2692 }
2693
2694 return -ENOENT;
2695 }
2696 EXPORT_SYMBOL_GPL(match_devname_and_update_preferred_console);
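
/*
 * Illustrative example with hypothetical names: given
 * console=DEVNAME:0.0 specified on the command line as "serial0:0.0",
 * a driver that knows this device will be registered as "ttyS0" may
 * call:
 *
 *	match_devname_and_update_preferred_console("serial0:0.0",
 *						   "ttyS", 0);
 *
 * so that a later register_console() of ttyS0 matches and enables it.
 */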
2697
2698 bool console_suspend_enabled = true;
2699 EXPORT_SYMBOL(console_suspend_enabled);
2700
2701 static int __init console_suspend_disable(char *str)
2702 {
2703 console_suspend_enabled = false;
2704 return 1;
2705 }
2706 __setup("no_console_suspend", console_suspend_disable);
2707 module_param_named(console_suspend, console_suspend_enabled,
2708 bool, S_IRUGO | S_IWUSR);
2709 MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
2710 " and hibernate operations");
2711
2712 static bool printk_console_no_auto_verbose;
2713
2714 void console_verbose(void)
2715 {
2716 if (console_loglevel && !printk_console_no_auto_verbose)
2717 console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
2718 }
2719 EXPORT_SYMBOL_GPL(console_verbose);
2720
2721 module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644);
2722 MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc");
2723
2724 /**
2725 * console_suspend_all - suspend the console subsystem
2726 *
2727 * This disables printk() while we go into suspend states
2728 */
2729 void console_suspend_all(void)
2730 {
2731 struct console *con;
2732
2733 if (!console_suspend_enabled)
2734 return;
2735 pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
2736 pr_flush(1000, true);
2737
2738 console_list_lock();
2739 for_each_console(con)
2740 console_srcu_write_flags(con, con->flags | CON_SUSPENDED);
2741 console_list_unlock();
2742
2743 /*
2744 * Ensure that all SRCU list walks have completed. All printing
2745 * contexts must be able to see that they are suspended so that it
2746 * is guaranteed that all printing has stopped when this function
2747 * completes.
2748 */
2749 synchronize_srcu(&console_srcu);
2750 }
2751
2752 void console_resume_all(void)
2753 {
2754 struct console_flush_type ft;
2755 struct console *con;
2756
2757 if (!console_suspend_enabled)
2758 return;
2759
2760 console_list_lock();
2761 for_each_console(con)
2762 console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
2763 console_list_unlock();
2764
2765 /*
2766 * Ensure that all SRCU list walks have completed. All printing
2767 * contexts must be able to see they are no longer suspended so
2768 * that they are guaranteed to wake up and resume printing.
2769 */
2770 synchronize_srcu(&console_srcu);
2771
2772 printk_get_console_flush_type(&ft);
2773 if (ft.nbcon_offload)
2774 nbcon_kthreads_wake();
2775 if (ft.legacy_offload)
2776 defer_console_output();
2777
2778 pr_flush(1000, true);
2779 }
2780
2781 /**
2782 * console_cpu_notify - print deferred console messages after CPU hotplug
2783 * @cpu: unused
2784 *
2785 * If printk() is called from a CPU that is not online yet, the messages
2786 * will be printed on the console only if there are CON_ANYTIME consoles.
2787 * This function is called when a new CPU comes online (or fails to come
2788 * up) or goes offline.
2789 */
2790 static int console_cpu_notify(unsigned int cpu)
2791 {
2792 struct console_flush_type ft;
2793
2794 if (!cpuhp_tasks_frozen) {
2795 printk_get_console_flush_type(&ft);
2796 if (ft.nbcon_atomic)
2797 nbcon_atomic_flush_pending();
2798 if (ft.legacy_direct) {
2799 if (console_trylock())
2800 console_unlock();
2801 }
2802 }
2803 return 0;
2804 }
2805
2806 /**
2807 * console_lock - block the console subsystem from printing
2808 *
2809 * Acquires a lock which guarantees that no consoles will
2810 * be in or enter their write() callback.
2811 *
2812 * Can sleep, returns nothing.
2813 */
2814 void console_lock(void)
2815 {
2816 might_sleep();
2817
2818 /* On panic, the console_lock must be left to the panic CPU. */
2819 while (panic_on_other_cpu())
2820 msleep(1000);
2821
2822 down_console_sem();
2823 console_locked = 1;
2824 console_may_schedule = 1;
2825 }
2826 EXPORT_SYMBOL(console_lock);
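
/*
 * A minimal usage sketch: code that must keep all console write()
 * callbacks quiesced while inspecting or modifying console state does:
 *
 *	console_lock();
 *	...inspect or modify console state...
 *	console_unlock();
 *
 * See console_device() below for a real example.
 */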
2827
2828 /**
2829 * console_trylock - try to block the console subsystem from printing
2830 *
2831 * Try to acquire a lock which guarantees that no consoles will
2832 * be in or enter their write() callback.
2833 *
2834 * Return: 1 on success, 0 on failure to acquire the lock.
2835 */
2836 int console_trylock(void)
2837 {
2838 /* On panic, the console_lock must be left to the panic CPU. */
2839 if (panic_on_other_cpu())
2840 return 0;
2841 if (down_trylock_console_sem())
2842 return 0;
2843 console_locked = 1;
2844 console_may_schedule = 0;
2845 return 1;
2846 }
2847 EXPORT_SYMBOL(console_trylock);
2848
2849 int is_console_locked(void)
2850 {
2851 return console_locked;
2852 }
2853 EXPORT_SYMBOL(is_console_locked);
2854
2855 static void __console_unlock(void)
2856 {
2857 console_locked = 0;
2858 up_console_sem();
2859 }
2860
2861 #ifdef CONFIG_PRINTK
2862
2863 /*
2864 * Prepend the message in @pmsg->pbufs->outbuf. This is achieved by shifting
2865 * the existing message over and inserting the scratchbuf message.
2866 *
2867 * @pmsg is the original printk message.
2868 * @fmt is the printf format of the message which will prepend the existing one.
2869 *
2870 * If there is not enough space in @pmsg->pbufs->outbuf, the existing
2871 * message text will be sufficiently truncated.
2872 *
2873 * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
2874 */
2875 __printf(2, 3)
2876 static void console_prepend_message(struct printk_message *pmsg, const char *fmt, ...)
2877 {
2878 struct printk_buffers *pbufs = pmsg->pbufs;
2879 const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
2880 const size_t outbuf_sz = sizeof(pbufs->outbuf);
2881 char *scratchbuf = &pbufs->scratchbuf[0];
2882 char *outbuf = &pbufs->outbuf[0];
2883 va_list args;
2884 size_t len;
2885
2886 va_start(args, fmt);
2887 len = vscnprintf(scratchbuf, scratchbuf_sz, fmt, args);
2888 va_end(args);
2889
2890 /*
2891 * Make sure outbuf is sufficiently large before prepending.
2892 * Keep at least the prefix when the message must be truncated.
2893 * It is a rather theoretical problem when someone tries to
2894 * use a minimalist buffer.
2895 */
2896 if (WARN_ON_ONCE(len + PRINTK_PREFIX_MAX >= outbuf_sz))
2897 return;
2898
2899 if (pmsg->outbuf_len + len >= outbuf_sz) {
2900 /* Truncate the message, but keep it terminated. */
2901 pmsg->outbuf_len = outbuf_sz - (len + 1);
2902 outbuf[pmsg->outbuf_len] = 0;
2903 }
2904
2905 memmove(outbuf + len, outbuf, pmsg->outbuf_len + 1);
2906 memcpy(outbuf, scratchbuf, len);
2907 pmsg->outbuf_len += len;
2908 }
2909
2910 /*
2911 * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message".
2912 * @pmsg->outbuf_len is updated appropriately.
2913 *
2914 * @pmsg is the printk message to prepend.
2915 *
2916 * @dropped is the dropped count to report in the dropped message.
2917 */
2918 void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
2919 {
2920 console_prepend_message(pmsg, "** %lu printk messages dropped **\n", dropped);
2921 }
2922
2923 /*
2924 * Prepend the message in @pmsg->pbufs->outbuf with a "replay message".
2925 * @pmsg->outbuf_len is updated appropriately.
2926 *
2927 * @pmsg is the printk message to prepend.
2928 */
2929 void console_prepend_replay(struct printk_message *pmsg)
2930 {
2931 console_prepend_message(pmsg, "** replaying previous printk message **\n");
2932 }
2933
2934 /*
2935 * Read and format the specified record (or a later record if the specified
2936 * record is not available).
2937 *
2938 * @pmsg will contain the formatted result. @pmsg->pbufs must point to a
2939 * struct printk_buffers.
2940 *
2941 * @seq is the record to read and format. If it is not available, the next
2942 * valid record is read.
2943 *
2944 * @is_extended specifies if the message should be formatted for extended
2945 * console output.
2946 *
2947 * @may_suppress specifies if records may be skipped based on loglevel.
2948 *
2949 * Returns false if no record is available. Otherwise true and all fields
2950 * of @pmsg are valid. (See the documentation of struct printk_message
2951 * for information about the @pmsg fields.)
2952 */
2953 bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
2954 bool is_extended, bool may_suppress)
2955 {
2956 struct printk_buffers *pbufs = pmsg->pbufs;
2957 const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
2958 const size_t outbuf_sz = sizeof(pbufs->outbuf);
2959 char *scratchbuf = &pbufs->scratchbuf[0];
2960 char *outbuf = &pbufs->outbuf[0];
2961 struct printk_info info;
2962 struct printk_record r;
2963 size_t len = 0;
2964 bool force_con;
2965
2966 /*
2967 * Formatting extended messages requires a separate buffer, so use the
2968 * scratch buffer to read in the ringbuffer text.
2969 *
2970 * Formatting normal messages is done in-place, so read the ringbuffer
2971 * text directly into the output buffer.
2972 */
2973 if (is_extended)
2974 prb_rec_init_rd(&r, &info, scratchbuf, scratchbuf_sz);
2975 else
2976 prb_rec_init_rd(&r, &info, outbuf, outbuf_sz);
2977
2978 if (!prb_read_valid(prb, seq, &r))
2979 return false;
2980
2981 pmsg->seq = r.info->seq;
2982 pmsg->dropped = r.info->seq - seq;
2983 force_con = r.info->flags & LOG_FORCE_CON;
2984
2985 /*
2986 * Skip records that are not forced to be printed on consoles and that
2987 * have a level above the console loglevel.
2988 */
2989 if (!force_con && may_suppress && suppress_message_printing(r.info->level))
2990 goto out;
2991
2992 if (is_extended) {
2993 len = info_print_ext_header(outbuf, outbuf_sz, r.info);
2994 len += msg_print_ext_body(outbuf + len, outbuf_sz - len,
2995 &r.text_buf[0], r.info->text_len, &r.info->dev_info);
2996 } else {
2997 len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
2998 }
2999 out:
3000 pmsg->outbuf_len = len;
3001 return true;
3002 }
3003
3004 /*
3005 * Legacy console printing from printk() caller context does not respect
3006 * raw_spinlock/spinlock nesting. For !PREEMPT_RT the lockdep warning is a
3007 * false positive. For PREEMPT_RT the false positive condition does not
3008 * occur.
3009 *
3010 * This map is used to temporarily establish LD_WAIT_SLEEP context for the
3011 * console write() callback during legacy printing to avoid false positive
3012 * lockdep complaints, thus allowing lockdep to continue to function for
3013 * real issues.
3014 */
3015 #ifdef CONFIG_PREEMPT_RT
3016 static inline void printk_legacy_allow_spinlock_enter(void) { }
3017 static inline void printk_legacy_allow_spinlock_exit(void) { }
3018 #else
3019 static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_SLEEP);
3020
3021 static inline void printk_legacy_allow_spinlock_enter(void)
3022 {
3023 lock_map_acquire_try(&printk_legacy_map);
3024 }
3025
3026 static inline void printk_legacy_allow_spinlock_exit(void)
3027 {
3028 lock_map_release(&printk_legacy_map);
3029 }
3030 #endif /* CONFIG_PREEMPT_RT */
3031
3032 /*
3033 * Used as the printk buffers for non-panic, serialized console printing.
3034 * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
3035 * Its usage requires the console_lock held.
3036 */
3037 struct printk_buffers printk_shared_pbufs;
3038
3039 /*
3040 * Print one record for the given console. The record printed is whatever
3041 * record is the next available record for the given console.
3042 *
3043 * @handover will be set to true if a printk waiter has taken over the
3044 * console_lock, in which case the caller no longer holds either the
3045 * console_lock or the SRCU read lock. Otherwise it is set to false.
3046 *
3047 * @cookie is the cookie from the SRCU read lock.
3048 *
3049 * Returns false if the given console has no next record to print, otherwise
3050 * true.
3051 *
3052 * Requires the console_lock and the SRCU read lock.
3053 */
3054 static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
3055 {
3056 bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
3057 char *outbuf = &printk_shared_pbufs.outbuf[0];
3058 struct printk_message pmsg = {
3059 .pbufs = &printk_shared_pbufs,
3060 };
3061 unsigned long flags;
3062
3063 *handover = false;
3064
3065 if (!printk_get_next_message(&pmsg, con->seq, is_extended, true))
3066 return false;
3067
3068 con->dropped += pmsg.dropped;
3069
3070 /* Skip messages of formatted length 0. */
3071 if (pmsg.outbuf_len == 0) {
3072 con->seq = pmsg.seq + 1;
3073 goto skip;
3074 }
3075
3076 if (con->dropped && !is_extended) {
3077 console_prepend_dropped(&pmsg, con->dropped);
3078 con->dropped = 0;
3079 }
3080
3081 /* Write everything out to the hardware. */
3082
3083 if (force_legacy_kthread() && !panic_in_progress()) {
3084 /*
3085 * With forced threading this function is in a task context
3086 * (either legacy kthread or get_init_console_seq()). There
3087 * is no need for concern about printk reentrance, handovers,
3088 * or lockdep complaints.
3089 */
3090
3091 con->write(con, outbuf, pmsg.outbuf_len);
3092 con->seq = pmsg.seq + 1;
3093 } else {
3094 /*
3095 * While actively printing out messages, if another printk()
3096 * were to occur on another CPU, it may wait for this one to
3097 * finish. This task can not be preempted if there is a
3098 * waiter waiting to take over.
3099 *
3100 * Interrupts are disabled because the hand over to a waiter
3101 * must not be interrupted until the hand over is completed
3102 * (@console_waiter is cleared).
3103 */
3104 printk_safe_enter_irqsave(flags);
3105 console_lock_spinning_enable();
3106
3107 /* Do not trace print latency. */
3108 stop_critical_timings();
3109
3110 printk_legacy_allow_spinlock_enter();
3111 con->write(con, outbuf, pmsg.outbuf_len);
3112 printk_legacy_allow_spinlock_exit();
3113
3114 start_critical_timings();
3115
3116 con->seq = pmsg.seq + 1;
3117
3118 *handover = console_lock_spinning_disable_and_check(cookie);
3119 printk_safe_exit_irqrestore(flags);
3120 }
3121 skip:
3122 return true;
3123 }
3124
3125 #else
3126
3127 static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
3128 {
3129 *handover = false;
3130 return false;
3131 }
3132
3133 static inline void printk_kthreads_check_locked(void) { }
3134
3135 #endif /* CONFIG_PRINTK */
3136
3137 /*
3138 * Print out all remaining records to all consoles.
3139 *
3140 * @do_cond_resched is set by the caller. It can be true only in schedulable
3141 * context.
3142 *
3143 * @next_seq is set to the sequence number after the last available record.
3144 * The value is valid only when this function returns true. It means that all
3145 * usable consoles are completely flushed.
3146 *
3147 * @handover will be set to true if a printk waiter has taken over the
3148 * console_lock, in which case the caller is no longer holding the
3149 * console_lock. Otherwise it is set to false.
3150 *
3151 * Returns true when there was at least one usable console and all messages
3152 * were flushed to all usable consoles. A returned false informs the caller
3153 * that everything was not flushed (either there were no usable consoles or
3154 * another context has taken over printing or it is a panic situation and this
3155 * is not the panic CPU). Regardless of the reason, the caller should assume it
3156 * is not useful to immediately try again.
3157 *
3158 * Requires the console_lock.
3159 */
3160 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
3161 {
3162 struct console_flush_type ft;
3163 bool any_usable = false;
3164 struct console *con;
3165 bool any_progress;
3166 int cookie;
3167
3168 *next_seq = 0;
3169 *handover = false;
3170
3171 do {
3172 any_progress = false;
3173
3174 printk_get_console_flush_type(&ft);
3175
3176 cookie = console_srcu_read_lock();
3177 for_each_console_srcu(con) {
3178 short flags = console_srcu_read_flags(con);
3179 u64 printk_seq;
3180 bool progress;
3181
3182 /*
3183 * console_flush_all() is only responsible for nbcon
3184 * consoles when the nbcon consoles cannot print via
3185 * their atomic or threaded flushing.
3186 */
3187 if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
3188 continue;
3189
3190 if (!console_is_usable(con, flags, !do_cond_resched))
3191 continue;
3192 any_usable = true;
3193
3194 if (flags & CON_NBCON) {
3195 progress = nbcon_legacy_emit_next_record(con, handover, cookie,
3196 !do_cond_resched);
3197 printk_seq = nbcon_seq_read(con);
3198 } else {
3199 progress = console_emit_next_record(con, handover, cookie);
3200 printk_seq = con->seq;
3201 }
3202
3203 /*
3204 * If a handover has occurred, the SRCU read lock
3205 * is already released.
3206 */
3207 if (*handover)
3208 return false;
3209
3210 /* Track the next of the highest seq flushed. */
3211 if (printk_seq > *next_seq)
3212 *next_seq = printk_seq;
3213
3214 if (!progress)
3215 continue;
3216 any_progress = true;
3217
3218 /* Allow panic_cpu to take over the consoles safely. */
3219 if (panic_on_other_cpu())
3220 goto abandon;
3221
3222 if (do_cond_resched)
3223 cond_resched();
3224 }
3225 console_srcu_read_unlock(cookie);
3226 } while (any_progress);
3227
3228 return any_usable;
3229
3230 abandon:
3231 console_srcu_read_unlock(cookie);
3232 return false;
3233 }
3234
3235 static void __console_flush_and_unlock(void)
3236 {
3237 bool do_cond_resched;
3238 bool handover;
3239 bool flushed;
3240 u64 next_seq;
3241
3242 /*
3243 * Console drivers are called with interrupts disabled, so
3244 * @console_may_schedule should be cleared before printing; however, we may
3245 * end up dumping a lot of lines, for example, if called from
3246 * console registration path, and should invoke cond_resched()
3247 * between lines if allowable. Not doing so can cause a very long
3248 * scheduling stall on a slow console leading to RCU stall and
3249 * softlockup warnings which exacerbate the issue with more
3250 * messages practically incapacitating the system. Therefore, create
3251 * a local to use for the printing loop.
3252 */
3253 do_cond_resched = console_may_schedule;
3254
3255 do {
3256 console_may_schedule = 0;
3257
3258 flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
3259 if (!handover)
3260 __console_unlock();
3261
3262 /*
3263 * Abort if there was a failure to flush all messages to all
3264 * usable consoles. Either it is not possible to flush (in
3265 * which case it would be an infinite loop of retrying) or
3266 * another context has taken over printing.
3267 */
3268 if (!flushed)
3269 break;
3270
3271 /*
3272 * Some context may have added new records after
3273 * console_flush_all() but before unlocking the console.
3274 * Re-check if there is a new record to flush. If the trylock
3275 * fails, another context is already handling the printing.
3276 */
3277 } while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
3278 }
3279
3280 /**
3281 * console_unlock - unblock the legacy console subsystem from printing
3282 *
3283 * Releases the console_lock which the caller holds to block printing of
3284 * the legacy console subsystem.
3285 *
3286 * While the console_lock was held, console output may have been buffered
3287 * by printk(). If this is the case, console_unlock() emits the output on
3288 * legacy consoles prior to releasing the lock.
3289 *
3290 * console_unlock() may be called from any context.
3291 */
3292 void console_unlock(void)
3293 {
3294 struct console_flush_type ft;
3295
3296 printk_get_console_flush_type(&ft);
3297 if (ft.legacy_direct)
3298 __console_flush_and_unlock();
3299 else
3300 __console_unlock();
3301 }
3302 EXPORT_SYMBOL(console_unlock);
3303
3304 /**
3305 * console_conditional_schedule - yield the CPU if required
3306 *
3307 * If the console code is currently allowed to sleep, and
3308 * if this CPU should yield the CPU to another task, do
3309 * so here.
3310 *
3311 * Must be called while holding console_lock().
3312 */
3313 void __sched console_conditional_schedule(void)
3314 {
3315 if (console_may_schedule)
3316 cond_resched();
3317 }
3318 EXPORT_SYMBOL(console_conditional_schedule);
3319
3320 void console_unblank(void)
3321 {
3322 bool found_unblank = false;
3323 struct console *c;
3324 int cookie;
3325
3326 /*
3327 * First check if there are any consoles implementing the unblank()
3328 * callback. If not, there is no reason to continue and take the
3329 * console lock, which in particular can be dangerous if
3330 * @oops_in_progress is set.
3331 */
3332 cookie = console_srcu_read_lock();
3333 for_each_console_srcu(c) {
3334 short flags = console_srcu_read_flags(c);
3335
3336 if (flags & CON_SUSPENDED)
3337 continue;
3338
3339 if ((flags & CON_ENABLED) && c->unblank) {
3340 found_unblank = true;
3341 break;
3342 }
3343 }
3344 console_srcu_read_unlock(cookie);
3345 if (!found_unblank)
3346 return;
3347
3348 /*
3349 * Stop console printing because the unblank() callback may
3350 * assume the console is not within its write() callback.
3351 *
3352 * If @oops_in_progress is set, this may be an atomic context.
3353 * In that case, attempt a trylock as best-effort.
3354 */
3355 if (oops_in_progress) {
3356 /* Semaphores are not NMI-safe. */
3357 if (in_nmi())
3358 return;
3359
3360 /*
3361 * Attempting to trylock the console lock can deadlock
3362 * if another CPU was stopped while modifying the
3363 * semaphore. "Hope and pray" that this is not the
3364 * current situation.
3365 */
3366 if (down_trylock_console_sem() != 0)
3367 return;
3368 } else
3369 console_lock();
3370
3371 console_locked = 1;
3372 console_may_schedule = 0;
3373
3374 cookie = console_srcu_read_lock();
3375 for_each_console_srcu(c) {
3376 short flags = console_srcu_read_flags(c);
3377
3378 if (flags & CON_SUSPENDED)
3379 continue;
3380
3381 if ((flags & CON_ENABLED) && c->unblank)
3382 c->unblank();
3383 }
3384 console_srcu_read_unlock(cookie);
3385
3386 console_unlock();
3387
3388 if (!oops_in_progress)
3389 pr_flush(1000, true);
3390 }
3391
3392 /*
3393 * Rewind all consoles to the oldest available record.
3394 *
3395 * IMPORTANT: The function is safe only when called under
3396 * console_lock(). It is not enforced because
3397 * it is used as a best effort in panic().
3398 */
3399 static void __console_rewind_all(void)
3400 {
3401 struct console *c;
3402 short flags;
3403 int cookie;
3404 u64 seq;
3405
3406 seq = prb_first_valid_seq(prb);
3407
3408 cookie = console_srcu_read_lock();
3409 for_each_console_srcu(c) {
3410 flags = console_srcu_read_flags(c);
3411
3412 if (flags & CON_NBCON) {
3413 nbcon_seq_force(c, seq);
3414 } else {
3415 /*
3416 * This assignment is safe only when called under
3417 * console_lock(). On panic, legacy consoles are
3418 * only best effort.
3419 */
3420 c->seq = seq;
3421 }
3422 }
3423 console_srcu_read_unlock(cookie);
3424 }
3425
3426 /**
3427 * console_flush_on_panic - flush console content on panic
3428 * @mode: flush all messages in buffer or just the pending ones
3429 *
3430 * Immediately output all pending messages no matter what.
3431 */
3432 void console_flush_on_panic(enum con_flush_mode mode)
3433 {
3434 struct console_flush_type ft;
3435 bool handover;
3436 u64 next_seq;
3437
3438 /*
3439 * Ignore the console lock and flush out the messages. Attempting a
3440 * trylock would not be useful because:
3441 *
3442 * - if it is contended, it must be ignored anyway
3443 * - console_lock() and console_trylock() block and fail
3444 * respectively in panic for non-panic CPUs
3445 * - semaphores are not NMI-safe
3446 */
3447
3448 /*
3449 * If another context is holding the console lock,
3450 * @console_may_schedule might be set. Clear it so that
3451 * this context does not call cond_resched() while flushing.
3452 */
3453 console_may_schedule = 0;
3454
3455 if (mode == CONSOLE_REPLAY_ALL)
3456 __console_rewind_all();
3457
3458 printk_get_console_flush_type(&ft);
3459 if (ft.nbcon_atomic)
3460 nbcon_atomic_flush_pending();
3461
3462 /* Flush legacy consoles once allowed, even when dangerous. */
3463 if (legacy_allow_panic_sync)
3464 console_flush_all(false, &next_seq, &handover);
3465 }
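
/*
 * For example, the panic path typically flushes only the pending
 * records via console_flush_on_panic(CONSOLE_FLUSH_PENDING), while a
 * caller that wants the whole buffer printed again passes
 * CONSOLE_REPLAY_ALL, which first rewinds all consoles via
 * __console_rewind_all().
 */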
3466
3467 /*
3468 * Return the console tty driver structure and its associated index
3469 */
3470 struct tty_driver *console_device(int *index)
3471 {
3472 struct console *c;
3473 struct tty_driver *driver = NULL;
3474 int cookie;
3475
3476 /*
3477 * Take console_lock to serialize device() callback with
3478 * other console operations. For example, fg_console is
3479 * modified under console_lock when switching vt.
3480 */
3481 console_lock();
3482
3483 cookie = console_srcu_read_lock();
3484 for_each_console_srcu(c) {
3485 if (!c->device)
3486 continue;
3487 driver = c->device(c, index);
3488 if (driver)
3489 break;
3490 }
3491 console_srcu_read_unlock(cookie);
3492
3493 console_unlock();
3494 return driver;
3495 }
3496
3497 /*
3498 * Prevent further output on the passed console device so that (for example)
3499 * serial drivers can suspend console output before suspending a port, and can
3500 * re-enable output afterwards.
3501 */
3502 void console_suspend(struct console *console)
3503 {
3504 __pr_flush(console, 1000, true);
3505 console_list_lock();
3506 console_srcu_write_flags(console, console->flags & ~CON_ENABLED);
3507 console_list_unlock();
3508
3509 /*
3510 * Ensure that all SRCU list walks have completed. All contexts must
3511 * be able to see that this console is disabled so that (for example)
3512 * the caller can suspend the port without risk of another context
3513 * using the port.
3514 */
3515 synchronize_srcu(&console_srcu);
3516 }
3517 EXPORT_SYMBOL(console_suspend);
3518
3519 void console_resume(struct console *console)
3520 {
3521 struct console_flush_type ft;
3522 bool is_nbcon;
3523
3524 console_list_lock();
3525 console_srcu_write_flags(console, console->flags | CON_ENABLED);
3526 is_nbcon = console->flags & CON_NBCON;
3527 console_list_unlock();
3528
3529 /*
3530 * Ensure that all SRCU list walks have completed. The related
3531 * printing context must be able to see it is enabled so that
3532 * it is guaranteed to wake up and resume printing.
3533 */
3534 synchronize_srcu(&console_srcu);
3535
3536 printk_get_console_flush_type(&ft);
3537 if (is_nbcon && ft.nbcon_offload)
3538 nbcon_kthread_wake(console);
3539 else if (ft.legacy_offload)
3540 defer_console_output();
3541
3542 __pr_flush(console, 1000, true);
3543 }
3544 EXPORT_SYMBOL(console_resume);
3545
3546 #ifdef CONFIG_PRINTK
3547 static int unregister_console_locked(struct console *console);
3548
3549 /* True when system boot is far enough to create printer threads. */
3550 bool printk_kthreads_ready __ro_after_init;
3551
3552 static struct task_struct *printk_legacy_kthread;
3553
3554 static bool legacy_kthread_should_wakeup(void)
3555 {
3556 struct console_flush_type ft;
3557 struct console *con;
3558 bool ret = false;
3559 int cookie;
3560
3561 if (kthread_should_stop())
3562 return true;
3563
3564 printk_get_console_flush_type(&ft);
3565
3566 cookie = console_srcu_read_lock();
3567 for_each_console_srcu(con) {
3568 short flags = console_srcu_read_flags(con);
3569 u64 printk_seq;
3570
3571 /*
3572 * The legacy printer thread is only responsible for nbcon
3573 * consoles when the nbcon consoles cannot print via their
3574 * atomic or threaded flushing.
3575 */
3576 if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
3577 continue;
3578
3579 if (!console_is_usable(con, flags, false))
3580 continue;
3581
3582 if (flags & CON_NBCON) {
3583 printk_seq = nbcon_seq_read(con);
3584 } else {
3585 /*
3586 * It is safe to read @seq because only this
3587 * thread context updates @seq.
3588 */
3589 printk_seq = con->seq;
3590 }
3591
3592 if (prb_read_valid(prb, printk_seq, NULL)) {
3593 ret = true;
3594 break;
3595 }
3596 }
3597 console_srcu_read_unlock(cookie);
3598
3599 return ret;
3600 }
3601
3602 static int legacy_kthread_func(void *unused)
3603 {
3604 for (;;) {
3605 wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());
3606
3607 if (kthread_should_stop())
3608 break;
3609
3610 console_lock();
3611 __console_flush_and_unlock();
3612 }
3613
3614 return 0;
3615 }
3616
3617 static bool legacy_kthread_create(void)
3618 {
3619 struct task_struct *kt;
3620
3621 lockdep_assert_console_list_lock_held();
3622
3623 kt = kthread_run(legacy_kthread_func, NULL, "pr/legacy");
3624 if (WARN_ON(IS_ERR(kt))) {
3625 pr_err("failed to start legacy printing thread\n");
3626 return false;
3627 }
3628
3629 printk_legacy_kthread = kt;
3630
3631 /*
3632 * It is important that console printing threads are scheduled
3633 * shortly after a printk call and with generous runtime budgets.
3634 */
3635 sched_set_normal(printk_legacy_kthread, -20);
3636
3637 return true;
3638 }
3639
3640 /**
3641 * printk_kthreads_shutdown - shutdown all threaded printers
3642 *
3643 * On system shutdown all threaded printers are stopped. This allows printk
3644 * to transition back to atomic printing, thus providing a robust mechanism
3645 * for the final shutdown/reboot messages to be output.
3646 */
3647 static void printk_kthreads_shutdown(void)
3648 {
3649 struct console *con;
3650
3651 console_list_lock();
3652 if (printk_kthreads_running) {
3653 printk_kthreads_running = false;
3654
3655 for_each_console(con) {
3656 if (con->flags & CON_NBCON)
3657 nbcon_kthread_stop(con);
3658 }
3659
3660 /*
3661 * The threads may have been stopped while printing a
3662 * backlog. Flush any records left over.
3663 */
3664 nbcon_atomic_flush_pending();
3665 }
3666 console_list_unlock();
3667 }
3668
3669 static struct syscore_ops printk_syscore_ops = {
3670 .shutdown = printk_kthreads_shutdown,
3671 };
3672
3673 /*
3674 * If appropriate, start nbcon kthreads and set @printk_kthreads_running.
3675 * If any kthreads fail to start, those consoles are unregistered.
3676 *
3677 * Must be called under console_list_lock().
3678 */
3679 static void printk_kthreads_check_locked(void)
3680 {
3681 struct hlist_node *tmp;
3682 struct console *con;
3683
3684 lockdep_assert_console_list_lock_held();
3685
3686 if (!printk_kthreads_ready)
3687 return;
3688
3689 /* Start or stop the legacy kthread when needed. */
3690 if (have_legacy_console || have_boot_console) {
3691 if (!printk_legacy_kthread &&
3692 force_legacy_kthread() &&
3693 !legacy_kthread_create()) {
3694 /*
3695 * All legacy consoles must be unregistered. If there
3696 * are any nbcon consoles, they will set up their own
3697 * kthread.
3698 */
3699 hlist_for_each_entry_safe(con, tmp, &console_list, node) {
3700 if (con->flags & CON_NBCON)
3701 continue;
3702
3703 unregister_console_locked(con);
3704 }
3705 }
3706 } else if (printk_legacy_kthread) {
3707 kthread_stop(printk_legacy_kthread);
3708 printk_legacy_kthread = NULL;
3709 }
3710
3711 /*
3712 * Printer threads cannot be started as long as any boot console is
3713 * registered because there is no way to synchronize the hardware
3714 * registers between boot console code and regular console code.
3715 * It can only be known that there will be no new boot consoles when
3716 * an nbcon console is registered.
3717 */
3718 if (have_boot_console || !have_nbcon_console) {
3719 /* Clear flag in case all nbcon consoles unregistered. */
3720 printk_kthreads_running = false;
3721 return;
3722 }
3723
3724 if (printk_kthreads_running)
3725 return;
3726
3727 hlist_for_each_entry_safe(con, tmp, &console_list, node) {
3728 if (!(con->flags & CON_NBCON))
3729 continue;
3730
3731 if (!nbcon_kthread_create(con))
3732 unregister_console_locked(con);
3733 }
3734
3735 printk_kthreads_running = true;
3736 }
3737
3738 static int __init printk_set_kthreads_ready(void)
3739 {
3740 register_syscore_ops(&printk_syscore_ops);
3741
3742 console_list_lock();
3743 printk_kthreads_ready = true;
3744 printk_kthreads_check_locked();
3745 console_list_unlock();
3746
3747 return 0;
3748 }
3749 early_initcall(printk_set_kthreads_ready);
3750 #endif /* CONFIG_PRINTK */
3751
3752 static int __read_mostly keep_bootcon;
3753
3754 static int __init keep_bootcon_setup(char *str)
3755 {
3756 keep_bootcon = 1;
3757 pr_info("debug: skip boot console de-registration.\n");
3758
3759 return 0;
3760 }
3761
3762 early_param("keep_bootcon", keep_bootcon_setup);
3763
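/*
 * Illustrative sketch (not part of this file): "keep_bootcon" is a
 * plain boot-time flag, so enabling it only requires appending it to
 * the kernel command line, e.g. from the bootloader:
 *
 *	linux ... console=ttyS0,115200 keep_bootcon
 *
 * The boot console then stays registered even after a real console
 * takes over, which helps debug cases where the real console driver
 * itself misbehaves.
 */
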
3764 static int console_call_setup(struct console *newcon, char *options)
3765 {
3766 int err;
3767
3768 if (!newcon->setup)
3769 return 0;
3770
3771 /* Synchronize with possible boot console. */
3772 console_lock();
3773 err = newcon->setup(newcon, options);
3774 console_unlock();
3775
3776 return err;
3777 }
3778
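/*
 * Illustrative sketch of the setup() hook that console_call_setup()
 * invokes under the console lock. The names my_serial_setup() and
 * my_serial_probe_port() are hypothetical; a real driver would parse
 * @options (e.g. "115200n8" from console=ttyS0,115200n8) and program
 * its hardware accordingly:
 *
 *	static int my_serial_setup(struct console *co, char *options)
 *	{
 *		int baud = 9600, bits = 8, parity = 'n', flow = 'n';
 *
 *		if (options)
 *			uart_parse_options(options, &baud, &parity,
 *					   &bits, &flow);
 *		return my_serial_probe_port(co->index, baud, parity, bits);
 *	}
 *
 * A non-zero return here makes the caller skip the console, so setup()
 * doubles as a "does this hardware exist" probe.
 */
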
3779 /*
3780 * This is called by register_console() to try to match
3781 * the newly registered console with any of the ones selected
3782 * by either the command line or add_preferred_console() and
3783 * setup/enable it.
3784 *
3785 * Care needs to be taken with consoles that are statically
3786 * enabled, such as netconsole.
3787 */
3788 static int try_enable_preferred_console(struct console *newcon,
3789 bool user_specified)
3790 {
3791 struct console_cmdline *c;
3792 int i, err;
3793
3794 for (i = 0, c = console_cmdline;
3795 i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
3796 i++, c++) {
3797 /* Console not yet initialized? */
3798 if (!c->name[0])
3799 continue;
3800 if (c->user_specified != user_specified)
3801 continue;
3802 if (!newcon->match ||
3803 newcon->match(newcon, c->name, c->index, c->options) != 0) {
3804 /* default matching */
3805 BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
3806 if (strcmp(c->name, newcon->name) != 0)
3807 continue;
3808 if (newcon->index >= 0 &&
3809 newcon->index != c->index)
3810 continue;
3811 if (newcon->index < 0)
3812 newcon->index = c->index;
3813
3814 if (_braille_register_console(newcon, c))
3815 return 0;
3816
3817 err = console_call_setup(newcon, c->options);
3818 if (err)
3819 return err;
3820 }
3821 newcon->flags |= CON_ENABLED;
3822 if (i == preferred_console)
3823 newcon->flags |= CON_CONSDEV;
3824 return 0;
3825 }
3826
3827 /*
3828 * Some consoles, such as pstore and netconsole, can be enabled even
3829 * without matching. Accept the pre-enabled consoles only when match()
3830 * and setup() had a chance to be called.
3831 */
3832 if (newcon->flags & CON_ENABLED && c->user_specified == user_specified)
3833 return 0;
3834
3835 return -ENOENT;
3836 }
3837
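/*
 * Illustrative sketch: with a command line such as
 *
 *	console=ttyS0,115200 console=tty0
 *
 * two console_cmdline entries exist (name "ttyS", index 0, options
 * "115200", and name "tty", index 0). When the serial driver later
 * registers a console named "ttyS", the default matching above
 * compares names, copies the index, and hands "115200" to setup().
 * The last console= parameter becomes @preferred_console and is the
 * one that receives CON_CONSDEV.
 */
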
3838 /* Try to enable the console unconditionally */
3839 static void try_enable_default_console(struct console *newcon)
3840 {
3841 if (newcon->index < 0)
3842 newcon->index = 0;
3843
3844 if (console_call_setup(newcon, NULL) != 0)
3845 return;
3846
3847 newcon->flags |= CON_ENABLED;
3848
3849 if (newcon->device)
3850 newcon->flags |= CON_CONSDEV;
3851 }
3852
3853 /* Return the starting sequence number for a newly registered console. */
3854 static u64 get_init_console_seq(struct console *newcon, bool bootcon_registered)
3855 {
3856 struct console *con;
3857 bool handover;
3858 u64 init_seq;
3859
3860 if (newcon->flags & (CON_PRINTBUFFER | CON_BOOT)) {
3861 /* Get a consistent copy of @syslog_seq. */
3862 mutex_lock(&syslog_lock);
3863 init_seq = syslog_seq;
3864 mutex_unlock(&syslog_lock);
3865 } else {
3866 /* Begin with next message added to ringbuffer. */
3867 init_seq = prb_next_seq(prb);
3868
3869 /*
3870 * If any enabled boot consoles are due to be unregistered
3871 * shortly, some may not be caught up and may be the same
3872 * device as @newcon. Since it is not known which boot console
3873 * is the same device, flush all consoles and, if necessary,
3874 * start with the message of the enabled boot console that is
3875 * the furthest behind.
3876 */
3877 if (bootcon_registered && !keep_bootcon) {
3878 /*
3879 * Hold the console_lock to stop console printing and
3880 * guarantee safe access to console->seq.
3881 */
3882 console_lock();
3883
3884 /*
3885 * Flush all consoles and set the console to start at
3886 * the next unprinted sequence number.
3887 */
3888 if (!console_flush_all(true, &init_seq, &handover)) {
3889 /*
3890 * Flushing failed. Just choose the lowest
3891 * sequence of the enabled boot consoles.
3892 */
3893
3894 /*
3895 * If there was a handover, this context no
3896 * longer holds the console_lock.
3897 */
3898 if (handover)
3899 console_lock();
3900
3901 init_seq = prb_next_seq(prb);
3902 for_each_console(con) {
3903 u64 seq;
3904
3905 if (!(con->flags & CON_BOOT) ||
3906 !(con->flags & CON_ENABLED)) {
3907 continue;
3908 }
3909
3910 if (con->flags & CON_NBCON)
3911 seq = nbcon_seq_read(con);
3912 else
3913 seq = con->seq;
3914
3915 if (seq < init_seq)
3916 init_seq = seq;
3917 }
3918 }
3919
3920 console_unlock();
3921 }
3922 }
3923
3924 return init_seq;
3925 }
3926
3927 #define console_first() \
3928 hlist_entry(console_list.first, struct console, node)
3929
3930 static int unregister_console_locked(struct console *console);
3931
3932 /*
3933 * The console driver calls this routine during kernel initialization
3934 * to register the console printing procedure with printk() and to
3935 * print any messages that were printed by the kernel before the
3936 * console driver was initialized.
3937 *
3938 * This can happen pretty early during the boot process (because of
3939 * early_printk) - sometimes before setup_arch() completes - be careful
3940 * of what kernel features are used - they may not be initialised yet.
3941 *
3942 * There are two types of consoles - bootconsoles (early_printk) and
3943 * "real" consoles (everything which is not a bootconsole) which are
3944 * handled differently.
3945 * - Any number of bootconsoles can be registered at any time.
3946 * - As soon as a "real" console is registered, all bootconsoles
3947 * will be unregistered automatically.
3948 * - Once a "real" console is registered, any attempt to register a
3949 * bootconsole will be rejected.
3950 */
3951 void register_console(struct console *newcon)
3952 {
3953 bool use_device_lock = (newcon->flags & CON_NBCON) && newcon->write_atomic;
3954 bool bootcon_registered = false;
3955 bool realcon_registered = false;
3956 struct console *con;
3957 unsigned long flags;
3958 u64 init_seq;
3959 int err;
3960
3961 console_list_lock();
3962
3963 for_each_console(con) {
3964 if (WARN(con == newcon, "console '%s%d' already registered\n",
3965 con->name, con->index)) {
3966 goto unlock;
3967 }
3968
3969 if (con->flags & CON_BOOT)
3970 bootcon_registered = true;
3971 else
3972 realcon_registered = true;
3973 }
3974
3975 /* Do not register boot consoles when there already is a real one. */
3976 if ((newcon->flags & CON_BOOT) && realcon_registered) {
3977 pr_info("Too late to register bootconsole %s%d\n",
3978 newcon->name, newcon->index);
3979 goto unlock;
3980 }
3981
3982 if (newcon->flags & CON_NBCON) {
3983 /*
3984 * Ensure the nbcon console buffers can be allocated
3985 * before modifying any global data.
3986 */
3987 if (!nbcon_alloc(newcon))
3988 goto unlock;
3989 }
3990
3991 /*
3992 * See if we want to enable this console driver by default.
3993 *
3994 * Nope when a console is preferred by the command line, device
3995 * tree, or SPCR.
3996 *
3997 * The first real console with tty binding (driver) wins. More
3998 * consoles might get enabled before the right one is found.
3999 *
4000 * Note that a console with tty binding will have CON_CONSDEV
4001 * flag set and will be first in the list.
4002 */
4003 if (preferred_console < 0) {
4004 if (hlist_empty(&console_list) || !console_first()->device ||
4005 console_first()->flags & CON_BOOT) {
4006 try_enable_default_console(newcon);
4007 }
4008 }
4009
4010 /* See if this console matches one we selected on the command line */
4011 err = try_enable_preferred_console(newcon, true);
4012
4013 /* If not, try to match against the platform default(s) */
4014 if (err == -ENOENT)
4015 err = try_enable_preferred_console(newcon, false);
4016
4017 /* printk() messages are not printed to the Braille console. */
4018 if (err || newcon->flags & CON_BRL) {
4019 if (newcon->flags & CON_NBCON)
4020 nbcon_free(newcon);
4021 goto unlock;
4022 }
4023
4024 /*
4025 * If we have a bootconsole and are switching to a real console,
4026 * don't print everything out again, since when the boot console and
4027 * the real console are the same physical device, it's annoying to
4028 * see the beginning boot messages twice.
4029 */
4030 if (bootcon_registered &&
4031 ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
4032 newcon->flags &= ~CON_PRINTBUFFER;
4033 }
4034
4035 newcon->dropped = 0;
4036 init_seq = get_init_console_seq(newcon, bootcon_registered);
4037
4038 if (newcon->flags & CON_NBCON) {
4039 have_nbcon_console = true;
4040 nbcon_seq_force(newcon, init_seq);
4041 } else {
4042 have_legacy_console = true;
4043 newcon->seq = init_seq;
4044 }
4045
4046 if (newcon->flags & CON_BOOT)
4047 have_boot_console = true;
4048
4049 /*
4050 * If another context is actively using the hardware of this new
4051 * console, it will not be aware of the nbcon synchronization. There
4052 * is a risk that two contexts could access the hardware
4053 * simultaneously if this new console is used for atomic printing
4054 * and the other context is still using the hardware.
4055 *
4056 * Use the driver synchronization to ensure that the hardware is not
4057 * in use while this new console transitions to being registered.
4058 */
4059 if (use_device_lock)
4060 newcon->device_lock(newcon, &flags);
4061
4062 /*
4063 * Put this console in the list - keep the
4064 * preferred driver at the head of the list.
4065 */
4066 if (hlist_empty(&console_list)) {
4067 /* Ensure CON_CONSDEV is always set for the head. */
4068 newcon->flags |= CON_CONSDEV;
4069 hlist_add_head_rcu(&newcon->node, &console_list);
4070
4071 } else if (newcon->flags & CON_CONSDEV) {
4072 /* Only the new head can have CON_CONSDEV set. */
4073 console_srcu_write_flags(console_first(), console_first()->flags & ~CON_CONSDEV);
4074 hlist_add_head_rcu(&newcon->node, &console_list);
4075
4076 } else {
4077 hlist_add_behind_rcu(&newcon->node, console_list.first);
4078 }
4079
4080 /*
4081 * No need to synchronize SRCU here! The caller does not rely
4082 * on all contexts being able to see the new console before
4083 * register_console() completes.
4084 */
4085
4086 /* This new console is now registered. */
4087 if (use_device_lock)
4088 newcon->device_unlock(newcon, flags);
4089
4090 console_sysfs_notify();
4091
4092 /*
4093 * By unregistering the bootconsoles after we enable the real console
4094 * we get the "console xxx enabled" message on all the consoles -
4095 * boot consoles, real consoles, etc - this is to ensure that end
4096 * users know there might be something in the kernel's log buffer that
4097 * went to the bootconsole (that they do not see on the real console)
4098 */
4099 con_printk(KERN_INFO, newcon, "enabled\n");
4100 if (bootcon_registered &&
4101 ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
4102 !keep_bootcon) {
4103 struct hlist_node *tmp;
4104
4105 hlist_for_each_entry_safe(con, tmp, &console_list, node) {
4106 if (con->flags & CON_BOOT)
4107 unregister_console_locked(con);
4108 }
4109 }
4110
4111 /* Changed console list, may require printer threads to start/stop. */
4112 printk_kthreads_check_locked();
4113 unlock:
4114 console_list_unlock();
4115 }
4116 EXPORT_SYMBOL(register_console);
4117
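/*
 * Illustrative sketch of a minimal legacy console client; all "my_*"
 * names are hypothetical and not part of this file. CON_PRINTBUFFER
 * requests a replay of the ringbuffer from @syslog_seq:
 *
 *	static void my_console_write(struct console *co, const char *s,
 *				     unsigned int count)
 *	{
 *		my_hw_emit(s, count);	// hypothetical hardware output
 *	}
 *
 *	static struct console my_console = {
 *		.name	= "mycon",
 *		.write	= my_console_write,
 *		.flags	= CON_PRINTBUFFER,
 *		.index	= -1,
 *	};
 *
 *	register_console(&my_console);
 *	...
 *	unregister_console(&my_console);
 */
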
4118 /* Must be called under console_list_lock(). */
4119 static int unregister_console_locked(struct console *console)
4120 {
4121 bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
4122 bool found_legacy_con = false;
4123 bool found_nbcon_con = false;
4124 bool found_boot_con = false;
4125 unsigned long flags;
4126 struct console *c;
4127 int res;
4128
4129 lockdep_assert_console_list_lock_held();
4130
4131 con_printk(KERN_INFO, console, "disabled\n");
4132
4133 res = _braille_unregister_console(console);
4134 if (res < 0)
4135 return res;
4136 if (res > 0)
4137 return 0;
4138
4139 if (!console_is_registered_locked(console))
4140 res = -ENODEV;
4141 else if (console_is_usable(console, console->flags, true))
4142 __pr_flush(console, 1000, true);
4143
4144 /* Disable it unconditionally */
4145 console_srcu_write_flags(console, console->flags & ~CON_ENABLED);
4146
4147 if (res < 0)
4148 return res;
4149
4150 /*
4151 * Use the driver synchronization to ensure that the hardware is not
4152 * in use while this console transitions to being unregistered.
4153 */
4154 if (use_device_lock)
4155 console->device_lock(console, &flags);
4156
4157 hlist_del_init_rcu(&console->node);
4158
4159 if (use_device_lock)
4160 console->device_unlock(console, flags);
4161
4162 /*
4163 * <HISTORICAL>
4164 * If this isn't the last console and it has CON_CONSDEV set, we
4165 * need to set it on the next preferred console.
4166 * </HISTORICAL>
4167 *
4168 * The above makes no sense as there is no guarantee that the next
4169 * console has any device attached. Oh well....
4170 */
4171 if (!hlist_empty(&console_list) && console->flags & CON_CONSDEV)
4172 console_srcu_write_flags(console_first(), console_first()->flags | CON_CONSDEV);
4173
4174 /*
4175 * Ensure that all SRCU list walks have completed. All contexts
4176 * must not be able to see this console in the list so that any
4177 * exit/cleanup routines can be performed safely.
4178 */
4179 synchronize_srcu(&console_srcu);
4180
4181 /*
4182 * With this console gone, the global flags tracking registered
4183 * console types may have changed. Update them.
4184 */
4185 for_each_console(c) {
4186 if (c->flags & CON_BOOT)
4187 found_boot_con = true;
4188
4189 if (c->flags & CON_NBCON)
4190 found_nbcon_con = true;
4191 else
4192 found_legacy_con = true;
4193 }
4194 if (!found_boot_con)
4195 have_boot_console = found_boot_con;
4196 if (!found_legacy_con)
4197 have_legacy_console = found_legacy_con;
4198 if (!found_nbcon_con)
4199 have_nbcon_console = found_nbcon_con;
4200
4201 /* @have_nbcon_console must be updated before calling nbcon_free(). */
4202 if (console->flags & CON_NBCON)
4203 nbcon_free(console);
4204
4205 console_sysfs_notify();
4206
4207 if (console->exit)
4208 res = console->exit(console);
4209
4210 /* Changed console list, may require printer threads to start/stop. */
4211 printk_kthreads_check_locked();
4212
4213 return res;
4214 }
4215
4216 int unregister_console(struct console *console)
4217 {
4218 int res;
4219
4220 console_list_lock();
4221 res = unregister_console_locked(console);
4222 console_list_unlock();
4223 return res;
4224 }
4225 EXPORT_SYMBOL(unregister_console);
4226
4227 /**
4228 * console_force_preferred_locked - force a registered console preferred
4229 * @con: The registered console to force preferred.
4230 *
4231 * Must be called under console_list_lock().
4232 */
4233 void console_force_preferred_locked(struct console *con)
4234 {
4235 struct console *cur_pref_con;
4236
4237 if (!console_is_registered_locked(con))
4238 return;
4239
4240 cur_pref_con = console_first();
4241
4242 /* Already preferred? */
4243 if (cur_pref_con == con)
4244 return;
4245
4246 /*
4247 * Delete, but do not re-initialize the entry. This allows the console
4248 * to continue to appear registered (via any hlist_unhashed_lockless()
4249 * checks), even though it was briefly removed from the console list.
4250 */
4251 hlist_del_rcu(&con->node);
4252
4253 /*
4254 * Ensure that all SRCU list walks have completed so that the console
4255 * can be added to the beginning of the console list and its forward
4256 * list pointer can be re-initialized.
4257 */
4258 synchronize_srcu(&console_srcu);
4259
4260 con->flags |= CON_CONSDEV;
4261 WARN_ON(!con->device);
4262
4263 /* Only the new head can have CON_CONSDEV set. */
4264 console_srcu_write_flags(cur_pref_con, cur_pref_con->flags & ~CON_CONSDEV);
4265 hlist_add_head_rcu(&con->node, &console_list);
4266 }
4267 EXPORT_SYMBOL(console_force_preferred_locked);
4268
4269 /*
4270 * Initialize the console device. This is called *early*, so
4271 * we can't necessarily depend on lots of kernel help here.
4272 * Just do some early initializations, and do the complex setup
4273 * later.
4274 */
4275 void __init console_init(void)
4276 {
4277 int ret;
4278 initcall_t call;
4279 initcall_entry_t *ce;
4280
4281 #ifdef CONFIG_NULL_TTY_DEFAULT_CONSOLE
4282 if (!console_set_on_cmdline)
4283 add_preferred_console("ttynull", 0, NULL);
4284 #endif
4285
4286 /* Setup the default TTY line discipline. */
4287 n_tty_init();
4288
4289 /*
4290 * set up the console device so that later boot sequences can
4291 * inform about problems etc..
4292 */
4293 ce = __con_initcall_start;
4294 trace_initcall_level("console");
4295 while (ce < __con_initcall_end) {
4296 call = initcall_from_entry(ce);
4297 trace_initcall_start(call);
4298 ret = call();
4299 trace_initcall_finish(call, ret);
4300 ce++;
4301 }
4302 }
4303
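/*
 * Illustrative sketch: the entries walked above are emitted by drivers
 * via the console_initcall() macro, for example:
 *
 *	static int __init my_console_initcall(void)
 *	{
 *		register_console(&my_console);	// see sketch above
 *		return 0;
 *	}
 *	console_initcall(my_console_initcall);
 *
 * console_initcall() records the function in the .con_initcall
 * section, i.e. between __con_initcall_start and __con_initcall_end.
 */
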
4304 /*
4305 * Some boot consoles access data that is in the init section and which will
4306 * be discarded after the initcalls have been run. To make sure that no code
4307 * will access this data, unregister the boot consoles in a late initcall.
4308 *
4309 * If for some reason, such as deferred probe or the driver being a loadable
4310 * module, the real console hasn't registered yet at this point, there will
4311 * be a brief interval in which no messages are logged to the console, which
4312 * makes it difficult to diagnose problems that occur during this time.
4313 *
4314 * To mitigate this problem somewhat, only unregister consoles whose memory
4315 * intersects with the init section. Note that all other boot consoles will
4316 * get unregistered when the real preferred console is registered.
4317 */
4318 static int __init printk_late_init(void)
4319 {
4320 struct hlist_node *tmp;
4321 struct console *con;
4322 int ret;
4323
4324 console_list_lock();
4325 hlist_for_each_entry_safe(con, tmp, &console_list, node) {
4326 if (!(con->flags & CON_BOOT))
4327 continue;
4328
4329 /* Check addresses that might be used for enabled consoles. */
4330 if (init_section_intersects(con, sizeof(*con)) ||
4331 init_section_contains(con->write, 0) ||
4332 init_section_contains(con->read, 0) ||
4333 init_section_contains(con->device, 0) ||
4334 init_section_contains(con->unblank, 0) ||
4335 init_section_contains(con->data, 0)) {
4336 /*
4337 * Please, consider moving the reported consoles out
4338 * of the init section.
4339 */
4340 pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n",
4341 con->name, con->index);
4342 unregister_console_locked(con);
4343 }
4344 }
4345 console_list_unlock();
4346
4347 ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
4348 console_cpu_notify);
4349 WARN_ON(ret < 0);
4350 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
4351 console_cpu_notify, NULL);
4352 WARN_ON(ret < 0);
4353 printk_sysctl_init();
4354 return 0;
4355 }
4356 late_initcall(printk_late_init);
4357
4358 #if defined CONFIG_PRINTK
4359 /* If @con is specified, only wait for that console. Otherwise wait for all. */
4360 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
4361 {
4362 unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms);
4363 unsigned long remaining_jiffies = timeout_jiffies;
4364 struct console_flush_type ft;
4365 struct console *c;
4366 u64 last_diff = 0;
4367 u64 printk_seq;
4368 short flags;
4369 int cookie;
4370 u64 diff;
4371 u64 seq;
4372
4373 /* Sorry, pr_flush() will not work this early. */
4374 if (system_state < SYSTEM_SCHEDULING)
4375 return false;
4376
4377 might_sleep();
4378
4379 seq = prb_next_reserve_seq(prb);
4380
4381 /* Flush the consoles so that records up to @seq are printed. */
4382 printk_get_console_flush_type(&ft);
4383 if (ft.nbcon_atomic)
4384 nbcon_atomic_flush_pending();
4385 if (ft.legacy_direct) {
4386 console_lock();
4387 console_unlock();
4388 }
4389
4390 for (;;) {
4391 unsigned long begin_jiffies;
4392 unsigned long slept_jiffies;
4393
4394 diff = 0;
4395
4396 /*
4397 * Hold the console_lock to guarantee safe access to
4398 * console->seq. Releasing console_lock flushes more
4399 * records in case @seq is still not printed on all
4400 * usable consoles.
4401 *
4402 * Holding the console_lock is not necessary if there
4403 * are no legacy or boot consoles. However, such a
4404 * console could register at any time. Always hold the
4405 * console_lock as a precaution rather than
4406 * synchronizing against register_console().
4407 */
4408 console_lock();
4409
4410 cookie = console_srcu_read_lock();
4411 for_each_console_srcu(c) {
4412 if (con && con != c)
4413 continue;
4414
4415 flags = console_srcu_read_flags(c);
4416
4417 /*
4418 * If consoles are not usable, it cannot be expected
4419 * that they make forward progress, so only increment
4420 * @diff for usable consoles.
4421 */
4422 if (!console_is_usable(c, flags, true) &&
4423 !console_is_usable(c, flags, false)) {
4424 continue;
4425 }
4426
4427 if (flags & CON_NBCON) {
4428 printk_seq = nbcon_seq_read(c);
4429 } else {
4430 printk_seq = c->seq;
4431 }
4432
4433 if (printk_seq < seq)
4434 diff += seq - printk_seq;
4435 }
4436 console_srcu_read_unlock(cookie);
4437
4438 if (diff != last_diff && reset_on_progress)
4439 remaining_jiffies = timeout_jiffies;
4440
4441 console_unlock();
4442
4443 /* Note: @diff is 0 if there are no usable consoles. */
4444 if (diff == 0 || remaining_jiffies == 0)
4445 break;
4446
4447 /* msleep(1) might sleep much longer. Check time by jiffies. */
4448 begin_jiffies = jiffies;
4449 msleep(1);
4450 slept_jiffies = jiffies - begin_jiffies;
4451
4452 remaining_jiffies -= min(slept_jiffies, remaining_jiffies);
4453
4454 last_diff = diff;
4455 }
4456
4457 return (diff == 0);
4458 }
4459
4460 /**
4461 * pr_flush() - Wait for printing threads to catch up.
4462 *
4463 * @timeout_ms: The maximum time (in ms) to wait.
4464 * @reset_on_progress: Reset the timeout if forward progress is seen.
4465 *
4466 * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
4467 * represents infinite waiting.
4468 *
4469 * If @reset_on_progress is true, the timeout will be reset whenever any
4470 * printer has been seen to make some forward progress.
4471 *
4472 * Context: Process context. May sleep while acquiring console lock.
4473 * Return: true if all usable printers are caught up.
4474 */
4475 bool pr_flush(int timeout_ms, bool reset_on_progress)
4476 {
4477 return __pr_flush(NULL, timeout_ms, reset_on_progress);
4478 }
4479
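/*
 * Illustrative sketch: a typical pr_flush() call site gives consoles a
 * bounded window to drain before a point of no return, e.g.:
 *
 *	pr_emerg("about to reset the machine\n");
 *	pr_flush(1000, true);	// wait up to 1s, reset timeout on progress
 *	machine_restart(NULL);
 *
 * machine_restart() is only an example of an operation that may make
 * consoles unreachable; any such caller can use the same pattern.
 */
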
4480 /*
4481 * Delayed printk version, for scheduler-internal messages:
4482 */
4483 #define PRINTK_PENDING_WAKEUP 0x01
4484 #define PRINTK_PENDING_OUTPUT 0x02
4485
4486 static DEFINE_PER_CPU(int, printk_pending);
4487
4488 static void wake_up_klogd_work_func(struct irq_work *irq_work)
4489 {
4490 int pending = this_cpu_xchg(printk_pending, 0);
4491
4492 if (pending & PRINTK_PENDING_OUTPUT) {
4493 if (force_legacy_kthread()) {
4494 if (printk_legacy_kthread)
4495 wake_up_interruptible(&legacy_wait);
4496 } else {
4497 if (console_trylock())
4498 console_unlock();
4499 }
4500 }
4501
4502 if (pending & PRINTK_PENDING_WAKEUP)
4503 wake_up_interruptible(&log_wait);
4504 }
4505
4506 static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
4507 IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);
4508
4509 static void __wake_up_klogd(int val)
4510 {
4511 if (!printk_percpu_data_ready())
4512 return;
4513
4514 preempt_disable();
4515 /*
4516 * Guarantee any new records can be seen by tasks preparing to wait
4517 * before this context checks if the wait queue is empty.
4518 *
4519 * The full memory barrier within wq_has_sleeper() pairs with the full
4520 * memory barrier within set_current_state() of
4521 * prepare_to_wait_event(), which is called after ___wait_event() adds
4522 * the waiter but before it has checked the wait condition.
4523 *
4524 * This pairs with devkmsg_read:A and syslog_print:A.
4525 */
4526 if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
4527 (val & PRINTK_PENDING_OUTPUT)) {
4528 this_cpu_or(printk_pending, val);
4529 irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
4530 }
4531 preempt_enable();
4532 }
4533
4534 /**
4535 * wake_up_klogd - Wake kernel logging daemon
4536 *
4537 * Use this function when new records have been added to the ringbuffer
4538 * and the console printing of those records has already occurred or is
4539 * known to be handled by some other context. This function will only
4540 * wake the logging daemon.
4541 *
4542 * Context: Any context.
4543 */
4544 void wake_up_klogd(void)
4545 {
4546 __wake_up_klogd(PRINTK_PENDING_WAKEUP);
4547 }
4548
4549 /**
4550 * defer_console_output - Wake kernel logging daemon and trigger
4551 * console printing in a deferred context
4552 *
4553 * Use this function when new records have been added to the ringbuffer
4554 * and this context is responsible for console printing those records, but
4555 * the current context is not allowed to perform the console printing.
4556 * Trigger an irq_work context to perform the console printing. This
4557 * function also wakes the logging daemon.
4558 *
4559 * Context: Any context.
4560 */
4561 void defer_console_output(void)
4562 {
4563 /*
4564 * New messages may have been added directly to the ringbuffer
4565 * using vprintk_store(), so wake any waiters as well.
4566 */
4567 __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
4568 }
4569
4570 void printk_trigger_flush(void)
4571 {
4572 defer_console_output();
4573 }
4574
4575 int vprintk_deferred(const char *fmt, va_list args)
4576 {
4577 return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
4578 }
4579
4580 int _printk_deferred(const char *fmt, ...)
4581 {
4582 va_list args;
4583 int r;
4584
4585 va_start(args, fmt);
4586 r = vprintk_deferred(fmt, args);
4587 va_end(args);
4588
4589 return r;
4590 }
4591
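/*
 * Illustrative sketch: code running under the scheduler's internal
 * locks must not print directly (console drivers may take those same
 * locks), so it uses the deferred variant, which only stores the
 * record and triggers irq_work. The message and variable are made up:
 *
 *	printk_deferred(KERN_WARNING "runqueue imbalance on CPU%d\n", cpu);
 *
 * The printk_deferred() macro expands to _printk_deferred() above.
 */
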
4592 /*
4593 * printk rate limiting, lifted from the networking subsystem.
4594 *
4595 * This enforces a rate limit: not more than 10 kernel messages
4596 * every 5s to make a denial-of-service attack impossible.
4597 */
4598 DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
4599
4600 int __printk_ratelimit(const char *func)
4601 {
4602 return ___ratelimit(&printk_ratelimit_state, func);
4603 }
4604 EXPORT_SYMBOL(__printk_ratelimit);
4605
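/*
 * Illustrative sketch: __printk_ratelimit() is normally reached through
 * the printk_ratelimit() macro, which passes __func__ so suppressed
 * callbacks can be attributed:
 *
 *	if (printk_ratelimit())
 *		pr_warn("dropping malformed packet\n");
 *
 * Note the shared global state: unrelated call sites throttle each
 * other, which is why per-site limiting (printk_ratelimited() or a
 * local DEFINE_RATELIMIT_STATE) is usually preferred in new code.
 */
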
4606 /**
4607 * printk_timed_ratelimit - caller-controlled printk ratelimiting
4608 * @caller_jiffies: pointer to caller's state
4609 * @interval_msecs: minimum interval between prints
4610 *
4611 * printk_timed_ratelimit() returns true if more than @interval_msecs
4612 * milliseconds have elapsed since the last time printk_timed_ratelimit()
4613 * returned true.
4614 */
4615 bool printk_timed_ratelimit(unsigned long *caller_jiffies,
4616 unsigned int interval_msecs)
4617 {
4618 unsigned long elapsed = jiffies - *caller_jiffies;
4619
4620 if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
4621 return false;
4622
4623 *caller_jiffies = jiffies;
4624 return true;
4625 }
4626 EXPORT_SYMBOL(printk_timed_ratelimit);
4627
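/*
 * Illustrative sketch: the caller owns the state, so every call site
 * can use its own interval. "last_complaint" and the message are made
 * up for the example:
 *
 *	static unsigned long last_complaint;
 *
 *	if (printk_timed_ratelimit(&last_complaint, 5000))
 *		pr_info("device still busy\n");
 *
 * The pattern is one static unsigned long per message to be throttled.
 */
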
4628 static DEFINE_SPINLOCK(dump_list_lock);
4629 static LIST_HEAD(dump_list);
4630
4631 /**
4632 * kmsg_dump_register - register a kernel log dumper.
4633 * @dumper: pointer to the kmsg_dumper structure
4634 *
4635 * Adds a kernel log dumper to the system. The dump callback in the
4636 * structure must be set; it will be called when the kernel oopses or
4637 * panics. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
4638 */
4639 int kmsg_dump_register(struct kmsg_dumper *dumper)
4640 {
4641 unsigned long flags;
4642 int err = -EBUSY;
4643
4644 /* The dump callback needs to be set */
4645 if (!dumper->dump)
4646 return -EINVAL;
4647
4648 spin_lock_irqsave(&dump_list_lock, flags);
4649 /* Don't allow registering multiple times */
4650 if (!dumper->registered) {
4651 dumper->registered = 1;
4652 list_add_tail_rcu(&dumper->list, &dump_list);
4653 err = 0;
4654 }
4655 spin_unlock_irqrestore(&dump_list_lock, flags);
4656
4657 return err;
4658 }
4659 EXPORT_SYMBOL_GPL(kmsg_dump_register);
4660
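/*
 * Illustrative sketch of a dumper client; my_dump() and my_dumper are
 * hypothetical. The callback receives a struct kmsg_dump_detail
 * describing the reason and an optional description:
 *
 *	static void my_dump(struct kmsg_dumper *dumper,
 *			    struct kmsg_dump_detail *detail)
 *	{
 *		// iterate records here, see kmsg_dump_get_line() below
 *	}
 *
 *	static struct kmsg_dumper my_dumper = {
 *		.dump	    = my_dump,
 *		.max_reason = KMSG_DUMP_PANIC,
 *	};
 *
 *	kmsg_dump_register(&my_dumper);
 */
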
4661 /**
4662 * kmsg_dump_unregister - unregister a kmsg dumper.
4663 * @dumper: pointer to the kmsg_dumper structure
4664 *
4665 * Removes a dump device from the system. Returns zero on success and
4666 * %-EINVAL otherwise.
4667 */
4668 int kmsg_dump_unregister(struct kmsg_dumper *dumper)
4669 {
4670 unsigned long flags;
4671 int err = -EINVAL;
4672
4673 spin_lock_irqsave(&dump_list_lock, flags);
4674 if (dumper->registered) {
4675 dumper->registered = 0;
4676 list_del_rcu(&dumper->list);
4677 err = 0;
4678 }
4679 spin_unlock_irqrestore(&dump_list_lock, flags);
4680 synchronize_rcu();
4681
4682 return err;
4683 }
4684 EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
4685
4686 static bool always_kmsg_dump;
4687 module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
4688
4689 const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
4690 {
4691 switch (reason) {
4692 case KMSG_DUMP_PANIC:
4693 return "Panic";
4694 case KMSG_DUMP_OOPS:
4695 return "Oops";
4696 case KMSG_DUMP_EMERG:
4697 return "Emergency";
4698 case KMSG_DUMP_SHUTDOWN:
4699 return "Shutdown";
4700 default:
4701 return "Unknown";
4702 }
4703 }
4704 EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
4705
4706 /**
4707 * kmsg_dump_desc - dump kernel log to kernel message dumpers.
4708 * @reason: the reason (oops, panic etc) for dumping
4709 * @desc: a short string to describe what caused the panic or oops. Can be NULL
4710 * if no additional description is available.
4711 *
4712 * Call each registered dumper's dump() callback, which can
4713 * retrieve the kmsg records with kmsg_dump_get_line() or
4714 * kmsg_dump_get_buffer().
4715 */
4716 void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
4717 {
4718 struct kmsg_dumper *dumper;
4719 struct kmsg_dump_detail detail = {
4720 .reason = reason,
4721 .description = desc};
4722
4723 rcu_read_lock();
4724 list_for_each_entry_rcu(dumper, &dump_list, list) {
4725 enum kmsg_dump_reason max_reason = dumper->max_reason;
4726
4727 /*
4728 * If client has not provided a specific max_reason, default
4729 * to KMSG_DUMP_OOPS, unless always_kmsg_dump was set.
4730 */
4731 if (max_reason == KMSG_DUMP_UNDEF) {
4732 max_reason = always_kmsg_dump ? KMSG_DUMP_MAX :
4733 KMSG_DUMP_OOPS;
4734 }
4735 if (reason > max_reason)
4736 continue;
4737
4738 /* invoke dumper which will iterate over records */
4739 dumper->dump(dumper, &detail);
4740 }
4741 rcu_read_unlock();
4742 }
4743
4744 /**
4745 * kmsg_dump_get_line - retrieve one kmsg log line
4746 * @iter: kmsg dump iterator
4747 * @syslog: include the "<4>" prefixes
4748 * @line: buffer to copy the line to
4749 * @size: maximum size of the buffer
4750 * @len: length of line placed into buffer
4751 *
4752 * Start at the beginning of the kmsg buffer, with the oldest kmsg
4753 * record, and copy one record into the provided buffer.
4754 *
4755 * Consecutive calls will return the next available record moving
4756 * towards the end of the buffer with the youngest messages.
4757 *
4758 * A return value of FALSE indicates that there are no more records to
4759 * read.
4760 */
4761 bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
4762 char *line, size_t size, size_t *len)
4763 {
4764 u64 min_seq = latched_seq_read_nolock(&clear_seq);
4765 struct printk_info info;
4766 unsigned int line_count;
4767 struct printk_record r;
4768 size_t l = 0;
4769 bool ret = false;
4770
4771 if (iter->cur_seq < min_seq)
4772 iter->cur_seq = min_seq;
4773
4774 prb_rec_init_rd(&r, &info, line, size);
4775
4776 /* Read text or count text lines? */
4777 if (line) {
4778 if (!prb_read_valid(prb, iter->cur_seq, &r))
4779 goto out;
4780 l = record_print_text(&r, syslog, printk_time);
4781 } else {
4782 if (!prb_read_valid_info(prb, iter->cur_seq,
4783 &info, &line_count)) {
4784 goto out;
4785 }
4786 l = get_record_print_text_size(&info, line_count, syslog,
4787 printk_time);
4788
4789 }
4790
4791 iter->cur_seq = r.info->seq + 1;
4792 ret = true;
4793 out:
4794 if (len)
4795 *len = l;
4796 return ret;
4797 }
4798 EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
4799
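/*
 * Illustrative sketch of the line-by-line iteration pattern inside a
 * dump() callback; the buffer size and my_persist_bytes() are made up:
 *
 *	struct kmsg_dump_iter iter;
 *	char line[200];
 *	size_t len;
 *
 *	kmsg_dump_rewind(&iter);
 *	while (kmsg_dump_get_line(&iter, false, line, sizeof(line), &len))
 *		my_persist_bytes(line, len);
 *
 * Passing a NULL @line only computes the formatted length, which a
 * caller can use to size a buffer before copying.
 */
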
4800 /**
4801 * kmsg_dump_get_buffer - copy kmsg log lines
4802 * @iter: kmsg dump iterator
4803 * @syslog: include the "<4>" prefixes
4804 * @buf: buffer to copy the line to
4805 * @size: maximum size of the buffer
4806 * @len_out: length of line placed into buffer
4807 *
4808 * Start at the end of the kmsg buffer and fill the provided buffer
4809 * with as many of the *youngest* kmsg records that fit into it.
4810 * If the buffer is large enough, all available kmsg records will be
4811 * copied with a single call.
4812 *
4813 * Consecutive calls will fill the buffer with the next block of
4814 * available older records, not including the earlier retrieved ones.
4815 *
4816 * A return value of FALSE indicates that there are no more records to
4817 * read.
4818 */
4819 bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
4820 char *buf, size_t size, size_t *len_out)
4821 {
4822 u64 min_seq = latched_seq_read_nolock(&clear_seq);
4823 struct printk_info info;
4824 struct printk_record r;
4825 u64 seq;
4826 u64 next_seq;
4827 size_t len = 0;
4828 bool ret = false;
4829 bool time = printk_time;
4830
4831 if (!buf || !size)
4832 goto out;
4833
4834 if (iter->cur_seq < min_seq)
4835 iter->cur_seq = min_seq;
4836
4837 if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
4838 if (info.seq != iter->cur_seq) {
4839 /* messages are gone, move to first available one */
4840 iter->cur_seq = info.seq;
4841 }
4842 }
4843
4844 /* last entry */
4845 if (iter->cur_seq >= iter->next_seq)
4846 goto out;
4847
4848 /*
4849 * Find first record that fits, including all following records,
4850 * into the user-provided buffer for this dump. Pass in size-1
4851 * because this function (by way of record_print_text()) will
4852 * not write more than size-1 bytes of text into @buf.
4853 */
4854 seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
4855 size - 1, syslog, time);
4856
4857 /*
4858 * Next kmsg_dump_get_buffer() invocation will dump block of
4859 * older records stored right before this one.
4860 */
4861 next_seq = seq;
4862
4863 prb_rec_init_rd(&r, &info, buf, size);
4864
4865 prb_for_each_record(seq, prb, seq, &r) {
4866 if (r.info->seq >= iter->next_seq)
4867 break;
4868
4869 len += record_print_text(&r, syslog, time);
4870
4871 /* Adjust record to store to remaining buffer space. */
4872 prb_rec_init_rd(&r, &info, buf + len, size - len);
4873 }
4874
4875 iter->next_seq = next_seq;
4876 ret = true;
4877 out:
4878 if (len_out)
4879 *len_out = len;
4880 return ret;
4881 }
4882 EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
4883
4884 /**
4885 * kmsg_dump_rewind - reset the iterator
4886 * @iter: kmsg dump iterator
4887 *
4888 * Reset the dumper's iterator so that kmsg_dump_get_line() and
4889 * kmsg_dump_get_buffer() can be called again and used multiple
4890 * times within the same dumper.dump() callback.
4891 */
4892 void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
4893 {
4894 iter->cur_seq = latched_seq_read_nolock(&clear_seq);
4895 iter->next_seq = prb_next_seq(prb);
4896 }
4897 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
4898
4899 /**
4900 * console_try_replay_all - try to replay kernel log on consoles
4901 *
4902 * Try to obtain the lock on the console subsystem and replay all
4903 * available records in the printk buffer on the consoles.
4904 * Does nothing if the lock cannot be obtained.
4905 *
4906 * Context: Any, except for NMI.
4907 */
4908 void console_try_replay_all(void)
4909 {
4910 struct console_flush_type ft;
4911
4912 printk_get_console_flush_type(&ft);
4913 if (console_trylock()) {
4914 __console_rewind_all();
4915 if (ft.nbcon_atomic)
4916 nbcon_atomic_flush_pending();
4917 if (ft.nbcon_offload)
4918 nbcon_kthreads_wake();
4919 if (ft.legacy_offload)
4920 defer_console_output();
4921 /* Consoles are flushed as part of console_unlock(). */
4922 console_unlock();
4923 }
4924 }
4925 #endif
4926
4927 #ifdef CONFIG_SMP
4928 static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
4929 static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
4930
4931 bool is_printk_cpu_sync_owner(void)
4932 {
4933 return (atomic_read(&printk_cpu_sync_owner) == raw_smp_processor_id());
4934 }
4935
4936 /**
4937 * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
4938 * spinning lock is not owned by any CPU.
4939 *
4940 * Context: Any context.
4941 */
4942 void __printk_cpu_sync_wait(void)
4943 {
4944 do {
4945 cpu_relax();
4946 } while (atomic_read(&printk_cpu_sync_owner) != -1);
4947 }
4948 EXPORT_SYMBOL(__printk_cpu_sync_wait);
4949
4950 /**
4951 * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
4952 * spinning lock.
4953 *
4954 * If no processor has the lock, the calling processor takes the lock and
4955 * becomes the owner. If the calling processor is already the owner of the
4956 * lock, this function succeeds immediately.
4957 *
4958 * Context: Any context. Expects interrupts to be disabled.
4959 * Return: 1 on success, otherwise 0.
4960 */
4961 int __printk_cpu_sync_try_get(void)
4962 {
4963 int cpu;
4964 int old;
4965
4966 cpu = smp_processor_id();
4967
4968 /*
4969 * Guarantee loads and stores from this CPU when it is the lock owner
4970 * are _not_ visible to the previous lock owner. This pairs with
4971 * __printk_cpu_sync_put:B.
4972 *
4973 * Memory barrier involvement:
4974 *
4975 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
4976 * then __printk_cpu_sync_put:A can never read from
4977 * __printk_cpu_sync_try_get:B.
4978 *
4979 * Relies on:
4980 *
4981 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
4982 * of the previous CPU
4983 * matching
4984 * ACQUIRE from __printk_cpu_sync_try_get:A to
4985 * __printk_cpu_sync_try_get:B of this CPU
4986 */
4987 old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
4988 cpu); /* LMM(__printk_cpu_sync_try_get:A) */
4989 if (old == -1) {
4990 /*
4991 * This CPU is now the owner and begins loading/storing
4992 * data: LMM(__printk_cpu_sync_try_get:B)
4993 */
4994 return 1;
4995
4996 } else if (old == cpu) {
4997 /* This CPU is already the owner. */
4998 atomic_inc(&printk_cpu_sync_nested);
4999 return 1;
5000 }
5001
5002 return 0;
5003 }
5004 EXPORT_SYMBOL(__printk_cpu_sync_try_get);
5005
5006 /**
5007 * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
5008 *
5009 * The calling processor must be the owner of the lock.
5010 *
5011 * Context: Any context. Expects interrupts to be disabled.
5012 */
5013 void __printk_cpu_sync_put(void)
5014 {
5015 if (atomic_read(&printk_cpu_sync_nested)) {
5016 atomic_dec(&printk_cpu_sync_nested);
5017 return;
5018 }
5019
5020 /*
5021 * This CPU is finished loading/storing data:
5022 * LMM(__printk_cpu_sync_put:A)
5023 */
5024
5025 /*
5026 * Guarantee loads and stores from this CPU when it was the
5027 * lock owner are visible to the next lock owner. This pairs
5028 * with __printk_cpu_sync_try_get:A.
5029 *
5030 * Memory barrier involvement:
5031 *
5032 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
5033 * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
5034 *
5035 * Relies on:
5036 *
5037 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
5038 * of this CPU
5039 * matching
5040 * ACQUIRE from __printk_cpu_sync_try_get:A to
5041 * __printk_cpu_sync_try_get:B of the next CPU
5042 */
5043 atomic_set_release(&printk_cpu_sync_owner,
5044 -1); /* LMM(__printk_cpu_sync_put:B) */
5045 }
5046 EXPORT_SYMBOL(__printk_cpu_sync_put);
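
/*
 * Illustrative sketch: this lock is normally taken through the wrapper
 * macros from <linux/printk.h>, which disable interrupts and spin via
 * __printk_cpu_sync_wait() until __printk_cpu_sync_try_get() succeeds:
 *
 *	unsigned long flags;
 *
 *	printk_cpu_sync_get_irqsave(flags);
 *	// emit a multi-line report without other CPUs interleaving
 *	dump_stack();
 *	printk_cpu_sync_put_irqrestore(flags);
 *
 * This only serializes cooperating users of the same lock; it does not
 * block ordinary printk() callers on other CPUs.
 */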
5047 #endif /* CONFIG_SMP */
5048