1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/kernel/printk.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * Modified to make sys_syslog() more flexible: added commands to
8 * return the last 4k of kernel messages, regardless of whether
9 * they've been read or not. Added option to suppress kernel printk's
10 * to the console. Added hook for sending the console messages
11 * elsewhere, in preparation for a serial line console (someday).
12 * Ted Ts'o, 2/11/93.
13 * Modified for sysctl support, 1/8/97, Chris Horn.
14 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
15 * manfred@colorfullife.com
16 * Rewrote bits to get rid of console_lock
17 * 01Mar01 Andrew Morton
18 */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/kernel.h>
23 #include <linux/mm.h>
24 #include <linux/tty.h>
25 #include <linux/tty_driver.h>
26 #include <linux/console.h>
27 #include <linux/init.h>
28 #include <linux/jiffies.h>
29 #include <linux/nmi.h>
30 #include <linux/module.h>
31 #include <linux/moduleparam.h>
32 #include <linux/delay.h>
33 #include <linux/smp.h>
34 #include <linux/security.h>
35 #include <linux/memblock.h>
36 #include <linux/syscalls.h>
37 #include <linux/syscore_ops.h>
38 #include <linux/vmcore_info.h>
39 #include <linux/ratelimit.h>
40 #include <linux/kmsg_dump.h>
41 #include <linux/syslog.h>
42 #include <linux/cpu.h>
43 #include <linux/rculist.h>
44 #include <linux/poll.h>
45 #include <linux/irq_work.h>
46 #include <linux/ctype.h>
47 #include <linux/uio.h>
48 #include <linux/sched/clock.h>
49 #include <linux/sched/debug.h>
50 #include <linux/sched/task_stack.h>
51 #include <linux/panic.h>
52
53 #include <linux/uaccess.h>
54 #include <asm/sections.h>
55
56 #include <trace/events/initcall.h>
57 #define CREATE_TRACE_POINTS
58 #include <trace/events/printk.h>
59
60 #include "printk_ringbuffer.h"
61 #include "console_cmdline.h"
62 #include "braille.h"
63 #include "internal.h"
64
65 int console_printk[4] = {
66 CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */
67 MESSAGE_LOGLEVEL_DEFAULT, /* default_message_loglevel */
68 CONSOLE_LOGLEVEL_MIN, /* minimum_console_loglevel */
69 CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */
70 };
71 EXPORT_SYMBOL_GPL(console_printk);
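/*
 * These four values are exported, in this order, as /proc/sys/kernel/printk.
 * A quick usage sketch: "echo 4 4 1 7 > /proc/sys/kernel/printk" sets the
 * console loglevel to 4, so only messages with a level numerically below 4
 * (KERN_ERR and more severe) reach the consoles.
 */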
72
73 atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
74 EXPORT_SYMBOL(ignore_console_lock_warning);
75
76 EXPORT_TRACEPOINT_SYMBOL_GPL(console);
77
78 /*
79 * Low level drivers may need to know whether they can schedule in
80 * their unblank() callback. So let's export it.
81 */
82 int oops_in_progress;
83 EXPORT_SYMBOL(oops_in_progress);
84
85 /*
86 * console_mutex protects console_list updates and console->flags updates.
87 * The flags are synchronized only for consoles that are registered, i.e.
88 * accessible via the console list.
89 */
90 static DEFINE_MUTEX(console_mutex);
91
92 /*
93 * console_sem protects updates to console->seq
94 * and also provides serialization for console printing.
95 */
96 static DEFINE_SEMAPHORE(console_sem, 1);
97 HLIST_HEAD(console_list);
98 EXPORT_SYMBOL_GPL(console_list);
99 DEFINE_STATIC_SRCU(console_srcu);
100
101 /*
102 * The system may need to suppress printk messages under certain
103 * circumstances, e.g. after a kernel panic.
104 */
105 int __read_mostly suppress_printk;
106
107 #ifdef CONFIG_LOCKDEP
108 static struct lockdep_map console_lock_dep_map = {
109 .name = "console_lock"
110 };
111
112 void lockdep_assert_console_list_lock_held(void)
113 {
114 lockdep_assert_held(&console_mutex);
115 }
116 EXPORT_SYMBOL(lockdep_assert_console_list_lock_held);
117 #endif
118
119 #ifdef CONFIG_DEBUG_LOCK_ALLOC
120 bool console_srcu_read_lock_is_held(void)
121 {
122 return srcu_read_lock_held(&console_srcu);
123 }
124 EXPORT_SYMBOL(console_srcu_read_lock_is_held);
125 #endif
126
127 enum devkmsg_log_bits {
128 __DEVKMSG_LOG_BIT_ON = 0,
129 __DEVKMSG_LOG_BIT_OFF,
130 __DEVKMSG_LOG_BIT_LOCK,
131 };
132
133 enum devkmsg_log_masks {
134 DEVKMSG_LOG_MASK_ON = BIT(__DEVKMSG_LOG_BIT_ON),
135 DEVKMSG_LOG_MASK_OFF = BIT(__DEVKMSG_LOG_BIT_OFF),
136 DEVKMSG_LOG_MASK_LOCK = BIT(__DEVKMSG_LOG_BIT_LOCK),
137 };
138
139 /* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
140 #define DEVKMSG_LOG_MASK_DEFAULT 0
141
142 static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
143
144 static int __control_devkmsg(char *str)
145 {
146 size_t len;
147
148 if (!str)
149 return -EINVAL;
150
151 len = str_has_prefix(str, "on");
152 if (len) {
153 devkmsg_log = DEVKMSG_LOG_MASK_ON;
154 return len;
155 }
156
157 len = str_has_prefix(str, "off");
158 if (len) {
159 devkmsg_log = DEVKMSG_LOG_MASK_OFF;
160 return len;
161 }
162
163 len = str_has_prefix(str, "ratelimit");
164 if (len) {
165 devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
166 return len;
167 }
168
169 return -EINVAL;
170 }
171
172 static int __init control_devkmsg(char *str)
173 {
174 if (__control_devkmsg(str) < 0) {
175 pr_warn("printk.devkmsg: bad option string '%s'\n", str);
176 return 1;
177 }
178
179 /*
180 * Set sysctl string accordingly:
181 */
182 if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
183 strscpy(devkmsg_log_str, "on");
184 else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
185 strscpy(devkmsg_log_str, "off");
186 /* else "ratelimit" which is set by default. */
187
188 /*
189 * Sysctl cannot change it anymore. The kernel command line setting of
190 * this parameter is to force the setting to be permanent throughout the
191 * runtime of the system. This is a precautionary measure against userspace
192 * trying to be a smarta** and attempting to change it up on us.
193 */
194 devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;
195
196 return 1;
197 }
198 __setup("printk.devkmsg=", control_devkmsg);
199
200 char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
201 #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
202 int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
203 void *buffer, size_t *lenp, loff_t *ppos)
204 {
205 char old_str[DEVKMSG_STR_MAX_SIZE];
206 unsigned int old;
207 int err;
208
209 if (write) {
210 if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
211 return -EINVAL;
212
213 old = devkmsg_log;
214 strscpy(old_str, devkmsg_log_str);
215 }
216
217 err = proc_dostring(table, write, buffer, lenp, ppos);
218 if (err)
219 return err;
220
221 if (write) {
222 err = __control_devkmsg(devkmsg_log_str);
223
224 /*
225 * Do not accept an unknown string OR a known string with
226 * trailing crap...
227 */
228 if (err < 0 || (err + 1 != *lenp)) {
229
230 /* ... and restore old setting. */
231 devkmsg_log = old;
232 strscpy(devkmsg_log_str, old_str);
233
234 return -EINVAL;
235 }
236 }
237
238 return 0;
239 }
240 #endif /* CONFIG_PRINTK && CONFIG_SYSCTL */
241
242 /**
243 * console_list_lock - Lock the console list
244 *
245 * For console list or console->flags updates
246 */
247 void console_list_lock(void)
248 __acquires(&console_mutex)
249 {
250 /*
251 * In unregister_console() and console_force_preferred_locked(),
252 * synchronize_srcu() is called with the console_list_lock held.
253 * Therefore it is not allowed that the console_list_lock is taken
254 * with the srcu_lock held.
255 *
256 * Detecting if this context is really in the read-side critical
257 * section is only possible if the appropriate debug options are
258 * enabled.
259 */
260 WARN_ON_ONCE(debug_lockdep_rcu_enabled() &&
261 srcu_read_lock_held(&console_srcu));
262
263 mutex_lock(&console_mutex);
264 }
265 EXPORT_SYMBOL(console_list_lock);
266
267 /**
268 * console_list_unlock - Unlock the console list
269 *
270 * Counterpart to console_list_lock()
271 */
272 void console_list_unlock(void)
273 __releases(&console_mutex)
274 {
275 mutex_unlock(&console_mutex);
276 }
277 EXPORT_SYMBOL(console_list_unlock);
278
279 /**
280 * console_srcu_read_lock - Register a new reader for the
281 * SRCU-protected console list
282 *
283 * Use for_each_console_srcu() to iterate the console list
284 *
285 * Context: Any context.
286 * Return: A cookie to pass to console_srcu_read_unlock().
287 */
288 int console_srcu_read_lock(void)
289 __acquires(&console_srcu)
290 {
291 return srcu_read_lock_nmisafe(&console_srcu);
292 }
293 EXPORT_SYMBOL(console_srcu_read_lock);
294
295 /**
296 * console_srcu_read_unlock - Unregister an old reader from
297 * the SRCU-protected console list
298 * @cookie: cookie returned from console_srcu_read_lock()
299 *
300 * Counterpart to console_srcu_read_lock()
301 */
302 void console_srcu_read_unlock(int cookie)
303 __releases(&console_srcu)
304 {
305 srcu_read_unlock_nmisafe(&console_srcu, cookie);
306 }
307 EXPORT_SYMBOL(console_srcu_read_unlock);
308
309 /*
310 * Helper macros to handle lockdep when locking/unlocking console_sem. We use
311 * macros instead of functions so that _RET_IP_ contains useful information.
312 */
313 #define down_console_sem() do { \
314 down(&console_sem);\
315 mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
316 } while (0)
317
318 static int __down_trylock_console_sem(unsigned long ip)
319 {
320 int lock_failed;
321 unsigned long flags;
322
323 /*
324 * Here and in __up_console_sem() we need to be in safe mode,
325 * because spindump/WARN/etc from under console ->lock will
326 * deadlock in printk()->down_trylock_console_sem() otherwise.
327 */
328 printk_safe_enter_irqsave(flags);
329 lock_failed = down_trylock(&console_sem);
330 printk_safe_exit_irqrestore(flags);
331
332 if (lock_failed)
333 return 1;
334 mutex_acquire(&console_lock_dep_map, 0, 1, ip);
335 return 0;
336 }
337 #define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)
338
339 static void __up_console_sem(unsigned long ip)
340 {
341 unsigned long flags;
342
343 mutex_release(&console_lock_dep_map, ip);
344
345 printk_safe_enter_irqsave(flags);
346 up(&console_sem);
347 printk_safe_exit_irqrestore(flags);
348 }
349 #define up_console_sem() __up_console_sem(_RET_IP_)
350
351 /*
352 * This is used for debugging the mess that is the VT code by
353 * keeping track if we have the console semaphore held. It's
354 * definitely not the perfect debug tool (we don't know if _WE_
355 * hold it and are racing, but it helps tracking those weird code
356 * paths in the console code where we end up in places I want
357 * locked without the console semaphore held).
358 */
359 static int console_locked;
360
361 /*
362 * Array of consoles built from command line options (console=)
363 */
364
365 #define MAX_CMDLINECONSOLES 8
366
367 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
368
369 static int preferred_console = -1;
370 int console_set_on_cmdline;
371 EXPORT_SYMBOL(console_set_on_cmdline);
372
373 /* Flag: console code may call schedule() */
374 static int console_may_schedule;
375
376 enum con_msg_format_flags {
377 MSG_FORMAT_DEFAULT = 0,
378 MSG_FORMAT_SYSLOG = (1 << 0),
379 };
380
381 static int console_msg_format = MSG_FORMAT_DEFAULT;
382
383 /*
384 * The printk log buffer consists of a sequenced collection of records, each
385 * containing variable length message text. Every record also contains its
386 * own meta-data (@info).
387 *
388 * Every record meta-data carries the timestamp in microseconds, as well as
389 * the standard userspace syslog level and syslog facility. The usual kernel
390 * messages use LOG_KERN; userspace-injected messages always carry a matching
391 * syslog facility, by default LOG_USER. The origin of every message can be
392 * reliably determined that way.
393 *
394 * The human readable log message of a record is available in @text, the
395 * length of the message text in @text_len. The stored message is not
396 * terminated.
397 *
398 * Optionally, a record can carry a dictionary of properties (key/value
399 * pairs), to provide userspace with a machine-readable message context.
400 *
401 * Examples for well-defined, commonly used property names are:
402 * DEVICE=b12:8 device identifier
403 * b12:8 block dev_t
404 * c127:3 char dev_t
405 * n8 netdev ifindex
406 * +sound:card0 subsystem:devname
407 * SUBSYSTEM=pci driver-core subsystem name
408 *
409 * Valid characters in property names are [a-zA-Z0-9.-_]. Property names
410 * and values are terminated by a '\0' character.
411 *
412 * Example of record values:
413 * record.text_buf = "it's a line" (unterminated)
414 * record.info.seq = 56
415 * record.info.ts_nsec = 36863
416 * record.info.text_len = 11
417 * record.info.facility = 0 (LOG_KERN)
418 * record.info.flags = 0
419 * record.info.level = 3 (LOG_ERR)
420 * record.info.caller_id = 299 (task 299)
421 * record.info.dev_info.subsystem = "pci" (terminated)
422 * record.info.dev_info.device = "+pci:0000:00:01.0" (terminated)
423 *
424 * The 'struct printk_info' buffer must never be directly exported to
425 * userspace, it is a kernel-private implementation detail that might
426 * need to be changed in the future, when the requirements change.
427 *
428 * /dev/kmsg exports the structured data in the following line format:
429 * "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
430 *
431 * Users of the export format should ignore possible additional values
432 * separated by ',', and find the message after the ';' character.
433 *
434 * The optional key/value pairs are attached as continuation lines starting
435 * with a space character and terminated by a newline. All possible
436 * non-printable characters are escaped in the "\xff" notation.
437 */
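/*
 * As an illustrative sketch, /dev/kmsg would export the example record
 * above roughly as:
 *
 *   3,56,36,-;it's a line
 *    SUBSYSTEM=pci
 *    DEVICE=+pci:0000:00:01.0
 *
 * where "3" is (facility << 3) | level, "56" the sequence number, "36" the
 * timestamp in microseconds and '-' the continuation flag.
 */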
438
439 /* syslog_lock protects syslog_* variables and write access to clear_seq. */
440 static DEFINE_MUTEX(syslog_lock);
441
442 /*
443 * Specifies if a legacy console is registered. If legacy consoles are
444 * present, it is necessary to perform the console lock/unlock dance
445 * whenever console flushing should occur.
446 */
447 bool have_legacy_console;
448
449 /*
450 * Specifies if an nbcon console is registered. If nbcon consoles are present,
451 * synchronous printing of legacy consoles will not occur during panic until
452 * the backtrace has been stored to the ringbuffer.
453 */
454 bool have_nbcon_console;
455
456 /*
457 * Specifies if a boot console is registered. If boot consoles are present,
458 * nbcon consoles cannot print simultaneously and must be synchronized by
459 * the console lock. This is because boot consoles and nbcon consoles may
460 * have mapped the same hardware.
461 */
462 bool have_boot_console;
463
464 /* See printk_legacy_allow_panic_sync() for details. */
465 bool legacy_allow_panic_sync;
466
467 /* Avoid using irq_work when suspending. */
468 bool console_irqwork_blocked;
469
470 #ifdef CONFIG_PRINTK
471 DECLARE_WAIT_QUEUE_HEAD(log_wait);
472 static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
473 /* All 3 protected by @syslog_lock. */
474 /* the next printk record to read by syslog(READ) or /proc/kmsg */
475 static u64 syslog_seq;
476 static size_t syslog_partial;
477 static bool syslog_time;
478
479 /* True when _all_ printer threads are available for printing. */
480 bool printk_kthreads_running;
481
482 struct latched_seq {
483 seqcount_latch_t latch;
484 u64 val[2];
485 };
486
487 /*
488 * The next printk record to read after the last 'clear' command. There are
489 * two copies (updated with seqcount_latch) so that reads can locklessly
490 * access a valid value. Writers are synchronized by @syslog_lock.
491 */
492 static struct latched_seq clear_seq = {
493 .latch = SEQCNT_LATCH_ZERO(clear_seq.latch),
494 .val[0] = 0,
495 .val[1] = 0,
496 };
497
498 #define LOG_LEVEL(v) ((v) & 0x07)
499 #define LOG_FACILITY(v) ((v) >> 3 & 0xff)
500
501 /* record buffer */
502 #define LOG_ALIGN __alignof__(unsigned long)
503 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
504 #define LOG_BUF_LEN_MAX ((u32)1 << 31)
505 static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
506 static char *log_buf = __log_buf;
507 static u32 log_buf_len = __LOG_BUF_LEN;
508
509 /*
510 * Define the average message size. This only affects the number of
511 * descriptors that will be available. Underestimating is better than
512 * overestimating (too many available descriptors is better than not enough).
513 */
514 #define PRB_AVGBITS 5 /* 32 character average length */
515
516 #if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS
517 #error CONFIG_LOG_BUF_SHIFT value too small.
518 #endif
519 _DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
520 PRB_AVGBITS, &__log_buf[0]);
521
522 static struct printk_ringbuffer printk_rb_dynamic;
523
524 struct printk_ringbuffer *prb = &printk_rb_static;
525
526 /*
527 * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
528 * per_cpu_areas are initialised. This variable is set to true when
529 * it's safe to access per-CPU data.
530 */
531 static bool __printk_percpu_data_ready __ro_after_init;
532
533 bool printk_percpu_data_ready(void)
534 {
535 return __printk_percpu_data_ready;
536 }
537
538 /* Must be called under syslog_lock. */
539 static void latched_seq_write(struct latched_seq *ls, u64 val)
540 {
541 write_seqcount_latch_begin(&ls->latch);
542 ls->val[0] = val;
543 write_seqcount_latch(&ls->latch);
544 ls->val[1] = val;
545 write_seqcount_latch_end(&ls->latch);
546 }
547
548 /* Can be called from any context. */
549 static u64 latched_seq_read_nolock(struct latched_seq *ls)
550 {
551 unsigned int seq;
552 unsigned int idx;
553 u64 val;
554
555 do {
556 seq = read_seqcount_latch(&ls->latch);
557 idx = seq & 0x1;
558 val = ls->val[idx];
559 } while (read_seqcount_latch_retry(&ls->latch, seq));
560
561 return val;
562 }
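/*
 * Sketch of the latch protocol above: latched_seq_write() increments the
 * sequence count before each slot update, so the reader's "seq & 0x1" picks
 * the slot that is not currently being written, and a racing update is
 * caught by read_seqcount_latch_retry() and simply re-read.
 */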
563
564 /* Return log buffer address */
565 char *log_buf_addr_get(void)
566 {
567 return log_buf;
568 }
569
570 /* Return log buffer size */
571 u32 log_buf_len_get(void)
572 {
573 return log_buf_len;
574 }
575
576 /*
577 * Define how much of the log buffer we could take at maximum. The value
578 * must be greater than two. Note that only half of the buffer is available
579 * when the index points to the middle.
580 */
581 #define MAX_LOG_TAKE_PART 4
582 static const char trunc_msg[] = "<truncated>";
583
584 static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
585 {
586 /*
587 * The message should not take the whole buffer. Otherwise, it might
588 * get removed too soon.
589 */
590 u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
591
592 if (*text_len > max_text_len)
593 *text_len = max_text_len;
594
595 /* enable the warning message (if there is room) */
596 *trunc_msg_len = strlen(trunc_msg);
597 if (*text_len >= *trunc_msg_len)
598 *text_len -= *trunc_msg_len;
599 else
600 *trunc_msg_len = 0;
601 }
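/*
 * Worked example (assuming a 128 KiB log buffer, i.e. CONFIG_LOG_BUF_SHIFT=17):
 * max_text_len is 32 KiB, so a 40 KiB message is cut to 32 KiB minus the
 * 11 bytes reserved for the "<truncated>" marker.
 */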
602
603 int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
604
605 static int syslog_action_restricted(int type)
606 {
607 if (dmesg_restrict)
608 return 1;
609 /*
610 * Unless restricted, we allow "read all" and "get buffer size"
611 * for everybody.
612 */
613 return type != SYSLOG_ACTION_READ_ALL &&
614 type != SYSLOG_ACTION_SIZE_BUFFER;
615 }
616
617 static int check_syslog_permissions(int type, int source)
618 {
619 /*
620 * If this is from /proc/kmsg and we've already opened it, then we've
621 * already done the capabilities checks at open time.
622 */
623 if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
624 goto ok;
625
626 if (syslog_action_restricted(type)) {
627 if (capable(CAP_SYSLOG))
628 goto ok;
629 return -EPERM;
630 }
631 ok:
632 return security_syslog(type);
633 }
634
635 static void append_char(char **pp, char *e, char c)
636 {
637 if (*pp < e)
638 *(*pp)++ = c;
639 }
640
641 static ssize_t info_print_ext_header(char *buf, size_t size,
642 struct printk_info *info)
643 {
644 u64 ts_usec = info->ts_nsec;
645 char caller[20];
646 #ifdef CONFIG_PRINTK_CALLER
647 u32 id = info->caller_id;
648
649 snprintf(caller, sizeof(caller), ",caller=%c%u",
650 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
651 #else
652 caller[0] = '\0';
653 #endif
654
655 do_div(ts_usec, 1000);
656
657 return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
658 (info->facility << 3) | info->level, info->seq,
659 ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller);
660 }
661
662 static ssize_t msg_add_ext_text(char *buf, size_t size,
663 const char *text, size_t text_len,
664 unsigned char endc)
665 {
666 char *p = buf, *e = buf + size;
667 size_t i;
668
669 /* escape non-printable characters */
670 for (i = 0; i < text_len; i++) {
671 unsigned char c = text[i];
672
673 if (c < ' ' || c >= 127 || c == '\\')
674 p += scnprintf(p, e - p, "\\x%02x", c);
675 else
676 append_char(&p, e, c);
677 }
678 append_char(&p, e, endc);
679
680 return p - buf;
681 }
682
683 static ssize_t msg_add_dict_text(char *buf, size_t size,
684 const char *key, const char *val)
685 {
686 size_t val_len = strlen(val);
687 ssize_t len;
688
689 if (!val_len)
690 return 0;
691
692 len = msg_add_ext_text(buf, size, "", 0, ' '); /* dict prefix */
693 len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '=');
694 len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n');
695
696 return len;
697 }
698
699 static ssize_t msg_print_ext_body(char *buf, size_t size,
700 char *text, size_t text_len,
701 struct dev_printk_info *dev_info)
702 {
703 ssize_t len;
704
705 len = msg_add_ext_text(buf, size, text, text_len, '\n');
706
707 if (!dev_info)
708 goto out;
709
710 len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM",
711 dev_info->subsystem);
712 len += msg_add_dict_text(buf + len, size - len, "DEVICE",
713 dev_info->device);
714 out:
715 return len;
716 }
717
718 /* /dev/kmsg - userspace message inject/listen interface */
719 struct devkmsg_user {
720 atomic64_t seq;
721 struct ratelimit_state rs;
722 struct mutex lock;
723 struct printk_buffers pbufs;
724 };
725
726 static __printf(3, 4) __cold
727 int devkmsg_emit(int facility, int level, const char *fmt, ...)
728 {
729 va_list args;
730 int r;
731
732 va_start(args, fmt);
733 r = vprintk_emit(facility, level, NULL, fmt, args);
734 va_end(args);
735
736 return r;
737 }
738
739 static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
740 {
741 char *buf, *line;
742 int level = default_message_loglevel;
743 int facility = 1; /* LOG_USER */
744 struct file *file = iocb->ki_filp;
745 struct devkmsg_user *user = file->private_data;
746 size_t len = iov_iter_count(from);
747 ssize_t ret = len;
748
749 if (len > PRINTKRB_RECORD_MAX)
750 return -EINVAL;
751
752 /* Ignore when user logging is disabled. */
753 if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
754 return len;
755
756 /* Ratelimit when not explicitly enabled. */
757 if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
758 if (!___ratelimit(&user->rs, current->comm))
759 return ret;
760 }
761
762 buf = kmalloc(len+1, GFP_KERNEL);
763 if (buf == NULL)
764 return -ENOMEM;
765
766 buf[len] = '\0';
767 if (!copy_from_iter_full(buf, len, from)) {
768 kfree(buf);
769 return -EFAULT;
770 }
771
772 /*
773 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace,
774 * the decimal value represents a 32-bit value: the lower 3 bits are the
775 * log level, the rest is the log facility.
776 *
777 * If no prefix or no userspace facility is specified, we
778 * enforce LOG_USER, to be able to reliably distinguish
779 * kernel-generated messages from userspace-injected ones.
780 */
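/*
 * Worked example (hypothetical input): writing "<11>hello" parses u = 11,
 * so level = LOG_LEVEL(11) = 3 (LOG_ERR) and facility = LOG_FACILITY(11) =
 * 1 (LOG_USER), and "hello" is emitted with that facility and level.
 */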
781 line = buf;
782 if (line[0] == '<') {
783 char *endp = NULL;
784 unsigned int u;
785
786 u = simple_strtoul(line + 1, &endp, 10);
787 if (endp && endp[0] == '>') {
788 level = LOG_LEVEL(u);
789 if (LOG_FACILITY(u) != 0)
790 facility = LOG_FACILITY(u);
791 endp++;
792 line = endp;
793 }
794 }
795
796 devkmsg_emit(facility, level, "%s", line);
797 kfree(buf);
798 return ret;
799 }
800
801 static ssize_t devkmsg_read(struct file *file, char __user *buf,
802 size_t count, loff_t *ppos)
803 {
804 struct devkmsg_user *user = file->private_data;
805 char *outbuf = &user->pbufs.outbuf[0];
806 struct printk_message pmsg = {
807 .pbufs = &user->pbufs,
808 };
809 ssize_t ret;
810
811 ret = mutex_lock_interruptible(&user->lock);
812 if (ret)
813 return ret;
814
815 if (!printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, false)) {
816 if (file->f_flags & O_NONBLOCK) {
817 ret = -EAGAIN;
818 goto out;
819 }
820
821 /*
822 * Guarantee this task is visible on the waitqueue before
823 * checking the wake condition.
824 *
825 * The full memory barrier within set_current_state() of
826 * prepare_to_wait_event() pairs with the full memory barrier
827 * within wq_has_sleeper().
828 *
829 * This pairs with __wake_up_klogd:A.
830 */
831 ret = wait_event_interruptible(log_wait,
832 printk_get_next_message(&pmsg, atomic64_read(&user->seq), true,
833 false)); /* LMM(devkmsg_read:A) */
834 if (ret)
835 goto out;
836 }
837
838 if (pmsg.dropped) {
839 /* our last seen message is gone, return error and reset */
840 atomic64_set(&user->seq, pmsg.seq);
841 ret = -EPIPE;
842 goto out;
843 }
844
845 atomic64_set(&user->seq, pmsg.seq + 1);
846
847 if (pmsg.outbuf_len > count) {
848 ret = -EINVAL;
849 goto out;
850 }
851
852 if (copy_to_user(buf, outbuf, pmsg.outbuf_len)) {
853 ret = -EFAULT;
854 goto out;
855 }
856 ret = pmsg.outbuf_len;
857 out:
858 mutex_unlock(&user->lock);
859 return ret;
860 }
861
862 /*
863 * Be careful when modifying this function!!!
864 *
865 * Only a few operations are supported because the device works only with
866 * entire variable-length messages (records). Non-standard values are
867 * returned in the other cases and have been this way for quite some time.
868 * User space applications might depend on this behavior.
869 */
870 static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
871 {
872 struct devkmsg_user *user = file->private_data;
873 loff_t ret = 0;
874
875 if (offset)
876 return -ESPIPE;
877
878 switch (whence) {
879 case SEEK_SET:
880 /* the first record */
881 atomic64_set(&user->seq, prb_first_valid_seq(prb));
882 break;
883 case SEEK_DATA:
884 /*
885 * The first record after the last SYSLOG_ACTION_CLEAR,
886 * as issued by 'dmesg -c'. Reading /dev/kmsg itself
887 * changes no global state, and does not clear anything.
888 */
889 atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
890 break;
891 case SEEK_END:
892 /* after the last record */
893 atomic64_set(&user->seq, prb_next_seq(prb));
894 break;
895 default:
896 ret = -EINVAL;
897 }
898 return ret;
899 }
900
901 static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
902 {
903 struct devkmsg_user *user = file->private_data;
904 struct printk_info info;
905 __poll_t ret = 0;
906
907 poll_wait(file, &log_wait, wait);
908
909 if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
910 /* return error when data has vanished underneath us */
911 if (info.seq != atomic64_read(&user->seq))
912 ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
913 else
914 ret = EPOLLIN|EPOLLRDNORM;
915 }
916
917 return ret;
918 }
919
920 static int devkmsg_open(struct inode *inode, struct file *file)
921 {
922 struct devkmsg_user *user;
923 int err;
924
925 if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
926 return -EPERM;
927
928 /* write-only does not need any file context */
929 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
930 err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
931 SYSLOG_FROM_READER);
932 if (err)
933 return err;
934 }
935
936 user = kvmalloc_obj(struct devkmsg_user, GFP_KERNEL);
937 if (!user)
938 return -ENOMEM;
939
940 ratelimit_default_init(&user->rs);
941 ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);
942
943 mutex_init(&user->lock);
944
945 atomic64_set(&user->seq, prb_first_valid_seq(prb));
946
947 file->private_data = user;
948 return 0;
949 }
950
951 static int devkmsg_release(struct inode *inode, struct file *file)
952 {
953 struct devkmsg_user *user = file->private_data;
954
955 ratelimit_state_exit(&user->rs);
956
957 mutex_destroy(&user->lock);
958 kvfree(user);
959 return 0;
960 }
961
962 const struct file_operations kmsg_fops = {
963 .open = devkmsg_open,
964 .read = devkmsg_read,
965 .write_iter = devkmsg_write,
966 .llseek = devkmsg_llseek,
967 .poll = devkmsg_poll,
968 .release = devkmsg_release,
969 };
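/*
 * Minimal user-space sketch (hypothetical, not part of this file): each
 * read() returns exactly one formatted record, or fails with EAGAIN when
 * opened O_NONBLOCK and no new record is available:
 *
 *   int fd = open("/dev/kmsg", O_RDONLY | O_NONBLOCK);
 *   char buf[8192];
 *   ssize_t n = read(fd, buf, sizeof(buf));
 */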
970
971 #ifdef CONFIG_VMCORE_INFO
972 /*
973 * This appends the listed symbols to /proc/vmcore
974 *
975 * /proc/vmcore is used by various utilities, such as crash and makedumpfile, to
976 * obtain access to symbols that are otherwise very difficult to locate. These
977 * symbols are specifically used so that utilities can access and extract the
978 * dmesg log from a vmcore file after a crash.
979 */
980 void log_buf_vmcoreinfo_setup(void)
981 {
982 struct dev_printk_info *dev_info = NULL;
983
984 VMCOREINFO_SYMBOL(prb);
985 VMCOREINFO_SYMBOL(printk_rb_static);
986 VMCOREINFO_SYMBOL(clear_seq);
987
988 /*
989 * Export struct size and field offsets. User space tools can
990 * parse it and detect any changes to structure down the line.
991 */
992
993 VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
994 VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
995 VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
996 VMCOREINFO_OFFSET(printk_ringbuffer, fail);
997
998 VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
999 VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
1000 VMCOREINFO_OFFSET(prb_desc_ring, descs);
1001 VMCOREINFO_OFFSET(prb_desc_ring, infos);
1002 VMCOREINFO_OFFSET(prb_desc_ring, head_id);
1003 VMCOREINFO_OFFSET(prb_desc_ring, tail_id);
1004
1005 VMCOREINFO_STRUCT_SIZE(prb_desc);
1006 VMCOREINFO_OFFSET(prb_desc, state_var);
1007 VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);
1008
1009 VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
1010 VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
1011 VMCOREINFO_OFFSET(prb_data_blk_lpos, next);
1012
1013 VMCOREINFO_STRUCT_SIZE(printk_info);
1014 VMCOREINFO_OFFSET(printk_info, seq);
1015 VMCOREINFO_OFFSET(printk_info, ts_nsec);
1016 VMCOREINFO_OFFSET(printk_info, text_len);
1017 VMCOREINFO_OFFSET(printk_info, caller_id);
1018 VMCOREINFO_OFFSET(printk_info, dev_info);
1019
1020 VMCOREINFO_STRUCT_SIZE(dev_printk_info);
1021 VMCOREINFO_OFFSET(dev_printk_info, subsystem);
1022 VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem));
1023 VMCOREINFO_OFFSET(dev_printk_info, device);
1024 VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device));
1025
1026 VMCOREINFO_STRUCT_SIZE(prb_data_ring);
1027 VMCOREINFO_OFFSET(prb_data_ring, size_bits);
1028 VMCOREINFO_OFFSET(prb_data_ring, data);
1029 VMCOREINFO_OFFSET(prb_data_ring, head_lpos);
1030 VMCOREINFO_OFFSET(prb_data_ring, tail_lpos);
1031
1032 VMCOREINFO_SIZE(atomic_long_t);
1033 VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);
1034
1035 VMCOREINFO_STRUCT_SIZE(latched_seq);
1036 VMCOREINFO_OFFSET(latched_seq, val);
1037 }
1038 #endif
1039
1040 /* requested log_buf_len from kernel cmdline */
1041 static unsigned long __initdata new_log_buf_len;
1042
1043 /* we practice scaling the ring buffer by powers of 2 */
1044 static void __init log_buf_len_update(u64 size)
1045 {
1046 if (size > (u64)LOG_BUF_LEN_MAX) {
1047 size = (u64)LOG_BUF_LEN_MAX;
1048 pr_err("log_buf over 2G is not supported.\n");
1049 }
1050
1051 if (size)
1052 size = roundup_pow_of_two(size);
1053 if (size > log_buf_len)
1054 new_log_buf_len = (unsigned long)size;
1055 }
1056
1057 /* save requested log_buf_len since it's too early to process it */
1058 static int __init log_buf_len_setup(char *str)
1059 {
1060 u64 size;
1061
1062 if (!str)
1063 return -EINVAL;
1064
1065 size = memparse(str, &str);
1066
1067 log_buf_len_update(size);
1068
1069 return 0;
1070 }
1071 early_param("log_buf_len", log_buf_len_setup);
1072
1073 #ifdef CONFIG_SMP
1074 #define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)
1075
1076 static void __init log_buf_add_cpu(void)
1077 {
1078 unsigned int cpu_extra;
1079
1080 /*
1081 * archs should set up cpu_possible_bits properly with
1082 * set_cpu_possible() after setup_arch(), but just in
1083 * case let's ensure this is valid.
1084 */
1085 if (num_possible_cpus() == 1)
1086 return;
1087
1088 cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;
1089
1090 /* by default this will only continue through for large systems (> 64 CPUs) */
1091 if (cpu_extra <= __LOG_BUF_LEN / 2)
1092 return;
1093
1094 pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
1095 __LOG_CPU_MAX_BUF_LEN);
1096 pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
1097 cpu_extra);
1098 pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);
1099
1100 log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
1101 }
1102 #else /* !CONFIG_SMP */
1103 static inline void log_buf_add_cpu(void) {}
1104 #endif /* CONFIG_SMP */
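/*
 * Worked example (assuming the common defaults CONFIG_LOG_BUF_SHIFT=17 and
 * CONFIG_LOG_CPU_MAX_BUF_SHIFT=12): with 128 possible CPUs, cpu_extra =
 * 127 * 4 KiB = 508 KiB, which exceeds half of the 128 KiB static buffer,
 * so the dynamic buffer is sized to 508 + 128 KiB, rounded up to 1 MiB.
 */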
1105
1106 static void __init set_percpu_data_ready(void)
1107 {
1108 __printk_percpu_data_ready = true;
1109 }
1110
1111 static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
1112 struct printk_record *r)
1113 {
1114 struct prb_reserved_entry e;
1115 struct printk_record dest_r;
1116
1117 prb_rec_init_wr(&dest_r, r->info->text_len);
1118
1119 if (!prb_reserve(&e, rb, &dest_r))
1120 return 0;
1121
1122 memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len);
1123 dest_r.info->text_len = r->info->text_len;
1124 dest_r.info->facility = r->info->facility;
1125 dest_r.info->level = r->info->level;
1126 dest_r.info->flags = r->info->flags;
1127 dest_r.info->ts_nsec = r->info->ts_nsec;
1128 dest_r.info->caller_id = r->info->caller_id;
1129 memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info));
1130
1131 prb_final_commit(&e);
1132
1133 return prb_record_text_space(&e);
1134 }
1135
1136 static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata;
1137
1138 static void print_log_buf_usage_stats(void)
1139 {
1140 unsigned int descs_count = log_buf_len >> PRB_AVGBITS;
1141 size_t meta_data_size;
1142
1143 meta_data_size = descs_count * (sizeof(struct prb_desc) + sizeof(struct printk_info));
1144
1145 pr_info("log buffer data + meta data: %u + %zu = %zu bytes\n",
1146 log_buf_len, meta_data_size, log_buf_len + meta_data_size);
1147 }
1148
1149 void __init setup_log_buf(int early)
1150 {
1151 struct printk_info *new_infos;
1152 unsigned int new_descs_count;
1153 struct prb_desc *new_descs;
1154 struct printk_info info;
1155 struct printk_record r;
1156 unsigned int text_size;
1157 size_t new_descs_size;
1158 size_t new_infos_size;
1159 unsigned long flags;
1160 char *new_log_buf;
1161 unsigned int free;
1162 u64 seq;
1163
1164 /*
1165 * Some archs call setup_log_buf() multiple times - the first call is very
1166 * early, e.g. from setup_arch(), and the second comes when percpu_areas
1167 * are initialised.
1168 */
1169 if (!early)
1170 set_percpu_data_ready();
1171
1172 if (log_buf != __log_buf)
1173 return;
1174
1175 if (!early && !new_log_buf_len)
1176 log_buf_add_cpu();
1177
1178 if (!new_log_buf_len) {
1179 /* Show the memory stats only once. */
1180 if (!early)
1181 goto out;
1182
1183 return;
1184 }
1185
1186 new_descs_count = new_log_buf_len >> PRB_AVGBITS;
1187 if (new_descs_count == 0) {
1188 pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
1189 goto out;
1190 }
1191
1192 new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
1193 if (unlikely(!new_log_buf)) {
1194 pr_err("log_buf_len: %lu text bytes not available\n",
1195 new_log_buf_len);
1196 goto out;
1197 }
1198
1199 new_descs_size = new_descs_count * sizeof(struct prb_desc);
1200 new_descs = memblock_alloc(new_descs_size, LOG_ALIGN);
1201 if (unlikely(!new_descs)) {
1202 pr_err("log_buf_len: %zu desc bytes not available\n",
1203 new_descs_size);
1204 goto err_free_log_buf;
1205 }
1206
1207 new_infos_size = new_descs_count * sizeof(struct printk_info);
1208 new_infos = memblock_alloc(new_infos_size, LOG_ALIGN);
1209 if (unlikely(!new_infos)) {
1210 pr_err("log_buf_len: %zu info bytes not available\n",
1211 new_infos_size);
1212 goto err_free_descs;
1213 }
1214
1215 prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf));
1216
1217 prb_init(&printk_rb_dynamic,
1218 new_log_buf, ilog2(new_log_buf_len),
1219 new_descs, ilog2(new_descs_count),
1220 new_infos);
1221
1222 local_irq_save(flags);
1223
1224 log_buf_len = new_log_buf_len;
1225 log_buf = new_log_buf;
1226 new_log_buf_len = 0;
1227
1228 free = __LOG_BUF_LEN;
1229 prb_for_each_record(0, &printk_rb_static, seq, &r) {
1230 text_size = add_to_rb(&printk_rb_dynamic, &r);
1231 if (text_size > free)
1232 free = 0;
1233 else
1234 free -= text_size;
1235 }
1236
1237 prb = &printk_rb_dynamic;
1238
1239 local_irq_restore(flags);
1240
1241 /*
1242 * Copy any remaining messages that might have appeared from
1243 * NMI context after copying but before switching to the
1244 * dynamic buffer.
1245 */
1246 prb_for_each_record(seq, &printk_rb_static, seq, &r) {
1247 text_size = add_to_rb(&printk_rb_dynamic, &r);
1248 if (text_size > free)
1249 free = 0;
1250 else
1251 free -= text_size;
1252 }
1253
1254 if (seq != prb_next_seq(&printk_rb_static)) {
1255 pr_err("dropped %llu messages\n",
1256 prb_next_seq(&printk_rb_static) - seq);
1257 }
1258
1259 print_log_buf_usage_stats();
1260 pr_info("early log buf free: %u(%u%%)\n",
1261 free, (free * 100) / __LOG_BUF_LEN);
1262 return;
1263
1264 err_free_descs:
1265 memblock_free(new_descs, new_descs_size);
1266 err_free_log_buf:
1267 memblock_free(new_log_buf, new_log_buf_len);
1268 out:
1269 print_log_buf_usage_stats();
1270 }
1271
1272 static bool __read_mostly ignore_loglevel;
1273
1274 static int __init ignore_loglevel_setup(char *str)
1275 {
1276 ignore_loglevel = true;
1277 pr_info("debug: ignoring loglevel setting.\n");
1278
1279 return 0;
1280 }
1281
1282 early_param("ignore_loglevel", ignore_loglevel_setup);
1283 module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
1284 MODULE_PARM_DESC(ignore_loglevel,
1285 "ignore loglevel setting (prints all kernel messages to the console)");
1286
1287 static bool suppress_message_printing(int level)
1288 {
1289 return (level >= console_loglevel && !ignore_loglevel);
1290 }
1291
1292 #ifdef CONFIG_BOOT_PRINTK_DELAY
1293
1294 static int boot_delay; /* msecs delay after each printk during bootup */
1295 static unsigned long long loops_per_msec; /* based on boot_delay */
1296
1297 static int __init boot_delay_setup(char *str)
1298 {
1299 unsigned long lpj;
1300
1301 lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
1302 loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
1303
1304 get_option(&str, &boot_delay);
1305 if (boot_delay > 10 * 1000)
1306 boot_delay = 0;
1307
1308 pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
1309 "HZ: %d, loops_per_msec: %llu\n",
1310 boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
1311 return 0;
1312 }
1313 early_param("boot_delay", boot_delay_setup);
1314
1315 static void boot_delay_msec(int level)
1316 {
1317 unsigned long long k;
1318 unsigned long timeout;
1319 bool suppress = !is_printk_force_console() &&
1320 suppress_message_printing(level);
1321
1322 if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING) || suppress)
1323 return;
1324
1325 k = (unsigned long long)loops_per_msec * boot_delay;
1326
1327 timeout = jiffies + msecs_to_jiffies(boot_delay);
1328 while (k) {
1329 k--;
1330 cpu_relax();
1331 /*
1332 * use (volatile) jiffies to prevent
1333 * compiler reduction; loop termination via jiffies
1334 * is secondary and may or may not happen.
1335 */
1336 if (time_after(jiffies, timeout))
1337 break;
1338 touch_nmi_watchdog();
1339 }
1340 }
1341 #else
1342 static inline void boot_delay_msec(int level)
1343 {
1344 }
1345 #endif
1346
1347 static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
1348 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
1349
1350 static size_t print_syslog(unsigned int level, char *buf)
1351 {
1352 return sprintf(buf, "<%u>", level);
1353 }
1354
1355 static size_t print_time(u64 ts, char *buf)
1356 {
1357 unsigned long rem_nsec = do_div(ts, 1000000000);
1358
1359 return sprintf(buf, "[%5lu.%06lu]",
1360 (unsigned long)ts, rem_nsec / 1000);
1361 }
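/*
 * Example: ts = 5000123456 prints as "[    5.000123]". do_div() splits the
 * nanosecond timestamp into ts = 5 seconds and rem_nsec = 123456, and
 * rem_nsec / 1000 becomes the six-digit microseconds field.
 */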
1362
1363 #ifdef CONFIG_PRINTK_CALLER
1364 static size_t print_caller(u32 id, char *buf)
1365 {
1366 char caller[12];
1367
1368 snprintf(caller, sizeof(caller), "%c%u",
1369 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
1370 return sprintf(buf, "[%6s]", caller);
1371 }
1372 #else
1373 #define print_caller(id, buf) 0
1374 #endif
1375
1376 static size_t info_print_prefix(const struct printk_info *info, bool syslog,
1377 bool time, char *buf)
1378 {
1379 size_t len = 0;
1380
1381 if (syslog)
1382 len = print_syslog((info->facility << 3) | info->level, buf);
1383
1384 if (time)
1385 len += print_time(info->ts_nsec, buf + len);
1386
1387 len += print_caller(info->caller_id, buf + len);
1388
1389 if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) {
1390 buf[len++] = ' ';
1391 buf[len] = '\0';
1392 }
1393
1394 return len;
1395 }
1396
1397 /*
1398 * Prepare the record for printing. The text is shifted within the given
1399 * buffer to avoid a need for another one. The following operations are
1400 * done:
1401 *
1402 * - Add prefix for each line.
1403 * - Drop truncated lines that no longer fit into the buffer.
1404 * - Add the trailing newline that has been removed in vprintk_store().
1405 * - Add a string terminator.
1406 *
1407 * Since the produced string is always terminated, the maximum possible
1408 * return value is @r->text_buf_size - 1.
1409 *
1410 * Return: The length of the updated/prepared text, including the added
1411 * prefixes and the newline. The terminator is not counted. The dropped
1412 * line(s) are not counted.
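 *
 * For illustration (hypothetical record): with syslog disabled and a time
 * prefix "[    5.000123] " (15 bytes), the text "ab\ncd" (text_len = 5)
 * becomes "[    5.000123] ab\n[    5.000123] cd\n" and 36 is returned.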
1413 */
1414 static size_t record_print_text(struct printk_record *r, bool syslog,
1415 bool time)
1416 {
1417 size_t text_len = r->info->text_len;
1418 size_t buf_size = r->text_buf_size;
1419 char *text = r->text_buf;
1420 char prefix[PRINTK_PREFIX_MAX];
1421 bool truncated = false;
1422 size_t prefix_len;
1423 size_t line_len;
1424 size_t len = 0;
1425 char *next;
1426
1427 /*
1428 * If the message was truncated because the buffer was not large
1429 * enough, treat the available text as if it were the full text.
1430 */
1431 if (text_len > buf_size)
1432 text_len = buf_size;
1433
1434 prefix_len = info_print_prefix(r->info, syslog, time, prefix);
1435
1436 /*
1437 * @text_len: bytes of unprocessed text
1438 * @line_len: bytes of current line _without_ newline
1439 * @text: pointer to beginning of current line
1440 * @len: number of bytes prepared in r->text_buf
1441 */
1442 for (;;) {
1443 next = memchr(text, '\n', text_len);
1444 if (next) {
1445 line_len = next - text;
1446 } else {
1447 /* Drop truncated line(s). */
1448 if (truncated)
1449 break;
1450 line_len = text_len;
1451 }
1452
1453 /*
1454 * Truncate the text if there is not enough space to add the
1455 * prefix and a trailing newline and a terminator.
1456 */
1457 if (len + prefix_len + text_len + 1 + 1 > buf_size) {
1458 /* Drop even the current line if no space. */
1459 if (len + prefix_len + line_len + 1 + 1 > buf_size)
1460 break;
1461
1462 text_len = buf_size - len - prefix_len - 1 - 1;
1463 truncated = true;
1464 }
1465
1466 memmove(text + prefix_len, text, text_len);
1467 memcpy(text, prefix, prefix_len);
1468
1469 /*
1470 * Increment the prepared length to include the text and
1471 * prefix that were just moved+copied. Also increment for the
1472 * newline at the end of this line. If this is the last line,
1473 * there is no newline, but it will be added immediately below.
1474 */
1475 len += prefix_len + line_len + 1;
1476 if (text_len == line_len) {
1477 /*
1478 * This is the last line. Add the trailing newline
1479 * removed in vprintk_store().
1480 */
1481 text[prefix_len + line_len] = '\n';
1482 break;
1483 }
1484
1485 /*
1486 * Advance beyond the added prefix and the related line with
1487 * its newline.
1488 */
1489 text += prefix_len + line_len + 1;
1490
1491 /*
1492 * The remaining text has only decreased by the line with its
1493 * newline.
1494 *
1495 * Note that @text_len can become zero. It happens when @text
1496 * ended with a newline (either due to truncation or the
1497 * original string ending with "\n\n"). The loop is correctly
1498 * repeated and (if not truncated) an empty line with a prefix
1499 * will be prepared.
1500 */
1501 text_len -= line_len + 1;
1502 }
1503
1504 /*
1505 * If a buffer was provided, it will be terminated. Space for the
1506 * string terminator is guaranteed to be available. The terminator is
1507 * not counted in the return value.
1508 */
1509 if (buf_size > 0)
1510 r->text_buf[len] = 0;
1511
1512 return len;
1513 }
1514
1515 static size_t get_record_print_text_size(struct printk_info *info,
1516 unsigned int line_count,
1517 bool syslog, bool time)
1518 {
1519 char prefix[PRINTK_PREFIX_MAX];
1520 size_t prefix_len;
1521
1522 prefix_len = info_print_prefix(info, syslog, time, prefix);
1523
1524 /*
1525 * Each line will be preceded with a prefix. The intermediate
1526 * newlines are already within the text, but a final trailing
1527 * newline will be added.
1528 */
1529 return ((prefix_len * line_count) + info->text_len + 1);
1530 }
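/*
 * Continuing the hypothetical example above: two lines of "ab\ncd"
 * (text_len = 5, line_count = 2) with a 15-byte time prefix yield
 * 15 * 2 + 5 + 1 = 36, matching what record_print_text() produces.
 */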
1531
1532 /*
1533 * Beginning with @start_seq, find the first record where it and all following
1534 * records up to (but not including) @max_seq fit into @size.
1535 *
1536 * @max_seq is simply an upper bound and does not need to exist. If the caller
1537 * does not require an upper bound, -1 can be used for @max_seq.
1538 */
1539 static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
1540 bool syslog, bool time)
1541 {
1542 struct printk_info info;
1543 unsigned int line_count;
1544 size_t len = 0;
1545 u64 seq;
1546
1547 /* Determine the size of the records up to @max_seq. */
1548 prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
1549 if (info.seq >= max_seq)
1550 break;
1551 len += get_record_print_text_size(&info, line_count, syslog, time);
1552 }
1553
1554 /*
1555 * Adjust the upper bound for the next loop to avoid subtracting
1556 * lengths that were never added.
1557 */
1558 if (seq < max_seq)
1559 max_seq = seq;
1560
1561 /*
1562 * Move first record forward until length fits into the buffer. Ignore
1563 * newest messages that were not counted in the above cycle. Messages
1564 * might appear and get lost in the meantime. This is a best effort
1565 * that prevents an infinite loop that could occur with a retry.
1566 */
1567 prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
1568 if (len <= size || info.seq >= max_seq)
1569 break;
1570 len -= get_record_print_text_size(&info, line_count, syslog, time);
1571 }
1572
1573 return seq;
1574 }
1575
1576 /* The caller is responsible for making sure @size is greater than 0. */
1577 static int syslog_print(char __user *buf, int size)
1578 {
1579 struct printk_info info;
1580 struct printk_record r;
1581 char *text;
1582 int len = 0;
1583 u64 seq;
1584
1585 text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
1586 if (!text)
1587 return -ENOMEM;
1588
1589 prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
1590
1591 mutex_lock(&syslog_lock);
1592
1593 /*
1594 * Wait for the @syslog_seq record to be available. @syslog_seq may
1595 * change while waiting.
1596 */
1597 do {
1598 seq = syslog_seq;
1599
1600 mutex_unlock(&syslog_lock);
1601 /*
1602 * Guarantee this task is visible on the waitqueue before
1603 * checking the wake condition.
1604 *
1605 * The full memory barrier within set_current_state() of
1606 * prepare_to_wait_event() pairs with the full memory barrier
1607 * within wq_has_sleeper().
1608 *
1609 * This pairs with __wake_up_klogd:A.
1610 */
1611 len = wait_event_interruptible(log_wait,
1612 prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */
1613 mutex_lock(&syslog_lock);
1614
1615 if (len)
1616 goto out;
1617 } while (syslog_seq != seq);
1618
1619 /*
1620 * Copy records that fit into the buffer. The above cycle makes sure
1621 * that the first record is always available.
1622 */
1623 do {
1624 size_t n;
1625 size_t skip;
1626 int err;
1627
1628 if (!prb_read_valid(prb, syslog_seq, &r))
1629 break;
1630
1631 if (r.info->seq != syslog_seq) {
1632 /* message is gone, move to next valid one */
1633 syslog_seq = r.info->seq;
1634 syslog_partial = 0;
1635 }
1636
1637 /*
1638 * To keep reading/counting partial line consistent,
1639 * use printk_time value as of the beginning of a line.
1640 */
1641 if (!syslog_partial)
1642 syslog_time = printk_time;
1643
1644 skip = syslog_partial;
1645 n = record_print_text(&r, true, syslog_time);
1646 if (n - syslog_partial <= size) {
1647 /* message fits into buffer, move forward */
1648 syslog_seq = r.info->seq + 1;
1649 n -= syslog_partial;
1650 syslog_partial = 0;
1651 } else if (!len) {
1652 /* partial read(), remember position */
1653 n = size;
1654 syslog_partial += n;
1655 } else
1656 n = 0;
1657
1658 if (!n)
1659 break;
1660
1661 mutex_unlock(&syslog_lock);
1662 err = copy_to_user(buf, text + skip, n);
1663 mutex_lock(&syslog_lock);
1664
1665 if (err) {
1666 if (!len)
1667 len = -EFAULT;
1668 break;
1669 }
1670
1671 len += n;
1672 size -= n;
1673 buf += n;
1674 } while (size);
1675 out:
1676 mutex_unlock(&syslog_lock);
1677 kfree(text);
1678 return len;
1679 }
1680
1681 static int syslog_print_all(char __user *buf, int size, bool clear)
1682 {
1683 struct printk_info info;
1684 struct printk_record r;
1685 char *text;
1686 int len = 0;
1687 u64 seq;
1688 bool time;
1689
1690 text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
1691 if (!text)
1692 return -ENOMEM;
1693
1694 time = printk_time;
1695 /*
1696 * Find first record that fits, including all following records,
1697 * into the user-provided buffer for this dump.
1698 */
1699 seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1,
1700 size, true, time);
1701
1702 prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
1703
1704 prb_for_each_record(seq, prb, seq, &r) {
1705 int textlen;
1706
1707 textlen = record_print_text(&r, true, time);
1708
1709 if (len + textlen > size) {
1710 seq--;
1711 break;
1712 }
1713
1714 if (copy_to_user(buf + len, text, textlen))
1715 len = -EFAULT;
1716 else
1717 len += textlen;
1718
1719 if (len < 0)
1720 break;
1721 }
1722
1723 if (clear) {
1724 mutex_lock(&syslog_lock);
1725 latched_seq_write(&clear_seq, seq);
1726 mutex_unlock(&syslog_lock);
1727 }
1728
1729 kfree(text);
1730 return len;
1731 }
1732
1733 static void syslog_clear(void)
1734 {
1735 mutex_lock(&syslog_lock);
1736 latched_seq_write(&clear_seq, prb_next_seq(prb));
1737 mutex_unlock(&syslog_lock);
1738 }
1739
1740 int do_syslog(int type, char __user *buf, int len, int source)
1741 {
1742 struct printk_info info;
1743 bool clear = false;
1744 static int saved_console_loglevel = LOGLEVEL_DEFAULT;
1745 int error;
1746
1747 error = check_syslog_permissions(type, source);
1748 if (error)
1749 return error;
1750
1751 switch (type) {
1752 case SYSLOG_ACTION_CLOSE: /* Close log */
1753 break;
1754 case SYSLOG_ACTION_OPEN: /* Open log */
1755 break;
1756 case SYSLOG_ACTION_READ: /* Read from log */
1757 if (!buf || len < 0)
1758 return -EINVAL;
1759 if (!len)
1760 return 0;
1761 if (!access_ok(buf, len))
1762 return -EFAULT;
1763 error = syslog_print(buf, len);
1764 break;
1765 /* Read/clear last kernel messages */
1766 case SYSLOG_ACTION_READ_CLEAR:
1767 clear = true;
1768 fallthrough;
1769 /* Read last kernel messages */
1770 case SYSLOG_ACTION_READ_ALL:
1771 if (!buf || len < 0)
1772 return -EINVAL;
1773 if (!len)
1774 return 0;
1775 if (!access_ok(buf, len))
1776 return -EFAULT;
1777 error = syslog_print_all(buf, len, clear);
1778 break;
1779 /* Clear ring buffer */
1780 case SYSLOG_ACTION_CLEAR:
1781 syslog_clear();
1782 break;
1783 /* Disable logging to console */
1784 case SYSLOG_ACTION_CONSOLE_OFF:
1785 if (saved_console_loglevel == LOGLEVEL_DEFAULT)
1786 saved_console_loglevel = console_loglevel;
1787 console_loglevel = minimum_console_loglevel;
1788 break;
1789 /* Enable logging to console */
1790 case SYSLOG_ACTION_CONSOLE_ON:
1791 if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
1792 console_loglevel = saved_console_loglevel;
1793 saved_console_loglevel = LOGLEVEL_DEFAULT;
1794 }
1795 break;
1796 /* Set level of messages printed to console */
1797 case SYSLOG_ACTION_CONSOLE_LEVEL:
1798 if (len < 1 || len > 8)
1799 return -EINVAL;
1800 if (len < minimum_console_loglevel)
1801 len = minimum_console_loglevel;
1802 console_loglevel = len;
1803 /* Implicitly re-enable logging to console */
1804 saved_console_loglevel = LOGLEVEL_DEFAULT;
1805 break;
1806 /* Number of chars in the log buffer */
1807 case SYSLOG_ACTION_SIZE_UNREAD:
1808 mutex_lock(&syslog_lock);
1809 if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
1810 /* No unread messages. */
1811 mutex_unlock(&syslog_lock);
1812 return 0;
1813 }
1814 if (info.seq != syslog_seq) {
1815 /* messages are gone, move to first one */
1816 syslog_seq = info.seq;
1817 syslog_partial = 0;
1818 }
1819 if (source == SYSLOG_FROM_PROC) {
1820 /*
1821 * Short-cut for poll(/proc/kmsg) which simply checks
1822 * for pending data, not the size; return the count of
1823 * records, not the length.
1824 */
1825 error = prb_next_seq(prb) - syslog_seq;
1826 } else {
1827 bool time = syslog_partial ? syslog_time : printk_time;
1828 unsigned int line_count;
1829 u64 seq;
1830
1831 prb_for_each_info(syslog_seq, prb, seq, &info,
1832 &line_count) {
1833 error += get_record_print_text_size(&info, line_count,
1834 true, time);
1835 time = printk_time;
1836 }
1837 error -= syslog_partial;
1838 }
1839 mutex_unlock(&syslog_lock);
1840 break;
1841 /* Size of the log buffer */
1842 case SYSLOG_ACTION_SIZE_BUFFER:
1843 error = log_buf_len;
1844 break;
1845 default:
1846 error = -EINVAL;
1847 break;
1848 }
1849
1850 return error;
1851 }
1852
1853 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
1854 {
1855 return do_syslog(type, buf, len, SYSLOG_FROM_READER);
1856 }
1857
1858 /*
1859 * Special console_lock variants that help to reduce the risk of soft-lockups.
1860 * They allow console_lock to be passed to another printk() call using a busy wait.
1861 */
1862
1863 #ifdef CONFIG_LOCKDEP
1864 static struct lockdep_map console_owner_dep_map = {
1865 .name = "console_owner"
1866 };
1867 #endif
1868
1869 static DEFINE_RAW_SPINLOCK(console_owner_lock);
1870 static struct task_struct *console_owner;
1871 static bool console_waiter;
1872
1873 /**
1874 * console_lock_spinning_enable - mark beginning of code where another
1875 * thread might safely busy wait
1876 *
1877 * This basically converts console_lock into a spinlock. This marks
1878 * the section where the console_lock owner can not sleep, because
1879 * there may be a waiter spinning (like a spinlock). Also it must be
1880 * ready to hand over the lock at the end of the section.
1881 */
1882 void console_lock_spinning_enable(void)
1883 {
1884 /*
1885 * Do not use spinning in panic(). The panic CPU wants to keep the lock.
1886 * Non-panic CPUs abandon the flush anyway.
1887 *
1888 * Just keep the lockdep annotation. The panic CPU should avoid
1889 * taking console_owner_lock because it might cause a deadlock.
1890 * This looks like the easiest way to prevent false lockdep
1891 * reports without handling the races in a lockless way.
1892 */
1893 if (panic_in_progress())
1894 goto lockdep;
1895
1896 raw_spin_lock(&console_owner_lock);
1897 console_owner = current;
1898 raw_spin_unlock(&console_owner_lock);
1899
1900 lockdep:
1901 /* The waiter may spin on us after setting console_owner */
1902 spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
1903 }
1904
1905 /**
1906 * console_lock_spinning_disable_and_check - mark end of code where another
1907 * thread was able to busy wait and check if there is a waiter
1908 * @cookie: cookie returned from console_srcu_read_lock()
1909 *
1910 * This is called at the end of the section where spinning is allowed.
1911 * It has two functions. First, it is a signal that it is no longer
1912 * safe to start busy waiting for the lock. Second, it checks if
1913 * there is a busy waiter and passes the lock rights to it.
1914 *
1915 * Important: Callers lose both the console_lock and the SRCU read lock if
1916 * there was a busy waiter. They must not touch items synchronized by
1917 * console_lock or SRCU read lock in this case.
1918 *
1919 * Return: 1 if the lock rights were passed, 0 otherwise.
1920 */
1921 int console_lock_spinning_disable_and_check(int cookie)
1922 {
1923 int waiter;
1924
1925 /*
1926 * Ignore spinning waiters during panic() because they might get stopped
1927 * or blocked at any time.
1928 *
1929 * It is safe because nobody is allowed to start spinning during panic
1930 * in the first place. If there has been a waiter then non-panic CPUs
1931 * might stay spinning. They would get stopped anyway. The panic context
1932 * will never start spinning and an interrupted spin on the panic CPU will
1933 * never continue.
1934 */
1935 if (panic_in_progress()) {
1936 /* Keep lockdep happy. */
1937 spin_release(&console_owner_dep_map, _THIS_IP_);
1938 return 0;
1939 }
1940
1941 raw_spin_lock(&console_owner_lock);
1942 waiter = READ_ONCE(console_waiter);
1943 console_owner = NULL;
1944 raw_spin_unlock(&console_owner_lock);
1945
1946 if (!waiter) {
1947 spin_release(&console_owner_dep_map, _THIS_IP_);
1948 return 0;
1949 }
1950
1951 /* The waiter is now free to continue */
1952 WRITE_ONCE(console_waiter, false);
1953
1954 spin_release(&console_owner_dep_map, _THIS_IP_);
1955
1956 /*
1957 * Preserve lockdep lock ordering. Release the SRCU read lock before
1958 * releasing the console_lock.
1959 */
1960 console_srcu_read_unlock(cookie);
1961
1962 /*
1963 * Hand off console_lock to waiter. The waiter will perform
1964 * the up(). After this, the waiter is the console_lock owner.
1965 */
1966 mutex_release(&console_lock_dep_map, _THIS_IP_);
1967 return 1;
1968 }
1969
1970 /**
1971 * console_trylock_spinning - try to get console_lock by busy waiting
1972 *
1973 * This allows to busy wait for the console_lock when the current
1974 * owner is running in specially marked sections. It means that
1975 * the current owner is running and cannot reschedule until it
1976 * is ready to lose the lock.
1977 *
1978 * Return: 1 if we got the lock, 0 otherwise
1979 */
1980 static int console_trylock_spinning(void)
1981 {
1982 struct task_struct *owner = NULL;
1983 bool waiter;
1984 bool spin = false;
1985 unsigned long flags;
1986
1987 if (console_trylock())
1988 return 1;
1989
1990 /*
1991 * It's unsafe to spin once a panic has begun. If we are the
1992 * panic CPU, we may have already halted the owner of the
1993 * console_sem. If we are not the panic CPU, then we should
1994 * avoid taking console_sem, so the panic CPU has a better
1995 * chance of cleanly acquiring it later.
1996 */
1997 if (panic_in_progress())
1998 return 0;
1999
2000 printk_safe_enter_irqsave(flags);
2001
2002 raw_spin_lock(&console_owner_lock);
2003 owner = READ_ONCE(console_owner);
2004 waiter = READ_ONCE(console_waiter);
2005 if (!waiter && owner && owner != current) {
2006 WRITE_ONCE(console_waiter, true);
2007 spin = true;
2008 }
2009 raw_spin_unlock(&console_owner_lock);
2010
2011 /*
2012 * If there is an active printk() writing to the
2013 * consoles, instead of having it write our data too,
2014 * see if we can offload that load from the active
2015 * printer, and do some printing ourselves.
2016 * Go into a spin only if there isn't already a waiter
2017 * spinning, and there is an active printer, and
2018 * that active printer isn't us (recursive printk?).
2019 */
2020 if (!spin) {
2021 printk_safe_exit_irqrestore(flags);
2022 return 0;
2023 }
2024
2025 /* We spin waiting for the owner to release us */
2026 spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
2027 /* Owner will clear console_waiter on hand off */
2028 while (READ_ONCE(console_waiter))
2029 cpu_relax();
2030 spin_release(&console_owner_dep_map, _THIS_IP_);
2031
2032 printk_safe_exit_irqrestore(flags);
2033 /*
2034 * The owner passed the console lock to us.
2035 * Since we did not spin on console lock, annotate
2036 * this as a trylock. Otherwise lockdep will
2037 * complain.
2038 */
2039 mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
2040
2041 /*
2042 * Update @console_may_schedule for trylock because the previous
2043 * owner may have been schedulable.
2044 */
2045 console_may_schedule = 0;
2046
2047 return 1;
2048 }
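/*
 * Pairing sketch (hypothetical helper, not kernel API): how the
 * spinning enable/disable calls bracket a single console write,
 * assuming the caller holds the console_lock and the SRCU read lock
 * and runs in printk-safe context with interrupts disabled.
 * console_emit_next_record() below is the real in-tree user.
 */
static bool __maybe_unused console_spin_section_example(struct console *con,
							const char *buf,
							unsigned int len,
							int cookie)
{
	bool handover;

	/* From here on, another CPU may busy wait for the lock. */
	console_lock_spinning_enable();

	con->write(con, buf, len);

	/*
	 * On handover (return value 1), both the console_lock and the
	 * SRCU read lock have already been released for us.
	 */
	handover = console_lock_spinning_disable_and_check(cookie);
	return handover;
}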
2049
2050 /*
2051 * Recursion is tracked separately on each CPU. If NMIs are supported, an
2052 * additional NMI context per CPU is also separately tracked. Until per-CPU
2053 * data is available, a separate "early tracking" is performed.
2054 */
2055 static DEFINE_PER_CPU(u8, printk_count);
2056 static u8 printk_count_early;
2057 #ifdef CONFIG_HAVE_NMI
2058 static DEFINE_PER_CPU(u8, printk_count_nmi);
2059 static u8 printk_count_nmi_early;
2060 #endif
2061
2062 /*
2063 * Recursion is limited to keep the output sane. printk() should not require
2064 * more than 1 level of recursion (allowing, for example, printk() to trigger
2065 * a WARN), but a higher value is used in case some printk-internal errors
2066 * exist, such as the ringbuffer validation checks failing.
2067 */
2068 #define PRINTK_MAX_RECURSION 3
2069
2070 /*
2071 * Return a pointer to the dedicated counter for the CPU+context of the
2072 * caller.
2073 */
2074 static u8 *__printk_recursion_counter(void)
2075 {
2076 #ifdef CONFIG_HAVE_NMI
2077 if (in_nmi()) {
2078 if (printk_percpu_data_ready())
2079 return this_cpu_ptr(&printk_count_nmi);
2080 return &printk_count_nmi_early;
2081 }
2082 #endif
2083 if (printk_percpu_data_ready())
2084 return this_cpu_ptr(&printk_count);
2085 return &printk_count_early;
2086 }
2087
2088 /*
2089 * Enter recursion tracking. Interrupts are disabled to simplify tracking.
2090 * The caller must check the boolean return value to see if the recursion is
2091 * allowed. On failure, interrupts are not disabled.
2092 *
2093 * @recursion_ptr must be a variable of type (u8 *) and is the same variable
2094 * that is passed to printk_exit_irqrestore().
2095 */
2096 #define printk_enter_irqsave(recursion_ptr, flags) \
2097 ({ \
2098 bool success = true; \
2099 \
2100 typecheck(u8 *, recursion_ptr); \
2101 local_irq_save(flags); \
2102 (recursion_ptr) = __printk_recursion_counter(); \
2103 if (*(recursion_ptr) > PRINTK_MAX_RECURSION) { \
2104 local_irq_restore(flags); \
2105 success = false; \
2106 } else { \
2107 (*(recursion_ptr))++; \
2108 } \
2109 success; \
2110 })
2111
2112 /* Exit recursion tracking, restoring interrupts. */
2113 #define printk_exit_irqrestore(recursion_ptr, flags) \
2114 do { \
2115 typecheck(u8 *, recursion_ptr); \
2116 (*(recursion_ptr))--; \
2117 local_irq_restore(flags); \
2118 } while (0)
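/*
 * Usage sketch (hypothetical helper, mirrors vprintk_store() below):
 * the enter/exit macros must pair on the same recursion pointer, and a
 * failed entry means the message must be dropped without touching the
 * counter or interrupt state.
 */
static int __maybe_unused printk_recursion_usage_example(void)
{
	unsigned long flags;
	u8 *recursion_ptr;

	if (!printk_enter_irqsave(recursion_ptr, flags))
		return 0;	/* recursion limit exceeded, drop the message */

	/* ... safely store the message into the ringbuffer here ... */

	printk_exit_irqrestore(recursion_ptr, flags);
	return 1;
}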
2119
2120 int printk_delay_msec __read_mostly;
2121
2122 static inline void printk_delay(int level)
2123 {
2124 boot_delay_msec(level);
2125
2126 if (unlikely(printk_delay_msec)) {
2127 int m = printk_delay_msec;
2128
2129 while (m--) {
2130 mdelay(1);
2131 touch_nmi_watchdog();
2132 }
2133 }
2134 }
2135
2136 #define CALLER_ID_MASK 0x80000000
2137
2138 static inline u32 printk_caller_id(void)
2139 {
2140 return in_task() ? task_pid_nr(current) :
2141 CALLER_ID_MASK + smp_processor_id();
2142 }
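/*
 * Encoding example (illustrative): a printk() from PID 1234 in task
 * context stores caller_id 1234, while the same call from IRQ context
 * on CPU 2 stores 0x80000002 (CALLER_ID_MASK + 2). The helpers below
 * invert this encoding when CONFIG_PRINTK_EXECUTION_CTX is enabled.
 */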
2143
2144 #ifdef CONFIG_PRINTK_EXECUTION_CTX
2145 /* Store the complementary info to caller_id. */
2146 static u32 printk_caller_id2(void)
2147 {
2148 return !in_task() ? task_pid_nr(current) :
2149 CALLER_ID_MASK + smp_processor_id();
2150 }
2151
2152 static pid_t printk_info_get_pid(const struct printk_info *info)
2153 {
2154 u32 caller_id = info->caller_id;
2155 u32 caller_id2 = info->caller_id2;
2156
2157 return caller_id & CALLER_ID_MASK ? caller_id2 : caller_id;
2158 }
2159
2160 static int printk_info_get_cpu(const struct printk_info *info)
2161 {
2162 u32 caller_id = info->caller_id;
2163 u32 caller_id2 = info->caller_id2;
2164
2165 return ((caller_id & CALLER_ID_MASK ?
2166 caller_id : caller_id2) & ~CALLER_ID_MASK);
2167 }
2168 #endif
2169
2170 /**
2171 * printk_parse_prefix - Parse level and control flags.
2172 *
2173 * @text: The terminated text message.
2174 * @level: A pointer to the current level value, will be updated.
2175 * @flags: A pointer to the current printk_info flags, will be updated.
2176 *
2177 * @level may be NULL if the caller is not interested in the parsed value.
2178 * Otherwise the variable pointed to by @level must be set to
2179 * LOGLEVEL_DEFAULT in order to be updated with the parsed value.
2180 *
2181 * @flags may be NULL if the caller is not interested in the parsed value.
2182 * Otherwise the variable pointed to by @flags will be OR'd with the parsed
2183 * value.
2184 *
2185 * Return: The length of the parsed level and control flags.
2186 */
2187 u16 printk_parse_prefix(const char *text, int *level,
2188 enum printk_info_flags *flags)
2189 {
2190 u16 prefix_len = 0;
2191 int kern_level;
2192
2193 while (*text) {
2194 kern_level = printk_get_level(text);
2195 if (!kern_level)
2196 break;
2197
2198 switch (kern_level) {
2199 case '0' ... '7':
2200 if (level && *level == LOGLEVEL_DEFAULT)
2201 *level = kern_level - '0';
2202 break;
2203 case 'c': /* KERN_CONT */
2204 if (flags)
2205 *flags |= LOG_CONT;
2206 }
2207
2208 prefix_len += 2;
2209 text += 2;
2210 }
2211
2212 return prefix_len;
2213 }
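/*
 * Parsing sketch (hypothetical helper, not kernel API): a message
 * built with KERN_ERR ("\0013") carries a two-byte prefix; parsing
 * yields level 3 and leaves the flags untouched, while KERN_CONT
 * ("\001c") would set LOG_CONT instead.
 */
static void __maybe_unused printk_parse_prefix_example(void)
{
	enum printk_info_flags flags = 0;
	int level = LOGLEVEL_DEFAULT;
	u16 prefix_len;

	prefix_len = printk_parse_prefix("\0013Disk failure\n", &level, &flags);
	/* Now prefix_len == 2, level == 3 and flags == 0. */
	(void)prefix_len;
}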
2214
2215 __printf(5, 0)
2216 static u16 printk_sprint(char *text, u16 size, int facility,
2217 enum printk_info_flags *flags, const char *fmt,
2218 va_list args)
2219 {
2220 u16 text_len;
2221
2222 text_len = vscnprintf(text, size, fmt, args);
2223
2224 /* Mark and strip a trailing newline. */
2225 if (text_len && text[text_len - 1] == '\n') {
2226 text_len--;
2227 *flags |= LOG_NEWLINE;
2228 }
2229
2230 /* Strip log level and control flags. */
2231 if (facility == 0) {
2232 u16 prefix_len;
2233
2234 prefix_len = printk_parse_prefix(text, NULL, NULL);
2235 if (prefix_len) {
2236 text_len -= prefix_len;
2237 memmove(text, text + prefix_len, text_len);
2238 }
2239 }
2240
2241 trace_console(text, text_len);
2242
2243 return text_len;
2244 }
2245
2246 #ifdef CONFIG_PRINTK_EXECUTION_CTX
2247 static void printk_store_execution_ctx(struct printk_info *info)
2248 {
2249 info->caller_id2 = printk_caller_id2();
2250 get_task_comm(info->comm, current);
2251 }
2252
2253 static void pmsg_load_execution_ctx(struct printk_message *pmsg,
2254 const struct printk_info *info)
2255 {
2256 pmsg->cpu = printk_info_get_cpu(info);
2257 pmsg->pid = printk_info_get_pid(info);
2258 memcpy(pmsg->comm, info->comm, sizeof(pmsg->comm));
2259 static_assert(sizeof(pmsg->comm) == sizeof(info->comm));
2260 }
2261 #else
2262 static void printk_store_execution_ctx(struct printk_info *info) {}
2263
2264 static void pmsg_load_execution_ctx(struct printk_message *pmsg,
2265 const struct printk_info *info) {}
2266 #endif
2267
2268 __printf(4, 0)
2269 int vprintk_store(int facility, int level,
2270 const struct dev_printk_info *dev_info,
2271 const char *fmt, va_list args)
2272 {
2273 struct prb_reserved_entry e;
2274 enum printk_info_flags flags = 0;
2275 struct printk_record r;
2276 unsigned long irqflags;
2277 u16 trunc_msg_len = 0;
2278 char prefix_buf[8];
2279 u8 *recursion_ptr;
2280 u16 reserve_size;
2281 va_list args2;
2282 u32 caller_id;
2283 u16 text_len;
2284 int ret = 0;
2285 u64 ts_nsec;
2286
2287 if (!printk_enter_irqsave(recursion_ptr, irqflags))
2288 return 0;
2289
2290 /*
2291 * Since the duration of printk() can vary depending on the message
2292 * and state of the ringbuffer, grab the timestamp now so that it is
2293 * close to the call of printk(). This provides a more deterministic
2294 * timestamp with respect to the caller.
2295 */
2296 ts_nsec = local_clock();
2297
2298 caller_id = printk_caller_id();
2299
2300 /*
2301 * The sprintf needs to come first since the syslog prefix might be
2302 * passed in as a parameter. An extra byte must be reserved so that
2303 * later the vscnprintf() into the reserved buffer has room for the
2304 * terminating '\0', which is not counted by vsnprintf().
2305 */
2306 va_copy(args2, args);
2307 reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1;
2308 va_end(args2);
2309
2310 if (reserve_size > PRINTKRB_RECORD_MAX)
2311 reserve_size = PRINTKRB_RECORD_MAX;
2312
2313 /* Extract log level or control flags. */
2314 if (facility == 0)
2315 printk_parse_prefix(&prefix_buf[0], &level, &flags);
2316
2317 if (level == LOGLEVEL_DEFAULT)
2318 level = default_message_loglevel;
2319
2320 if (dev_info)
2321 flags |= LOG_NEWLINE;
2322
2323 if (is_printk_force_console())
2324 flags |= LOG_FORCE_CON;
2325
2326 if (flags & LOG_CONT) {
2327 prb_rec_init_wr(&r, reserve_size);
2328 if (prb_reserve_in_last(&e, prb, &r, caller_id, PRINTKRB_RECORD_MAX)) {
2329 text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
2330 facility, &flags, fmt, args);
2331 r.info->text_len += text_len;
2332
2333 if (flags & LOG_FORCE_CON)
2334 r.info->flags |= LOG_FORCE_CON;
2335
2336 if (flags & LOG_NEWLINE) {
2337 r.info->flags |= LOG_NEWLINE;
2338 prb_final_commit(&e);
2339 } else {
2340 prb_commit(&e);
2341 }
2342
2343 ret = text_len;
2344 goto out;
2345 }
2346 }
2347
2348 /*
2349 * Explicitly initialize the record before every prb_reserve() call.
2350 * prb_reserve_in_last() and prb_reserve() purposely invalidate the
2351 * structure when they fail.
2352 */
2353 prb_rec_init_wr(&r, reserve_size);
2354 if (!prb_reserve(&e, prb, &r)) {
2355 /* truncate the message if it is too long for an empty buffer */
2356 truncate_msg(&reserve_size, &trunc_msg_len);
2357
2358 prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
2359 if (!prb_reserve(&e, prb, &r))
2360 goto out;
2361 }
2362
2363 /* fill message */
2364 text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args);
2365 if (trunc_msg_len)
2366 memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
2367 r.info->text_len = text_len + trunc_msg_len;
2368 r.info->facility = facility;
2369 r.info->level = level & 7;
2370 r.info->flags = flags & 0x1f;
2371 r.info->ts_nsec = ts_nsec;
2372 r.info->caller_id = caller_id;
2373 if (dev_info)
2374 memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
2375 printk_store_execution_ctx(r.info);
2376
2377 /* A message without a trailing newline can be continued. */
2378 if (!(flags & LOG_NEWLINE))
2379 prb_commit(&e);
2380 else
2381 prb_final_commit(&e);
2382
2383 ret = text_len + trunc_msg_len;
2384 out:
2385 printk_exit_irqrestore(recursion_ptr, irqflags);
2386 return ret;
2387 }
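/*
 * Continuation example (illustrative): pr_info("a"); pr_cont("b\n");
 * The first record has no trailing newline, so it is committed without
 * LOG_NEWLINE and left open. The second call carries LOG_CONT and
 * appends via prb_reserve_in_last(); its trailing newline sets
 * LOG_NEWLINE and the record is finalized.
 */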
2388
2389 /*
2390 * This acts as a one-way switch to allow legacy consoles to print from
2391 * the printk() caller context on a panic CPU. It also attempts to flush
2392 * the legacy consoles in this context.
2393 */
2394 void printk_legacy_allow_panic_sync(void)
2395 {
2396 struct console_flush_type ft;
2397
2398 legacy_allow_panic_sync = true;
2399
2400 printk_get_console_flush_type(&ft);
2401 if (ft.legacy_direct) {
2402 if (console_trylock())
2403 console_unlock();
2404 }
2405 }
2406
2407 bool __read_mostly debug_non_panic_cpus;
2408
2409 #ifdef CONFIG_PRINTK_CALLER
2410 static int __init debug_non_panic_cpus_setup(char *str)
2411 {
2412 debug_non_panic_cpus = true;
2413 pr_info("allow messages from non-panic CPUs in panic()\n");
2414
2415 return 0;
2416 }
2417 early_param("debug_non_panic_cpus", debug_non_panic_cpus_setup);
2418 module_param(debug_non_panic_cpus, bool, 0644);
2419 MODULE_PARM_DESC(debug_non_panic_cpus,
2420 "allow messages from non-panic CPUs in panic()");
2421 #endif
2422
2423 asmlinkage int vprintk_emit(int facility, int level,
2424 const struct dev_printk_info *dev_info,
2425 const char *fmt, va_list args)
2426 {
2427 struct console_flush_type ft;
2428 int printed_len;
2429
2430 /* Suppress unimportant messages after panic happens */
2431 if (unlikely(suppress_printk))
2432 return 0;
2433
2434 /*
2435 * The messages on the panic CPU are the most important. If
2436 * non-panic CPUs are generating any messages, they will be
2437 * silently dropped.
2438 */
2439 if (panic_on_other_cpu() &&
2440 !debug_non_panic_cpus &&
2441 !panic_triggering_all_cpu_backtrace)
2442 return 0;
2443
2444 printk_get_console_flush_type(&ft);
2445
2446 /* If called from the scheduler, we can not call up(). */
2447 if (level == LOGLEVEL_SCHED) {
2448 level = LOGLEVEL_DEFAULT;
2449 ft.legacy_offload |= ft.legacy_direct && !console_irqwork_blocked;
2450 ft.legacy_direct = false;
2451 }
2452
2453 printk_delay(level);
2454
2455 printed_len = vprintk_store(facility, level, dev_info, fmt, args);
2456
2457 if (ft.nbcon_atomic)
2458 nbcon_atomic_flush_pending();
2459
2460 if (ft.nbcon_offload)
2461 nbcon_kthreads_wake();
2462
2463 if (ft.legacy_direct) {
2464 /*
2465 * The caller may be holding system-critical or
2466 * timing-sensitive locks. Disable preemption during
2467 * printing of all remaining records to all consoles so that
2468 * this context can return as soon as possible. Hopefully
2469 * another printk() caller will take over the printing.
2470 */
2471 preempt_disable();
2472 /*
2473 * Try to acquire and then immediately release the console
2474 * semaphore. The release will print out buffers. With the
2475 * spinning variant, this context tries to take over the
2476 * printing from another printing context.
2477 */
2478 if (console_trylock_spinning())
2479 console_unlock();
2480 preempt_enable();
2481 }
2482
2483 if (ft.legacy_offload)
2484 defer_console_output();
2485 else if (!console_irqwork_blocked)
2486 wake_up_klogd();
2487
2488 return printed_len;
2489 }
2490 EXPORT_SYMBOL(vprintk_emit);
2491
2492 int vprintk_default(const char *fmt, va_list args)
2493 {
2494 return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
2495 }
2496 EXPORT_SYMBOL_GPL(vprintk_default);
2497
2498 asmlinkage __visible int _printk(const char *fmt, ...)
2499 {
2500 va_list args;
2501 int r;
2502
2503 va_start(args, fmt);
2504 r = vprintk(fmt, args);
2505 va_end(args);
2506
2507 return r;
2508 }
2509 EXPORT_SYMBOL(_printk);
2510
2511 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
2512
2513 #else /* CONFIG_PRINTK */
2514
2515 #define printk_time false
2516
2517 #define prb_read_valid(rb, seq, r) false
2518 #define prb_first_valid_seq(rb) 0
2519 #define prb_next_seq(rb) 0
2520
2521 static u64 syslog_seq;
2522
2523 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
2524
2525 #endif /* CONFIG_PRINTK */
2526
2527 #ifdef CONFIG_EARLY_PRINTK
2528 struct console *early_console;
2529
2530 asmlinkage __visible void early_printk(const char *fmt, ...)
2531 {
2532 va_list ap;
2533 char buf[512];
2534 int n;
2535
2536 if (!early_console)
2537 return;
2538
2539 va_start(ap, fmt);
2540 n = vscnprintf(buf, sizeof(buf), fmt, ap);
2541 va_end(ap);
2542
2543 early_console->write(early_console, buf, n);
2544 }
2545 #endif
2546
2547 static void set_user_specified(struct console_cmdline *c, bool user_specified)
2548 {
2549 if (!user_specified)
2550 return;
2551
2552 /*
2553 * @c console was defined by the user on the command line.
2554 * Do not clear it when the console is also added by SPCR or the device tree.
2555 */
2556 c->user_specified = true;
2557 /* At least one console defined by the user on the command line. */
2558 console_set_on_cmdline = 1;
2559 }
2560
2561 static int __add_preferred_console(const char *name, const short idx,
2562 const char *devname, char *options,
2563 char *brl_options, bool user_specified)
2564 {
2565 struct console_cmdline *c;
2566 int i;
2567
2568 if (!name && !devname)
2569 return -EINVAL;
2570
2571 /*
2572 * We use a signed short index in struct console so that device drivers
2573 * can indicate a not yet assigned index or port. However, a negative index
2574 * value is not valid when the console name and index are defined on
2575 * the command line.
2576 */
2577 if (name && idx < 0)
2578 return -EINVAL;
2579
2580 /*
2581 * See if this tty is not yet registered, and
2582 * if we have a slot free.
2583 */
2584 for (i = 0, c = console_cmdline;
2585 i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
2586 i++, c++) {
2587 if ((name && strcmp(c->name, name) == 0 && c->index == idx) ||
2588 (devname && strcmp(c->devname, devname) == 0)) {
2589 if (!brl_options)
2590 preferred_console = i;
2591 set_user_specified(c, user_specified);
2592 return 0;
2593 }
2594 }
2595 if (i == MAX_CMDLINECONSOLES)
2596 return -E2BIG;
2597 if (!brl_options)
2598 preferred_console = i;
2599 if (name)
2600 strscpy(c->name, name);
2601 if (devname)
2602 strscpy(c->devname, devname);
2603 c->options = options;
2604 set_user_specified(c, user_specified);
2605 braille_set_options(c, brl_options);
2606
2607 c->index = idx;
2608 return 0;
2609 }
2610
2611 static int __init console_msg_format_setup(char *str)
2612 {
2613 if (!strcmp(str, "syslog"))
2614 console_msg_format = MSG_FORMAT_SYSLOG;
2615 if (!strcmp(str, "default"))
2616 console_msg_format = MSG_FORMAT_DEFAULT;
2617 return 1;
2618 }
2619 __setup("console_msg_format=", console_msg_format_setup);
2620
2621 /*
2622 * Set up a console. Called via do_early_param() in init/main.c
2623 * for each "console=" parameter in the boot command line.
2624 */
2625 static int __init console_setup(char *str)
2626 {
2627 static_assert(sizeof(console_cmdline[0].devname) >= sizeof(console_cmdline[0].name) + 4);
2628 char buf[sizeof(console_cmdline[0].devname)];
2629 char *brl_options = NULL;
2630 char *ttyname = NULL;
2631 char *devname = NULL;
2632 char *options;
2633 char *s;
2634 int idx;
2635
2636 /*
2637 * console="" or console=null have been suggested as a way to
2638 * disable console output. Use ttynull that has been created
2639 * for exactly this purpose.
2640 */
2641 if (str[0] == 0 || strcmp(str, "null") == 0) {
2642 __add_preferred_console("ttynull", 0, NULL, NULL, NULL, true);
2643 return 1;
2644 }
2645
2646 if (_braille_console_setup(&str, &brl_options))
2647 return 1;
2648
2649 /* For a DEVNAME:0.0 style console the character device is unknown early */
2650 if (strchr(str, ':'))
2651 devname = buf;
2652 else
2653 ttyname = buf;
2654
2655 /*
2656 * Decode str into name, index, options.
2657 */
2658 if (ttyname && isdigit(str[0]))
2659 scnprintf(buf, sizeof(buf), "ttyS%s", str);
2660 else
2661 strscpy(buf, str);
2662
2663 options = strchr(str, ',');
2664 if (options)
2665 *(options++) = 0;
2666
2667 #ifdef __sparc__
2668 if (!strcmp(str, "ttya"))
2669 strscpy(buf, "ttyS0");
2670 if (!strcmp(str, "ttyb"))
2671 strscpy(buf, "ttyS1");
2672 #endif
2673
2674 for (s = buf; *s; s++)
2675 if ((ttyname && isdigit(*s)) || *s == ',')
2676 break;
2677
2678 /* @idx will get defined when devname matches. */
2679 if (devname)
2680 idx = -1;
2681 else
2682 idx = simple_strtoul(s, NULL, 10);
2683
2684 *s = 0;
2685
2686 __add_preferred_console(ttyname, idx, devname, options, brl_options, true);
2687 return 1;
2688 }
2689 __setup("console=", console_setup);
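/*
 * Example decodings (illustrative) of the parameter handled above:
 *   console=ttyS0,115200n8  -> name "ttyS", idx 0, options "115200n8"
 *   console=0               -> treated as "ttyS0" (bare digits imply ttyS)
 *   console=serial0:0.0     -> devname "serial0:0.0", idx -1 until a driver
 *                              calls match_devname_and_update_preferred_console()
 *   console= / console=null -> ttynull, console output disabled
 */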
2690
2691 /**
2692 * add_preferred_console - add a device to the list of preferred consoles.
2693 * @name: device name
2694 * @idx: device index
2695 * @options: options for this console
2696 *
2697 * The last preferred console added will be used for kernel messages
2698 * and stdin/out/err for init. Normally this is used by console_setup
2699 * above to handle user-supplied console arguments; however it can also
2700 * be used by arch-specific code either to override the user or more
2701 * commonly to provide a default console (i.e. from PROM variables) when
2702 * the user has not supplied one.
2703 */
2704 int add_preferred_console(const char *name, const short idx, char *options)
2705 {
2706 return __add_preferred_console(name, idx, NULL, options, NULL, false);
2707 }
2708
2709 /**
2710 * match_devname_and_update_preferred_console - Update a preferred console
2711 * when matching devname is found.
2712 * @devname: DEVNAME:0.0 style device name
2713 * @name: Name of the corresponding console driver, e.g. "ttyS"
2714 * @idx: Console index, e.g. port number.
2715 *
2716 * The function checks whether a device with the given @devname is
2717 * preferred via the console=DEVNAME:0.0 command line option.
2718 * It fills the missing console driver name and console index
2719 * so that a later register_console() call could find (match)
2720 * and enable this device.
2721 *
2722 * It might be used when a driver subsystem initializes particular
2723 * devices with already known DEVNAME:0.0 style names, and it
2724 * can predict which console driver name and index this device
2725 * would later get associated with.
2726 *
2727 * Return: 0 on success, negative error code on failure.
2728 */
2729 int match_devname_and_update_preferred_console(const char *devname,
2730 const char *name,
2731 const short idx)
2732 {
2733 struct console_cmdline *c = console_cmdline;
2734 int i;
2735
2736 if (!devname || !strlen(devname) || !name || !strlen(name) || idx < 0)
2737 return -EINVAL;
2738
2739 for (i = 0; i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
2740 i++, c++) {
2741 if (!strcmp(devname, c->devname)) {
2742 pr_info("associate the preferred console \"%s\" with \"%s%d\"\n",
2743 devname, name, idx);
2744 strscpy(c->name, name);
2745 c->index = idx;
2746 return 0;
2747 }
2748 }
2749
2750 return -ENOENT;
2751 }
2752 EXPORT_SYMBOL_GPL(match_devname_and_update_preferred_console);
2753
2754 bool console_suspend_enabled = true;
2755 EXPORT_SYMBOL(console_suspend_enabled);
2756
2757 static int __init console_suspend_disable(char *str)
2758 {
2759 console_suspend_enabled = false;
2760 return 1;
2761 }
2762 __setup("no_console_suspend", console_suspend_disable);
2763 module_param_named(console_suspend, console_suspend_enabled,
2764 bool, S_IRUGO | S_IWUSR);
2765 MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
2766 " and hibernate operations");
2767
2768 static bool printk_console_no_auto_verbose;
2769
2770 void console_verbose(void)
2771 {
2772 if (console_loglevel && !printk_console_no_auto_verbose)
2773 console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
2774 }
2775 EXPORT_SYMBOL_GPL(console_verbose);
2776
2777 module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644);
2778 MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc");
2779
2780 /**
2781 * console_suspend_all - suspend the console subsystem
2782 *
2783 * This disables printk() while we go into suspend states
2784 */
2785 void console_suspend_all(void)
2786 {
2787 struct console *con;
2788
2789 if (console_suspend_enabled)
2790 pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
2791
2792 /*
2793 * Flush any console backlog and then avoid queueing irq_work until
2794 * console_resume_all(). Until then deferred printing is no longer
2795 * triggered, NBCON consoles transition to atomic flushing, and
2796 * any klogd waiters are not triggered.
2797 */
2798 pr_flush(1000, true);
2799 console_irqwork_blocked = true;
2800
2801 if (!console_suspend_enabled)
2802 return;
2803
2804 console_list_lock();
2805 for_each_console(con)
2806 console_srcu_write_flags(con, con->flags | CON_SUSPENDED);
2807 console_list_unlock();
2808
2809 /*
2810 * Ensure that all SRCU list walks have completed. All printing
2811 * contexts must be able to see that they are suspended so that it
2812 * is guaranteed that all printing has stopped when this function
2813 * completes.
2814 */
2815 synchronize_srcu(&console_srcu);
2816 }
2817
2818 void console_resume_all(void)
2819 {
2820 struct console_flush_type ft;
2821 struct console *con;
2822
2823 /*
2824 * Allow queueing irq_work. After restoring console state, deferred
2825 * printing and any klogd waiters need to be triggered in case there
2826 * is now a console backlog.
2827 */
2828 console_irqwork_blocked = false;
2829
2830 if (console_suspend_enabled) {
2831 console_list_lock();
2832 for_each_console(con)
2833 console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
2834 console_list_unlock();
2835
2836 /*
2837 * Ensure that all SRCU list walks have completed. All printing
2838 * contexts must be able to see they are no longer suspended so
2839 * that they are guaranteed to wake up and resume printing.
2840 */
2841 synchronize_srcu(&console_srcu);
2842 }
2843
2844 printk_get_console_flush_type(&ft);
2845 if (ft.nbcon_offload)
2846 nbcon_kthreads_wake();
2847 if (ft.legacy_offload)
2848 defer_console_output();
2849 else
2850 wake_up_klogd();
2851
2852 pr_flush(1000, true);
2853 }
2854
2855 /**
2856 * console_cpu_notify - print deferred console messages after CPU hotplug
2857 * @cpu: unused
2858 *
2859 * If printk() is called from a CPU that is not online yet, the messages
2860 * will be printed on the console only if there are CON_ANYTIME consoles.
2861 * This function is called when a new CPU comes online (or fails to come
2862 * up) or goes offline.
2863 */
2864 static int console_cpu_notify(unsigned int cpu)
2865 {
2866 struct console_flush_type ft;
2867
2868 if (!cpuhp_tasks_frozen) {
2869 printk_get_console_flush_type(&ft);
2870 if (ft.nbcon_atomic)
2871 nbcon_atomic_flush_pending();
2872 if (ft.legacy_direct) {
2873 if (console_trylock())
2874 console_unlock();
2875 }
2876 }
2877 return 0;
2878 }
2879
2880 /**
2881 * console_lock - block the console subsystem from printing
2882 *
2883 * Acquires a lock which guarantees that no consoles will
2884 * be in or enter their write() callback.
2885 *
2886 * Can sleep, returns nothing.
2887 */
2888 void console_lock(void)
2889 {
2890 might_sleep();
2891
2892 /* On panic, the console_lock must be left to the panic cpu. */
2893 while (panic_on_other_cpu())
2894 msleep(1000);
2895
2896 down_console_sem();
2897 console_locked = 1;
2898 console_may_schedule = 1;
2899 }
2900 EXPORT_SYMBOL(console_lock);
2901
2902 /**
2903 * console_trylock - try to block the console subsystem from printing
2904 *
2905 * Try to acquire a lock which guarantees that no consoles will
2906 * be in or enter their write() callback.
2907 *
2908 * returns 1 on success, and 0 on failure to acquire the lock.
2909 */
2910 int console_trylock(void)
2911 {
2912 /* On panic, the console_lock must be left to the panic cpu. */
2913 if (panic_on_other_cpu())
2914 return 0;
2915 if (down_trylock_console_sem())
2916 return 0;
2917 console_locked = 1;
2918 console_may_schedule = 0;
2919 return 1;
2920 }
2921 EXPORT_SYMBOL(console_trylock);
2922
2923 int is_console_locked(void)
2924 {
2925 return console_locked;
2926 }
2927 EXPORT_SYMBOL(is_console_locked);
2928
2929 static void __console_unlock(void)
2930 {
2931 console_locked = 0;
2932 up_console_sem();
2933 }
2934
2935 #ifdef CONFIG_PRINTK
2936
2937 /*
2938 * Prepend the message in @pmsg->pbufs->outbuf. This is achieved by shifting
2939 * the existing message over and inserting the scratchbuf message.
2940 *
2941 * @pmsg is the original printk message.
2942 * @fmt is the printf format of the message which will prepend the existing one.
2943 *
2944 * If there is not enough space in @pmsg->pbufs->outbuf, the existing
2945 * message text will be sufficiently truncated.
2946 *
2947 * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
2948 */
2949 __printf(2, 3)
2950 static void console_prepend_message(struct printk_message *pmsg, const char *fmt, ...)
2951 {
2952 struct printk_buffers *pbufs = pmsg->pbufs;
2953 const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
2954 const size_t outbuf_sz = sizeof(pbufs->outbuf);
2955 char *scratchbuf = &pbufs->scratchbuf[0];
2956 char *outbuf = &pbufs->outbuf[0];
2957 va_list args;
2958 size_t len;
2959
2960 va_start(args, fmt);
2961 len = vscnprintf(scratchbuf, scratchbuf_sz, fmt, args);
2962 va_end(args);
2963
2964 /*
2965 * Make sure outbuf is sufficiently large before prepending.
2966 * Keep at least the prefix when the message must be truncated.
2967 * It is a rather theoretical problem when someone tries to
2968 * use a minimalist buffer.
2969 */
2970 if (WARN_ON_ONCE(len + PRINTK_PREFIX_MAX >= outbuf_sz))
2971 return;
2972
2973 if (pmsg->outbuf_len + len >= outbuf_sz) {
2974 /* Truncate the message, but keep it terminated. */
2975 pmsg->outbuf_len = outbuf_sz - (len + 1);
2976 outbuf[pmsg->outbuf_len] = 0;
2977 }
2978
2979 memmove(outbuf + len, outbuf, pmsg->outbuf_len + 1);
2980 memcpy(outbuf, scratchbuf, len);
2981 pmsg->outbuf_len += len;
2982 }
2983
2984 /*
2985 * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message".
2986 * @pmsg->outbuf_len is updated appropriately.
2987 *
2988 * @pmsg is the printk message to prepend.
2989 *
2990 * @dropped is the dropped count to report in the dropped message.
2991 */
2992 void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
2993 {
2994 console_prepend_message(pmsg, "** %lu printk messages dropped **\n", dropped);
2995 }
2996
2997 /*
2998 * Prepend the message in @pmsg->pbufs->outbuf with a "replay message".
2999 * @pmsg->outbuf_len is updated appropriately.
3000 *
3001 * @pmsg is the printk message to prepend.
3002 */
3003 void console_prepend_replay(struct printk_message *pmsg)
3004 {
3005 console_prepend_message(pmsg, "** replaying previous printk message **\n");
3006 }
3007
3008 /*
3009 * Read and format the specified record (or a later record if the specified
3010 * record is not available).
3011 *
3012 * @pmsg will contain the formatted result. @pmsg->pbufs must point to a
3013 * struct printk_buffers.
3014 *
3015 * @seq is the record to read and format. If it is not available, the next
3016 * valid record is read.
3017 *
3018 * @is_extended specifies if the message should be formatted for extended
3019 * console output.
3020 *
3021 * @may_suppress specifies if records may be skipped based on loglevel.
3022 *
3023 * Returns false if no record is available. Otherwise true and all fields
3024 * of @pmsg are valid. (See the documentation of struct printk_message
3025 * for information about the @pmsg fields.)
3026 */
3027 bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
3028 bool is_extended, bool may_suppress)
3029 {
3030 struct printk_buffers *pbufs = pmsg->pbufs;
3031 const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
3032 const size_t outbuf_sz = sizeof(pbufs->outbuf);
3033 char *scratchbuf = &pbufs->scratchbuf[0];
3034 char *outbuf = &pbufs->outbuf[0];
3035 struct printk_info info;
3036 struct printk_record r;
3037 size_t len = 0;
3038 bool force_con;
3039
3040 /*
3041 * Formatting extended messages requires a separate buffer, so use the
3042 * scratch buffer to read in the ringbuffer text.
3043 *
3044 * Formatting normal messages is done in-place, so read the ringbuffer
3045 * text directly into the output buffer.
3046 */
3047 if (is_extended)
3048 prb_rec_init_rd(&r, &info, scratchbuf, scratchbuf_sz);
3049 else
3050 prb_rec_init_rd(&r, &info, outbuf, outbuf_sz);
3051
3052 if (!prb_read_valid(prb, seq, &r))
3053 return false;
3054
3055 pmsg->seq = r.info->seq;
3056 pmsg->dropped = r.info->seq - seq;
3057 force_con = r.info->flags & LOG_FORCE_CON;
3058 pmsg_load_execution_ctx(pmsg, r.info);
3059
3060 /*
3061 * Skip records that are not forced to be printed on consoles and that
3062 * have a level above the console loglevel.
3063 */
3064 if (!force_con && may_suppress && suppress_message_printing(r.info->level))
3065 goto out;
3066
3067 if (is_extended) {
3068 len = info_print_ext_header(outbuf, outbuf_sz, r.info);
3069 len += msg_print_ext_body(outbuf + len, outbuf_sz - len,
3070 &r.text_buf[0], r.info->text_len, &r.info->dev_info);
3071 } else {
3072 len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
3073 }
3074 out:
3075 pmsg->outbuf_len = len;
3076 return true;
3077 }
3078
3079 /*
3080 * The legacy console always acquires a spinlock_t from its printing
3081 * callback. This violates lock nesting if the caller acquired an always
3082 * spinning lock (raw_spinlock_t) while invoking printk(). This is not a
3083 * problem on PREEMPT_RT because legacy consoles print always from a
3084 * dedicated thread and never from within printk(). Therefore we tell
3085 * lockdep that a sleeping spin lock (spinlock_t) is valid here.
3086 */
3087 #ifdef CONFIG_PREEMPT_RT
3088 static inline void printk_legacy_allow_spinlock_enter(void) { }
3089 static inline void printk_legacy_allow_spinlock_exit(void) { }
3090 #else
3091 static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_CONFIG);
3092
3093 static inline void printk_legacy_allow_spinlock_enter(void)
3094 {
3095 lock_map_acquire_try(&printk_legacy_map);
3096 }
3097
3098 static inline void printk_legacy_allow_spinlock_exit(void)
3099 {
3100 lock_map_release(&printk_legacy_map);
3101 }
3102 #endif /* CONFIG_PREEMPT_RT */
3103
3104 /*
3105 * Used as the printk buffers for non-panic, serialized console printing.
3106 * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
3107 * Its usage requires the console_lock to be held.
3108 */
3109 struct printk_buffers printk_shared_pbufs;
3110
3111 /*
3112 * Print one record for the given console. The record printed is whatever
3113 * record is the next available record for the given console.
3114 *
3115 * @handover will be set to true if a printk waiter has taken over the
3116 * console_lock, in which case the caller is no longer holding both the
3117 * console_lock and the SRCU read lock. Otherwise it is set to false.
3118 *
3119 * @cookie is the cookie from the SRCU read lock.
3120 *
3121 * Returns false if the given console has no next record to print, otherwise
3122 * true.
3123 *
3124 * Requires the console_lock and the SRCU read lock.
3125 */
3126 static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
3127 {
3128 bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
3129 char *outbuf = &printk_shared_pbufs.outbuf[0];
3130 struct printk_message pmsg = {
3131 .pbufs = &printk_shared_pbufs,
3132 };
3133 unsigned long flags;
3134
3135 *handover = false;
3136
3137 if (!printk_get_next_message(&pmsg, con->seq, is_extended, true))
3138 return false;
3139
3140 con->dropped += pmsg.dropped;
3141
3142 /* Skip messages of formatted length 0. */
3143 if (pmsg.outbuf_len == 0) {
3144 con->seq = pmsg.seq + 1;
3145 goto skip;
3146 }
3147
3148 if (con->dropped && !is_extended) {
3149 console_prepend_dropped(&pmsg, con->dropped);
3150 con->dropped = 0;
3151 }
3152
3153 /* Write everything out to the hardware. */
3154
3155 if (force_legacy_kthread() && !panic_in_progress()) {
3156 /*
3157 * With forced threading this function is in a task context
3158 * (either legacy kthread or get_init_console_seq()). There
3159 * is no need for concern about printk reentrance, handovers,
3160 * or lockdep complaints.
3161 */
3162
3163 con->write(con, outbuf, pmsg.outbuf_len);
3164 con->seq = pmsg.seq + 1;
3165 } else {
3166 /*
3167 * While actively printing out messages, if another printk()
3168 * were to occur on another CPU, it may wait for this one to
3169 * finish. This task can not be preempted if there is a
3170 * waiter waiting to take over.
3171 *
3172 * Interrupts are disabled because the hand over to a waiter
3173 * must not be interrupted until the hand over is completed
3174 * (@console_waiter is cleared).
3175 */
3176 printk_safe_enter_irqsave(flags);
3177 console_lock_spinning_enable();
3178
3179 /* Do not trace print latency. */
3180 stop_critical_timings();
3181
3182 printk_legacy_allow_spinlock_enter();
3183 con->write(con, outbuf, pmsg.outbuf_len);
3184 printk_legacy_allow_spinlock_exit();
3185
3186 start_critical_timings();
3187
3188 con->seq = pmsg.seq + 1;
3189
3190 *handover = console_lock_spinning_disable_and_check(cookie);
3191 printk_safe_exit_irqrestore(flags);
3192 }
3193 skip:
3194 return true;
3195 }
3196
3197 #else
3198
3199 static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
3200 {
3201 *handover = false;
3202 return false;
3203 }
3204
3205 static inline void printk_kthreads_check_locked(void) { }
3206
3207 #endif /* CONFIG_PRINTK */
3208
3209
3210 /*
3211 * Print out one record for each console.
3212 *
3213 * @do_cond_resched is set by the caller. It can be true only in schedulable
3214 * context.
3215 *
3216 * @next_seq is set to the sequence number after the last available record.
3217 * The value is valid only when all usable consoles were flushed, i.e.
3218 * when the function returns true (it could do the job) and the
3219 * @try_again parameter is set to false, see below.
3220 *
3221 * @handover will be set to true if a printk waiter has taken over the
3222 * console_lock, in which case the caller is no longer holding the
3223 * console_lock. Otherwise it is set to false.
3224 *
3225 * @try_again will be set to true when it still makes sense to call this
3226 * function again: the function could do the job (see the return value)
3227 * and some console still made progress.
3228 *
3229 * Returns true when the function could do the job. Some consoles are usable,
3230 * and there was no takeover and no panic_on_other_cpu().
3231 *
3232 * Requires the console_lock.
3233 */
3234 static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *handover,
3235 bool *try_again)
3236 {
3237 struct console_flush_type ft;
3238 bool any_usable = false;
3239 struct console *con;
3240 int cookie;
3241
3242 *try_again = false;
3243
3244 printk_get_console_flush_type(&ft);
3245
3246 cookie = console_srcu_read_lock();
3247 for_each_console_srcu(con) {
3248 short flags = console_srcu_read_flags(con);
3249 u64 printk_seq;
3250 bool progress;
3251
3252 /*
3253 * console_flush_one_record() is only responsible for
3254 * nbcon consoles when the nbcon consoles cannot print via
3255 * their atomic or threaded flushing.
3256 */
3257 if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
3258 continue;
3259
3260 if (!console_is_usable(con, flags, !do_cond_resched))
3261 continue;
3262 any_usable = true;
3263
3264 if (flags & CON_NBCON) {
3265 progress = nbcon_legacy_emit_next_record(con, handover, cookie,
3266 !do_cond_resched);
3267 printk_seq = nbcon_seq_read(con);
3268 } else {
3269 progress = console_emit_next_record(con, handover, cookie);
3270 printk_seq = con->seq;
3271 }
3272
3273 /*
3274 * If a handover has occurred, the SRCU read lock
3275 * is already released.
3276 */
3277 if (*handover)
3278 goto fail;
3279
3280 /* Track the sequence number after the highest seq flushed. */
3281 if (printk_seq > *next_seq)
3282 *next_seq = printk_seq;
3283
3284 if (!progress)
3285 continue;
3286
3287 /*
3288 * A usable console made progress. There might still be
3289 * pending messages.
3290 */
3291 *try_again = true;
3292
3293 /* Allow panic_cpu to take over the consoles safely. */
3294 if (panic_on_other_cpu())
3295 goto fail_srcu;
3296
3297 if (do_cond_resched)
3298 cond_resched();
3299 }
3300 console_srcu_read_unlock(cookie);
3301
3302 return any_usable;
3303
3304 fail_srcu:
3305 console_srcu_read_unlock(cookie);
3306 fail:
3307 *try_again = false;
3308 return false;
3309 }
3310
3311 /*
3312 * Print out all remaining records to all consoles.
3313 *
3314 * @do_cond_resched is set by the caller. It can be true only in schedulable
3315 * context.
3316 *
3317 * @next_seq is set to the sequence number after the last available record.
3318 * The value is valid only when this function returns true. It means that all
3319 * usable consoles are completely flushed.
3320 *
3321 * @handover will be set to true if a printk waiter has taken over the
3322 * console_lock, in which case the caller is no longer holding the
3323 * console_lock. Otherwise it is set to false.
3324 *
3325 * Returns true when there was at least one usable console and all messages
3326 * were flushed to all usable consoles. A returned false informs the caller
3327 * that everything was not flushed (either there were no usable consoles or
3328 * another context has taken over printing or it is a panic situation and this
3329 * is not the panic CPU). Regardless of the reason, the caller should assume it
3330 * is not useful to immediately try again.
3331 *
3332 * Requires the console_lock.
3333 */
3334 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
3335 {
3336 bool try_again;
3337 bool ret;
3338
3339 *next_seq = 0;
3340 *handover = false;
3341
3342 do {
3343 ret = console_flush_one_record(do_cond_resched, next_seq,
3344 handover, &try_again);
3345 } while (try_again);
3346
3347 return ret;
3348 }
3349
3350 static void __console_flush_and_unlock(void)
3351 {
3352 bool do_cond_resched;
3353 bool handover;
3354 bool flushed;
3355 u64 next_seq;
3356
3357 /*
3358 * Console drivers are called with interrupts disabled, so
3359 * @console_may_schedule should be cleared before; however, we may
3360 * end up dumping a lot of lines, for example, if called from
3361 * console registration path, and should invoke cond_resched()
3362 * between lines if allowable. Not doing so can cause a very long
3363 * scheduling stall on a slow console leading to RCU stall and
3364 * softlockup warnings which exacerbate the issue with more
3365 * messages practically incapacitating the system. Therefore, create
3366 * a local variable to use for the printing loop.
3367 */
3368 do_cond_resched = console_may_schedule;
3369
3370 do {
3371 console_may_schedule = 0;
3372
3373 flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
3374 if (!handover)
3375 __console_unlock();
3376
3377 /*
3378 * Abort if there was a failure to flush all messages to all
3379 * usable consoles. Either it is not possible to flush (in
3380 * which case it would be an infinite loop of retrying) or
3381 * another context has taken over printing.
3382 */
3383 if (!flushed)
3384 break;
3385
3386 /*
3387 * Some context may have added new records after
3388 * console_flush_all() but before unlocking the console.
3389 * Re-check if there is a new record to flush. If the trylock
3390 * fails, another context is already handling the printing.
3391 */
3392 } while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
3393 }
3394
3395 /**
3396 * console_unlock - unblock the legacy console subsystem from printing
3397 *
3398 * Releases the console_lock which the caller holds to block printing of
3399 * the legacy console subsystem.
3400 *
3401 * While the console_lock was held, console output may have been buffered
3402 * by printk(). If this is the case, console_unlock() emits the output on
3403 * legacy consoles prior to releasing the lock.
3404 *
3405 * console_unlock() may be called from any context.
3406 */
3407 void console_unlock(void)
3408 {
3409 struct console_flush_type ft;
3410
3411 printk_get_console_flush_type(&ft);
3412 if (ft.legacy_direct)
3413 __console_flush_and_unlock();
3414 else
3415 __console_unlock();
3416 }
3417 EXPORT_SYMBOL(console_unlock);
3418
3419 void console_unblank(void)
3420 {
3421 bool found_unblank = false;
3422 struct console *c;
3423 int cookie;
3424
3425 /*
3426 * First check if there are any consoles implementing the unblank()
3427 * callback. If not, there is no reason to continue and take the
3428 * console lock, which in particular can be dangerous if
3429 * @oops_in_progress is set.
3430 */
3431 cookie = console_srcu_read_lock();
3432 for_each_console_srcu(c) {
3433 if (!console_is_usable(c, console_srcu_read_flags(c), true))
3434 continue;
3435
3436 if (c->unblank) {
3437 found_unblank = true;
3438 break;
3439 }
3440 }
3441 console_srcu_read_unlock(cookie);
3442 if (!found_unblank)
3443 return;
3444
3445 /*
3446 * Stop console printing because the unblank() callback may
3447 * assume the console is not within its write() callback.
3448 *
3449 * If @oops_in_progress is set, this may be an atomic context.
3450 * In that case, attempt a trylock as best-effort.
3451 */
3452 if (oops_in_progress) {
3453 /* Semaphores are not NMI-safe. */
3454 if (in_nmi())
3455 return;
3456
3457 /*
3458 * Attempting to trylock the console lock can deadlock
3459 * if another CPU was stopped while modifying the
3460 * semaphore. "Hope and pray" that this is not the
3461 * current situation.
3462 */
3463 if (down_trylock_console_sem() != 0)
3464 return;
3465 } else
3466 console_lock();
3467
3468 console_locked = 1;
3469 console_may_schedule = 0;
3470
3471 cookie = console_srcu_read_lock();
3472 for_each_console_srcu(c) {
3473 if (!console_is_usable(c, console_srcu_read_flags(c), true))
3474 continue;
3475
3476 if (c->unblank)
3477 c->unblank();
3478 }
3479 console_srcu_read_unlock(cookie);
3480
3481 console_unlock();
3482
3483 if (!oops_in_progress)
3484 pr_flush(1000, true);
3485 }
3486
3487 /*
3488 * Rewind all consoles to the oldest available record.
3489 *
3490 * IMPORTANT: The function is safe only when called under
3491 * console_lock(). It is not enforced because
3492 * it is used as a best effort in panic().
3493 */
3494 static void __console_rewind_all(void)
3495 {
3496 struct console *c;
3497 short flags;
3498 int cookie;
3499 u64 seq;
3500
3501 seq = prb_first_valid_seq(prb);
3502
3503 cookie = console_srcu_read_lock();
3504 for_each_console_srcu(c) {
3505 flags = console_srcu_read_flags(c);
3506
3507 if (flags & CON_NBCON) {
3508 nbcon_seq_force(c, seq);
3509 } else {
3510 /*
3511 * This assignment is safe only when called under
3512 * console_lock(). On panic, legacy consoles are
3513 * only best effort.
3514 */
3515 c->seq = seq;
3516 }
3517 }
3518 console_srcu_read_unlock(cookie);
3519 }
3520
3521 /**
3522 * console_flush_on_panic - flush console content on panic
3523 * @mode: flush all messages in buffer or just the pending ones
3524 *
3525 * Immediately output all pending messages no matter what.
3526 */
3527 void console_flush_on_panic(enum con_flush_mode mode)
3528 {
3529 struct console_flush_type ft;
3530 bool handover;
3531 u64 next_seq;
3532
3533 /*
3534 * Ignore the console lock and flush out the messages. Attempting a
3535 * trylock would not be useful because:
3536 *
3537 * - if it is contended, it must be ignored anyway
3538 * - console_lock() and console_trylock() block and fail
3539 * respectively in panic for non-panic CPUs
3540 * - semaphores are not NMI-safe
3541 */
3542
3543 /*
3544 * If another context is holding the console lock,
3545 * @console_may_schedule might be set. Clear it so that
3546 * this context does not call cond_resched() while flushing.
3547 */
3548 console_may_schedule = 0;
3549
3550 if (mode == CONSOLE_REPLAY_ALL)
3551 __console_rewind_all();
3552
3553 printk_get_console_flush_type(&ft);
3554 if (ft.nbcon_atomic)
3555 nbcon_atomic_flush_pending();
3556
3557 /* Flush legacy consoles once allowed, even when dangerous. */
3558 if (legacy_allow_panic_sync)
3559 console_flush_all(false, &next_seq, &handover);
3560 }
3561
3562 /*
3563 * Return the console tty driver structure and its associated index
3564 */
3565 struct tty_driver *console_device(int *index)
3566 {
3567 struct console *c;
3568 struct tty_driver *driver = NULL;
3569 int cookie;
3570
3571 /*
3572 * Take console_lock to serialize device() callback with
3573 * other console operations. For example, fg_console is
3574 * modified under console_lock when switching vt.
3575 */
3576 console_lock();
3577
3578 cookie = console_srcu_read_lock();
3579 for_each_console_srcu(c) {
3580 if (!c->device)
3581 continue;
3582 driver = c->device(c, index);
3583 if (driver)
3584 break;
3585 }
3586 console_srcu_read_unlock(cookie);
3587
3588 console_unlock();
3589 return driver;
3590 }
3591
3592 /*
3593 * Prevent further output on the passed console device so that (for example)
3594 * serial drivers can suspend console output before suspending a port, and can
3595 * re-enable output afterwards.
3596 */
3597 void console_suspend(struct console *console)
3598 {
3599 __pr_flush(console, 1000, true);
3600 console_list_lock();
3601 console_srcu_write_flags(console, console->flags & ~CON_ENABLED);
3602 console_list_unlock();
3603
3604 /*
3605 * Ensure that all SRCU list walks have completed. All contexts must
3606 * be able to see that this console is disabled so that (for example)
3607 * the caller can suspend the port without risk of another context
3608 * using the port.
3609 */
3610 synchronize_srcu(&console_srcu);
3611 }
3612 EXPORT_SYMBOL(console_suspend);
3613
3614 void console_resume(struct console *console)
3615 {
3616 struct console_flush_type ft;
3617 bool is_nbcon;
3618
3619 console_list_lock();
3620 console_srcu_write_flags(console, console->flags | CON_ENABLED);
3621 is_nbcon = console->flags & CON_NBCON;
3622 console_list_unlock();
3623
3624 /*
3625 * Ensure that all SRCU list walks have completed. The related
3626 * printing context must be able to see it is enabled so that
3627 * it is guaranteed to wake up and resume printing.
3628 */
3629 synchronize_srcu(&console_srcu);
3630
3631 printk_get_console_flush_type(&ft);
3632 if (is_nbcon && ft.nbcon_offload)
3633 nbcon_kthread_wake(console);
3634 else if (ft.legacy_offload)
3635 defer_console_output();
3636
3637 __pr_flush(console, 1000, true);
3638 }
3639 EXPORT_SYMBOL(console_resume);
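
/*
 * Illustrative sketch (hypothetical driver names): a serial driver's
 * system-suspend path typically brackets the port power-down like this:
 *
 *	static int my_uart_suspend(struct device *dev)
 *	{
 *		console_suspend(&my_uart_console);
 *		... power down the port; console output is now disabled ...
 *		return 0;
 *	}
 *
 *	static int my_uart_resume(struct device *dev)
 *	{
 *		... power the port back up ...
 *		console_resume(&my_uart_console);
 *		return 0;
 *	}
 */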

#ifdef CONFIG_PRINTK
static int unregister_console_locked(struct console *console);

/* True when system boot is far enough to create printer threads. */
bool printk_kthreads_ready __ro_after_init;

static struct task_struct *printk_legacy_kthread;

static bool legacy_kthread_should_wakeup(void)
{
	struct console_flush_type ft;
	struct console *con;
	bool ret = false;
	int cookie;

	if (kthread_should_stop())
		return true;

	printk_get_console_flush_type(&ft);

	cookie = console_srcu_read_lock();
	for_each_console_srcu(con) {
		short flags = console_srcu_read_flags(con);
		u64 printk_seq;

		/*
		 * The legacy printer thread is only responsible for nbcon
		 * consoles when the nbcon consoles cannot print via their
		 * atomic or threaded flushing.
		 */
		if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
			continue;

		if (!console_is_usable(con, flags, false))
			continue;

		if (flags & CON_NBCON) {
			printk_seq = nbcon_seq_read(con);
		} else {
			/*
			 * It is safe to read @seq because only this
			 * thread context updates @seq.
			 */
			printk_seq = con->seq;
		}

		if (prb_read_valid(prb, printk_seq, NULL)) {
			ret = true;
			break;
		}
	}
	console_srcu_read_unlock(cookie);

	return ret;
}

static int legacy_kthread_func(void *unused)
{
	bool try_again;

wait_for_event:
	wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup());

	do {
		bool handover = false;
		u64 next_seq = 0;

		if (kthread_should_stop())
			return 0;

		console_lock();
		console_flush_one_record(true, &next_seq, &handover, &try_again);
		if (!handover)
			__console_unlock();

	} while (try_again);

	goto wait_for_event;
}

static bool legacy_kthread_create(void)
{
	struct task_struct *kt;

	lockdep_assert_console_list_lock_held();

	kt = kthread_run(legacy_kthread_func, NULL, "pr/legacy");
	if (WARN_ON(IS_ERR(kt))) {
		pr_err("failed to start legacy printing thread\n");
		return false;
	}

	printk_legacy_kthread = kt;

	/*
	 * It is important that console printing threads are scheduled
	 * shortly after a printk call and with generous runtime budgets.
	 */
	sched_set_normal(printk_legacy_kthread, -20);

	return true;
}

/**
 * printk_kthreads_shutdown - shutdown all threaded printers
 * @data: syscore context
 *
 * On system shutdown all threaded printers are stopped. This allows printk
 * to transition back to atomic printing, thus providing a robust mechanism
 * for the final shutdown/reboot messages to be output.
 */
static void printk_kthreads_shutdown(void *data)
{
	struct console *con;

	console_list_lock();
	if (printk_kthreads_running) {
		printk_kthreads_running = false;

		for_each_console(con) {
			if (con->flags & CON_NBCON)
				nbcon_kthread_stop(con);
		}

		/*
		 * The threads may have been stopped while printing a
		 * backlog. Flush any records left over.
		 */
		nbcon_atomic_flush_pending();
	}
	console_list_unlock();
}

static const struct syscore_ops printk_syscore_ops = {
	.shutdown = printk_kthreads_shutdown,
};

static struct syscore printk_syscore = {
	.ops = &printk_syscore_ops,
};

/*
 * If appropriate, start nbcon kthreads and set @printk_kthreads_running.
 * If any kthreads fail to start, those consoles are unregistered.
 *
 * Must be called under console_list_lock().
 */
static void printk_kthreads_check_locked(void)
{
	struct hlist_node *tmp;
	struct console *con;

	lockdep_assert_console_list_lock_held();

	if (!printk_kthreads_ready)
		return;

	/* Start or stop the legacy kthread when needed. */
	if (have_legacy_console || have_boot_console) {
		if (!printk_legacy_kthread &&
		    force_legacy_kthread() &&
		    !legacy_kthread_create()) {
			/*
			 * All legacy consoles must be unregistered. If there
			 * are any nbcon consoles, they will set up their own
			 * kthread.
			 */
			hlist_for_each_entry_safe(con, tmp, &console_list, node) {
				if (con->flags & CON_NBCON)
					continue;

				unregister_console_locked(con);
			}
		}
	} else if (printk_legacy_kthread) {
		kthread_stop(printk_legacy_kthread);
		printk_legacy_kthread = NULL;
	}

	/*
	 * Printer threads cannot be started as long as any boot console is
	 * registered because there is no way to synchronize the hardware
	 * registers between boot console code and regular console code.
	 * It can only be known that there will be no new boot consoles when
	 * an nbcon console is registered.
	 */
	if (have_boot_console || !have_nbcon_console) {
		/* Clear flag in case all nbcon consoles unregistered. */
		printk_kthreads_running = false;
		return;
	}

	if (printk_kthreads_running)
		return;

	hlist_for_each_entry_safe(con, tmp, &console_list, node) {
		if (!(con->flags & CON_NBCON))
			continue;

		if (!nbcon_kthread_create(con))
			unregister_console_locked(con);
	}

	printk_kthreads_running = true;
}

static int __init printk_set_kthreads_ready(void)
{
	register_syscore(&printk_syscore);

	console_list_lock();
	printk_kthreads_ready = true;
	printk_kthreads_check_locked();
	console_list_unlock();

	return 0;
}
early_initcall(printk_set_kthreads_ready);
#endif /* CONFIG_PRINTK */

static int __read_mostly keep_bootcon;

static int __init keep_bootcon_setup(char *str)
{
	keep_bootcon = 1;
	pr_info("debug: skip boot console de-registration.\n");

	return 0;
}

early_param("keep_bootcon", keep_bootcon_setup);

static int console_call_setup(struct console *newcon, char *options)
{
	int err;

	if (!newcon->setup)
		return 0;

	/* Synchronize with possible boot console. */
	console_lock();
	err = newcon->setup(newcon, options);
	console_unlock();

	return err;
}

/*
 * This is called by register_console() to try to match
 * the newly registered console with any of the ones selected
 * by either the command line or add_preferred_console() and
 * setup/enable it.
 *
 * Care needs to be taken with consoles that are statically
 * enabled, such as netconsole.
 */
static int try_enable_preferred_console(struct console *newcon,
					bool user_specified)
{
	struct console_cmdline *c;
	int i, err;

	for (i = 0, c = console_cmdline;
	     i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
	     i++, c++) {
		/* Console not yet initialized? */
		if (!c->name[0])
			continue;
		if (c->user_specified != user_specified)
			continue;
		if (!newcon->match ||
		    newcon->match(newcon, c->name, c->index, c->options) != 0) {
			/* default matching */
			BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
			if (strcmp(c->name, newcon->name) != 0)
				continue;
			if (newcon->index >= 0 &&
			    newcon->index != c->index)
				continue;
			if (newcon->index < 0)
				newcon->index = c->index;

			if (_braille_register_console(newcon, c))
				return 0;

			err = console_call_setup(newcon, c->options);
			if (err)
				return err;
		}
		newcon->flags |= CON_ENABLED;
		if (i == preferred_console)
			newcon->flags |= CON_CONSDEV;
		return 0;
	}

	/*
	 * Some consoles, such as pstore and netconsole, can be enabled even
	 * without matching. Accept the pre-enabled consoles only when match()
	 * and setup() had a chance to be called.
	 */
	if (newcon->flags & CON_ENABLED && c->user_specified == user_specified)
		return 0;

	return -ENOENT;
}

/* Try to enable the console unconditionally */
static void try_enable_default_console(struct console *newcon)
{
	if (newcon->index < 0)
		newcon->index = 0;

	if (console_call_setup(newcon, NULL) != 0)
		return;

	newcon->flags |= CON_ENABLED;

	if (newcon->device)
		newcon->flags |= CON_CONSDEV;
}

/* Return the starting sequence number for a newly registered console. */
static u64 get_init_console_seq(struct console *newcon, bool bootcon_registered)
{
	struct console *con;
	bool handover;
	u64 init_seq;

	if (newcon->flags & (CON_PRINTBUFFER | CON_BOOT)) {
		/* Get a consistent copy of @syslog_seq. */
		mutex_lock(&syslog_lock);
		init_seq = syslog_seq;
		mutex_unlock(&syslog_lock);
	} else {
		/* Begin with next message added to ringbuffer. */
		init_seq = prb_next_seq(prb);

		/*
		 * If any enabled boot consoles are due to be unregistered
		 * shortly, some may not be caught up and may be the same
		 * device as @newcon. Since it is not known which boot console
		 * is the same device, flush all consoles and, if necessary,
		 * start with the message of the enabled boot console that is
		 * the furthest behind.
		 */
		if (bootcon_registered && !keep_bootcon) {
			/*
			 * Hold the console_lock to stop console printing and
			 * guarantee safe access to console->seq.
			 */
			console_lock();

			/*
			 * Flush all consoles and set the console to start at
			 * the next unprinted sequence number.
			 */
			if (!console_flush_all(true, &init_seq, &handover)) {
				/*
				 * Flushing failed. Just choose the lowest
				 * sequence of the enabled boot consoles.
				 */

				/*
				 * If there was a handover, this context no
				 * longer holds the console_lock.
				 */
				if (handover)
					console_lock();

				init_seq = prb_next_seq(prb);
				for_each_console(con) {
					u64 seq;

					if (!(con->flags & CON_BOOT) ||
					    !(con->flags & CON_ENABLED)) {
						continue;
					}

					if (con->flags & CON_NBCON)
						seq = nbcon_seq_read(con);
					else
						seq = con->seq;

					if (seq < init_seq)
						init_seq = seq;
				}
			}

			console_unlock();
		}
	}

	return init_seq;
}

#define console_first()			\
	hlist_entry(console_list.first, struct console, node)

static int unregister_console_locked(struct console *console);

/*
 * The console driver calls this routine during kernel initialization
 * to register the console printing procedure with printk() and to
 * print any messages that were printed by the kernel before the
 * console driver was initialized.
 *
 * This can happen pretty early during the boot process (because of
 * early_printk) - sometimes before setup_arch() completes - be careful
 * of what kernel features are used - they may not be initialised yet.
 *
 * There are two types of consoles - bootconsoles (early_printk) and
 * "real" consoles (everything which is not a bootconsole) which are
 * handled differently.
 *  - Any number of bootconsoles can be registered at any time.
 *  - As soon as a "real" console is registered, all bootconsoles
 *    will be unregistered automatically.
 *  - Once a "real" console is registered, any attempt to register a
 *    bootconsole will be rejected.
 */
void register_console(struct console *newcon)
{
	bool use_device_lock = (newcon->flags & CON_NBCON) && newcon->write_atomic;
	bool bootcon_registered = false;
	bool realcon_registered = false;
	struct console *con;
	unsigned long flags;
	u64 init_seq;
	int err;

	console_list_lock();

	for_each_console(con) {
		if (WARN(con == newcon, "console '%s%d' already registered\n",
			 con->name, con->index)) {
			goto unlock;
		}

		if (con->flags & CON_BOOT)
			bootcon_registered = true;
		else
			realcon_registered = true;
	}

	/* Do not register boot consoles when there already is a real one. */
	if ((newcon->flags & CON_BOOT) && realcon_registered) {
		pr_info("Too late to register bootconsole %s%d\n",
			newcon->name, newcon->index);
		goto unlock;
	}

	if (newcon->flags & CON_NBCON) {
		/*
		 * Ensure the nbcon console buffers can be allocated
		 * before modifying any global data.
		 */
		if (!nbcon_alloc(newcon))
			goto unlock;
	}

	/*
	 * See if we want to enable this console driver by default.
	 *
	 * Nope when a console is preferred by the command line, device
	 * tree, or SPCR.
	 *
	 * The first real console with tty binding (driver) wins. More
	 * consoles might get enabled before the right one is found.
	 *
	 * Note that a console with tty binding will have CON_CONSDEV
	 * flag set and will be first in the list.
	 */
	if (preferred_console < 0) {
		if (hlist_empty(&console_list) || !console_first()->device ||
		    console_first()->flags & CON_BOOT) {
			try_enable_default_console(newcon);
		}
	}

	/* See if this console matches one we selected on the command line */
	err = try_enable_preferred_console(newcon, true);

	/* If not, try to match against the platform default(s) */
	if (err == -ENOENT)
		err = try_enable_preferred_console(newcon, false);

	/* printk() messages are not printed to the Braille console. */
	if (err || newcon->flags & CON_BRL) {
		if (newcon->flags & CON_NBCON)
			nbcon_free(newcon);
		goto unlock;
	}

	/*
	 * If we have a bootconsole, and are switching to a real console,
	 * don't print everything out again, since when the boot console and
	 * the real console are the same physical device, it's annoying to
	 * see the beginning boot messages twice.
	 */
	if (bootcon_registered &&
	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
		newcon->flags &= ~CON_PRINTBUFFER;
	}

	newcon->dropped = 0;
	init_seq = get_init_console_seq(newcon, bootcon_registered);

	if (newcon->flags & CON_NBCON) {
		have_nbcon_console = true;
		nbcon_seq_force(newcon, init_seq);
	} else {
		have_legacy_console = true;
		newcon->seq = init_seq;
	}

	if (newcon->flags & CON_BOOT)
		have_boot_console = true;

	/*
	 * If another context is actively using the hardware of this new
	 * console, it will not be aware of the nbcon synchronization. There
	 * is a risk that the two contexts could access the hardware
	 * simultaneously if this new console is used for atomic printing
	 * and the other context is still using the hardware.
	 *
	 * Use the driver synchronization to ensure that the hardware is not
	 * in use while this new console transitions to being registered.
	 */
	if (use_device_lock)
		newcon->device_lock(newcon, &flags);

	/*
	 * Put this console in the list - keep the
	 * preferred driver at the head of the list.
	 */
	if (hlist_empty(&console_list)) {
		/* Ensure CON_CONSDEV is always set for the head. */
		newcon->flags |= CON_CONSDEV;
		hlist_add_head_rcu(&newcon->node, &console_list);

	} else if (newcon->flags & CON_CONSDEV) {
		/* Only the new head can have CON_CONSDEV set. */
		console_srcu_write_flags(console_first(), console_first()->flags & ~CON_CONSDEV);
		hlist_add_head_rcu(&newcon->node, &console_list);

	} else {
		hlist_add_behind_rcu(&newcon->node, console_list.first);
	}

	/*
	 * No need to synchronize SRCU here! The caller does not rely
	 * on all contexts being able to see the new console before
	 * register_console() completes.
	 */

	/* This new console is now registered. */
	if (use_device_lock)
		newcon->device_unlock(newcon, flags);

	console_sysfs_notify();

	/*
	 * By unregistering the bootconsoles after we enable the real console
	 * we get the "console xxx enabled" message on all the consoles -
	 * boot consoles, real consoles, etc - this is to ensure that end
	 * users know there might be something in the kernel's log buffer that
	 * went to the bootconsole (that they do not see on the real console)
	 */
	con_printk(KERN_INFO, newcon, "enabled\n");
	if (bootcon_registered &&
	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
	    !keep_bootcon) {
		struct hlist_node *tmp;

		hlist_for_each_entry_safe(con, tmp, &console_list, node) {
			if (con->flags & CON_BOOT)
				unregister_console_locked(con);
		}
	}

	/* Changed console list, may require printer threads to start/stop. */
	printk_kthreads_check_locked();
unlock:
	console_list_unlock();
}
EXPORT_SYMBOL(register_console);
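
/*
 * Illustrative sketch of a minimal legacy console driver (hypothetical
 * names, error handling elided):
 *
 *	static void my_write(struct console *con, const char *s,
 *			     unsigned int count)
 *	{
 *		my_hw_emit(s, count);
 *	}
 *
 *	static struct console my_console = {
 *		.name	= "mycon",
 *		.write	= my_write,
 *		.flags	= CON_PRINTBUFFER,
 *		.index	= -1,
 *	};
 *
 *	register_console(&my_console);
 *
 * CON_PRINTBUFFER requests a replay of the existing log buffer, and
 * index -1 allows a command line match to select the instance.
 */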

/* Must be called under console_list_lock(). */
static int unregister_console_locked(struct console *console)
{
	bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
	bool found_legacy_con = false;
	bool found_nbcon_con = false;
	bool found_boot_con = false;
	unsigned long flags;
	struct console *c;
	int res;

	lockdep_assert_console_list_lock_held();

	con_printk(KERN_INFO, console, "disabled\n");

	res = _braille_unregister_console(console);
	if (res < 0)
		return res;
	if (res > 0)
		return 0;

	if (!console_is_registered_locked(console))
		res = -ENODEV;
	else if (console_is_usable(console, console->flags, true))
		__pr_flush(console, 1000, true);

	/* Disable it unconditionally */
	console_srcu_write_flags(console, console->flags & ~CON_ENABLED);

	if (res < 0)
		return res;

	/*
	 * Use the driver synchronization to ensure that the hardware is not
	 * in use while this console transitions to being unregistered.
	 */
	if (use_device_lock)
		console->device_lock(console, &flags);

	hlist_del_init_rcu(&console->node);

	if (use_device_lock)
		console->device_unlock(console, flags);

	/*
	 * <HISTORICAL>
	 * If this isn't the last console and it has CON_CONSDEV set, we
	 * need to set it on the next preferred console.
	 * </HISTORICAL>
	 *
	 * The above makes no sense as there is no guarantee that the next
	 * console has any device attached. Oh well....
	 */
	if (!hlist_empty(&console_list) && console->flags & CON_CONSDEV)
		console_srcu_write_flags(console_first(), console_first()->flags | CON_CONSDEV);

	/*
	 * Ensure that all SRCU list walks have completed. All contexts
	 * must not be able to see this console in the list so that any
	 * exit/cleanup routines can be performed safely.
	 */
	synchronize_srcu(&console_srcu);

	/*
	 * With this console gone, the global flags tracking registered
	 * console types may have changed. Update them.
	 */
	for_each_console(c) {
		if (c->flags & CON_BOOT)
			found_boot_con = true;

		if (c->flags & CON_NBCON)
			found_nbcon_con = true;
		else
			found_legacy_con = true;
	}
	if (!found_boot_con)
		have_boot_console = found_boot_con;
	if (!found_legacy_con)
		have_legacy_console = found_legacy_con;
	if (!found_nbcon_con)
		have_nbcon_console = found_nbcon_con;

	/* @have_nbcon_console must be updated before calling nbcon_free(). */
	if (console->flags & CON_NBCON)
		nbcon_free(console);

	console_sysfs_notify();

	if (console->exit)
		res = console->exit(console);

	/* Changed console list, may require printer threads to start/stop. */
	printk_kthreads_check_locked();

	return res;
}

int unregister_console(struct console *console)
{
	int res;

	console_list_lock();
	res = unregister_console_locked(console);
	console_list_unlock();
	return res;
}
EXPORT_SYMBOL(unregister_console);

/**
 * console_force_preferred_locked - force a registered console preferred
 * @con: The registered console to force preferred.
 *
 * Must be called under console_list_lock().
 */
void console_force_preferred_locked(struct console *con)
{
	struct console *cur_pref_con;

	if (!console_is_registered_locked(con))
		return;

	cur_pref_con = console_first();

	/* Already preferred? */
	if (cur_pref_con == con)
		return;

	/*
	 * Delete, but do not re-initialize the entry. This allows the console
	 * to continue to appear registered (via any hlist_unhashed_lockless()
	 * checks), even though it was briefly removed from the console list.
	 */
	hlist_del_rcu(&con->node);

	/*
	 * Ensure that all SRCU list walks have completed so that the console
	 * can be added to the beginning of the console list and its forward
	 * list pointer can be re-initialized.
	 */
	synchronize_srcu(&console_srcu);

	con->flags |= CON_CONSDEV;
	WARN_ON(!con->device);

	/* Only the new head can have CON_CONSDEV set. */
	console_srcu_write_flags(cur_pref_con, cur_pref_con->flags & ~CON_CONSDEV);
	hlist_add_head_rcu(&con->node, &console_list);
}
EXPORT_SYMBOL(console_force_preferred_locked);

/*
 * Initialize the console device. This is called *early*, so
 * we can't necessarily depend on lots of kernel help here.
 * Just do some early initializations, and do the complex setup
 * later.
 */
void __init console_init(void)
{
	int ret;
	initcall_t call;
	initcall_entry_t *ce;

#ifdef CONFIG_NULL_TTY_DEFAULT_CONSOLE
	if (!console_set_on_cmdline)
		add_preferred_console("ttynull", 0, NULL);
#endif

	/* Setup the default TTY line discipline. */
	n_tty_init();

	/*
	 * set up the console device so that later boot sequences can
	 * inform about problems etc..
	 */
	ce = __con_initcall_start;
	trace_initcall_level("console");
	while (ce < __con_initcall_end) {
		call = initcall_from_entry(ce);
		trace_initcall_start(call);
		ret = call();
		trace_initcall_finish(call, ret);
		ce++;
	}
}

/*
 * Some boot consoles access data that is in the init section and which will
 * be discarded after the initcalls have been run. To make sure that no code
 * will access this data, unregister the boot consoles in a late initcall.
 *
 * If for some reason, such as deferred probe or the driver being a loadable
 * module, the real console hasn't registered yet at this point, there will
 * be a brief interval in which no messages are logged to the console, which
 * makes it difficult to diagnose problems that occur during this time.
 *
 * To mitigate this problem somewhat, only unregister consoles whose memory
 * intersects with the init section. Note that all other boot consoles will
 * get unregistered when the real preferred console is registered.
 */
static int __init printk_late_init(void)
{
	struct hlist_node *tmp;
	struct console *con;
	int ret;

	console_list_lock();
	hlist_for_each_entry_safe(con, tmp, &console_list, node) {
		if (!(con->flags & CON_BOOT))
			continue;

		/* Check addresses that might be used for enabled consoles. */
		if (init_section_intersects(con, sizeof(*con)) ||
		    init_section_contains(con->write, 0) ||
		    init_section_contains(con->read, 0) ||
		    init_section_contains(con->device, 0) ||
		    init_section_contains(con->unblank, 0) ||
		    init_section_contains(con->data, 0)) {
			/*
			 * Please, consider moving the reported consoles out
			 * of the init section.
			 */
			pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n",
				con->name, con->index);
			unregister_console_locked(con);
		}
	}
	console_list_unlock();

	ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
					console_cpu_notify);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
					console_cpu_notify, NULL);
	WARN_ON(ret < 0);
	printk_sysctl_init();
	return 0;
}
late_initcall(printk_late_init);

#if defined CONFIG_PRINTK
/* If @con is specified, only wait for that console. Otherwise wait for all. */
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
{
	unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms);
	unsigned long remaining_jiffies = timeout_jiffies;
	struct console_flush_type ft;
	struct console *c;
	u64 last_diff = 0;
	u64 printk_seq;
	short flags;
	int cookie;
	u64 diff;
	u64 seq;

	/* Sorry, pr_flush() will not work this early. */
	if (system_state < SYSTEM_SCHEDULING)
		return false;

	might_sleep();

	seq = prb_next_reserve_seq(prb);

	/* Flush the consoles so that records up to @seq are printed. */
	printk_get_console_flush_type(&ft);
	if (ft.nbcon_atomic)
		nbcon_atomic_flush_pending();
	if (ft.legacy_direct) {
		console_lock();
		console_unlock();
	}

	for (;;) {
		unsigned long begin_jiffies;
		unsigned long slept_jiffies;

		diff = 0;

		/*
		 * Hold the console_lock to guarantee safe access to
		 * console->seq. Releasing console_lock flushes more
		 * records in case @seq is still not printed on all
		 * usable consoles.
		 *
		 * Holding the console_lock is not necessary if there
		 * are no legacy or boot consoles. However, such a
		 * console could register at any time. Always hold the
		 * console_lock as a precaution rather than
		 * synchronizing against register_console().
		 */
		console_lock();

		cookie = console_srcu_read_lock();
		for_each_console_srcu(c) {
			if (con && con != c)
				continue;

			flags = console_srcu_read_flags(c);

			/*
			 * If consoles are not usable, it cannot be expected
			 * that they make forward progress, so only increment
			 * @diff for usable consoles.
			 */
			if (!console_is_usable(c, flags, true) &&
			    !console_is_usable(c, flags, false)) {
				continue;
			}

			if (flags & CON_NBCON) {
				printk_seq = nbcon_seq_read(c);
			} else {
				printk_seq = c->seq;
			}

			if (printk_seq < seq)
				diff += seq - printk_seq;
		}
		console_srcu_read_unlock(cookie);

		if (diff != last_diff && reset_on_progress)
			remaining_jiffies = timeout_jiffies;

		console_unlock();

		/* Note: @diff is 0 if there are no usable consoles. */
		if (diff == 0 || remaining_jiffies == 0)
			break;

		/* msleep(1) might sleep much longer. Check time by jiffies. */
		begin_jiffies = jiffies;
		msleep(1);
		slept_jiffies = jiffies - begin_jiffies;

		remaining_jiffies -= min(slept_jiffies, remaining_jiffies);

		last_diff = diff;
	}

	return (diff == 0);
}

/**
 * pr_flush() - Wait for printing threads to catch up.
 *
 * @timeout_ms: The maximum time (in ms) to wait.
 * @reset_on_progress: Reset the timeout if forward progress is seen.
 *
 * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
 * represents infinite waiting.
 *
 * If @reset_on_progress is true, the timeout will be reset whenever any
 * printer has been seen to make some forward progress.
 *
 * Context: Process context. May sleep while acquiring console lock.
 * Return: true if all usable printers are caught up.
 */
bool pr_flush(int timeout_ms, bool reset_on_progress)
{
	return __pr_flush(NULL, timeout_ms, reset_on_progress);
}
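
/*
 * Illustrative usage: callers that are about to halt or reboot the
 * machine can give consoles a bounded chance to catch up first, e.g.:
 *
 *	pr_emerg("restarting now\n");
 *	pr_flush(1000, true);
 */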

/*
 * Delayed printk version, for scheduler-internal messages:
 */
#define PRINTK_PENDING_WAKEUP	0x01
#define PRINTK_PENDING_OUTPUT	0x02

static DEFINE_PER_CPU(int, printk_pending);

static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
	int pending = this_cpu_xchg(printk_pending, 0);

	if (pending & PRINTK_PENDING_OUTPUT) {
		if (force_legacy_kthread()) {
			if (printk_legacy_kthread)
				wake_up_interruptible(&legacy_wait);
		} else {
			if (console_trylock())
				console_unlock();
		}
	}

	if (pending & PRINTK_PENDING_WAKEUP)
		wake_up_interruptible(&log_wait);
}

static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
	IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);

static void __wake_up_klogd(int val)
{
	if (!printk_percpu_data_ready())
		return;

	/*
	 * It is not allowed to call this function when console irq_work
	 * is blocked.
	 */
	if (WARN_ON_ONCE(console_irqwork_blocked))
		return;

	preempt_disable();
	/*
	 * Guarantee any new records can be seen by tasks preparing to wait
	 * before this context checks if the wait queue is empty.
	 *
	 * The full memory barrier within wq_has_sleeper() pairs with the full
	 * memory barrier within set_current_state() of
	 * prepare_to_wait_event(), which is called after ___wait_event() adds
	 * the waiter but before it has checked the wait condition.
	 *
	 * This pairs with devkmsg_read:A and syslog_print:A.
	 */
	if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
	    (val & PRINTK_PENDING_OUTPUT)) {
		this_cpu_or(printk_pending, val);
		irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
	}
	preempt_enable();
}

/**
 * wake_up_klogd - Wake kernel logging daemon
 *
 * Use this function when new records have been added to the ringbuffer
 * and the console printing of those records has already occurred or is
 * known to be handled by some other context. This function will only
 * wake the logging daemon.
 *
 * Context: Any context.
 */
void wake_up_klogd(void)
{
	__wake_up_klogd(PRINTK_PENDING_WAKEUP);
}

/**
 * defer_console_output - Wake kernel logging daemon and trigger
 *	console printing in a deferred context
 *
 * Use this function when new records have been added to the ringbuffer,
 * this context is responsible for console printing those records, but
 * the current context is not allowed to perform the console printing.
 * Trigger an irq_work context to perform the console printing. This
 * function also wakes the logging daemon.
 *
 * Context: Any context.
 */
void defer_console_output(void)
{
	/*
	 * New messages may have been added directly to the ringbuffer
	 * using vprintk_store(), so wake any waiters as well.
	 */
	__wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
}

/**
 * printk_trigger_flush - Attempt to flush printk buffer to consoles.
 *
 * If possible, flush the printk buffer to all consoles in the caller's
 * context. If offloading is available, trigger deferred printing.
 *
 * This is best effort. Depending on the system state, console states,
 * and caller context, no actual flushing may result from this call.
 */
void printk_trigger_flush(void)
{
	struct console_flush_type ft;

	printk_get_console_flush_type(&ft);
	if (ft.nbcon_atomic)
		nbcon_atomic_flush_pending();
	if (ft.nbcon_offload)
		nbcon_kthreads_wake();
	if (ft.legacy_direct) {
		if (console_trylock())
			console_unlock();
	}
	if (ft.legacy_offload)
		defer_console_output();
}

int vprintk_deferred(const char *fmt, va_list args)
{
	return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
}

int _printk_deferred(const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vprintk_deferred(fmt, args);
	va_end(args);

	return r;
}
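
/*
 * Illustrative usage: code that may hold scheduler locks cannot take the
 * console semaphore, so it logs through the deferred wrapper from
 * include/linux/printk.h:
 *
 *	printk_deferred(KERN_WARNING "clock update skipped\n");
 */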

/*
 * printk rate limiting, lifted from the networking subsystem.
 *
 * This enforces a rate limit: not more than 10 kernel messages
 * every 5s to make a denial-of-service attack impossible.
 */
DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);

int __printk_ratelimit(const char *func)
{
	return ___ratelimit(&printk_ratelimit_state, func);
}
EXPORT_SYMBOL(__printk_ratelimit);
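
/*
 * Illustrative usage: printk_ratelimit() from include/linux/printk.h
 * wraps this with __func__, so a noisy path can be throttled as:
 *
 *	if (printk_ratelimit())
 *		pr_warn("dropping packet\n");
 */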

/**
 * printk_timed_ratelimit - caller-controlled printk ratelimiting
 * @caller_jiffies: pointer to caller's state
 * @interval_msecs: minimum interval between prints
 *
 * printk_timed_ratelimit() returns true if more than @interval_msecs
 * milliseconds have elapsed since the last time printk_timed_ratelimit()
 * returned true.
 */
bool printk_timed_ratelimit(unsigned long *caller_jiffies,
			    unsigned int interval_msecs)
{
	unsigned long elapsed = jiffies - *caller_jiffies;

	if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
		return false;

	*caller_jiffies = jiffies;
	return true;
}
EXPORT_SYMBOL(printk_timed_ratelimit);
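
/*
 * Illustrative usage: the caller owns the state variable, so independent
 * call sites are rate limited independently:
 *
 *	static unsigned long last_msg;
 *
 *	if (printk_timed_ratelimit(&last_msg, 5000))
 *		pr_info("slow path taken\n");
 */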

static DEFINE_SPINLOCK(dump_list_lock);
static LIST_HEAD(dump_list);

/**
 * kmsg_dump_register - register a kernel log dumper.
 * @dumper: pointer to the kmsg_dumper structure
 *
 * Adds a kernel log dumper to the system. The dump callback in the
 * structure must be set; it will be called when the kernel oopses or
 * panics. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
 */
int kmsg_dump_register(struct kmsg_dumper *dumper)
{
	unsigned long flags;
	int err = -EBUSY;

	/* The dump callback needs to be set */
	if (!dumper->dump)
		return -EINVAL;

	spin_lock_irqsave(&dump_list_lock, flags);
	/* Don't allow registering multiple times */
	if (!dumper->registered) {
		dumper->registered = 1;
		list_add_tail_rcu(&dumper->list, &dump_list);
		err = 0;
	}
	spin_unlock_irqrestore(&dump_list_lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_register);
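
/*
 * Illustrative sketch (hypothetical names): a minimal dumper that is
 * only invoked on panic:
 *
 *	static void my_dump(struct kmsg_dumper *dumper,
 *			    struct kmsg_dump_detail *detail)
 *	{
 *		... iterate records, see kmsg_dump_get_line() below ...
 *	}
 *
 *	static struct kmsg_dumper my_dumper = {
 *		.dump	    = my_dump,
 *		.max_reason = KMSG_DUMP_PANIC,
 *	};
 *
 *	kmsg_dump_register(&my_dumper);
 */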

/**
 * kmsg_dump_unregister - unregister a kmsg dumper.
 * @dumper: pointer to the kmsg_dumper structure
 *
 * Removes a dump device from the system. Returns zero on success and
 * %-EINVAL otherwise.
 */
int kmsg_dump_unregister(struct kmsg_dumper *dumper)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&dump_list_lock, flags);
	if (dumper->registered) {
		dumper->registered = 0;
		list_del_rcu(&dumper->list);
		err = 0;
	}
	spin_unlock_irqrestore(&dump_list_lock, flags);
	synchronize_rcu();

	return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_unregister);

static bool always_kmsg_dump;
module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);

const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
{
	switch (reason) {
	case KMSG_DUMP_PANIC:
		return "Panic";
	case KMSG_DUMP_OOPS:
		return "Oops";
	case KMSG_DUMP_EMERG:
		return "Emergency";
	case KMSG_DUMP_SHUTDOWN:
		return "Shutdown";
	default:
		return "Unknown";
	}
}
EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);

/**
 * kmsg_dump_desc - dump kernel log to kernel message dumpers.
 * @reason: the reason (oops, panic etc) for dumping
 * @desc: a short string to describe what caused the panic or oops. Can be NULL
 *	if no additional description is available.
 *
 * Call each of the registered dumper's dump() callback, which can
 * retrieve the kmsg records with kmsg_dump_get_line() or
 * kmsg_dump_get_buffer().
 */
void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
	struct kmsg_dumper *dumper;
	struct kmsg_dump_detail detail = {
		.reason = reason,
		.description = desc};

	rcu_read_lock();
	list_for_each_entry_rcu(dumper, &dump_list, list) {
		enum kmsg_dump_reason max_reason = dumper->max_reason;

		/*
		 * If client has not provided a specific max_reason, default
		 * to KMSG_DUMP_OOPS, unless always_kmsg_dump was set.
		 */
		if (max_reason == KMSG_DUMP_UNDEF) {
			max_reason = always_kmsg_dump ? KMSG_DUMP_MAX :
							KMSG_DUMP_OOPS;
		}
		if (reason > max_reason)
			continue;

		/* invoke dumper which will iterate over records */
		dumper->dump(dumper, &detail);
	}
	rcu_read_unlock();
}

/**
 * kmsg_dump_get_line - retrieve one kmsg log line
 * @iter: kmsg dump iterator
 * @syslog: include the "<4>" prefixes
 * @line: buffer to copy the line to
 * @size: maximum size of the buffer
 * @len: length of line placed into buffer
 *
 * Start at the beginning of the kmsg buffer, with the oldest kmsg
 * record, and copy one record into the provided buffer.
 *
 * Consecutive calls will return the next available record moving
 * towards the end of the buffer with the youngest messages.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
			char *line, size_t size, size_t *len)
{
	u64 min_seq = latched_seq_read_nolock(&clear_seq);
	struct printk_info info;
	unsigned int line_count;
	struct printk_record r;
	size_t l = 0;
	bool ret = false;

	if (iter->cur_seq < min_seq)
		iter->cur_seq = min_seq;

	prb_rec_init_rd(&r, &info, line, size);

	/* Read text or count text lines? */
	if (line) {
		if (!prb_read_valid(prb, iter->cur_seq, &r))
			goto out;
		l = record_print_text(&r, syslog, printk_time);
	} else {
		if (!prb_read_valid_info(prb, iter->cur_seq,
					 &info, &line_count)) {
			goto out;
		}
		l = get_record_print_text_size(&info, line_count, syslog,
					       printk_time);
	}

	iter->cur_seq = r.info->seq + 1;
	ret = true;
out:
	if (len)
		*len = l;
	return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
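
/*
 * Illustrative sketch: a dump() callback copying records one line at a
 * time into a caller-provided buffer; write_to_storage() is a
 * hypothetical sink:
 *
 *	struct kmsg_dump_iter iter;
 *	size_t len;
 *
 *	kmsg_dump_rewind(&iter);
 *	while (kmsg_dump_get_line(&iter, true, buf, sizeof(buf), &len))
 *		write_to_storage(buf, len);
 */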

/**
 * kmsg_dump_get_buffer - copy kmsg log lines
 * @iter: kmsg dump iterator
 * @syslog: include the "<4>" prefixes
 * @buf: buffer to copy the lines to
 * @size: maximum size of the buffer
 * @len_out: length of text placed into the buffer
 *
 * Start at the end of the kmsg buffer and fill the provided buffer
 * with as many of the *youngest* kmsg records as fit into it.
 * If the buffer is large enough, all available kmsg records will be
 * copied with a single call.
 *
 * Consecutive calls will fill the buffer with the next block of
 * available older records, not including the earlier retrieved ones.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
			  char *buf, size_t size, size_t *len_out)
{
	u64 min_seq = latched_seq_read_nolock(&clear_seq);
	struct printk_info info;
	struct printk_record r;
	u64 seq;
	u64 next_seq;
	size_t len = 0;
	bool ret = false;
	bool time = printk_time;

	if (!buf || !size)
		goto out;

	if (iter->cur_seq < min_seq)
		iter->cur_seq = min_seq;

	if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
		if (info.seq != iter->cur_seq) {
			/* messages are gone, move to first available one */
			iter->cur_seq = info.seq;
		}
	}

	/* last entry */
	if (iter->cur_seq >= iter->next_seq)
		goto out;

	/*
	 * Find first record that fits, including all following records,
	 * into the user-provided buffer for this dump. Pass in size-1
	 * because this function (by way of record_print_text()) will
	 * not write more than size-1 bytes of text into @buf.
	 */
	seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
				     size - 1, syslog, time);

	/*
	 * Next kmsg_dump_get_buffer() invocation will dump block of
	 * older records stored right before this one.
	 */
	next_seq = seq;

	prb_rec_init_rd(&r, &info, buf, size);

	prb_for_each_record(seq, prb, seq, &r) {
		if (r.info->seq >= iter->next_seq)
			break;

		len += record_print_text(&r, syslog, time);

		/* Adjust record to store to remaining buffer space. */
		prb_rec_init_rd(&r, &info, buf + len, size - len);
	}

	iter->next_seq = next_seq;
	ret = true;
out:
	if (len_out)
		*len_out = len;
	return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);

/**
 * kmsg_dump_rewind - reset the iterator
 * @iter: kmsg dump iterator
 *
 * Reset the dumper's iterator so that kmsg_dump_get_line() and
 * kmsg_dump_get_buffer() can be called again and used multiple
 * times within the same dumper.dump() callback.
 */
void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
{
	iter->cur_seq = latched_seq_read_nolock(&clear_seq);
	iter->next_seq = prb_next_seq(prb);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);

/**
 * console_try_replay_all - try to replay kernel log on consoles
 *
 * Try to obtain lock on console subsystem and replay all
 * available records in printk buffer on the consoles.
 * Does nothing if lock is not obtained.
 *
 * Context: Any, except for NMI.
 */
void console_try_replay_all(void)
{
	struct console_flush_type ft;

	printk_get_console_flush_type(&ft);
	if (console_trylock()) {
		__console_rewind_all();
		if (ft.nbcon_atomic)
			nbcon_atomic_flush_pending();
		if (ft.nbcon_offload)
			nbcon_kthreads_wake();
		if (ft.legacy_offload)
			defer_console_output();
		/* Consoles are flushed as part of console_unlock(). */
		console_unlock();
	}
}
#endif

#ifdef CONFIG_SMP
static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);

bool is_printk_cpu_sync_owner(void)
{
	return (atomic_read(&printk_cpu_sync_owner) == raw_smp_processor_id());
}

/**
 * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
 *	spinning lock is not owned by any CPU.
 *
 * Context: Any context.
 */
void __printk_cpu_sync_wait(void)
{
	do {
		cpu_relax();
	} while (atomic_read(&printk_cpu_sync_owner) != -1);
}
EXPORT_SYMBOL(__printk_cpu_sync_wait);

/**
 * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
 *	spinning lock.
 *
 * If no processor has the lock, the calling processor takes the lock and
 * becomes the owner. If the calling processor is already the owner of the
 * lock, this function succeeds immediately.
 *
 * Context: Any context. Expects interrupts to be disabled.
 * Return: 1 on success, otherwise 0.
 */
int __printk_cpu_sync_try_get(void)
{
	int cpu;
	int old;

	cpu = smp_processor_id();

	/*
	 * Guarantee loads and stores from this CPU when it is the lock owner
	 * are _not_ visible to the previous lock owner. This pairs with
	 * __printk_cpu_sync_put:B.
	 *
	 * Memory barrier involvement:
	 *
	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
	 * then __printk_cpu_sync_put:A can never read from
	 * __printk_cpu_sync_try_get:B.
	 *
	 * Relies on:
	 *
	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
	 * of the previous CPU
	 *    matching
	 * ACQUIRE from __printk_cpu_sync_try_get:A to
	 * __printk_cpu_sync_try_get:B of this CPU
	 */
	old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
				     cpu); /* LMM(__printk_cpu_sync_try_get:A) */
	if (old == -1) {
		/*
		 * This CPU is now the owner and begins loading/storing
		 * data: LMM(__printk_cpu_sync_try_get:B)
		 */
		return 1;

	} else if (old == cpu) {
		/* This CPU is already the owner. */
		atomic_inc(&printk_cpu_sync_nested);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(__printk_cpu_sync_try_get);

/**
 * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
 *
 * The calling processor must be the owner of the lock.
 *
 * Context: Any context. Expects interrupts to be disabled.
 */
void __printk_cpu_sync_put(void)
{
	if (atomic_read(&printk_cpu_sync_nested)) {
		atomic_dec(&printk_cpu_sync_nested);
		return;
	}

	/*
	 * This CPU is finished loading/storing data:
	 * LMM(__printk_cpu_sync_put:A)
	 */

	/*
	 * Guarantee loads and stores from this CPU when it was the
	 * lock owner are visible to the next lock owner. This pairs
	 * with __printk_cpu_sync_try_get:A.
	 *
	 * Memory barrier involvement:
	 *
	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
	 * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
	 *
	 * Relies on:
	 *
	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
	 * of this CPU
	 *    matching
	 * ACQUIRE from __printk_cpu_sync_try_get:A to
	 * __printk_cpu_sync_try_get:B of the next CPU
	 */
	atomic_set_release(&printk_cpu_sync_owner,
			   -1); /* LMM(__printk_cpu_sync_put:B) */
}
EXPORT_SYMBOL(__printk_cpu_sync_put);
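
/*
 * Illustrative usage: callers do not take this lock directly but go
 * through the irqsave helpers in include/linux/printk.h, e.g. to keep a
 * multi-line backtrace from interleaving with output from other CPUs:
 *
 *	unsigned long flags;
 *
 *	printk_cpu_sync_get_irqsave(flags);
 *	... emit the block of output ...
 *	printk_cpu_sync_put_irqrestore(flags);
 */
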
#endif /* CONFIG_SMP */