// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/printk.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Modified to make sys_syslog() more flexible: added commands to
 * return the last 4k of kernel messages, regardless of whether
 * they've been read or not.  Added option to suppress kernel printk's
 * to the console.  Added hook for sending the console messages
 * elsewhere, in preparation for a serial line console (someday).
 * Ted Ts'o, 2/11/93.
 * Modified for sysctl support, 1/8/97, Chris Horn.
 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
 *     manfred@colorfullife.com
 * Rewrote bits to get rid of console_lock
 *	01Mar01 Andrew Morton
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/syscore_ops.h>
#include <linux/vmcore_info.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
#include <linux/syslog.h>
#include <linux/cpu.h>
#include <linux/rculist.h>
#include <linux/poll.h>
#include <linux/irq_work.h>
#include <linux/ctype.h>
#include <linux/uio.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <linux/uaccess.h>
#include <asm/sections.h>

#include <trace/events/initcall.h>
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>

#include "printk_ringbuffer.h"
#include "console_cmdline.h"
#include "braille.h"
#include "internal.h"

int console_printk[4] = {
        CONSOLE_LOGLEVEL_DEFAULT,       /* console_loglevel */
        MESSAGE_LOGLEVEL_DEFAULT,       /* default_message_loglevel */
        CONSOLE_LOGLEVEL_MIN,           /* minimum_console_loglevel */
        CONSOLE_LOGLEVEL_DEFAULT,       /* default_console_loglevel */
};
EXPORT_SYMBOL_GPL(console_printk);
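
/*
 * Illustrative sketch (userspace, not part of this file): the four values
 * above are exposed in this order via /proc/sys/kernel/printk, so they can
 * be inspected by simply reading that file. All names below are local to
 * the example.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int cur, def, min, boot_def;
 *		FILE *f = fopen("/proc/sys/kernel/printk", "r");
 *
 *		if (!f)
 *			return 1;
 *		if (fscanf(f, "%d %d %d %d", &cur, &def, &min, &boot_def) == 4)
 *			printf("console=%d default=%d min=%d boot=%d\n",
 *			       cur, def, min, boot_def);
 *		fclose(f);
 *		return 0;
 *	}
 */
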
atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
EXPORT_SYMBOL(ignore_console_lock_warning);

EXPORT_TRACEPOINT_SYMBOL_GPL(console);

/*
 * Low level drivers may need that to know if they can schedule in
 * their unblank() callback or not. So let's export it.
 */
int oops_in_progress;
EXPORT_SYMBOL(oops_in_progress);

/*
 * console_mutex protects console_list updates and console->flags updates.
 * The flags are synchronized only for consoles that are registered, i.e.
 * accessible via the console list.
 */
static DEFINE_MUTEX(console_mutex);

/*
 * console_sem protects updates to console->seq
 * and also provides serialization for console printing.
 */
static DEFINE_SEMAPHORE(console_sem, 1);
HLIST_HEAD(console_list);
EXPORT_SYMBOL_GPL(console_list);
DEFINE_STATIC_SRCU(console_srcu);

/*
 * The system may need to suppress printk messages under certain
 * circumstances, e.g. after a kernel panic.
 */
int __read_mostly suppress_printk;

#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_lock_dep_map = {
        .name = "console_lock"
};

void lockdep_assert_console_list_lock_held(void)
{
        lockdep_assert_held(&console_mutex);
}
EXPORT_SYMBOL(lockdep_assert_console_list_lock_held);
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
bool console_srcu_read_lock_is_held(void)
{
        return srcu_read_lock_held(&console_srcu);
}
EXPORT_SYMBOL(console_srcu_read_lock_is_held);
#endif

enum devkmsg_log_bits {
        __DEVKMSG_LOG_BIT_ON = 0,
        __DEVKMSG_LOG_BIT_OFF,
        __DEVKMSG_LOG_BIT_LOCK,
};

enum devkmsg_log_masks {
        DEVKMSG_LOG_MASK_ON   = BIT(__DEVKMSG_LOG_BIT_ON),
        DEVKMSG_LOG_MASK_OFF  = BIT(__DEVKMSG_LOG_BIT_OFF),
        DEVKMSG_LOG_MASK_LOCK = BIT(__DEVKMSG_LOG_BIT_LOCK),
};

/* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
#define DEVKMSG_LOG_MASK_DEFAULT 0

static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;

static int __control_devkmsg(char *str)
{
        size_t len;

        if (!str)
                return -EINVAL;

        len = str_has_prefix(str, "on");
        if (len) {
                devkmsg_log = DEVKMSG_LOG_MASK_ON;
                return len;
        }

        len = str_has_prefix(str, "off");
        if (len) {
                devkmsg_log = DEVKMSG_LOG_MASK_OFF;
                return len;
        }

        len = str_has_prefix(str, "ratelimit");
        if (len) {
                devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
                return len;
        }

        return -EINVAL;
}

static int __init control_devkmsg(char *str)
{
        if (__control_devkmsg(str) < 0) {
                pr_warn("printk.devkmsg: bad option string '%s'\n", str);
                return 1;
        }

        /*
         * Set sysctl string accordingly:
         */
        if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
                strscpy(devkmsg_log_str, "on");
        else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
                strscpy(devkmsg_log_str, "off");
        /* else "ratelimit" which is set by default. */

        /*
         * Sysctl cannot change it anymore. The kernel command line setting of
         * this parameter is to force the setting to be permanent throughout
         * the runtime of the system. This is a precaution measure against
         * userspace trying to be a smarta** and attempting to change it up
         * on us.
         */
        devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;

        return 1;
}
__setup("printk.devkmsg=", control_devkmsg);

char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
                              void *buffer, size_t *lenp, loff_t *ppos)
{
        char old_str[DEVKMSG_STR_MAX_SIZE];
        unsigned int old;
        int err;

        if (write) {
                if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
                        return -EINVAL;

                old = devkmsg_log;
                strscpy(old_str, devkmsg_log_str);
        }

        err = proc_dostring(table, write, buffer, lenp, ppos);
        if (err)
                return err;

        if (write) {
                err = __control_devkmsg(devkmsg_log_str);

                /*
                 * Do not accept an unknown string OR a known string with
                 * trailing crap...
                 */
                if (err < 0 || (err + 1 != *lenp)) {

                        /* ... and restore old setting. */
                        devkmsg_log = old;
                        strscpy(devkmsg_log_str, old_str);

                        return -EINVAL;
                }
        }

        return 0;
}
#endif /* CONFIG_PRINTK && CONFIG_SYSCTL */
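
/*
 * Illustrative sketch (userspace, not part of this file): the switch above
 * is exposed as the kernel.printk_devkmsg sysctl. Writing "on", "off" or
 * "ratelimit" selects the mode; once printk.devkmsg= was given on the
 * kernel command line, DEVKMSG_LOG_MASK_LOCK makes any such write fail
 * with EINVAL, as implemented in devkmsg_sysctl_set_loglvl() above.
 *
 *	#include <stdio.h>
 *
 *	static int set_devkmsg_mode(const char *mode)
 *	{
 *		FILE *f = fopen("/proc/sys/kernel/printk_devkmsg", "w");
 *		int ok;
 *
 *		if (!f)
 *			return -1;
 *		ok = fprintf(f, "%s\n", mode) > 0;
 *		return (fclose(f) == 0 && ok) ? 0 : -1;
 *	}
 */
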
/**
 * console_list_lock - Lock the console list
 *
 * For console list or console->flags updates
 */
void console_list_lock(void)
{
        /*
         * In unregister_console() and console_force_preferred_locked(),
         * synchronize_srcu() is called with the console_list_lock held.
         * Therefore it is not allowed that the console_list_lock is taken
         * with the srcu_lock held.
         *
         * Detecting if this context is really in the read-side critical
         * section is only possible if the appropriate debug options are
         * enabled.
         */
        WARN_ON_ONCE(debug_lockdep_rcu_enabled() &&
                     srcu_read_lock_held(&console_srcu));

        mutex_lock(&console_mutex);
}
EXPORT_SYMBOL(console_list_lock);

/**
 * console_list_unlock - Unlock the console list
 *
 * Counterpart to console_list_lock()
 */
void console_list_unlock(void)
{
        mutex_unlock(&console_mutex);
}
EXPORT_SYMBOL(console_list_unlock);

/**
 * console_srcu_read_lock - Register a new reader for the
 *	SRCU-protected console list
 *
 * Use for_each_console_srcu() to iterate the console list
 *
 * Context: Any context.
 * Return: A cookie to pass to console_srcu_read_unlock().
 */
int console_srcu_read_lock(void)
        __acquires(&console_srcu)
{
        return srcu_read_lock_nmisafe(&console_srcu);
}
EXPORT_SYMBOL(console_srcu_read_lock);

/**
 * console_srcu_read_unlock - Unregister an old reader from
 *	the SRCU-protected console list
 * @cookie: cookie returned from console_srcu_read_lock()
 *
 * Counterpart to console_srcu_read_lock()
 */
void console_srcu_read_unlock(int cookie)
        __releases(&console_srcu)
{
        srcu_read_unlock_nmisafe(&console_srcu, cookie);
}
EXPORT_SYMBOL(console_srcu_read_unlock);
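
/*
 * Typical reader pairing for the two helpers above (a sketch of the
 * intended usage; for_each_console_srcu() and console_srcu_read_flags()
 * are provided by <linux/console.h>):
 *
 *	struct console *con;
 *	int cookie;
 *
 *	cookie = console_srcu_read_lock();
 *	for_each_console_srcu(con) {
 *		if (console_srcu_read_flags(con) & CON_ENABLED)
 *			do_something(con);
 *	}
 *	console_srcu_read_unlock(cookie);
 *
 * do_something() stands in for whatever per-console work the caller needs.
 */
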
/*
 * Helper macros to handle lockdep when locking/unlocking console_sem. We use
 * macros instead of functions so that _RET_IP_ contains useful information.
 */
#define down_console_sem() do { \
        down(&console_sem);\
        mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
} while (0)

static int __down_trylock_console_sem(unsigned long ip)
{
        int lock_failed;
        unsigned long flags;

        /*
         * Here and in __up_console_sem() we need to be in safe mode,
         * because spindump/WARN/etc from under console ->lock will
         * deadlock in printk()->down_trylock_console_sem() otherwise.
         */
        printk_safe_enter_irqsave(flags);
        lock_failed = down_trylock(&console_sem);
        printk_safe_exit_irqrestore(flags);

        if (lock_failed)
                return 1;
        mutex_acquire(&console_lock_dep_map, 0, 1, ip);
        return 0;
}
#define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)

static void __up_console_sem(unsigned long ip)
{
        unsigned long flags;

        mutex_release(&console_lock_dep_map, ip);

        printk_safe_enter_irqsave(flags);
        up(&console_sem);
        printk_safe_exit_irqrestore(flags);
}
#define up_console_sem() __up_console_sem(_RET_IP_)

static bool panic_in_progress(void)
{
        return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
}

/* Return true if a panic is in progress on the current CPU. */
bool this_cpu_in_panic(void)
{
        /*
         * We can use raw_smp_processor_id() here because it is impossible for
         * the task to be migrated to the panic_cpu, or away from it. If
         * panic_cpu has already been set, and we're not currently executing on
         * that CPU, then we never will be.
         */
        return unlikely(atomic_read(&panic_cpu) == raw_smp_processor_id());
}

/*
 * Return true if a panic is in progress on a remote CPU.
 *
 * On true, the local CPU should immediately release any printing resources
 * that may be needed by the panic CPU.
 */
bool other_cpu_in_panic(void)
{
        return (panic_in_progress() && !this_cpu_in_panic());
}

/*
 * This is used for debugging the mess that is the VT code by
 * keeping track if we have the console semaphore held. It's
 * definitely not the perfect debug tool (we don't know if _WE_
 * hold it and are racing, but it helps tracking those weird code
 * paths in the console code where we end up in places I want
 * locked without the console semaphore held).
 */
static int console_locked;

/*
 * Array of consoles built from command line options (console=)
 */

#define MAX_CMDLINECONSOLES 8

static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];

static int preferred_console = -1;
int console_set_on_cmdline;
EXPORT_SYMBOL(console_set_on_cmdline);

/* Flag: console code may call schedule() */
static int console_may_schedule;

enum con_msg_format_flags {
        MSG_FORMAT_DEFAULT = 0,
        MSG_FORMAT_SYSLOG  = (1 << 0),
};

static int console_msg_format = MSG_FORMAT_DEFAULT;

/*
 * The printk log buffer consists of a sequenced collection of records, each
 * containing variable length message text. Every record also contains its
 * own meta-data (@info).
 *
 * Every record meta-data carries the timestamp in microseconds, as well as
 * the standard userspace syslog level and syslog facility. The usual kernel
 * messages use LOG_KERN; userspace-injected messages always carry a matching
 * syslog facility, by default LOG_USER. The origin of every message can be
 * reliably determined that way.
 *
 * The human readable log message of a record is available in @text, the
 * length of the message text in @text_len. The stored message is not
 * terminated.
 *
 * Optionally, a record can carry a dictionary of properties (key/value
 * pairs), to provide userspace with a machine-readable message context.
 *
 * Examples for well-defined, commonly used property names are:
 *   DEVICE=b12:8         device identifier
 *                          b12:8         block dev_t
 *                          c127:3        char dev_t
 *                          n8            netdev ifindex
 *                          +sound:card0  subsystem:devname
 *   SUBSYSTEM=pci        driver-core subsystem name
 *
 * Valid characters in property names are [a-zA-Z0-9.-_]. Property names
 * and values are terminated by a '\0' character.
 *
 * Example of record values:
 *   record.text_buf                = "it's a line" (unterminated)
 *   record.info.seq                = 56
 *   record.info.ts_nsec            = 36863
 *   record.info.text_len           = 11
 *   record.info.facility           = 0 (LOG_KERN)
 *   record.info.flags              = 0
 *   record.info.level              = 3 (LOG_ERR)
 *   record.info.caller_id          = 299 (task 299)
 *   record.info.dev_info.subsystem = "pci" (terminated)
 *   record.info.dev_info.device    = "+pci:0000:00:01.0" (terminated)
 *
 * The 'struct printk_info' buffer must never be directly exported to
 * userspace, it is a kernel-private implementation detail that might
 * need to be changed in the future, when the requirements change.
 *
 * /dev/kmsg exports the structured data in the following line format:
 *   "<level>,<seqnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
 *
 * Users of the export format should ignore possible additional values
 * separated by ',', and find the message after the ';' character.
 *
 * The optional key/value pairs are attached as continuation lines starting
 * with a space character and terminated by a newline. All possible
 * non-printable characters are escaped in the "\xff" notation.
 */
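
/*
 * Illustrative sketch (userspace, not part of this file) of consuming the
 * export format described above: split at the first ';', parse the first
 * comma-separated fields and ignore any additional ones.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	static void parse_kmsg_line(char *line)
 *	{
 *		unsigned long long seq, ts_usec;
 *		unsigned int prival;
 *		char *msg = strchr(line, ';');
 *
 *		if (!msg)
 *			return;
 *		*msg++ = '\0';
 *		if (sscanf(line, "%u,%llu,%llu", &prival, &seq, &ts_usec) != 3)
 *			return;
 *		printf("facility=%u level=%u seq=%llu usec=%llu text=%s",
 *		       prival >> 3, prival & 7, seq, ts_usec, msg);
 *	}
 */
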
/* syslog_lock protects syslog_* variables and write access to clear_seq. */
static DEFINE_MUTEX(syslog_lock);

/*
 * Specifies if a legacy console is registered. If legacy consoles are
 * present, it is necessary to perform the console lock/unlock dance
 * whenever console flushing should occur.
 */
bool have_legacy_console;

/*
 * Specifies if an nbcon console is registered. If nbcon consoles are present,
 * synchronous printing of legacy consoles will not occur during panic until
 * the backtrace has been stored to the ringbuffer.
 */
bool have_nbcon_console;

/*
 * Specifies if a boot console is registered. If boot consoles are present,
 * nbcon consoles cannot print simultaneously and must be synchronized by
 * the console lock. This is because boot consoles and nbcon consoles may
 * have mapped the same hardware.
 */
bool have_boot_console;

/* See printk_legacy_allow_panic_sync() for details. */
bool legacy_allow_panic_sync;

#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static size_t syslog_partial;
static bool syslog_time;

/* True when _all_ printer threads are available for printing. */
bool printk_kthreads_running;

struct latched_seq {
        seqcount_latch_t latch;
        u64 val[2];
};

/*
 * The next printk record to read after the last 'clear' command. There are
 * two copies (updated with seqcount_latch) so that reads can locklessly
 * access a valid value. Writers are synchronized by @syslog_lock.
 */
static struct latched_seq clear_seq = {
        .latch  = SEQCNT_LATCH_ZERO(clear_seq.latch),
        .val[0] = 0,
        .val[1] = 0,
};

#define LOG_LEVEL(v)    ((v) & 0x07)
#define LOG_FACILITY(v) ((v) >> 3 & 0xff)

/* record buffer */
#define LOG_ALIGN __alignof__(unsigned long)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
#define LOG_BUF_LEN_MAX (u32)(1 << 31)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;

/*
 * Define the average message size. This only affects the number of
 * descriptors that will be available. Underestimating is better than
 * overestimating (too many available descriptors is better than not enough).
 */
#define PRB_AVGBITS 5   /* 32 character average length */

#if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS
#error CONFIG_LOG_BUF_SHIFT value too small.
#endif
_DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
                 PRB_AVGBITS, &__log_buf[0]);

static struct printk_ringbuffer printk_rb_dynamic;

struct printk_ringbuffer *prb = &printk_rb_static;

/*
 * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
 * per_cpu_areas are initialised. This variable is set to true when
 * it's safe to access per-CPU data.
 */
static bool __printk_percpu_data_ready __ro_after_init;

bool printk_percpu_data_ready(void)
{
        return __printk_percpu_data_ready;
}

/* Must be called under syslog_lock. */
static void latched_seq_write(struct latched_seq *ls, u64 val)
{
        write_seqcount_latch_begin(&ls->latch);
        ls->val[0] = val;
        write_seqcount_latch(&ls->latch);
        ls->val[1] = val;
        write_seqcount_latch_end(&ls->latch);
}

/* Can be called from any context. */
static u64 latched_seq_read_nolock(struct latched_seq *ls)
{
        unsigned int seq;
        unsigned int idx;
        u64 val;

        do {
                seq = read_seqcount_latch(&ls->latch);
                idx = seq & 0x1;
                val = ls->val[idx];
        } while (read_seqcount_latch_retry(&ls->latch, seq));

        return val;
}

/* Return log buffer address */
char *log_buf_addr_get(void)
{
        return log_buf;
}

/* Return log buffer size */
u32 log_buf_len_get(void)
{
        return log_buf_len;
}

/*
 * Define how much of the log buffer we could take at maximum. The value
 * must be greater than two. Note that only half of the buffer is available
 * when the index points to the middle.
 */
#define MAX_LOG_TAKE_PART 4
static const char trunc_msg[] = "<truncated>";

static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
{
        /*
         * The message should not take the whole buffer. Otherwise, it might
         * get removed too soon.
         */
        u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;

        if (*text_len > max_text_len)
                *text_len = max_text_len;

        /* enable the warning message (if there is room) */
        *trunc_msg_len = strlen(trunc_msg);
        if (*text_len >= *trunc_msg_len)
                *text_len -= *trunc_msg_len;
        else
                *trunc_msg_len = 0;
}

int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);

static int syslog_action_restricted(int type)
{
        if (dmesg_restrict)
                return 1;
        /*
         * Unless restricted, we allow "read all" and "get buffer size"
         * for everybody.
634 */ 635 return type != SYSLOG_ACTION_READ_ALL && 636 type != SYSLOG_ACTION_SIZE_BUFFER; 637 } 638 639 static int check_syslog_permissions(int type, int source) 640 { 641 /* 642 * If this is from /proc/kmsg and we've already opened it, then we've 643 * already done the capabilities checks at open time. 644 */ 645 if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN) 646 goto ok; 647 648 if (syslog_action_restricted(type)) { 649 if (capable(CAP_SYSLOG)) 650 goto ok; 651 return -EPERM; 652 } 653 ok: 654 return security_syslog(type); 655 } 656 657 static void append_char(char **pp, char *e, char c) 658 { 659 if (*pp < e) 660 *(*pp)++ = c; 661 } 662 663 static ssize_t info_print_ext_header(char *buf, size_t size, 664 struct printk_info *info) 665 { 666 u64 ts_usec = info->ts_nsec; 667 char caller[20]; 668 #ifdef CONFIG_PRINTK_CALLER 669 u32 id = info->caller_id; 670 671 snprintf(caller, sizeof(caller), ",caller=%c%u", 672 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000); 673 #else 674 caller[0] = '\0'; 675 #endif 676 677 do_div(ts_usec, 1000); 678 679 return scnprintf(buf, size, "%u,%llu,%llu,%c%s;", 680 (info->facility << 3) | info->level, info->seq, 681 ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller); 682 } 683 684 static ssize_t msg_add_ext_text(char *buf, size_t size, 685 const char *text, size_t text_len, 686 unsigned char endc) 687 { 688 char *p = buf, *e = buf + size; 689 size_t i; 690 691 /* escape non-printable characters */ 692 for (i = 0; i < text_len; i++) { 693 unsigned char c = text[i]; 694 695 if (c < ' ' || c >= 127 || c == '\\') 696 p += scnprintf(p, e - p, "\\x%02x", c); 697 else 698 append_char(&p, e, c); 699 } 700 append_char(&p, e, endc); 701 702 return p - buf; 703 } 704 705 static ssize_t msg_add_dict_text(char *buf, size_t size, 706 const char *key, const char *val) 707 { 708 size_t val_len = strlen(val); 709 ssize_t len; 710 711 if (!val_len) 712 return 0; 713 714 len = msg_add_ext_text(buf, size, "", 0, ' '); /* dict prefix */ 715 len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '='); 716 len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n'); 717 718 return len; 719 } 720 721 static ssize_t msg_print_ext_body(char *buf, size_t size, 722 char *text, size_t text_len, 723 struct dev_printk_info *dev_info) 724 { 725 ssize_t len; 726 727 len = msg_add_ext_text(buf, size, text, text_len, '\n'); 728 729 if (!dev_info) 730 goto out; 731 732 len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM", 733 dev_info->subsystem); 734 len += msg_add_dict_text(buf + len, size - len, "DEVICE", 735 dev_info->device); 736 out: 737 return len; 738 } 739 740 /* /dev/kmsg - userspace message inject/listen interface */ 741 struct devkmsg_user { 742 atomic64_t seq; 743 struct ratelimit_state rs; 744 struct mutex lock; 745 struct printk_buffers pbufs; 746 }; 747 748 static __printf(3, 4) __cold 749 int devkmsg_emit(int facility, int level, const char *fmt, ...) 
static __printf(3, 4) __cold
int devkmsg_emit(int facility, int level, const char *fmt, ...)
{
        va_list args;
        int r;

        va_start(args, fmt);
        r = vprintk_emit(facility, level, NULL, fmt, args);
        va_end(args);

        return r;
}

static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
{
        char *buf, *line;
        int level = default_message_loglevel;
        int facility = 1;       /* LOG_USER */
        struct file *file = iocb->ki_filp;
        struct devkmsg_user *user = file->private_data;
        size_t len = iov_iter_count(from);
        ssize_t ret = len;

        if (len > PRINTKRB_RECORD_MAX)
                return -EINVAL;

        /* Ignore when user logging is disabled. */
        if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
                return len;

        /* Ratelimit when not explicitly enabled. */
        if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
                if (!___ratelimit(&user->rs, current->comm))
                        return ret;
        }

        buf = kmalloc(len+1, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        buf[len] = '\0';
        if (!copy_from_iter_full(buf, len, from)) {
                kfree(buf);
                return -EFAULT;
        }

        /*
         * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
         * the decimal value represents a 32-bit quantity: the lower 3 bits
         * are the log level, the remaining bits the log facility.
         *
         * If no prefix or no userspace facility is specified, we
         * enforce LOG_USER, to be able to reliably distinguish
         * kernel-generated messages from userspace-injected ones.
         */
        line = buf;
        if (line[0] == '<') {
                char *endp = NULL;
                unsigned int u;

                u = simple_strtoul(line + 1, &endp, 10);
                if (endp && endp[0] == '>') {
                        level = LOG_LEVEL(u);
                        if (LOG_FACILITY(u) != 0)
                                facility = LOG_FACILITY(u);
                        endp++;
                        line = endp;
                }
        }

        devkmsg_emit(facility, level, "%s", line);
        kfree(buf);
        return ret;
}
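
/*
 * Illustrative sketch (userspace, not part of this file): injecting a
 * message through /dev/kmsg using the prefix rules above. "<14>" encodes
 * facility LOG_USER (1 << 3) plus level 6 (info); without a prefix the
 * message would get default_message_loglevel and the enforced LOG_USER
 * facility.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static int kmsg_inject(const char *text)
 *	{
 *		char buf[256];
 *		int fd, len, ret;
 *
 *		fd = open("/dev/kmsg", O_WRONLY);
 *		if (fd < 0)
 *			return -1;
 *		len = snprintf(buf, sizeof(buf), "<14>%s\n", text);
 *		ret = write(fd, buf, len) == len ? 0 : -1;
 *		close(fd);
 *		return ret;
 *	}
 */
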
static ssize_t devkmsg_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct devkmsg_user *user = file->private_data;
        char *outbuf = &user->pbufs.outbuf[0];
        struct printk_message pmsg = {
                .pbufs = &user->pbufs,
        };
        ssize_t ret;

        ret = mutex_lock_interruptible(&user->lock);
        if (ret)
                return ret;

        if (!printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, false)) {
                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        goto out;
                }

                /*
                 * Guarantee this task is visible on the waitqueue before
                 * checking the wake condition.
                 *
                 * The full memory barrier within set_current_state() of
                 * prepare_to_wait_event() pairs with the full memory barrier
                 * within wq_has_sleeper().
                 *
                 * This pairs with __wake_up_klogd:A.
                 */
                ret = wait_event_interruptible(log_wait,
                                printk_get_next_message(&pmsg, atomic64_read(&user->seq), true,
                                                        false)); /* LMM(devkmsg_read:A) */
                if (ret)
                        goto out;
        }

        if (pmsg.dropped) {
                /* our last seen message is gone, return error and reset */
                atomic64_set(&user->seq, pmsg.seq);
                ret = -EPIPE;
                goto out;
        }

        atomic64_set(&user->seq, pmsg.seq + 1);

        if (pmsg.outbuf_len > count) {
                ret = -EINVAL;
                goto out;
        }

        if (copy_to_user(buf, outbuf, pmsg.outbuf_len)) {
                ret = -EFAULT;
                goto out;
        }
        ret = pmsg.outbuf_len;
out:
        mutex_unlock(&user->lock);
        return ret;
}

/*
 * Be careful when modifying this function!!!
 *
 * Only a few operations are supported because the device works only with
 * entire variable-length messages (records). Non-standard values are
 * returned in the other cases, and it has been this way for quite some time.
 * User space applications might depend on this behavior.
 */
static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
{
        struct devkmsg_user *user = file->private_data;
        loff_t ret = 0;

        if (offset)
                return -ESPIPE;

        switch (whence) {
        case SEEK_SET:
                /* the first record */
                atomic64_set(&user->seq, prb_first_valid_seq(prb));
                break;
        case SEEK_DATA:
                /*
                 * The first record after the last SYSLOG_ACTION_CLEAR,
                 * like issued by 'dmesg -c'. Reading /dev/kmsg itself
                 * changes no global state, and does not clear anything.
                 */
                atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
                break;
        case SEEK_END:
                /* after the last record */
                atomic64_set(&user->seq, prb_next_seq(prb));
                break;
        default:
                ret = -EINVAL;
        }
        return ret;
}

static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
{
        struct devkmsg_user *user = file->private_data;
        struct printk_info info;
        __poll_t ret = 0;

        poll_wait(file, &log_wait, wait);

        if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
                /* return error when data has vanished underneath us */
                if (info.seq != atomic64_read(&user->seq))
                        ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
                else
                        ret = EPOLLIN|EPOLLRDNORM;
        }

        return ret;
}

static int devkmsg_open(struct inode *inode, struct file *file)
{
        struct devkmsg_user *user;
        int err;

        if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
                return -EPERM;

        /* write-only does not need any file context */
        if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
                err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
                                               SYSLOG_FROM_READER);
                if (err)
                        return err;
        }

        user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
        if (!user)
                return -ENOMEM;

        ratelimit_default_init(&user->rs);
        ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);

        mutex_init(&user->lock);

        atomic64_set(&user->seq, prb_first_valid_seq(prb));

        file->private_data = user;
        return 0;
}

static int devkmsg_release(struct inode *inode, struct file *file)
{
        struct devkmsg_user *user = file->private_data;

        ratelimit_state_exit(&user->rs);

        mutex_destroy(&user->lock);
        kvfree(user);
        return 0;
}

const struct file_operations kmsg_fops = {
        .open = devkmsg_open,
        .read = devkmsg_read,
        .write_iter = devkmsg_write,
        .llseek = devkmsg_llseek,
        .poll = devkmsg_poll,
        .release = devkmsg_release,
};
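
/*
 * Illustrative sketch (userspace, not part of this file): following only
 * new messages with the llseek semantics above. SEEK_END positions after
 * the last record, each read() returns one record, and EPIPE means the
 * position was overwritten before it could be read (the position is
 * reset by the kernel, so the reader just retries).
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void tail_kmsg(void)
 *	{
 *		char buf[8192];
 *		ssize_t n;
 *		int fd = open("/dev/kmsg", O_RDONLY);
 *
 *		if (fd < 0)
 *			return;
 *		lseek(fd, 0, SEEK_END);
 *		for (;;) {
 *			n = read(fd, buf, sizeof(buf) - 1);
 *			if (n < 0 && errno == EPIPE)
 *				continue;
 *			if (n <= 0)
 *				break;
 *			buf[n] = '\0';
 *			fputs(buf, stdout);
 *		}
 *		close(fd);
 *	}
 */
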
#ifdef CONFIG_VMCORE_INFO
/*
 * This appends the listed symbols to /proc/vmcore.
 *
 * /proc/vmcore is used by various utilities, like crash and makedumpfile, to
 * obtain access to symbols that are otherwise very difficult to locate. These
 * symbols are specifically used so that utilities can access and extract the
 * dmesg log from a vmcore file after a crash.
 */
void log_buf_vmcoreinfo_setup(void)
{
        struct dev_printk_info *dev_info = NULL;

        VMCOREINFO_SYMBOL(prb);
        VMCOREINFO_SYMBOL(printk_rb_static);
        VMCOREINFO_SYMBOL(clear_seq);

        /*
         * Export struct size and field offsets. User space tools can
         * parse it and detect any changes to structure down the line.
         */

        VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
        VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
        VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
        VMCOREINFO_OFFSET(printk_ringbuffer, fail);

        VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
        VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
        VMCOREINFO_OFFSET(prb_desc_ring, descs);
        VMCOREINFO_OFFSET(prb_desc_ring, infos);
        VMCOREINFO_OFFSET(prb_desc_ring, head_id);
        VMCOREINFO_OFFSET(prb_desc_ring, tail_id);

        VMCOREINFO_STRUCT_SIZE(prb_desc);
        VMCOREINFO_OFFSET(prb_desc, state_var);
        VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);

        VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
        VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
        VMCOREINFO_OFFSET(prb_data_blk_lpos, next);

        VMCOREINFO_STRUCT_SIZE(printk_info);
        VMCOREINFO_OFFSET(printk_info, seq);
        VMCOREINFO_OFFSET(printk_info, ts_nsec);
        VMCOREINFO_OFFSET(printk_info, text_len);
        VMCOREINFO_OFFSET(printk_info, caller_id);
        VMCOREINFO_OFFSET(printk_info, dev_info);

        VMCOREINFO_STRUCT_SIZE(dev_printk_info);
        VMCOREINFO_OFFSET(dev_printk_info, subsystem);
        VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem));
        VMCOREINFO_OFFSET(dev_printk_info, device);
        VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device));

        VMCOREINFO_STRUCT_SIZE(prb_data_ring);
        VMCOREINFO_OFFSET(prb_data_ring, size_bits);
        VMCOREINFO_OFFSET(prb_data_ring, data);
        VMCOREINFO_OFFSET(prb_data_ring, head_lpos);
        VMCOREINFO_OFFSET(prb_data_ring, tail_lpos);

        VMCOREINFO_SIZE(atomic_long_t);
        VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);

        VMCOREINFO_STRUCT_SIZE(latched_seq);
        VMCOREINFO_OFFSET(latched_seq, val);
}
#endif

/* requested log_buf_len from kernel cmdline */
static unsigned long __initdata new_log_buf_len;

/* we practice scaling the ring buffer by powers of 2 */
static void __init log_buf_len_update(u64 size)
{
        if (size > (u64)LOG_BUF_LEN_MAX) {
                size = (u64)LOG_BUF_LEN_MAX;
                pr_err("log_buf over 2G is not supported.\n");
        }

        if (size)
                size = roundup_pow_of_two(size);
        if (size > log_buf_len)
                new_log_buf_len = (unsigned long)size;
}

/* save requested log_buf_len since it's too early to process it */
static int __init log_buf_len_setup(char *str)
{
        u64 size;

        if (!str)
                return -EINVAL;

        size = memparse(str, &str);

        log_buf_len_update(size);

        return 0;
}
early_param("log_buf_len", log_buf_len_setup);
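
/*
 * Worked example for the boot parameter handled above, assuming the
 * common CONFIG_LOG_BUF_SHIFT default of 17 (a 128 KiB static buffer):
 *
 *	log_buf_len=300K   rounds up to a 512 KiB request
 *	log_buf_len=1M     already a power of two, 1 MiB request
 *	log_buf_len=3G     clamped to LOG_BUF_LEN_MAX (2 GiB)
 *
 * The request only takes effect if it exceeds the current log_buf_len;
 * the dynamic buffer itself is allocated later, in setup_log_buf().
 */
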
1106 */ 1107 if (num_possible_cpus() == 1) 1108 return; 1109 1110 cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN; 1111 1112 /* by default this will only continue through for large > 64 CPUs */ 1113 if (cpu_extra <= __LOG_BUF_LEN / 2) 1114 return; 1115 1116 pr_info("log_buf_len individual max cpu contribution: %d bytes\n", 1117 __LOG_CPU_MAX_BUF_LEN); 1118 pr_info("log_buf_len total cpu_extra contributions: %d bytes\n", 1119 cpu_extra); 1120 pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN); 1121 1122 log_buf_len_update(cpu_extra + __LOG_BUF_LEN); 1123 } 1124 #else /* !CONFIG_SMP */ 1125 static inline void log_buf_add_cpu(void) {} 1126 #endif /* CONFIG_SMP */ 1127 1128 static void __init set_percpu_data_ready(void) 1129 { 1130 __printk_percpu_data_ready = true; 1131 } 1132 1133 static unsigned int __init add_to_rb(struct printk_ringbuffer *rb, 1134 struct printk_record *r) 1135 { 1136 struct prb_reserved_entry e; 1137 struct printk_record dest_r; 1138 1139 prb_rec_init_wr(&dest_r, r->info->text_len); 1140 1141 if (!prb_reserve(&e, rb, &dest_r)) 1142 return 0; 1143 1144 memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len); 1145 dest_r.info->text_len = r->info->text_len; 1146 dest_r.info->facility = r->info->facility; 1147 dest_r.info->level = r->info->level; 1148 dest_r.info->flags = r->info->flags; 1149 dest_r.info->ts_nsec = r->info->ts_nsec; 1150 dest_r.info->caller_id = r->info->caller_id; 1151 memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info)); 1152 1153 prb_final_commit(&e); 1154 1155 return prb_record_text_space(&e); 1156 } 1157 1158 static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata; 1159 1160 void __init setup_log_buf(int early) 1161 { 1162 struct printk_info *new_infos; 1163 unsigned int new_descs_count; 1164 struct prb_desc *new_descs; 1165 struct printk_info info; 1166 struct printk_record r; 1167 unsigned int text_size; 1168 size_t new_descs_size; 1169 size_t new_infos_size; 1170 unsigned long flags; 1171 char *new_log_buf; 1172 unsigned int free; 1173 u64 seq; 1174 1175 /* 1176 * Some archs call setup_log_buf() multiple times - first is very 1177 * early, e.g. from setup_arch(), and second - when percpu_areas 1178 * are initialised. 
1179 */ 1180 if (!early) 1181 set_percpu_data_ready(); 1182 1183 if (log_buf != __log_buf) 1184 return; 1185 1186 if (!early && !new_log_buf_len) 1187 log_buf_add_cpu(); 1188 1189 if (!new_log_buf_len) 1190 return; 1191 1192 new_descs_count = new_log_buf_len >> PRB_AVGBITS; 1193 if (new_descs_count == 0) { 1194 pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len); 1195 return; 1196 } 1197 1198 new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN); 1199 if (unlikely(!new_log_buf)) { 1200 pr_err("log_buf_len: %lu text bytes not available\n", 1201 new_log_buf_len); 1202 return; 1203 } 1204 1205 new_descs_size = new_descs_count * sizeof(struct prb_desc); 1206 new_descs = memblock_alloc(new_descs_size, LOG_ALIGN); 1207 if (unlikely(!new_descs)) { 1208 pr_err("log_buf_len: %zu desc bytes not available\n", 1209 new_descs_size); 1210 goto err_free_log_buf; 1211 } 1212 1213 new_infos_size = new_descs_count * sizeof(struct printk_info); 1214 new_infos = memblock_alloc(new_infos_size, LOG_ALIGN); 1215 if (unlikely(!new_infos)) { 1216 pr_err("log_buf_len: %zu info bytes not available\n", 1217 new_infos_size); 1218 goto err_free_descs; 1219 } 1220 1221 prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf)); 1222 1223 prb_init(&printk_rb_dynamic, 1224 new_log_buf, ilog2(new_log_buf_len), 1225 new_descs, ilog2(new_descs_count), 1226 new_infos); 1227 1228 local_irq_save(flags); 1229 1230 log_buf_len = new_log_buf_len; 1231 log_buf = new_log_buf; 1232 new_log_buf_len = 0; 1233 1234 free = __LOG_BUF_LEN; 1235 prb_for_each_record(0, &printk_rb_static, seq, &r) { 1236 text_size = add_to_rb(&printk_rb_dynamic, &r); 1237 if (text_size > free) 1238 free = 0; 1239 else 1240 free -= text_size; 1241 } 1242 1243 prb = &printk_rb_dynamic; 1244 1245 local_irq_restore(flags); 1246 1247 /* 1248 * Copy any remaining messages that might have appeared from 1249 * NMI context after copying but before switching to the 1250 * dynamic buffer. 1251 */ 1252 prb_for_each_record(seq, &printk_rb_static, seq, &r) { 1253 text_size = add_to_rb(&printk_rb_dynamic, &r); 1254 if (text_size > free) 1255 free = 0; 1256 else 1257 free -= text_size; 1258 } 1259 1260 if (seq != prb_next_seq(&printk_rb_static)) { 1261 pr_err("dropped %llu messages\n", 1262 prb_next_seq(&printk_rb_static) - seq); 1263 } 1264 1265 pr_info("log_buf_len: %u bytes\n", log_buf_len); 1266 pr_info("early log buf free: %u(%u%%)\n", 1267 free, (free * 100) / __LOG_BUF_LEN); 1268 return; 1269 1270 err_free_descs: 1271 memblock_free(new_descs, new_descs_size); 1272 err_free_log_buf: 1273 memblock_free(new_log_buf, new_log_buf_len); 1274 } 1275 1276 static bool __read_mostly ignore_loglevel; 1277 1278 static int __init ignore_loglevel_setup(char *str) 1279 { 1280 ignore_loglevel = true; 1281 pr_info("debug: ignoring loglevel setting.\n"); 1282 1283 return 0; 1284 } 1285 1286 early_param("ignore_loglevel", ignore_loglevel_setup); 1287 module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR); 1288 MODULE_PARM_DESC(ignore_loglevel, 1289 "ignore loglevel setting (prints all kernel messages to the console)"); 1290 1291 static bool suppress_message_printing(int level) 1292 { 1293 return (level >= console_loglevel && !ignore_loglevel); 1294 } 1295 1296 #ifdef CONFIG_BOOT_PRINTK_DELAY 1297 1298 static int boot_delay; /* msecs delay after each printk during bootup */ 1299 static unsigned long long loops_per_msec; /* based on boot_delay */ 1300 1301 static int __init boot_delay_setup(char *str) 1302 { 1303 unsigned long lpj; 1304 1305 lpj = preset_lpj ? 
        lpj = preset_lpj ? preset_lpj : 1000000;        /* some guess */
        loops_per_msec = (unsigned long long)lpj / 1000 * HZ;

        get_option(&str, &boot_delay);
        if (boot_delay > 10 * 1000)
                boot_delay = 0;

        pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
                 "HZ: %d, loops_per_msec: %llu\n",
                 boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
        return 0;
}
early_param("boot_delay", boot_delay_setup);

static void boot_delay_msec(int level)
{
        unsigned long long k;
        unsigned long timeout;

        if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING)
                || suppress_message_printing(level)) {
                return;
        }

        k = (unsigned long long)loops_per_msec * boot_delay;

        timeout = jiffies + msecs_to_jiffies(boot_delay);
        while (k) {
                k--;
                cpu_relax();
                /*
                 * use (volatile) jiffies to prevent
                 * compiler reduction; loop termination via jiffies
                 * is secondary and may or may not happen.
                 */
                if (time_after(jiffies, timeout))
                        break;
                touch_nmi_watchdog();
        }
}
#else
static inline void boot_delay_msec(int level)
{
}
#endif

static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);

static size_t print_syslog(unsigned int level, char *buf)
{
        return sprintf(buf, "<%u>", level);
}

static size_t print_time(u64 ts, char *buf)
{
        unsigned long rem_nsec = do_div(ts, 1000000000);

        return sprintf(buf, "[%5lu.%06lu]",
                       (unsigned long)ts, rem_nsec / 1000);
}

#ifdef CONFIG_PRINTK_CALLER
static size_t print_caller(u32 id, char *buf)
{
        char caller[12];

        snprintf(caller, sizeof(caller), "%c%u",
                 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
        return sprintf(buf, "[%6s]", caller);
}
#else
#define print_caller(id, buf) 0
#endif

static size_t info_print_prefix(const struct printk_info *info, bool syslog,
                                bool time, char *buf)
{
        size_t len = 0;

        if (syslog)
                len = print_syslog((info->facility << 3) | info->level, buf);

        if (time)
                len += print_time(info->ts_nsec, buf + len);

        len += print_caller(info->caller_id, buf + len);

        if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) {
                buf[len++] = ' ';
                buf[len] = '\0';
        }

        return len;
}
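
/*
 * Worked example for the prefix helpers above: a record with
 * ts_nsec = 5123456789, facility LOG_KERN (0), level 6 and, with
 * CONFIG_PRINTK_CALLER, caller_id = 1234 (a task) yields:
 *
 *	syslog=false, time=true:  "[    5.123456][ T1234] "
 *	syslog=true,  time=true:  "<6>[    5.123456][ T1234] "
 *
 * Without CONFIG_PRINTK_CALLER the "[ T1234]" part is absent, and with
 * time=false as well, no trailing space is emitted at all.
 */
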
1417 */ 1418 static size_t record_print_text(struct printk_record *r, bool syslog, 1419 bool time) 1420 { 1421 size_t text_len = r->info->text_len; 1422 size_t buf_size = r->text_buf_size; 1423 char *text = r->text_buf; 1424 char prefix[PRINTK_PREFIX_MAX]; 1425 bool truncated = false; 1426 size_t prefix_len; 1427 size_t line_len; 1428 size_t len = 0; 1429 char *next; 1430 1431 /* 1432 * If the message was truncated because the buffer was not large 1433 * enough, treat the available text as if it were the full text. 1434 */ 1435 if (text_len > buf_size) 1436 text_len = buf_size; 1437 1438 prefix_len = info_print_prefix(r->info, syslog, time, prefix); 1439 1440 /* 1441 * @text_len: bytes of unprocessed text 1442 * @line_len: bytes of current line _without_ newline 1443 * @text: pointer to beginning of current line 1444 * @len: number of bytes prepared in r->text_buf 1445 */ 1446 for (;;) { 1447 next = memchr(text, '\n', text_len); 1448 if (next) { 1449 line_len = next - text; 1450 } else { 1451 /* Drop truncated line(s). */ 1452 if (truncated) 1453 break; 1454 line_len = text_len; 1455 } 1456 1457 /* 1458 * Truncate the text if there is not enough space to add the 1459 * prefix and a trailing newline and a terminator. 1460 */ 1461 if (len + prefix_len + text_len + 1 + 1 > buf_size) { 1462 /* Drop even the current line if no space. */ 1463 if (len + prefix_len + line_len + 1 + 1 > buf_size) 1464 break; 1465 1466 text_len = buf_size - len - prefix_len - 1 - 1; 1467 truncated = true; 1468 } 1469 1470 memmove(text + prefix_len, text, text_len); 1471 memcpy(text, prefix, prefix_len); 1472 1473 /* 1474 * Increment the prepared length to include the text and 1475 * prefix that were just moved+copied. Also increment for the 1476 * newline at the end of this line. If this is the last line, 1477 * there is no newline, but it will be added immediately below. 1478 */ 1479 len += prefix_len + line_len + 1; 1480 if (text_len == line_len) { 1481 /* 1482 * This is the last line. Add the trailing newline 1483 * removed in vprintk_store(). 1484 */ 1485 text[prefix_len + line_len] = '\n'; 1486 break; 1487 } 1488 1489 /* 1490 * Advance beyond the added prefix and the related line with 1491 * its newline. 1492 */ 1493 text += prefix_len + line_len + 1; 1494 1495 /* 1496 * The remaining text has only decreased by the line with its 1497 * newline. 1498 * 1499 * Note that @text_len can become zero. It happens when @text 1500 * ended with a newline (either due to truncation or the 1501 * original string ending with "\n\n"). The loop is correctly 1502 * repeated and (if not truncated) an empty line with a prefix 1503 * will be prepared. 1504 */ 1505 text_len -= line_len + 1; 1506 } 1507 1508 /* 1509 * If a buffer was provided, it will be terminated. Space for the 1510 * string terminator is guaranteed to be available. The terminator is 1511 * not counted in the return value. 1512 */ 1513 if (buf_size > 0) 1514 r->text_buf[len] = 0; 1515 1516 return len; 1517 } 1518 1519 static size_t get_record_print_text_size(struct printk_info *info, 1520 unsigned int line_count, 1521 bool syslog, bool time) 1522 { 1523 char prefix[PRINTK_PREFIX_MAX]; 1524 size_t prefix_len; 1525 1526 prefix_len = info_print_prefix(info, syslog, time, prefix); 1527 1528 /* 1529 * Each line will be preceded with a prefix. The intermediate 1530 * newlines are already within the text, but a final trailing 1531 * newline will be added. 
1532 */ 1533 return ((prefix_len * line_count) + info->text_len + 1); 1534 } 1535 1536 /* 1537 * Beginning with @start_seq, find the first record where it and all following 1538 * records up to (but not including) @max_seq fit into @size. 1539 * 1540 * @max_seq is simply an upper bound and does not need to exist. If the caller 1541 * does not require an upper bound, -1 can be used for @max_seq. 1542 */ 1543 static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size, 1544 bool syslog, bool time) 1545 { 1546 struct printk_info info; 1547 unsigned int line_count; 1548 size_t len = 0; 1549 u64 seq; 1550 1551 /* Determine the size of the records up to @max_seq. */ 1552 prb_for_each_info(start_seq, prb, seq, &info, &line_count) { 1553 if (info.seq >= max_seq) 1554 break; 1555 len += get_record_print_text_size(&info, line_count, syslog, time); 1556 } 1557 1558 /* 1559 * Adjust the upper bound for the next loop to avoid subtracting 1560 * lengths that were never added. 1561 */ 1562 if (seq < max_seq) 1563 max_seq = seq; 1564 1565 /* 1566 * Move first record forward until length fits into the buffer. Ignore 1567 * newest messages that were not counted in the above cycle. Messages 1568 * might appear and get lost in the meantime. This is a best effort 1569 * that prevents an infinite loop that could occur with a retry. 1570 */ 1571 prb_for_each_info(start_seq, prb, seq, &info, &line_count) { 1572 if (len <= size || info.seq >= max_seq) 1573 break; 1574 len -= get_record_print_text_size(&info, line_count, syslog, time); 1575 } 1576 1577 return seq; 1578 } 1579 1580 /* The caller is responsible for making sure @size is greater than 0. */ 1581 static int syslog_print(char __user *buf, int size) 1582 { 1583 struct printk_info info; 1584 struct printk_record r; 1585 char *text; 1586 int len = 0; 1587 u64 seq; 1588 1589 text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL); 1590 if (!text) 1591 return -ENOMEM; 1592 1593 prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX); 1594 1595 mutex_lock(&syslog_lock); 1596 1597 /* 1598 * Wait for the @syslog_seq record to be available. @syslog_seq may 1599 * change while waiting. 1600 */ 1601 do { 1602 seq = syslog_seq; 1603 1604 mutex_unlock(&syslog_lock); 1605 /* 1606 * Guarantee this task is visible on the waitqueue before 1607 * checking the wake condition. 1608 * 1609 * The full memory barrier within set_current_state() of 1610 * prepare_to_wait_event() pairs with the full memory barrier 1611 * within wq_has_sleeper(). 1612 * 1613 * This pairs with __wake_up_klogd:A. 1614 */ 1615 len = wait_event_interruptible(log_wait, 1616 prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */ 1617 mutex_lock(&syslog_lock); 1618 1619 if (len) 1620 goto out; 1621 } while (syslog_seq != seq); 1622 1623 /* 1624 * Copy records that fit into the buffer. The above cycle makes sure 1625 * that the first record is always available. 1626 */ 1627 do { 1628 size_t n; 1629 size_t skip; 1630 int err; 1631 1632 if (!prb_read_valid(prb, syslog_seq, &r)) 1633 break; 1634 1635 if (r.info->seq != syslog_seq) { 1636 /* message is gone, move to next valid one */ 1637 syslog_seq = r.info->seq; 1638 syslog_partial = 0; 1639 } 1640 1641 /* 1642 * To keep reading/counting partial line consistent, 1643 * use printk_time value as of the beginning of a line. 
1644 */ 1645 if (!syslog_partial) 1646 syslog_time = printk_time; 1647 1648 skip = syslog_partial; 1649 n = record_print_text(&r, true, syslog_time); 1650 if (n - syslog_partial <= size) { 1651 /* message fits into buffer, move forward */ 1652 syslog_seq = r.info->seq + 1; 1653 n -= syslog_partial; 1654 syslog_partial = 0; 1655 } else if (!len){ 1656 /* partial read(), remember position */ 1657 n = size; 1658 syslog_partial += n; 1659 } else 1660 n = 0; 1661 1662 if (!n) 1663 break; 1664 1665 mutex_unlock(&syslog_lock); 1666 err = copy_to_user(buf, text + skip, n); 1667 mutex_lock(&syslog_lock); 1668 1669 if (err) { 1670 if (!len) 1671 len = -EFAULT; 1672 break; 1673 } 1674 1675 len += n; 1676 size -= n; 1677 buf += n; 1678 } while (size); 1679 out: 1680 mutex_unlock(&syslog_lock); 1681 kfree(text); 1682 return len; 1683 } 1684 1685 static int syslog_print_all(char __user *buf, int size, bool clear) 1686 { 1687 struct printk_info info; 1688 struct printk_record r; 1689 char *text; 1690 int len = 0; 1691 u64 seq; 1692 bool time; 1693 1694 text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL); 1695 if (!text) 1696 return -ENOMEM; 1697 1698 time = printk_time; 1699 /* 1700 * Find first record that fits, including all following records, 1701 * into the user-provided buffer for this dump. 1702 */ 1703 seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1, 1704 size, true, time); 1705 1706 prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX); 1707 1708 prb_for_each_record(seq, prb, seq, &r) { 1709 int textlen; 1710 1711 textlen = record_print_text(&r, true, time); 1712 1713 if (len + textlen > size) { 1714 seq--; 1715 break; 1716 } 1717 1718 if (copy_to_user(buf + len, text, textlen)) 1719 len = -EFAULT; 1720 else 1721 len += textlen; 1722 1723 if (len < 0) 1724 break; 1725 } 1726 1727 if (clear) { 1728 mutex_lock(&syslog_lock); 1729 latched_seq_write(&clear_seq, seq); 1730 mutex_unlock(&syslog_lock); 1731 } 1732 1733 kfree(text); 1734 return len; 1735 } 1736 1737 static void syslog_clear(void) 1738 { 1739 mutex_lock(&syslog_lock); 1740 latched_seq_write(&clear_seq, prb_next_seq(prb)); 1741 mutex_unlock(&syslog_lock); 1742 } 1743 1744 int do_syslog(int type, char __user *buf, int len, int source) 1745 { 1746 struct printk_info info; 1747 bool clear = false; 1748 static int saved_console_loglevel = LOGLEVEL_DEFAULT; 1749 int error; 1750 1751 error = check_syslog_permissions(type, source); 1752 if (error) 1753 return error; 1754 1755 switch (type) { 1756 case SYSLOG_ACTION_CLOSE: /* Close log */ 1757 break; 1758 case SYSLOG_ACTION_OPEN: /* Open log */ 1759 break; 1760 case SYSLOG_ACTION_READ: /* Read from log */ 1761 if (!buf || len < 0) 1762 return -EINVAL; 1763 if (!len) 1764 return 0; 1765 if (!access_ok(buf, len)) 1766 return -EFAULT; 1767 error = syslog_print(buf, len); 1768 break; 1769 /* Read/clear last kernel messages */ 1770 case SYSLOG_ACTION_READ_CLEAR: 1771 clear = true; 1772 fallthrough; 1773 /* Read last kernel messages */ 1774 case SYSLOG_ACTION_READ_ALL: 1775 if (!buf || len < 0) 1776 return -EINVAL; 1777 if (!len) 1778 return 0; 1779 if (!access_ok(buf, len)) 1780 return -EFAULT; 1781 error = syslog_print_all(buf, len, clear); 1782 break; 1783 /* Clear ring buffer */ 1784 case SYSLOG_ACTION_CLEAR: 1785 syslog_clear(); 1786 break; 1787 /* Disable logging to console */ 1788 case SYSLOG_ACTION_CONSOLE_OFF: 1789 if (saved_console_loglevel == LOGLEVEL_DEFAULT) 1790 saved_console_loglevel = console_loglevel; 1791 console_loglevel = minimum_console_loglevel; 1792 break; 1793 /* 
int do_syslog(int type, char __user *buf, int len, int source)
{
        struct printk_info info;
        bool clear = false;
        static int saved_console_loglevel = LOGLEVEL_DEFAULT;
        int error;

        error = check_syslog_permissions(type, source);
        if (error)
                return error;

        switch (type) {
        case SYSLOG_ACTION_CLOSE:       /* Close log */
                break;
        case SYSLOG_ACTION_OPEN:        /* Open log */
                break;
        case SYSLOG_ACTION_READ:        /* Read from log */
                if (!buf || len < 0)
                        return -EINVAL;
                if (!len)
                        return 0;
                if (!access_ok(buf, len))
                        return -EFAULT;
                error = syslog_print(buf, len);
                break;
        /* Read/clear last kernel messages */
        case SYSLOG_ACTION_READ_CLEAR:
                clear = true;
                fallthrough;
        /* Read last kernel messages */
        case SYSLOG_ACTION_READ_ALL:
                if (!buf || len < 0)
                        return -EINVAL;
                if (!len)
                        return 0;
                if (!access_ok(buf, len))
                        return -EFAULT;
                error = syslog_print_all(buf, len, clear);
                break;
        /* Clear ring buffer */
        case SYSLOG_ACTION_CLEAR:
                syslog_clear();
                break;
        /* Disable logging to console */
        case SYSLOG_ACTION_CONSOLE_OFF:
                if (saved_console_loglevel == LOGLEVEL_DEFAULT)
                        saved_console_loglevel = console_loglevel;
                console_loglevel = minimum_console_loglevel;
                break;
        /* Enable logging to console */
        case SYSLOG_ACTION_CONSOLE_ON:
                if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
                        console_loglevel = saved_console_loglevel;
                        saved_console_loglevel = LOGLEVEL_DEFAULT;
                }
                break;
        /* Set level of messages printed to console */
        case SYSLOG_ACTION_CONSOLE_LEVEL:
                if (len < 1 || len > 8)
                        return -EINVAL;
                if (len < minimum_console_loglevel)
                        len = minimum_console_loglevel;
                console_loglevel = len;
                /* Implicitly re-enable logging to console */
                saved_console_loglevel = LOGLEVEL_DEFAULT;
                break;
        /* Number of chars in the log buffer */
        case SYSLOG_ACTION_SIZE_UNREAD:
                mutex_lock(&syslog_lock);
                if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
                        /* No unread messages. */
                        mutex_unlock(&syslog_lock);
                        return 0;
                }
                if (info.seq != syslog_seq) {
                        /* messages are gone, move to first one */
                        syslog_seq = info.seq;
                        syslog_partial = 0;
                }
                if (source == SYSLOG_FROM_PROC) {
                        /*
                         * Short-cut for poll(/proc/kmsg) which simply checks
                         * for pending data, not the size; return the count of
                         * records, not the length.
                         */
                        error = prb_next_seq(prb) - syslog_seq;
                } else {
                        bool time = syslog_partial ? syslog_time : printk_time;
                        unsigned int line_count;
                        u64 seq;

                        prb_for_each_info(syslog_seq, prb, seq, &info,
                                          &line_count) {
                                error += get_record_print_text_size(&info, line_count,
                                                                    true, time);
                                time = printk_time;
                        }
                        error -= syslog_partial;
                }
                mutex_unlock(&syslog_lock);
                break;
        /* Size of the log buffer */
        case SYSLOG_ACTION_SIZE_BUFFER:
                error = log_buf_len;
                break;
        default:
                error = -EINVAL;
                break;
        }

        return error;
}

SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
{
        return do_syslog(type, buf, len, SYSLOG_FROM_READER);
}
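
/*
 * Illustrative sketch (userspace, not part of this file): the actions
 * handled above are reachable through glibc's klogctl(3), which passes
 * the same action numbers as syslog(2): SYSLOG_ACTION_SIZE_BUFFER is 10,
 * SYSLOG_ACTION_READ_ALL is 3 and SYSLOG_ACTION_CONSOLE_LEVEL is 8. The
 * final call sets the console loglevel to 4 as an example.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/klog.h>
 *
 *	static void dump_kmsg_and_quiet_console(void)
 *	{
 *		int len = klogctl(10, NULL, 0);
 *		char *buf;
 *
 *		if (len <= 0 || !(buf = malloc(len)))
 *			return;
 *		len = klogctl(3, buf, len);
 *		if (len > 0)
 *			fwrite(buf, 1, len, stdout);
 *		free(buf);
 *		klogctl(8, NULL, 4);
 *	}
 */
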
1896 */ 1897 if (panic_in_progress()) 1898 goto lockdep; 1899 1900 raw_spin_lock(&console_owner_lock); 1901 console_owner = current; 1902 raw_spin_unlock(&console_owner_lock); 1903 1904 lockdep: 1905 /* The waiter may spin on us after setting console_owner */ 1906 spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_); 1907 } 1908 1909 /** 1910 * console_lock_spinning_disable_and_check - mark end of code where another 1911 * thread was able to busy wait and check if there is a waiter 1912 * @cookie: cookie returned from console_srcu_read_lock() 1913 * 1914 * This is called at the end of the section where spinning is allowed. 1915 * It has two functions. First, it is a signal that it is no longer 1916 * safe to start busy waiting for the lock. Second, it checks if 1917 * there is a busy waiter and passes the lock rights to her. 1918 * 1919 * Important: Callers lose both the console_lock and the SRCU read lock if 1920 * there was a busy waiter. They must not touch items synchronized by 1921 * console_lock or SRCU read lock in this case. 1922 * 1923 * Return: 1 if the lock rights were passed, 0 otherwise. 1924 */ 1925 int console_lock_spinning_disable_and_check(int cookie) 1926 { 1927 int waiter; 1928 1929 /* 1930 * Ignore spinning waiters during panic() because they might get stopped 1931 * or blocked at any time, 1932 * 1933 * It is safe because nobody is allowed to start spinning during panic 1934 * in the first place. If there has been a waiter then non panic CPUs 1935 * might stay spinning. They would get stopped anyway. The panic context 1936 * will never start spinning and an interrupted spin on panic CPU will 1937 * never continue. 1938 */ 1939 if (panic_in_progress()) { 1940 /* Keep lockdep happy. */ 1941 spin_release(&console_owner_dep_map, _THIS_IP_); 1942 return 0; 1943 } 1944 1945 raw_spin_lock(&console_owner_lock); 1946 waiter = READ_ONCE(console_waiter); 1947 console_owner = NULL; 1948 raw_spin_unlock(&console_owner_lock); 1949 1950 if (!waiter) { 1951 spin_release(&console_owner_dep_map, _THIS_IP_); 1952 return 0; 1953 } 1954 1955 /* The waiter is now free to continue */ 1956 WRITE_ONCE(console_waiter, false); 1957 1958 spin_release(&console_owner_dep_map, _THIS_IP_); 1959 1960 /* 1961 * Preserve lockdep lock ordering. Release the SRCU read lock before 1962 * releasing the console_lock. 1963 */ 1964 console_srcu_read_unlock(cookie); 1965 1966 /* 1967 * Hand off console_lock to waiter. The waiter will perform 1968 * the up(). After this, the waiter is the console_lock owner. 1969 */ 1970 mutex_release(&console_lock_dep_map, _THIS_IP_); 1971 return 1; 1972 } 1973 1974 /** 1975 * console_trylock_spinning - try to get console_lock by busy waiting 1976 * 1977 * This allows to busy wait for the console_lock when the current 1978 * owner is running in specially marked sections. It means that 1979 * the current owner is running and cannot reschedule until it 1980 * is ready to lose the lock. 1981 * 1982 * Return: 1 if we got the lock, 0 othrewise 1983 */ 1984 static int console_trylock_spinning(void) 1985 { 1986 struct task_struct *owner = NULL; 1987 bool waiter; 1988 bool spin = false; 1989 unsigned long flags; 1990 1991 if (console_trylock()) 1992 return 1; 1993 1994 /* 1995 * It's unsafe to spin once a panic has begun. If we are the 1996 * panic CPU, we may have already halted the owner of the 1997 * console_sem. If we are not the panic CPU, then we should 1998 * avoid taking console_sem, so the panic CPU has a better 1999 * chance of cleanly acquiring it later. 
2000 */ 2001 if (panic_in_progress()) 2002 return 0; 2003 2004 printk_safe_enter_irqsave(flags); 2005 2006 raw_spin_lock(&console_owner_lock); 2007 owner = READ_ONCE(console_owner); 2008 waiter = READ_ONCE(console_waiter); 2009 if (!waiter && owner && owner != current) { 2010 WRITE_ONCE(console_waiter, true); 2011 spin = true; 2012 } 2013 raw_spin_unlock(&console_owner_lock); 2014 2015 /* 2016 * If there is an active printk() writing to the 2017 * consoles, instead of having it write our data too, 2018 * see if we can offload that load from the active 2019 * printer, and do some printing ourselves. 2020 * Go into a spin only if there isn't already a waiter 2021 * spinning, and there is an active printer, and 2022 * that active printer isn't us (recursive printk?). 2023 */ 2024 if (!spin) { 2025 printk_safe_exit_irqrestore(flags); 2026 return 0; 2027 } 2028 2029 /* We spin waiting for the owner to release us */ 2030 spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_); 2031 /* Owner will clear console_waiter on hand off */ 2032 while (READ_ONCE(console_waiter)) 2033 cpu_relax(); 2034 spin_release(&console_owner_dep_map, _THIS_IP_); 2035 2036 printk_safe_exit_irqrestore(flags); 2037 /* 2038 * The owner passed the console lock to us. 2039 * Since we did not spin on console lock, annotate 2040 * this as a trylock. Otherwise lockdep will 2041 * complain. 2042 */ 2043 mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_); 2044 2045 /* 2046 * Update @console_may_schedule for trylock because the previous 2047 * owner may have been schedulable. 2048 */ 2049 console_may_schedule = 0; 2050 2051 return 1; 2052 } 2053 2054 /* 2055 * Recursion is tracked separately on each CPU. If NMIs are supported, an 2056 * additional NMI context per CPU is also separately tracked. Until per-CPU 2057 * is available, a separate "early tracking" is performed. 2058 */ 2059 static DEFINE_PER_CPU(u8, printk_count); 2060 static u8 printk_count_early; 2061 #ifdef CONFIG_HAVE_NMI 2062 static DEFINE_PER_CPU(u8, printk_count_nmi); 2063 static u8 printk_count_nmi_early; 2064 #endif 2065 2066 /* 2067 * Recursion is limited to keep the output sane. printk() should not require 2068 * more than 1 level of recursion (allowing, for example, printk() to trigger 2069 * a WARN), but a higher value is used in case some printk-internal errors 2070 * exist, such as the ringbuffer validation checks failing. 2071 */ 2072 #define PRINTK_MAX_RECURSION 3 2073 2074 /* 2075 * Return a pointer to the dedicated counter for the CPU+context of the 2076 * caller. 2077 */ 2078 static u8 *__printk_recursion_counter(void) 2079 { 2080 #ifdef CONFIG_HAVE_NMI 2081 if (in_nmi()) { 2082 if (printk_percpu_data_ready()) 2083 return this_cpu_ptr(&printk_count_nmi); 2084 return &printk_count_nmi_early; 2085 } 2086 #endif 2087 if (printk_percpu_data_ready()) 2088 return this_cpu_ptr(&printk_count); 2089 return &printk_count_early; 2090 } 2091 2092 /* 2093 * Enter recursion tracking. Interrupts are disabled to simplify tracking. 2094 * The caller must check the boolean return value to see if the recursion is 2095 * allowed. On failure, interrupts are not disabled. 2096 * 2097 * @recursion_ptr must be a variable of type (u8 *) and is the same variable 2098 * that is passed to printk_exit_irqrestore(). 
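 *
 * A minimal usage sketch, mirroring the caller pattern of
 * vprintk_store() below (the "emit" step is a placeholder):
 *
 *	u8 *recursion_ptr;
 *	unsigned long irqflags;
 *
 *	if (!printk_enter_irqsave(recursion_ptr, irqflags))
 *		return 0;
 *	... store or emit the message ...
 *	printk_exit_irqrestore(recursion_ptr, irqflags);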
2099 */ 2100 #define printk_enter_irqsave(recursion_ptr, flags) \ 2101 ({ \ 2102 bool success = true; \ 2103 \ 2104 typecheck(u8 *, recursion_ptr); \ 2105 local_irq_save(flags); \ 2106 (recursion_ptr) = __printk_recursion_counter(); \ 2107 if (*(recursion_ptr) > PRINTK_MAX_RECURSION) { \ 2108 local_irq_restore(flags); \ 2109 success = false; \ 2110 } else { \ 2111 (*(recursion_ptr))++; \ 2112 } \ 2113 success; \ 2114 }) 2115 2116 /* Exit recursion tracking, restoring interrupts. */ 2117 #define printk_exit_irqrestore(recursion_ptr, flags) \ 2118 do { \ 2119 typecheck(u8 *, recursion_ptr); \ 2120 (*(recursion_ptr))--; \ 2121 local_irq_restore(flags); \ 2122 } while (0) 2123 2124 int printk_delay_msec __read_mostly; 2125 2126 static inline void printk_delay(int level) 2127 { 2128 boot_delay_msec(level); 2129 2130 if (unlikely(printk_delay_msec)) { 2131 int m = printk_delay_msec; 2132 2133 while (m--) { 2134 mdelay(1); 2135 touch_nmi_watchdog(); 2136 } 2137 } 2138 } 2139 2140 static inline u32 printk_caller_id(void) 2141 { 2142 return in_task() ? task_pid_nr(current) : 2143 0x80000000 + smp_processor_id(); 2144 } 2145 2146 /** 2147 * printk_parse_prefix - Parse level and control flags. 2148 * 2149 * @text: The terminated text message. 2150 * @level: A pointer to the current level value, will be updated. 2151 * @flags: A pointer to the current printk_info flags, will be updated. 2152 * 2153 * @level may be NULL if the caller is not interested in the parsed value. 2154 * Otherwise the variable pointed to by @level must be set to 2155 * LOGLEVEL_DEFAULT in order to be updated with the parsed value. 2156 * 2157 * @flags may be NULL if the caller is not interested in the parsed value. 2158 * Otherwise the variable pointed to by @flags will be OR'd with the parsed 2159 * value. 2160 * 2161 * Return: The length of the parsed level and control flags. 2162 */ 2163 u16 printk_parse_prefix(const char *text, int *level, 2164 enum printk_info_flags *flags) 2165 { 2166 u16 prefix_len = 0; 2167 int kern_level; 2168 2169 while (*text) { 2170 kern_level = printk_get_level(text); 2171 if (!kern_level) 2172 break; 2173 2174 switch (kern_level) { 2175 case '0' ... '7': 2176 if (level && *level == LOGLEVEL_DEFAULT) 2177 *level = kern_level - '0'; 2178 break; 2179 case 'c': /* KERN_CONT */ 2180 if (flags) 2181 *flags |= LOG_CONT; 2182 } 2183 2184 prefix_len += 2; 2185 text += 2; 2186 } 2187 2188 return prefix_len; 2189 } 2190 2191 __printf(5, 0) 2192 static u16 printk_sprint(char *text, u16 size, int facility, 2193 enum printk_info_flags *flags, const char *fmt, 2194 va_list args) 2195 { 2196 u16 text_len; 2197 2198 text_len = vscnprintf(text, size, fmt, args); 2199 2200 /* Mark and strip a trailing newline. */ 2201 if (text_len && text[text_len - 1] == '\n') { 2202 text_len--; 2203 *flags |= LOG_NEWLINE; 2204 } 2205 2206 /* Strip log level and control flags. 
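 *
 * Example (sketch): a facility-0 message whose text begins with the
 * two-byte sequence "\001" '4' (KERN_WARNING) yields prefix_len == 2
 * from printk_parse_prefix(), and the memmove() below drops those bytes.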
*/ 2207 if (facility == 0) { 2208 u16 prefix_len; 2209 2210 prefix_len = printk_parse_prefix(text, NULL, NULL); 2211 if (prefix_len) { 2212 text_len -= prefix_len; 2213 memmove(text, text + prefix_len, text_len); 2214 } 2215 } 2216 2217 trace_console(text, text_len); 2218 2219 return text_len; 2220 } 2221 2222 __printf(4, 0) 2223 int vprintk_store(int facility, int level, 2224 const struct dev_printk_info *dev_info, 2225 const char *fmt, va_list args) 2226 { 2227 struct prb_reserved_entry e; 2228 enum printk_info_flags flags = 0; 2229 struct printk_record r; 2230 unsigned long irqflags; 2231 u16 trunc_msg_len = 0; 2232 char prefix_buf[8]; 2233 u8 *recursion_ptr; 2234 u16 reserve_size; 2235 va_list args2; 2236 u32 caller_id; 2237 u16 text_len; 2238 int ret = 0; 2239 u64 ts_nsec; 2240 2241 if (!printk_enter_irqsave(recursion_ptr, irqflags)) 2242 return 0; 2243 2244 /* 2245 * Since the duration of printk() can vary depending on the message 2246 * and state of the ringbuffer, grab the timestamp now so that it is 2247 * close to the call of printk(). This provides a more deterministic 2248 * timestamp with respect to the caller. 2249 */ 2250 ts_nsec = local_clock(); 2251 2252 caller_id = printk_caller_id(); 2253 2254 /* 2255 * The sprintf needs to come first since the syslog prefix might be 2256 * passed in as a parameter. An extra byte must be reserved so that 2257 * later the vscnprintf() into the reserved buffer has room for the 2258 * terminating '\0', which is not counted by vsnprintf(). 2259 */ 2260 va_copy(args2, args); 2261 reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1; 2262 va_end(args2); 2263 2264 if (reserve_size > PRINTKRB_RECORD_MAX) 2265 reserve_size = PRINTKRB_RECORD_MAX; 2266 2267 /* Extract log level or control flags. */ 2268 if (facility == 0) 2269 printk_parse_prefix(&prefix_buf[0], &level, &flags); 2270 2271 if (level == LOGLEVEL_DEFAULT) 2272 level = default_message_loglevel; 2273 2274 if (dev_info) 2275 flags |= LOG_NEWLINE; 2276 2277 if (flags & LOG_CONT) { 2278 prb_rec_init_wr(&r, reserve_size); 2279 if (prb_reserve_in_last(&e, prb, &r, caller_id, PRINTKRB_RECORD_MAX)) { 2280 text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size, 2281 facility, &flags, fmt, args); 2282 r.info->text_len += text_len; 2283 2284 if (flags & LOG_NEWLINE) { 2285 r.info->flags |= LOG_NEWLINE; 2286 prb_final_commit(&e); 2287 } else { 2288 prb_commit(&e); 2289 } 2290 2291 ret = text_len; 2292 goto out; 2293 } 2294 } 2295 2296 /* 2297 * Explicitly initialize the record before every prb_reserve() call. 2298 * prb_reserve_in_last() and prb_reserve() purposely invalidate the 2299 * structure when they fail. 
2300 */ 2301 prb_rec_init_wr(&r, reserve_size); 2302 if (!prb_reserve(&e, prb, &r)) { 2303 /* truncate the message if it is too long for empty buffer */ 2304 truncate_msg(&reserve_size, &trunc_msg_len); 2305 2306 prb_rec_init_wr(&r, reserve_size + trunc_msg_len); 2307 if (!prb_reserve(&e, prb, &r)) 2308 goto out; 2309 } 2310 2311 /* fill message */ 2312 text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args); 2313 if (trunc_msg_len) 2314 memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len); 2315 r.info->text_len = text_len + trunc_msg_len; 2316 r.info->facility = facility; 2317 r.info->level = level & 7; 2318 r.info->flags = flags & 0x1f; 2319 r.info->ts_nsec = ts_nsec; 2320 r.info->caller_id = caller_id; 2321 if (dev_info) 2322 memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info)); 2323 2324 /* A message without a trailing newline can be continued. */ 2325 if (!(flags & LOG_NEWLINE)) 2326 prb_commit(&e); 2327 else 2328 prb_final_commit(&e); 2329 2330 ret = text_len + trunc_msg_len; 2331 out: 2332 printk_exit_irqrestore(recursion_ptr, irqflags); 2333 return ret; 2334 } 2335 2336 /* 2337 * This acts as a one-way switch to allow legacy consoles to print from 2338 * the printk() caller context on a panic CPU. It also attempts to flush 2339 * the legacy consoles in this context. 2340 */ 2341 void printk_legacy_allow_panic_sync(void) 2342 { 2343 struct console_flush_type ft; 2344 2345 legacy_allow_panic_sync = true; 2346 2347 printk_get_console_flush_type(&ft); 2348 if (ft.legacy_direct) { 2349 if (console_trylock()) 2350 console_unlock(); 2351 } 2352 } 2353 2354 asmlinkage int vprintk_emit(int facility, int level, 2355 const struct dev_printk_info *dev_info, 2356 const char *fmt, va_list args) 2357 { 2358 struct console_flush_type ft; 2359 int printed_len; 2360 2361 /* Suppress unimportant messages after panic happens */ 2362 if (unlikely(suppress_printk)) 2363 return 0; 2364 2365 /* 2366 * The messages on the panic CPU are the most important. If 2367 * non-panic CPUs are generating any messages, they will be 2368 * silently dropped. 2369 */ 2370 if (other_cpu_in_panic() && !panic_triggering_all_cpu_backtrace) 2371 return 0; 2372 2373 printk_get_console_flush_type(&ft); 2374 2375 /* If called from the scheduler, we can not call up(). */ 2376 if (level == LOGLEVEL_SCHED) { 2377 level = LOGLEVEL_DEFAULT; 2378 ft.legacy_offload |= ft.legacy_direct; 2379 ft.legacy_direct = false; 2380 } 2381 2382 printk_delay(level); 2383 2384 printed_len = vprintk_store(facility, level, dev_info, fmt, args); 2385 2386 if (ft.nbcon_atomic) 2387 nbcon_atomic_flush_pending(); 2388 2389 if (ft.nbcon_offload) 2390 nbcon_kthreads_wake(); 2391 2392 if (ft.legacy_direct) { 2393 /* 2394 * The caller may be holding system-critical or 2395 * timing-sensitive locks. Disable preemption during 2396 * printing of all remaining records to all consoles so that 2397 * this context can return as soon as possible. Hopefully 2398 * another printk() caller will take over the printing. 2399 */ 2400 preempt_disable(); 2401 /* 2402 * Try to acquire and then immediately release the console 2403 * semaphore. The release will print out buffers. With the 2404 * spinning variant, this context tries to take over the 2405 * printing from another printing context. 
2406 */ 2407 if (console_trylock_spinning()) 2408 console_unlock(); 2409 preempt_enable(); 2410 } 2411 2412 if (ft.legacy_offload) 2413 defer_console_output(); 2414 else 2415 wake_up_klogd(); 2416 2417 return printed_len; 2418 } 2419 EXPORT_SYMBOL(vprintk_emit); 2420 2421 int vprintk_default(const char *fmt, va_list args) 2422 { 2423 return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args); 2424 } 2425 EXPORT_SYMBOL_GPL(vprintk_default); 2426 2427 asmlinkage __visible int _printk(const char *fmt, ...) 2428 { 2429 va_list args; 2430 int r; 2431 2432 va_start(args, fmt); 2433 r = vprintk(fmt, args); 2434 va_end(args); 2435 2436 return r; 2437 } 2438 EXPORT_SYMBOL(_printk); 2439 2440 static bool pr_flush(int timeout_ms, bool reset_on_progress); 2441 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress); 2442 2443 #else /* CONFIG_PRINTK */ 2444 2445 #define printk_time false 2446 2447 #define prb_read_valid(rb, seq, r) false 2448 #define prb_first_valid_seq(rb) 0 2449 #define prb_next_seq(rb) 0 2450 2451 static u64 syslog_seq; 2452 2453 static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; } 2454 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; } 2455 2456 #endif /* CONFIG_PRINTK */ 2457 2458 #ifdef CONFIG_EARLY_PRINTK 2459 struct console *early_console; 2460 2461 asmlinkage __visible void early_printk(const char *fmt, ...) 2462 { 2463 va_list ap; 2464 char buf[512]; 2465 int n; 2466 2467 if (!early_console) 2468 return; 2469 2470 va_start(ap, fmt); 2471 n = vscnprintf(buf, sizeof(buf), fmt, ap); 2472 va_end(ap); 2473 2474 early_console->write(early_console, buf, n); 2475 } 2476 #endif 2477 2478 static void set_user_specified(struct console_cmdline *c, bool user_specified) 2479 { 2480 if (!user_specified) 2481 return; 2482 2483 /* 2484 * @c console was defined by the user on the command line. 2485 * Do not clear when added twice also by SPCR or the device tree. 2486 */ 2487 c->user_specified = true; 2488 /* At least one console defined by the user on the command line. */ 2489 console_set_on_cmdline = 1; 2490 } 2491 2492 static int __add_preferred_console(const char *name, const short idx, 2493 const char *devname, char *options, 2494 char *brl_options, bool user_specified) 2495 { 2496 struct console_cmdline *c; 2497 int i; 2498 2499 if (!name && !devname) 2500 return -EINVAL; 2501 2502 /* 2503 * We use a signed short index for struct console for device drivers to 2504 * indicate a not yet assigned index or port. However, a negative index 2505 * value is not valid when the console name and index are defined on 2506 * the command line. 2507 */ 2508 if (name && idx < 0) 2509 return -EINVAL; 2510 2511 /* 2512 * See if this tty is not yet registered, and 2513 * if we have a slot free. 
2514 */ 2515 for (i = 0, c = console_cmdline; 2516 i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]); 2517 i++, c++) { 2518 if ((name && strcmp(c->name, name) == 0 && c->index == idx) || 2519 (devname && strcmp(c->devname, devname) == 0)) { 2520 if (!brl_options) 2521 preferred_console = i; 2522 set_user_specified(c, user_specified); 2523 return 0; 2524 } 2525 } 2526 if (i == MAX_CMDLINECONSOLES) 2527 return -E2BIG; 2528 if (!brl_options) 2529 preferred_console = i; 2530 if (name) 2531 strscpy(c->name, name); 2532 if (devname) 2533 strscpy(c->devname, devname); 2534 c->options = options; 2535 set_user_specified(c, user_specified); 2536 braille_set_options(c, brl_options); 2537 2538 c->index = idx; 2539 return 0; 2540 } 2541 2542 static int __init console_msg_format_setup(char *str) 2543 { 2544 if (!strcmp(str, "syslog")) 2545 console_msg_format = MSG_FORMAT_SYSLOG; 2546 if (!strcmp(str, "default")) 2547 console_msg_format = MSG_FORMAT_DEFAULT; 2548 return 1; 2549 } 2550 __setup("console_msg_format=", console_msg_format_setup); 2551 2552 /* 2553 * Set up a console. Called via do_early_param() in init/main.c 2554 * for each "console=" parameter in the boot command line. 2555 */ 2556 static int __init console_setup(char *str) 2557 { 2558 static_assert(sizeof(console_cmdline[0].devname) >= sizeof(console_cmdline[0].name) + 4); 2559 char buf[sizeof(console_cmdline[0].devname)]; 2560 char *brl_options = NULL; 2561 char *ttyname = NULL; 2562 char *devname = NULL; 2563 char *options; 2564 char *s; 2565 int idx; 2566 2567 /* 2568 * console="" or console=null have been suggested as a way to 2569 * disable console output. Use ttynull that has been created 2570 * for exactly this purpose. 2571 */ 2572 if (str[0] == 0 || strcmp(str, "null") == 0) { 2573 __add_preferred_console("ttynull", 0, NULL, NULL, NULL, true); 2574 return 1; 2575 } 2576 2577 if (_braille_console_setup(&str, &brl_options)) 2578 return 1; 2579 2580 /* For a DEVNAME:0.0 style console the character device is unknown early */ 2581 if (strchr(str, ':')) 2582 devname = buf; 2583 else 2584 ttyname = buf; 2585 2586 /* 2587 * Decode str into name, index, options. 2588 */ 2589 if (ttyname && isdigit(str[0])) 2590 scnprintf(buf, sizeof(buf), "ttyS%s", str); 2591 else 2592 strscpy(buf, str); 2593 2594 options = strchr(str, ','); 2595 if (options) 2596 *(options++) = 0; 2597 2598 #ifdef __sparc__ 2599 if (!strcmp(str, "ttya")) 2600 strscpy(buf, "ttyS0"); 2601 if (!strcmp(str, "ttyb")) 2602 strscpy(buf, "ttyS1"); 2603 #endif 2604 2605 for (s = buf; *s; s++) 2606 if ((ttyname && isdigit(*s)) || *s == ',') 2607 break; 2608 2609 /* @idx will get defined when devname matches. */ 2610 if (devname) 2611 idx = -1; 2612 else 2613 idx = simple_strtoul(s, NULL, 10); 2614 2615 *s = 0; 2616 2617 __add_preferred_console(ttyname, idx, devname, options, brl_options, true); 2618 return 1; 2619 } 2620 __setup("console=", console_setup); 2621 2622 /** 2623 * add_preferred_console - add a device to the list of preferred consoles. 2624 * @name: device name 2625 * @idx: device index 2626 * @options: options for this console 2627 * 2628 * The last preferred console added will be used for kernel messages 2629 * and stdin/out/err for init. Normally this is used by console_setup 2630 * above to handle user-supplied console arguments; however it can also 2631 * be used by arch-specific code either to override the user or more 2632 * commonly to provide a default console (ie from PROM variables) when 2633 * the user has not supplied one. 
2634 */ 2635 int add_preferred_console(const char *name, const short idx, char *options) 2636 { 2637 return __add_preferred_console(name, idx, NULL, options, NULL, false); 2638 } 2639 2640 /** 2641 * match_devname_and_update_preferred_console - Update a preferred console 2642 * when matching devname is found. 2643 * @devname: DEVNAME:0.0 style device name 2644 * @name: Name of the corresponding console driver, e.g. "ttyS" 2645 * @idx: Console index, e.g. port number. 2646 * 2647 * The function checks whether a device with the given @devname is 2648 * preferred via the console=DEVNAME:0.0 command line option. 2649 * It fills the missing console driver name and console index 2650 * so that a later register_console() call could find (match) 2651 * and enable this device. 2652 * 2653 * It might be used when a driver subsystem initializes particular 2654 * devices with already known DEVNAME:0.0 style names. And it 2655 * could predict which console driver name and index this device 2656 * would later get associated with. 2657 * 2658 * Return: 0 on success, negative error code on failure. 2659 */ 2660 int match_devname_and_update_preferred_console(const char *devname, 2661 const char *name, 2662 const short idx) 2663 { 2664 struct console_cmdline *c = console_cmdline; 2665 int i; 2666 2667 if (!devname || !strlen(devname) || !name || !strlen(name) || idx < 0) 2668 return -EINVAL; 2669 2670 for (i = 0; i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]); 2671 i++, c++) { 2672 if (!strcmp(devname, c->devname)) { 2673 pr_info("associate the preferred console \"%s\" with \"%s%d\"\n", 2674 devname, name, idx); 2675 strscpy(c->name, name); 2676 c->index = idx; 2677 return 0; 2678 } 2679 } 2680 2681 return -ENOENT; 2682 } 2683 EXPORT_SYMBOL_GPL(match_devname_and_update_preferred_console); 2684 2685 bool console_suspend_enabled = true; 2686 EXPORT_SYMBOL(console_suspend_enabled); 2687 2688 static int __init console_suspend_disable(char *str) 2689 { 2690 console_suspend_enabled = false; 2691 return 1; 2692 } 2693 __setup("no_console_suspend", console_suspend_disable); 2694 module_param_named(console_suspend, console_suspend_enabled, 2695 bool, S_IRUGO | S_IWUSR); 2696 MODULE_PARM_DESC(console_suspend, "suspend console during suspend" 2697 " and hibernate operations"); 2698 2699 static bool printk_console_no_auto_verbose; 2700 2701 void console_verbose(void) 2702 { 2703 if (console_loglevel && !printk_console_no_auto_verbose) 2704 console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; 2705 } 2706 EXPORT_SYMBOL_GPL(console_verbose); 2707 2708 module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644); 2709 MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc"); 2710 2711 /** 2712 * suspend_console - suspend the console subsystem 2713 * 2714 * This disables printk() while we go into suspend states 2715 */ 2716 void suspend_console(void) 2717 { 2718 struct console *con; 2719 2720 if (!console_suspend_enabled) 2721 return; 2722 pr_info("Suspending console(s) (use no_console_suspend to debug)\n"); 2723 pr_flush(1000, true); 2724 2725 console_list_lock(); 2726 for_each_console(con) 2727 console_srcu_write_flags(con, con->flags | CON_SUSPENDED); 2728 console_list_unlock(); 2729 2730 /* 2731 * Ensure that all SRCU list walks have completed. All printing 2732 * contexts must be able to see that they are suspended so that it 2733 * is guaranteed that all printing has stopped when this function 2734 * completes. 
2735 */ 2736 synchronize_srcu(&console_srcu); 2737 } 2738 2739 void resume_console(void) 2740 { 2741 struct console_flush_type ft; 2742 struct console *con; 2743 2744 if (!console_suspend_enabled) 2745 return; 2746 2747 console_list_lock(); 2748 for_each_console(con) 2749 console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED); 2750 console_list_unlock(); 2751 2752 /* 2753 * Ensure that all SRCU list walks have completed. All printing 2754 * contexts must be able to see they are no longer suspended so 2755 * that they are guaranteed to wake up and resume printing. 2756 */ 2757 synchronize_srcu(&console_srcu); 2758 2759 printk_get_console_flush_type(&ft); 2760 if (ft.nbcon_offload) 2761 nbcon_kthreads_wake(); 2762 if (ft.legacy_offload) 2763 defer_console_output(); 2764 2765 pr_flush(1000, true); 2766 } 2767 2768 /** 2769 * console_cpu_notify - print deferred console messages after CPU hotplug 2770 * @cpu: unused 2771 * 2772 * If printk() is called from a CPU that is not online yet, the messages 2773 * will be printed on the console only if there are CON_ANYTIME consoles. 2774 * This function is called when a new CPU comes online (or fails to come 2775 * up) or goes offline. 2776 */ 2777 static int console_cpu_notify(unsigned int cpu) 2778 { 2779 struct console_flush_type ft; 2780 2781 if (!cpuhp_tasks_frozen) { 2782 printk_get_console_flush_type(&ft); 2783 if (ft.nbcon_atomic) 2784 nbcon_atomic_flush_pending(); 2785 if (ft.legacy_direct) { 2786 if (console_trylock()) 2787 console_unlock(); 2788 } 2789 } 2790 return 0; 2791 } 2792 2793 /** 2794 * console_lock - block the console subsystem from printing 2795 * 2796 * Acquires a lock which guarantees that no consoles will 2797 * be in or enter their write() callback. 2798 * 2799 * Can sleep, returns nothing. 2800 */ 2801 void console_lock(void) 2802 { 2803 might_sleep(); 2804 2805 /* On panic, the console_lock must be left to the panic cpu. */ 2806 while (other_cpu_in_panic()) 2807 msleep(1000); 2808 2809 down_console_sem(); 2810 console_locked = 1; 2811 console_may_schedule = 1; 2812 } 2813 EXPORT_SYMBOL(console_lock); 2814 2815 /** 2816 * console_trylock - try to block the console subsystem from printing 2817 * 2818 * Try to acquire a lock which guarantees that no consoles will 2819 * be in or enter their write() callback. 2820 * 2821 * returns 1 on success, and 0 on failure to acquire the lock. 2822 */ 2823 int console_trylock(void) 2824 { 2825 /* On panic, the console_lock must be left to the panic cpu. */ 2826 if (other_cpu_in_panic()) 2827 return 0; 2828 if (down_trylock_console_sem()) 2829 return 0; 2830 console_locked = 1; 2831 console_may_schedule = 0; 2832 return 1; 2833 } 2834 EXPORT_SYMBOL(console_trylock); 2835 2836 int is_console_locked(void) 2837 { 2838 return console_locked; 2839 } 2840 EXPORT_SYMBOL(is_console_locked); 2841 2842 static void __console_unlock(void) 2843 { 2844 console_locked = 0; 2845 up_console_sem(); 2846 } 2847 2848 #ifdef CONFIG_PRINTK 2849 2850 /* 2851 * Prepend the message in @pmsg->pbufs->outbuf. This is achieved by shifting 2852 * the existing message over and inserting the scratchbuf message. 2853 * 2854 * @pmsg is the original printk message. 2855 * @fmt is the printf format of the message which will prepend the existing one. 2856 * 2857 * If there is not enough space in @pmsg->pbufs->outbuf, the existing 2858 * message text will be sufficiently truncated. 2859 * 2860 * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated. 
2861 */ 2862 __printf(2, 3) 2863 static void console_prepend_message(struct printk_message *pmsg, const char *fmt, ...) 2864 { 2865 struct printk_buffers *pbufs = pmsg->pbufs; 2866 const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf); 2867 const size_t outbuf_sz = sizeof(pbufs->outbuf); 2868 char *scratchbuf = &pbufs->scratchbuf[0]; 2869 char *outbuf = &pbufs->outbuf[0]; 2870 va_list args; 2871 size_t len; 2872 2873 va_start(args, fmt); 2874 len = vscnprintf(scratchbuf, scratchbuf_sz, fmt, args); 2875 va_end(args); 2876 2877 /* 2878 * Make sure outbuf is sufficiently large before prepending. 2879 * Keep at least the prefix when the message must be truncated. 2880 * It is a rather theoretical problem when someone tries to 2881 * use a minimalist buffer. 2882 */ 2883 if (WARN_ON_ONCE(len + PRINTK_PREFIX_MAX >= outbuf_sz)) 2884 return; 2885 2886 if (pmsg->outbuf_len + len >= outbuf_sz) { 2887 /* Truncate the message, but keep it terminated. */ 2888 pmsg->outbuf_len = outbuf_sz - (len + 1); 2889 outbuf[pmsg->outbuf_len] = 0; 2890 } 2891 2892 memmove(outbuf + len, outbuf, pmsg->outbuf_len + 1); 2893 memcpy(outbuf, scratchbuf, len); 2894 pmsg->outbuf_len += len; 2895 } 2896 2897 /* 2898 * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message". 2899 * @pmsg->outbuf_len is updated appropriately. 2900 * 2901 * @pmsg is the printk message to prepend. 2902 * 2903 * @dropped is the dropped count to report in the dropped message. 2904 */ 2905 void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped) 2906 { 2907 console_prepend_message(pmsg, "** %lu printk messages dropped **\n", dropped); 2908 } 2909 2910 /* 2911 * Prepend the message in @pmsg->pbufs->outbuf with a "replay message". 2912 * @pmsg->outbuf_len is updated appropriately. 2913 * 2914 * @pmsg is the printk message to prepend. 2915 */ 2916 void console_prepend_replay(struct printk_message *pmsg) 2917 { 2918 console_prepend_message(pmsg, "** replaying previous printk message **\n"); 2919 } 2920 2921 /* 2922 * Read and format the specified record (or a later record if the specified 2923 * record is not available). 2924 * 2925 * @pmsg will contain the formatted result. @pmsg->pbufs must point to a 2926 * struct printk_buffers. 2927 * 2928 * @seq is the record to read and format. If it is not available, the next 2929 * valid record is read. 2930 * 2931 * @is_extended specifies if the message should be formatted for extended 2932 * console output. 2933 * 2934 * @may_supress specifies if records may be skipped based on loglevel. 2935 * 2936 * Returns false if no record is available. Otherwise true and all fields 2937 * of @pmsg are valid. (See the documentation of struct printk_message 2938 * for information about the @pmsg fields.) 2939 */ 2940 bool printk_get_next_message(struct printk_message *pmsg, u64 seq, 2941 bool is_extended, bool may_suppress) 2942 { 2943 struct printk_buffers *pbufs = pmsg->pbufs; 2944 const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf); 2945 const size_t outbuf_sz = sizeof(pbufs->outbuf); 2946 char *scratchbuf = &pbufs->scratchbuf[0]; 2947 char *outbuf = &pbufs->outbuf[0]; 2948 struct printk_info info; 2949 struct printk_record r; 2950 size_t len = 0; 2951 2952 /* 2953 * Formatting extended messages requires a separate buffer, so use the 2954 * scratch buffer to read in the ringbuffer text. 2955 * 2956 * Formatting normal messages is done in-place, so read the ringbuffer 2957 * text directly into the output buffer. 
2958 */ 2959 if (is_extended) 2960 prb_rec_init_rd(&r, &info, scratchbuf, scratchbuf_sz); 2961 else 2962 prb_rec_init_rd(&r, &info, outbuf, outbuf_sz); 2963 2964 if (!prb_read_valid(prb, seq, &r)) 2965 return false; 2966 2967 pmsg->seq = r.info->seq; 2968 pmsg->dropped = r.info->seq - seq; 2969 2970 /* Skip record that has level above the console loglevel. */ 2971 if (may_suppress && suppress_message_printing(r.info->level)) 2972 goto out; 2973 2974 if (is_extended) { 2975 len = info_print_ext_header(outbuf, outbuf_sz, r.info); 2976 len += msg_print_ext_body(outbuf + len, outbuf_sz - len, 2977 &r.text_buf[0], r.info->text_len, &r.info->dev_info); 2978 } else { 2979 len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); 2980 } 2981 out: 2982 pmsg->outbuf_len = len; 2983 return true; 2984 } 2985 2986 /* 2987 * Legacy console printing from printk() caller context does not respect 2988 * raw_spinlock/spinlock nesting. For !PREEMPT_RT the lockdep warning is a 2989 * false positive. For PREEMPT_RT the false positive condition does not 2990 * occur. 2991 * 2992 * This map is used to temporarily establish LD_WAIT_SLEEP context for the 2993 * console write() callback when legacy printing to avoid false positive 2994 * lockdep complaints, thus allowing lockdep to continue to function for 2995 * real issues. 2996 */ 2997 #ifdef CONFIG_PREEMPT_RT 2998 static inline void printk_legacy_allow_spinlock_enter(void) { } 2999 static inline void printk_legacy_allow_spinlock_exit(void) { } 3000 #else 3001 static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_SLEEP); 3002 3003 static inline void printk_legacy_allow_spinlock_enter(void) 3004 { 3005 lock_map_acquire_try(&printk_legacy_map); 3006 } 3007 3008 static inline void printk_legacy_allow_spinlock_exit(void) 3009 { 3010 lock_map_release(&printk_legacy_map); 3011 } 3012 #endif /* CONFIG_PREEMPT_RT */ 3013 3014 /* 3015 * Used as the printk buffers for non-panic, serialized console printing. 3016 * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles. 3017 * Its usage requires the console_lock held. 3018 */ 3019 struct printk_buffers printk_shared_pbufs; 3020 3021 /* 3022 * Print one record for the given console. The record printed is whatever 3023 * record is the next available record for the given console. 3024 * 3025 * @handover will be set to true if a printk waiter has taken over the 3026 * console_lock, in which case the caller is no longer holding both the 3027 * console_lock and the SRCU read lock. Otherwise it is set to false. 3028 * 3029 * @cookie is the cookie from the SRCU read lock. 3030 * 3031 * Returns false if the given console has no next record to print, otherwise 3032 * true. 3033 * 3034 * Requires the console_lock and the SRCU read lock. 3035 */ 3036 static bool console_emit_next_record(struct console *con, bool *handover, int cookie) 3037 { 3038 bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED; 3039 char *outbuf = &printk_shared_pbufs.outbuf[0]; 3040 struct printk_message pmsg = { 3041 .pbufs = &printk_shared_pbufs, 3042 }; 3043 unsigned long flags; 3044 3045 *handover = false; 3046 3047 if (!printk_get_next_message(&pmsg, con->seq, is_extended, true)) 3048 return false; 3049 3050 con->dropped += pmsg.dropped; 3051 3052 /* Skip messages of formatted length 0. 
*/
3053 if (pmsg.outbuf_len == 0) {
3054 con->seq = pmsg.seq + 1;
3055 goto skip;
3056 }
3057
3058 if (con->dropped && !is_extended) {
3059 console_prepend_dropped(&pmsg, con->dropped);
3060 con->dropped = 0;
3061 }
3062
3063 /* Write everything out to the hardware. */
3064
3065 if (force_legacy_kthread() && !panic_in_progress()) {
3066 /*
3067 * With forced threading this function is in a task context
3068 * (either legacy kthread or get_init_console_seq()). There
3069 * is no need for concern about printk reentrance, handovers,
3070 * or lockdep complaints.
3071 */
3072
3073 con->write(con, outbuf, pmsg.outbuf_len);
3074 con->seq = pmsg.seq + 1;
3075 } else {
3076 /*
3077 * While actively printing out messages, if another printk()
3078 * were to occur on another CPU, it may wait for this one to
3079 * finish. This task cannot be preempted if there is a
3080 * waiter waiting to take over.
3081 *
3082 * Interrupts are disabled because the hand over to a waiter
3083 * must not be interrupted until the hand over is completed
3084 * (@console_waiter is cleared).
3085 */
3086 printk_safe_enter_irqsave(flags);
3087 console_lock_spinning_enable();
3088
3089 /* Do not trace print latency. */
3090 stop_critical_timings();
3091
3092 printk_legacy_allow_spinlock_enter();
3093 con->write(con, outbuf, pmsg.outbuf_len);
3094 printk_legacy_allow_spinlock_exit();
3095
3096 start_critical_timings();
3097
3098 con->seq = pmsg.seq + 1;
3099
3100 *handover = console_lock_spinning_disable_and_check(cookie);
3101 printk_safe_exit_irqrestore(flags);
3102 }
3103 skip:
3104 return true;
3105 }
3106
3107 #else
3108
3109 static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
3110 {
3111 *handover = false;
3112 return false;
3113 }
3114
3115 static inline void printk_kthreads_check_locked(void) { }
3116
3117 #endif /* CONFIG_PRINTK */
3118
3119 /*
3120 * Print out all remaining records to all consoles.
3121 *
3122 * @do_cond_resched is set by the caller. It can be true only in schedulable
3123 * context.
3124 *
3125 * @next_seq is set to the sequence number after the last available record.
3126 * The value is valid only when this function returns true. It means that all
3127 * usable consoles are completely flushed.
3128 *
3129 * @handover will be set to true if a printk waiter has taken over the
3130 * console_lock, in which case the caller is no longer holding the
3131 * console_lock. Otherwise it is set to false.
3132 *
3133 * Returns true when there was at least one usable console and all messages
3134 * were flushed to all usable consoles. A returned false informs the caller
3135 * that not everything was flushed (either there were no usable consoles or
3136 * another context has taken over printing, or it is a panic situation and this
3137 * is not the panic CPU). Regardless of the reason, the caller should assume it
3138 * is not useful to immediately try again.
3139 *
3140 * Requires the console_lock.
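 *
 * Typical caller pattern, a sketch of what __console_flush_and_unlock()
 * below does:
 *
 *	flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
 *	if (!handover)
 *		__console_unlock();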
3141 */ 3142 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover) 3143 { 3144 struct console_flush_type ft; 3145 bool any_usable = false; 3146 struct console *con; 3147 bool any_progress; 3148 int cookie; 3149 3150 *next_seq = 0; 3151 *handover = false; 3152 3153 do { 3154 any_progress = false; 3155 3156 printk_get_console_flush_type(&ft); 3157 3158 cookie = console_srcu_read_lock(); 3159 for_each_console_srcu(con) { 3160 short flags = console_srcu_read_flags(con); 3161 u64 printk_seq; 3162 bool progress; 3163 3164 /* 3165 * console_flush_all() is only responsible for nbcon 3166 * consoles when the nbcon consoles cannot print via 3167 * their atomic or threaded flushing. 3168 */ 3169 if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload)) 3170 continue; 3171 3172 if (!console_is_usable(con, flags, !do_cond_resched)) 3173 continue; 3174 any_usable = true; 3175 3176 if (flags & CON_NBCON) { 3177 progress = nbcon_legacy_emit_next_record(con, handover, cookie, 3178 !do_cond_resched); 3179 printk_seq = nbcon_seq_read(con); 3180 } else { 3181 progress = console_emit_next_record(con, handover, cookie); 3182 printk_seq = con->seq; 3183 } 3184 3185 /* 3186 * If a handover has occurred, the SRCU read lock 3187 * is already released. 3188 */ 3189 if (*handover) 3190 return false; 3191 3192 /* Track the next of the highest seq flushed. */ 3193 if (printk_seq > *next_seq) 3194 *next_seq = printk_seq; 3195 3196 if (!progress) 3197 continue; 3198 any_progress = true; 3199 3200 /* Allow panic_cpu to take over the consoles safely. */ 3201 if (other_cpu_in_panic()) 3202 goto abandon; 3203 3204 if (do_cond_resched) 3205 cond_resched(); 3206 } 3207 console_srcu_read_unlock(cookie); 3208 } while (any_progress); 3209 3210 return any_usable; 3211 3212 abandon: 3213 console_srcu_read_unlock(cookie); 3214 return false; 3215 } 3216 3217 static void __console_flush_and_unlock(void) 3218 { 3219 bool do_cond_resched; 3220 bool handover; 3221 bool flushed; 3222 u64 next_seq; 3223 3224 /* 3225 * Console drivers are called with interrupts disabled, so 3226 * @console_may_schedule should be cleared before; however, we may 3227 * end up dumping a lot of lines, for example, if called from 3228 * console registration path, and should invoke cond_resched() 3229 * between lines if allowable. Not doing so can cause a very long 3230 * scheduling stall on a slow console leading to RCU stall and 3231 * softlockup warnings which exacerbate the issue with more 3232 * messages practically incapacitating the system. Therefore, create 3233 * a local to use for the printing loop. 3234 */ 3235 do_cond_resched = console_may_schedule; 3236 3237 do { 3238 console_may_schedule = 0; 3239 3240 flushed = console_flush_all(do_cond_resched, &next_seq, &handover); 3241 if (!handover) 3242 __console_unlock(); 3243 3244 /* 3245 * Abort if there was a failure to flush all messages to all 3246 * usable consoles. Either it is not possible to flush (in 3247 * which case it would be an infinite loop of retrying) or 3248 * another context has taken over printing. 3249 */ 3250 if (!flushed) 3251 break; 3252 3253 /* 3254 * Some context may have added new records after 3255 * console_flush_all() but before unlocking the console. 3256 * Re-check if there is a new record to flush. If the trylock 3257 * fails, another context is already handling the printing. 
3258 */ 3259 } while (prb_read_valid(prb, next_seq, NULL) && console_trylock()); 3260 } 3261 3262 /** 3263 * console_unlock - unblock the legacy console subsystem from printing 3264 * 3265 * Releases the console_lock which the caller holds to block printing of 3266 * the legacy console subsystem. 3267 * 3268 * While the console_lock was held, console output may have been buffered 3269 * by printk(). If this is the case, console_unlock() emits the output on 3270 * legacy consoles prior to releasing the lock. 3271 * 3272 * console_unlock(); may be called from any context. 3273 */ 3274 void console_unlock(void) 3275 { 3276 struct console_flush_type ft; 3277 3278 printk_get_console_flush_type(&ft); 3279 if (ft.legacy_direct) 3280 __console_flush_and_unlock(); 3281 else 3282 __console_unlock(); 3283 } 3284 EXPORT_SYMBOL(console_unlock); 3285 3286 /** 3287 * console_conditional_schedule - yield the CPU if required 3288 * 3289 * If the console code is currently allowed to sleep, and 3290 * if this CPU should yield the CPU to another task, do 3291 * so here. 3292 * 3293 * Must be called within console_lock();. 3294 */ 3295 void __sched console_conditional_schedule(void) 3296 { 3297 if (console_may_schedule) 3298 cond_resched(); 3299 } 3300 EXPORT_SYMBOL(console_conditional_schedule); 3301 3302 void console_unblank(void) 3303 { 3304 bool found_unblank = false; 3305 struct console *c; 3306 int cookie; 3307 3308 /* 3309 * First check if there are any consoles implementing the unblank() 3310 * callback. If not, there is no reason to continue and take the 3311 * console lock, which in particular can be dangerous if 3312 * @oops_in_progress is set. 3313 */ 3314 cookie = console_srcu_read_lock(); 3315 for_each_console_srcu(c) { 3316 if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank) { 3317 found_unblank = true; 3318 break; 3319 } 3320 } 3321 console_srcu_read_unlock(cookie); 3322 if (!found_unblank) 3323 return; 3324 3325 /* 3326 * Stop console printing because the unblank() callback may 3327 * assume the console is not within its write() callback. 3328 * 3329 * If @oops_in_progress is set, this may be an atomic context. 3330 * In that case, attempt a trylock as best-effort. 3331 */ 3332 if (oops_in_progress) { 3333 /* Semaphores are not NMI-safe. */ 3334 if (in_nmi()) 3335 return; 3336 3337 /* 3338 * Attempting to trylock the console lock can deadlock 3339 * if another CPU was stopped while modifying the 3340 * semaphore. "Hope and pray" that this is not the 3341 * current situation. 3342 */ 3343 if (down_trylock_console_sem() != 0) 3344 return; 3345 } else 3346 console_lock(); 3347 3348 console_locked = 1; 3349 console_may_schedule = 0; 3350 3351 cookie = console_srcu_read_lock(); 3352 for_each_console_srcu(c) { 3353 if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank) 3354 c->unblank(); 3355 } 3356 console_srcu_read_unlock(cookie); 3357 3358 console_unlock(); 3359 3360 if (!oops_in_progress) 3361 pr_flush(1000, true); 3362 } 3363 3364 /* 3365 * Rewind all consoles to the oldest available record. 3366 * 3367 * IMPORTANT: The function is safe only when called under 3368 * console_lock(). It is not enforced because 3369 * it is used as a best effort in panic(). 
3370 */ 3371 static void __console_rewind_all(void) 3372 { 3373 struct console *c; 3374 short flags; 3375 int cookie; 3376 u64 seq; 3377 3378 seq = prb_first_valid_seq(prb); 3379 3380 cookie = console_srcu_read_lock(); 3381 for_each_console_srcu(c) { 3382 flags = console_srcu_read_flags(c); 3383 3384 if (flags & CON_NBCON) { 3385 nbcon_seq_force(c, seq); 3386 } else { 3387 /* 3388 * This assignment is safe only when called under 3389 * console_lock(). On panic, legacy consoles are 3390 * only best effort. 3391 */ 3392 c->seq = seq; 3393 } 3394 } 3395 console_srcu_read_unlock(cookie); 3396 } 3397 3398 /** 3399 * console_flush_on_panic - flush console content on panic 3400 * @mode: flush all messages in buffer or just the pending ones 3401 * 3402 * Immediately output all pending messages no matter what. 3403 */ 3404 void console_flush_on_panic(enum con_flush_mode mode) 3405 { 3406 struct console_flush_type ft; 3407 bool handover; 3408 u64 next_seq; 3409 3410 /* 3411 * Ignore the console lock and flush out the messages. Attempting a 3412 * trylock would not be useful because: 3413 * 3414 * - if it is contended, it must be ignored anyway 3415 * - console_lock() and console_trylock() block and fail 3416 * respectively in panic for non-panic CPUs 3417 * - semaphores are not NMI-safe 3418 */ 3419 3420 /* 3421 * If another context is holding the console lock, 3422 * @console_may_schedule might be set. Clear it so that 3423 * this context does not call cond_resched() while flushing. 3424 */ 3425 console_may_schedule = 0; 3426 3427 if (mode == CONSOLE_REPLAY_ALL) 3428 __console_rewind_all(); 3429 3430 printk_get_console_flush_type(&ft); 3431 if (ft.nbcon_atomic) 3432 nbcon_atomic_flush_pending(); 3433 3434 /* Flush legacy consoles once allowed, even when dangerous. */ 3435 if (legacy_allow_panic_sync) 3436 console_flush_all(false, &next_seq, &handover); 3437 } 3438 3439 /* 3440 * Return the console tty driver structure and its associated index 3441 */ 3442 struct tty_driver *console_device(int *index) 3443 { 3444 struct console *c; 3445 struct tty_driver *driver = NULL; 3446 int cookie; 3447 3448 /* 3449 * Take console_lock to serialize device() callback with 3450 * other console operations. For example, fg_console is 3451 * modified under console_lock when switching vt. 3452 */ 3453 console_lock(); 3454 3455 cookie = console_srcu_read_lock(); 3456 for_each_console_srcu(c) { 3457 if (!c->device) 3458 continue; 3459 driver = c->device(c, index); 3460 if (driver) 3461 break; 3462 } 3463 console_srcu_read_unlock(cookie); 3464 3465 console_unlock(); 3466 return driver; 3467 } 3468 3469 /* 3470 * Prevent further output on the passed console device so that (for example) 3471 * serial drivers can disable console output before suspending a port, and can 3472 * re-enable output afterwards. 3473 */ 3474 void console_stop(struct console *console) 3475 { 3476 __pr_flush(console, 1000, true); 3477 console_list_lock(); 3478 console_srcu_write_flags(console, console->flags & ~CON_ENABLED); 3479 console_list_unlock(); 3480 3481 /* 3482 * Ensure that all SRCU list walks have completed. All contexts must 3483 * be able to see that this console is disabled so that (for example) 3484 * the caller can suspend the port without risk of another context 3485 * using the port. 
3486 */ 3487 synchronize_srcu(&console_srcu); 3488 } 3489 EXPORT_SYMBOL(console_stop); 3490 3491 void console_start(struct console *console) 3492 { 3493 struct console_flush_type ft; 3494 bool is_nbcon; 3495 3496 console_list_lock(); 3497 console_srcu_write_flags(console, console->flags | CON_ENABLED); 3498 is_nbcon = console->flags & CON_NBCON; 3499 console_list_unlock(); 3500 3501 /* 3502 * Ensure that all SRCU list walks have completed. The related 3503 * printing context must be able to see it is enabled so that 3504 * it is guaranteed to wake up and resume printing. 3505 */ 3506 synchronize_srcu(&console_srcu); 3507 3508 printk_get_console_flush_type(&ft); 3509 if (is_nbcon && ft.nbcon_offload) 3510 nbcon_kthread_wake(console); 3511 else if (ft.legacy_offload) 3512 defer_console_output(); 3513 3514 __pr_flush(console, 1000, true); 3515 } 3516 EXPORT_SYMBOL(console_start); 3517 3518 #ifdef CONFIG_PRINTK 3519 static int unregister_console_locked(struct console *console); 3520 3521 /* True when system boot is far enough to create printer threads. */ 3522 static bool printk_kthreads_ready __ro_after_init; 3523 3524 static struct task_struct *printk_legacy_kthread; 3525 3526 static bool legacy_kthread_should_wakeup(void) 3527 { 3528 struct console_flush_type ft; 3529 struct console *con; 3530 bool ret = false; 3531 int cookie; 3532 3533 if (kthread_should_stop()) 3534 return true; 3535 3536 printk_get_console_flush_type(&ft); 3537 3538 cookie = console_srcu_read_lock(); 3539 for_each_console_srcu(con) { 3540 short flags = console_srcu_read_flags(con); 3541 u64 printk_seq; 3542 3543 /* 3544 * The legacy printer thread is only responsible for nbcon 3545 * consoles when the nbcon consoles cannot print via their 3546 * atomic or threaded flushing. 3547 */ 3548 if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload)) 3549 continue; 3550 3551 if (!console_is_usable(con, flags, false)) 3552 continue; 3553 3554 if (flags & CON_NBCON) { 3555 printk_seq = nbcon_seq_read(con); 3556 } else { 3557 /* 3558 * It is safe to read @seq because only this 3559 * thread context updates @seq. 3560 */ 3561 printk_seq = con->seq; 3562 } 3563 3564 if (prb_read_valid(prb, printk_seq, NULL)) { 3565 ret = true; 3566 break; 3567 } 3568 } 3569 console_srcu_read_unlock(cookie); 3570 3571 return ret; 3572 } 3573 3574 static int legacy_kthread_func(void *unused) 3575 { 3576 for (;;) { 3577 wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup()); 3578 3579 if (kthread_should_stop()) 3580 break; 3581 3582 console_lock(); 3583 __console_flush_and_unlock(); 3584 } 3585 3586 return 0; 3587 } 3588 3589 static bool legacy_kthread_create(void) 3590 { 3591 struct task_struct *kt; 3592 3593 lockdep_assert_console_list_lock_held(); 3594 3595 kt = kthread_run(legacy_kthread_func, NULL, "pr/legacy"); 3596 if (WARN_ON(IS_ERR(kt))) { 3597 pr_err("failed to start legacy printing thread\n"); 3598 return false; 3599 } 3600 3601 printk_legacy_kthread = kt; 3602 3603 /* 3604 * It is important that console printing threads are scheduled 3605 * shortly after a printk call and with generous runtime budgets. 3606 */ 3607 sched_set_normal(printk_legacy_kthread, -20); 3608 3609 return true; 3610 } 3611 3612 /** 3613 * printk_kthreads_shutdown - shutdown all threaded printers 3614 * 3615 * On system shutdown all threaded printers are stopped. This allows printk 3616 * to transition back to atomic printing, thus providing a robust mechanism 3617 * for the final shutdown/reboot messages to be output. 
3618 */ 3619 static void printk_kthreads_shutdown(void) 3620 { 3621 struct console *con; 3622 3623 console_list_lock(); 3624 if (printk_kthreads_running) { 3625 printk_kthreads_running = false; 3626 3627 for_each_console(con) { 3628 if (con->flags & CON_NBCON) 3629 nbcon_kthread_stop(con); 3630 } 3631 3632 /* 3633 * The threads may have been stopped while printing a 3634 * backlog. Flush any records left over. 3635 */ 3636 nbcon_atomic_flush_pending(); 3637 } 3638 console_list_unlock(); 3639 } 3640 3641 static struct syscore_ops printk_syscore_ops = { 3642 .shutdown = printk_kthreads_shutdown, 3643 }; 3644 3645 /* 3646 * If appropriate, start nbcon kthreads and set @printk_kthreads_running. 3647 * If any kthreads fail to start, those consoles are unregistered. 3648 * 3649 * Must be called under console_list_lock(). 3650 */ 3651 static void printk_kthreads_check_locked(void) 3652 { 3653 struct hlist_node *tmp; 3654 struct console *con; 3655 3656 lockdep_assert_console_list_lock_held(); 3657 3658 if (!printk_kthreads_ready) 3659 return; 3660 3661 if (have_legacy_console || have_boot_console) { 3662 if (!printk_legacy_kthread && 3663 force_legacy_kthread() && 3664 !legacy_kthread_create()) { 3665 /* 3666 * All legacy consoles must be unregistered. If there 3667 * are any nbcon consoles, they will set up their own 3668 * kthread. 3669 */ 3670 hlist_for_each_entry_safe(con, tmp, &console_list, node) { 3671 if (con->flags & CON_NBCON) 3672 continue; 3673 3674 unregister_console_locked(con); 3675 } 3676 } 3677 } else if (printk_legacy_kthread) { 3678 kthread_stop(printk_legacy_kthread); 3679 printk_legacy_kthread = NULL; 3680 } 3681 3682 /* 3683 * Printer threads cannot be started as long as any boot console is 3684 * registered because there is no way to synchronize the hardware 3685 * registers between boot console code and regular console code. 3686 * It can only be known that there will be no new boot consoles when 3687 * an nbcon console is registered. 3688 */ 3689 if (have_boot_console || !have_nbcon_console) { 3690 /* Clear flag in case all nbcon consoles unregistered. */ 3691 printk_kthreads_running = false; 3692 return; 3693 } 3694 3695 if (printk_kthreads_running) 3696 return; 3697 3698 hlist_for_each_entry_safe(con, tmp, &console_list, node) { 3699 if (!(con->flags & CON_NBCON)) 3700 continue; 3701 3702 if (!nbcon_kthread_create(con)) 3703 unregister_console_locked(con); 3704 } 3705 3706 printk_kthreads_running = true; 3707 } 3708 3709 static int __init printk_set_kthreads_ready(void) 3710 { 3711 register_syscore_ops(&printk_syscore_ops); 3712 3713 console_list_lock(); 3714 printk_kthreads_ready = true; 3715 printk_kthreads_check_locked(); 3716 console_list_unlock(); 3717 3718 return 0; 3719 } 3720 early_initcall(printk_set_kthreads_ready); 3721 #endif /* CONFIG_PRINTK */ 3722 3723 static int __read_mostly keep_bootcon; 3724 3725 static int __init keep_bootcon_setup(char *str) 3726 { 3727 keep_bootcon = 1; 3728 pr_info("debug: skip boot console de-registration.\n"); 3729 3730 return 0; 3731 } 3732 3733 early_param("keep_bootcon", keep_bootcon_setup); 3734 3735 static int console_call_setup(struct console *newcon, char *options) 3736 { 3737 int err; 3738 3739 if (!newcon->setup) 3740 return 0; 3741 3742 /* Synchronize with possible boot console. 
*/
3743 console_lock();
3744 err = newcon->setup(newcon, options);
3745 console_unlock();
3746
3747 return err;
3748 }
3749
3750 /*
3751 * This is called by register_console() to try to match
3752 * the newly registered console with any of the ones selected
3753 * by either the command line or add_preferred_console() and
3754 * setup/enable it.
3755 *
3756 * Care needs to be taken with consoles that are statically
3757 * enabled, such as netconsole.
3758 */
3759 static int try_enable_preferred_console(struct console *newcon,
3760 bool user_specified)
3761 {
3762 struct console_cmdline *c;
3763 int i, err;
3764
3765 for (i = 0, c = console_cmdline;
3766 i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]);
3767 i++, c++) {
3768 /* Console not yet initialized? */
3769 if (!c->name[0])
3770 continue;
3771 if (c->user_specified != user_specified)
3772 continue;
3773 if (!newcon->match ||
3774 newcon->match(newcon, c->name, c->index, c->options) != 0) {
3775 /* default matching */
3776 BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
3777 if (strcmp(c->name, newcon->name) != 0)
3778 continue;
3779 if (newcon->index >= 0 &&
3780 newcon->index != c->index)
3781 continue;
3782 if (newcon->index < 0)
3783 newcon->index = c->index;
3784
3785 if (_braille_register_console(newcon, c))
3786 return 0;
3787
3788 err = console_call_setup(newcon, c->options);
3789 if (err)
3790 return err;
3791 }
3792 newcon->flags |= CON_ENABLED;
3793 if (i == preferred_console)
3794 newcon->flags |= CON_CONSDEV;
3795 return 0;
3796 }
3797
3798 /*
3799 * Some consoles, such as pstore and netconsole, can be enabled even
3800 * without matching. Accept the pre-enabled consoles only when match()
3801 * and setup() had a chance to be called.
3802 */
3803 if (newcon->flags & CON_ENABLED && c->user_specified == user_specified)
3804 return 0;
3805
3806 return -ENOENT;
3807 }
3808
3809 /* Try to enable the console unconditionally */
3810 static void try_enable_default_console(struct console *newcon)
3811 {
3812 if (newcon->index < 0)
3813 newcon->index = 0;
3814
3815 if (console_call_setup(newcon, NULL) != 0)
3816 return;
3817
3818 newcon->flags |= CON_ENABLED;
3819
3820 if (newcon->device)
3821 newcon->flags |= CON_CONSDEV;
3822 }
3823
3824 /* Return the starting sequence number for a newly registered console. */
3825 static u64 get_init_console_seq(struct console *newcon, bool bootcon_registered)
3826 {
3827 struct console *con;
3828 bool handover;
3829 u64 init_seq;
3830
3831 if (newcon->flags & (CON_PRINTBUFFER | CON_BOOT)) {
3832 /* Get a consistent copy of @syslog_seq. */
3833 mutex_lock(&syslog_lock);
3834 init_seq = syslog_seq;
3835 mutex_unlock(&syslog_lock);
3836 } else {
3837 /* Begin with next message added to ringbuffer. */
3838 init_seq = prb_next_seq(prb);
3839
3840 /*
3841 * If any enabled boot consoles are due to be unregistered
3842 * shortly, some may not be caught up and may be the same
3843 * device as @newcon. Since it is not known which boot console
3844 * is the same device, flush all consoles and, if necessary,
3845 * start with the message of the enabled boot console that is
3846 * the furthest behind.
3847 */
3848 if (bootcon_registered && !keep_bootcon) {
3849 /*
3850 * Hold the console_lock to stop console printing and
3851 * guarantee safe access to console->seq.
3852 */
3853 console_lock();
3854
3855 /*
3856 * Flush all consoles and set the console to start at
3857 * the next unprinted sequence number.
3858 */ 3859 if (!console_flush_all(true, &init_seq, &handover)) { 3860 /* 3861 * Flushing failed. Just choose the lowest 3862 * sequence of the enabled boot consoles. 3863 */ 3864 3865 /* 3866 * If there was a handover, this context no 3867 * longer holds the console_lock. 3868 */ 3869 if (handover) 3870 console_lock(); 3871 3872 init_seq = prb_next_seq(prb); 3873 for_each_console(con) { 3874 u64 seq; 3875 3876 if (!(con->flags & CON_BOOT) || 3877 !(con->flags & CON_ENABLED)) { 3878 continue; 3879 } 3880 3881 if (con->flags & CON_NBCON) 3882 seq = nbcon_seq_read(con); 3883 else 3884 seq = con->seq; 3885 3886 if (seq < init_seq) 3887 init_seq = seq; 3888 } 3889 } 3890 3891 console_unlock(); 3892 } 3893 } 3894 3895 return init_seq; 3896 } 3897 3898 #define console_first() \ 3899 hlist_entry(console_list.first, struct console, node) 3900 3901 static int unregister_console_locked(struct console *console); 3902 3903 /* 3904 * The console driver calls this routine during kernel initialization 3905 * to register the console printing procedure with printk() and to 3906 * print any messages that were printed by the kernel before the 3907 * console driver was initialized. 3908 * 3909 * This can happen pretty early during the boot process (because of 3910 * early_printk) - sometimes before setup_arch() completes - be careful 3911 * of what kernel features are used - they may not be initialised yet. 3912 * 3913 * There are two types of consoles - bootconsoles (early_printk) and 3914 * "real" consoles (everything which is not a bootconsole) which are 3915 * handled differently. 3916 * - Any number of bootconsoles can be registered at any time. 3917 * - As soon as a "real" console is registered, all bootconsoles 3918 * will be unregistered automatically. 3919 * - Once a "real" console is registered, any attempt to register a 3920 * bootconsoles will be rejected 3921 */ 3922 void register_console(struct console *newcon) 3923 { 3924 bool use_device_lock = (newcon->flags & CON_NBCON) && newcon->write_atomic; 3925 bool bootcon_registered = false; 3926 bool realcon_registered = false; 3927 struct console *con; 3928 unsigned long flags; 3929 u64 init_seq; 3930 int err; 3931 3932 console_list_lock(); 3933 3934 for_each_console(con) { 3935 if (WARN(con == newcon, "console '%s%d' already registered\n", 3936 con->name, con->index)) { 3937 goto unlock; 3938 } 3939 3940 if (con->flags & CON_BOOT) 3941 bootcon_registered = true; 3942 else 3943 realcon_registered = true; 3944 } 3945 3946 /* Do not register boot consoles when there already is a real one. */ 3947 if ((newcon->flags & CON_BOOT) && realcon_registered) { 3948 pr_info("Too late to register bootconsole %s%d\n", 3949 newcon->name, newcon->index); 3950 goto unlock; 3951 } 3952 3953 if (newcon->flags & CON_NBCON) { 3954 /* 3955 * Ensure the nbcon console buffers can be allocated 3956 * before modifying any global data. 3957 */ 3958 if (!nbcon_alloc(newcon)) 3959 goto unlock; 3960 } 3961 3962 /* 3963 * See if we want to enable this console driver by default. 3964 * 3965 * Nope when a console is preferred by the command line, device 3966 * tree, or SPCR. 3967 * 3968 * The first real console with tty binding (driver) wins. More 3969 * consoles might get enabled before the right one is found. 3970 * 3971 * Note that a console with tty binding will have CON_CONSDEV 3972 * flag set and will be first in the list. 
	if (preferred_console < 0) {
		if (hlist_empty(&console_list) || !console_first()->device ||
		    console_first()->flags & CON_BOOT) {
			try_enable_default_console(newcon);
		}
	}

	/* See if this console matches one we selected on the command line. */
	err = try_enable_preferred_console(newcon, true);

	/* If not, try to match against the platform default(s). */
	if (err == -ENOENT)
		err = try_enable_preferred_console(newcon, false);

	/* printk() messages are not printed to the Braille console. */
	if (err || newcon->flags & CON_BRL) {
		if (newcon->flags & CON_NBCON)
			nbcon_free(newcon);
		goto unlock;
	}

	/*
	 * If we have a bootconsole and are switching to a real console,
	 * don't print everything out again, since when the boot console
	 * and the real console are the same physical device, it's annoying
	 * to see the beginning boot messages twice.
	 */
	if (bootcon_registered &&
	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
		newcon->flags &= ~CON_PRINTBUFFER;
	}

	newcon->dropped = 0;
	init_seq = get_init_console_seq(newcon, bootcon_registered);

	if (newcon->flags & CON_NBCON) {
		have_nbcon_console = true;
		nbcon_seq_force(newcon, init_seq);
	} else {
		have_legacy_console = true;
		newcon->seq = init_seq;
	}

	if (newcon->flags & CON_BOOT)
		have_boot_console = true;

	/*
	 * If another context is actively using the hardware of this new
	 * console, it will not be aware of the nbcon synchronization. There
	 * is a risk that the two contexts could access the hardware
	 * simultaneously if this new console is used for atomic printing
	 * and the other context is still using the hardware.
	 *
	 * Use the driver synchronization to ensure that the hardware is not
	 * in use while this new console transitions to being registered.
	 */
	if (use_device_lock)
		newcon->device_lock(newcon, &flags);

	/*
	 * Put this console in the list - keep the
	 * preferred driver at the head of the list.
	 */
	if (hlist_empty(&console_list)) {
		/* Ensure CON_CONSDEV is always set for the head. */
		newcon->flags |= CON_CONSDEV;
		hlist_add_head_rcu(&newcon->node, &console_list);

	} else if (newcon->flags & CON_CONSDEV) {
		/* Only the new head can have CON_CONSDEV set. */
		console_srcu_write_flags(console_first(), console_first()->flags & ~CON_CONSDEV);
		hlist_add_head_rcu(&newcon->node, &console_list);

	} else {
		hlist_add_behind_rcu(&newcon->node, console_list.first);
	}

	/*
	 * No need to synchronize SRCU here! The caller does not rely
	 * on all contexts being able to see the new console before
	 * register_console() completes.
	 */

	/* This new console is now registered. */
	if (use_device_lock)
		newcon->device_unlock(newcon, flags);

	console_sysfs_notify();

	/*
	 * By unregistering the bootconsoles after we enable the real console,
	 * we get the "console xxx enabled" message on all the consoles -
	 * boot consoles, real consoles, etc. This is to ensure that end
	 * users know there might be something in the kernel's log buffer
	 * that went to the bootconsole (and that they do not see on the
	 * real console).
	 */
	con_printk(KERN_INFO, newcon, "enabled\n");
	if (bootcon_registered &&
	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
	    !keep_bootcon) {
		struct hlist_node *tmp;

		hlist_for_each_entry_safe(con, tmp, &console_list, node) {
			if (con->flags & CON_BOOT)
				unregister_console_locked(con);
		}
	}

	/* Changed console list, may require printer threads to start/stop. */
	printk_kthreads_check_locked();
unlock:
	console_list_unlock();
}
EXPORT_SYMBOL(register_console);
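
/*
 * Illustrative sketch (not part of the kernel sources): a minimal legacy
 * console driver registering itself. The hardware hook my_hw_putc() is
 * hypothetical; the rest uses the struct console API as declared in
 * <linux/console.h>.
 *
 *	static void my_console_write(struct console *con, const char *s,
 *				     unsigned int count)
 *	{
 *		while (count--)
 *			my_hw_putc(*s++);	// hypothetical hardware access
 *	}
 *
 *	static struct console my_console = {
 *		.name	= "mycon",
 *		.write	= my_console_write,
 *		.flags	= CON_PRINTBUFFER,	// replay the existing log
 *		.index	= -1,			// accept any index from console=
 *	};
 *
 *	register_console(&my_console);
 *
 * With CON_PRINTBUFFER set, get_init_console_seq() starts the console at
 * @syslog_seq, so already-buffered boot messages are printed once the
 * console is enabled.
 */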
/* Must be called under console_list_lock(). */
static int unregister_console_locked(struct console *console)
{
	bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic;
	bool found_legacy_con = false;
	bool found_nbcon_con = false;
	bool found_boot_con = false;
	unsigned long flags;
	struct console *c;
	int res;

	lockdep_assert_console_list_lock_held();

	con_printk(KERN_INFO, console, "disabled\n");

	res = _braille_unregister_console(console);
	if (res < 0)
		return res;
	if (res > 0)
		return 0;

	if (!console_is_registered_locked(console))
		res = -ENODEV;
	else if (console_is_usable(console, console->flags, true))
		__pr_flush(console, 1000, true);

	/* Disable it unconditionally. */
	console_srcu_write_flags(console, console->flags & ~CON_ENABLED);

	if (res < 0)
		return res;

	/*
	 * Use the driver synchronization to ensure that the hardware is not
	 * in use while this console transitions to being unregistered.
	 */
	if (use_device_lock)
		console->device_lock(console, &flags);

	hlist_del_init_rcu(&console->node);

	if (use_device_lock)
		console->device_unlock(console, flags);

	/*
	 * <HISTORICAL>
	 * If this isn't the last console and it has CON_CONSDEV set, we
	 * need to set it on the next preferred console.
	 * </HISTORICAL>
	 *
	 * The above makes no sense as there is no guarantee that the next
	 * console has any device attached. Oh well....
	 */
	if (!hlist_empty(&console_list) && console->flags & CON_CONSDEV)
		console_srcu_write_flags(console_first(), console_first()->flags | CON_CONSDEV);

	/*
	 * Ensure that all SRCU list walks have completed. All contexts
	 * must not be able to see this console in the list so that any
	 * exit/cleanup routines can be performed safely.
	 */
	synchronize_srcu(&console_srcu);

	if (console->flags & CON_NBCON)
		nbcon_free(console);

	console_sysfs_notify();

	if (console->exit)
		res = console->exit(console);

	/*
	 * With this console gone, the global flags tracking registered
	 * console types may have changed. Update them.
	 */
	for_each_console(c) {
		if (c->flags & CON_BOOT)
			found_boot_con = true;

		if (c->flags & CON_NBCON)
			found_nbcon_con = true;
		else
			found_legacy_con = true;
	}
	if (!found_boot_con)
		have_boot_console = found_boot_con;
	if (!found_legacy_con)
		have_legacy_console = found_legacy_con;
	if (!found_nbcon_con)
		have_nbcon_console = found_nbcon_con;

	/* Changed console list, may require printer threads to start/stop. */
	printk_kthreads_check_locked();

	return res;
}

int unregister_console(struct console *console)
{
	int res;

	console_list_lock();
	res = unregister_console_locked(console);
	console_list_unlock();
	return res;
}
EXPORT_SYMBOL(unregister_console);

/**
 * console_force_preferred_locked - force a registered console preferred
 * @con: The registered console to force preferred.
 *
 * Must be called under console_list_lock().
 */
void console_force_preferred_locked(struct console *con)
{
	struct console *cur_pref_con;

	if (!console_is_registered_locked(con))
		return;

	cur_pref_con = console_first();

	/* Already preferred? */
	if (cur_pref_con == con)
		return;

	/*
	 * Delete, but do not re-initialize the entry. This allows the console
	 * to continue to appear registered (via any hlist_unhashed_lockless()
	 * checks), even though it was briefly removed from the console list.
	 */
	hlist_del_rcu(&con->node);

	/*
	 * Ensure that all SRCU list walks have completed so that the console
	 * can be added to the beginning of the console list and its forward
	 * list pointer can be re-initialized.
	 */
	synchronize_srcu(&console_srcu);

	con->flags |= CON_CONSDEV;
	WARN_ON(!con->device);

	/* Only the new head can have CON_CONSDEV set. */
	console_srcu_write_flags(cur_pref_con, cur_pref_con->flags & ~CON_CONSDEV);
	hlist_add_head_rcu(&con->node, &console_list);
}
EXPORT_SYMBOL(console_force_preferred_locked);

/*
 * Initialize the console device. This is called *early*, so
 * we can't necessarily depend on lots of kernel help here.
 * Just do some early initializations, and do the complex setup
 * later.
 */
void __init console_init(void)
{
	int ret;
	initcall_t call;
	initcall_entry_t *ce;

	/* Setup the default TTY line discipline. */
	n_tty_init();

	/*
	 * Set up the console device so that later boot sequences can
	 * inform about problems etc..
	 */
	ce = __con_initcall_start;
	trace_initcall_level("console");
	while (ce < __con_initcall_end) {
		call = initcall_from_entry(ce);
		trace_initcall_start(call);
		ret = call();
		trace_initcall_finish(call, ret);
		ce++;
	}
}

/*
 * Some boot consoles access data that is in the init section and which will
 * be discarded after the initcalls have been run. To make sure that no code
 * will access this data, unregister the boot consoles in a late initcall.
 *
 * If for some reason, such as deferred probe or the driver being a loadable
 * module, the real console hasn't registered yet at this point, there will
 * be a brief interval in which no messages are logged to the console, which
 * makes it difficult to diagnose problems that occur during this time.
 *
 * To mitigate this problem somewhat, only unregister consoles whose memory
 * intersects with the init section. Note that all other boot consoles will
 * get unregistered when the real preferred console is registered.
 */
static int __init printk_late_init(void)
{
	struct hlist_node *tmp;
	struct console *con;
	int ret;

	console_list_lock();
	hlist_for_each_entry_safe(con, tmp, &console_list, node) {
		if (!(con->flags & CON_BOOT))
			continue;

		/* Check addresses that might be used for enabled consoles. */
		if (init_section_intersects(con, sizeof(*con)) ||
		    init_section_contains(con->write, 0) ||
		    init_section_contains(con->read, 0) ||
		    init_section_contains(con->device, 0) ||
		    init_section_contains(con->unblank, 0) ||
		    init_section_contains(con->data, 0)) {
			/*
			 * Please consider moving the reported consoles out
			 * of the init section.
			 */
			pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n",
				con->name, con->index);
			unregister_console_locked(con);
		}
	}
	console_list_unlock();

	ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
					console_cpu_notify);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
					console_cpu_notify, NULL);
	WARN_ON(ret < 0);
	printk_sysctl_init();
	return 0;
}
late_initcall(printk_late_init);

#if defined CONFIG_PRINTK
/* If @con is specified, only wait for that console. Otherwise wait for all. */
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
{
	unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms);
	unsigned long remaining_jiffies = timeout_jiffies;
	struct console_flush_type ft;
	struct console *c;
	u64 last_diff = 0;
	u64 printk_seq;
	short flags;
	int cookie;
	u64 diff;
	u64 seq;

	/* Sorry, pr_flush() will not work this early. */
	if (system_state < SYSTEM_SCHEDULING)
		return false;

	might_sleep();

	seq = prb_next_reserve_seq(prb);

	/* Flush the consoles so that records up to @seq are printed. */
	printk_get_console_flush_type(&ft);
	if (ft.nbcon_atomic)
		nbcon_atomic_flush_pending();
	if (ft.legacy_direct) {
		console_lock();
		console_unlock();
	}

	for (;;) {
		unsigned long begin_jiffies;
		unsigned long slept_jiffies;

		diff = 0;

		/*
		 * Hold the console_lock to guarantee safe access to
		 * console->seq. Releasing console_lock flushes more
		 * records in case @seq is still not printed on all
		 * usable consoles.
		 *
		 * Holding the console_lock is not necessary if there
		 * are no legacy or boot consoles. However, such a
		 * console could register at any time. Always hold the
		 * console_lock as a precaution rather than
		 * synchronizing against register_console().
		 */
		console_lock();

		cookie = console_srcu_read_lock();
		for_each_console_srcu(c) {
			if (con && con != c)
				continue;

			flags = console_srcu_read_flags(c);

			/*
			 * If consoles are not usable, it cannot be expected
			 * that they make forward progress, so only increment
			 * @diff for usable consoles.
			 */
			if (!console_is_usable(c, flags, true) &&
			    !console_is_usable(c, flags, false)) {
				continue;
			}

			if (flags & CON_NBCON)
				printk_seq = nbcon_seq_read(c);
			else
				printk_seq = c->seq;

			if (printk_seq < seq)
				diff += seq - printk_seq;
		}
		console_srcu_read_unlock(cookie);

		if (diff != last_diff && reset_on_progress)
			remaining_jiffies = timeout_jiffies;

		console_unlock();

		/* Note: @diff is 0 if there are no usable consoles. */
		if (diff == 0 || remaining_jiffies == 0)
			break;

		/* msleep(1) might sleep much longer. Check time by jiffies. */
		begin_jiffies = jiffies;
		msleep(1);
		slept_jiffies = jiffies - begin_jiffies;

		remaining_jiffies -= min(slept_jiffies, remaining_jiffies);

		last_diff = diff;
	}

	return (diff == 0);
}

/**
 * pr_flush() - Wait for printing threads to catch up.
 *
 * @timeout_ms:        The maximum time (in ms) to wait.
 * @reset_on_progress: Reset the timeout if forward progress is seen.
 *
 * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
 * represents infinite waiting.
 *
 * If @reset_on_progress is true, the timeout will be reset whenever any
 * printer has been seen to make some forward progress.
 *
 * Context: Process context. May sleep while acquiring console lock.
 * Return: true if all usable printers are caught up.
 */
static bool pr_flush(int timeout_ms, bool reset_on_progress)
{
	return __pr_flush(NULL, timeout_ms, reset_on_progress);
}
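
/*
 * Illustrative sketch (not part of the kernel sources): pr_flush() is
 * static, so any caller lives in this file. A hypothetical path that
 * wants its final messages on the consoles before stopping them could
 * wait up to one second, restarting the timeout while any usable
 * console still makes forward progress:
 *
 *	pr_info("stopping console output\n");
 *	pr_flush(1000, true);	// best effort: give consoles 1s to catch up
 *
 * A @timeout_ms of -1 would wait indefinitely instead.
 */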
/*
 * Delayed printk version, for scheduler-internal messages:
 */
#define PRINTK_PENDING_WAKEUP	0x01
#define PRINTK_PENDING_OUTPUT	0x02

static DEFINE_PER_CPU(int, printk_pending);

static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
	int pending = this_cpu_xchg(printk_pending, 0);

	if (pending & PRINTK_PENDING_OUTPUT) {
		if (force_legacy_kthread()) {
			if (printk_legacy_kthread)
				wake_up_interruptible(&legacy_wait);
		} else {
			if (console_trylock())
				console_unlock();
		}
	}

	if (pending & PRINTK_PENDING_WAKEUP)
		wake_up_interruptible(&log_wait);
}

static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
	IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);

static void __wake_up_klogd(int val)
{
	if (!printk_percpu_data_ready())
		return;

	preempt_disable();
	/*
	 * Guarantee any new records can be seen by tasks preparing to wait
	 * before this context checks if the wait queue is empty.
	 *
	 * The full memory barrier within wq_has_sleeper() pairs with the full
	 * memory barrier within set_current_state() of
	 * prepare_to_wait_event(), which is called after ___wait_event() adds
	 * the waiter but before it has checked the wait condition.
	 *
	 * This pairs with devkmsg_read:A and syslog_print:A.
	 */
	if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
	    (val & PRINTK_PENDING_OUTPUT)) {
		this_cpu_or(printk_pending, val);
		irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
	}
	preempt_enable();
}

/**
 * wake_up_klogd - Wake the kernel logging daemon
 *
 * Use this function when new records have been added to the ringbuffer
 * and the console printing of those records has already occurred or is
 * known to be handled by some other context. This function will only
 * wake the logging daemon.
 *
 * Context: Any context.
 */
void wake_up_klogd(void)
{
	__wake_up_klogd(PRINTK_PENDING_WAKEUP);
}

/**
 * defer_console_output - Wake the kernel logging daemon and trigger
 *	console printing in a deferred context
 *
 * Use this function when new records have been added to the ringbuffer,
 * this context is responsible for console printing those records, but
 * the current context is not allowed to perform the console printing.
 * Trigger an irq_work context to perform the console printing. This
 * function also wakes the logging daemon.
 *
 * Context: Any context.
 */
void defer_console_output(void)
{
	/*
	 * New messages may have been added directly to the ringbuffer
	 * using vprintk_store(), so wake any waiters as well.
	 */
	__wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
}

void printk_trigger_flush(void)
{
	defer_console_output();
}

int vprintk_deferred(const char *fmt, va_list args)
{
	return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
}

int _printk_deferred(const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vprintk_deferred(fmt, args);
	va_end(args);

	return r;
}

/*
 * printk rate limiting, lifted from the networking subsystem.
 *
 * This enforces a rate limit: not more than 10 kernel messages
 * every 5s, to limit the impact of denial-of-service attacks.
 */
DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);

int __printk_ratelimit(const char *func)
{
	return ___ratelimit(&printk_ratelimit_state, func);
}
EXPORT_SYMBOL(__printk_ratelimit);

/**
 * printk_timed_ratelimit - caller-controlled printk ratelimiting
 * @caller_jiffies:  pointer to caller's state
 * @interval_msecs:  minimum interval between prints
 *
 * printk_timed_ratelimit() returns true if more than @interval_msecs
 * milliseconds have elapsed since the last time printk_timed_ratelimit()
 * returned true.
 */
bool printk_timed_ratelimit(unsigned long *caller_jiffies,
			    unsigned int interval_msecs)
{
	unsigned long elapsed = jiffies - *caller_jiffies;

	if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
		return false;

	*caller_jiffies = jiffies;
	return true;
}
EXPORT_SYMBOL(printk_timed_ratelimit);
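
/*
 * Illustrative sketch (not part of the kernel sources): the caller keeps
 * the state, typically in a static variable, so that independent call
 * sites are rate limited independently:
 *
 *	static unsigned long last_warned;	// hypothetical call-site state
 *
 *	if (printk_timed_ratelimit(&last_warned, 1000))
 *		pr_warn("device still busy\n");	// at most once per second
 *
 * Compare with printk_ratelimit(), which uses the single global
 * printk_ratelimit_state shared by all of its callers.
 */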
static DEFINE_SPINLOCK(dump_list_lock);
static LIST_HEAD(dump_list);

/**
 * kmsg_dump_register - register a kernel log dumper.
 * @dumper: pointer to the kmsg_dumper structure
 *
 * Adds a kernel log dumper to the system. The dump callback in the
 * structure, which must be set, will be called when the kernel oopses
 * or panics.
 *
 * Returns zero on success and %-EINVAL or %-EBUSY otherwise.
 */
int kmsg_dump_register(struct kmsg_dumper *dumper)
{
	unsigned long flags;
	int err = -EBUSY;

	/* The dump callback needs to be set. */
	if (!dumper->dump)
		return -EINVAL;

	spin_lock_irqsave(&dump_list_lock, flags);
	/* Don't allow registering multiple times. */
	if (!dumper->registered) {
		dumper->registered = 1;
		list_add_tail_rcu(&dumper->list, &dump_list);
		err = 0;
	}
	spin_unlock_irqrestore(&dump_list_lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_register);
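
/*
 * Illustrative sketch (not part of the kernel sources): a minimal dumper
 * that fires on every dump reason. my_dump() is a hypothetical name; a
 * possible callback body is sketched after kmsg_dump_rewind() below.
 *
 *	static void my_dump(struct kmsg_dumper *dumper,
 *			    struct kmsg_dump_detail *detail)
 *	{
 *		// retrieve records via kmsg_dump_get_line()/_get_buffer()
 *	}
 *
 *	static struct kmsg_dumper my_dumper = {
 *		.dump       = my_dump,
 *		.max_reason = KMSG_DUMP_MAX,	// accept all reasons
 *	};
 *
 *	kmsg_dump_register(&my_dumper);
 *
 * Leaving .max_reason as KMSG_DUMP_UNDEF would instead default to
 * KMSG_DUMP_OOPS unless always_kmsg_dump is set, as kmsg_dump_desc()
 * below shows.
 */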
/**
 * kmsg_dump_unregister - unregister a kmsg dumper.
 * @dumper: pointer to the kmsg_dumper structure
 *
 * Removes a dump device from the system. Returns zero on success and
 * %-EINVAL otherwise.
 */
int kmsg_dump_unregister(struct kmsg_dumper *dumper)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&dump_list_lock, flags);
	if (dumper->registered) {
		dumper->registered = 0;
		list_del_rcu(&dumper->list);
		err = 0;
	}
	spin_unlock_irqrestore(&dump_list_lock, flags);
	synchronize_rcu();

	return err;
}
EXPORT_SYMBOL_GPL(kmsg_dump_unregister);

static bool always_kmsg_dump;
module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);

const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
{
	switch (reason) {
	case KMSG_DUMP_PANIC:
		return "Panic";
	case KMSG_DUMP_OOPS:
		return "Oops";
	case KMSG_DUMP_EMERG:
		return "Emergency";
	case KMSG_DUMP_SHUTDOWN:
		return "Shutdown";
	default:
		return "Unknown";
	}
}
EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);

/**
 * kmsg_dump_desc - dump kernel log to kernel message dumpers.
 * @reason: the reason (oops, panic etc) for dumping
 * @desc: a short string to describe what caused the panic or oops. Can be
 *	NULL if no additional description is available.
 *
 * Call each of the registered dumper's dump() callback, which can
 * retrieve the kmsg records with kmsg_dump_get_line() or
 * kmsg_dump_get_buffer().
 */
void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
	struct kmsg_dumper *dumper;
	struct kmsg_dump_detail detail = {
		.reason = reason,
		.description = desc};

	rcu_read_lock();
	list_for_each_entry_rcu(dumper, &dump_list, list) {
		enum kmsg_dump_reason max_reason = dumper->max_reason;

		/*
		 * If the client has not provided a specific max_reason,
		 * default to KMSG_DUMP_OOPS, unless always_kmsg_dump was set.
		 */
		if (max_reason == KMSG_DUMP_UNDEF) {
			max_reason = always_kmsg_dump ? KMSG_DUMP_MAX :
				KMSG_DUMP_OOPS;
		}
		if (reason > max_reason)
			continue;

		/* Invoke dumper which will iterate over records. */
		dumper->dump(dumper, &detail);
	}
	rcu_read_unlock();
}

/**
 * kmsg_dump_get_line - retrieve one kmsg log line
 * @iter:   kmsg dump iterator
 * @syslog: include the "<4>" prefixes
 * @line:   buffer to copy the line to
 * @size:   maximum size of the buffer
 * @len:    length of line placed into buffer
 *
 * Start at the beginning of the kmsg buffer, with the oldest kmsg
 * record, and copy one record into the provided buffer.
 *
 * Consecutive calls will return the next available record moving
 * towards the end of the buffer with the youngest messages.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
			char *line, size_t size, size_t *len)
{
	u64 min_seq = latched_seq_read_nolock(&clear_seq);
	struct printk_info info;
	unsigned int line_count;
	struct printk_record r;
	size_t l = 0;
	bool ret = false;

	if (iter->cur_seq < min_seq)
		iter->cur_seq = min_seq;

	prb_rec_init_rd(&r, &info, line, size);

	/* Read text or count text lines? */
	if (line) {
		if (!prb_read_valid(prb, iter->cur_seq, &r))
			goto out;
		l = record_print_text(&r, syslog, printk_time);
	} else {
		if (!prb_read_valid_info(prb, iter->cur_seq,
					 &info, &line_count)) {
			goto out;
		}
		l = get_record_print_text_size(&info, line_count, syslog,
					       printk_time);
	}

	iter->cur_seq = r.info->seq + 1;
	ret = true;
out:
	if (len)
		*len = l;
	return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_line);

/**
 * kmsg_dump_get_buffer - copy kmsg log lines
 * @iter:    kmsg dump iterator
 * @syslog:  include the "<4>" prefixes
 * @buf:     buffer to copy the lines to
 * @size:    maximum size of the buffer
 * @len_out: length of the text placed into the buffer
 *
 * Start at the end of the kmsg buffer and fill the provided buffer with
 * as many of the *youngest* kmsg records as fit into it. If the buffer
 * is large enough, all available kmsg records will be copied with a
 * single call.
 *
 * Consecutive calls will fill the buffer with the next block of
 * available older records, not including the earlier retrieved ones.
 *
 * A return value of FALSE indicates that there are no more records to
 * read.
 */
bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
			  char *buf, size_t size, size_t *len_out)
{
	u64 min_seq = latched_seq_read_nolock(&clear_seq);
	struct printk_info info;
	struct printk_record r;
	u64 seq;
	u64 next_seq;
	size_t len = 0;
	bool ret = false;
	bool time = printk_time;

	if (!buf || !size)
		goto out;

	if (iter->cur_seq < min_seq)
		iter->cur_seq = min_seq;

	if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
		if (info.seq != iter->cur_seq) {
			/* Messages are gone, move to first available one. */
			iter->cur_seq = info.seq;
		}
	}

	/* Last entry? */
	if (iter->cur_seq >= iter->next_seq)
		goto out;

	/*
	 * Find the first record that fits, including all following records,
	 * into the user-provided buffer for this dump. Pass in size-1
	 * because this function (by way of record_print_text()) will
	 * not write more than size-1 bytes of text into @buf.
	 */
	seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
				     size - 1, syslog, time);

	/*
	 * The next kmsg_dump_get_buffer() invocation will dump the block of
	 * older records stored right before this one.
	 */
	next_seq = seq;

	prb_rec_init_rd(&r, &info, buf, size);

	prb_for_each_record(seq, prb, seq, &r) {
		if (r.info->seq >= iter->next_seq)
			break;

		len += record_print_text(&r, syslog, time);

		/* Adjust record to store to remaining buffer space. */
		prb_rec_init_rd(&r, &info, buf + len, size - len);
	}

	iter->next_seq = next_seq;
	ret = true;
out:
	if (len_out)
		*len_out = len;
	return ret;
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);

/**
 * kmsg_dump_rewind - reset the iterator
 * @iter: kmsg dump iterator
 *
 * Reset the dumper's iterator so that kmsg_dump_get_line() and
 * kmsg_dump_get_buffer() can be called again and used multiple
 * times within the same dumper.dump() callback.
 */
void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
{
	iter->cur_seq = latched_seq_read_nolock(&clear_seq);
	iter->next_seq = prb_next_seq(prb);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
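
/*
 * Illustrative sketch (not part of the kernel sources): a possible body
 * for the hypothetical my_dump() callback from the kmsg_dump_register()
 * example above, reading the log line by line from oldest to youngest:
 *
 *	static void my_dump(struct kmsg_dumper *dumper,
 *			    struct kmsg_dump_detail *detail)
 *	{
 *		struct kmsg_dump_iter iter;
 *		char line[256];
 *		size_t len;
 *
 *		kmsg_dump_rewind(&iter);
 *		while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
 *			my_store(line, len);	// hypothetical persistence hook
 *	}
 *
 * A dumper that instead wants only the youngest records that fit into a
 * fixed buffer would make a single kmsg_dump_get_buffer() call after the
 * rewind.
 */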
/**
 * console_try_replay_all - try to replay the kernel log on consoles
 *
 * Try to obtain a lock on the console subsystem and replay all
 * available records in the printk buffer on the consoles.
 * Does nothing if the lock is not obtained.
 *
 * Context: Any, except for NMI.
 */
void console_try_replay_all(void)
{
	struct console_flush_type ft;

	printk_get_console_flush_type(&ft);
	if (console_trylock()) {
		__console_rewind_all();
		if (ft.nbcon_atomic)
			nbcon_atomic_flush_pending();
		if (ft.nbcon_offload)
			nbcon_kthreads_wake();
		if (ft.legacy_offload)
			defer_console_output();
		/* Consoles are flushed as part of console_unlock(). */
		console_unlock();
	}
}
#endif

#ifdef CONFIG_SMP
static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);

/**
 * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
 *	spinning lock is not owned by any CPU.
 *
 * Context: Any context.
 */
void __printk_cpu_sync_wait(void)
{
	do {
		cpu_relax();
	} while (atomic_read(&printk_cpu_sync_owner) != -1);
}
EXPORT_SYMBOL(__printk_cpu_sync_wait);

/**
 * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
 *	spinning lock.
 *
 * If no processor has the lock, the calling processor takes the lock and
 * becomes the owner. If the calling processor is already the owner of the
 * lock, this function succeeds immediately.
 *
 * Context: Any context. Expects interrupts to be disabled.
 * Return: 1 on success, otherwise 0.
 */
int __printk_cpu_sync_try_get(void)
{
	int cpu;
	int old;

	cpu = smp_processor_id();

	/*
	 * Guarantee loads and stores from this CPU when it is the lock owner
	 * are _not_ visible to the previous lock owner. This pairs with
	 * __printk_cpu_sync_put:B.
	 *
	 * Memory barrier involvement:
	 *
	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
	 * then __printk_cpu_sync_put:A can never read from
	 * __printk_cpu_sync_try_get:B.
	 *
	 * Relies on:
	 *
	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
	 * of the previous CPU
	 *    matching
	 * ACQUIRE from __printk_cpu_sync_try_get:A to
	 * __printk_cpu_sync_try_get:B of this CPU
	 */
	old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
				     cpu); /* LMM(__printk_cpu_sync_try_get:A) */
	if (old == -1) {
		/*
		 * This CPU is now the owner and begins loading/storing
		 * data: LMM(__printk_cpu_sync_try_get:B)
		 */
		return 1;

	} else if (old == cpu) {
		/* This CPU is already the owner. */
		atomic_inc(&printk_cpu_sync_nested);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(__printk_cpu_sync_try_get);

/**
 * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
 *
 * The calling processor must be the owner of the lock.
 *
 * Context: Any context. Expects interrupts to be disabled.
 */
void __printk_cpu_sync_put(void)
{
	if (atomic_read(&printk_cpu_sync_nested)) {
		atomic_dec(&printk_cpu_sync_nested);
		return;
	}

	/*
	 * This CPU is finished loading/storing data:
	 * LMM(__printk_cpu_sync_put:A)
	 */

	/*
	 * Guarantee loads and stores from this CPU when it was the
	 * lock owner are visible to the next lock owner. This pairs
	 * with __printk_cpu_sync_try_get:A.
	 *
	 * Memory barrier involvement:
	 *
	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
	 * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
	 *
	 * Relies on:
	 *
	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
	 * of this CPU
	 *    matching
	 * ACQUIRE from __printk_cpu_sync_try_get:A to
	 * __printk_cpu_sync_try_get:B of the next CPU
	 */
	atomic_set_release(&printk_cpu_sync_owner,
			   -1); /* LMM(__printk_cpu_sync_put:B) */
}
EXPORT_SYMBOL(__printk_cpu_sync_put);
#endif /* CONFIG_SMP */
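
/*
 * Illustrative sketch (not part of the kernel sources): callers normally
 * take this lock through the printk_cpu_sync_get_irqsave() and
 * printk_cpu_sync_put_irqrestore() helpers, which disable interrupts and
 * spin via __printk_cpu_sync_wait() until __printk_cpu_sync_try_get()
 * succeeds, e.g. to keep a multi-line report from interleaving with the
 * output of other CPUs:
 *
 *	unsigned long flags;
 *
 *	printk_cpu_sync_get_irqsave(flags);
 *	pr_err("CPU state:\n");		// multi-line report stays together
 *	pr_err("  ...more lines...\n");
 *	printk_cpu_sync_put_irqrestore(flags);
 *
 * As with any busy-wait lock, the critical section should be kept as
 * short as possible.
 */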