1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/kernel/printk.c 4 * 5 * Copyright (C) 1991, 1992 Linus Torvalds 6 * 7 * Modified to make sys_syslog() more flexible: added commands to 8 * return the last 4k of kernel messages, regardless of whether 9 * they've been read or not. Added option to suppress kernel printk's 10 * to the console. Added hook for sending the console messages 11 * elsewhere, in preparation for a serial line console (someday). 12 * Ted Ts'o, 2/11/93. 13 * Modified for sysctl support, 1/8/97, Chris Horn. 14 * Fixed SMP synchronization, 08/08/99, Manfred Spraul 15 * manfred@colorfullife.com 16 * Rewrote bits to get rid of console_lock 17 * 01Mar01 Andrew Morton 18 */ 19 20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 21 22 #include <linux/kernel.h> 23 #include <linux/mm.h> 24 #include <linux/tty.h> 25 #include <linux/tty_driver.h> 26 #include <linux/console.h> 27 #include <linux/init.h> 28 #include <linux/jiffies.h> 29 #include <linux/nmi.h> 30 #include <linux/module.h> 31 #include <linux/moduleparam.h> 32 #include <linux/delay.h> 33 #include <linux/smp.h> 34 #include <linux/security.h> 35 #include <linux/memblock.h> 36 #include <linux/syscalls.h> 37 #include <linux/syscore_ops.h> 38 #include <linux/vmcore_info.h> 39 #include <linux/ratelimit.h> 40 #include <linux/kmsg_dump.h> 41 #include <linux/syslog.h> 42 #include <linux/cpu.h> 43 #include <linux/rculist.h> 44 #include <linux/poll.h> 45 #include <linux/irq_work.h> 46 #include <linux/ctype.h> 47 #include <linux/uio.h> 48 #include <linux/sched/clock.h> 49 #include <linux/sched/debug.h> 50 #include <linux/sched/task_stack.h> 51 #include <linux/panic.h> 52 53 #include <linux/uaccess.h> 54 #include <asm/sections.h> 55 56 #include <trace/events/initcall.h> 57 #define CREATE_TRACE_POINTS 58 #include <trace/events/printk.h> 59 60 #include "printk_ringbuffer.h" 61 #include "console_cmdline.h" 62 #include "braille.h" 63 #include "internal.h" 64 65 int console_printk[4] = { 66 CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */ 67 MESSAGE_LOGLEVEL_DEFAULT, /* default_message_loglevel */ 68 CONSOLE_LOGLEVEL_MIN, /* minimum_console_loglevel */ 69 CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */ 70 }; 71 EXPORT_SYMBOL_GPL(console_printk); 72 73 atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0); 74 EXPORT_SYMBOL(ignore_console_lock_warning); 75 76 EXPORT_TRACEPOINT_SYMBOL_GPL(console); 77 78 /* 79 * Low level drivers may need that to know if they can schedule in 80 * their unblank() callback or not. So let's export it. 81 */ 82 int oops_in_progress; 83 EXPORT_SYMBOL(oops_in_progress); 84 85 /* 86 * console_mutex protects console_list updates and console->flags updates. 87 * The flags are synchronized only for consoles that are registered, i.e. 88 * accessible via the console list. 89 */ 90 static DEFINE_MUTEX(console_mutex); 91 92 /* 93 * console_sem protects updates to console->seq 94 * and also provides serialization for console printing. 95 */ 96 static DEFINE_SEMAPHORE(console_sem, 1); 97 HLIST_HEAD(console_list); 98 EXPORT_SYMBOL_GPL(console_list); 99 DEFINE_STATIC_SRCU(console_srcu); 100 101 /* 102 * System may need to suppress printk message under certain 103 * circumstances, like after kernel panic happens. 
104 */ 105 int __read_mostly suppress_printk; 106 107 #ifdef CONFIG_LOCKDEP 108 static struct lockdep_map console_lock_dep_map = { 109 .name = "console_lock" 110 }; 111 112 void lockdep_assert_console_list_lock_held(void) 113 { 114 lockdep_assert_held(&console_mutex); 115 } 116 EXPORT_SYMBOL(lockdep_assert_console_list_lock_held); 117 #endif 118 119 #ifdef CONFIG_DEBUG_LOCK_ALLOC 120 bool console_srcu_read_lock_is_held(void) 121 { 122 return srcu_read_lock_held(&console_srcu); 123 } 124 EXPORT_SYMBOL(console_srcu_read_lock_is_held); 125 #endif 126 127 enum devkmsg_log_bits { 128 __DEVKMSG_LOG_BIT_ON = 0, 129 __DEVKMSG_LOG_BIT_OFF, 130 __DEVKMSG_LOG_BIT_LOCK, 131 }; 132 133 enum devkmsg_log_masks { 134 DEVKMSG_LOG_MASK_ON = BIT(__DEVKMSG_LOG_BIT_ON), 135 DEVKMSG_LOG_MASK_OFF = BIT(__DEVKMSG_LOG_BIT_OFF), 136 DEVKMSG_LOG_MASK_LOCK = BIT(__DEVKMSG_LOG_BIT_LOCK), 137 }; 138 139 /* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */ 140 #define DEVKMSG_LOG_MASK_DEFAULT 0 141 142 static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT; 143 144 static int __control_devkmsg(char *str) 145 { 146 size_t len; 147 148 if (!str) 149 return -EINVAL; 150 151 len = str_has_prefix(str, "on"); 152 if (len) { 153 devkmsg_log = DEVKMSG_LOG_MASK_ON; 154 return len; 155 } 156 157 len = str_has_prefix(str, "off"); 158 if (len) { 159 devkmsg_log = DEVKMSG_LOG_MASK_OFF; 160 return len; 161 } 162 163 len = str_has_prefix(str, "ratelimit"); 164 if (len) { 165 devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT; 166 return len; 167 } 168 169 return -EINVAL; 170 } 171 172 static int __init control_devkmsg(char *str) 173 { 174 if (__control_devkmsg(str) < 0) { 175 pr_warn("printk.devkmsg: bad option string '%s'\n", str); 176 return 1; 177 } 178 179 /* 180 * Set sysctl string accordingly: 181 */ 182 if (devkmsg_log == DEVKMSG_LOG_MASK_ON) 183 strscpy(devkmsg_log_str, "on"); 184 else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF) 185 strscpy(devkmsg_log_str, "off"); 186 /* else "ratelimit" which is set by default. */ 187 188 /* 189 * Sysctl cannot change it anymore. The kernel command line setting of 190 * this parameter is to force the setting to be permanent throughout the 191 * runtime of the system. This is a precation measure against userspace 192 * trying to be a smarta** and attempting to change it up on us. 193 */ 194 devkmsg_log |= DEVKMSG_LOG_MASK_LOCK; 195 196 return 1; 197 } 198 __setup("printk.devkmsg=", control_devkmsg); 199 200 char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit"; 201 #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL) 202 int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write, 203 void *buffer, size_t *lenp, loff_t *ppos) 204 { 205 char old_str[DEVKMSG_STR_MAX_SIZE]; 206 unsigned int old; 207 int err; 208 209 if (write) { 210 if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK) 211 return -EINVAL; 212 213 old = devkmsg_log; 214 strscpy(old_str, devkmsg_log_str); 215 } 216 217 err = proc_dostring(table, write, buffer, lenp, ppos); 218 if (err) 219 return err; 220 221 if (write) { 222 err = __control_devkmsg(devkmsg_log_str); 223 224 /* 225 * Do not accept an unknown string OR a known string with 226 * trailing crap... 227 */ 228 if (err < 0 || (err + 1 != *lenp)) { 229 230 /* ... and restore old setting. 
*/ 231 devkmsg_log = old; 232 strscpy(devkmsg_log_str, old_str); 233 234 return -EINVAL; 235 } 236 } 237 238 return 0; 239 } 240 #endif /* CONFIG_PRINTK && CONFIG_SYSCTL */ 241 242 /** 243 * console_list_lock - Lock the console list 244 * 245 * For console list or console->flags updates 246 */ 247 void console_list_lock(void) 248 __acquires(&console_mutex) 249 { 250 /* 251 * In unregister_console() and console_force_preferred_locked(), 252 * synchronize_srcu() is called with the console_list_lock held. 253 * Therefore it is not allowed that the console_list_lock is taken 254 * with the srcu_lock held. 255 * 256 * Detecting if this context is really in the read-side critical 257 * section is only possible if the appropriate debug options are 258 * enabled. 259 */ 260 WARN_ON_ONCE(debug_lockdep_rcu_enabled() && 261 srcu_read_lock_held(&console_srcu)); 262 263 mutex_lock(&console_mutex); 264 } 265 EXPORT_SYMBOL(console_list_lock); 266 267 /** 268 * console_list_unlock - Unlock the console list 269 * 270 * Counterpart to console_list_lock() 271 */ 272 void console_list_unlock(void) 273 __releases(&console_mutex) 274 { 275 mutex_unlock(&console_mutex); 276 } 277 EXPORT_SYMBOL(console_list_unlock); 278 279 /** 280 * console_srcu_read_lock - Register a new reader for the 281 * SRCU-protected console list 282 * 283 * Use for_each_console_srcu() to iterate the console list 284 * 285 * Context: Any context. 286 * Return: A cookie to pass to console_srcu_read_unlock(). 287 */ 288 int console_srcu_read_lock(void) 289 __acquires(&console_srcu) 290 { 291 return srcu_read_lock_nmisafe(&console_srcu); 292 } 293 EXPORT_SYMBOL(console_srcu_read_lock); 294 295 /** 296 * console_srcu_read_unlock - Unregister an old reader from 297 * the SRCU-protected console list 298 * @cookie: cookie returned from console_srcu_read_lock() 299 * 300 * Counterpart to console_srcu_read_lock() 301 */ 302 void console_srcu_read_unlock(int cookie) 303 __releases(&console_srcu) 304 { 305 srcu_read_unlock_nmisafe(&console_srcu, cookie); 306 } 307 EXPORT_SYMBOL(console_srcu_read_unlock); 308 309 /* 310 * Helper macros to handle lockdep when locking/unlocking console_sem. We use 311 * macros instead of functions so that _RET_IP_ contains useful information. 312 */ 313 #define down_console_sem() do { \ 314 down(&console_sem);\ 315 mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\ 316 } while (0) 317 318 static int __down_trylock_console_sem(unsigned long ip) 319 { 320 int lock_failed; 321 unsigned long flags; 322 323 /* 324 * Here and in __up_console_sem() we need to be in safe mode, 325 * because spindump/WARN/etc from under console ->lock will 326 * deadlock in printk()->down_trylock_console_sem() otherwise. 327 */ 328 printk_safe_enter_irqsave(flags); 329 lock_failed = down_trylock(&console_sem); 330 printk_safe_exit_irqrestore(flags); 331 332 if (lock_failed) 333 return 1; 334 mutex_acquire(&console_lock_dep_map, 0, 1, ip); 335 return 0; 336 } 337 #define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_) 338 339 static void __up_console_sem(unsigned long ip) 340 { 341 unsigned long flags; 342 343 mutex_release(&console_lock_dep_map, ip); 344 345 printk_safe_enter_irqsave(flags); 346 up(&console_sem); 347 printk_safe_exit_irqrestore(flags); 348 } 349 #define up_console_sem() __up_console_sem(_RET_IP_) 350 351 /* 352 * This is used for debugging the mess that is the VT code by 353 * keeping track if we have the console semaphore held. 
It's 354 * definitely not the perfect debug tool (we don't know if _WE_ 355 * hold it and are racing, but it helps tracking those weird code 356 * paths in the console code where we end up in places I want 357 * locked without the console semaphore held). 358 */ 359 static int console_locked; 360 361 /* 362 * Array of consoles built from command line options (console=) 363 */ 364 365 #define MAX_CMDLINECONSOLES 8 366 367 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; 368 369 static int preferred_console = -1; 370 int console_set_on_cmdline; 371 EXPORT_SYMBOL(console_set_on_cmdline); 372 373 /* Flag: console code may call schedule() */ 374 static int console_may_schedule; 375 376 enum con_msg_format_flags { 377 MSG_FORMAT_DEFAULT = 0, 378 MSG_FORMAT_SYSLOG = (1 << 0), 379 }; 380 381 static int console_msg_format = MSG_FORMAT_DEFAULT; 382 383 /* 384 * The printk log buffer consists of a sequenced collection of records, each 385 * containing variable length message text. Every record also contains its 386 * own meta-data (@info). 387 * 388 * Every record meta-data carries the timestamp in microseconds, as well as 389 * the standard userspace syslog level and syslog facility. The usual kernel 390 * messages use LOG_KERN; userspace-injected messages always carry a matching 391 * syslog facility, by default LOG_USER. The origin of every message can be 392 * reliably determined that way. 393 * 394 * The human readable log message of a record is available in @text, the 395 * length of the message text in @text_len. The stored message is not 396 * terminated. 397 * 398 * Optionally, a record can carry a dictionary of properties (key/value 399 * pairs), to provide userspace with a machine-readable message context. 400 * 401 * Examples for well-defined, commonly used property names are: 402 * DEVICE=b12:8 device identifier 403 * b12:8 block dev_t 404 * c127:3 char dev_t 405 * n8 netdev ifindex 406 * +sound:card0 subsystem:devname 407 * SUBSYSTEM=pci driver-core subsystem name 408 * 409 * Valid characters in property names are [a-zA-Z0-9.-_]. Property names 410 * and values are terminated by a '\0' character. 411 * 412 * Example of record values: 413 * record.text_buf = "it's a line" (unterminated) 414 * record.info.seq = 56 415 * record.info.ts_nsec = 36863 416 * record.info.text_len = 11 417 * record.info.facility = 0 (LOG_KERN) 418 * record.info.flags = 0 419 * record.info.level = 3 (LOG_ERR) 420 * record.info.caller_id = 299 (task 299) 421 * record.info.dev_info.subsystem = "pci" (terminated) 422 * record.info.dev_info.device = "+pci:0000:00:01.0" (terminated) 423 * 424 * The 'struct printk_info' buffer must never be directly exported to 425 * userspace, it is a kernel-private implementation detail that might 426 * need to be changed in the future, when the requirements change. 427 * 428 * /dev/kmsg exports the structured data in the following line format: 429 * "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n" 430 * 431 * Users of the export format should ignore possible additional values 432 * separated by ',', and find the message after the ';' character. 433 * 434 * The optional key/value pairs are attached as continuation lines starting 435 * with a space character and terminated by a newline. All possible 436 * non-prinatable characters are escaped in the "\xff" notation. 437 */ 438 439 /* syslog_lock protects syslog_* variables and write access to clear_seq. 
*/ 440 static DEFINE_MUTEX(syslog_lock); 441 442 /* 443 * Specifies if a legacy console is registered. If legacy consoles are 444 * present, it is necessary to perform the console lock/unlock dance 445 * whenever console flushing should occur. 446 */ 447 bool have_legacy_console; 448 449 /* 450 * Specifies if an nbcon console is registered. If nbcon consoles are present, 451 * synchronous printing of legacy consoles will not occur during panic until 452 * the backtrace has been stored to the ringbuffer. 453 */ 454 bool have_nbcon_console; 455 456 /* 457 * Specifies if a boot console is registered. If boot consoles are present, 458 * nbcon consoles cannot print simultaneously and must be synchronized by 459 * the console lock. This is because boot consoles and nbcon consoles may 460 * have mapped the same hardware. 461 */ 462 bool have_boot_console; 463 464 /* See printk_legacy_allow_panic_sync() for details. */ 465 bool legacy_allow_panic_sync; 466 467 /* Avoid using irq_work when suspending. */ 468 bool console_irqwork_blocked; 469 470 #ifdef CONFIG_PRINTK 471 DECLARE_WAIT_QUEUE_HEAD(log_wait); 472 static DECLARE_WAIT_QUEUE_HEAD(legacy_wait); 473 /* All 3 protected by @syslog_lock. */ 474 /* the next printk record to read by syslog(READ) or /proc/kmsg */ 475 static u64 syslog_seq; 476 static size_t syslog_partial; 477 static bool syslog_time; 478 479 /* True when _all_ printer threads are available for printing. */ 480 bool printk_kthreads_running; 481 482 struct latched_seq { 483 seqcount_latch_t latch; 484 u64 val[2]; 485 }; 486 487 /* 488 * The next printk record to read after the last 'clear' command. There are 489 * two copies (updated with seqcount_latch) so that reads can locklessly 490 * access a valid value. Writers are synchronized by @syslog_lock. 491 */ 492 static struct latched_seq clear_seq = { 493 .latch = SEQCNT_LATCH_ZERO(clear_seq.latch), 494 .val[0] = 0, 495 .val[1] = 0, 496 }; 497 498 #define LOG_LEVEL(v) ((v) & 0x07) 499 #define LOG_FACILITY(v) ((v) >> 3 & 0xff) 500 501 /* record buffer */ 502 #define LOG_ALIGN __alignof__(unsigned long) 503 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) 504 #define LOG_BUF_LEN_MAX ((u32)1 << 31) 505 static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); 506 static char *log_buf = __log_buf; 507 static u32 log_buf_len = __LOG_BUF_LEN; 508 509 /* 510 * Define the average message size. This only affects the number of 511 * descriptors that will be available. Underestimating is better than 512 * overestimating (too many available descriptors is better than not enough). 513 */ 514 #define PRB_AVGBITS 5 /* 32 character average length */ 515 516 #if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS 517 #error CONFIG_LOG_BUF_SHIFT value too small. 518 #endif 519 _DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS, 520 PRB_AVGBITS, &__log_buf[0]); 521 522 static struct printk_ringbuffer printk_rb_dynamic; 523 524 struct printk_ringbuffer *prb = &printk_rb_static; 525 526 /* 527 * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before 528 * per_cpu_areas are initialised. This variable is set to true when 529 * it's safe to access per-CPU data. 530 */ 531 static bool __printk_percpu_data_ready __ro_after_init; 532 533 bool printk_percpu_data_ready(void) 534 { 535 return __printk_percpu_data_ready; 536 } 537 538 /* Must be called under syslog_lock. 
*/ 539 static void latched_seq_write(struct latched_seq *ls, u64 val) 540 { 541 write_seqcount_latch_begin(&ls->latch); 542 ls->val[0] = val; 543 write_seqcount_latch(&ls->latch); 544 ls->val[1] = val; 545 write_seqcount_latch_end(&ls->latch); 546 } 547 548 /* Can be called from any context. */ 549 static u64 latched_seq_read_nolock(struct latched_seq *ls) 550 { 551 unsigned int seq; 552 unsigned int idx; 553 u64 val; 554 555 do { 556 seq = read_seqcount_latch(&ls->latch); 557 idx = seq & 0x1; 558 val = ls->val[idx]; 559 } while (read_seqcount_latch_retry(&ls->latch, seq)); 560 561 return val; 562 } 563 564 /* Return log buffer address */ 565 char *log_buf_addr_get(void) 566 { 567 return log_buf; 568 } 569 570 /* Return log buffer size */ 571 u32 log_buf_len_get(void) 572 { 573 return log_buf_len; 574 } 575 576 /* 577 * Define how much of the log buffer we could take at maximum. The value 578 * must be greater than two. Note that only half of the buffer is available 579 * when the index points to the middle. 580 */ 581 #define MAX_LOG_TAKE_PART 4 582 static const char trunc_msg[] = "<truncated>"; 583 584 static void truncate_msg(u16 *text_len, u16 *trunc_msg_len) 585 { 586 /* 587 * The message should not take the whole buffer. Otherwise, it might 588 * get removed too soon. 589 */ 590 u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART; 591 592 if (*text_len > max_text_len) 593 *text_len = max_text_len; 594 595 /* enable the warning message (if there is room) */ 596 *trunc_msg_len = strlen(trunc_msg); 597 if (*text_len >= *trunc_msg_len) 598 *text_len -= *trunc_msg_len; 599 else 600 *trunc_msg_len = 0; 601 } 602 603 int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT); 604 605 static int syslog_action_restricted(int type) 606 { 607 if (dmesg_restrict) 608 return 1; 609 /* 610 * Unless restricted, we allow "read all" and "get buffer size" 611 * for everybody. 612 */ 613 return type != SYSLOG_ACTION_READ_ALL && 614 type != SYSLOG_ACTION_SIZE_BUFFER; 615 } 616 617 static int check_syslog_permissions(int type, int source) 618 { 619 /* 620 * If this is from /proc/kmsg and we've already opened it, then we've 621 * already done the capabilities checks at open time. 622 */ 623 if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN) 624 goto ok; 625 626 if (syslog_action_restricted(type)) { 627 if (capable(CAP_SYSLOG)) 628 goto ok; 629 return -EPERM; 630 } 631 ok: 632 return security_syslog(type); 633 } 634 635 static void append_char(char **pp, char *e, char c) 636 { 637 if (*pp < e) 638 *(*pp)++ = c; 639 } 640 641 static ssize_t info_print_ext_header(char *buf, size_t size, 642 struct printk_info *info) 643 { 644 u64 ts_usec = info->ts_nsec; 645 char caller[20]; 646 #ifdef CONFIG_PRINTK_CALLER 647 u32 id = info->caller_id; 648 649 snprintf(caller, sizeof(caller), ",caller=%c%u", 650 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000); 651 #else 652 caller[0] = '\0'; 653 #endif 654 655 do_div(ts_usec, 1000); 656 657 return scnprintf(buf, size, "%u,%llu,%llu,%c%s;", 658 (info->facility << 3) | info->level, info->seq, 659 ts_usec, info->flags & LOG_CONT ? 
'c' : '-', caller); 660 } 661 662 static ssize_t msg_add_ext_text(char *buf, size_t size, 663 const char *text, size_t text_len, 664 unsigned char endc) 665 { 666 char *p = buf, *e = buf + size; 667 size_t i; 668 669 /* escape non-printable characters */ 670 for (i = 0; i < text_len; i++) { 671 unsigned char c = text[i]; 672 673 if (c < ' ' || c >= 127 || c == '\\') 674 p += scnprintf(p, e - p, "\\x%02x", c); 675 else 676 append_char(&p, e, c); 677 } 678 append_char(&p, e, endc); 679 680 return p - buf; 681 } 682 683 static ssize_t msg_add_dict_text(char *buf, size_t size, 684 const char *key, const char *val) 685 { 686 size_t val_len = strlen(val); 687 ssize_t len; 688 689 if (!val_len) 690 return 0; 691 692 len = msg_add_ext_text(buf, size, "", 0, ' '); /* dict prefix */ 693 len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '='); 694 len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n'); 695 696 return len; 697 } 698 699 static ssize_t msg_print_ext_body(char *buf, size_t size, 700 char *text, size_t text_len, 701 struct dev_printk_info *dev_info) 702 { 703 ssize_t len; 704 705 len = msg_add_ext_text(buf, size, text, text_len, '\n'); 706 707 if (!dev_info) 708 goto out; 709 710 len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM", 711 dev_info->subsystem); 712 len += msg_add_dict_text(buf + len, size - len, "DEVICE", 713 dev_info->device); 714 out: 715 return len; 716 } 717 718 /* /dev/kmsg - userspace message inject/listen interface */ 719 struct devkmsg_user { 720 atomic64_t seq; 721 struct ratelimit_state rs; 722 struct mutex lock; 723 struct printk_buffers pbufs; 724 }; 725 726 static __printf(3, 4) __cold 727 int devkmsg_emit(int facility, int level, const char *fmt, ...) 728 { 729 va_list args; 730 int r; 731 732 va_start(args, fmt); 733 r = vprintk_emit(facility, level, NULL, fmt, args); 734 va_end(args); 735 736 return r; 737 } 738 739 static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from) 740 { 741 char *buf, *line; 742 int level = default_message_loglevel; 743 int facility = 1; /* LOG_USER */ 744 struct file *file = iocb->ki_filp; 745 struct devkmsg_user *user = file->private_data; 746 size_t len = iov_iter_count(from); 747 ssize_t ret = len; 748 749 if (len > PRINTKRB_RECORD_MAX) 750 return -EINVAL; 751 752 /* Ignore when user logging is disabled. */ 753 if (devkmsg_log & DEVKMSG_LOG_MASK_OFF) 754 return len; 755 756 /* Ratelimit when not explicitly enabled. */ 757 if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) { 758 if (!___ratelimit(&user->rs, current->comm)) 759 return ret; 760 } 761 762 buf = kmalloc(len+1, GFP_KERNEL); 763 if (buf == NULL) 764 return -ENOMEM; 765 766 buf[len] = '\0'; 767 if (!copy_from_iter_full(buf, len, from)) { 768 kfree(buf); 769 return -EFAULT; 770 } 771 772 /* 773 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace 774 * the decimal value represents 32bit, the lower 3 bit are the log 775 * level, the rest are the log facility. 776 * 777 * If no prefix or no userspace facility is specified, we 778 * enforce LOG_USER, to be able to reliably distinguish 779 * kernel-generated messages from userspace-injected ones. 
780 */ 781 line = buf; 782 if (line[0] == '<') { 783 char *endp = NULL; 784 unsigned int u; 785 786 u = simple_strtoul(line + 1, &endp, 10); 787 if (endp && endp[0] == '>') { 788 level = LOG_LEVEL(u); 789 if (LOG_FACILITY(u) != 0) 790 facility = LOG_FACILITY(u); 791 endp++; 792 line = endp; 793 } 794 } 795 796 devkmsg_emit(facility, level, "%s", line); 797 kfree(buf); 798 return ret; 799 } 800 801 static ssize_t devkmsg_read(struct file *file, char __user *buf, 802 size_t count, loff_t *ppos) 803 { 804 struct devkmsg_user *user = file->private_data; 805 char *outbuf = &user->pbufs.outbuf[0]; 806 struct printk_message pmsg = { 807 .pbufs = &user->pbufs, 808 }; 809 ssize_t ret; 810 811 ret = mutex_lock_interruptible(&user->lock); 812 if (ret) 813 return ret; 814 815 if (!printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, false)) { 816 if (file->f_flags & O_NONBLOCK) { 817 ret = -EAGAIN; 818 goto out; 819 } 820 821 /* 822 * Guarantee this task is visible on the waitqueue before 823 * checking the wake condition. 824 * 825 * The full memory barrier within set_current_state() of 826 * prepare_to_wait_event() pairs with the full memory barrier 827 * within wq_has_sleeper(). 828 * 829 * This pairs with __wake_up_klogd:A. 830 */ 831 ret = wait_event_interruptible(log_wait, 832 printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, 833 false)); /* LMM(devkmsg_read:A) */ 834 if (ret) 835 goto out; 836 } 837 838 if (pmsg.dropped) { 839 /* our last seen message is gone, return error and reset */ 840 atomic64_set(&user->seq, pmsg.seq); 841 ret = -EPIPE; 842 goto out; 843 } 844 845 atomic64_set(&user->seq, pmsg.seq + 1); 846 847 if (pmsg.outbuf_len > count) { 848 ret = -EINVAL; 849 goto out; 850 } 851 852 if (copy_to_user(buf, outbuf, pmsg.outbuf_len)) { 853 ret = -EFAULT; 854 goto out; 855 } 856 ret = pmsg.outbuf_len; 857 out: 858 mutex_unlock(&user->lock); 859 return ret; 860 } 861 862 /* 863 * Be careful when modifying this function!!! 864 * 865 * Only few operations are supported because the device works only with the 866 * entire variable length messages (records). Non-standard values are 867 * returned in the other cases and has been this way for quite some time. 868 * User space applications might depend on this behavior. 869 */ 870 static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) 871 { 872 struct devkmsg_user *user = file->private_data; 873 loff_t ret = 0; 874 875 if (offset) 876 return -ESPIPE; 877 878 switch (whence) { 879 case SEEK_SET: 880 /* the first record */ 881 atomic64_set(&user->seq, prb_first_valid_seq(prb)); 882 break; 883 case SEEK_DATA: 884 /* 885 * The first record after the last SYSLOG_ACTION_CLEAR, 886 * like issued by 'dmesg -c'. Reading /dev/kmsg itself 887 * changes no global state, and does not clear anything. 
888 */ 889 atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq)); 890 break; 891 case SEEK_END: 892 /* after the last record */ 893 atomic64_set(&user->seq, prb_next_seq(prb)); 894 break; 895 default: 896 ret = -EINVAL; 897 } 898 return ret; 899 } 900 901 static __poll_t devkmsg_poll(struct file *file, poll_table *wait) 902 { 903 struct devkmsg_user *user = file->private_data; 904 struct printk_info info; 905 __poll_t ret = 0; 906 907 poll_wait(file, &log_wait, wait); 908 909 if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) { 910 /* return error when data has vanished underneath us */ 911 if (info.seq != atomic64_read(&user->seq)) 912 ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI; 913 else 914 ret = EPOLLIN|EPOLLRDNORM; 915 } 916 917 return ret; 918 } 919 920 static int devkmsg_open(struct inode *inode, struct file *file) 921 { 922 struct devkmsg_user *user; 923 int err; 924 925 if (devkmsg_log & DEVKMSG_LOG_MASK_OFF) 926 return -EPERM; 927 928 /* write-only does not need any file context */ 929 if ((file->f_flags & O_ACCMODE) != O_WRONLY) { 930 err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL, 931 SYSLOG_FROM_READER); 932 if (err) 933 return err; 934 } 935 936 user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL); 937 if (!user) 938 return -ENOMEM; 939 940 ratelimit_default_init(&user->rs); 941 ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE); 942 943 mutex_init(&user->lock); 944 945 atomic64_set(&user->seq, prb_first_valid_seq(prb)); 946 947 file->private_data = user; 948 return 0; 949 } 950 951 static int devkmsg_release(struct inode *inode, struct file *file) 952 { 953 struct devkmsg_user *user = file->private_data; 954 955 ratelimit_state_exit(&user->rs); 956 957 mutex_destroy(&user->lock); 958 kvfree(user); 959 return 0; 960 } 961 962 const struct file_operations kmsg_fops = { 963 .open = devkmsg_open, 964 .read = devkmsg_read, 965 .write_iter = devkmsg_write, 966 .llseek = devkmsg_llseek, 967 .poll = devkmsg_poll, 968 .release = devkmsg_release, 969 }; 970 971 #ifdef CONFIG_VMCORE_INFO 972 /* 973 * This appends the listed symbols to /proc/vmcore 974 * 975 * /proc/vmcore is used by various utilities, like crash and makedumpfile to 976 * obtain access to symbols that are otherwise very difficult to locate. These 977 * symbols are specifically used so that utilities can access and extract the 978 * dmesg log from a vmcore file after a crash. 979 */ 980 void log_buf_vmcoreinfo_setup(void) 981 { 982 struct dev_printk_info *dev_info = NULL; 983 984 VMCOREINFO_SYMBOL(prb); 985 VMCOREINFO_SYMBOL(printk_rb_static); 986 VMCOREINFO_SYMBOL(clear_seq); 987 988 /* 989 * Export struct size and field offsets. User space tools can 990 * parse it and detect any changes to structure down the line. 
991 */ 992 993 VMCOREINFO_STRUCT_SIZE(printk_ringbuffer); 994 VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring); 995 VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring); 996 VMCOREINFO_OFFSET(printk_ringbuffer, fail); 997 998 VMCOREINFO_STRUCT_SIZE(prb_desc_ring); 999 VMCOREINFO_OFFSET(prb_desc_ring, count_bits); 1000 VMCOREINFO_OFFSET(prb_desc_ring, descs); 1001 VMCOREINFO_OFFSET(prb_desc_ring, infos); 1002 VMCOREINFO_OFFSET(prb_desc_ring, head_id); 1003 VMCOREINFO_OFFSET(prb_desc_ring, tail_id); 1004 1005 VMCOREINFO_STRUCT_SIZE(prb_desc); 1006 VMCOREINFO_OFFSET(prb_desc, state_var); 1007 VMCOREINFO_OFFSET(prb_desc, text_blk_lpos); 1008 1009 VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos); 1010 VMCOREINFO_OFFSET(prb_data_blk_lpos, begin); 1011 VMCOREINFO_OFFSET(prb_data_blk_lpos, next); 1012 1013 VMCOREINFO_STRUCT_SIZE(printk_info); 1014 VMCOREINFO_OFFSET(printk_info, seq); 1015 VMCOREINFO_OFFSET(printk_info, ts_nsec); 1016 VMCOREINFO_OFFSET(printk_info, text_len); 1017 VMCOREINFO_OFFSET(printk_info, caller_id); 1018 VMCOREINFO_OFFSET(printk_info, dev_info); 1019 1020 VMCOREINFO_STRUCT_SIZE(dev_printk_info); 1021 VMCOREINFO_OFFSET(dev_printk_info, subsystem); 1022 VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem)); 1023 VMCOREINFO_OFFSET(dev_printk_info, device); 1024 VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device)); 1025 1026 VMCOREINFO_STRUCT_SIZE(prb_data_ring); 1027 VMCOREINFO_OFFSET(prb_data_ring, size_bits); 1028 VMCOREINFO_OFFSET(prb_data_ring, data); 1029 VMCOREINFO_OFFSET(prb_data_ring, head_lpos); 1030 VMCOREINFO_OFFSET(prb_data_ring, tail_lpos); 1031 1032 VMCOREINFO_SIZE(atomic_long_t); 1033 VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter); 1034 1035 VMCOREINFO_STRUCT_SIZE(latched_seq); 1036 VMCOREINFO_OFFSET(latched_seq, val); 1037 } 1038 #endif 1039 1040 /* requested log_buf_len from kernel cmdline */ 1041 static unsigned long __initdata new_log_buf_len; 1042 1043 /* we practice scaling the ring buffer by powers of 2 */ 1044 static void __init log_buf_len_update(u64 size) 1045 { 1046 if (size > (u64)LOG_BUF_LEN_MAX) { 1047 size = (u64)LOG_BUF_LEN_MAX; 1048 pr_err("log_buf over 2G is not supported.\n"); 1049 } 1050 1051 if (size) 1052 size = roundup_pow_of_two(size); 1053 if (size > log_buf_len) 1054 new_log_buf_len = (unsigned long)size; 1055 } 1056 1057 /* save requested log_buf_len since it's too early to process it */ 1058 static int __init log_buf_len_setup(char *str) 1059 { 1060 u64 size; 1061 1062 if (!str) 1063 return -EINVAL; 1064 1065 size = memparse(str, &str); 1066 1067 log_buf_len_update(size); 1068 1069 return 0; 1070 } 1071 early_param("log_buf_len", log_buf_len_setup); 1072 1073 #ifdef CONFIG_SMP 1074 #define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT) 1075 1076 static void __init log_buf_add_cpu(void) 1077 { 1078 unsigned int cpu_extra; 1079 1080 /* 1081 * archs should set up cpu_possible_bits properly with 1082 * set_cpu_possible() after setup_arch() but just in 1083 * case lets ensure this is valid. 
1084 */ 1085 if (num_possible_cpus() == 1) 1086 return; 1087 1088 cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN; 1089 1090 /* by default this will only continue through for large > 64 CPUs */ 1091 if (cpu_extra <= __LOG_BUF_LEN / 2) 1092 return; 1093 1094 pr_info("log_buf_len individual max cpu contribution: %d bytes\n", 1095 __LOG_CPU_MAX_BUF_LEN); 1096 pr_info("log_buf_len total cpu_extra contributions: %d bytes\n", 1097 cpu_extra); 1098 pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN); 1099 1100 log_buf_len_update(cpu_extra + __LOG_BUF_LEN); 1101 } 1102 #else /* !CONFIG_SMP */ 1103 static inline void log_buf_add_cpu(void) {} 1104 #endif /* CONFIG_SMP */ 1105 1106 static void __init set_percpu_data_ready(void) 1107 { 1108 __printk_percpu_data_ready = true; 1109 } 1110 1111 static unsigned int __init add_to_rb(struct printk_ringbuffer *rb, 1112 struct printk_record *r) 1113 { 1114 struct prb_reserved_entry e; 1115 struct printk_record dest_r; 1116 1117 prb_rec_init_wr(&dest_r, r->info->text_len); 1118 1119 if (!prb_reserve(&e, rb, &dest_r)) 1120 return 0; 1121 1122 memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len); 1123 dest_r.info->text_len = r->info->text_len; 1124 dest_r.info->facility = r->info->facility; 1125 dest_r.info->level = r->info->level; 1126 dest_r.info->flags = r->info->flags; 1127 dest_r.info->ts_nsec = r->info->ts_nsec; 1128 dest_r.info->caller_id = r->info->caller_id; 1129 memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info)); 1130 1131 prb_final_commit(&e); 1132 1133 return prb_record_text_space(&e); 1134 } 1135 1136 static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata; 1137 1138 static void print_log_buf_usage_stats(void) 1139 { 1140 unsigned int descs_count = log_buf_len >> PRB_AVGBITS; 1141 size_t meta_data_size; 1142 1143 meta_data_size = descs_count * (sizeof(struct prb_desc) + sizeof(struct printk_info)); 1144 1145 pr_info("log buffer data + meta data: %u + %zu = %zu bytes\n", 1146 log_buf_len, meta_data_size, log_buf_len + meta_data_size); 1147 } 1148 1149 void __init setup_log_buf(int early) 1150 { 1151 struct printk_info *new_infos; 1152 unsigned int new_descs_count; 1153 struct prb_desc *new_descs; 1154 struct printk_info info; 1155 struct printk_record r; 1156 unsigned int text_size; 1157 size_t new_descs_size; 1158 size_t new_infos_size; 1159 unsigned long flags; 1160 char *new_log_buf; 1161 unsigned int free; 1162 u64 seq; 1163 1164 /* 1165 * Some archs call setup_log_buf() multiple times - first is very 1166 * early, e.g. from setup_arch(), and second - when percpu_areas 1167 * are initialised. 1168 */ 1169 if (!early) 1170 set_percpu_data_ready(); 1171 1172 if (log_buf != __log_buf) 1173 return; 1174 1175 if (!early && !new_log_buf_len) 1176 log_buf_add_cpu(); 1177 1178 if (!new_log_buf_len) { 1179 /* Show the memory stats only once. 
*/ 1180 if (!early) 1181 goto out; 1182 1183 return; 1184 } 1185 1186 new_descs_count = new_log_buf_len >> PRB_AVGBITS; 1187 if (new_descs_count == 0) { 1188 pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len); 1189 goto out; 1190 } 1191 1192 new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN); 1193 if (unlikely(!new_log_buf)) { 1194 pr_err("log_buf_len: %lu text bytes not available\n", 1195 new_log_buf_len); 1196 goto out; 1197 } 1198 1199 new_descs_size = new_descs_count * sizeof(struct prb_desc); 1200 new_descs = memblock_alloc(new_descs_size, LOG_ALIGN); 1201 if (unlikely(!new_descs)) { 1202 pr_err("log_buf_len: %zu desc bytes not available\n", 1203 new_descs_size); 1204 goto err_free_log_buf; 1205 } 1206 1207 new_infos_size = new_descs_count * sizeof(struct printk_info); 1208 new_infos = memblock_alloc(new_infos_size, LOG_ALIGN); 1209 if (unlikely(!new_infos)) { 1210 pr_err("log_buf_len: %zu info bytes not available\n", 1211 new_infos_size); 1212 goto err_free_descs; 1213 } 1214 1215 prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf)); 1216 1217 prb_init(&printk_rb_dynamic, 1218 new_log_buf, ilog2(new_log_buf_len), 1219 new_descs, ilog2(new_descs_count), 1220 new_infos); 1221 1222 local_irq_save(flags); 1223 1224 log_buf_len = new_log_buf_len; 1225 log_buf = new_log_buf; 1226 new_log_buf_len = 0; 1227 1228 free = __LOG_BUF_LEN; 1229 prb_for_each_record(0, &printk_rb_static, seq, &r) { 1230 text_size = add_to_rb(&printk_rb_dynamic, &r); 1231 if (text_size > free) 1232 free = 0; 1233 else 1234 free -= text_size; 1235 } 1236 1237 prb = &printk_rb_dynamic; 1238 1239 local_irq_restore(flags); 1240 1241 /* 1242 * Copy any remaining messages that might have appeared from 1243 * NMI context after copying but before switching to the 1244 * dynamic buffer. 1245 */ 1246 prb_for_each_record(seq, &printk_rb_static, seq, &r) { 1247 text_size = add_to_rb(&printk_rb_dynamic, &r); 1248 if (text_size > free) 1249 free = 0; 1250 else 1251 free -= text_size; 1252 } 1253 1254 if (seq != prb_next_seq(&printk_rb_static)) { 1255 pr_err("dropped %llu messages\n", 1256 prb_next_seq(&printk_rb_static) - seq); 1257 } 1258 1259 print_log_buf_usage_stats(); 1260 pr_info("early log buf free: %u(%u%%)\n", 1261 free, (free * 100) / __LOG_BUF_LEN); 1262 return; 1263 1264 err_free_descs: 1265 memblock_free(new_descs, new_descs_size); 1266 err_free_log_buf: 1267 memblock_free(new_log_buf, new_log_buf_len); 1268 out: 1269 print_log_buf_usage_stats(); 1270 } 1271 1272 static bool __read_mostly ignore_loglevel; 1273 1274 static int __init ignore_loglevel_setup(char *str) 1275 { 1276 ignore_loglevel = true; 1277 pr_info("debug: ignoring loglevel setting.\n"); 1278 1279 return 0; 1280 } 1281 1282 early_param("ignore_loglevel", ignore_loglevel_setup); 1283 module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR); 1284 MODULE_PARM_DESC(ignore_loglevel, 1285 "ignore loglevel setting (prints all kernel messages to the console)"); 1286 1287 static bool suppress_message_printing(int level) 1288 { 1289 return (level >= console_loglevel && !ignore_loglevel); 1290 } 1291 1292 #ifdef CONFIG_BOOT_PRINTK_DELAY 1293 1294 static int boot_delay; /* msecs delay after each printk during bootup */ 1295 static unsigned long long loops_per_msec; /* based on boot_delay */ 1296 1297 static int __init boot_delay_setup(char *str) 1298 { 1299 unsigned long lpj; 1300 1301 lpj = preset_lpj ? 
preset_lpj : 1000000; /* some guess */ 1302 loops_per_msec = (unsigned long long)lpj / 1000 * HZ; 1303 1304 get_option(&str, &boot_delay); 1305 if (boot_delay > 10 * 1000) 1306 boot_delay = 0; 1307 1308 pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, " 1309 "HZ: %d, loops_per_msec: %llu\n", 1310 boot_delay, preset_lpj, lpj, HZ, loops_per_msec); 1311 return 0; 1312 } 1313 early_param("boot_delay", boot_delay_setup); 1314 1315 static void boot_delay_msec(int level) 1316 { 1317 unsigned long long k; 1318 unsigned long timeout; 1319 bool suppress = !is_printk_force_console() && 1320 suppress_message_printing(level); 1321 1322 if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING) || suppress) 1323 return; 1324 1325 k = (unsigned long long)loops_per_msec * boot_delay; 1326 1327 timeout = jiffies + msecs_to_jiffies(boot_delay); 1328 while (k) { 1329 k--; 1330 cpu_relax(); 1331 /* 1332 * use (volatile) jiffies to prevent 1333 * compiler reduction; loop termination via jiffies 1334 * is secondary and may or may not happen. 1335 */ 1336 if (time_after(jiffies, timeout)) 1337 break; 1338 touch_nmi_watchdog(); 1339 } 1340 } 1341 #else 1342 static inline void boot_delay_msec(int level) 1343 { 1344 } 1345 #endif 1346 1347 static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME); 1348 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR); 1349 1350 static size_t print_syslog(unsigned int level, char *buf) 1351 { 1352 return sprintf(buf, "<%u>", level); 1353 } 1354 1355 static size_t print_time(u64 ts, char *buf) 1356 { 1357 unsigned long rem_nsec = do_div(ts, 1000000000); 1358 1359 return sprintf(buf, "[%5lu.%06lu]", 1360 (unsigned long)ts, rem_nsec / 1000); 1361 } 1362 1363 #ifdef CONFIG_PRINTK_CALLER 1364 static size_t print_caller(u32 id, char *buf) 1365 { 1366 char caller[12]; 1367 1368 snprintf(caller, sizeof(caller), "%c%u", 1369 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000); 1370 return sprintf(buf, "[%6s]", caller); 1371 } 1372 #else 1373 #define print_caller(id, buf) 0 1374 #endif 1375 1376 static size_t info_print_prefix(const struct printk_info *info, bool syslog, 1377 bool time, char *buf) 1378 { 1379 size_t len = 0; 1380 1381 if (syslog) 1382 len = print_syslog((info->facility << 3) | info->level, buf); 1383 1384 if (time) 1385 len += print_time(info->ts_nsec, buf + len); 1386 1387 len += print_caller(info->caller_id, buf + len); 1388 1389 if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) { 1390 buf[len++] = ' '; 1391 buf[len] = '\0'; 1392 } 1393 1394 return len; 1395 } 1396 1397 /* 1398 * Prepare the record for printing. The text is shifted within the given 1399 * buffer to avoid a need for another one. The following operations are 1400 * done: 1401 * 1402 * - Add prefix for each line. 1403 * - Drop truncated lines that no longer fit into the buffer. 1404 * - Add the trailing newline that has been removed in vprintk_store(). 1405 * - Add a string terminator. 1406 * 1407 * Since the produced string is always terminated, the maximum possible 1408 * return value is @r->text_buf_size - 1; 1409 * 1410 * Return: The length of the updated/prepared text, including the added 1411 * prefixes and the newline. The terminator is not counted. The dropped 1412 * line(s) are not counted. 
1413 */ 1414 static size_t record_print_text(struct printk_record *r, bool syslog, 1415 bool time) 1416 { 1417 size_t text_len = r->info->text_len; 1418 size_t buf_size = r->text_buf_size; 1419 char *text = r->text_buf; 1420 char prefix[PRINTK_PREFIX_MAX]; 1421 bool truncated = false; 1422 size_t prefix_len; 1423 size_t line_len; 1424 size_t len = 0; 1425 char *next; 1426 1427 /* 1428 * If the message was truncated because the buffer was not large 1429 * enough, treat the available text as if it were the full text. 1430 */ 1431 if (text_len > buf_size) 1432 text_len = buf_size; 1433 1434 prefix_len = info_print_prefix(r->info, syslog, time, prefix); 1435 1436 /* 1437 * @text_len: bytes of unprocessed text 1438 * @line_len: bytes of current line _without_ newline 1439 * @text: pointer to beginning of current line 1440 * @len: number of bytes prepared in r->text_buf 1441 */ 1442 for (;;) { 1443 next = memchr(text, '\n', text_len); 1444 if (next) { 1445 line_len = next - text; 1446 } else { 1447 /* Drop truncated line(s). */ 1448 if (truncated) 1449 break; 1450 line_len = text_len; 1451 } 1452 1453 /* 1454 * Truncate the text if there is not enough space to add the 1455 * prefix and a trailing newline and a terminator. 1456 */ 1457 if (len + prefix_len + text_len + 1 + 1 > buf_size) { 1458 /* Drop even the current line if no space. */ 1459 if (len + prefix_len + line_len + 1 + 1 > buf_size) 1460 break; 1461 1462 text_len = buf_size - len - prefix_len - 1 - 1; 1463 truncated = true; 1464 } 1465 1466 memmove(text + prefix_len, text, text_len); 1467 memcpy(text, prefix, prefix_len); 1468 1469 /* 1470 * Increment the prepared length to include the text and 1471 * prefix that were just moved+copied. Also increment for the 1472 * newline at the end of this line. If this is the last line, 1473 * there is no newline, but it will be added immediately below. 1474 */ 1475 len += prefix_len + line_len + 1; 1476 if (text_len == line_len) { 1477 /* 1478 * This is the last line. Add the trailing newline 1479 * removed in vprintk_store(). 1480 */ 1481 text[prefix_len + line_len] = '\n'; 1482 break; 1483 } 1484 1485 /* 1486 * Advance beyond the added prefix and the related line with 1487 * its newline. 1488 */ 1489 text += prefix_len + line_len + 1; 1490 1491 /* 1492 * The remaining text has only decreased by the line with its 1493 * newline. 1494 * 1495 * Note that @text_len can become zero. It happens when @text 1496 * ended with a newline (either due to truncation or the 1497 * original string ending with "\n\n"). The loop is correctly 1498 * repeated and (if not truncated) an empty line with a prefix 1499 * will be prepared. 1500 */ 1501 text_len -= line_len + 1; 1502 } 1503 1504 /* 1505 * If a buffer was provided, it will be terminated. Space for the 1506 * string terminator is guaranteed to be available. The terminator is 1507 * not counted in the return value. 1508 */ 1509 if (buf_size > 0) 1510 r->text_buf[len] = 0; 1511 1512 return len; 1513 } 1514 1515 static size_t get_record_print_text_size(struct printk_info *info, 1516 unsigned int line_count, 1517 bool syslog, bool time) 1518 { 1519 char prefix[PRINTK_PREFIX_MAX]; 1520 size_t prefix_len; 1521 1522 prefix_len = info_print_prefix(info, syslog, time, prefix); 1523 1524 /* 1525 * Each line will be preceded with a prefix. The intermediate 1526 * newlines are already within the text, but a final trailing 1527 * newline will be added. 
1528 */ 1529 return ((prefix_len * line_count) + info->text_len + 1); 1530 } 1531 1532 /* 1533 * Beginning with @start_seq, find the first record where it and all following 1534 * records up to (but not including) @max_seq fit into @size. 1535 * 1536 * @max_seq is simply an upper bound and does not need to exist. If the caller 1537 * does not require an upper bound, -1 can be used for @max_seq. 1538 */ 1539 static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size, 1540 bool syslog, bool time) 1541 { 1542 struct printk_info info; 1543 unsigned int line_count; 1544 size_t len = 0; 1545 u64 seq; 1546 1547 /* Determine the size of the records up to @max_seq. */ 1548 prb_for_each_info(start_seq, prb, seq, &info, &line_count) { 1549 if (info.seq >= max_seq) 1550 break; 1551 len += get_record_print_text_size(&info, line_count, syslog, time); 1552 } 1553 1554 /* 1555 * Adjust the upper bound for the next loop to avoid subtracting 1556 * lengths that were never added. 1557 */ 1558 if (seq < max_seq) 1559 max_seq = seq; 1560 1561 /* 1562 * Move first record forward until length fits into the buffer. Ignore 1563 * newest messages that were not counted in the above cycle. Messages 1564 * might appear and get lost in the meantime. This is a best effort 1565 * that prevents an infinite loop that could occur with a retry. 1566 */ 1567 prb_for_each_info(start_seq, prb, seq, &info, &line_count) { 1568 if (len <= size || info.seq >= max_seq) 1569 break; 1570 len -= get_record_print_text_size(&info, line_count, syslog, time); 1571 } 1572 1573 return seq; 1574 } 1575 1576 /* The caller is responsible for making sure @size is greater than 0. */ 1577 static int syslog_print(char __user *buf, int size) 1578 { 1579 struct printk_info info; 1580 struct printk_record r; 1581 char *text; 1582 int len = 0; 1583 u64 seq; 1584 1585 text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL); 1586 if (!text) 1587 return -ENOMEM; 1588 1589 prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX); 1590 1591 mutex_lock(&syslog_lock); 1592 1593 /* 1594 * Wait for the @syslog_seq record to be available. @syslog_seq may 1595 * change while waiting. 1596 */ 1597 do { 1598 seq = syslog_seq; 1599 1600 mutex_unlock(&syslog_lock); 1601 /* 1602 * Guarantee this task is visible on the waitqueue before 1603 * checking the wake condition. 1604 * 1605 * The full memory barrier within set_current_state() of 1606 * prepare_to_wait_event() pairs with the full memory barrier 1607 * within wq_has_sleeper(). 1608 * 1609 * This pairs with __wake_up_klogd:A. 1610 */ 1611 len = wait_event_interruptible(log_wait, 1612 prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */ 1613 mutex_lock(&syslog_lock); 1614 1615 if (len) 1616 goto out; 1617 } while (syslog_seq != seq); 1618 1619 /* 1620 * Copy records that fit into the buffer. The above cycle makes sure 1621 * that the first record is always available. 1622 */ 1623 do { 1624 size_t n; 1625 size_t skip; 1626 int err; 1627 1628 if (!prb_read_valid(prb, syslog_seq, &r)) 1629 break; 1630 1631 if (r.info->seq != syslog_seq) { 1632 /* message is gone, move to next valid one */ 1633 syslog_seq = r.info->seq; 1634 syslog_partial = 0; 1635 } 1636 1637 /* 1638 * To keep reading/counting partial line consistent, 1639 * use printk_time value as of the beginning of a line. 
1640 */ 1641 if (!syslog_partial) 1642 syslog_time = printk_time; 1643 1644 skip = syslog_partial; 1645 n = record_print_text(&r, true, syslog_time); 1646 if (n - syslog_partial <= size) { 1647 /* message fits into buffer, move forward */ 1648 syslog_seq = r.info->seq + 1; 1649 n -= syslog_partial; 1650 syslog_partial = 0; 1651 } else if (!len){ 1652 /* partial read(), remember position */ 1653 n = size; 1654 syslog_partial += n; 1655 } else 1656 n = 0; 1657 1658 if (!n) 1659 break; 1660 1661 mutex_unlock(&syslog_lock); 1662 err = copy_to_user(buf, text + skip, n); 1663 mutex_lock(&syslog_lock); 1664 1665 if (err) { 1666 if (!len) 1667 len = -EFAULT; 1668 break; 1669 } 1670 1671 len += n; 1672 size -= n; 1673 buf += n; 1674 } while (size); 1675 out: 1676 mutex_unlock(&syslog_lock); 1677 kfree(text); 1678 return len; 1679 } 1680 1681 static int syslog_print_all(char __user *buf, int size, bool clear) 1682 { 1683 struct printk_info info; 1684 struct printk_record r; 1685 char *text; 1686 int len = 0; 1687 u64 seq; 1688 bool time; 1689 1690 text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL); 1691 if (!text) 1692 return -ENOMEM; 1693 1694 time = printk_time; 1695 /* 1696 * Find first record that fits, including all following records, 1697 * into the user-provided buffer for this dump. 1698 */ 1699 seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1, 1700 size, true, time); 1701 1702 prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX); 1703 1704 prb_for_each_record(seq, prb, seq, &r) { 1705 int textlen; 1706 1707 textlen = record_print_text(&r, true, time); 1708 1709 if (len + textlen > size) { 1710 seq--; 1711 break; 1712 } 1713 1714 if (copy_to_user(buf + len, text, textlen)) 1715 len = -EFAULT; 1716 else 1717 len += textlen; 1718 1719 if (len < 0) 1720 break; 1721 } 1722 1723 if (clear) { 1724 mutex_lock(&syslog_lock); 1725 latched_seq_write(&clear_seq, seq); 1726 mutex_unlock(&syslog_lock); 1727 } 1728 1729 kfree(text); 1730 return len; 1731 } 1732 1733 static void syslog_clear(void) 1734 { 1735 mutex_lock(&syslog_lock); 1736 latched_seq_write(&clear_seq, prb_next_seq(prb)); 1737 mutex_unlock(&syslog_lock); 1738 } 1739 1740 int do_syslog(int type, char __user *buf, int len, int source) 1741 { 1742 struct printk_info info; 1743 bool clear = false; 1744 static int saved_console_loglevel = LOGLEVEL_DEFAULT; 1745 int error; 1746 1747 error = check_syslog_permissions(type, source); 1748 if (error) 1749 return error; 1750 1751 switch (type) { 1752 case SYSLOG_ACTION_CLOSE: /* Close log */ 1753 break; 1754 case SYSLOG_ACTION_OPEN: /* Open log */ 1755 break; 1756 case SYSLOG_ACTION_READ: /* Read from log */ 1757 if (!buf || len < 0) 1758 return -EINVAL; 1759 if (!len) 1760 return 0; 1761 if (!access_ok(buf, len)) 1762 return -EFAULT; 1763 error = syslog_print(buf, len); 1764 break; 1765 /* Read/clear last kernel messages */ 1766 case SYSLOG_ACTION_READ_CLEAR: 1767 clear = true; 1768 fallthrough; 1769 /* Read last kernel messages */ 1770 case SYSLOG_ACTION_READ_ALL: 1771 if (!buf || len < 0) 1772 return -EINVAL; 1773 if (!len) 1774 return 0; 1775 if (!access_ok(buf, len)) 1776 return -EFAULT; 1777 error = syslog_print_all(buf, len, clear); 1778 break; 1779 /* Clear ring buffer */ 1780 case SYSLOG_ACTION_CLEAR: 1781 syslog_clear(); 1782 break; 1783 /* Disable logging to console */ 1784 case SYSLOG_ACTION_CONSOLE_OFF: 1785 if (saved_console_loglevel == LOGLEVEL_DEFAULT) 1786 saved_console_loglevel = console_loglevel; 1787 console_loglevel = minimum_console_loglevel; 1788 break; 1789 /* 
Enable logging to console */ 1790 case SYSLOG_ACTION_CONSOLE_ON: 1791 if (saved_console_loglevel != LOGLEVEL_DEFAULT) { 1792 console_loglevel = saved_console_loglevel; 1793 saved_console_loglevel = LOGLEVEL_DEFAULT; 1794 } 1795 break; 1796 /* Set level of messages printed to console */ 1797 case SYSLOG_ACTION_CONSOLE_LEVEL: 1798 if (len < 1 || len > 8) 1799 return -EINVAL; 1800 if (len < minimum_console_loglevel) 1801 len = minimum_console_loglevel; 1802 console_loglevel = len; 1803 /* Implicitly re-enable logging to console */ 1804 saved_console_loglevel = LOGLEVEL_DEFAULT; 1805 break; 1806 /* Number of chars in the log buffer */ 1807 case SYSLOG_ACTION_SIZE_UNREAD: 1808 mutex_lock(&syslog_lock); 1809 if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) { 1810 /* No unread messages. */ 1811 mutex_unlock(&syslog_lock); 1812 return 0; 1813 } 1814 if (info.seq != syslog_seq) { 1815 /* messages are gone, move to first one */ 1816 syslog_seq = info.seq; 1817 syslog_partial = 0; 1818 } 1819 if (source == SYSLOG_FROM_PROC) { 1820 /* 1821 * Short-cut for poll(/"proc/kmsg") which simply checks 1822 * for pending data, not the size; return the count of 1823 * records, not the length. 1824 */ 1825 error = prb_next_seq(prb) - syslog_seq; 1826 } else { 1827 bool time = syslog_partial ? syslog_time : printk_time; 1828 unsigned int line_count; 1829 u64 seq; 1830 1831 prb_for_each_info(syslog_seq, prb, seq, &info, 1832 &line_count) { 1833 error += get_record_print_text_size(&info, line_count, 1834 true, time); 1835 time = printk_time; 1836 } 1837 error -= syslog_partial; 1838 } 1839 mutex_unlock(&syslog_lock); 1840 break; 1841 /* Size of the log buffer */ 1842 case SYSLOG_ACTION_SIZE_BUFFER: 1843 error = log_buf_len; 1844 break; 1845 default: 1846 error = -EINVAL; 1847 break; 1848 } 1849 1850 return error; 1851 } 1852 1853 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) 1854 { 1855 return do_syslog(type, buf, len, SYSLOG_FROM_READER); 1856 } 1857 1858 /* 1859 * Special console_lock variants that help to reduce the risk of soft-lockups. 1860 * They allow to pass console_lock to another printk() call using a busy wait. 1861 */ 1862 1863 #ifdef CONFIG_LOCKDEP 1864 static struct lockdep_map console_owner_dep_map = { 1865 .name = "console_owner" 1866 }; 1867 #endif 1868 1869 static DEFINE_RAW_SPINLOCK(console_owner_lock); 1870 static struct task_struct *console_owner; 1871 static bool console_waiter; 1872 1873 /** 1874 * console_lock_spinning_enable - mark beginning of code where another 1875 * thread might safely busy wait 1876 * 1877 * This basically converts console_lock into a spinlock. This marks 1878 * the section where the console_lock owner can not sleep, because 1879 * there may be a waiter spinning (like a spinlock). Also it must be 1880 * ready to hand over the lock at the end of the section. 1881 */ 1882 void console_lock_spinning_enable(void) 1883 { 1884 /* 1885 * Do not use spinning in panic(). The panic CPU wants to keep the lock. 1886 * Non-panic CPUs abandon the flush anyway. 1887 * 1888 * Just keep the lockdep annotation. The panic-CPU should avoid 1889 * taking console_owner_lock because it might cause a deadlock. 1890 * This looks like the easiest way how to prevent false lockdep 1891 * reports without handling races a lockless way. 
1892 */ 1893 if (panic_in_progress()) 1894 goto lockdep; 1895 1896 raw_spin_lock(&console_owner_lock); 1897 console_owner = current; 1898 raw_spin_unlock(&console_owner_lock); 1899 1900 lockdep: 1901 /* The waiter may spin on us after setting console_owner */ 1902 spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_); 1903 } 1904 1905 /** 1906 * console_lock_spinning_disable_and_check - mark end of code where another 1907 * thread was able to busy wait and check if there is a waiter 1908 * @cookie: cookie returned from console_srcu_read_lock() 1909 * 1910 * This is called at the end of the section where spinning is allowed. 1911 * It has two functions. First, it is a signal that it is no longer 1912 * safe to start busy waiting for the lock. Second, it checks if 1913 * there is a busy waiter and passes the lock rights to her. 1914 * 1915 * Important: Callers lose both the console_lock and the SRCU read lock if 1916 * there was a busy waiter. They must not touch items synchronized by 1917 * console_lock or SRCU read lock in this case. 1918 * 1919 * Return: 1 if the lock rights were passed, 0 otherwise. 1920 */ 1921 int console_lock_spinning_disable_and_check(int cookie) 1922 { 1923 int waiter; 1924 1925 /* 1926 * Ignore spinning waiters during panic() because they might get stopped 1927 * or blocked at any time, 1928 * 1929 * It is safe because nobody is allowed to start spinning during panic 1930 * in the first place. If there has been a waiter then non panic CPUs 1931 * might stay spinning. They would get stopped anyway. The panic context 1932 * will never start spinning and an interrupted spin on panic CPU will 1933 * never continue. 1934 */ 1935 if (panic_in_progress()) { 1936 /* Keep lockdep happy. */ 1937 spin_release(&console_owner_dep_map, _THIS_IP_); 1938 return 0; 1939 } 1940 1941 raw_spin_lock(&console_owner_lock); 1942 waiter = READ_ONCE(console_waiter); 1943 console_owner = NULL; 1944 raw_spin_unlock(&console_owner_lock); 1945 1946 if (!waiter) { 1947 spin_release(&console_owner_dep_map, _THIS_IP_); 1948 return 0; 1949 } 1950 1951 /* The waiter is now free to continue */ 1952 WRITE_ONCE(console_waiter, false); 1953 1954 spin_release(&console_owner_dep_map, _THIS_IP_); 1955 1956 /* 1957 * Preserve lockdep lock ordering. Release the SRCU read lock before 1958 * releasing the console_lock. 1959 */ 1960 console_srcu_read_unlock(cookie); 1961 1962 /* 1963 * Hand off console_lock to waiter. The waiter will perform 1964 * the up(). After this, the waiter is the console_lock owner. 1965 */ 1966 mutex_release(&console_lock_dep_map, _THIS_IP_); 1967 return 1; 1968 } 1969 1970 /** 1971 * console_trylock_spinning - try to get console_lock by busy waiting 1972 * 1973 * This allows to busy wait for the console_lock when the current 1974 * owner is running in specially marked sections. It means that 1975 * the current owner is running and cannot reschedule until it 1976 * is ready to lose the lock. 1977 * 1978 * Return: 1 if we got the lock, 0 othrewise 1979 */ 1980 static int console_trylock_spinning(void) 1981 { 1982 struct task_struct *owner = NULL; 1983 bool waiter; 1984 bool spin = false; 1985 unsigned long flags; 1986 1987 if (console_trylock()) 1988 return 1; 1989 1990 /* 1991 * It's unsafe to spin once a panic has begun. If we are the 1992 * panic CPU, we may have already halted the owner of the 1993 * console_sem. If we are not the panic CPU, then we should 1994 * avoid taking console_sem, so the panic CPU has a better 1995 * chance of cleanly acquiring it later. 
1996 */ 1997 if (panic_in_progress()) 1998 return 0; 1999 2000 printk_safe_enter_irqsave(flags); 2001 2002 raw_spin_lock(&console_owner_lock); 2003 owner = READ_ONCE(console_owner); 2004 waiter = READ_ONCE(console_waiter); 2005 if (!waiter && owner && owner != current) { 2006 WRITE_ONCE(console_waiter, true); 2007 spin = true; 2008 } 2009 raw_spin_unlock(&console_owner_lock); 2010 2011 /* 2012 * If there is an active printk() writing to the 2013 * consoles, instead of having it write our data too, 2014 * see if we can offload that load from the active 2015 * printer, and do some printing ourselves. 2016 * Go into a spin only if there isn't already a waiter 2017 * spinning, and there is an active printer, and 2018 * that active printer isn't us (recursive printk?). 2019 */ 2020 if (!spin) { 2021 printk_safe_exit_irqrestore(flags); 2022 return 0; 2023 } 2024 2025 /* We spin waiting for the owner to release us */ 2026 spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_); 2027 /* Owner will clear console_waiter on hand off */ 2028 while (READ_ONCE(console_waiter)) 2029 cpu_relax(); 2030 spin_release(&console_owner_dep_map, _THIS_IP_); 2031 2032 printk_safe_exit_irqrestore(flags); 2033 /* 2034 * The owner passed the console lock to us. 2035 * Since we did not spin on console lock, annotate 2036 * this as a trylock. Otherwise lockdep will 2037 * complain. 2038 */ 2039 mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_); 2040 2041 /* 2042 * Update @console_may_schedule for trylock because the previous 2043 * owner may have been schedulable. 2044 */ 2045 console_may_schedule = 0; 2046 2047 return 1; 2048 } 2049 2050 /* 2051 * Recursion is tracked separately on each CPU. If NMIs are supported, an 2052 * additional NMI context per CPU is also separately tracked. Until per-CPU 2053 * is available, a separate "early tracking" is performed. 2054 */ 2055 static DEFINE_PER_CPU(u8, printk_count); 2056 static u8 printk_count_early; 2057 #ifdef CONFIG_HAVE_NMI 2058 static DEFINE_PER_CPU(u8, printk_count_nmi); 2059 static u8 printk_count_nmi_early; 2060 #endif 2061 2062 /* 2063 * Recursion is limited to keep the output sane. printk() should not require 2064 * more than 1 level of recursion (allowing, for example, printk() to trigger 2065 * a WARN), but a higher value is used in case some printk-internal errors 2066 * exist, such as the ringbuffer validation checks failing. 2067 */ 2068 #define PRINTK_MAX_RECURSION 3 2069 2070 /* 2071 * Return a pointer to the dedicated counter for the CPU+context of the 2072 * caller. 2073 */ 2074 static u8 *__printk_recursion_counter(void) 2075 { 2076 #ifdef CONFIG_HAVE_NMI 2077 if (in_nmi()) { 2078 if (printk_percpu_data_ready()) 2079 return this_cpu_ptr(&printk_count_nmi); 2080 return &printk_count_nmi_early; 2081 } 2082 #endif 2083 if (printk_percpu_data_ready()) 2084 return this_cpu_ptr(&printk_count); 2085 return &printk_count_early; 2086 } 2087 2088 /* 2089 * Enter recursion tracking. Interrupts are disabled to simplify tracking. 2090 * The caller must check the boolean return value to see if the recursion is 2091 * allowed. On failure, interrupts are not disabled. 2092 * 2093 * @recursion_ptr must be a variable of type (u8 *) and is the same variable 2094 * that is passed to printk_exit_irqrestore(). 
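 *
 * Typical usage, mirroring vprintk_store() below (sketch):
 *
 *	u8 *recursion_ptr;
 *	unsigned long irqflags;
 *
 *	if (!printk_enter_irqsave(recursion_ptr, irqflags))
 *		return 0;
 *	...
 *	printk_exit_irqrestore(recursion_ptr, irqflags);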
2095 */ 2096 #define printk_enter_irqsave(recursion_ptr, flags) \ 2097 ({ \ 2098 bool success = true; \ 2099 \ 2100 typecheck(u8 *, recursion_ptr); \ 2101 local_irq_save(flags); \ 2102 (recursion_ptr) = __printk_recursion_counter(); \ 2103 if (*(recursion_ptr) > PRINTK_MAX_RECURSION) { \ 2104 local_irq_restore(flags); \ 2105 success = false; \ 2106 } else { \ 2107 (*(recursion_ptr))++; \ 2108 } \ 2109 success; \ 2110 }) 2111 2112 /* Exit recursion tracking, restoring interrupts. */ 2113 #define printk_exit_irqrestore(recursion_ptr, flags) \ 2114 do { \ 2115 typecheck(u8 *, recursion_ptr); \ 2116 (*(recursion_ptr))--; \ 2117 local_irq_restore(flags); \ 2118 } while (0) 2119 2120 int printk_delay_msec __read_mostly; 2121 2122 static inline void printk_delay(int level) 2123 { 2124 boot_delay_msec(level); 2125 2126 if (unlikely(printk_delay_msec)) { 2127 int m = printk_delay_msec; 2128 2129 while (m--) { 2130 mdelay(1); 2131 touch_nmi_watchdog(); 2132 } 2133 } 2134 } 2135 2136 #define CALLER_ID_MASK 0x80000000 2137 2138 static inline u32 printk_caller_id(void) 2139 { 2140 return in_task() ? task_pid_nr(current) : 2141 CALLER_ID_MASK + smp_processor_id(); 2142 } 2143 2144 #ifdef CONFIG_PRINTK_EXECUTION_CTX 2145 /* Store the opposite info than caller_id. */ 2146 static u32 printk_caller_id2(void) 2147 { 2148 return !in_task() ? task_pid_nr(current) : 2149 CALLER_ID_MASK + smp_processor_id(); 2150 } 2151 2152 static pid_t printk_info_get_pid(const struct printk_info *info) 2153 { 2154 u32 caller_id = info->caller_id; 2155 u32 caller_id2 = info->caller_id2; 2156 2157 return caller_id & CALLER_ID_MASK ? caller_id2 : caller_id; 2158 } 2159 2160 static int printk_info_get_cpu(const struct printk_info *info) 2161 { 2162 u32 caller_id = info->caller_id; 2163 u32 caller_id2 = info->caller_id2; 2164 2165 return ((caller_id & CALLER_ID_MASK ? 2166 caller_id : caller_id2) & ~CALLER_ID_MASK); 2167 } 2168 #endif 2169 2170 /** 2171 * printk_parse_prefix - Parse level and control flags. 2172 * 2173 * @text: The terminated text message. 2174 * @level: A pointer to the current level value, will be updated. 2175 * @flags: A pointer to the current printk_info flags, will be updated. 2176 * 2177 * @level may be NULL if the caller is not interested in the parsed value. 2178 * Otherwise the variable pointed to by @level must be set to 2179 * LOGLEVEL_DEFAULT in order to be updated with the parsed value. 2180 * 2181 * @flags may be NULL if the caller is not interested in the parsed value. 2182 * Otherwise the variable pointed to by @flags will be OR'd with the parsed 2183 * value. 2184 * 2185 * Return: The length of the parsed level and control flags. 2186 */ 2187 u16 printk_parse_prefix(const char *text, int *level, 2188 enum printk_info_flags *flags) 2189 { 2190 u16 prefix_len = 0; 2191 int kern_level; 2192 2193 while (*text) { 2194 kern_level = printk_get_level(text); 2195 if (!kern_level) 2196 break; 2197 2198 switch (kern_level) { 2199 case '0' ... '7': 2200 if (level && *level == LOGLEVEL_DEFAULT) 2201 *level = kern_level - '0'; 2202 break; 2203 case 'c': /* KERN_CONT */ 2204 if (flags) 2205 *flags |= LOG_CONT; 2206 } 2207 2208 prefix_len += 2; 2209 text += 2; 2210 } 2211 2212 return prefix_len; 2213 } 2214 2215 __printf(5, 0) 2216 static u16 printk_sprint(char *text, u16 size, int facility, 2217 enum printk_info_flags *flags, const char *fmt, 2218 va_list args) 2219 { 2220 u16 text_len; 2221 2222 text_len = vscnprintf(text, size, fmt, args); 2223 2224 /* Mark and strip a trailing newline. 
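	 * For example, pr_info("hello\n") is stored as "hello" with
	 * LOG_NEWLINE set, while a message without a trailing newline is
	 * left open so that a later KERN_CONT fragment may be appended.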
*/ 2225 if (text_len && text[text_len - 1] == '\n') { 2226 text_len--; 2227 *flags |= LOG_NEWLINE; 2228 } 2229 2230 /* Strip log level and control flags. */ 2231 if (facility == 0) { 2232 u16 prefix_len; 2233 2234 prefix_len = printk_parse_prefix(text, NULL, NULL); 2235 if (prefix_len) { 2236 text_len -= prefix_len; 2237 memmove(text, text + prefix_len, text_len); 2238 } 2239 } 2240 2241 trace_console(text, text_len); 2242 2243 return text_len; 2244 } 2245 2246 #ifdef CONFIG_PRINTK_EXECUTION_CTX 2247 static void printk_store_execution_ctx(struct printk_info *info) 2248 { 2249 info->caller_id2 = printk_caller_id2(); 2250 get_task_comm(info->comm, current); 2251 } 2252 2253 static void pmsg_load_execution_ctx(struct printk_message *pmsg, 2254 const struct printk_info *info) 2255 { 2256 pmsg->cpu = printk_info_get_cpu(info); 2257 pmsg->pid = printk_info_get_pid(info); 2258 memcpy(pmsg->comm, info->comm, sizeof(pmsg->comm)); 2259 static_assert(sizeof(pmsg->comm) == sizeof(info->comm)); 2260 } 2261 #else 2262 static void printk_store_execution_ctx(struct printk_info *info) {} 2263 2264 static void pmsg_load_execution_ctx(struct printk_message *pmsg, 2265 const struct printk_info *info) {} 2266 #endif 2267 2268 __printf(4, 0) 2269 int vprintk_store(int facility, int level, 2270 const struct dev_printk_info *dev_info, 2271 const char *fmt, va_list args) 2272 { 2273 struct prb_reserved_entry e; 2274 enum printk_info_flags flags = 0; 2275 struct printk_record r; 2276 unsigned long irqflags; 2277 u16 trunc_msg_len = 0; 2278 char prefix_buf[8]; 2279 u8 *recursion_ptr; 2280 u16 reserve_size; 2281 va_list args2; 2282 u32 caller_id; 2283 u16 text_len; 2284 int ret = 0; 2285 u64 ts_nsec; 2286 2287 if (!printk_enter_irqsave(recursion_ptr, irqflags)) 2288 return 0; 2289 2290 /* 2291 * Since the duration of printk() can vary depending on the message 2292 * and state of the ringbuffer, grab the timestamp now so that it is 2293 * close to the call of printk(). This provides a more deterministic 2294 * timestamp with respect to the caller. 2295 */ 2296 ts_nsec = local_clock(); 2297 2298 caller_id = printk_caller_id(); 2299 2300 /* 2301 * The sprintf needs to come first since the syslog prefix might be 2302 * passed in as a parameter. An extra byte must be reserved so that 2303 * later the vscnprintf() into the reserved buffer has room for the 2304 * terminating '\0', which is not counted by vsnprintf(). 2305 */ 2306 va_copy(args2, args); 2307 reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1; 2308 va_end(args2); 2309 2310 if (reserve_size > PRINTKRB_RECORD_MAX) 2311 reserve_size = PRINTKRB_RECORD_MAX; 2312 2313 /* Extract log level or control flags. 
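	 * For example, printk(KERN_ERR "...") formats into a buffer that
	 * starts with the two-byte KERN_SOH "3" prefix: printk_parse_prefix()
	 * then reports level 3, while a KERN_CONT prefix only sets LOG_CONT.
	 * The prefix bytes themselves are stripped later in printk_sprint().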
*/ 2314 if (facility == 0) 2315 printk_parse_prefix(&prefix_buf[0], &level, &flags); 2316 2317 if (level == LOGLEVEL_DEFAULT) 2318 level = default_message_loglevel; 2319 2320 if (dev_info) 2321 flags |= LOG_NEWLINE; 2322 2323 if (is_printk_force_console()) 2324 flags |= LOG_FORCE_CON; 2325 2326 if (flags & LOG_CONT) { 2327 prb_rec_init_wr(&r, reserve_size); 2328 if (prb_reserve_in_last(&e, prb, &r, caller_id, PRINTKRB_RECORD_MAX)) { 2329 text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size, 2330 facility, &flags, fmt, args); 2331 r.info->text_len += text_len; 2332 2333 if (flags & LOG_FORCE_CON) 2334 r.info->flags |= LOG_FORCE_CON; 2335 2336 if (flags & LOG_NEWLINE) { 2337 r.info->flags |= LOG_NEWLINE; 2338 prb_final_commit(&e); 2339 } else { 2340 prb_commit(&e); 2341 } 2342 2343 ret = text_len; 2344 goto out; 2345 } 2346 } 2347 2348 /* 2349 * Explicitly initialize the record before every prb_reserve() call. 2350 * prb_reserve_in_last() and prb_reserve() purposely invalidate the 2351 * structure when they fail. 2352 */ 2353 prb_rec_init_wr(&r, reserve_size); 2354 if (!prb_reserve(&e, prb, &r)) { 2355 /* truncate the message if it is too long for empty buffer */ 2356 truncate_msg(&reserve_size, &trunc_msg_len); 2357 2358 prb_rec_init_wr(&r, reserve_size + trunc_msg_len); 2359 if (!prb_reserve(&e, prb, &r)) 2360 goto out; 2361 } 2362 2363 /* fill message */ 2364 text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args); 2365 if (trunc_msg_len) 2366 memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len); 2367 r.info->text_len = text_len + trunc_msg_len; 2368 r.info->facility = facility; 2369 r.info->level = level & 7; 2370 r.info->flags = flags & 0x1f; 2371 r.info->ts_nsec = ts_nsec; 2372 r.info->caller_id = caller_id; 2373 if (dev_info) 2374 memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info)); 2375 printk_store_execution_ctx(r.info); 2376 2377 /* A message without a trailing newline can be continued. */ 2378 if (!(flags & LOG_NEWLINE)) 2379 prb_commit(&e); 2380 else 2381 prb_final_commit(&e); 2382 2383 ret = text_len + trunc_msg_len; 2384 out: 2385 printk_exit_irqrestore(recursion_ptr, irqflags); 2386 return ret; 2387 } 2388 2389 /* 2390 * This acts as a one-way switch to allow legacy consoles to print from 2391 * the printk() caller context on a panic CPU. It also attempts to flush 2392 * the legacy consoles in this context. 
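 *
 * Once set, console_flush_on_panic() below will also flush the legacy
 * consoles via console_flush_all(), which it otherwise skips.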
2393 */ 2394 void printk_legacy_allow_panic_sync(void) 2395 { 2396 struct console_flush_type ft; 2397 2398 legacy_allow_panic_sync = true; 2399 2400 printk_get_console_flush_type(&ft); 2401 if (ft.legacy_direct) { 2402 if (console_trylock()) 2403 console_unlock(); 2404 } 2405 } 2406 2407 bool __read_mostly debug_non_panic_cpus; 2408 2409 #ifdef CONFIG_PRINTK_CALLER 2410 static int __init debug_non_panic_cpus_setup(char *str) 2411 { 2412 debug_non_panic_cpus = true; 2413 pr_info("allow messages from non-panic CPUs in panic()\n"); 2414 2415 return 0; 2416 } 2417 early_param("debug_non_panic_cpus", debug_non_panic_cpus_setup); 2418 module_param(debug_non_panic_cpus, bool, 0644); 2419 MODULE_PARM_DESC(debug_non_panic_cpus, 2420 "allow messages from non-panic CPUs in panic()"); 2421 #endif 2422 2423 asmlinkage int vprintk_emit(int facility, int level, 2424 const struct dev_printk_info *dev_info, 2425 const char *fmt, va_list args) 2426 { 2427 struct console_flush_type ft; 2428 int printed_len; 2429 2430 /* Suppress unimportant messages after panic happens */ 2431 if (unlikely(suppress_printk)) 2432 return 0; 2433 2434 /* 2435 * The messages on the panic CPU are the most important. If 2436 * non-panic CPUs are generating any messages, they will be 2437 * silently dropped. 2438 */ 2439 if (panic_on_other_cpu() && 2440 !debug_non_panic_cpus && 2441 !panic_triggering_all_cpu_backtrace) 2442 return 0; 2443 2444 printk_get_console_flush_type(&ft); 2445 2446 /* If called from the scheduler, we can not call up(). */ 2447 if (level == LOGLEVEL_SCHED) { 2448 level = LOGLEVEL_DEFAULT; 2449 ft.legacy_offload |= ft.legacy_direct && !console_irqwork_blocked; 2450 ft.legacy_direct = false; 2451 } 2452 2453 printk_delay(level); 2454 2455 printed_len = vprintk_store(facility, level, dev_info, fmt, args); 2456 2457 if (ft.nbcon_atomic) 2458 nbcon_atomic_flush_pending(); 2459 2460 if (ft.nbcon_offload) 2461 nbcon_kthreads_wake(); 2462 2463 if (ft.legacy_direct) { 2464 /* 2465 * The caller may be holding system-critical or 2466 * timing-sensitive locks. Disable preemption during 2467 * printing of all remaining records to all consoles so that 2468 * this context can return as soon as possible. Hopefully 2469 * another printk() caller will take over the printing. 2470 */ 2471 preempt_disable(); 2472 /* 2473 * Try to acquire and then immediately release the console 2474 * semaphore. The release will print out buffers. With the 2475 * spinning variant, this context tries to take over the 2476 * printing from another printing context. 2477 */ 2478 if (console_trylock_spinning()) 2479 console_unlock(); 2480 preempt_enable(); 2481 } 2482 2483 if (ft.legacy_offload) 2484 defer_console_output(); 2485 else if (!console_irqwork_blocked) 2486 wake_up_klogd(); 2487 2488 return printed_len; 2489 } 2490 EXPORT_SYMBOL(vprintk_emit); 2491 2492 int vprintk_default(const char *fmt, va_list args) 2493 { 2494 return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args); 2495 } 2496 EXPORT_SYMBOL_GPL(vprintk_default); 2497 2498 asmlinkage __visible int _printk(const char *fmt, ...) 
2499 { 2500 va_list args; 2501 int r; 2502 2503 va_start(args, fmt); 2504 r = vprintk(fmt, args); 2505 va_end(args); 2506 2507 return r; 2508 } 2509 EXPORT_SYMBOL(_printk); 2510 2511 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress); 2512 2513 #else /* CONFIG_PRINTK */ 2514 2515 #define printk_time false 2516 2517 #define prb_read_valid(rb, seq, r) false 2518 #define prb_first_valid_seq(rb) 0 2519 #define prb_next_seq(rb) 0 2520 2521 static u64 syslog_seq; 2522 2523 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; } 2524 2525 #endif /* CONFIG_PRINTK */ 2526 2527 #ifdef CONFIG_EARLY_PRINTK 2528 struct console *early_console; 2529 2530 asmlinkage __visible void early_printk(const char *fmt, ...) 2531 { 2532 va_list ap; 2533 char buf[512]; 2534 int n; 2535 2536 if (!early_console) 2537 return; 2538 2539 va_start(ap, fmt); 2540 n = vscnprintf(buf, sizeof(buf), fmt, ap); 2541 va_end(ap); 2542 2543 early_console->write(early_console, buf, n); 2544 } 2545 #endif 2546 2547 static void set_user_specified(struct console_cmdline *c, bool user_specified) 2548 { 2549 if (!user_specified) 2550 return; 2551 2552 /* 2553 * @c console was defined by the user on the command line. 2554 * Do not clear when added twice also by SPCR or the device tree. 2555 */ 2556 c->user_specified = true; 2557 /* At least one console defined by the user on the command line. */ 2558 console_set_on_cmdline = 1; 2559 } 2560 2561 static int __add_preferred_console(const char *name, const short idx, 2562 const char *devname, char *options, 2563 char *brl_options, bool user_specified) 2564 { 2565 struct console_cmdline *c; 2566 int i; 2567 2568 if (!name && !devname) 2569 return -EINVAL; 2570 2571 /* 2572 * We use a signed short index for struct console for device drivers to 2573 * indicate a not yet assigned index or port. However, a negative index 2574 * value is not valid when the console name and index are defined on 2575 * the command line. 2576 */ 2577 if (name && idx < 0) 2578 return -EINVAL; 2579 2580 /* 2581 * See if this tty is not yet registered, and 2582 * if we have a slot free. 2583 */ 2584 for (i = 0, c = console_cmdline; 2585 i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]); 2586 i++, c++) { 2587 if ((name && strcmp(c->name, name) == 0 && c->index == idx) || 2588 (devname && strcmp(c->devname, devname) == 0)) { 2589 if (!brl_options) 2590 preferred_console = i; 2591 set_user_specified(c, user_specified); 2592 return 0; 2593 } 2594 } 2595 if (i == MAX_CMDLINECONSOLES) 2596 return -E2BIG; 2597 if (!brl_options) 2598 preferred_console = i; 2599 if (name) 2600 strscpy(c->name, name); 2601 if (devname) 2602 strscpy(c->devname, devname); 2603 c->options = options; 2604 set_user_specified(c, user_specified); 2605 braille_set_options(c, brl_options); 2606 2607 c->index = idx; 2608 return 0; 2609 } 2610 2611 static int __init console_msg_format_setup(char *str) 2612 { 2613 if (!strcmp(str, "syslog")) 2614 console_msg_format = MSG_FORMAT_SYSLOG; 2615 if (!strcmp(str, "default")) 2616 console_msg_format = MSG_FORMAT_DEFAULT; 2617 return 1; 2618 } 2619 __setup("console_msg_format=", console_msg_format_setup); 2620 2621 /* 2622 * Set up a console. Called via do_early_param() in init/main.c 2623 * for each "console=" parameter in the boot command line. 
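 *
 * A few of the accepted forms, as handled below:
 *
 *	console=ttyS0,115200n8	driver name "ttyS", index 0, options "115200n8"
 *	console=DEVNAME:0.0	device-name style; the driver name and index
 *				are filled in later, see
 *				match_devname_and_update_preferred_console()
 *	console=null		route output to ttynull, i.e. disable it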
2624 */ 2625 static int __init console_setup(char *str) 2626 { 2627 static_assert(sizeof(console_cmdline[0].devname) >= sizeof(console_cmdline[0].name) + 4); 2628 char buf[sizeof(console_cmdline[0].devname)]; 2629 char *brl_options = NULL; 2630 char *ttyname = NULL; 2631 char *devname = NULL; 2632 char *options; 2633 char *s; 2634 int idx; 2635 2636 /* 2637 * console="" or console=null have been suggested as a way to 2638 * disable console output. Use ttynull that has been created 2639 * for exactly this purpose. 2640 */ 2641 if (str[0] == 0 || strcmp(str, "null") == 0) { 2642 __add_preferred_console("ttynull", 0, NULL, NULL, NULL, true); 2643 return 1; 2644 } 2645 2646 if (_braille_console_setup(&str, &brl_options)) 2647 return 1; 2648 2649 /* For a DEVNAME:0.0 style console the character device is unknown early */ 2650 if (strchr(str, ':')) 2651 devname = buf; 2652 else 2653 ttyname = buf; 2654 2655 /* 2656 * Decode str into name, index, options. 2657 */ 2658 if (ttyname && isdigit(str[0])) 2659 scnprintf(buf, sizeof(buf), "ttyS%s", str); 2660 else 2661 strscpy(buf, str); 2662 2663 options = strchr(str, ','); 2664 if (options) 2665 *(options++) = 0; 2666 2667 #ifdef __sparc__ 2668 if (!strcmp(str, "ttya")) 2669 strscpy(buf, "ttyS0"); 2670 if (!strcmp(str, "ttyb")) 2671 strscpy(buf, "ttyS1"); 2672 #endif 2673 2674 for (s = buf; *s; s++) 2675 if ((ttyname && isdigit(*s)) || *s == ',') 2676 break; 2677 2678 /* @idx will get defined when devname matches. */ 2679 if (devname) 2680 idx = -1; 2681 else 2682 idx = simple_strtoul(s, NULL, 10); 2683 2684 *s = 0; 2685 2686 __add_preferred_console(ttyname, idx, devname, options, brl_options, true); 2687 return 1; 2688 } 2689 __setup("console=", console_setup); 2690 2691 /** 2692 * add_preferred_console - add a device to the list of preferred consoles. 2693 * @name: device name 2694 * @idx: device index 2695 * @options: options for this console 2696 * 2697 * The last preferred console added will be used for kernel messages 2698 * and stdin/out/err for init. Normally this is used by console_setup 2699 * above to handle user-supplied console arguments; however it can also 2700 * be used by arch-specific code either to override the user or more 2701 * commonly to provide a default console (ie from PROM variables) when 2702 * the user has not supplied one. 2703 */ 2704 int add_preferred_console(const char *name, const short idx, char *options) 2705 { 2706 return __add_preferred_console(name, idx, NULL, options, NULL, false); 2707 } 2708 2709 /** 2710 * match_devname_and_update_preferred_console - Update a preferred console 2711 * when matching devname is found. 2712 * @devname: DEVNAME:0.0 style device name 2713 * @name: Name of the corresponding console driver, e.g. "ttyS" 2714 * @idx: Console index, e.g. port number. 2715 * 2716 * The function checks whether a device with the given @devname is 2717 * preferred via the console=DEVNAME:0.0 command line option. 2718 * It fills the missing console driver name and console index 2719 * so that a later register_console() call could find (match) 2720 * and enable this device. 2721 * 2722 * It might be used when a driver subsystem initializes particular 2723 * devices with already known DEVNAME:0.0 style names. And it 2724 * could predict which console driver name and index this device 2725 * would later get associated with. 2726 * 2727 * Return: 0 on success, negative error code on failure. 
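 *
 * A minimal usage sketch (the device name below is only illustrative):
 *
 *	err = match_devname_and_update_preferred_console("8250.0:0.0",
 *							 "ttyS", 0);
 *	if (err && err != -ENOENT)
 *		return err;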
2728 */ 2729 int match_devname_and_update_preferred_console(const char *devname, 2730 const char *name, 2731 const short idx) 2732 { 2733 struct console_cmdline *c = console_cmdline; 2734 int i; 2735 2736 if (!devname || !strlen(devname) || !name || !strlen(name) || idx < 0) 2737 return -EINVAL; 2738 2739 for (i = 0; i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]); 2740 i++, c++) { 2741 if (!strcmp(devname, c->devname)) { 2742 pr_info("associate the preferred console \"%s\" with \"%s%d\"\n", 2743 devname, name, idx); 2744 strscpy(c->name, name); 2745 c->index = idx; 2746 return 0; 2747 } 2748 } 2749 2750 return -ENOENT; 2751 } 2752 EXPORT_SYMBOL_GPL(match_devname_and_update_preferred_console); 2753 2754 bool console_suspend_enabled = true; 2755 EXPORT_SYMBOL(console_suspend_enabled); 2756 2757 static int __init console_suspend_disable(char *str) 2758 { 2759 console_suspend_enabled = false; 2760 return 1; 2761 } 2762 __setup("no_console_suspend", console_suspend_disable); 2763 module_param_named(console_suspend, console_suspend_enabled, 2764 bool, S_IRUGO | S_IWUSR); 2765 MODULE_PARM_DESC(console_suspend, "suspend console during suspend" 2766 " and hibernate operations"); 2767 2768 static bool printk_console_no_auto_verbose; 2769 2770 void console_verbose(void) 2771 { 2772 if (console_loglevel && !printk_console_no_auto_verbose) 2773 console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; 2774 } 2775 EXPORT_SYMBOL_GPL(console_verbose); 2776 2777 module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644); 2778 MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc"); 2779 2780 /** 2781 * console_suspend_all - suspend the console subsystem 2782 * 2783 * This disables printk() while we go into suspend states 2784 */ 2785 void console_suspend_all(void) 2786 { 2787 struct console *con; 2788 2789 if (console_suspend_enabled) 2790 pr_info("Suspending console(s) (use no_console_suspend to debug)\n"); 2791 2792 /* 2793 * Flush any console backlog and then avoid queueing irq_work until 2794 * console_resume_all(). Until then deferred printing is no longer 2795 * triggered, NBCON consoles transition to atomic flushing, and 2796 * any klogd waiters are not triggered. 2797 */ 2798 pr_flush(1000, true); 2799 console_irqwork_blocked = true; 2800 2801 if (!console_suspend_enabled) 2802 return; 2803 2804 console_list_lock(); 2805 for_each_console(con) 2806 console_srcu_write_flags(con, con->flags | CON_SUSPENDED); 2807 console_list_unlock(); 2808 2809 /* 2810 * Ensure that all SRCU list walks have completed. All printing 2811 * contexts must be able to see that they are suspended so that it 2812 * is guaranteed that all printing has stopped when this function 2813 * completes. 2814 */ 2815 synchronize_srcu(&console_srcu); 2816 } 2817 2818 void console_resume_all(void) 2819 { 2820 struct console_flush_type ft; 2821 struct console *con; 2822 2823 /* 2824 * Allow queueing irq_work. After restoring console state, deferred 2825 * printing and any klogd waiters need to be triggered in case there 2826 * is now a console backlog. 2827 */ 2828 console_irqwork_blocked = false; 2829 2830 if (console_suspend_enabled) { 2831 console_list_lock(); 2832 for_each_console(con) 2833 console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED); 2834 console_list_unlock(); 2835 2836 /* 2837 * Ensure that all SRCU list walks have completed. 
All printing 2838 * contexts must be able to see they are no longer suspended so 2839 * that they are guaranteed to wake up and resume printing. 2840 */ 2841 synchronize_srcu(&console_srcu); 2842 } 2843 2844 printk_get_console_flush_type(&ft); 2845 if (ft.nbcon_offload) 2846 nbcon_kthreads_wake(); 2847 if (ft.legacy_offload) 2848 defer_console_output(); 2849 else 2850 wake_up_klogd(); 2851 2852 pr_flush(1000, true); 2853 } 2854 2855 /** 2856 * console_cpu_notify - print deferred console messages after CPU hotplug 2857 * @cpu: unused 2858 * 2859 * If printk() is called from a CPU that is not online yet, the messages 2860 * will be printed on the console only if there are CON_ANYTIME consoles. 2861 * This function is called when a new CPU comes online (or fails to come 2862 * up) or goes offline. 2863 */ 2864 static int console_cpu_notify(unsigned int cpu) 2865 { 2866 struct console_flush_type ft; 2867 2868 if (!cpuhp_tasks_frozen) { 2869 printk_get_console_flush_type(&ft); 2870 if (ft.nbcon_atomic) 2871 nbcon_atomic_flush_pending(); 2872 if (ft.legacy_direct) { 2873 if (console_trylock()) 2874 console_unlock(); 2875 } 2876 } 2877 return 0; 2878 } 2879 2880 /** 2881 * console_lock - block the console subsystem from printing 2882 * 2883 * Acquires a lock which guarantees that no consoles will 2884 * be in or enter their write() callback. 2885 * 2886 * Can sleep, returns nothing. 2887 */ 2888 void console_lock(void) 2889 { 2890 might_sleep(); 2891 2892 /* On panic, the console_lock must be left to the panic cpu. */ 2893 while (panic_on_other_cpu()) 2894 msleep(1000); 2895 2896 down_console_sem(); 2897 console_locked = 1; 2898 console_may_schedule = 1; 2899 } 2900 EXPORT_SYMBOL(console_lock); 2901 2902 /** 2903 * console_trylock - try to block the console subsystem from printing 2904 * 2905 * Try to acquire a lock which guarantees that no consoles will 2906 * be in or enter their write() callback. 2907 * 2908 * returns 1 on success, and 0 on failure to acquire the lock. 2909 */ 2910 int console_trylock(void) 2911 { 2912 /* On panic, the console_lock must be left to the panic cpu. */ 2913 if (panic_on_other_cpu()) 2914 return 0; 2915 if (down_trylock_console_sem()) 2916 return 0; 2917 console_locked = 1; 2918 console_may_schedule = 0; 2919 return 1; 2920 } 2921 EXPORT_SYMBOL(console_trylock); 2922 2923 int is_console_locked(void) 2924 { 2925 return console_locked; 2926 } 2927 EXPORT_SYMBOL(is_console_locked); 2928 2929 static void __console_unlock(void) 2930 { 2931 console_locked = 0; 2932 up_console_sem(); 2933 } 2934 2935 #ifdef CONFIG_PRINTK 2936 2937 /* 2938 * Prepend the message in @pmsg->pbufs->outbuf. This is achieved by shifting 2939 * the existing message over and inserting the scratchbuf message. 2940 * 2941 * @pmsg is the original printk message. 2942 * @fmt is the printf format of the message which will prepend the existing one. 2943 * 2944 * If there is not enough space in @pmsg->pbufs->outbuf, the existing 2945 * message text will be sufficiently truncated. 2946 * 2947 * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated. 2948 */ 2949 __printf(2, 3) 2950 static void console_prepend_message(struct printk_message *pmsg, const char *fmt, ...) 
2951 { 2952 struct printk_buffers *pbufs = pmsg->pbufs; 2953 const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf); 2954 const size_t outbuf_sz = sizeof(pbufs->outbuf); 2955 char *scratchbuf = &pbufs->scratchbuf[0]; 2956 char *outbuf = &pbufs->outbuf[0]; 2957 va_list args; 2958 size_t len; 2959 2960 va_start(args, fmt); 2961 len = vscnprintf(scratchbuf, scratchbuf_sz, fmt, args); 2962 va_end(args); 2963 2964 /* 2965 * Make sure outbuf is sufficiently large before prepending. 2966 * Keep at least the prefix when the message must be truncated. 2967 * It is a rather theoretical problem when someone tries to 2968 * use a minimalist buffer. 2969 */ 2970 if (WARN_ON_ONCE(len + PRINTK_PREFIX_MAX >= outbuf_sz)) 2971 return; 2972 2973 if (pmsg->outbuf_len + len >= outbuf_sz) { 2974 /* Truncate the message, but keep it terminated. */ 2975 pmsg->outbuf_len = outbuf_sz - (len + 1); 2976 outbuf[pmsg->outbuf_len] = 0; 2977 } 2978 2979 memmove(outbuf + len, outbuf, pmsg->outbuf_len + 1); 2980 memcpy(outbuf, scratchbuf, len); 2981 pmsg->outbuf_len += len; 2982 } 2983 2984 /* 2985 * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message". 2986 * @pmsg->outbuf_len is updated appropriately. 2987 * 2988 * @pmsg is the printk message to prepend. 2989 * 2990 * @dropped is the dropped count to report in the dropped message. 2991 */ 2992 void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped) 2993 { 2994 console_prepend_message(pmsg, "** %lu printk messages dropped **\n", dropped); 2995 } 2996 2997 /* 2998 * Prepend the message in @pmsg->pbufs->outbuf with a "replay message". 2999 * @pmsg->outbuf_len is updated appropriately. 3000 * 3001 * @pmsg is the printk message to prepend. 3002 */ 3003 void console_prepend_replay(struct printk_message *pmsg) 3004 { 3005 console_prepend_message(pmsg, "** replaying previous printk message **\n"); 3006 } 3007 3008 /* 3009 * Read and format the specified record (or a later record if the specified 3010 * record is not available). 3011 * 3012 * @pmsg will contain the formatted result. @pmsg->pbufs must point to a 3013 * struct printk_buffers. 3014 * 3015 * @seq is the record to read and format. If it is not available, the next 3016 * valid record is read. 3017 * 3018 * @is_extended specifies if the message should be formatted for extended 3019 * console output. 3020 * 3021 * @may_suppress specifies if records may be skipped based on loglevel. 3022 * 3023 * Returns false if no record is available. Otherwise true and all fields 3024 * of @pmsg are valid. (See the documentation of struct printk_message 3025 * for information about the @pmsg fields.) 3026 */ 3027 bool printk_get_next_message(struct printk_message *pmsg, u64 seq, 3028 bool is_extended, bool may_suppress) 3029 { 3030 struct printk_buffers *pbufs = pmsg->pbufs; 3031 const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf); 3032 const size_t outbuf_sz = sizeof(pbufs->outbuf); 3033 char *scratchbuf = &pbufs->scratchbuf[0]; 3034 char *outbuf = &pbufs->outbuf[0]; 3035 struct printk_info info; 3036 struct printk_record r; 3037 size_t len = 0; 3038 bool force_con; 3039 3040 /* 3041 * Formatting extended messages requires a separate buffer, so use the 3042 * scratch buffer to read in the ringbuffer text. 3043 * 3044 * Formatting normal messages is done in-place, so read the ringbuffer 3045 * text directly into the output buffer.
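	 *
	 * Both buffers come from @pmsg->pbufs, which the caller sets up
	 * beforehand, e.g. (sketch, as console_emit_next_record() does below):
	 *
	 *	struct printk_message pmsg = {
	 *		.pbufs = &printk_shared_pbufs,
	 *	};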
3046 */ 3047 if (is_extended) 3048 prb_rec_init_rd(&r, &info, scratchbuf, scratchbuf_sz); 3049 else 3050 prb_rec_init_rd(&r, &info, outbuf, outbuf_sz); 3051 3052 if (!prb_read_valid(prb, seq, &r)) 3053 return false; 3054 3055 pmsg->seq = r.info->seq; 3056 pmsg->dropped = r.info->seq - seq; 3057 force_con = r.info->flags & LOG_FORCE_CON; 3058 pmsg_load_execution_ctx(pmsg, r.info); 3059 3060 /* 3061 * Skip records that are not forced to be printed on consoles and that 3062 * has level above the console loglevel. 3063 */ 3064 if (!force_con && may_suppress && suppress_message_printing(r.info->level)) 3065 goto out; 3066 3067 if (is_extended) { 3068 len = info_print_ext_header(outbuf, outbuf_sz, r.info); 3069 len += msg_print_ext_body(outbuf + len, outbuf_sz - len, 3070 &r.text_buf[0], r.info->text_len, &r.info->dev_info); 3071 } else { 3072 len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); 3073 } 3074 out: 3075 pmsg->outbuf_len = len; 3076 return true; 3077 } 3078 3079 /* 3080 * The legacy console always acquires a spinlock_t from its printing 3081 * callback. This violates lock nesting if the caller acquired an always 3082 * spinning lock (raw_spinlock_t) while invoking printk(). This is not a 3083 * problem on PREEMPT_RT because legacy consoles print always from a 3084 * dedicated thread and never from within printk(). Therefore we tell 3085 * lockdep that a sleeping spin lock (spinlock_t) is valid here. 3086 */ 3087 #ifdef CONFIG_PREEMPT_RT 3088 static inline void printk_legacy_allow_spinlock_enter(void) { } 3089 static inline void printk_legacy_allow_spinlock_exit(void) { } 3090 #else 3091 static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_CONFIG); 3092 3093 static inline void printk_legacy_allow_spinlock_enter(void) 3094 { 3095 lock_map_acquire_try(&printk_legacy_map); 3096 } 3097 3098 static inline void printk_legacy_allow_spinlock_exit(void) 3099 { 3100 lock_map_release(&printk_legacy_map); 3101 } 3102 #endif /* CONFIG_PREEMPT_RT */ 3103 3104 /* 3105 * Used as the printk buffers for non-panic, serialized console printing. 3106 * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles. 3107 * Its usage requires the console_lock held. 3108 */ 3109 struct printk_buffers printk_shared_pbufs; 3110 3111 /* 3112 * Print one record for the given console. The record printed is whatever 3113 * record is the next available record for the given console. 3114 * 3115 * @handover will be set to true if a printk waiter has taken over the 3116 * console_lock, in which case the caller is no longer holding both the 3117 * console_lock and the SRCU read lock. Otherwise it is set to false. 3118 * 3119 * @cookie is the cookie from the SRCU read lock. 3120 * 3121 * Returns false if the given console has no next record to print, otherwise 3122 * true. 3123 * 3124 * Requires the console_lock and the SRCU read lock. 3125 */ 3126 static bool console_emit_next_record(struct console *con, bool *handover, int cookie) 3127 { 3128 bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED; 3129 char *outbuf = &printk_shared_pbufs.outbuf[0]; 3130 struct printk_message pmsg = { 3131 .pbufs = &printk_shared_pbufs, 3132 }; 3133 unsigned long flags; 3134 3135 *handover = false; 3136 3137 if (!printk_get_next_message(&pmsg, con->seq, is_extended, true)) 3138 return false; 3139 3140 con->dropped += pmsg.dropped; 3141 3142 /* Skip messages of formatted length 0. 
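	 * A zero length typically means the record was suppressed by the
	 * console loglevel in printk_get_next_message(); the sequence number
	 * is still advanced so the record is not retried.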
*/ 3143 if (pmsg.outbuf_len == 0) { 3144 con->seq = pmsg.seq + 1; 3145 goto skip; 3146 } 3147 3148 if (con->dropped && !is_extended) { 3149 console_prepend_dropped(&pmsg, con->dropped); 3150 con->dropped = 0; 3151 } 3152 3153 /* Write everything out to the hardware. */ 3154 3155 if (force_legacy_kthread() && !panic_in_progress()) { 3156 /* 3157 * With forced threading this function is in a task context 3158 * (either legacy kthread or get_init_console_seq()). There 3159 * is no need for concern about printk reentrance, handovers, 3160 * or lockdep complaints. 3161 */ 3162 3163 con->write(con, outbuf, pmsg.outbuf_len); 3164 con->seq = pmsg.seq + 1; 3165 } else { 3166 /* 3167 * While actively printing out messages, if another printk() 3168 * were to occur on another CPU, it may wait for this one to 3169 * finish. This task can not be preempted if there is a 3170 * waiter waiting to take over. 3171 * 3172 * Interrupts are disabled because the hand over to a waiter 3173 * must not be interrupted until the hand over is completed 3174 * (@console_waiter is cleared). 3175 */ 3176 printk_safe_enter_irqsave(flags); 3177 console_lock_spinning_enable(); 3178 3179 /* Do not trace print latency. */ 3180 stop_critical_timings(); 3181 3182 printk_legacy_allow_spinlock_enter(); 3183 con->write(con, outbuf, pmsg.outbuf_len); 3184 printk_legacy_allow_spinlock_exit(); 3185 3186 start_critical_timings(); 3187 3188 con->seq = pmsg.seq + 1; 3189 3190 *handover = console_lock_spinning_disable_and_check(cookie); 3191 printk_safe_exit_irqrestore(flags); 3192 } 3193 skip: 3194 return true; 3195 } 3196 3197 #else 3198 3199 static bool console_emit_next_record(struct console *con, bool *handover, int cookie) 3200 { 3201 *handover = false; 3202 return false; 3203 } 3204 3205 static inline void printk_kthreads_check_locked(void) { } 3206 3207 #endif /* CONFIG_PRINTK */ 3208 3209 3210 /* 3211 * Print out one record for each console. 3212 * 3213 * @do_cond_resched is set by the caller. It can be true only in schedulable 3214 * context. 3215 * 3216 * @next_seq is set to the sequence number after the last available record. 3217 * The value is valid only when all usable consoles were flushed. It is 3218 * when the function returns true (can do the job) and @try_again parameter 3219 * is set to false, see below. 3220 * 3221 * @handover will be set to true if a printk waiter has taken over the 3222 * console_lock, in which case the caller is no longer holding the 3223 * console_lock. Otherwise it is set to false. 3224 * 3225 * @try_again will be set to true when it still makes sense to call this 3226 * function again. The function could do the job, see the return value. 3227 * And some consoles still make progress. 3228 * 3229 * Returns true when the function could do the job. Some consoles are usable, 3230 * and there was no takeover and no panic_on_other_cpu(). 3231 * 3232 * Requires the console_lock. 
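 *
 * The expected calling pattern, as used by legacy_kthread_func() (sketch):
 *
 *	console_lock();
 *	console_flush_one_record(true, &next_seq, &handover, &try_again);
 *	if (!handover)
 *		__console_unlock();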
3233 */ 3234 static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *handover, 3235 bool *try_again) 3236 { 3237 struct console_flush_type ft; 3238 bool any_usable = false; 3239 struct console *con; 3240 int cookie; 3241 3242 *try_again = false; 3243 3244 printk_get_console_flush_type(&ft); 3245 3246 cookie = console_srcu_read_lock(); 3247 for_each_console_srcu(con) { 3248 short flags = console_srcu_read_flags(con); 3249 u64 printk_seq; 3250 bool progress; 3251 3252 /* 3253 * console_flush_one_record() is only responsible for 3254 * nbcon consoles when the nbcon consoles cannot print via 3255 * their atomic or threaded flushing. 3256 */ 3257 if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload)) 3258 continue; 3259 3260 if (!console_is_usable(con, flags, !do_cond_resched)) 3261 continue; 3262 any_usable = true; 3263 3264 if (flags & CON_NBCON) { 3265 progress = nbcon_legacy_emit_next_record(con, handover, cookie, 3266 !do_cond_resched); 3267 printk_seq = nbcon_seq_read(con); 3268 } else { 3269 progress = console_emit_next_record(con, handover, cookie); 3270 printk_seq = con->seq; 3271 } 3272 3273 /* 3274 * If a handover has occurred, the SRCU read lock 3275 * is already released. 3276 */ 3277 if (*handover) 3278 goto fail; 3279 3280 /* Track the next of the highest seq flushed. */ 3281 if (printk_seq > *next_seq) 3282 *next_seq = printk_seq; 3283 3284 if (!progress) 3285 continue; 3286 3287 /* 3288 * An usable console made a progress. There might still be 3289 * pending messages. 3290 */ 3291 *try_again = true; 3292 3293 /* Allow panic_cpu to take over the consoles safely. */ 3294 if (panic_on_other_cpu()) 3295 goto fail_srcu; 3296 3297 if (do_cond_resched) 3298 cond_resched(); 3299 } 3300 console_srcu_read_unlock(cookie); 3301 3302 return any_usable; 3303 3304 fail_srcu: 3305 console_srcu_read_unlock(cookie); 3306 fail: 3307 *try_again = false; 3308 return false; 3309 } 3310 3311 /* 3312 * Print out all remaining records to all consoles. 3313 * 3314 * @do_cond_resched is set by the caller. It can be true only in schedulable 3315 * context. 3316 * 3317 * @next_seq is set to the sequence number after the last available record. 3318 * The value is valid only when this function returns true. It means that all 3319 * usable consoles are completely flushed. 3320 * 3321 * @handover will be set to true if a printk waiter has taken over the 3322 * console_lock, in which case the caller is no longer holding the 3323 * console_lock. Otherwise it is set to false. 3324 * 3325 * Returns true when there was at least one usable console and all messages 3326 * were flushed to all usable consoles. A returned false informs the caller 3327 * that everything was not flushed (either there were no usable consoles or 3328 * another context has taken over printing or it is a panic situation and this 3329 * is not the panic CPU). Regardless the reason, the caller should assume it 3330 * is not useful to immediately try again. 3331 * 3332 * Requires the console_lock. 
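 *
 * For example, console_flush_on_panic() calls this with @do_cond_resched
 * false because the panic context must not sleep, while
 * __console_flush_and_unlock() passes the saved @console_may_schedule value
 * so that a schedulable caller can cond_resched() between records.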
3333 */ 3334 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover) 3335 { 3336 bool try_again; 3337 bool ret; 3338 3339 *next_seq = 0; 3340 *handover = false; 3341 3342 do { 3343 ret = console_flush_one_record(do_cond_resched, next_seq, 3344 handover, &try_again); 3345 } while (try_again); 3346 3347 return ret; 3348 } 3349 3350 static void __console_flush_and_unlock(void) 3351 { 3352 bool do_cond_resched; 3353 bool handover; 3354 bool flushed; 3355 u64 next_seq; 3356 3357 /* 3358 * Console drivers are called with interrupts disabled, so 3359 * @console_may_schedule should be cleared before; however, we may 3360 * end up dumping a lot of lines, for example, if called from 3361 * console registration path, and should invoke cond_resched() 3362 * between lines if allowable. Not doing so can cause a very long 3363 * scheduling stall on a slow console leading to RCU stall and 3364 * softlockup warnings which exacerbate the issue with more 3365 * messages practically incapacitating the system. Therefore, create 3366 * a local to use for the printing loop. 3367 */ 3368 do_cond_resched = console_may_schedule; 3369 3370 do { 3371 console_may_schedule = 0; 3372 3373 flushed = console_flush_all(do_cond_resched, &next_seq, &handover); 3374 if (!handover) 3375 __console_unlock(); 3376 3377 /* 3378 * Abort if there was a failure to flush all messages to all 3379 * usable consoles. Either it is not possible to flush (in 3380 * which case it would be an infinite loop of retrying) or 3381 * another context has taken over printing. 3382 */ 3383 if (!flushed) 3384 break; 3385 3386 /* 3387 * Some context may have added new records after 3388 * console_flush_all() but before unlocking the console. 3389 * Re-check if there is a new record to flush. If the trylock 3390 * fails, another context is already handling the printing. 3391 */ 3392 } while (prb_read_valid(prb, next_seq, NULL) && console_trylock()); 3393 } 3394 3395 /** 3396 * console_unlock - unblock the legacy console subsystem from printing 3397 * 3398 * Releases the console_lock which the caller holds to block printing of 3399 * the legacy console subsystem. 3400 * 3401 * While the console_lock was held, console output may have been buffered 3402 * by printk(). If this is the case, console_unlock() emits the output on 3403 * legacy consoles prior to releasing the lock. 3404 * 3405 * console_unlock(); may be called from any context. 3406 */ 3407 void console_unlock(void) 3408 { 3409 struct console_flush_type ft; 3410 3411 printk_get_console_flush_type(&ft); 3412 if (ft.legacy_direct) 3413 __console_flush_and_unlock(); 3414 else 3415 __console_unlock(); 3416 } 3417 EXPORT_SYMBOL(console_unlock); 3418 3419 /** 3420 * console_conditional_schedule - yield the CPU if required 3421 * 3422 * If the console code is currently allowed to sleep, and 3423 * if this CPU should yield the CPU to another task, do 3424 * so here. 3425 * 3426 * Must be called within console_lock();. 3427 */ 3428 void __sched console_conditional_schedule(void) 3429 { 3430 if (console_may_schedule) 3431 cond_resched(); 3432 } 3433 EXPORT_SYMBOL(console_conditional_schedule); 3434 3435 void console_unblank(void) 3436 { 3437 bool found_unblank = false; 3438 struct console *c; 3439 int cookie; 3440 3441 /* 3442 * First check if there are any consoles implementing the unblank() 3443 * callback. If not, there is no reason to continue and take the 3444 * console lock, which in particular can be dangerous if 3445 * @oops_in_progress is set. 
3446 */ 3447 cookie = console_srcu_read_lock(); 3448 for_each_console_srcu(c) { 3449 if (!console_is_usable(c, console_srcu_read_flags(c), true)) 3450 continue; 3451 3452 if (c->unblank) { 3453 found_unblank = true; 3454 break; 3455 } 3456 } 3457 console_srcu_read_unlock(cookie); 3458 if (!found_unblank) 3459 return; 3460 3461 /* 3462 * Stop console printing because the unblank() callback may 3463 * assume the console is not within its write() callback. 3464 * 3465 * If @oops_in_progress is set, this may be an atomic context. 3466 * In that case, attempt a trylock as best-effort. 3467 */ 3468 if (oops_in_progress) { 3469 /* Semaphores are not NMI-safe. */ 3470 if (in_nmi()) 3471 return; 3472 3473 /* 3474 * Attempting to trylock the console lock can deadlock 3475 * if another CPU was stopped while modifying the 3476 * semaphore. "Hope and pray" that this is not the 3477 * current situation. 3478 */ 3479 if (down_trylock_console_sem() != 0) 3480 return; 3481 } else 3482 console_lock(); 3483 3484 console_locked = 1; 3485 console_may_schedule = 0; 3486 3487 cookie = console_srcu_read_lock(); 3488 for_each_console_srcu(c) { 3489 if (!console_is_usable(c, console_srcu_read_flags(c), true)) 3490 continue; 3491 3492 if (c->unblank) 3493 c->unblank(); 3494 } 3495 console_srcu_read_unlock(cookie); 3496 3497 console_unlock(); 3498 3499 if (!oops_in_progress) 3500 pr_flush(1000, true); 3501 } 3502 3503 /* 3504 * Rewind all consoles to the oldest available record. 3505 * 3506 * IMPORTANT: The function is safe only when called under 3507 * console_lock(). It is not enforced because 3508 * it is used as a best effort in panic(). 3509 */ 3510 static void __console_rewind_all(void) 3511 { 3512 struct console *c; 3513 short flags; 3514 int cookie; 3515 u64 seq; 3516 3517 seq = prb_first_valid_seq(prb); 3518 3519 cookie = console_srcu_read_lock(); 3520 for_each_console_srcu(c) { 3521 flags = console_srcu_read_flags(c); 3522 3523 if (flags & CON_NBCON) { 3524 nbcon_seq_force(c, seq); 3525 } else { 3526 /* 3527 * This assignment is safe only when called under 3528 * console_lock(). On panic, legacy consoles are 3529 * only best effort. 3530 */ 3531 c->seq = seq; 3532 } 3533 } 3534 console_srcu_read_unlock(cookie); 3535 } 3536 3537 /** 3538 * console_flush_on_panic - flush console content on panic 3539 * @mode: flush all messages in buffer or just the pending ones 3540 * 3541 * Immediately output all pending messages no matter what. 3542 */ 3543 void console_flush_on_panic(enum con_flush_mode mode) 3544 { 3545 struct console_flush_type ft; 3546 bool handover; 3547 u64 next_seq; 3548 3549 /* 3550 * Ignore the console lock and flush out the messages. Attempting a 3551 * trylock would not be useful because: 3552 * 3553 * - if it is contended, it must be ignored anyway 3554 * - console_lock() and console_trylock() block and fail 3555 * respectively in panic for non-panic CPUs 3556 * - semaphores are not NMI-safe 3557 */ 3558 3559 /* 3560 * If another context is holding the console lock, 3561 * @console_may_schedule might be set. Clear it so that 3562 * this context does not call cond_resched() while flushing. 3563 */ 3564 console_may_schedule = 0; 3565 3566 if (mode == CONSOLE_REPLAY_ALL) 3567 __console_rewind_all(); 3568 3569 printk_get_console_flush_type(&ft); 3570 if (ft.nbcon_atomic) 3571 nbcon_atomic_flush_pending(); 3572 3573 /* Flush legacy consoles once allowed, even when dangerous. 
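	 * "Allowed" here means that printk_legacy_allow_panic_sync() has set
	 * @legacy_allow_panic_sync, i.e. the panic CPU may write to the legacy
	 * consoles directly from this context.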
*/ 3574 if (legacy_allow_panic_sync) 3575 console_flush_all(false, &next_seq, &handover); 3576 } 3577 3578 /* 3579 * Return the console tty driver structure and its associated index 3580 */ 3581 struct tty_driver *console_device(int *index) 3582 { 3583 struct console *c; 3584 struct tty_driver *driver = NULL; 3585 int cookie; 3586 3587 /* 3588 * Take console_lock to serialize device() callback with 3589 * other console operations. For example, fg_console is 3590 * modified under console_lock when switching vt. 3591 */ 3592 console_lock(); 3593 3594 cookie = console_srcu_read_lock(); 3595 for_each_console_srcu(c) { 3596 if (!c->device) 3597 continue; 3598 driver = c->device(c, index); 3599 if (driver) 3600 break; 3601 } 3602 console_srcu_read_unlock(cookie); 3603 3604 console_unlock(); 3605 return driver; 3606 } 3607 3608 /* 3609 * Prevent further output on the passed console device so that (for example) 3610 * serial drivers can suspend console output before suspending a port, and can 3611 * re-enable output afterwards. 3612 */ 3613 void console_suspend(struct console *console) 3614 { 3615 __pr_flush(console, 1000, true); 3616 console_list_lock(); 3617 console_srcu_write_flags(console, console->flags & ~CON_ENABLED); 3618 console_list_unlock(); 3619 3620 /* 3621 * Ensure that all SRCU list walks have completed. All contexts must 3622 * be able to see that this console is disabled so that (for example) 3623 * the caller can suspend the port without risk of another context 3624 * using the port. 3625 */ 3626 synchronize_srcu(&console_srcu); 3627 } 3628 EXPORT_SYMBOL(console_suspend); 3629 3630 void console_resume(struct console *console) 3631 { 3632 struct console_flush_type ft; 3633 bool is_nbcon; 3634 3635 console_list_lock(); 3636 console_srcu_write_flags(console, console->flags | CON_ENABLED); 3637 is_nbcon = console->flags & CON_NBCON; 3638 console_list_unlock(); 3639 3640 /* 3641 * Ensure that all SRCU list walks have completed. The related 3642 * printing context must be able to see it is enabled so that 3643 * it is guaranteed to wake up and resume printing. 3644 */ 3645 synchronize_srcu(&console_srcu); 3646 3647 printk_get_console_flush_type(&ft); 3648 if (is_nbcon && ft.nbcon_offload) 3649 nbcon_kthread_wake(console); 3650 else if (ft.legacy_offload) 3651 defer_console_output(); 3652 3653 __pr_flush(console, 1000, true); 3654 } 3655 EXPORT_SYMBOL(console_resume); 3656 3657 #ifdef CONFIG_PRINTK 3658 static int unregister_console_locked(struct console *console); 3659 3660 /* True when system boot is far enough to create printer threads. */ 3661 bool printk_kthreads_ready __ro_after_init; 3662 3663 static struct task_struct *printk_legacy_kthread; 3664 3665 static bool legacy_kthread_should_wakeup(void) 3666 { 3667 struct console_flush_type ft; 3668 struct console *con; 3669 bool ret = false; 3670 int cookie; 3671 3672 if (kthread_should_stop()) 3673 return true; 3674 3675 printk_get_console_flush_type(&ft); 3676 3677 cookie = console_srcu_read_lock(); 3678 for_each_console_srcu(con) { 3679 short flags = console_srcu_read_flags(con); 3680 u64 printk_seq; 3681 3682 /* 3683 * The legacy printer thread is only responsible for nbcon 3684 * consoles when the nbcon consoles cannot print via their 3685 * atomic or threaded flushing. 
3686 */ 3687 if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload)) 3688 continue; 3689 3690 if (!console_is_usable(con, flags, false)) 3691 continue; 3692 3693 if (flags & CON_NBCON) { 3694 printk_seq = nbcon_seq_read(con); 3695 } else { 3696 /* 3697 * It is safe to read @seq because only this 3698 * thread context updates @seq. 3699 */ 3700 printk_seq = con->seq; 3701 } 3702 3703 if (prb_read_valid(prb, printk_seq, NULL)) { 3704 ret = true; 3705 break; 3706 } 3707 } 3708 console_srcu_read_unlock(cookie); 3709 3710 return ret; 3711 } 3712 3713 static int legacy_kthread_func(void *unused) 3714 { 3715 bool try_again; 3716 3717 wait_for_event: 3718 wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup()); 3719 3720 do { 3721 bool handover = false; 3722 u64 next_seq = 0; 3723 3724 if (kthread_should_stop()) 3725 return 0; 3726 3727 console_lock(); 3728 console_flush_one_record(true, &next_seq, &handover, &try_again); 3729 if (!handover) 3730 __console_unlock(); 3731 3732 } while (try_again); 3733 3734 goto wait_for_event; 3735 } 3736 3737 static bool legacy_kthread_create(void) 3738 { 3739 struct task_struct *kt; 3740 3741 lockdep_assert_console_list_lock_held(); 3742 3743 kt = kthread_run(legacy_kthread_func, NULL, "pr/legacy"); 3744 if (WARN_ON(IS_ERR(kt))) { 3745 pr_err("failed to start legacy printing thread\n"); 3746 return false; 3747 } 3748 3749 printk_legacy_kthread = kt; 3750 3751 /* 3752 * It is important that console printing threads are scheduled 3753 * shortly after a printk call and with generous runtime budgets. 3754 */ 3755 sched_set_normal(printk_legacy_kthread, -20); 3756 3757 return true; 3758 } 3759 3760 /** 3761 * printk_kthreads_shutdown - shutdown all threaded printers 3762 * @data: syscore context 3763 * 3764 * On system shutdown all threaded printers are stopped. This allows printk 3765 * to transition back to atomic printing, thus providing a robust mechanism 3766 * for the final shutdown/reboot messages to be output. 3767 */ 3768 static void printk_kthreads_shutdown(void *data) 3769 { 3770 struct console *con; 3771 3772 console_list_lock(); 3773 if (printk_kthreads_running) { 3774 printk_kthreads_running = false; 3775 3776 for_each_console(con) { 3777 if (con->flags & CON_NBCON) 3778 nbcon_kthread_stop(con); 3779 } 3780 3781 /* 3782 * The threads may have been stopped while printing a 3783 * backlog. Flush any records left over. 3784 */ 3785 nbcon_atomic_flush_pending(); 3786 } 3787 console_list_unlock(); 3788 } 3789 3790 static const struct syscore_ops printk_syscore_ops = { 3791 .shutdown = printk_kthreads_shutdown, 3792 }; 3793 3794 static struct syscore printk_syscore = { 3795 .ops = &printk_syscore_ops, 3796 }; 3797 3798 /* 3799 * If appropriate, start nbcon kthreads and set @printk_kthreads_running. 3800 * If any kthreads fail to start, those consoles are unregistered. 3801 * 3802 * Must be called under console_list_lock(). 3803 */ 3804 static void printk_kthreads_check_locked(void) 3805 { 3806 struct hlist_node *tmp; 3807 struct console *con; 3808 3809 lockdep_assert_console_list_lock_held(); 3810 3811 if (!printk_kthreads_ready) 3812 return; 3813 3814 /* Start or stop the legacy kthread when needed. */ 3815 if (have_legacy_console || have_boot_console) { 3816 if (!printk_legacy_kthread && 3817 force_legacy_kthread() && 3818 !legacy_kthread_create()) { 3819 /* 3820 * All legacy consoles must be unregistered. If there 3821 * are any nbcon consoles, they will set up their own 3822 * kthread. 
3823 */ 3824 hlist_for_each_entry_safe(con, tmp, &console_list, node) { 3825 if (con->flags & CON_NBCON) 3826 continue; 3827 3828 unregister_console_locked(con); 3829 } 3830 } 3831 } else if (printk_legacy_kthread) { 3832 kthread_stop(printk_legacy_kthread); 3833 printk_legacy_kthread = NULL; 3834 } 3835 3836 /* 3837 * Printer threads cannot be started as long as any boot console is 3838 * registered because there is no way to synchronize the hardware 3839 * registers between boot console code and regular console code. 3840 * It can only be known that there will be no new boot consoles when 3841 * an nbcon console is registered. 3842 */ 3843 if (have_boot_console || !have_nbcon_console) { 3844 /* Clear flag in case all nbcon consoles unregistered. */ 3845 printk_kthreads_running = false; 3846 return; 3847 } 3848 3849 if (printk_kthreads_running) 3850 return; 3851 3852 hlist_for_each_entry_safe(con, tmp, &console_list, node) { 3853 if (!(con->flags & CON_NBCON)) 3854 continue; 3855 3856 if (!nbcon_kthread_create(con)) 3857 unregister_console_locked(con); 3858 } 3859 3860 printk_kthreads_running = true; 3861 } 3862 3863 static int __init printk_set_kthreads_ready(void) 3864 { 3865 register_syscore(&printk_syscore); 3866 3867 console_list_lock(); 3868 printk_kthreads_ready = true; 3869 printk_kthreads_check_locked(); 3870 console_list_unlock(); 3871 3872 return 0; 3873 } 3874 early_initcall(printk_set_kthreads_ready); 3875 #endif /* CONFIG_PRINTK */ 3876 3877 static int __read_mostly keep_bootcon; 3878 3879 static int __init keep_bootcon_setup(char *str) 3880 { 3881 keep_bootcon = 1; 3882 pr_info("debug: skip boot console de-registration.\n"); 3883 3884 return 0; 3885 } 3886 3887 early_param("keep_bootcon", keep_bootcon_setup); 3888 3889 static int console_call_setup(struct console *newcon, char *options) 3890 { 3891 int err; 3892 3893 if (!newcon->setup) 3894 return 0; 3895 3896 /* Synchronize with possible boot console. */ 3897 console_lock(); 3898 err = newcon->setup(newcon, options); 3899 console_unlock(); 3900 3901 return err; 3902 } 3903 3904 /* 3905 * This is called by register_console() to try to match 3906 * the newly registered console with any of the ones selected 3907 * by either the command line or add_preferred_console() and 3908 * setup/enable it. 3909 * 3910 * Care need to be taken with consoles that are statically 3911 * enabled such as netconsole 3912 */ 3913 static int try_enable_preferred_console(struct console *newcon, 3914 bool user_specified) 3915 { 3916 struct console_cmdline *c; 3917 int i, err; 3918 3919 for (i = 0, c = console_cmdline; 3920 i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]); 3921 i++, c++) { 3922 /* Console not yet initialized? 
*/ 3923 if (!c->name[0]) 3924 continue; 3925 if (c->user_specified != user_specified) 3926 continue; 3927 if (!newcon->match || 3928 newcon->match(newcon, c->name, c->index, c->options) != 0) { 3929 /* default matching */ 3930 BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name)); 3931 if (strcmp(c->name, newcon->name) != 0) 3932 continue; 3933 if (newcon->index >= 0 && 3934 newcon->index != c->index) 3935 continue; 3936 if (newcon->index < 0) 3937 newcon->index = c->index; 3938 3939 if (_braille_register_console(newcon, c)) 3940 return 0; 3941 3942 err = console_call_setup(newcon, c->options); 3943 if (err) 3944 return err; 3945 } 3946 newcon->flags |= CON_ENABLED; 3947 if (i == preferred_console) 3948 newcon->flags |= CON_CONSDEV; 3949 return 0; 3950 } 3951 3952 /* 3953 * Some consoles, such as pstore and netconsole, can be enabled even 3954 * without matching. Accept the pre-enabled consoles only when match() 3955 * and setup() had a chance to be called. 3956 */ 3957 if (newcon->flags & CON_ENABLED && c->user_specified == user_specified) 3958 return 0; 3959 3960 return -ENOENT; 3961 } 3962 3963 /* Try to enable the console unconditionally */ 3964 static void try_enable_default_console(struct console *newcon) 3965 { 3966 if (newcon->index < 0) 3967 newcon->index = 0; 3968 3969 if (console_call_setup(newcon, NULL) != 0) 3970 return; 3971 3972 newcon->flags |= CON_ENABLED; 3973 3974 if (newcon->device) 3975 newcon->flags |= CON_CONSDEV; 3976 } 3977 3978 /* Return the starting sequence number for a newly registered console. */ 3979 static u64 get_init_console_seq(struct console *newcon, bool bootcon_registered) 3980 { 3981 struct console *con; 3982 bool handover; 3983 u64 init_seq; 3984 3985 if (newcon->flags & (CON_PRINTBUFFER | CON_BOOT)) { 3986 /* Get a consistent copy of @syslog_seq. */ 3987 mutex_lock(&syslog_lock); 3988 init_seq = syslog_seq; 3989 mutex_unlock(&syslog_lock); 3990 } else { 3991 /* Begin with next message added to ringbuffer. */ 3992 init_seq = prb_next_seq(prb); 3993 3994 /* 3995 * If any enabled boot consoles are due to be unregistered 3996 * shortly, some may not be caught up and may be the same 3997 * device as @newcon. Since it is not known which boot console 3998 * is the same device, flush all consoles and, if necessary, 3999 * start with the message of the enabled boot console that is 4000 * the furthest behind. 4001 */ 4002 if (bootcon_registered && !keep_bootcon) { 4003 /* 4004 * Hold the console_lock to stop console printing and 4005 * guarantee safe access to console->seq. 4006 */ 4007 console_lock(); 4008 4009 /* 4010 * Flush all consoles and set the console to start at 4011 * the next unprinted sequence number. 4012 */ 4013 if (!console_flush_all(true, &init_seq, &handover)) { 4014 /* 4015 * Flushing failed. Just choose the lowest 4016 * sequence of the enabled boot consoles. 4017 */ 4018 4019 /* 4020 * If there was a handover, this context no 4021 * longer holds the console_lock. 
				 */
				if (handover)
					console_lock();

				init_seq = prb_next_seq(prb);
				for_each_console(con) {
					u64 seq;

					if (!(con->flags & CON_BOOT) ||
					    !(con->flags & CON_ENABLED)) {
						continue;
					}

					if (con->flags & CON_NBCON)
						seq = nbcon_seq_read(con);
					else
						seq = con->seq;

					if (seq < init_seq)
						init_seq = seq;
				}
			}

			console_unlock();
		}
	}

	return init_seq;
}

#define console_first()			\
	hlist_entry(console_list.first, struct console, node)

static int unregister_console_locked(struct console *console);

/*
 * The console driver calls this routine during kernel initialization
 * to register the console printing procedure with printk() and to
 * print any messages that were printed by the kernel before the
 * console driver was initialized.
 *
 * This can happen pretty early during the boot process (because of
 * early_printk) - sometimes before setup_arch() completes - be careful
 * of what kernel features are used - they may not be initialised yet.
 *
 * There are two types of consoles - bootconsoles (early_printk) and
 * "real" consoles (everything which is not a bootconsole) which are
 * handled differently.
 *  - Any number of bootconsoles can be registered at any time.
 *  - As soon as a "real" console is registered, all bootconsoles
 *    will be unregistered automatically.
 *  - Once a "real" console is registered, any attempt to register a
 *    bootconsole will be rejected.
 */
void register_console(struct console *newcon)
{
	bool use_device_lock = (newcon->flags & CON_NBCON) && newcon->write_atomic;
	bool bootcon_registered = false;
	bool realcon_registered = false;
	struct console *con;
	unsigned long flags;
	u64 init_seq;
	int err;

	console_list_lock();

	for_each_console(con) {
		if (WARN(con == newcon, "console '%s%d' already registered\n",
			 con->name, con->index)) {
			goto unlock;
		}

		if (con->flags & CON_BOOT)
			bootcon_registered = true;
		else
			realcon_registered = true;
	}

	/* Do not register boot consoles when there already is a real one. */
	if ((newcon->flags & CON_BOOT) && realcon_registered) {
		pr_info("Too late to register bootconsole %s%d\n",
			newcon->name, newcon->index);
		goto unlock;
	}

	if (newcon->flags & CON_NBCON) {
		/*
		 * Ensure the nbcon console buffers can be allocated
		 * before modifying any global data.
		 */
		if (!nbcon_alloc(newcon))
			goto unlock;
	}

	/*
	 * See if we want to enable this console driver by default.
	 *
	 * Nope when a console is preferred by the command line, device
	 * tree, or SPCR.
	 *
	 * The first real console with tty binding (driver) wins. More
	 * consoles might get enabled before the right one is found.
	 *
	 * Note that a console with tty binding will have CON_CONSDEV
	 * flag set and will be first in the list.
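	 *
	 * For example (illustrative): booting with console=ttyS0,115200 on
	 * the kernel command line records a preferred console, so this
	 * default-enable path is skipped and the new console is matched
	 * against that command line entry below instead.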
4127 */ 4128 if (preferred_console < 0) { 4129 if (hlist_empty(&console_list) || !console_first()->device || 4130 console_first()->flags & CON_BOOT) { 4131 try_enable_default_console(newcon); 4132 } 4133 } 4134 4135 /* See if this console matches one we selected on the command line */ 4136 err = try_enable_preferred_console(newcon, true); 4137 4138 /* If not, try to match against the platform default(s) */ 4139 if (err == -ENOENT) 4140 err = try_enable_preferred_console(newcon, false); 4141 4142 /* printk() messages are not printed to the Braille console. */ 4143 if (err || newcon->flags & CON_BRL) { 4144 if (newcon->flags & CON_NBCON) 4145 nbcon_free(newcon); 4146 goto unlock; 4147 } 4148 4149 /* 4150 * If we have a bootconsole, and are switching to a real console, 4151 * don't print everything out again, since when the boot console, and 4152 * the real console are the same physical device, it's annoying to 4153 * see the beginning boot messages twice 4154 */ 4155 if (bootcon_registered && 4156 ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) { 4157 newcon->flags &= ~CON_PRINTBUFFER; 4158 } 4159 4160 newcon->dropped = 0; 4161 init_seq = get_init_console_seq(newcon, bootcon_registered); 4162 4163 if (newcon->flags & CON_NBCON) { 4164 have_nbcon_console = true; 4165 nbcon_seq_force(newcon, init_seq); 4166 } else { 4167 have_legacy_console = true; 4168 newcon->seq = init_seq; 4169 } 4170 4171 if (newcon->flags & CON_BOOT) 4172 have_boot_console = true; 4173 4174 /* 4175 * If another context is actively using the hardware of this new 4176 * console, it will not be aware of the nbcon synchronization. This 4177 * is a risk that two contexts could access the hardware 4178 * simultaneously if this new console is used for atomic printing 4179 * and the other context is still using the hardware. 4180 * 4181 * Use the driver synchronization to ensure that the hardware is not 4182 * in use while this new console transitions to being registered. 4183 */ 4184 if (use_device_lock) 4185 newcon->device_lock(newcon, &flags); 4186 4187 /* 4188 * Put this console in the list - keep the 4189 * preferred driver at the head of the list. 4190 */ 4191 if (hlist_empty(&console_list)) { 4192 /* Ensure CON_CONSDEV is always set for the head. */ 4193 newcon->flags |= CON_CONSDEV; 4194 hlist_add_head_rcu(&newcon->node, &console_list); 4195 4196 } else if (newcon->flags & CON_CONSDEV) { 4197 /* Only the new head can have CON_CONSDEV set. */ 4198 console_srcu_write_flags(console_first(), console_first()->flags & ~CON_CONSDEV); 4199 hlist_add_head_rcu(&newcon->node, &console_list); 4200 4201 } else { 4202 hlist_add_behind_rcu(&newcon->node, console_list.first); 4203 } 4204 4205 /* 4206 * No need to synchronize SRCU here! The caller does not rely 4207 * on all contexts being able to see the new console before 4208 * register_console() completes. 4209 */ 4210 4211 /* This new console is now registered. 
*/ 4212 if (use_device_lock) 4213 newcon->device_unlock(newcon, flags); 4214 4215 console_sysfs_notify(); 4216 4217 /* 4218 * By unregistering the bootconsoles after we enable the real console 4219 * we get the "console xxx enabled" message on all the consoles - 4220 * boot consoles, real consoles, etc - this is to ensure that end 4221 * users know there might be something in the kernel's log buffer that 4222 * went to the bootconsole (that they do not see on the real console) 4223 */ 4224 con_printk(KERN_INFO, newcon, "enabled\n"); 4225 if (bootcon_registered && 4226 ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) && 4227 !keep_bootcon) { 4228 struct hlist_node *tmp; 4229 4230 hlist_for_each_entry_safe(con, tmp, &console_list, node) { 4231 if (con->flags & CON_BOOT) 4232 unregister_console_locked(con); 4233 } 4234 } 4235 4236 /* Changed console list, may require printer threads to start/stop. */ 4237 printk_kthreads_check_locked(); 4238 unlock: 4239 console_list_unlock(); 4240 } 4241 EXPORT_SYMBOL(register_console); 4242 4243 /* Must be called under console_list_lock(). */ 4244 static int unregister_console_locked(struct console *console) 4245 { 4246 bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic; 4247 bool found_legacy_con = false; 4248 bool found_nbcon_con = false; 4249 bool found_boot_con = false; 4250 unsigned long flags; 4251 struct console *c; 4252 int res; 4253 4254 lockdep_assert_console_list_lock_held(); 4255 4256 con_printk(KERN_INFO, console, "disabled\n"); 4257 4258 res = _braille_unregister_console(console); 4259 if (res < 0) 4260 return res; 4261 if (res > 0) 4262 return 0; 4263 4264 if (!console_is_registered_locked(console)) 4265 res = -ENODEV; 4266 else if (console_is_usable(console, console->flags, true)) 4267 __pr_flush(console, 1000, true); 4268 4269 /* Disable it unconditionally */ 4270 console_srcu_write_flags(console, console->flags & ~CON_ENABLED); 4271 4272 if (res < 0) 4273 return res; 4274 4275 /* 4276 * Use the driver synchronization to ensure that the hardware is not 4277 * in use while this console transitions to being unregistered. 4278 */ 4279 if (use_device_lock) 4280 console->device_lock(console, &flags); 4281 4282 hlist_del_init_rcu(&console->node); 4283 4284 if (use_device_lock) 4285 console->device_unlock(console, flags); 4286 4287 /* 4288 * <HISTORICAL> 4289 * If this isn't the last console and it has CON_CONSDEV set, we 4290 * need to set it on the next preferred console. 4291 * </HISTORICAL> 4292 * 4293 * The above makes no sense as there is no guarantee that the next 4294 * console has any device attached. Oh well.... 4295 */ 4296 if (!hlist_empty(&console_list) && console->flags & CON_CONSDEV) 4297 console_srcu_write_flags(console_first(), console_first()->flags | CON_CONSDEV); 4298 4299 /* 4300 * Ensure that all SRCU list walks have completed. All contexts 4301 * must not be able to see this console in the list so that any 4302 * exit/cleanup routines can be performed safely. 4303 */ 4304 synchronize_srcu(&console_srcu); 4305 4306 /* 4307 * With this console gone, the global flags tracking registered 4308 * console types may have changed. Update them. 
4309 */ 4310 for_each_console(c) { 4311 if (c->flags & CON_BOOT) 4312 found_boot_con = true; 4313 4314 if (c->flags & CON_NBCON) 4315 found_nbcon_con = true; 4316 else 4317 found_legacy_con = true; 4318 } 4319 if (!found_boot_con) 4320 have_boot_console = found_boot_con; 4321 if (!found_legacy_con) 4322 have_legacy_console = found_legacy_con; 4323 if (!found_nbcon_con) 4324 have_nbcon_console = found_nbcon_con; 4325 4326 /* @have_nbcon_console must be updated before calling nbcon_free(). */ 4327 if (console->flags & CON_NBCON) 4328 nbcon_free(console); 4329 4330 console_sysfs_notify(); 4331 4332 if (console->exit) 4333 res = console->exit(console); 4334 4335 /* Changed console list, may require printer threads to start/stop. */ 4336 printk_kthreads_check_locked(); 4337 4338 return res; 4339 } 4340 4341 int unregister_console(struct console *console) 4342 { 4343 int res; 4344 4345 console_list_lock(); 4346 res = unregister_console_locked(console); 4347 console_list_unlock(); 4348 return res; 4349 } 4350 EXPORT_SYMBOL(unregister_console); 4351 4352 /** 4353 * console_force_preferred_locked - force a registered console preferred 4354 * @con: The registered console to force preferred. 4355 * 4356 * Must be called under console_list_lock(). 4357 */ 4358 void console_force_preferred_locked(struct console *con) 4359 { 4360 struct console *cur_pref_con; 4361 4362 if (!console_is_registered_locked(con)) 4363 return; 4364 4365 cur_pref_con = console_first(); 4366 4367 /* Already preferred? */ 4368 if (cur_pref_con == con) 4369 return; 4370 4371 /* 4372 * Delete, but do not re-initialize the entry. This allows the console 4373 * to continue to appear registered (via any hlist_unhashed_lockless() 4374 * checks), even though it was briefly removed from the console list. 4375 */ 4376 hlist_del_rcu(&con->node); 4377 4378 /* 4379 * Ensure that all SRCU list walks have completed so that the console 4380 * can be added to the beginning of the console list and its forward 4381 * list pointer can be re-initialized. 4382 */ 4383 synchronize_srcu(&console_srcu); 4384 4385 con->flags |= CON_CONSDEV; 4386 WARN_ON(!con->device); 4387 4388 /* Only the new head can have CON_CONSDEV set. */ 4389 console_srcu_write_flags(cur_pref_con, cur_pref_con->flags & ~CON_CONSDEV); 4390 hlist_add_head_rcu(&con->node, &console_list); 4391 } 4392 EXPORT_SYMBOL(console_force_preferred_locked); 4393 4394 /* 4395 * Initialize the console device. This is called *early*, so 4396 * we can't necessarily depend on lots of kernel help here. 4397 * Just do some early initializations, and do the complex setup 4398 * later. 4399 */ 4400 void __init console_init(void) 4401 { 4402 int ret; 4403 initcall_t call; 4404 initcall_entry_t *ce; 4405 4406 #ifdef CONFIG_NULL_TTY_DEFAULT_CONSOLE 4407 if (!console_set_on_cmdline) 4408 add_preferred_console("ttynull", 0, NULL); 4409 #endif 4410 4411 /* Setup the default TTY line discipline. */ 4412 n_tty_init(); 4413 4414 /* 4415 * set up the console device so that later boot sequences can 4416 * inform about problems etc.. 4417 */ 4418 ce = __con_initcall_start; 4419 trace_initcall_level("console"); 4420 while (ce < __con_initcall_end) { 4421 call = initcall_from_entry(ce); 4422 trace_initcall_start(call); 4423 ret = call(); 4424 trace_initcall_finish(call, ret); 4425 ce++; 4426 } 4427 } 4428 4429 /* 4430 * Some boot consoles access data that is in the init section and which will 4431 * be discarded after the initcalls have been run. 
To make sure that no code 4432 * will access this data, unregister the boot consoles in a late initcall. 4433 * 4434 * If for some reason, such as deferred probe or the driver being a loadable 4435 * module, the real console hasn't registered yet at this point, there will 4436 * be a brief interval in which no messages are logged to the console, which 4437 * makes it difficult to diagnose problems that occur during this time. 4438 * 4439 * To mitigate this problem somewhat, only unregister consoles whose memory 4440 * intersects with the init section. Note that all other boot consoles will 4441 * get unregistered when the real preferred console is registered. 4442 */ 4443 static int __init printk_late_init(void) 4444 { 4445 struct hlist_node *tmp; 4446 struct console *con; 4447 int ret; 4448 4449 console_list_lock(); 4450 hlist_for_each_entry_safe(con, tmp, &console_list, node) { 4451 if (!(con->flags & CON_BOOT)) 4452 continue; 4453 4454 /* Check addresses that might be used for enabled consoles. */ 4455 if (init_section_intersects(con, sizeof(*con)) || 4456 init_section_contains(con->write, 0) || 4457 init_section_contains(con->read, 0) || 4458 init_section_contains(con->device, 0) || 4459 init_section_contains(con->unblank, 0) || 4460 init_section_contains(con->data, 0)) { 4461 /* 4462 * Please, consider moving the reported consoles out 4463 * of the init section. 4464 */ 4465 pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n", 4466 con->name, con->index); 4467 unregister_console_locked(con); 4468 } 4469 } 4470 console_list_unlock(); 4471 4472 ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL, 4473 console_cpu_notify); 4474 WARN_ON(ret < 0); 4475 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online", 4476 console_cpu_notify, NULL); 4477 WARN_ON(ret < 0); 4478 printk_sysctl_init(); 4479 return 0; 4480 } 4481 late_initcall(printk_late_init); 4482 4483 #if defined CONFIG_PRINTK 4484 /* If @con is specified, only wait for that console. Otherwise wait for all. */ 4485 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) 4486 { 4487 unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms); 4488 unsigned long remaining_jiffies = timeout_jiffies; 4489 struct console_flush_type ft; 4490 struct console *c; 4491 u64 last_diff = 0; 4492 u64 printk_seq; 4493 short flags; 4494 int cookie; 4495 u64 diff; 4496 u64 seq; 4497 4498 /* Sorry, pr_flush() will not work this early. */ 4499 if (system_state < SYSTEM_SCHEDULING) 4500 return false; 4501 4502 might_sleep(); 4503 4504 seq = prb_next_reserve_seq(prb); 4505 4506 /* Flush the consoles so that records up to @seq are printed. */ 4507 printk_get_console_flush_type(&ft); 4508 if (ft.nbcon_atomic) 4509 nbcon_atomic_flush_pending(); 4510 if (ft.legacy_direct) { 4511 console_lock(); 4512 console_unlock(); 4513 } 4514 4515 for (;;) { 4516 unsigned long begin_jiffies; 4517 unsigned long slept_jiffies; 4518 4519 diff = 0; 4520 4521 /* 4522 * Hold the console_lock to guarantee safe access to 4523 * console->seq. Releasing console_lock flushes more 4524 * records in case @seq is still not printed on all 4525 * usable consoles. 4526 * 4527 * Holding the console_lock is not necessary if there 4528 * are no legacy or boot consoles. However, such a 4529 * console could register at any time. Always hold the 4530 * console_lock as a precaution rather than 4531 * synchronizing against register_console(). 
4532 */ 4533 console_lock(); 4534 4535 cookie = console_srcu_read_lock(); 4536 for_each_console_srcu(c) { 4537 if (con && con != c) 4538 continue; 4539 4540 flags = console_srcu_read_flags(c); 4541 4542 /* 4543 * If consoles are not usable, it cannot be expected 4544 * that they make forward progress, so only increment 4545 * @diff for usable consoles. 4546 */ 4547 if (!console_is_usable(c, flags, true) && 4548 !console_is_usable(c, flags, false)) { 4549 continue; 4550 } 4551 4552 if (flags & CON_NBCON) { 4553 printk_seq = nbcon_seq_read(c); 4554 } else { 4555 printk_seq = c->seq; 4556 } 4557 4558 if (printk_seq < seq) 4559 diff += seq - printk_seq; 4560 } 4561 console_srcu_read_unlock(cookie); 4562 4563 if (diff != last_diff && reset_on_progress) 4564 remaining_jiffies = timeout_jiffies; 4565 4566 console_unlock(); 4567 4568 /* Note: @diff is 0 if there are no usable consoles. */ 4569 if (diff == 0 || remaining_jiffies == 0) 4570 break; 4571 4572 /* msleep(1) might sleep much longer. Check time by jiffies. */ 4573 begin_jiffies = jiffies; 4574 msleep(1); 4575 slept_jiffies = jiffies - begin_jiffies; 4576 4577 remaining_jiffies -= min(slept_jiffies, remaining_jiffies); 4578 4579 last_diff = diff; 4580 } 4581 4582 return (diff == 0); 4583 } 4584 4585 /** 4586 * pr_flush() - Wait for printing threads to catch up. 4587 * 4588 * @timeout_ms: The maximum time (in ms) to wait. 4589 * @reset_on_progress: Reset the timeout if forward progress is seen. 4590 * 4591 * A value of 0 for @timeout_ms means no waiting will occur. A value of -1 4592 * represents infinite waiting. 4593 * 4594 * If @reset_on_progress is true, the timeout will be reset whenever any 4595 * printer has been seen to make some forward progress. 4596 * 4597 * Context: Process context. May sleep while acquiring console lock. 4598 * Return: true if all usable printers are caught up. 4599 */ 4600 bool pr_flush(int timeout_ms, bool reset_on_progress) 4601 { 4602 return __pr_flush(NULL, timeout_ms, reset_on_progress); 4603 } 4604 4605 /* 4606 * Delayed printk version, for scheduler-internal messages: 4607 */ 4608 #define PRINTK_PENDING_WAKEUP 0x01 4609 #define PRINTK_PENDING_OUTPUT 0x02 4610 4611 static DEFINE_PER_CPU(int, printk_pending); 4612 4613 static void wake_up_klogd_work_func(struct irq_work *irq_work) 4614 { 4615 int pending = this_cpu_xchg(printk_pending, 0); 4616 4617 if (pending & PRINTK_PENDING_OUTPUT) { 4618 if (force_legacy_kthread()) { 4619 if (printk_legacy_kthread) 4620 wake_up_interruptible(&legacy_wait); 4621 } else { 4622 if (console_trylock()) 4623 console_unlock(); 4624 } 4625 } 4626 4627 if (pending & PRINTK_PENDING_WAKEUP) 4628 wake_up_interruptible(&log_wait); 4629 } 4630 4631 static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = 4632 IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func); 4633 4634 static void __wake_up_klogd(int val) 4635 { 4636 if (!printk_percpu_data_ready()) 4637 return; 4638 4639 /* 4640 * It is not allowed to call this function when console irq_work 4641 * is blocked. 4642 */ 4643 if (WARN_ON_ONCE(console_irqwork_blocked)) 4644 return; 4645 4646 preempt_disable(); 4647 /* 4648 * Guarantee any new records can be seen by tasks preparing to wait 4649 * before this context checks if the wait queue is empty. 4650 * 4651 * The full memory barrier within wq_has_sleeper() pairs with the full 4652 * memory barrier within set_current_state() of 4653 * prepare_to_wait_event(), which is called after ___wait_event() adds 4654 * the waiter but before it has checked the wait condition. 
4655 * 4656 * This pairs with devkmsg_read:A and syslog_print:A. 4657 */ 4658 if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */ 4659 (val & PRINTK_PENDING_OUTPUT)) { 4660 this_cpu_or(printk_pending, val); 4661 irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); 4662 } 4663 preempt_enable(); 4664 } 4665 4666 /** 4667 * wake_up_klogd - Wake kernel logging daemon 4668 * 4669 * Use this function when new records have been added to the ringbuffer 4670 * and the console printing of those records has already occurred or is 4671 * known to be handled by some other context. This function will only 4672 * wake the logging daemon. 4673 * 4674 * Context: Any context. 4675 */ 4676 void wake_up_klogd(void) 4677 { 4678 __wake_up_klogd(PRINTK_PENDING_WAKEUP); 4679 } 4680 4681 /** 4682 * defer_console_output - Wake kernel logging daemon and trigger 4683 * console printing in a deferred context 4684 * 4685 * Use this function when new records have been added to the ringbuffer, 4686 * this context is responsible for console printing those records, but 4687 * the current context is not allowed to perform the console printing. 4688 * Trigger an irq_work context to perform the console printing. This 4689 * function also wakes the logging daemon. 4690 * 4691 * Context: Any context. 4692 */ 4693 void defer_console_output(void) 4694 { 4695 /* 4696 * New messages may have been added directly to the ringbuffer 4697 * using vprintk_store(), so wake any waiters as well. 4698 */ 4699 __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT); 4700 } 4701 4702 /** 4703 * printk_trigger_flush - Attempt to flush printk buffer to consoles. 4704 * 4705 * If possible, flush the printk buffer to all consoles in the caller's 4706 * context. If offloading is available, trigger deferred printing. 4707 * 4708 * This is best effort. Depending on the system state, console states, 4709 * and caller context, no actual flushing may result from this call. 4710 */ 4711 void printk_trigger_flush(void) 4712 { 4713 struct console_flush_type ft; 4714 4715 printk_get_console_flush_type(&ft); 4716 if (ft.nbcon_atomic) 4717 nbcon_atomic_flush_pending(); 4718 if (ft.nbcon_offload) 4719 nbcon_kthreads_wake(); 4720 if (ft.legacy_direct) { 4721 if (console_trylock()) 4722 console_unlock(); 4723 } 4724 if (ft.legacy_offload) 4725 defer_console_output(); 4726 } 4727 4728 int vprintk_deferred(const char *fmt, va_list args) 4729 { 4730 return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args); 4731 } 4732 4733 int _printk_deferred(const char *fmt, ...) 4734 { 4735 va_list args; 4736 int r; 4737 4738 va_start(args, fmt); 4739 r = vprintk_deferred(fmt, args); 4740 va_end(args); 4741 4742 return r; 4743 } 4744 4745 /* 4746 * printk rate limiting, lifted from the networking subsystem. 4747 * 4748 * This enforces a rate limit: not more than 10 kernel messages 4749 * every 5s to make a denial-of-service attack impossible. 4750 */ 4751 DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); 4752 4753 int __printk_ratelimit(const char *func) 4754 { 4755 return ___ratelimit(&printk_ratelimit_state, func); 4756 } 4757 EXPORT_SYMBOL(__printk_ratelimit); 4758 4759 /** 4760 * printk_timed_ratelimit - caller-controlled printk ratelimiting 4761 * @caller_jiffies: pointer to caller's state 4762 * @interval_msecs: minimum interval between prints 4763 * 4764 * printk_timed_ratelimit() returns true if more than @interval_msecs 4765 * milliseconds have elapsed since the last time printk_timed_ratelimit() 4766 * returned true. 
4767 */ 4768 bool printk_timed_ratelimit(unsigned long *caller_jiffies, 4769 unsigned int interval_msecs) 4770 { 4771 unsigned long elapsed = jiffies - *caller_jiffies; 4772 4773 if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs)) 4774 return false; 4775 4776 *caller_jiffies = jiffies; 4777 return true; 4778 } 4779 EXPORT_SYMBOL(printk_timed_ratelimit); 4780 4781 static DEFINE_SPINLOCK(dump_list_lock); 4782 static LIST_HEAD(dump_list); 4783 4784 /** 4785 * kmsg_dump_register - register a kernel log dumper. 4786 * @dumper: pointer to the kmsg_dumper structure 4787 * 4788 * Adds a kernel log dumper to the system. The dump callback in the 4789 * structure will be called when the kernel oopses or panics and must be 4790 * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise. 4791 */ 4792 int kmsg_dump_register(struct kmsg_dumper *dumper) 4793 { 4794 unsigned long flags; 4795 int err = -EBUSY; 4796 4797 /* The dump callback needs to be set */ 4798 if (!dumper->dump) 4799 return -EINVAL; 4800 4801 spin_lock_irqsave(&dump_list_lock, flags); 4802 /* Don't allow registering multiple times */ 4803 if (!dumper->registered) { 4804 dumper->registered = 1; 4805 list_add_tail_rcu(&dumper->list, &dump_list); 4806 err = 0; 4807 } 4808 spin_unlock_irqrestore(&dump_list_lock, flags); 4809 4810 return err; 4811 } 4812 EXPORT_SYMBOL_GPL(kmsg_dump_register); 4813 4814 /** 4815 * kmsg_dump_unregister - unregister a kmsg dumper. 4816 * @dumper: pointer to the kmsg_dumper structure 4817 * 4818 * Removes a dump device from the system. Returns zero on success and 4819 * %-EINVAL otherwise. 4820 */ 4821 int kmsg_dump_unregister(struct kmsg_dumper *dumper) 4822 { 4823 unsigned long flags; 4824 int err = -EINVAL; 4825 4826 spin_lock_irqsave(&dump_list_lock, flags); 4827 if (dumper->registered) { 4828 dumper->registered = 0; 4829 list_del_rcu(&dumper->list); 4830 err = 0; 4831 } 4832 spin_unlock_irqrestore(&dump_list_lock, flags); 4833 synchronize_rcu(); 4834 4835 return err; 4836 } 4837 EXPORT_SYMBOL_GPL(kmsg_dump_unregister); 4838 4839 static bool always_kmsg_dump; 4840 module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR); 4841 4842 const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason) 4843 { 4844 switch (reason) { 4845 case KMSG_DUMP_PANIC: 4846 return "Panic"; 4847 case KMSG_DUMP_OOPS: 4848 return "Oops"; 4849 case KMSG_DUMP_EMERG: 4850 return "Emergency"; 4851 case KMSG_DUMP_SHUTDOWN: 4852 return "Shutdown"; 4853 default: 4854 return "Unknown"; 4855 } 4856 } 4857 EXPORT_SYMBOL_GPL(kmsg_dump_reason_str); 4858 4859 /** 4860 * kmsg_dump_desc - dump kernel log to kernel message dumpers. 4861 * @reason: the reason (oops, panic etc) for dumping 4862 * @desc: a short string to describe what caused the panic or oops. Can be NULL 4863 * if no additional description is available. 4864 * 4865 * Call each of the registered dumper's dump() callback, which can 4866 * retrieve the kmsg records with kmsg_dump_get_line() or 4867 * kmsg_dump_get_buffer(). 4868 */ 4869 void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc) 4870 { 4871 struct kmsg_dumper *dumper; 4872 struct kmsg_dump_detail detail = { 4873 .reason = reason, 4874 .description = desc}; 4875 4876 rcu_read_lock(); 4877 list_for_each_entry_rcu(dumper, &dump_list, list) { 4878 enum kmsg_dump_reason max_reason = dumper->max_reason; 4879 4880 /* 4881 * If client has not provided a specific max_reason, default 4882 * to KMSG_DUMP_OOPS, unless always_kmsg_dump was set. 
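		 *
		 * For example (illustrative), a dumper that should also run
		 * on clean shutdowns would set dumper->max_reason to
		 * KMSG_DUMP_SHUTDOWN before calling kmsg_dump_register().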
4883 */ 4884 if (max_reason == KMSG_DUMP_UNDEF) { 4885 max_reason = always_kmsg_dump ? KMSG_DUMP_MAX : 4886 KMSG_DUMP_OOPS; 4887 } 4888 if (reason > max_reason) 4889 continue; 4890 4891 /* invoke dumper which will iterate over records */ 4892 dumper->dump(dumper, &detail); 4893 } 4894 rcu_read_unlock(); 4895 } 4896 4897 /** 4898 * kmsg_dump_get_line - retrieve one kmsg log line 4899 * @iter: kmsg dump iterator 4900 * @syslog: include the "<4>" prefixes 4901 * @line: buffer to copy the line to 4902 * @size: maximum size of the buffer 4903 * @len: length of line placed into buffer 4904 * 4905 * Start at the beginning of the kmsg buffer, with the oldest kmsg 4906 * record, and copy one record into the provided buffer. 4907 * 4908 * Consecutive calls will return the next available record moving 4909 * towards the end of the buffer with the youngest messages. 4910 * 4911 * A return value of FALSE indicates that there are no more records to 4912 * read. 4913 */ 4914 bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog, 4915 char *line, size_t size, size_t *len) 4916 { 4917 u64 min_seq = latched_seq_read_nolock(&clear_seq); 4918 struct printk_info info; 4919 unsigned int line_count; 4920 struct printk_record r; 4921 size_t l = 0; 4922 bool ret = false; 4923 4924 if (iter->cur_seq < min_seq) 4925 iter->cur_seq = min_seq; 4926 4927 prb_rec_init_rd(&r, &info, line, size); 4928 4929 /* Read text or count text lines? */ 4930 if (line) { 4931 if (!prb_read_valid(prb, iter->cur_seq, &r)) 4932 goto out; 4933 l = record_print_text(&r, syslog, printk_time); 4934 } else { 4935 if (!prb_read_valid_info(prb, iter->cur_seq, 4936 &info, &line_count)) { 4937 goto out; 4938 } 4939 l = get_record_print_text_size(&info, line_count, syslog, 4940 printk_time); 4941 4942 } 4943 4944 iter->cur_seq = r.info->seq + 1; 4945 ret = true; 4946 out: 4947 if (len) 4948 *len = l; 4949 return ret; 4950 } 4951 EXPORT_SYMBOL_GPL(kmsg_dump_get_line); 4952 4953 /** 4954 * kmsg_dump_get_buffer - copy kmsg log lines 4955 * @iter: kmsg dump iterator 4956 * @syslog: include the "<4>" prefixes 4957 * @buf: buffer to copy the line to 4958 * @size: maximum size of the buffer 4959 * @len_out: length of line placed into buffer 4960 * 4961 * Start at the end of the kmsg buffer and fill the provided buffer 4962 * with as many of the *youngest* kmsg records that fit into it. 4963 * If the buffer is large enough, all available kmsg records will be 4964 * copied with a single call. 4965 * 4966 * Consecutive calls will fill the buffer with the next block of 4967 * available older records, not including the earlier retrieved ones. 4968 * 4969 * A return value of FALSE indicates that there are no more records to 4970 * read. 
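 *
 * A minimal, illustrative dump() callback (my_dump() and my_write_chunk()
 * are hypothetical names) could drain the log newest-block-first like this:
 *
 *	static void my_dump(struct kmsg_dumper *dumper,
 *			    struct kmsg_dump_detail *detail)
 *	{
 *		static char buf[4096];
 *		struct kmsg_dump_iter iter;
 *		size_t len;
 *
 *		kmsg_dump_rewind(&iter);
 *		while (kmsg_dump_get_buffer(&iter, false, buf, sizeof(buf), &len))
 *			my_write_chunk(buf, len);
 *	}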
4971 */ 4972 bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog, 4973 char *buf, size_t size, size_t *len_out) 4974 { 4975 u64 min_seq = latched_seq_read_nolock(&clear_seq); 4976 struct printk_info info; 4977 struct printk_record r; 4978 u64 seq; 4979 u64 next_seq; 4980 size_t len = 0; 4981 bool ret = false; 4982 bool time = printk_time; 4983 4984 if (!buf || !size) 4985 goto out; 4986 4987 if (iter->cur_seq < min_seq) 4988 iter->cur_seq = min_seq; 4989 4990 if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) { 4991 if (info.seq != iter->cur_seq) { 4992 /* messages are gone, move to first available one */ 4993 iter->cur_seq = info.seq; 4994 } 4995 } 4996 4997 /* last entry */ 4998 if (iter->cur_seq >= iter->next_seq) 4999 goto out; 5000 5001 /* 5002 * Find first record that fits, including all following records, 5003 * into the user-provided buffer for this dump. Pass in size-1 5004 * because this function (by way of record_print_text()) will 5005 * not write more than size-1 bytes of text into @buf. 5006 */ 5007 seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq, 5008 size - 1, syslog, time); 5009 5010 /* 5011 * Next kmsg_dump_get_buffer() invocation will dump block of 5012 * older records stored right before this one. 5013 */ 5014 next_seq = seq; 5015 5016 prb_rec_init_rd(&r, &info, buf, size); 5017 5018 prb_for_each_record(seq, prb, seq, &r) { 5019 if (r.info->seq >= iter->next_seq) 5020 break; 5021 5022 len += record_print_text(&r, syslog, time); 5023 5024 /* Adjust record to store to remaining buffer space. */ 5025 prb_rec_init_rd(&r, &info, buf + len, size - len); 5026 } 5027 5028 iter->next_seq = next_seq; 5029 ret = true; 5030 out: 5031 if (len_out) 5032 *len_out = len; 5033 return ret; 5034 } 5035 EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer); 5036 5037 /** 5038 * kmsg_dump_rewind - reset the iterator 5039 * @iter: kmsg dump iterator 5040 * 5041 * Reset the dumper's iterator so that kmsg_dump_get_line() and 5042 * kmsg_dump_get_buffer() can be called again and used multiple 5043 * times within the same dumper.dump() callback. 5044 */ 5045 void kmsg_dump_rewind(struct kmsg_dump_iter *iter) 5046 { 5047 iter->cur_seq = latched_seq_read_nolock(&clear_seq); 5048 iter->next_seq = prb_next_seq(prb); 5049 } 5050 EXPORT_SYMBOL_GPL(kmsg_dump_rewind); 5051 5052 /** 5053 * console_try_replay_all - try to replay kernel log on consoles 5054 * 5055 * Try to obtain lock on console subsystem and replay all 5056 * available records in printk buffer on the consoles. 5057 * Does nothing if lock is not obtained. 5058 * 5059 * Context: Any, except for NMI. 5060 */ 5061 void console_try_replay_all(void) 5062 { 5063 struct console_flush_type ft; 5064 5065 printk_get_console_flush_type(&ft); 5066 if (console_trylock()) { 5067 __console_rewind_all(); 5068 if (ft.nbcon_atomic) 5069 nbcon_atomic_flush_pending(); 5070 if (ft.nbcon_offload) 5071 nbcon_kthreads_wake(); 5072 if (ft.legacy_offload) 5073 defer_console_output(); 5074 /* Consoles are flushed as part of console_unlock(). */ 5075 console_unlock(); 5076 } 5077 } 5078 #endif 5079 5080 #ifdef CONFIG_SMP 5081 static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1); 5082 static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0); 5083 5084 bool is_printk_cpu_sync_owner(void) 5085 { 5086 return (atomic_read(&printk_cpu_sync_owner) == raw_smp_processor_id()); 5087 } 5088 5089 /** 5090 * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant 5091 * spinning lock is not owned by any CPU. 5092 * 5093 * Context: Any context. 
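 *
 * This is typically not called directly. Callers are expected to use the
 * printk_cpu_sync_get_irqsave()/printk_cpu_sync_put_irqrestore() helpers,
 * e.g. (illustrative, dump_this_cpu_state() is a hypothetical routine):
 *
 *	unsigned long flags;
 *
 *	printk_cpu_sync_get_irqsave(flags);
 *	dump_this_cpu_state();
 *	printk_cpu_sync_put_irqrestore(flags);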
5094 */ 5095 void __printk_cpu_sync_wait(void) 5096 { 5097 do { 5098 cpu_relax(); 5099 } while (atomic_read(&printk_cpu_sync_owner) != -1); 5100 } 5101 EXPORT_SYMBOL(__printk_cpu_sync_wait); 5102 5103 /** 5104 * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant 5105 * spinning lock. 5106 * 5107 * If no processor has the lock, the calling processor takes the lock and 5108 * becomes the owner. If the calling processor is already the owner of the 5109 * lock, this function succeeds immediately. 5110 * 5111 * Context: Any context. Expects interrupts to be disabled. 5112 * Return: 1 on success, otherwise 0. 5113 */ 5114 int __printk_cpu_sync_try_get(void) 5115 { 5116 int cpu; 5117 int old; 5118 5119 cpu = smp_processor_id(); 5120 5121 /* 5122 * Guarantee loads and stores from this CPU when it is the lock owner 5123 * are _not_ visible to the previous lock owner. This pairs with 5124 * __printk_cpu_sync_put:B. 5125 * 5126 * Memory barrier involvement: 5127 * 5128 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B, 5129 * then __printk_cpu_sync_put:A can never read from 5130 * __printk_cpu_sync_try_get:B. 5131 * 5132 * Relies on: 5133 * 5134 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B 5135 * of the previous CPU 5136 * matching 5137 * ACQUIRE from __printk_cpu_sync_try_get:A to 5138 * __printk_cpu_sync_try_get:B of this CPU 5139 */ 5140 old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1, 5141 cpu); /* LMM(__printk_cpu_sync_try_get:A) */ 5142 if (old == -1) { 5143 /* 5144 * This CPU is now the owner and begins loading/storing 5145 * data: LMM(__printk_cpu_sync_try_get:B) 5146 */ 5147 return 1; 5148 5149 } else if (old == cpu) { 5150 /* This CPU is already the owner. */ 5151 atomic_inc(&printk_cpu_sync_nested); 5152 return 1; 5153 } 5154 5155 return 0; 5156 } 5157 EXPORT_SYMBOL(__printk_cpu_sync_try_get); 5158 5159 /** 5160 * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock. 5161 * 5162 * The calling processor must be the owner of the lock. 5163 * 5164 * Context: Any context. Expects interrupts to be disabled. 5165 */ 5166 void __printk_cpu_sync_put(void) 5167 { 5168 if (atomic_read(&printk_cpu_sync_nested)) { 5169 atomic_dec(&printk_cpu_sync_nested); 5170 return; 5171 } 5172 5173 /* 5174 * This CPU is finished loading/storing data: 5175 * LMM(__printk_cpu_sync_put:A) 5176 */ 5177 5178 /* 5179 * Guarantee loads and stores from this CPU when it was the 5180 * lock owner are visible to the next lock owner. This pairs 5181 * with __printk_cpu_sync_try_get:A. 5182 * 5183 * Memory barrier involvement: 5184 * 5185 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B, 5186 * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A. 5187 * 5188 * Relies on: 5189 * 5190 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B 5191 * of this CPU 5192 * matching 5193 * ACQUIRE from __printk_cpu_sync_try_get:A to 5194 * __printk_cpu_sync_try_get:B of the next CPU 5195 */ 5196 atomic_set_release(&printk_cpu_sync_owner, 5197 -1); /* LMM(__printk_cpu_sync_put:B) */ 5198 } 5199 EXPORT_SYMBOL(__printk_cpu_sync_put); 5200 #endif /* CONFIG_SMP */ 5201