1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright (C) 2022 Linutronix GmbH, John Ogness
3 // Copyright (C) 2022 Intel, Thomas Gleixner
4
5 #include <linux/atomic.h>
6 #include <linux/bug.h>
7 #include <linux/console.h>
8 #include <linux/delay.h>
9 #include <linux/errno.h>
10 #include <linux/export.h>
11 #include <linux/init.h>
12 #include <linux/irqflags.h>
13 #include <linux/kdb.h>
14 #include <linux/kthread.h>
15 #include <linux/minmax.h>
16 #include <linux/panic.h>
17 #include <linux/percpu.h>
18 #include <linux/preempt.h>
19 #include <linux/slab.h>
20 #include <linux/smp.h>
21 #include <linux/stddef.h>
22 #include <linux/string.h>
23 #include <linux/types.h>
24 #include "internal.h"
25 #include "printk_ringbuffer.h"
26 /*
 * Printk console printing implementation for consoles that do not depend
28 * on the legacy style console_lock mechanism.
29 *
30 * The state of the console is maintained in the "nbcon_state" atomic
31 * variable.
32 *
33 * The console is locked when:
34 *
35 * - The 'prio' field contains the priority of the context that owns the
36 * console. Only higher priority contexts are allowed to take over the
37 * lock. A value of 0 (NBCON_PRIO_NONE) means the console is not locked.
38 *
39 * - The 'cpu' field denotes on which CPU the console is locked. It is used
40 * to prevent busy waiting on the same CPU. Also it informs the lock owner
41 * that it has lost the lock in a more complex scenario when the lock was
42 * taken over by a higher priority context, released, and taken on another
43 * CPU with the same priority as the interrupted owner.
44 *
45 * The acquire mechanism uses a few more fields:
46 *
47 * - The 'req_prio' field is used by the handover approach to make the
48 * current owner aware that there is a context with a higher priority
49 * waiting for the friendly handover.
50 *
51 * - The 'unsafe' field allows to take over the console in a safe way in the
52 * middle of emitting a message. The field is set only when accessing some
53 * shared resources or when the console device is manipulated. It can be
54 * cleared, for example, after emitting one character when the console
55 * device is in a consistent state.
56 *
57 * - The 'unsafe_takeover' field is set when a hostile takeover took the
58 * console in an unsafe state. The console will stay in the unsafe state
59 * until re-initialized.
60 *
61 * The acquire mechanism uses three approaches:
62 *
63 * 1) Direct acquire when the console is not owned or is owned by a lower
64 * priority context and is in a safe state.
65 *
66 * 2) Friendly handover mechanism uses a request/grant handshake. It is used
67 * when the current owner has lower priority and the console is in an
68 * unsafe state.
69 *
70 * The requesting context:
71 *
72 * a) Sets its priority into the 'req_prio' field.
73 *
74 * b) Waits (with a timeout) for the owning context to unlock the
75 * console.
76 *
77 * c) Takes the lock and clears the 'req_prio' field.
78 *
79 * The owning context:
80 *
81 * a) Observes the 'req_prio' field set on exit from the unsafe
82 * console state.
83 *
84 * b) Gives up console ownership by clearing the 'prio' field.
85 *
86 * 3) Unsafe hostile takeover allows to take over the lock even when the
 *    console is in an unsafe state. It is used only in panic() by the final
88 * attempt to flush consoles in a try and hope mode.
89 *
90 * Note that separate record buffers are used in panic(). As a result,
91 * the messages can be read and formatted without any risk even after
92 * using the hostile takeover in unsafe state.
93 *
94 * The release function simply clears the 'prio' field.
95 *
96 * All operations on @console::nbcon_state are atomic cmpxchg based to
97 * handle concurrency.
98 *
99 * The acquire/release functions implement only minimal policies:
100 *
101 * - Preference for higher priority contexts.
102 * - Protection of the panic CPU.
103 *
104 * All other policy decisions must be made at the call sites:
105 *
106 * - What is marked as an unsafe section.
107 * - Whether to spin-wait if there is already an owner and the console is
108 * in an unsafe state.
109 * - Whether to attempt an unsafe hostile takeover.
110 *
111 * The design allows to implement the well known:
112 *
113 * acquire()
114 * output_one_printk_record()
115 * release()
116 *
117 * The output of one printk record might be interrupted with a higher priority
118 * context. The new owner is supposed to reprint the entire interrupted record
119 * from scratch.
120 */
121
122 /* Counter of active nbcon emergency contexts. */
123 static atomic_t nbcon_cpu_emergency_cnt = ATOMIC_INIT(0);
124
/**
 * nbcon_state_set - Helper function to set the console state
 * @con: Console to update
 * @new: The new state to write
 *
 * Only to be used when the console is not yet or no longer visible in the
 * system. Otherwise use nbcon_state_try_cmpxchg().
 */
static inline void nbcon_state_set(struct console *con, struct nbcon_state *new)
{
	/*
	 * A plain atomic_set() is sufficient here because, per the contract
	 * above, no other context can concurrently access the state.
	 */
	atomic_set(&ACCESS_PRIVATE(con, nbcon_state), new->atom);
}
137
/**
 * nbcon_state_read - Helper function to read the console state
 * @con: Console to read
 * @state: The state to store the result
 *
 * The entire console state is carried in the single atomic word
 * @state->atom, so one atomic_read() yields a consistent snapshot.
 */
static inline void nbcon_state_read(struct console *con, struct nbcon_state *state)
{
	state->atom = atomic_read(&ACCESS_PRIVATE(con, nbcon_state));
}
147
/**
 * nbcon_state_try_cmpxchg() - Helper function for atomic_try_cmpxchg() on console state
 * @con: Console to update
 * @cur: Old/expected state
 * @new: New state
 *
 * Return: True on success. False on fail and @cur is updated.
 */
static inline bool nbcon_state_try_cmpxchg(struct console *con, struct nbcon_state *cur,
					   struct nbcon_state *new)
{
	/* On failure, @cur->atom is refreshed with the latest console state. */
	return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom);
}
161
/**
 * nbcon_seq_read - Read the current console sequence
 * @con: Console to read the sequence of
 *
 * Return: Sequence number of the next record to print on @con.
 */
u64 nbcon_seq_read(struct console *con)
{
	unsigned long nbcon_seq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_seq));

	/*
	 * Only the lower bits fit into the atomic_long on 32bit systems.
	 * Expand to the full 64bit value using the ringbuffer state.
	 */
	return __ulseq_to_u64seq(prb, nbcon_seq);
}
174
/**
 * nbcon_seq_force - Force console sequence to a specific value
 * @con: Console to work on
 * @seq: Sequence number value to set
 *
 * Only to be used during init (before registration) or in extreme situations
 * (such as panic with CONSOLE_REPLAY_ALL).
 */
void nbcon_seq_force(struct console *con, u64 seq)
{
	/*
	 * If the specified record no longer exists, the oldest available record
	 * is chosen. This is especially important on 32bit systems because only
	 * the lower 32 bits of the sequence number are stored. The upper 32 bits
	 * are derived from the sequence numbers available in the ringbuffer.
	 */
	u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));

	/* Store only the lower bits; see nbcon_seq_read() for the expansion. */
	atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __u64seq_to_ulseq(valid_seq));
}
195
/**
 * nbcon_seq_try_update - Try to update the console sequence number
 * @ctxt: Pointer to an acquire context that contains
 *	all information about the acquire mode
 * @new_seq: The new sequence number to set
 *
 * @ctxt->seq is updated to the new value of @con::nbcon_seq (expanded to
 * the 64bit value). This could be a different value than @new_seq if
 * nbcon_seq_force() was used or the current context no longer owns the
 * console. In the latter case, it will stop printing anyway.
 */
static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
{
	unsigned long nbcon_seq = __u64seq_to_ulseq(ctxt->seq);
	struct console *con = ctxt->console;

	if (atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_seq), &nbcon_seq,
				    __u64seq_to_ulseq(new_seq))) {
		ctxt->seq = new_seq;
	} else {
		/* Someone else changed the sequence; adopt the stored value. */
		ctxt->seq = nbcon_seq_read(con);
	}
}
219
/**
 * nbcon_context_try_acquire_direct - Try to acquire directly
 * @ctxt: The context of the caller
 * @cur: The current console state
 * @is_reacquire: This acquire is a reacquire
 *
 * Acquire the console when it is released. Also acquire the console when
 * the current owner has a lower priority and the console is in a safe state.
 *
 * Return: 0 on success. Otherwise, an error code on failure. Also @cur
 *	is updated to the latest state when failed to modify it.
 *
 * Errors:
 *
 *	-EPERM:		A panic is in progress and this is neither the panic
 *			CPU nor is this a reacquire. Or the current owner or
 *			waiter has the same or higher priority. No acquire
 *			method can be successful in these cases.
 *
 *	-EBUSY:		The current owner has a lower priority but the console
 *			in an unsafe state. The caller should try using
 *			the handover acquire method.
 */
static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
					    struct nbcon_state *cur, bool is_reacquire)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state new;

	do {
		/*
		 * Panic does not imply that the console is owned. However,
		 * since all non-panic CPUs are stopped during panic(), it
		 * is safer to have them avoid gaining console ownership.
		 *
		 * One exception is when kdb has locked for printing on this CPU.
		 *
		 * Second exception is a reacquire (and an unsafe takeover
		 * has not previously occurred) then it is allowed to attempt
		 * a direct acquire in panic. This gives console drivers an
		 * opportunity to perform any necessary cleanup if they were
		 * interrupted by the panic CPU while printing.
		 */
		if (panic_on_other_cpu() &&
		    !kdb_printf_on_this_cpu() &&
		    (!is_reacquire || cur->unsafe_takeover)) {
			return -EPERM;
		}

		/* An equal or higher priority owner/waiter always wins. */
		if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
			return -EPERM;

		/* Unsafe state: only handover or hostile takeover can help. */
		if (cur->unsafe)
			return -EBUSY;

		/*
		 * The console should never be safe for a direct acquire
		 * if an unsafe hostile takeover has ever happened.
		 */
		WARN_ON_ONCE(cur->unsafe_takeover);

		new.atom = cur->atom;
		new.prio = ctxt->prio;
		new.req_prio = NBCON_PRIO_NONE;
		/*
		 * Defensively keep the console unsafe if a hostile takeover
		 * ever happened (should not occur here, see WARN above).
		 */
		new.unsafe = cur->unsafe_takeover;
		new.cpu = cpu;

	} while (!nbcon_state_try_cmpxchg(con, cur, &new));

	return 0;
}
292
/**
 * nbcon_waiter_matches - Check if the waiting context is still this context
 * @cur: The current console state
 * @expected_prio: The priority this context used for its handover request
 *
 * Return: True if @cur->req_prio still identifies this context's request.
 */
static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
{
	/*
	 * The request context is well defined by the @req_prio because:
	 *
	 * - Only a context with a priority higher than the owner can become
	 *   a waiter.
	 * - Only a context with a priority higher than the waiter can
	 *   directly take over the request.
	 * - There are only three priorities.
	 * - Only one CPU is allowed to request PANIC priority.
	 * - Lower priorities are ignored during panic() until reboot.
	 *
	 * As a result, the following scenario is *not* possible:
	 *
	 * 1. This context is currently a waiter.
	 * 2. Another context with a higher priority than this context
	 *    directly takes ownership.
	 * 3. The higher priority context releases the ownership.
	 * 4. Another lower priority context takes the ownership.
	 * 5. Another context with the same priority as this context
	 *    creates a request and starts waiting.
	 *
	 * Event #1 implies this context is EMERGENCY.
	 * Event #2 implies the new context is PANIC.
	 * Event #3 occurs when panic() has flushed the console.
	 * Event #4 occurs when a non-panic CPU reacquires.
	 * Event #5 is not possible due to the panic_on_other_cpu() check
	 *          in nbcon_context_try_acquire_handover().
	 */

	return (cur->req_prio == expected_prio);
}
326
/**
 * nbcon_context_try_acquire_requested - Try to acquire after having
 *					requested a handover
 * @ctxt: The context of the caller
 * @cur: The current console state
 *
 * This is a helper function for nbcon_context_try_acquire_handover().
 * It is called when the console is in an unsafe state. The current
 * owner will release the console on exit from the unsafe region.
 *
 * Return: 0 on success and @cur is updated to the new console state.
 *	Otherwise an error code on failure.
 *
 * Errors:
 *
 *	-EPERM:		A panic is in progress and this is not the panic CPU
 *			or this context is no longer the waiter.
 *
 *	-EBUSY:		The console is still locked. The caller should
 *			continue waiting.
 *
 * Note: The caller must still remove the request when an error has occurred
 *	except when this context is no longer the waiter.
 */
static int nbcon_context_try_acquire_requested(struct nbcon_context *ctxt,
					       struct nbcon_state *cur)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state new;

	/* Note that the caller must still remove the request! */
	if (panic_on_other_cpu())
		return -EPERM;

	/*
	 * Note that the waiter will also change if there was an unsafe
	 * hostile takeover.
	 */
	if (!nbcon_waiter_matches(cur, ctxt->prio))
		return -EPERM;

	/* If still locked, caller should continue waiting. */
	if (cur->prio != NBCON_PRIO_NONE)
		return -EBUSY;

	/*
	 * The previous owner should have never released ownership
	 * in an unsafe region.
	 */
	WARN_ON_ONCE(cur->unsafe);

	/* Take ownership and clear the handover request in one update. */
	new.atom = cur->atom;
	new.prio = ctxt->prio;
	new.req_prio = NBCON_PRIO_NONE;
	new.unsafe = cur->unsafe_takeover;
	new.cpu = cpu;

	if (!nbcon_state_try_cmpxchg(con, cur, &new)) {
		/*
		 * The acquire could fail only when it has been taken
		 * over by a higher priority context.
		 */
		WARN_ON_ONCE(nbcon_waiter_matches(cur, ctxt->prio));
		return -EPERM;
	}

	/* Handover success. This context now owns the console. */
	return 0;
}
397
/**
 * nbcon_context_try_acquire_handover - Try to acquire via handover
 * @ctxt: The context of the caller
 * @cur: The current console state
 *
 * The function must be called only when the context has higher priority
 * than the current owner and the console is in an unsafe state.
 * It is the case when nbcon_context_try_acquire_direct() returns -EBUSY.
 *
 * The function sets "req_prio" field to make the current owner aware of
 * the request. Then it waits until the current owner releases the console,
 * or an even higher context takes over the request, or timeout expires.
 *
 * The current owner checks the "req_prio" field on exit from the unsafe
 * region and releases the console. It does not touch the "req_prio" field
 * so that the console stays reserved for the waiter.
 *
 * Return: 0 on success. Otherwise, an error code on failure. Also @cur
 *	is updated to the latest state when failed to modify it.
 *
 * Errors:
 *
 *	-EPERM:		A panic is in progress and this is not the panic CPU.
 *			Or a higher priority context has taken over the
 *			console or the handover request.
 *
 *	-EBUSY:		The current owner is on the same CPU so that the hand
 *			shake could not work. Or the current owner is not
 *			willing to wait (zero timeout). Or the console does
 *			not enter the safe state before timeout passed. The
 *			caller might still use the unsafe hostile takeover
 *			when allowed.
 *
 *	-EAGAIN:	@cur has changed when creating the handover request.
 *			The caller should retry with direct acquire.
 */
static int nbcon_context_try_acquire_handover(struct nbcon_context *ctxt,
					      struct nbcon_state *cur)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state new;
	int timeout;
	int request_err = -EBUSY;

	/*
	 * Check that the handover is called when the direct acquire failed
	 * with -EBUSY.
	 */
	WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
	WARN_ON_ONCE(!cur->unsafe);

	/*
	 * Panic does not imply that the console is owned. However, it
	 * is critical that non-panic CPUs during panic are unable to
	 * wait for a handover in order to satisfy the assumptions of
	 * nbcon_waiter_matches(). In particular, the assumption that
	 * lower priorities are ignored during panic.
	 */
	if (panic_on_other_cpu())
		return -EPERM;

	/* Handover is not possible on the same CPU. */
	if (cur->cpu == cpu)
		return -EBUSY;

	/*
	 * Console stays unsafe after an unsafe takeover until re-initialized.
	 * Waiting is not going to help in this case.
	 */
	if (cur->unsafe_takeover)
		return -EBUSY;

	/* Is the caller willing to wait? */
	if (ctxt->spinwait_max_us == 0)
		return -EBUSY;

	/*
	 * Setup a request for the handover. The caller should try to acquire
	 * the console directly when the current state has been modified.
	 */
	new.atom = cur->atom;
	new.req_prio = ctxt->prio;
	if (!nbcon_state_try_cmpxchg(con, cur, &new))
		return -EAGAIN;

	cur->atom = new.atom;

	/*
	 * Wait until there is no owner and then acquire the console.
	 * Each iteration busy-waits roughly one microsecond, so
	 * @spinwait_max_us bounds the total wait time.
	 */
	for (timeout = ctxt->spinwait_max_us; timeout >= 0; timeout--) {
		/* On successful acquire, this request is cleared. */
		request_err = nbcon_context_try_acquire_requested(ctxt, cur);
		if (!request_err)
			return 0;

		/*
		 * If the acquire should be aborted, it must be ensured
		 * that the request is removed before returning to caller.
		 */
		if (request_err == -EPERM)
			break;

		udelay(1);

		/* Re-read the state because some time has passed. */
		nbcon_state_read(con, cur);
	}

	/* Timed out or aborted. Carefully remove handover request. */
	do {
		/*
		 * No need to remove request if there is a new waiter. This
		 * can only happen if a higher priority context has taken over
		 * the console or the handover request.
		 */
		if (!nbcon_waiter_matches(cur, ctxt->prio))
			return -EPERM;

		/* Unset request for handover. */
		new.atom = cur->atom;
		new.req_prio = NBCON_PRIO_NONE;
		if (nbcon_state_try_cmpxchg(con, cur, &new)) {
			/*
			 * Request successfully unset. Report failure of
			 * acquiring via handover.
			 */
			cur->atom = new.atom;
			return request_err;
		}

		/*
		 * Unable to remove request. Try to acquire in case
		 * the owner has released the lock.
		 */
	} while (nbcon_context_try_acquire_requested(ctxt, cur));

	/* Lucky timing. The acquire succeeded while removing the request. */
	return 0;
}
537
/**
 * nbcon_context_try_acquire_hostile - Acquire via unsafe hostile takeover
 * @ctxt: The context of the caller
 * @cur: The current console state
 *
 * Acquire the console even in the unsafe state.
 *
 * It can be permitted by setting the 'allow_unsafe_takeover' field only
 * by the final attempt to flush messages in panic().
 *
 * Return: 0 on success. -EPERM when not allowed by the context.
 */
static int nbcon_context_try_acquire_hostile(struct nbcon_context *ctxt,
					     struct nbcon_state *cur)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state new;

	if (!ctxt->allow_unsafe_takeover)
		return -EPERM;

	/* Ensure caller is allowed to perform unsafe hostile takeovers. */
	if (WARN_ON_ONCE(ctxt->prio != NBCON_PRIO_PANIC))
		return -EPERM;

	/*
	 * Check that try_acquire_direct() and try_acquire_handover() returned
	 * -EBUSY in the right situation.
	 */
	WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
	WARN_ON_ONCE(cur->unsafe != true);

	do {
		new.atom = cur->atom;
		new.cpu = cpu;
		new.prio = ctxt->prio;
		/*
		 * Taking over in the unsafe state marks the console as
		 * permanently unsafe: once 'unsafe_takeover' is set, both
		 * bits stay set until the console is re-initialized.
		 */
		new.unsafe |= cur->unsafe_takeover;
		new.unsafe_takeover |= cur->unsafe;

	} while (!nbcon_state_try_cmpxchg(con, cur, &new));

	return 0;
}
582
583 static struct printk_buffers panic_nbcon_pbufs;
584
/**
 * nbcon_context_try_acquire - Try to acquire nbcon console
 * @ctxt: The context of the caller
 * @is_reacquire: This acquire is a reacquire
 *
 * Context:	Under @ctxt->con->device_lock() or local_irq_save().
 * Return:	True if the console was acquired. False otherwise.
 *
 * If the caller allowed an unsafe hostile takeover, on success the
 * caller should check the current console state to see if it is
 * in an unsafe state. Otherwise, on success the caller may assume
 * the console is not in an unsafe state.
 */
static bool nbcon_context_try_acquire(struct nbcon_context *ctxt, bool is_reacquire)
{
	struct console *con = ctxt->console;
	struct nbcon_state cur;
	int err;

	nbcon_state_read(con, &cur);
try_again:
	/* 1) Direct acquire: console free or safely owned at lower prio. */
	err = nbcon_context_try_acquire_direct(ctxt, &cur, is_reacquire);
	if (err != -EBUSY)
		goto out;

	/* 2) Friendly handover: request/grant handshake with the owner. */
	err = nbcon_context_try_acquire_handover(ctxt, &cur);
	if (err == -EAGAIN)
		goto try_again;
	if (err != -EBUSY)
		goto out;

	/* 3) Last resort: unsafe hostile takeover (panic flush only). */
	err = nbcon_context_try_acquire_hostile(ctxt, &cur);
out:
	if (err)
		return false;

	/* Acquire succeeded. */

	/*
	 * Assign the appropriate buffer for this context. The panic CPU
	 * uses a separate buffer so records can be formatted safely even
	 * after a hostile takeover in an unsafe state.
	 */
	if (panic_on_this_cpu())
		ctxt->pbufs = &panic_nbcon_pbufs;
	else
		ctxt->pbufs = con->pbufs;

	/* Set the record sequence for this context to print. */
	ctxt->seq = nbcon_seq_read(ctxt->console);

	return true;
}
634
nbcon_owner_matches(struct nbcon_state * cur,int expected_cpu,int expected_prio)635 static bool nbcon_owner_matches(struct nbcon_state *cur, int expected_cpu,
636 int expected_prio)
637 {
638 /*
639 * A similar function, nbcon_waiter_matches(), only deals with
640 * EMERGENCY and PANIC priorities. However, this function must also
641 * deal with the NORMAL priority, which requires additional checks
642 * and constraints.
643 *
644 * For the case where preemption and interrupts are disabled, it is
645 * enough to also verify that the owning CPU has not changed.
646 *
647 * For the case where preemption or interrupts are enabled, an
648 * external synchronization method *must* be used. In particular,
649 * the driver-specific locking mechanism used in device_lock()
650 * (including disabling migration) should be used. It prevents
651 * scenarios such as:
652 *
653 * 1. [Task A] owns a context with NBCON_PRIO_NORMAL on [CPU X] and
654 * is scheduled out.
655 * 2. Another context takes over the lock with NBCON_PRIO_EMERGENCY
656 * and releases it.
657 * 3. [Task B] acquires a context with NBCON_PRIO_NORMAL on [CPU X]
658 * and is scheduled out.
659 * 4. [Task A] gets running on [CPU X] and sees that the console is
660 * still owned by a task on [CPU X] with NBON_PRIO_NORMAL. Thus
661 * [Task A] thinks it is the owner when it is not.
662 */
663
664 if (cur->prio != expected_prio)
665 return false;
666
667 if (cur->cpu != expected_cpu)
668 return false;
669
670 return true;
671 }
672
/**
 * nbcon_context_release - Release the console
 * @ctxt: The nbcon context from nbcon_context_try_acquire()
 *
 * Clears the 'prio' field when this context still owns the console. If
 * ownership was already handed over or taken, the state is left untouched.
 */
static void nbcon_context_release(struct nbcon_context *ctxt)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state cur;
	struct nbcon_state new;

	nbcon_state_read(con, &cur);

	do {
		/* Nothing to release if ownership was already lost. */
		if (!nbcon_owner_matches(&cur, cpu, ctxt->prio))
			break;

		new.atom = cur.atom;
		new.prio = NBCON_PRIO_NONE;

		/*
		 * If @unsafe_takeover is set, it is kept set so that
		 * the state remains permanently unsafe.
		 */
		new.unsafe |= cur.unsafe_takeover;

	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));

	/* The buffer must no longer be used after releasing ownership. */
	ctxt->pbufs = NULL;
}
703
/**
 * nbcon_context_can_proceed - Check whether ownership can proceed
 * @ctxt: The nbcon context from nbcon_context_try_acquire()
 * @cur: The current console state
 *
 * Return: True if this context still owns the console. False if
 *	ownership was handed over or taken.
 *
 * Must be invoked when entering the unsafe state to make sure that it still
 * owns the lock. Also must be invoked when exiting the unsafe context
 * to eventually free the lock for a higher priority context which asked
 * for the friendly handover.
 *
 * It can be called inside an unsafe section when the console is just
 * temporary in safe state instead of exiting and entering the unsafe
 * state.
 *
 * Also it can be called in the safe context before doing an expensive
 * safe operation. It does not make sense to do the operation when
 * a higher priority context took the lock.
 *
 * When this function returns false then the calling context no longer owns
 * the console and is no longer allowed to go forward. In this case it must
 * back out immediately and carefully. The buffer content is also no longer
 * trusted since it no longer belongs to the calling context.
 */
static bool nbcon_context_can_proceed(struct nbcon_context *ctxt, struct nbcon_state *cur)
{
	unsigned int cpu = smp_processor_id();

	/* Make sure this context still owns the console. */
	if (!nbcon_owner_matches(cur, cpu, ctxt->prio))
		return false;

	/* The console owner can proceed if there is no waiter. */
	if (cur->req_prio == NBCON_PRIO_NONE)
		return true;

	/*
	 * A console owner within an unsafe region is always allowed to
	 * proceed, even if there are waiters. It can perform a handover
	 * when exiting the unsafe region. Otherwise the waiter will
	 * need to perform an unsafe hostile takeover.
	 */
	if (cur->unsafe)
		return true;

	/* Waiters always have higher priorities than owners. */
	WARN_ON_ONCE(cur->req_prio <= cur->prio);

	/*
	 * Having a safe point for take over and eventually a few
	 * duplicated characters or a full line is way better than a
	 * hostile takeover. Post processing can take care of the garbage.
	 * Release and hand over.
	 */
	nbcon_context_release(ctxt);

	/*
	 * It is not clear whether the waiter really took over ownership. The
	 * outermost callsite must make the final decision whether console
	 * ownership is needed for it to proceed. If yes, it must reacquire
	 * ownership (possibly hostile) before carefully proceeding.
	 *
	 * The calling context no longer owns the console so go back all the
	 * way instead of trying to implement reacquire heuristics in tons of
	 * places.
	 */
	return false;
}
774
/**
 * nbcon_can_proceed - Check whether ownership can proceed
 * @wctxt: The write context that was handed to the write function
 *
 * Return: True if this context still owns the console. False if
 *	ownership was handed over or taken.
 *
 * It is used in nbcon_enter_unsafe() to make sure that it still owns the
 * lock. Also it is used in nbcon_exit_unsafe() to eventually free the lock
 * for a higher priority context which asked for the friendly handover.
 *
 * It can be called inside an unsafe section when the console is just
 * temporary in safe state instead of exiting and entering the unsafe state.
 *
 * Also it can be called in the safe context before doing an expensive safe
 * operation. It does not make sense to do the operation when a higher
 * priority context took the lock.
 *
 * When this function returns false then the calling context no longer owns
 * the console and is no longer allowed to go forward. In this case it must
 * back out immediately and carefully. The buffer content is also no longer
 * trusted since it no longer belongs to the calling context.
 */
bool nbcon_can_proceed(struct nbcon_write_context *wctxt)
{
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
	struct console *con = ctxt->console;
	struct nbcon_state cur;

	/* Snapshot the state and delegate the decision to the context helper. */
	nbcon_state_read(con, &cur);

	return nbcon_context_can_proceed(ctxt, &cur);
}
EXPORT_SYMBOL_GPL(nbcon_can_proceed);
809
#define nbcon_context_enter_unsafe(c)	__nbcon_context_update_unsafe(c, true)
#define nbcon_context_exit_unsafe(c)	__nbcon_context_update_unsafe(c, false)

/**
 * __nbcon_context_update_unsafe - Update the unsafe bit in @con->nbcon_state
 * @ctxt: The nbcon context from nbcon_context_try_acquire()
 * @unsafe: The new value for the unsafe bit
 *
 * Return: True if the unsafe state was updated and this context still
 *	owns the console. Otherwise false if ownership was handed
 *	over or taken.
 *
 * This function allows console owners to modify the unsafe status of the
 * console.
 *
 * When this function returns false then the calling context no longer owns
 * the console and is no longer allowed to go forward. In this case it must
 * back out immediately and carefully. The buffer content is also no longer
 * trusted since it no longer belongs to the calling context.
 *
 * Internal helper to avoid duplicated code.
 */
static bool __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsafe)
{
	struct console *con = ctxt->console;
	struct nbcon_state cur;
	struct nbcon_state new;

	nbcon_state_read(con, &cur);

	do {
		/*
		 * The unsafe bit must not be cleared if an
		 * unsafe hostile takeover has occurred.
		 */
		if (!unsafe && cur.unsafe_takeover)
			goto out;

		if (!nbcon_context_can_proceed(ctxt, &cur))
			return false;

		new.atom = cur.atom;
		new.unsafe = unsafe;
	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));

	cur.atom = new.atom;
out:
	/*
	 * Re-check with the final state: this may perform the friendly
	 * handover when exiting the unsafe region with a waiter pending.
	 */
	return nbcon_context_can_proceed(ctxt, &cur);
}
859
/**
 * nbcon_write_context_set_buf - Set the output buffer of a write context
 * @wctxt: The write context to update
 * @buf: The output buffer to use (NULL to invalidate the context)
 * @len: The length of @buf
 *
 * Also snapshots the console's current unsafe_takeover state into @wctxt
 * so the write callback can see whether an unsafe hostile takeover has
 * ever occurred on this console.
 */
void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
				 char *buf, unsigned int len)
{
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
	struct console *con = ctxt->console;
	struct nbcon_state cur;

	wctxt->outbuf = buf;
	wctxt->len = len;
	nbcon_state_read(con, &cur);
	wctxt->unsafe_takeover = cur.unsafe_takeover;
}
872
873 /**
874 * nbcon_enter_unsafe - Enter an unsafe region in the driver
875 * @wctxt: The write context that was handed to the write function
876 *
877 * Return: True if this context still owns the console. False if
878 * ownership was handed over or taken.
879 *
880 * When this function returns false then the calling context no longer owns
881 * the console and is no longer allowed to go forward. In this case it must
882 * back out immediately and carefully. The buffer content is also no longer
883 * trusted since it no longer belongs to the calling context.
884 */
nbcon_enter_unsafe(struct nbcon_write_context * wctxt)885 bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt)
886 {
887 struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
888 bool is_owner;
889
890 is_owner = nbcon_context_enter_unsafe(ctxt);
891 if (!is_owner)
892 nbcon_write_context_set_buf(wctxt, NULL, 0);
893 return is_owner;
894 }
895 EXPORT_SYMBOL_GPL(nbcon_enter_unsafe);
896
897 /**
898 * nbcon_exit_unsafe - Exit an unsafe region in the driver
899 * @wctxt: The write context that was handed to the write function
900 *
901 * Return: True if this context still owns the console. False if
902 * ownership was handed over or taken.
903 *
904 * When this function returns false then the calling context no longer owns
905 * the console and is no longer allowed to go forward. In this case it must
906 * back out immediately and carefully. The buffer content is also no longer
907 * trusted since it no longer belongs to the calling context.
908 */
nbcon_exit_unsafe(struct nbcon_write_context * wctxt)909 bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt)
910 {
911 struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
912 bool ret;
913
914 ret = nbcon_context_exit_unsafe(ctxt);
915 if (!ret)
916 nbcon_write_context_set_buf(wctxt, NULL, 0);
917 return ret;
918 }
919 EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
920
921 /**
922 * nbcon_reacquire_nobuf - Reacquire a console after losing ownership
923 * while printing
924 * @wctxt: The write context that was handed to the write callback
925 *
926 * Since ownership can be lost at any time due to handover or takeover, a
927 * printing context _must_ be prepared to back out immediately and
928 * carefully. However, there are scenarios where the printing context must
929 * reacquire ownership in order to finalize or revert hardware changes.
930 *
931 * This function allows a printing context to reacquire ownership using the
932 * same priority as its previous ownership.
933 *
934 * Note that after a successful reacquire the printing context will have no
935 * output buffer because that has been lost. This function cannot be used to
936 * resume printing.
937 */
nbcon_reacquire_nobuf(struct nbcon_write_context * wctxt)938 void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt)
939 {
940 struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
941
942 while (!nbcon_context_try_acquire(ctxt, true))
943 cpu_relax();
944
945 nbcon_write_context_set_buf(wctxt, NULL, 0);
946 }
947 EXPORT_SYMBOL_GPL(nbcon_reacquire_nobuf);
948
#ifdef CONFIG_PRINTK_EXECUTION_CTX
/*
 * Copy the execution context (cpu/pid/comm) of the record's origin from
 * the printk message into the write context for driver consumption.
 */
static void wctxt_load_execution_ctx(struct nbcon_write_context *wctxt,
				     struct printk_message *pmsg)
{
	/* Both comm buffers must match for the copy below to be safe. */
	static_assert(sizeof(wctxt->comm) == sizeof(pmsg->comm));

	wctxt->cpu = pmsg->cpu;
	wctxt->pid = pmsg->pid;
	memcpy(wctxt->comm, pmsg->comm, sizeof(wctxt->comm));
}
#else
static void wctxt_load_execution_ctx(struct nbcon_write_context *wctxt,
				     struct printk_message *pmsg) {}
#endif
962
/**
 * nbcon_emit_next_record - Emit a record in the acquired context
 * @wctxt:	The write context that will be handed to the write function
 * @use_atomic:	True if the write_atomic() callback is to be used
 *
 * Return:	True if this context still owns the console. False if
 *		ownership was handed over or taken.
 *
 * When this function returns false then the calling context no longer owns
 * the console and is no longer allowed to go forward. In this case it must
 * back out immediately and carefully. The buffer content is also no longer
 * trusted since it no longer belongs to the calling context. If the caller
 * wants to do more it must reacquire the console first.
 *
 * When true is returned, @wctxt->ctxt.backlog indicates whether there are
 * still records pending in the ringbuffer.
 */
static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt, bool use_atomic)
{
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
	struct console *con = ctxt->console;
	bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
	struct printk_message pmsg = {
		.pbufs = ctxt->pbufs,
	};
	unsigned long con_dropped;
	struct nbcon_state cur;
	unsigned long dropped;
	unsigned long ulseq;

	/*
	 * This function should never be called for consoles that have not
	 * implemented the necessary callback for writing: i.e. legacy
	 * consoles and, when atomic, nbcon consoles with no write_atomic().
	 * Handle it as if ownership was lost and try to continue.
	 *
	 * Note that for nbcon consoles the write_thread() callback is
	 * mandatory and was already checked in nbcon_alloc().
	 */
	if (WARN_ON_ONCE((use_atomic && !con->write_atomic) ||
			 !(console_srcu_read_flags(con) & CON_NBCON))) {
		nbcon_context_release(ctxt);
		return false;
	}

	/*
	 * The printk buffers are filled within an unsafe section. This
	 * prevents NBCON_PRIO_NORMAL and NBCON_PRIO_EMERGENCY from
	 * clobbering each other.
	 */

	if (!nbcon_context_enter_unsafe(ctxt))
		return false;

	ctxt->backlog = printk_get_next_message(&pmsg, ctxt->seq, is_extended, true);
	if (!ctxt->backlog)
		return nbcon_context_exit_unsafe(ctxt);

	/*
	 * @con->dropped is not protected in case of an unsafe hostile
	 * takeover. In that situation the update can be racy so
	 * annotate it accordingly.
	 */
	con_dropped = data_race(READ_ONCE(con->dropped));

	dropped = con_dropped + pmsg.dropped;
	if (dropped && !is_extended)
		console_prepend_dropped(&pmsg, dropped);

	/*
	 * If the previous owner was assigned the same record, this context
	 * has taken over ownership and is replaying the record. Prepend a
	 * message to let the user know the record is replayed.
	 */
	ulseq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_prev_seq));
	if (__ulseq_to_u64seq(prb, ulseq) == pmsg.seq) {
		console_prepend_replay(&pmsg);
	} else {
		/*
		 * Ensure this context is still the owner before trying to
		 * update @nbcon_prev_seq. Otherwise the value in @ulseq may
		 * not be from the previous owner and instead be some later
		 * value from the context that took over ownership.
		 */
		nbcon_state_read(con, &cur);
		if (!nbcon_context_can_proceed(ctxt, &cur))
			return false;

		atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_prev_seq), &ulseq,
					__u64seq_to_ulseq(pmsg.seq));
	}

	if (!nbcon_context_exit_unsafe(ctxt))
		return false;

	/* For skipped records just update seq/dropped in @con. */
	if (pmsg.outbuf_len == 0)
		goto update_con;

	/* Initialize the write context for driver callbacks. */
	nbcon_write_context_set_buf(wctxt, &pmsg.pbufs->outbuf[0], pmsg.outbuf_len);

	/* Hand the execution context of the record's origin to the driver. */
	wctxt_load_execution_ctx(wctxt, &pmsg);

	if (use_atomic)
		con->write_atomic(con, wctxt);
	else
		con->write_thread(con, wctxt);

	if (!wctxt->outbuf) {
		/*
		 * Ownership was lost and reacquired by the driver. Handle it
		 * as if ownership was lost.
		 */
		nbcon_context_release(ctxt);
		return false;
	}

	/*
	 * Ownership may have been lost but _not_ reacquired by the driver.
	 * This case is detected and handled when entering unsafe to update
	 * dropped/seq values.
	 */

	/*
	 * Since any dropped message was successfully output, reset the
	 * dropped count for the console.
	 */
	dropped = 0;
update_con:
	/*
	 * The dropped count and the sequence number are updated within an
	 * unsafe section. This limits update races to the panic context and
	 * allows the panic context to win.
	 */

	if (!nbcon_context_enter_unsafe(ctxt))
		return false;

	if (dropped != con_dropped) {
		/* Counterpart to the READ_ONCE() above. */
		WRITE_ONCE(con->dropped, dropped);
	}

	nbcon_seq_try_update(ctxt, pmsg.seq + 1);

	return nbcon_context_exit_unsafe(ctxt);
}
1111
/*
 * nbcon_emit_one - Print one record for an nbcon console using the
 *			specified callback
 * @wctxt:	An initialized write context struct to use for this context
 * @use_atomic:	True if the write_atomic() callback is to be used
 *
 * Return:	True, when a record has been printed and there are still
 *		pending records. The caller might want to continue flushing.
 *
 *		False, when there is no pending record, or when the console
 *		context cannot be acquired, or the ownership has been lost.
 *		The caller should give up. Either the job is done, cannot be
 *		done, or will be handled by the owning context.
 *
 * This is an internal helper to handle the locking of the console before
 * calling nbcon_emit_next_record().
 */
static bool nbcon_emit_one(struct nbcon_write_context *wctxt, bool use_atomic)
{
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
	struct console *con = ctxt->console;
	unsigned long flags;
	bool ret = false;

	if (!use_atomic) {
		/* Threaded printing synchronizes via the driver lock. */
		con->device_lock(con, &flags);

		/*
		 * Ensure this stays on the CPU to make handover and
		 * takeover possible.
		 */
		cant_migrate();
	}

	if (!nbcon_context_try_acquire(ctxt, false))
		goto out;

	/*
	 * nbcon_emit_next_record() returns false when the console was
	 * handed over or taken over. In both cases the context is no
	 * longer valid.
	 *
	 * The higher priority printing context takes over responsibility
	 * to print the pending records.
	 */
	if (!nbcon_emit_next_record(wctxt, use_atomic))
		goto out;

	nbcon_context_release(ctxt);

	ret = ctxt->backlog;
out:
	if (!use_atomic)
		con->device_unlock(con, flags);
	return ret;
}
1168
/**
 * nbcon_kthread_should_wakeup - Check whether a printer thread should wakeup
 * @con:	Console to operate on
 * @ctxt:	The nbcon context from nbcon_context_try_acquire()
 *
 * Return:	True if the thread should shutdown or if the console is
 *		allowed to print and a record is available. False otherwise.
 *
 * After the thread wakes up, it must first check if it should shutdown before
 * attempting any printing.
 */
static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_context *ctxt)
{
	bool ret = false;
	short flags;
	int cookie;

	if (kthread_should_stop())
		return true;

	/*
	 * Block the kthread when the system is in an emergency or panic mode.
	 * It increases the chance that these contexts would be able to show
	 * the messages directly. And it reduces the risk of interrupted writes
	 * where the context with a higher priority takes over the nbcon console
	 * ownership in the middle of a message.
	 */
	if (unlikely(atomic_read(&nbcon_cpu_emergency_cnt)) ||
	    unlikely(panic_in_progress()))
		return false;

	cookie = console_srcu_read_lock();

	flags = console_srcu_read_flags(con);
	if (console_is_usable(con, flags, false)) {
		/* Bring the sequence in @ctxt up to date */
		ctxt->seq = nbcon_seq_read(con);

		/* Wake only when there is an unprinted finalized record. */
		ret = prb_read_valid(prb, ctxt->seq, NULL);
	}

	console_srcu_read_unlock(cookie);
	return ret;
}
1213
/**
 * nbcon_kthread_func - The printer thread function
 * @__console:	Console to operate on
 *
 * Return:	0
 */
static int nbcon_kthread_func(void *__console)
{
	struct console *con = __console;
	struct nbcon_write_context wctxt = {
		.ctxt.console	= con,
		.ctxt.prio	= NBCON_PRIO_NORMAL,
	};
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
	short con_flags;
	bool backlog;
	int cookie;

wait_for_event:
	/*
	 * Guarantee this task is visible on the rcuwait before
	 * checking the wake condition.
	 *
	 * The full memory barrier within set_current_state() of
	 * ___rcuwait_wait_event() pairs with the full memory
	 * barrier within rcuwait_has_sleeper().
	 *
	 * This pairs with rcuwait_has_sleeper:A and nbcon_kthread_wake:A.
	 */
	rcuwait_wait_event(&con->rcuwait,
			   nbcon_kthread_should_wakeup(con, ctxt),
			   TASK_INTERRUPTIBLE); /* LMM(nbcon_kthread_func:A) */

	do {
		if (kthread_should_stop())
			return 0;

		/*
		 * Block the kthread when the system is in an emergency or panic
		 * mode. See nbcon_kthread_should_wakeup() for more details.
		 */
		if (unlikely(atomic_read(&nbcon_cpu_emergency_cnt)) ||
		    unlikely(panic_in_progress()))
			goto wait_for_event;

		backlog = false;

		/*
		 * Keep the srcu read lock around the entire operation so that
		 * synchronize_srcu() can guarantee that the kthread stopped
		 * or suspended printing.
		 */
		cookie = console_srcu_read_lock();

		con_flags = console_srcu_read_flags(con);

		if (console_is_usable(con, con_flags, false))
			backlog = nbcon_emit_one(&wctxt, false);

		console_srcu_read_unlock(cookie);

		/* Give other tasks a chance to run between records. */
		cond_resched();

	} while (backlog);

	goto wait_for_event;
}
1281
1282 /**
1283 * nbcon_irq_work - irq work to wake console printer thread
1284 * @irq_work: The irq work to operate on
1285 */
nbcon_irq_work(struct irq_work * irq_work)1286 static void nbcon_irq_work(struct irq_work *irq_work)
1287 {
1288 struct console *con = container_of(irq_work, struct console, irq_work);
1289
1290 nbcon_kthread_wake(con);
1291 }
1292
/* Returns true if a task is currently waiting on @w. Implies a full barrier. */
static inline bool rcuwait_has_sleeper(struct rcuwait *w)
{
	/*
	 * Guarantee any new records can be seen by tasks preparing to wait
	 * before this context checks if the rcuwait is empty.
	 *
	 * This full memory barrier pairs with the full memory barrier within
	 * set_current_state() of ___rcuwait_wait_event(), which is called
	 * after prepare_to_rcuwait() adds the waiter but before it has
	 * checked the wait condition.
	 *
	 * This pairs with nbcon_kthread_func:A.
	 */
	smp_mb(); /* LMM(rcuwait_has_sleeper:A) */
	return rcuwait_active(w);
}
1309
/**
 * nbcon_kthreads_wake - Wake up printing threads using irq_work
 *
 * Schedules irq_work for every registered nbcon console that currently has
 * a sleeping printer thread. No-op until the kthreads are running.
 */
void nbcon_kthreads_wake(void)
{
	struct console *con;
	int cookie;

	if (!printk_kthreads_running)
		return;

	/*
	 * It is not allowed to call this function when console irq_work
	 * is blocked.
	 */
	if (WARN_ON_ONCE(console_irqwork_blocked))
		return;

	cookie = console_srcu_read_lock();
	for_each_console_srcu(con) {
		if (!(console_srcu_read_flags(con) & CON_NBCON))
			continue;

		/*
		 * Only schedule irq_work if the printing thread is
		 * actively waiting. If not waiting, the thread will
		 * notice by itself that it has work to do.
		 */
		if (rcuwait_has_sleeper(&con->rcuwait))
			irq_work_queue(&con->irq_work);
	}
	console_srcu_read_unlock(cookie);
}
1343
1344 /*
1345 * nbcon_kthread_stop - Stop a console printer thread
1346 * @con: Console to operate on
1347 */
nbcon_kthread_stop(struct console * con)1348 void nbcon_kthread_stop(struct console *con)
1349 {
1350 lockdep_assert_console_list_lock_held();
1351
1352 if (!con->kthread)
1353 return;
1354
1355 kthread_stop(con->kthread);
1356 con->kthread = NULL;
1357 }
1358
1359 /**
1360 * nbcon_kthread_create - Create a console printer thread
1361 * @con: Console to operate on
1362 *
1363 * Return: True if the kthread was started or already exists.
1364 * Otherwise false and @con must not be registered.
1365 *
1366 * This function is called when it will be expected that nbcon consoles are
1367 * flushed using the kthread. The messages printed with NBCON_PRIO_NORMAL
1368 * will be no longer flushed by the legacy loop. This is why failure must
1369 * be fatal for console registration.
1370 *
1371 * If @con was already registered and this function fails, @con must be
1372 * unregistered before the global state variable @printk_kthreads_running
1373 * can be set.
1374 */
nbcon_kthread_create(struct console * con)1375 bool nbcon_kthread_create(struct console *con)
1376 {
1377 struct task_struct *kt;
1378
1379 lockdep_assert_console_list_lock_held();
1380
1381 if (con->kthread)
1382 return true;
1383
1384 kt = kthread_run(nbcon_kthread_func, con, "pr/%s%d", con->name, con->index);
1385 if (WARN_ON(IS_ERR(kt))) {
1386 con_printk(KERN_ERR, con, "failed to start printing thread\n");
1387 return false;
1388 }
1389
1390 con->kthread = kt;
1391
1392 /*
1393 * It is important that console printing threads are scheduled
1394 * shortly after a printk call and with generous runtime budgets.
1395 */
1396 sched_set_normal(con->kthread, -20);
1397
1398 return true;
1399 }
1400
1401 /* Track the nbcon emergency nesting per CPU. */
1402 static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);
1403 static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
1404
1405 /**
1406 * nbcon_get_cpu_emergency_nesting - Get the per CPU emergency nesting pointer
1407 *
1408 * Context: For reading, any context. For writing, any context which could
1409 * not be migrated to another CPU.
1410 * Return: Either a pointer to the per CPU emergency nesting counter of
1411 * the current CPU or to the init data during early boot.
1412 *
1413 * The function is safe for reading per-CPU variables in any context because
1414 * preemption is disabled if the current CPU is in the emergency state. See
1415 * also nbcon_cpu_emergency_enter().
1416 */
nbcon_get_cpu_emergency_nesting(void)1417 static __ref unsigned int *nbcon_get_cpu_emergency_nesting(void)
1418 {
1419 /*
1420 * The value of __printk_percpu_data_ready gets set in normal
1421 * context and before SMP initialization. As a result it could
1422 * never change while inside an nbcon emergency section.
1423 */
1424 if (!printk_percpu_data_ready())
1425 return &early_nbcon_pcpu_emergency_nesting;
1426
1427 return raw_cpu_ptr(&nbcon_pcpu_emergency_nesting);
1428 }
1429
1430 /**
1431 * nbcon_get_default_prio - The appropriate nbcon priority to use for nbcon
1432 * printing on the current CPU
1433 *
1434 * Context: Any context.
1435 * Return: The nbcon_prio to use for acquiring an nbcon console in this
1436 * context for printing.
1437 *
1438 * The function is safe for reading per-CPU data in any context because
1439 * preemption is disabled if the current CPU is in the emergency or panic
1440 * state.
1441 */
nbcon_get_default_prio(void)1442 enum nbcon_prio nbcon_get_default_prio(void)
1443 {
1444 unsigned int *cpu_emergency_nesting;
1445
1446 if (panic_on_this_cpu())
1447 return NBCON_PRIO_PANIC;
1448
1449 cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
1450 if (*cpu_emergency_nesting)
1451 return NBCON_PRIO_EMERGENCY;
1452
1453 return NBCON_PRIO_NORMAL;
1454 }
1455
1456 /*
1457 * Track if it is allowed to perform unsafe hostile takeovers of console
1458 * ownership. When true, console drivers might perform unsafe actions while
1459 * printing. It is externally available via nbcon_allow_unsafe_takeover().
1460 */
1461 static bool panic_nbcon_allow_unsafe_takeover;
1462
1463 /**
1464 * nbcon_allow_unsafe_takeover - Check if unsafe console takeovers are allowed
1465 *
1466 * Return: True, when it is permitted to perform unsafe console printing
1467 *
1468 * This is also used by console_is_usable() to determine if it is allowed to
1469 * call write_atomic() callbacks flagged as unsafe (CON_NBCON_ATOMIC_UNSAFE).
1470 */
nbcon_allow_unsafe_takeover(void)1471 bool nbcon_allow_unsafe_takeover(void)
1472 {
1473 return panic_on_this_cpu() && panic_nbcon_allow_unsafe_takeover;
1474 }
1475
/**
 * nbcon_legacy_emit_next_record - Print one record for an nbcon console
 *					in legacy contexts
 * @con:	The console to print on
 * @handover:	Will be set to true if a printk waiter has taken over the
 *		console_lock, in which case the caller is no longer holding
 *		both the console_lock and the SRCU read lock. Otherwise it
 *		is set to false.
 * @cookie:	The cookie from the SRCU read lock.
 * @use_atomic:	Set true when called in an atomic or unknown context.
 *		It affects which nbcon callback will be used: write_atomic()
 *		or write_thread().
 *
 *		When false, the write_thread() callback is used and would be
 *		called in a preemptible context unless disabled by the
 *		device_lock. The legacy handover is not allowed in this mode.
 *
 * Context:	Any context except NMI.
 * Return:	True, when a record has been printed and there are still
 *		pending records. The caller might want to continue flushing.
 *
 *		False, when there is no pending record, or when the console
 *		context cannot be acquired, or the ownership has been lost.
 *		The caller should give up. Either the job is done, cannot be
 *		done, or will be handled by the owning context.
 *
 * This function is meant to be called by console_flush_all() to print records
 * on nbcon consoles from legacy context (printing via console unlocking).
 * Essentially it is the nbcon version of console_emit_next_record().
 */
bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
				   int cookie, bool use_atomic)
{
	struct nbcon_write_context wctxt = { };
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
	unsigned long flags;
	bool progress;

	ctxt->console	= con;
	ctxt->prio	= nbcon_get_default_prio();

	if (use_atomic) {
		/*
		 * In an atomic or unknown context, use the same procedure as
		 * in console_emit_next_record(). It allows to handover.
		 */
		printk_safe_enter_irqsave(flags);
		console_lock_spinning_enable();
		stop_critical_timings();
	}

	progress = nbcon_emit_one(&wctxt, use_atomic);

	if (use_atomic) {
		start_critical_timings();
		*handover = console_lock_spinning_disable_and_check(cookie);
		printk_safe_exit_irqrestore(flags);
	} else {
		/* Non-atomic does not perform legacy spinning handovers. */
		*handover = false;
	}

	return progress;
}
1540
/**
 * __nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
 *					write_atomic() callback
 * @con:	The nbcon console to flush
 * @stop_seq:	Flush up until this record
 *
 * Return:	0 if @con was flushed up to @stop_seq. Otherwise, error code on
 *		failure.
 *
 * Errors:
 *
 *	-EPERM:		Unable to acquire console ownership.
 *
 *	-EAGAIN:	Another context took over ownership while printing.
 *
 *	-ENOENT:	A record before @stop_seq is not available.
 *
 * If flushing up to @stop_seq was not successful, it only makes sense for the
 * caller to try again when -EAGAIN was returned. When -EPERM is returned,
 * this context is not allowed to acquire the console. When -ENOENT is
 * returned, it cannot be expected that the unfinalized record will become
 * available.
 */
static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
{
	struct nbcon_write_context wctxt = { };
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
	int err = 0;

	ctxt->console			= con;
	ctxt->spinwait_max_us		= 2000;
	ctxt->prio			= nbcon_get_default_prio();
	ctxt->allow_unsafe_takeover	= nbcon_allow_unsafe_takeover();

	while (nbcon_seq_read(con) < stop_seq) {
		/*
		 * Atomic flushing does not use console driver synchronization
		 * (i.e. it does not hold the port lock for uart consoles).
		 * Therefore IRQs must be disabled to avoid being interrupted
		 * and then calling into a driver that will deadlock trying
		 * to acquire console ownership.
		 */
		scoped_guard(irqsave) {
			if (!nbcon_context_try_acquire(ctxt, false))
				return -EPERM;

			/*
			 * nbcon_emit_next_record() returns false when
			 * the console was handed over or taken over.
			 * In both cases the context is no longer valid.
			 */
			if (!nbcon_emit_next_record(&wctxt, true))
				return -EAGAIN;

			nbcon_context_release(ctxt);
		}

		if (!ctxt->backlog) {
			/* Are there reserved but not yet finalized records? */
			if (nbcon_seq_read(con) < stop_seq)
				err = -ENOENT;
			break;
		}
	}

	return err;
}
1608
/**
 * nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
 *					write_atomic() callback
 * @con:	The nbcon console to flush
 * @stop_seq:	Flush up until this record
 *
 * This will stop flushing before @stop_seq if another context has ownership.
 * That context is then responsible for the flushing. Likewise, if new records
 * are added while this context was flushing and there is no other context
 * to handle the printing, this context must also flush those records.
 */
static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
{
	struct console_flush_type ft;
	int err;

again:
	err = __nbcon_atomic_flush_pending_con(con, stop_seq);

	/*
	 * If there was a new owner (-EPERM, -EAGAIN), that context is
	 * responsible for completing.
	 *
	 * Do not wait for records not yet finalized (-ENOENT) to avoid a
	 * possible deadlock. They will either get flushed by the writer or
	 * eventually skipped on panic CPU.
	 */
	if (err)
		return;

	/*
	 * If flushing was successful but more records are available, this
	 * context must flush those remaining records if the printer thread
	 * is not available to do it.
	 */
	printk_get_console_flush_type(&ft);
	if (!ft.nbcon_offload &&
	    prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
		/* Extend the flush target to the newest reserved record. */
		stop_seq = prb_next_reserve_seq(prb);
		goto again;
	}
}
1651
1652 /**
1653 * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
1654 * write_atomic() callback
1655 * @stop_seq: Flush up until this record
1656 */
__nbcon_atomic_flush_pending(u64 stop_seq)1657 static void __nbcon_atomic_flush_pending(u64 stop_seq)
1658 {
1659 struct console *con;
1660 int cookie;
1661
1662 cookie = console_srcu_read_lock();
1663 for_each_console_srcu(con) {
1664 short flags = console_srcu_read_flags(con);
1665
1666 if (!(flags & CON_NBCON))
1667 continue;
1668
1669 if (!console_is_usable(con, flags, true))
1670 continue;
1671
1672 if (nbcon_seq_read(con) >= stop_seq)
1673 continue;
1674
1675 nbcon_atomic_flush_pending_con(con, stop_seq);
1676 }
1677 console_srcu_read_unlock(cookie);
1678 }
1679
1680 /**
1681 * nbcon_atomic_flush_pending - Flush all nbcon consoles using their
1682 * write_atomic() callback
1683 *
1684 * Flush the backlog up through the currently newest record. Any new
1685 * records added while flushing will not be flushed if there is another
1686 * context available to handle the flushing. This is to avoid one CPU
1687 * printing unbounded because other CPUs continue to add records.
1688 */
nbcon_atomic_flush_pending(void)1689 void nbcon_atomic_flush_pending(void)
1690 {
1691 __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
1692 }
1693
/**
 * nbcon_atomic_flush_unsafe - Flush all nbcon consoles using their
 *	write_atomic() callback and allowing unsafe hostile takeovers
 *
 * Flush the backlog up through the currently newest record. Unsafe hostile
 * takeovers will be performed, if necessary.
 *
 * The global permission flag is only honored on the panicking CPU (see
 * nbcon_allow_unsafe_takeover()), so setting it here without further
 * synchronization is acceptable.
 */
void nbcon_atomic_flush_unsafe(void)
{
	/* Temporarily permit unsafe takeovers for the duration of the flush. */
	panic_nbcon_allow_unsafe_takeover = true;
	__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
	panic_nbcon_allow_unsafe_takeover = false;
}
1707
/**
 * nbcon_cpu_emergency_enter - Enter an emergency section where printk()
 *				messages for that CPU are flushed directly
 *
 * Context:	Any context. Disables preemption.
 *
 * When within an emergency section, printk() calls will attempt to flush any
 * pending messages in the ringbuffer.
 */
void nbcon_cpu_emergency_enter(void)
{
	unsigned int *cpu_emergency_nesting;

	/*
	 * Preemption must be disabled first so that the per-CPU nesting
	 * counter below stays associated with this CPU.
	 */
	preempt_disable();

	/* Global count used to block the printer kthreads. */
	atomic_inc(&nbcon_cpu_emergency_cnt);

	cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
	(*cpu_emergency_nesting)++;
}
1728
/**
 * nbcon_cpu_emergency_exit - Exit an emergency section
 *
 * Context:	Within an emergency section. Enables preemption.
 */
void nbcon_cpu_emergency_exit(void)
{
	unsigned int *cpu_emergency_nesting;

	cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
	/* Warn (and do not underflow) on unbalanced exit calls. */
	if (!WARN_ON_ONCE(*cpu_emergency_nesting == 0))
		(*cpu_emergency_nesting)--;

	/*
	 * Wake up kthreads because there might be some pending messages
	 * added by other CPUs with normal priority since the last flush
	 * in the emergency context.
	 */
	if (!WARN_ON_ONCE(atomic_read(&nbcon_cpu_emergency_cnt) == 0)) {
		if (atomic_dec_return(&nbcon_cpu_emergency_cnt) == 0) {
			struct console_flush_type ft;

			printk_get_console_flush_type(&ft);
			if (ft.nbcon_offload)
				nbcon_kthreads_wake();
		}
	}

	preempt_enable();
}
1759
/**
 * nbcon_alloc - Allocate and init the nbcon console specific data
 * @con:	Console to initialize
 *
 * Return:	True if the console was fully allocated and initialized.
 *		Otherwise @con must not be registered.
 *
 * When allocation and init was successful, the console must be properly
 * freed using nbcon_free() once it is no longer needed.
 */
bool nbcon_alloc(struct console *con)
{
	struct nbcon_state state = { };

	/* Synchronize the kthread start. */
	lockdep_assert_console_list_lock_held();

	/* Check for mandatory nbcon callbacks. */
	if (WARN_ON(!con->write_thread ||
		    !con->device_lock ||
		    !con->device_unlock)) {
		return false;
	}

	rcuwait_init(&con->rcuwait);
	init_irq_work(&con->irq_work, nbcon_irq_work);
	/* -1 means "no previous record" for the replay-detection logic. */
	atomic_long_set(&ACCESS_PRIVATE(con, nbcon_prev_seq), -1UL);
	nbcon_state_set(con, &state);

	/*
	 * Initialize @nbcon_seq to the highest possible sequence number so
	 * that practically speaking it will have nothing to print until a
	 * desired initial sequence number has been set via nbcon_seq_force().
	 */
	atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), ULSEQ_MAX(prb));

	if (con->flags & CON_BOOT) {
		/*
		 * Boot console printing is synchronized with legacy console
		 * printing, so boot consoles can share the same global printk
		 * buffers.
		 */
		con->pbufs = &printk_shared_pbufs;
	} else {
		con->pbufs = kmalloc_obj(*con->pbufs);
		if (!con->pbufs) {
			con_printk(KERN_ERR, con, "failed to allocate printing buffer\n");
			return false;
		}

		if (printk_kthreads_ready && !have_boot_console) {
			if (!nbcon_kthread_create(con)) {
				/* Roll back the buffer allocation on failure. */
				kfree(con->pbufs);
				con->pbufs = NULL;
				return false;
			}

			/* Might be the first kthread. */
			printk_kthreads_running = true;
		}
	}

	return true;
}
1824
1825 /**
1826 * nbcon_free - Free and cleanup the nbcon console specific data
1827 * @con: Console to free/cleanup nbcon data
1828 *
1829 * Important: @have_nbcon_console must be updated before calling
1830 * this function. In particular, it can be set only when there
1831 * is still another nbcon console registered.
1832 */
nbcon_free(struct console * con)1833 void nbcon_free(struct console *con)
1834 {
1835 struct nbcon_state state = { };
1836
1837 /* Synchronize the kthread stop. */
1838 lockdep_assert_console_list_lock_held();
1839
1840 if (printk_kthreads_running) {
1841 nbcon_kthread_stop(con);
1842
1843 /* Might be the last nbcon console.
1844 *
1845 * Do not rely on printk_kthreads_check_locked(). It is not
1846 * called in some code paths, see nbcon_free() callers.
1847 */
1848 if (!have_nbcon_console)
1849 printk_kthreads_running = false;
1850 }
1851
1852 nbcon_state_set(con, &state);
1853
1854 /* Boot consoles share global printk buffers. */
1855 if (!(con->flags & CON_BOOT))
1856 kfree(con->pbufs);
1857
1858 con->pbufs = NULL;
1859 }
1860
1861 /**
1862 * nbcon_device_try_acquire - Try to acquire nbcon console and enter unsafe
1863 * section
1864 * @con: The nbcon console to acquire
1865 *
1866 * Context: Under the locking mechanism implemented in
1867 * @con->device_lock() including disabling migration.
1868 * Return: True if the console was acquired. False otherwise.
1869 *
1870 * Console drivers will usually use their own internal synchronization
1871 * mechasism to synchronize between console printing and non-printing
1872 * activities (such as setting baud rates). However, nbcon console drivers
1873 * supporting atomic consoles may also want to mark unsafe sections when
1874 * performing non-printing activities in order to synchronize against their
1875 * atomic_write() callback.
1876 *
1877 * This function acquires the nbcon console using priority NBCON_PRIO_NORMAL
1878 * and marks it unsafe for handover/takeover.
1879 */
nbcon_device_try_acquire(struct console * con)1880 bool nbcon_device_try_acquire(struct console *con)
1881 {
1882 struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
1883
1884 cant_migrate();
1885
1886 memset(ctxt, 0, sizeof(*ctxt));
1887 ctxt->console = con;
1888 ctxt->prio = NBCON_PRIO_NORMAL;
1889
1890 if (!nbcon_context_try_acquire(ctxt, false))
1891 return false;
1892
1893 if (!nbcon_context_enter_unsafe(ctxt))
1894 return false;
1895
1896 return true;
1897 }
1898 EXPORT_SYMBOL_GPL(nbcon_device_try_acquire);
1899
1900 /**
1901 * nbcon_device_release - Exit unsafe section and release the nbcon console
1902 * @con: The nbcon console acquired in nbcon_device_try_acquire()
1903 */
nbcon_device_release(struct console * con)1904 void nbcon_device_release(struct console *con)
1905 {
1906 struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
1907 struct console_flush_type ft;
1908 int cookie;
1909
1910 if (!nbcon_context_exit_unsafe(ctxt))
1911 return;
1912
1913 nbcon_context_release(ctxt);
1914
1915 /*
1916 * This context must flush any new records added while the console
1917 * was locked if the printer thread is not available to do it. The
1918 * console_srcu_read_lock must be taken to ensure the console is
1919 * usable throughout flushing.
1920 */
1921 cookie = console_srcu_read_lock();
1922 printk_get_console_flush_type(&ft);
1923 if (console_is_usable(con, console_srcu_read_flags(con), true) &&
1924 !ft.nbcon_offload &&
1925 prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
1926 /*
1927 * If nbcon_atomic flushing is not available, fallback to
1928 * using the legacy loop.
1929 */
1930 if (ft.nbcon_atomic) {
1931 __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb));
1932 } else if (ft.legacy_direct) {
1933 if (console_trylock())
1934 console_unlock();
1935 } else if (ft.legacy_offload) {
1936 defer_console_output();
1937 }
1938 }
1939 console_srcu_read_unlock(cookie);
1940 }
1941 EXPORT_SYMBOL_GPL(nbcon_device_release);
1942
1943 /**
1944 * nbcon_kdb_try_acquire - Try to acquire nbcon console and enter unsafe
1945 * section
1946 * @con: The nbcon console to acquire
1947 * @wctxt: The nbcon write context to be used on success
1948 *
1949 * Context: Under console_srcu_read_lock() for emitting a single kdb message
1950 * using the given con->write_atomic() callback. Can be called
1951 * only when the console is usable at the moment.
1952 *
1953 * Return: True if the console was acquired. False otherwise.
1954 *
1955 * kdb emits messages on consoles registered for printk() without
1956 * storing them into the ring buffer. It has to acquire the console
1957 * ownerhip so that it could call con->write_atomic() callback a safe way.
1958 *
1959 * This function acquires the nbcon console using priority NBCON_PRIO_EMERGENCY
1960 * and marks it unsafe for handover/takeover.
1961 */
nbcon_kdb_try_acquire(struct console * con,struct nbcon_write_context * wctxt)1962 bool nbcon_kdb_try_acquire(struct console *con,
1963 struct nbcon_write_context *wctxt)
1964 {
1965 struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
1966
1967 memset(ctxt, 0, sizeof(*ctxt));
1968 ctxt->console = con;
1969 ctxt->prio = NBCON_PRIO_EMERGENCY;
1970
1971 if (!nbcon_context_try_acquire(ctxt, false))
1972 return false;
1973
1974 if (!nbcon_context_enter_unsafe(ctxt))
1975 return false;
1976
1977 return true;
1978 }
1979
1980 /**
1981 * nbcon_kdb_release - Exit unsafe section and release the nbcon console
1982 *
1983 * @wctxt: The nbcon write context initialized by a successful
1984 * nbcon_kdb_try_acquire()
1985 */
nbcon_kdb_release(struct nbcon_write_context * wctxt)1986 void nbcon_kdb_release(struct nbcon_write_context *wctxt)
1987 {
1988 struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
1989
1990 if (!nbcon_context_exit_unsafe(ctxt))
1991 return;
1992
1993 nbcon_context_release(ctxt);
1994
1995 /*
1996 * Flush any new printk() messages added when the console was blocked.
1997 * Only the console used by the given write context was blocked.
1998 * The console was locked only when the write_atomic() callback
1999 * was usable.
2000 */
2001 __nbcon_atomic_flush_pending_con(ctxt->console, prb_next_reserve_seq(prb));
2002 }
2003