xref: /linux/kernel/printk/nbcon.c (revision 4d38b88fd17e9989429e65420bf3c33ca53b2085)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright (C) 2022 Linutronix GmbH, John Ogness
3 // Copyright (C) 2022 Intel, Thomas Gleixner
4 
5 #include <linux/atomic.h>
6 #include <linux/bug.h>
7 #include <linux/console.h>
8 #include <linux/delay.h>
9 #include <linux/errno.h>
10 #include <linux/export.h>
11 #include <linux/init.h>
12 #include <linux/irqflags.h>
13 #include <linux/kdb.h>
14 #include <linux/kthread.h>
15 #include <linux/minmax.h>
16 #include <linux/panic.h>
17 #include <linux/percpu.h>
18 #include <linux/preempt.h>
19 #include <linux/slab.h>
20 #include <linux/smp.h>
21 #include <linux/stddef.h>
22 #include <linux/string.h>
23 #include <linux/types.h>
24 #include "internal.h"
25 #include "printk_ringbuffer.h"
26 /*
27  * Printk console printing implementation for consoles which does not depend
28  * on the legacy style console_lock mechanism.
29  *
30  * The state of the console is maintained in the "nbcon_state" atomic
31  * variable.
32  *
33  * The console is locked when:
34  *
35  *   - The 'prio' field contains the priority of the context that owns the
36  *     console. Only higher priority contexts are allowed to take over the
37  *     lock. A value of 0 (NBCON_PRIO_NONE) means the console is not locked.
38  *
39  *   - The 'cpu' field denotes on which CPU the console is locked. It is used
40  *     to prevent busy waiting on the same CPU. Also it informs the lock owner
41  *     that it has lost the lock in a more complex scenario when the lock was
42  *     taken over by a higher priority context, released, and taken on another
43  *     CPU with the same priority as the interrupted owner.
44  *
45  * The acquire mechanism uses a few more fields:
46  *
47  *   - The 'req_prio' field is used by the handover approach to make the
48  *     current owner aware that there is a context with a higher priority
49  *     waiting for the friendly handover.
50  *
51  *   - The 'unsafe' field allows to take over the console in a safe way in the
52  *     middle of emitting a message. The field is set only when accessing some
53  *     shared resources or when the console device is manipulated. It can be
54  *     cleared, for example, after emitting one character when the console
55  *     device is in a consistent state.
56  *
57  *   - The 'unsafe_takeover' field is set when a hostile takeover took the
58  *     console in an unsafe state. The console will stay in the unsafe state
59  *     until re-initialized.
60  *
61  * The acquire mechanism uses three approaches:
62  *
63  *   1) Direct acquire when the console is not owned or is owned by a lower
64  *      priority context and is in a safe state.
65  *
66  *   2) Friendly handover mechanism uses a request/grant handshake. It is used
67  *      when the current owner has lower priority and the console is in an
68  *      unsafe state.
69  *
70  *      The requesting context:
71  *
72  *        a) Sets its priority into the 'req_prio' field.
73  *
74  *        b) Waits (with a timeout) for the owning context to unlock the
75  *           console.
76  *
77  *        c) Takes the lock and clears the 'req_prio' field.
78  *
79  *      The owning context:
80  *
81  *        a) Observes the 'req_prio' field set on exit from the unsafe
82  *           console state.
83  *
84  *        b) Gives up console ownership by clearing the 'prio' field.
85  *
86  *   3) Unsafe hostile takeover allows to take over the lock even when the
87  *      console is an unsafe state. It is used only in panic() by the final
88  *      attempt to flush consoles in a try and hope mode.
89  *
90  *      Note that separate record buffers are used in panic(). As a result,
91  *      the messages can be read and formatted without any risk even after
92  *      using the hostile takeover in unsafe state.
93  *
94  * The release function simply clears the 'prio' field.
95  *
96  * All operations on @console::nbcon_state are atomic cmpxchg based to
97  * handle concurrency.
98  *
99  * The acquire/release functions implement only minimal policies:
100  *
101  *   - Preference for higher priority contexts.
102  *   - Protection of the panic CPU.
103  *
104  * All other policy decisions must be made at the call sites:
105  *
106  *   - What is marked as an unsafe section.
107  *   - Whether to spin-wait if there is already an owner and the console is
108  *     in an unsafe state.
109  *   - Whether to attempt an unsafe hostile takeover.
110  *
111  * The design allows to implement the well known:
112  *
113  *     acquire()
114  *     output_one_printk_record()
115  *     release()
116  *
117  * The output of one printk record might be interrupted with a higher priority
118  * context. The new owner is supposed to reprint the entire interrupted record
119  * from scratch.
120  */
121 
122 /* Counter of active nbcon emergency contexts. */
123 static atomic_t nbcon_cpu_emergency_cnt = ATOMIC_INIT(0);
124 
125 /**
126  * nbcon_state_set - Helper function to set the console state
127  * @con:	Console to update
128  * @new:	The new state to write
129  *
130  * Only to be used when the console is not yet or no longer visible in the
131  * system. Otherwise use nbcon_state_try_cmpxchg().
132  */
nbcon_state_set(struct console * con,struct nbcon_state * new)133 static inline void nbcon_state_set(struct console *con, struct nbcon_state *new)
134 {
135 	atomic_set(&ACCESS_PRIVATE(con, nbcon_state), new->atom);
136 }
137 
138 /**
139  * nbcon_state_read - Helper function to read the console state
140  * @con:	Console to read
141  * @state:	The state to store the result
142  */
nbcon_state_read(struct console * con,struct nbcon_state * state)143 static inline void nbcon_state_read(struct console *con, struct nbcon_state *state)
144 {
145 	state->atom = atomic_read(&ACCESS_PRIVATE(con, nbcon_state));
146 }
147 
148 /**
149  * nbcon_state_try_cmpxchg() - Helper function for atomic_try_cmpxchg() on console state
150  * @con:	Console to update
151  * @cur:	Old/expected state
152  * @new:	New state
153  *
154  * Return: True on success. False on fail and @cur is updated.
155  */
nbcon_state_try_cmpxchg(struct console * con,struct nbcon_state * cur,struct nbcon_state * new)156 static inline bool nbcon_state_try_cmpxchg(struct console *con, struct nbcon_state *cur,
157 					   struct nbcon_state *new)
158 {
159 	return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom);
160 }
161 
162 /**
163  * nbcon_seq_read - Read the current console sequence
164  * @con:	Console to read the sequence of
165  *
166  * Return:	Sequence number of the next record to print on @con.
167  */
nbcon_seq_read(struct console * con)168 u64 nbcon_seq_read(struct console *con)
169 {
170 	unsigned long nbcon_seq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_seq));
171 
172 	return __ulseq_to_u64seq(prb, nbcon_seq);
173 }
174 
175 /**
176  * nbcon_seq_force - Force console sequence to a specific value
177  * @con:	Console to work on
178  * @seq:	Sequence number value to set
179  *
180  * Only to be used during init (before registration) or in extreme situations
181  * (such as panic with CONSOLE_REPLAY_ALL).
182  */
nbcon_seq_force(struct console * con,u64 seq)183 void nbcon_seq_force(struct console *con, u64 seq)
184 {
185 	/*
186 	 * If the specified record no longer exists, the oldest available record
187 	 * is chosen. This is especially important on 32bit systems because only
188 	 * the lower 32 bits of the sequence number are stored. The upper 32 bits
189 	 * are derived from the sequence numbers available in the ringbuffer.
190 	 */
191 	u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));
192 
193 	atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __u64seq_to_ulseq(valid_seq));
194 }
195 
196 /**
197  * nbcon_seq_try_update - Try to update the console sequence number
198  * @ctxt:	Pointer to an acquire context that contains
199  *		all information about the acquire mode
200  * @new_seq:	The new sequence number to set
201  *
202  * @ctxt->seq is updated to the new value of @con::nbcon_seq (expanded to
203  * the 64bit value). This could be a different value than @new_seq if
204  * nbcon_seq_force() was used or the current context no longer owns the
205  * console. In the later case, it will stop printing anyway.
206  */
nbcon_seq_try_update(struct nbcon_context * ctxt,u64 new_seq)207 static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
208 {
209 	unsigned long nbcon_seq = __u64seq_to_ulseq(ctxt->seq);
210 	struct console *con = ctxt->console;
211 
212 	if (atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_seq), &nbcon_seq,
213 				    __u64seq_to_ulseq(new_seq))) {
214 		ctxt->seq = new_seq;
215 	} else {
216 		ctxt->seq = nbcon_seq_read(con);
217 	}
218 }
219 
220 /**
221  * nbcon_context_try_acquire_direct - Try to acquire directly
222  * @ctxt:		The context of the caller
223  * @cur:		The current console state
224  * @is_reacquire:	This acquire is a reacquire
225  *
226  * Acquire the console when it is released. Also acquire the console when
227  * the current owner has a lower priority and the console is in a safe state.
228  *
229  * Return:	0 on success. Otherwise, an error code on failure. Also @cur
230  *		is updated to the latest state when failed to modify it.
231  *
232  * Errors:
233  *
234  *	-EPERM:		A panic is in progress and this is neither the panic
235  *			CPU nor is this a reacquire. Or the current owner or
236  *			waiter has the same or higher priority. No acquire
237  *			method can be successful in these cases.
238  *
239  *	-EBUSY:		The current owner has a lower priority but the console
240  *			in an unsafe state. The caller should try using
241  *			the handover acquire method.
242  */
nbcon_context_try_acquire_direct(struct nbcon_context * ctxt,struct nbcon_state * cur,bool is_reacquire)243 static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
244 					    struct nbcon_state *cur, bool is_reacquire)
245 {
246 	unsigned int cpu = smp_processor_id();
247 	struct console *con = ctxt->console;
248 	struct nbcon_state new;
249 
250 	do {
251 		/*
252 		 * Panic does not imply that the console is owned. However,
253 		 * since all non-panic CPUs are stopped during panic(), it
254 		 * is safer to have them avoid gaining console ownership.
255 		 *
256 		 * One exception is when kdb has locked for printing on this CPU.
257 		 *
258 		 * Second exception is a reacquire (and an unsafe takeover
259 		 * has not previously occurred) then it is allowed to attempt
260 		 * a direct acquire in panic. This gives console drivers an
261 		 * opportunity to perform any necessary cleanup if they were
262 		 * interrupted by the panic CPU while printing.
263 		 */
264 		if (panic_on_other_cpu() &&
265 		    !kdb_printf_on_this_cpu() &&
266 		    (!is_reacquire || cur->unsafe_takeover)) {
267 			return -EPERM;
268 		}
269 
270 		if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
271 			return -EPERM;
272 
273 		if (cur->unsafe)
274 			return -EBUSY;
275 
276 		/*
277 		 * The console should never be safe for a direct acquire
278 		 * if an unsafe hostile takeover has ever happened.
279 		 */
280 		WARN_ON_ONCE(cur->unsafe_takeover);
281 
282 		new.atom = cur->atom;
283 		new.prio	= ctxt->prio;
284 		new.req_prio	= NBCON_PRIO_NONE;
285 		new.unsafe	= cur->unsafe_takeover;
286 		new.cpu		= cpu;
287 
288 	} while (!nbcon_state_try_cmpxchg(con, cur, &new));
289 
290 	return 0;
291 }
292 
nbcon_waiter_matches(struct nbcon_state * cur,int expected_prio)293 static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
294 {
295 	/*
296 	 * The request context is well defined by the @req_prio because:
297 	 *
298 	 * - Only a context with a priority higher than the owner can become
299 	 *   a waiter.
300 	 * - Only a context with a priority higher than the waiter can
301 	 *   directly take over the request.
302 	 * - There are only three priorities.
303 	 * - Only one CPU is allowed to request PANIC priority.
304 	 * - Lower priorities are ignored during panic() until reboot.
305 	 *
306 	 * As a result, the following scenario is *not* possible:
307 	 *
308 	 * 1. This context is currently a waiter.
309 	 * 2. Another context with a higher priority than this context
310 	 *    directly takes ownership.
311 	 * 3. The higher priority context releases the ownership.
312 	 * 4. Another lower priority context takes the ownership.
313 	 * 5. Another context with the same priority as this context
314 	 *    creates a request and starts waiting.
315 	 *
316 	 * Event #1 implies this context is EMERGENCY.
317 	 * Event #2 implies the new context is PANIC.
318 	 * Event #3 occurs when panic() has flushed the console.
319 	 * Event #4 occurs when a non-panic CPU reacquires.
320 	 * Event #5 is not possible due to the panic_on_other_cpu() check
321 	 *          in nbcon_context_try_acquire_handover().
322 	 */
323 
324 	return (cur->req_prio == expected_prio);
325 }
326 
327 /**
328  * nbcon_context_try_acquire_requested - Try to acquire after having
329  *					 requested a handover
330  * @ctxt:	The context of the caller
331  * @cur:	The current console state
332  *
333  * This is a helper function for nbcon_context_try_acquire_handover().
334  * It is called when the console is in an unsafe state. The current
335  * owner will release the console on exit from the unsafe region.
336  *
337  * Return:	0 on success and @cur is updated to the new console state.
338  *		Otherwise an error code on failure.
339  *
340  * Errors:
341  *
342  *	-EPERM:		A panic is in progress and this is not the panic CPU
343  *			or this context is no longer the waiter.
344  *
345  *	-EBUSY:		The console is still locked. The caller should
346  *			continue waiting.
347  *
348  * Note: The caller must still remove the request when an error has occurred
349  *       except when this context is no longer the waiter.
350  */
nbcon_context_try_acquire_requested(struct nbcon_context * ctxt,struct nbcon_state * cur)351 static int nbcon_context_try_acquire_requested(struct nbcon_context *ctxt,
352 					       struct nbcon_state *cur)
353 {
354 	unsigned int cpu = smp_processor_id();
355 	struct console *con = ctxt->console;
356 	struct nbcon_state new;
357 
358 	/* Note that the caller must still remove the request! */
359 	if (panic_on_other_cpu())
360 		return -EPERM;
361 
362 	/*
363 	 * Note that the waiter will also change if there was an unsafe
364 	 * hostile takeover.
365 	 */
366 	if (!nbcon_waiter_matches(cur, ctxt->prio))
367 		return -EPERM;
368 
369 	/* If still locked, caller should continue waiting. */
370 	if (cur->prio != NBCON_PRIO_NONE)
371 		return -EBUSY;
372 
373 	/*
374 	 * The previous owner should have never released ownership
375 	 * in an unsafe region.
376 	 */
377 	WARN_ON_ONCE(cur->unsafe);
378 
379 	new.atom = cur->atom;
380 	new.prio	= ctxt->prio;
381 	new.req_prio	= NBCON_PRIO_NONE;
382 	new.unsafe	= cur->unsafe_takeover;
383 	new.cpu		= cpu;
384 
385 	if (!nbcon_state_try_cmpxchg(con, cur, &new)) {
386 		/*
387 		 * The acquire could fail only when it has been taken
388 		 * over by a higher priority context.
389 		 */
390 		WARN_ON_ONCE(nbcon_waiter_matches(cur, ctxt->prio));
391 		return -EPERM;
392 	}
393 
394 	/* Handover success. This context now owns the console. */
395 	return 0;
396 }
397 
398 /**
399  * nbcon_context_try_acquire_handover - Try to acquire via handover
400  * @ctxt:	The context of the caller
401  * @cur:	The current console state
402  *
403  * The function must be called only when the context has higher priority
404  * than the current owner and the console is in an unsafe state.
405  * It is the case when nbcon_context_try_acquire_direct() returns -EBUSY.
406  *
407  * The function sets "req_prio" field to make the current owner aware of
408  * the request. Then it waits until the current owner releases the console,
409  * or an even higher context takes over the request, or timeout expires.
410  *
411  * The current owner checks the "req_prio" field on exit from the unsafe
412  * region and releases the console. It does not touch the "req_prio" field
413  * so that the console stays reserved for the waiter.
414  *
415  * Return:	0 on success. Otherwise, an error code on failure. Also @cur
416  *		is updated to the latest state when failed to modify it.
417  *
418  * Errors:
419  *
420  *	-EPERM:		A panic is in progress and this is not the panic CPU.
421  *			Or a higher priority context has taken over the
422  *			console or the handover request.
423  *
424  *	-EBUSY:		The current owner is on the same CPU so that the hand
425  *			shake could not work. Or the current owner is not
426  *			willing to wait (zero timeout). Or the console does
427  *			not enter the safe state before timeout passed. The
428  *			caller might still use the unsafe hostile takeover
429  *			when allowed.
430  *
431  *	-EAGAIN:	@cur has changed when creating the handover request.
432  *			The caller should retry with direct acquire.
433  */
nbcon_context_try_acquire_handover(struct nbcon_context * ctxt,struct nbcon_state * cur)434 static int nbcon_context_try_acquire_handover(struct nbcon_context *ctxt,
435 					      struct nbcon_state *cur)
436 {
437 	unsigned int cpu = smp_processor_id();
438 	struct console *con = ctxt->console;
439 	struct nbcon_state new;
440 	int timeout;
441 	int request_err = -EBUSY;
442 
443 	/*
444 	 * Check that the handover is called when the direct acquire failed
445 	 * with -EBUSY.
446 	 */
447 	WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
448 	WARN_ON_ONCE(!cur->unsafe);
449 
450 	/*
451 	 * Panic does not imply that the console is owned. However, it
452 	 * is critical that non-panic CPUs during panic are unable to
453 	 * wait for a handover in order to satisfy the assumptions of
454 	 * nbcon_waiter_matches(). In particular, the assumption that
455 	 * lower priorities are ignored during panic.
456 	 */
457 	if (panic_on_other_cpu())
458 		return -EPERM;
459 
460 	/* Handover is not possible on the same CPU. */
461 	if (cur->cpu == cpu)
462 		return -EBUSY;
463 
464 	/*
465 	 * Console stays unsafe after an unsafe takeover until re-initialized.
466 	 * Waiting is not going to help in this case.
467 	 */
468 	if (cur->unsafe_takeover)
469 		return -EBUSY;
470 
471 	/* Is the caller willing to wait? */
472 	if (ctxt->spinwait_max_us == 0)
473 		return -EBUSY;
474 
475 	/*
476 	 * Setup a request for the handover. The caller should try to acquire
477 	 * the console directly when the current state has been modified.
478 	 */
479 	new.atom = cur->atom;
480 	new.req_prio = ctxt->prio;
481 	if (!nbcon_state_try_cmpxchg(con, cur, &new))
482 		return -EAGAIN;
483 
484 	cur->atom = new.atom;
485 
486 	/* Wait until there is no owner and then acquire the console. */
487 	for (timeout = ctxt->spinwait_max_us; timeout >= 0; timeout--) {
488 		/* On successful acquire, this request is cleared. */
489 		request_err = nbcon_context_try_acquire_requested(ctxt, cur);
490 		if (!request_err)
491 			return 0;
492 
493 		/*
494 		 * If the acquire should be aborted, it must be ensured
495 		 * that the request is removed before returning to caller.
496 		 */
497 		if (request_err == -EPERM)
498 			break;
499 
500 		udelay(1);
501 
502 		/* Re-read the state because some time has passed. */
503 		nbcon_state_read(con, cur);
504 	}
505 
506 	/* Timed out or aborted. Carefully remove handover request. */
507 	do {
508 		/*
509 		 * No need to remove request if there is a new waiter. This
510 		 * can only happen if a higher priority context has taken over
511 		 * the console or the handover request.
512 		 */
513 		if (!nbcon_waiter_matches(cur, ctxt->prio))
514 			return -EPERM;
515 
516 		/* Unset request for handover. */
517 		new.atom = cur->atom;
518 		new.req_prio = NBCON_PRIO_NONE;
519 		if (nbcon_state_try_cmpxchg(con, cur, &new)) {
520 			/*
521 			 * Request successfully unset. Report failure of
522 			 * acquiring via handover.
523 			 */
524 			cur->atom = new.atom;
525 			return request_err;
526 		}
527 
528 		/*
529 		 * Unable to remove request. Try to acquire in case
530 		 * the owner has released the lock.
531 		 */
532 	} while (nbcon_context_try_acquire_requested(ctxt, cur));
533 
534 	/* Lucky timing. The acquire succeeded while removing the request. */
535 	return 0;
536 }
537 
538 /**
539  * nbcon_context_try_acquire_hostile - Acquire via unsafe hostile takeover
540  * @ctxt:	The context of the caller
541  * @cur:	The current console state
542  *
543  * Acquire the console even in the unsafe state.
544  *
545  * It can be permitted by setting the 'allow_unsafe_takeover' field only
546  * by the final attempt to flush messages in panic().
547  *
548  * Return:	0 on success. -EPERM when not allowed by the context.
549  */
nbcon_context_try_acquire_hostile(struct nbcon_context * ctxt,struct nbcon_state * cur)550 static int nbcon_context_try_acquire_hostile(struct nbcon_context *ctxt,
551 					     struct nbcon_state *cur)
552 {
553 	unsigned int cpu = smp_processor_id();
554 	struct console *con = ctxt->console;
555 	struct nbcon_state new;
556 
557 	if (!ctxt->allow_unsafe_takeover)
558 		return -EPERM;
559 
560 	/* Ensure caller is allowed to perform unsafe hostile takeovers. */
561 	if (WARN_ON_ONCE(ctxt->prio != NBCON_PRIO_PANIC))
562 		return -EPERM;
563 
564 	/*
565 	 * Check that try_acquire_direct() and try_acquire_handover() returned
566 	 * -EBUSY in the right situation.
567 	 */
568 	WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
569 	WARN_ON_ONCE(cur->unsafe != true);
570 
571 	do {
572 		new.atom = cur->atom;
573 		new.cpu			= cpu;
574 		new.prio		= ctxt->prio;
575 		new.unsafe		|= cur->unsafe_takeover;
576 		new.unsafe_takeover	|= cur->unsafe;
577 
578 	} while (!nbcon_state_try_cmpxchg(con, cur, &new));
579 
580 	return 0;
581 }
582 
583 static struct printk_buffers panic_nbcon_pbufs;
584 
585 /**
586  * nbcon_context_try_acquire - Try to acquire nbcon console
587  * @ctxt:		The context of the caller
588  * @is_reacquire:	This acquire is a reacquire
589  *
590  * Context:	Under @ctxt->con->device_lock() or local_irq_save().
591  * Return:	True if the console was acquired. False otherwise.
592  *
593  * If the caller allowed an unsafe hostile takeover, on success the
594  * caller should check the current console state to see if it is
595  * in an unsafe state. Otherwise, on success the caller may assume
596  * the console is not in an unsafe state.
597  */
nbcon_context_try_acquire(struct nbcon_context * ctxt,bool is_reacquire)598 static bool nbcon_context_try_acquire(struct nbcon_context *ctxt, bool is_reacquire)
599 {
600 	struct console *con = ctxt->console;
601 	struct nbcon_state cur;
602 	int err;
603 
604 	nbcon_state_read(con, &cur);
605 try_again:
606 	err = nbcon_context_try_acquire_direct(ctxt, &cur, is_reacquire);
607 	if (err != -EBUSY)
608 		goto out;
609 
610 	err = nbcon_context_try_acquire_handover(ctxt, &cur);
611 	if (err == -EAGAIN)
612 		goto try_again;
613 	if (err != -EBUSY)
614 		goto out;
615 
616 	err = nbcon_context_try_acquire_hostile(ctxt, &cur);
617 out:
618 	if (err)
619 		return false;
620 
621 	/* Acquire succeeded. */
622 
623 	/* Assign the appropriate buffer for this context. */
624 	if (panic_on_this_cpu())
625 		ctxt->pbufs = &panic_nbcon_pbufs;
626 	else
627 		ctxt->pbufs = con->pbufs;
628 
629 	/* Set the record sequence for this context to print. */
630 	ctxt->seq = nbcon_seq_read(ctxt->console);
631 
632 	return true;
633 }
634 
nbcon_owner_matches(struct nbcon_state * cur,int expected_cpu,int expected_prio)635 static bool nbcon_owner_matches(struct nbcon_state *cur, int expected_cpu,
636 				int expected_prio)
637 {
638 	/*
639 	 * A similar function, nbcon_waiter_matches(), only deals with
640 	 * EMERGENCY and PANIC priorities. However, this function must also
641 	 * deal with the NORMAL priority, which requires additional checks
642 	 * and constraints.
643 	 *
644 	 * For the case where preemption and interrupts are disabled, it is
645 	 * enough to also verify that the owning CPU has not changed.
646 	 *
647 	 * For the case where preemption or interrupts are enabled, an
648 	 * external synchronization method *must* be used. In particular,
649 	 * the driver-specific locking mechanism used in device_lock()
650 	 * (including disabling migration) should be used. It prevents
651 	 * scenarios such as:
652 	 *
653 	 * 1. [Task A] owns a context with NBCON_PRIO_NORMAL on [CPU X] and
654 	 *    is scheduled out.
655 	 * 2. Another context takes over the lock with NBCON_PRIO_EMERGENCY
656 	 *    and releases it.
657 	 * 3. [Task B] acquires a context with NBCON_PRIO_NORMAL on [CPU X]
658 	 *    and is scheduled out.
659 	 * 4. [Task A] gets running on [CPU X] and sees that the console is
660 	 *    still owned by a task on [CPU X] with NBON_PRIO_NORMAL. Thus
661 	 *    [Task A] thinks it is the owner when it is not.
662 	 */
663 
664 	if (cur->prio != expected_prio)
665 		return false;
666 
667 	if (cur->cpu != expected_cpu)
668 		return false;
669 
670 	return true;
671 }
672 
673 /**
674  * nbcon_context_release - Release the console
675  * @ctxt:	The nbcon context from nbcon_context_try_acquire()
676  */
nbcon_context_release(struct nbcon_context * ctxt)677 static void nbcon_context_release(struct nbcon_context *ctxt)
678 {
679 	unsigned int cpu = smp_processor_id();
680 	struct console *con = ctxt->console;
681 	struct nbcon_state cur;
682 	struct nbcon_state new;
683 
684 	nbcon_state_read(con, &cur);
685 
686 	do {
687 		if (!nbcon_owner_matches(&cur, cpu, ctxt->prio))
688 			break;
689 
690 		new.atom = cur.atom;
691 		new.prio = NBCON_PRIO_NONE;
692 
693 		/*
694 		 * If @unsafe_takeover is set, it is kept set so that
695 		 * the state remains permanently unsafe.
696 		 */
697 		new.unsafe |= cur.unsafe_takeover;
698 
699 	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));
700 
701 	ctxt->pbufs = NULL;
702 }
703 
704 /**
705  * nbcon_context_can_proceed - Check whether ownership can proceed
706  * @ctxt:	The nbcon context from nbcon_context_try_acquire()
707  * @cur:	The current console state
708  *
709  * Return:	True if this context still owns the console. False if
710  *		ownership was handed over or taken.
711  *
712  * Must be invoked when entering the unsafe state to make sure that it still
713  * owns the lock. Also must be invoked when exiting the unsafe context
714  * to eventually free the lock for a higher priority context which asked
715  * for the friendly handover.
716  *
717  * It can be called inside an unsafe section when the console is just
718  * temporary in safe state instead of exiting and entering the unsafe
719  * state.
720  *
721  * Also it can be called in the safe context before doing an expensive
722  * safe operation. It does not make sense to do the operation when
723  * a higher priority context took the lock.
724  *
725  * When this function returns false then the calling context no longer owns
726  * the console and is no longer allowed to go forward. In this case it must
727  * back out immediately and carefully. The buffer content is also no longer
728  * trusted since it no longer belongs to the calling context.
729  */
nbcon_context_can_proceed(struct nbcon_context * ctxt,struct nbcon_state * cur)730 static bool nbcon_context_can_proceed(struct nbcon_context *ctxt, struct nbcon_state *cur)
731 {
732 	unsigned int cpu = smp_processor_id();
733 
734 	/* Make sure this context still owns the console. */
735 	if (!nbcon_owner_matches(cur, cpu, ctxt->prio))
736 		return false;
737 
738 	/* The console owner can proceed if there is no waiter. */
739 	if (cur->req_prio == NBCON_PRIO_NONE)
740 		return true;
741 
742 	/*
743 	 * A console owner within an unsafe region is always allowed to
744 	 * proceed, even if there are waiters. It can perform a handover
745 	 * when exiting the unsafe region. Otherwise the waiter will
746 	 * need to perform an unsafe hostile takeover.
747 	 */
748 	if (cur->unsafe)
749 		return true;
750 
751 	/* Waiters always have higher priorities than owners. */
752 	WARN_ON_ONCE(cur->req_prio <= cur->prio);
753 
754 	/*
755 	 * Having a safe point for take over and eventually a few
756 	 * duplicated characters or a full line is way better than a
757 	 * hostile takeover. Post processing can take care of the garbage.
758 	 * Release and hand over.
759 	 */
760 	nbcon_context_release(ctxt);
761 
762 	/*
763 	 * It is not clear whether the waiter really took over ownership. The
764 	 * outermost callsite must make the final decision whether console
765 	 * ownership is needed for it to proceed. If yes, it must reacquire
766 	 * ownership (possibly hostile) before carefully proceeding.
767 	 *
768 	 * The calling context no longer owns the console so go back all the
769 	 * way instead of trying to implement reacquire heuristics in tons of
770 	 * places.
771 	 */
772 	return false;
773 }
774 
775 /**
776  * nbcon_can_proceed - Check whether ownership can proceed
777  * @wctxt:	The write context that was handed to the write function
778  *
779  * Return:	True if this context still owns the console. False if
780  *		ownership was handed over or taken.
781  *
782  * It is used in nbcon_enter_unsafe() to make sure that it still owns the
783  * lock. Also it is used in nbcon_exit_unsafe() to eventually free the lock
784  * for a higher priority context which asked for the friendly handover.
785  *
786  * It can be called inside an unsafe section when the console is just
787  * temporary in safe state instead of exiting and entering the unsafe state.
788  *
789  * Also it can be called in the safe context before doing an expensive safe
790  * operation. It does not make sense to do the operation when a higher
791  * priority context took the lock.
792  *
793  * When this function returns false then the calling context no longer owns
794  * the console and is no longer allowed to go forward. In this case it must
795  * back out immediately and carefully. The buffer content is also no longer
796  * trusted since it no longer belongs to the calling context.
797  */
nbcon_can_proceed(struct nbcon_write_context * wctxt)798 bool nbcon_can_proceed(struct nbcon_write_context *wctxt)
799 {
800 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
801 	struct console *con = ctxt->console;
802 	struct nbcon_state cur;
803 
804 	nbcon_state_read(con, &cur);
805 
806 	return nbcon_context_can_proceed(ctxt, &cur);
807 }
808 EXPORT_SYMBOL_GPL(nbcon_can_proceed);
809 
810 #define nbcon_context_enter_unsafe(c)	__nbcon_context_update_unsafe(c, true)
811 #define nbcon_context_exit_unsafe(c)	__nbcon_context_update_unsafe(c, false)
812 
813 /**
814  * __nbcon_context_update_unsafe - Update the unsafe bit in @con->nbcon_state
815  * @ctxt:	The nbcon context from nbcon_context_try_acquire()
816  * @unsafe:	The new value for the unsafe bit
817  *
818  * Return:	True if the unsafe state was updated and this context still
819  *		owns the console. Otherwise false if ownership was handed
820  *		over or taken.
821  *
822  * This function allows console owners to modify the unsafe status of the
823  * console.
824  *
825  * When this function returns false then the calling context no longer owns
826  * the console and is no longer allowed to go forward. In this case it must
827  * back out immediately and carefully. The buffer content is also no longer
828  * trusted since it no longer belongs to the calling context.
829  *
830  * Internal helper to avoid duplicated code.
831  */
__nbcon_context_update_unsafe(struct nbcon_context * ctxt,bool unsafe)832 static bool __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsafe)
833 {
834 	struct console *con = ctxt->console;
835 	struct nbcon_state cur;
836 	struct nbcon_state new;
837 
838 	nbcon_state_read(con, &cur);
839 
840 	do {
841 		/*
842 		 * The unsafe bit must not be cleared if an
843 		 * unsafe hostile takeover has occurred.
844 		 */
845 		if (!unsafe && cur.unsafe_takeover)
846 			goto out;
847 
848 		if (!nbcon_context_can_proceed(ctxt, &cur))
849 			return false;
850 
851 		new.atom = cur.atom;
852 		new.unsafe = unsafe;
853 	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));
854 
855 	cur.atom = new.atom;
856 out:
857 	return nbcon_context_can_proceed(ctxt, &cur);
858 }
859 
nbcon_write_context_set_buf(struct nbcon_write_context * wctxt,char * buf,unsigned int len)860 void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
861 				 char *buf, unsigned int len)
862 {
863 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
864 	struct console *con = ctxt->console;
865 	struct nbcon_state cur;
866 
867 	wctxt->outbuf = buf;
868 	wctxt->len = len;
869 	nbcon_state_read(con, &cur);
870 	wctxt->unsafe_takeover = cur.unsafe_takeover;
871 }
872 
873 /**
874  * nbcon_enter_unsafe - Enter an unsafe region in the driver
875  * @wctxt:	The write context that was handed to the write function
876  *
877  * Return:	True if this context still owns the console. False if
878  *		ownership was handed over or taken.
879  *
880  * When this function returns false then the calling context no longer owns
881  * the console and is no longer allowed to go forward. In this case it must
882  * back out immediately and carefully. The buffer content is also no longer
883  * trusted since it no longer belongs to the calling context.
884  */
nbcon_enter_unsafe(struct nbcon_write_context * wctxt)885 bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt)
886 {
887 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
888 	bool is_owner;
889 
890 	is_owner = nbcon_context_enter_unsafe(ctxt);
891 	if (!is_owner)
892 		nbcon_write_context_set_buf(wctxt, NULL, 0);
893 	return is_owner;
894 }
895 EXPORT_SYMBOL_GPL(nbcon_enter_unsafe);
896 
897 /**
898  * nbcon_exit_unsafe - Exit an unsafe region in the driver
899  * @wctxt:	The write context that was handed to the write function
900  *
901  * Return:	True if this context still owns the console. False if
902  *		ownership was handed over or taken.
903  *
904  * When this function returns false then the calling context no longer owns
905  * the console and is no longer allowed to go forward. In this case it must
906  * back out immediately and carefully. The buffer content is also no longer
907  * trusted since it no longer belongs to the calling context.
908  */
nbcon_exit_unsafe(struct nbcon_write_context * wctxt)909 bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt)
910 {
911 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
912 	bool ret;
913 
914 	ret = nbcon_context_exit_unsafe(ctxt);
915 	if (!ret)
916 		nbcon_write_context_set_buf(wctxt, NULL, 0);
917 	return ret;
918 }
919 EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
920 
921 /**
922  * nbcon_reacquire_nobuf - Reacquire a console after losing ownership
923  *				while printing
924  * @wctxt:	The write context that was handed to the write callback
925  *
926  * Since ownership can be lost at any time due to handover or takeover, a
927  * printing context _must_ be prepared to back out immediately and
928  * carefully. However, there are scenarios where the printing context must
929  * reacquire ownership in order to finalize or revert hardware changes.
930  *
931  * This function allows a printing context to reacquire ownership using the
932  * same priority as its previous ownership.
933  *
934  * Note that after a successful reacquire the printing context will have no
935  * output buffer because that has been lost. This function cannot be used to
936  * resume printing.
937  */
nbcon_reacquire_nobuf(struct nbcon_write_context * wctxt)938 void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt)
939 {
940 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
941 
942 	while (!nbcon_context_try_acquire(ctxt, true))
943 		cpu_relax();
944 
945 	nbcon_write_context_set_buf(wctxt, NULL, 0);
946 }
947 EXPORT_SYMBOL_GPL(nbcon_reacquire_nobuf);
948 
949 /**
950  * nbcon_emit_next_record - Emit a record in the acquired context
951  * @wctxt:	The write context that will be handed to the write function
952  * @use_atomic:	True if the write_atomic() callback is to be used
953  *
954  * Return:	True if this context still owns the console. False if
955  *		ownership was handed over or taken.
956  *
957  * When this function returns false then the calling context no longer owns
958  * the console and is no longer allowed to go forward. In this case it must
959  * back out immediately and carefully. The buffer content is also no longer
960  * trusted since it no longer belongs to the calling context. If the caller
961  * wants to do more it must reacquire the console first.
962  *
963  * When true is returned, @wctxt->ctxt.backlog indicates whether there are
964  * still records pending in the ringbuffer,
965  */
nbcon_emit_next_record(struct nbcon_write_context * wctxt,bool use_atomic)966 static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt, bool use_atomic)
967 {
968 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
969 	struct console *con = ctxt->console;
970 	bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
971 	struct printk_message pmsg = {
972 		.pbufs = ctxt->pbufs,
973 	};
974 	unsigned long con_dropped;
975 	struct nbcon_state cur;
976 	unsigned long dropped;
977 	unsigned long ulseq;
978 
979 	/*
980 	 * This function should never be called for consoles that have not
981 	 * implemented the necessary callback for writing: i.e. legacy
982 	 * consoles and, when atomic, nbcon consoles with no write_atomic().
983 	 * Handle it as if ownership was lost and try to continue.
984 	 *
985 	 * Note that for nbcon consoles the write_thread() callback is
986 	 * mandatory and was already checked in nbcon_alloc().
987 	 */
988 	if (WARN_ON_ONCE((use_atomic && !con->write_atomic) ||
989 			 !(console_srcu_read_flags(con) & CON_NBCON))) {
990 		nbcon_context_release(ctxt);
991 		return false;
992 	}
993 
994 	/*
995 	 * The printk buffers are filled within an unsafe section. This
996 	 * prevents NBCON_PRIO_NORMAL and NBCON_PRIO_EMERGENCY from
997 	 * clobbering each other.
998 	 */
999 
1000 	if (!nbcon_context_enter_unsafe(ctxt))
1001 		return false;
1002 
1003 	ctxt->backlog = printk_get_next_message(&pmsg, ctxt->seq, is_extended, true);
1004 	if (!ctxt->backlog)
1005 		return nbcon_context_exit_unsafe(ctxt);
1006 
1007 	/*
1008 	 * @con->dropped is not protected in case of an unsafe hostile
1009 	 * takeover. In that situation the update can be racy so
1010 	 * annotate it accordingly.
1011 	 */
1012 	con_dropped = data_race(READ_ONCE(con->dropped));
1013 
1014 	dropped = con_dropped + pmsg.dropped;
1015 	if (dropped && !is_extended)
1016 		console_prepend_dropped(&pmsg, dropped);
1017 
1018 	/*
1019 	 * If the previous owner was assigned the same record, this context
1020 	 * has taken over ownership and is replaying the record. Prepend a
1021 	 * message to let the user know the record is replayed.
1022 	 */
1023 	ulseq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_prev_seq));
1024 	if (__ulseq_to_u64seq(prb, ulseq) == pmsg.seq) {
1025 		console_prepend_replay(&pmsg);
1026 	} else {
1027 		/*
1028 		 * Ensure this context is still the owner before trying to
1029 		 * update @nbcon_prev_seq. Otherwise the value in @ulseq may
1030 		 * not be from the previous owner and instead be some later
1031 		 * value from the context that took over ownership.
1032 		 */
1033 		nbcon_state_read(con, &cur);
1034 		if (!nbcon_context_can_proceed(ctxt, &cur))
1035 			return false;
1036 
1037 		atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_prev_seq), &ulseq,
1038 					__u64seq_to_ulseq(pmsg.seq));
1039 	}
1040 
1041 	if (!nbcon_context_exit_unsafe(ctxt))
1042 		return false;
1043 
1044 	/* For skipped records just update seq/dropped in @con. */
1045 	if (pmsg.outbuf_len == 0)
1046 		goto update_con;
1047 
1048 	/* Initialize the write context for driver callbacks. */
1049 	nbcon_write_context_set_buf(wctxt, &pmsg.pbufs->outbuf[0], pmsg.outbuf_len);
1050 
1051 	if (use_atomic)
1052 		con->write_atomic(con, wctxt);
1053 	else
1054 		con->write_thread(con, wctxt);
1055 
1056 	if (!wctxt->outbuf) {
1057 		/*
1058 		 * Ownership was lost and reacquired by the driver. Handle it
1059 		 * as if ownership was lost.
1060 		 */
1061 		nbcon_context_release(ctxt);
1062 		return false;
1063 	}
1064 
1065 	/*
1066 	 * Ownership may have been lost but _not_ reacquired by the driver.
1067 	 * This case is detected and handled when entering unsafe to update
1068 	 * dropped/seq values.
1069 	 */
1070 
1071 	/*
1072 	 * Since any dropped message was successfully output, reset the
1073 	 * dropped count for the console.
1074 	 */
1075 	dropped = 0;
1076 update_con:
1077 	/*
1078 	 * The dropped count and the sequence number are updated within an
1079 	 * unsafe section. This limits update races to the panic context and
1080 	 * allows the panic context to win.
1081 	 */
1082 
1083 	if (!nbcon_context_enter_unsafe(ctxt))
1084 		return false;
1085 
1086 	if (dropped != con_dropped) {
1087 		/* Counterpart to the READ_ONCE() above. */
1088 		WRITE_ONCE(con->dropped, dropped);
1089 	}
1090 
1091 	nbcon_seq_try_update(ctxt, pmsg.seq + 1);
1092 
1093 	return nbcon_context_exit_unsafe(ctxt);
1094 }
1095 
1096 /*
1097  * nbcon_emit_one - Print one record for an nbcon console using the
1098  *			specified callback
1099  * @wctxt:	An initialized write context struct to use for this context
1100  * @use_atomic:	True if the write_atomic() callback is to be used
1101  *
1102  * Return:	True, when a record has been printed and there are still
1103  *		pending records. The caller might want to continue flushing.
1104  *
1105  *		False, when there is no pending record, or when the console
1106  *		context cannot be acquired, or the ownership has been lost.
1107  *		The caller should give up. Either the job is done, cannot be
1108  *		done, or will be handled by the owning context.
1109  *
1110  * This is an internal helper to handle the locking of the console before
1111  * calling nbcon_emit_next_record().
1112  */
nbcon_emit_one(struct nbcon_write_context * wctxt,bool use_atomic)1113 static bool nbcon_emit_one(struct nbcon_write_context *wctxt, bool use_atomic)
1114 {
1115 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
1116 	struct console *con = ctxt->console;
1117 	unsigned long flags;
1118 	bool ret = false;
1119 
1120 	if (!use_atomic) {
1121 		con->device_lock(con, &flags);
1122 
1123 		/*
1124 		 * Ensure this stays on the CPU to make handover and
1125 		 * takeover possible.
1126 		 */
1127 		cant_migrate();
1128 	}
1129 
1130 	if (!nbcon_context_try_acquire(ctxt, false))
1131 		goto out;
1132 
1133 	/*
1134 	 * nbcon_emit_next_record() returns false when the console was
1135 	 * handed over or taken over. In both cases the context is no
1136 	 * longer valid.
1137 	 *
1138 	 * The higher priority printing context takes over responsibility
1139 	 * to print the pending records.
1140 	 */
1141 	if (!nbcon_emit_next_record(wctxt, use_atomic))
1142 		goto out;
1143 
1144 	nbcon_context_release(ctxt);
1145 
1146 	ret = ctxt->backlog;
1147 out:
1148 	if (!use_atomic)
1149 		con->device_unlock(con, flags);
1150 	return ret;
1151 }
1152 
1153 /**
1154  * nbcon_kthread_should_wakeup - Check whether a printer thread should wakeup
1155  * @con:	Console to operate on
1156  * @ctxt:	The nbcon context from nbcon_context_try_acquire()
1157  *
1158  * Return:	True if the thread should shutdown or if the console is
1159  *		allowed to print and a record is available. False otherwise.
1160  *
1161  * After the thread wakes up, it must first check if it should shutdown before
1162  * attempting any printing.
1163  */
nbcon_kthread_should_wakeup(struct console * con,struct nbcon_context * ctxt)1164 static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_context *ctxt)
1165 {
1166 	bool ret = false;
1167 	short flags;
1168 	int cookie;
1169 
1170 	if (kthread_should_stop())
1171 		return true;
1172 
1173 	/*
1174 	 * Block the kthread when the system is in an emergency or panic mode.
1175 	 * It increases the chance that these contexts would be able to show
1176 	 * the messages directly. And it reduces the risk of interrupted writes
1177 	 * where the context with a higher priority takes over the nbcon console
1178 	 * ownership in the middle of a message.
1179 	 */
1180 	if (unlikely(atomic_read(&nbcon_cpu_emergency_cnt)) ||
1181 	    unlikely(panic_in_progress()))
1182 		return false;
1183 
1184 	cookie = console_srcu_read_lock();
1185 
1186 	flags = console_srcu_read_flags(con);
1187 	if (console_is_usable(con, flags, false)) {
1188 		/* Bring the sequence in @ctxt up to date */
1189 		ctxt->seq = nbcon_seq_read(con);
1190 
1191 		ret = prb_read_valid(prb, ctxt->seq, NULL);
1192 	}
1193 
1194 	console_srcu_read_unlock(cookie);
1195 	return ret;
1196 }
1197 
1198 /**
1199  * nbcon_kthread_func - The printer thread function
1200  * @__console:	Console to operate on
1201  *
1202  * Return:	0
1203  */
nbcon_kthread_func(void * __console)1204 static int nbcon_kthread_func(void *__console)
1205 {
1206 	struct console *con = __console;
1207 	struct nbcon_write_context wctxt = {
1208 		.ctxt.console	= con,
1209 		.ctxt.prio	= NBCON_PRIO_NORMAL,
1210 	};
1211 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
1212 	short con_flags;
1213 	bool backlog;
1214 	int cookie;
1215 
1216 wait_for_event:
1217 	/*
1218 	 * Guarantee this task is visible on the rcuwait before
1219 	 * checking the wake condition.
1220 	 *
1221 	 * The full memory barrier within set_current_state() of
1222 	 * ___rcuwait_wait_event() pairs with the full memory
1223 	 * barrier within rcuwait_has_sleeper().
1224 	 *
1225 	 * This pairs with rcuwait_has_sleeper:A and nbcon_kthread_wake:A.
1226 	 */
1227 	rcuwait_wait_event(&con->rcuwait,
1228 			   nbcon_kthread_should_wakeup(con, ctxt),
1229 			   TASK_INTERRUPTIBLE); /* LMM(nbcon_kthread_func:A) */
1230 
1231 	do {
1232 		if (kthread_should_stop())
1233 			return 0;
1234 
1235 		/*
1236 		 * Block the kthread when the system is in an emergency or panic
1237 		 * mode. See nbcon_kthread_should_wakeup() for more details.
1238 		 */
1239 		if (unlikely(atomic_read(&nbcon_cpu_emergency_cnt)) ||
1240 		    unlikely(panic_in_progress()))
1241 			goto wait_for_event;
1242 
1243 		backlog = false;
1244 
1245 		/*
1246 		 * Keep the srcu read lock around the entire operation so that
1247 		 * synchronize_srcu() can guarantee that the kthread stopped
1248 		 * or suspended printing.
1249 		 */
1250 		cookie = console_srcu_read_lock();
1251 
1252 		con_flags = console_srcu_read_flags(con);
1253 
1254 		if (console_is_usable(con, con_flags, false))
1255 			backlog = nbcon_emit_one(&wctxt, false);
1256 
1257 		console_srcu_read_unlock(cookie);
1258 
1259 		cond_resched();
1260 
1261 	} while (backlog);
1262 
1263 	goto wait_for_event;
1264 }
1265 
1266 /**
1267  * nbcon_irq_work - irq work to wake console printer thread
1268  * @irq_work:	The irq work to operate on
1269  */
nbcon_irq_work(struct irq_work * irq_work)1270 static void nbcon_irq_work(struct irq_work *irq_work)
1271 {
1272 	struct console *con = container_of(irq_work, struct console, irq_work);
1273 
1274 	nbcon_kthread_wake(con);
1275 }
1276 
rcuwait_has_sleeper(struct rcuwait * w)1277 static inline bool rcuwait_has_sleeper(struct rcuwait *w)
1278 {
1279 	/*
1280 	 * Guarantee any new records can be seen by tasks preparing to wait
1281 	 * before this context checks if the rcuwait is empty.
1282 	 *
1283 	 * This full memory barrier pairs with the full memory barrier within
1284 	 * set_current_state() of ___rcuwait_wait_event(), which is called
1285 	 * after prepare_to_rcuwait() adds the waiter but before it has
1286 	 * checked the wait condition.
1287 	 *
1288 	 * This pairs with nbcon_kthread_func:A.
1289 	 */
1290 	smp_mb(); /* LMM(rcuwait_has_sleeper:A) */
1291 	return rcuwait_active(w);
1292 }
1293 
1294 /**
1295  * nbcon_kthreads_wake - Wake up printing threads using irq_work
1296  */
nbcon_kthreads_wake(void)1297 void nbcon_kthreads_wake(void)
1298 {
1299 	struct console *con;
1300 	int cookie;
1301 
1302 	if (!printk_kthreads_running)
1303 		return;
1304 
1305 	/*
1306 	 * It is not allowed to call this function when console irq_work
1307 	 * is blocked.
1308 	 */
1309 	if (WARN_ON_ONCE(console_irqwork_blocked))
1310 		return;
1311 
1312 	cookie = console_srcu_read_lock();
1313 	for_each_console_srcu(con) {
1314 		if (!(console_srcu_read_flags(con) & CON_NBCON))
1315 			continue;
1316 
1317 		/*
1318 		 * Only schedule irq_work if the printing thread is
1319 		 * actively waiting. If not waiting, the thread will
1320 		 * notice by itself that it has work to do.
1321 		 */
1322 		if (rcuwait_has_sleeper(&con->rcuwait))
1323 			irq_work_queue(&con->irq_work);
1324 	}
1325 	console_srcu_read_unlock(cookie);
1326 }
1327 
1328 /*
1329  * nbcon_kthread_stop - Stop a console printer thread
1330  * @con:	Console to operate on
1331  */
nbcon_kthread_stop(struct console * con)1332 void nbcon_kthread_stop(struct console *con)
1333 {
1334 	lockdep_assert_console_list_lock_held();
1335 
1336 	if (!con->kthread)
1337 		return;
1338 
1339 	kthread_stop(con->kthread);
1340 	con->kthread = NULL;
1341 }
1342 
1343 /**
1344  * nbcon_kthread_create - Create a console printer thread
1345  * @con:	Console to operate on
1346  *
1347  * Return:	True if the kthread was started or already exists.
1348  *		Otherwise false and @con must not be registered.
1349  *
1350  * This function is called when it will be expected that nbcon consoles are
1351  * flushed using the kthread. The messages printed with NBCON_PRIO_NORMAL
1352  * will be no longer flushed by the legacy loop. This is why failure must
1353  * be fatal for console registration.
1354  *
1355  * If @con was already registered and this function fails, @con must be
1356  * unregistered before the global state variable @printk_kthreads_running
1357  * can be set.
1358  */
nbcon_kthread_create(struct console * con)1359 bool nbcon_kthread_create(struct console *con)
1360 {
1361 	struct task_struct *kt;
1362 
1363 	lockdep_assert_console_list_lock_held();
1364 
1365 	if (con->kthread)
1366 		return true;
1367 
1368 	kt = kthread_run(nbcon_kthread_func, con, "pr/%s%d", con->name, con->index);
1369 	if (WARN_ON(IS_ERR(kt))) {
1370 		con_printk(KERN_ERR, con, "failed to start printing thread\n");
1371 		return false;
1372 	}
1373 
1374 	con->kthread = kt;
1375 
1376 	/*
1377 	 * It is important that console printing threads are scheduled
1378 	 * shortly after a printk call and with generous runtime budgets.
1379 	 */
1380 	sched_set_normal(con->kthread, -20);
1381 
1382 	return true;
1383 }
1384 
1385 /* Track the nbcon emergency nesting per CPU. */
1386 static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);
1387 static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
1388 
1389 /**
1390  * nbcon_get_cpu_emergency_nesting - Get the per CPU emergency nesting pointer
1391  *
1392  * Context:	For reading, any context. For writing, any context which could
1393  *		not be migrated to another CPU.
1394  * Return:	Either a pointer to the per CPU emergency nesting counter of
1395  *		the current CPU or to the init data during early boot.
1396  *
1397  * The function is safe for reading per-CPU variables in any context because
1398  * preemption is disabled if the current CPU is in the emergency state. See
1399  * also nbcon_cpu_emergency_enter().
1400  */
nbcon_get_cpu_emergency_nesting(void)1401 static __ref unsigned int *nbcon_get_cpu_emergency_nesting(void)
1402 {
1403 	/*
1404 	 * The value of __printk_percpu_data_ready gets set in normal
1405 	 * context and before SMP initialization. As a result it could
1406 	 * never change while inside an nbcon emergency section.
1407 	 */
1408 	if (!printk_percpu_data_ready())
1409 		return &early_nbcon_pcpu_emergency_nesting;
1410 
1411 	return raw_cpu_ptr(&nbcon_pcpu_emergency_nesting);
1412 }
1413 
1414 /**
1415  * nbcon_get_default_prio - The appropriate nbcon priority to use for nbcon
1416  *				printing on the current CPU
1417  *
1418  * Context:	Any context.
1419  * Return:	The nbcon_prio to use for acquiring an nbcon console in this
1420  *		context for printing.
1421  *
1422  * The function is safe for reading per-CPU data in any context because
1423  * preemption is disabled if the current CPU is in the emergency or panic
1424  * state.
1425  */
nbcon_get_default_prio(void)1426 enum nbcon_prio nbcon_get_default_prio(void)
1427 {
1428 	unsigned int *cpu_emergency_nesting;
1429 
1430 	if (panic_on_this_cpu())
1431 		return NBCON_PRIO_PANIC;
1432 
1433 	cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
1434 	if (*cpu_emergency_nesting)
1435 		return NBCON_PRIO_EMERGENCY;
1436 
1437 	return NBCON_PRIO_NORMAL;
1438 }
1439 
1440 /*
1441  * Track if it is allowed to perform unsafe hostile takeovers of console
1442  * ownership. When true, console drivers might perform unsafe actions while
1443  * printing. It is externally available via nbcon_allow_unsafe_takeover().
1444  */
1445 static bool panic_nbcon_allow_unsafe_takeover;
1446 
1447 /**
1448  * nbcon_allow_unsafe_takeover - Check if unsafe console takeovers are allowed
1449  *
1450  * Return:	True, when it is permitted to perform unsafe console printing
1451  *
1452  * This is also used by console_is_usable() to determine if it is allowed to
1453  * call write_atomic() callbacks flagged as unsafe (CON_NBCON_ATOMIC_UNSAFE).
1454  */
nbcon_allow_unsafe_takeover(void)1455 bool nbcon_allow_unsafe_takeover(void)
1456 {
1457 	return panic_on_this_cpu() && panic_nbcon_allow_unsafe_takeover;
1458 }
1459 
1460 /**
1461  * nbcon_legacy_emit_next_record - Print one record for an nbcon console
1462  *					in legacy contexts
1463  * @con:	The console to print on
1464  * @handover:	Will be set to true if a printk waiter has taken over the
1465  *		console_lock, in which case the caller is no longer holding
1466  *		both the console_lock and the SRCU read lock. Otherwise it
1467  *		is set to false.
1468  * @cookie:	The cookie from the SRCU read lock.
1469  * @use_atomic: Set true when called in an atomic or unknown context.
1470  *		It affects which nbcon callback will be used: write_atomic()
1471  *		or write_thread().
1472  *
1473  *		When false, the write_thread() callback is used and would be
1474  *		called in a preemtible context unless disabled by the
1475  *		device_lock. The legacy handover is not allowed in this mode.
1476  *
1477  * Context:	Any context except NMI.
1478  * Return:	True, when a record has been printed and there are still
1479  *		pending records. The caller might want to continue flushing.
1480  *
1481  *		False, when there is no pending record, or when the console
1482  *		context cannot be acquired, or the ownership has been lost.
1483  *		The caller should give up. Either the job is done, cannot be
1484  *		done, or will be handled by the owning context.
1485  *
1486  * This function is meant to be called by console_flush_all() to print records
1487  * on nbcon consoles from legacy context (printing via console unlocking).
1488  * Essentially it is the nbcon version of console_emit_next_record().
1489  */
nbcon_legacy_emit_next_record(struct console * con,bool * handover,int cookie,bool use_atomic)1490 bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
1491 				   int cookie, bool use_atomic)
1492 {
1493 	struct nbcon_write_context wctxt = { };
1494 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
1495 	unsigned long flags;
1496 	bool progress;
1497 
1498 	ctxt->console	= con;
1499 	ctxt->prio	= nbcon_get_default_prio();
1500 
1501 	if (use_atomic) {
1502 		/*
1503 		 * In an atomic or unknown context, use the same procedure as
1504 		 * in console_emit_next_record(). It allows to handover.
1505 		 */
1506 		printk_safe_enter_irqsave(flags);
1507 		console_lock_spinning_enable();
1508 		stop_critical_timings();
1509 	}
1510 
1511 	progress = nbcon_emit_one(&wctxt, use_atomic);
1512 
1513 	if (use_atomic) {
1514 		start_critical_timings();
1515 		*handover = console_lock_spinning_disable_and_check(cookie);
1516 		printk_safe_exit_irqrestore(flags);
1517 	} else {
1518 		/* Non-atomic does not perform legacy spinning handovers. */
1519 		*handover = false;
1520 	}
1521 
1522 	return progress;
1523 }
1524 
1525 /**
1526  * __nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
1527  *					write_atomic() callback
1528  * @con:			The nbcon console to flush
1529  * @stop_seq:			Flush up until this record
1530  *
1531  * Return:	0 if @con was flushed up to @stop_seq Otherwise, error code on
1532  *		failure.
1533  *
1534  * Errors:
1535  *
1536  *	-EPERM:		Unable to acquire console ownership.
1537  *
1538  *	-EAGAIN:	Another context took over ownership while printing.
1539  *
1540  *	-ENOENT:	A record before @stop_seq is not available.
1541  *
1542  * If flushing up to @stop_seq was not successful, it only makes sense for the
1543  * caller to try again when -EAGAIN was returned. When -EPERM is returned,
1544  * this context is not allowed to acquire the console. When -ENOENT is
1545  * returned, it cannot be expected that the unfinalized record will become
1546  * available.
1547  */
__nbcon_atomic_flush_pending_con(struct console * con,u64 stop_seq)1548 static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
1549 {
1550 	struct nbcon_write_context wctxt = { };
1551 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
1552 	int err = 0;
1553 
1554 	ctxt->console			= con;
1555 	ctxt->spinwait_max_us		= 2000;
1556 	ctxt->prio			= nbcon_get_default_prio();
1557 	ctxt->allow_unsafe_takeover	= nbcon_allow_unsafe_takeover();
1558 
1559 	while (nbcon_seq_read(con) < stop_seq) {
1560 		if (!nbcon_context_try_acquire(ctxt, false))
1561 			return -EPERM;
1562 
1563 		/*
1564 		 * nbcon_emit_next_record() returns false when the console was
1565 		 * handed over or taken over. In both cases the context is no
1566 		 * longer valid.
1567 		 */
1568 		if (!nbcon_emit_next_record(&wctxt, true))
1569 			return -EAGAIN;
1570 
1571 		nbcon_context_release(ctxt);
1572 
1573 		if (!ctxt->backlog) {
1574 			/* Are there reserved but not yet finalized records? */
1575 			if (nbcon_seq_read(con) < stop_seq)
1576 				err = -ENOENT;
1577 			break;
1578 		}
1579 	}
1580 
1581 	return err;
1582 }
1583 
1584 /**
1585  * nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
1586  *					write_atomic() callback
1587  * @con:			The nbcon console to flush
1588  * @stop_seq:			Flush up until this record
1589  *
1590  * This will stop flushing before @stop_seq if another context has ownership.
1591  * That context is then responsible for the flushing. Likewise, if new records
1592  * are added while this context was flushing and there is no other context
1593  * to handle the printing, this context must also flush those records.
1594  */
nbcon_atomic_flush_pending_con(struct console * con,u64 stop_seq)1595 static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq)
1596 {
1597 	struct console_flush_type ft;
1598 	unsigned long flags;
1599 	int err;
1600 
1601 again:
1602 	/*
1603 	 * Atomic flushing does not use console driver synchronization (i.e.
1604 	 * it does not hold the port lock for uart consoles). Therefore IRQs
1605 	 * must be disabled to avoid being interrupted and then calling into
1606 	 * a driver that will deadlock trying to acquire console ownership.
1607 	 */
1608 	local_irq_save(flags);
1609 
1610 	err = __nbcon_atomic_flush_pending_con(con, stop_seq);
1611 
1612 	local_irq_restore(flags);
1613 
1614 	/*
1615 	 * If there was a new owner (-EPERM, -EAGAIN), that context is
1616 	 * responsible for completing.
1617 	 *
1618 	 * Do not wait for records not yet finalized (-ENOENT) to avoid a
1619 	 * possible deadlock. They will either get flushed by the writer or
1620 	 * eventually skipped on panic CPU.
1621 	 */
1622 	if (err)
1623 		return;
1624 
1625 	/*
1626 	 * If flushing was successful but more records are available, this
1627 	 * context must flush those remaining records if the printer thread
1628 	 * is not available do it.
1629 	 */
1630 	printk_get_console_flush_type(&ft);
1631 	if (!ft.nbcon_offload &&
1632 	    prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
1633 		stop_seq = prb_next_reserve_seq(prb);
1634 		goto again;
1635 	}
1636 }
1637 
1638 /**
1639  * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
1640  *					write_atomic() callback
1641  * @stop_seq:			Flush up until this record
1642  */
__nbcon_atomic_flush_pending(u64 stop_seq)1643 static void __nbcon_atomic_flush_pending(u64 stop_seq)
1644 {
1645 	struct console *con;
1646 	int cookie;
1647 
1648 	cookie = console_srcu_read_lock();
1649 	for_each_console_srcu(con) {
1650 		short flags = console_srcu_read_flags(con);
1651 
1652 		if (!(flags & CON_NBCON))
1653 			continue;
1654 
1655 		if (!console_is_usable(con, flags, true))
1656 			continue;
1657 
1658 		if (nbcon_seq_read(con) >= stop_seq)
1659 			continue;
1660 
1661 		nbcon_atomic_flush_pending_con(con, stop_seq);
1662 	}
1663 	console_srcu_read_unlock(cookie);
1664 }
1665 
1666 /**
1667  * nbcon_atomic_flush_pending - Flush all nbcon consoles using their
1668  *				write_atomic() callback
1669  *
1670  * Flush the backlog up through the currently newest record. Any new
1671  * records added while flushing will not be flushed if there is another
1672  * context available to handle the flushing. This is to avoid one CPU
1673  * printing unbounded because other CPUs continue to add records.
1674  */
nbcon_atomic_flush_pending(void)1675 void nbcon_atomic_flush_pending(void)
1676 {
1677 	__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
1678 }
1679 
1680 /**
1681  * nbcon_atomic_flush_unsafe - Flush all nbcon consoles using their
1682  *	write_atomic() callback and allowing unsafe hostile takeovers
1683  *
1684  * Flush the backlog up through the currently newest record. Unsafe hostile
1685  * takeovers will be performed, if necessary.
1686  */
nbcon_atomic_flush_unsafe(void)1687 void nbcon_atomic_flush_unsafe(void)
1688 {
1689 	panic_nbcon_allow_unsafe_takeover = true;
1690 	__nbcon_atomic_flush_pending(prb_next_reserve_seq(prb));
1691 	panic_nbcon_allow_unsafe_takeover = false;
1692 }
1693 
1694 /**
1695  * nbcon_cpu_emergency_enter - Enter an emergency section where printk()
1696  *				messages for that CPU are flushed directly
1697  *
1698  * Context:	Any context. Disables preemption.
1699  *
1700  * When within an emergency section, printk() calls will attempt to flush any
1701  * pending messages in the ringbuffer.
1702  */
nbcon_cpu_emergency_enter(void)1703 void nbcon_cpu_emergency_enter(void)
1704 {
1705 	unsigned int *cpu_emergency_nesting;
1706 
1707 	preempt_disable();
1708 
1709 	atomic_inc(&nbcon_cpu_emergency_cnt);
1710 
1711 	cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
1712 	(*cpu_emergency_nesting)++;
1713 }
1714 
1715 /**
1716  * nbcon_cpu_emergency_exit - Exit an emergency section
1717  *
1718  * Context:	Within an emergency section. Enables preemption.
1719  */
nbcon_cpu_emergency_exit(void)1720 void nbcon_cpu_emergency_exit(void)
1721 {
1722 	unsigned int *cpu_emergency_nesting;
1723 
1724 	cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
1725 	if (!WARN_ON_ONCE(*cpu_emergency_nesting == 0))
1726 		(*cpu_emergency_nesting)--;
1727 
1728 	/*
1729 	 * Wake up kthreads because there might be some pending messages
1730 	 * added by other CPUs with normal priority since the last flush
1731 	 * in the emergency context.
1732 	 */
1733 	if (!WARN_ON_ONCE(atomic_read(&nbcon_cpu_emergency_cnt) == 0)) {
1734 		if (atomic_dec_return(&nbcon_cpu_emergency_cnt) == 0) {
1735 			struct console_flush_type ft;
1736 
1737 			printk_get_console_flush_type(&ft);
1738 			if (ft.nbcon_offload)
1739 				nbcon_kthreads_wake();
1740 		}
1741 	}
1742 
1743 	preempt_enable();
1744 }
1745 
1746 /**
1747  * nbcon_alloc - Allocate and init the nbcon console specific data
1748  * @con:	Console to initialize
1749  *
1750  * Return:	True if the console was fully allocated and initialized.
1751  *		Otherwise @con must not be registered.
1752  *
1753  * When allocation and init was successful, the console must be properly
1754  * freed using nbcon_free() once it is no longer needed.
1755  */
nbcon_alloc(struct console * con)1756 bool nbcon_alloc(struct console *con)
1757 {
1758 	struct nbcon_state state = { };
1759 
1760 	/* Synchronize the kthread start. */
1761 	lockdep_assert_console_list_lock_held();
1762 
1763 	/* The write_thread() callback is mandatory. */
1764 	if (WARN_ON(!con->write_thread))
1765 		return false;
1766 
1767 	rcuwait_init(&con->rcuwait);
1768 	init_irq_work(&con->irq_work, nbcon_irq_work);
1769 	atomic_long_set(&ACCESS_PRIVATE(con, nbcon_prev_seq), -1UL);
1770 	nbcon_state_set(con, &state);
1771 
1772 	/*
1773 	 * Initialize @nbcon_seq to the highest possible sequence number so
1774 	 * that practically speaking it will have nothing to print until a
1775 	 * desired initial sequence number has been set via nbcon_seq_force().
1776 	 */
1777 	atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), ULSEQ_MAX(prb));
1778 
1779 	if (con->flags & CON_BOOT) {
1780 		/*
1781 		 * Boot console printing is synchronized with legacy console
1782 		 * printing, so boot consoles can share the same global printk
1783 		 * buffers.
1784 		 */
1785 		con->pbufs = &printk_shared_pbufs;
1786 	} else {
1787 		con->pbufs = kmalloc(sizeof(*con->pbufs), GFP_KERNEL);
1788 		if (!con->pbufs) {
1789 			con_printk(KERN_ERR, con, "failed to allocate printing buffer\n");
1790 			return false;
1791 		}
1792 
1793 		if (printk_kthreads_ready && !have_boot_console) {
1794 			if (!nbcon_kthread_create(con)) {
1795 				kfree(con->pbufs);
1796 				con->pbufs = NULL;
1797 				return false;
1798 			}
1799 
1800 			/* Might be the first kthread. */
1801 			printk_kthreads_running = true;
1802 		}
1803 	}
1804 
1805 	return true;
1806 }
1807 
1808 /**
1809  * nbcon_free - Free and cleanup the nbcon console specific data
1810  * @con:	Console to free/cleanup nbcon data
1811  *
1812  * Important: @have_nbcon_console must be updated before calling
1813  *	this function. In particular, it can be set only when there
1814  *	is still another nbcon console registered.
1815  */
nbcon_free(struct console * con)1816 void nbcon_free(struct console *con)
1817 {
1818 	struct nbcon_state state = { };
1819 
1820 	/* Synchronize the kthread stop. */
1821 	lockdep_assert_console_list_lock_held();
1822 
1823 	if (printk_kthreads_running) {
1824 		nbcon_kthread_stop(con);
1825 
1826 		/* Might be the last nbcon console.
1827 		 *
1828 		 * Do not rely on printk_kthreads_check_locked(). It is not
1829 		 * called in some code paths, see nbcon_free() callers.
1830 		 */
1831 		if (!have_nbcon_console)
1832 			printk_kthreads_running = false;
1833 	}
1834 
1835 	nbcon_state_set(con, &state);
1836 
1837 	/* Boot consoles share global printk buffers. */
1838 	if (!(con->flags & CON_BOOT))
1839 		kfree(con->pbufs);
1840 
1841 	con->pbufs = NULL;
1842 }
1843 
1844 /**
1845  * nbcon_device_try_acquire - Try to acquire nbcon console and enter unsafe
1846  *				section
1847  * @con:	The nbcon console to acquire
1848  *
1849  * Context:	Under the locking mechanism implemented in
1850  *		@con->device_lock() including disabling migration.
1851  * Return:	True if the console was acquired. False otherwise.
1852  *
1853  * Console drivers will usually use their own internal synchronization
1854  * mechasism to synchronize between console printing and non-printing
1855  * activities (such as setting baud rates). However, nbcon console drivers
1856  * supporting atomic consoles may also want to mark unsafe sections when
1857  * performing non-printing activities in order to synchronize against their
1858  * atomic_write() callback.
1859  *
1860  * This function acquires the nbcon console using priority NBCON_PRIO_NORMAL
1861  * and marks it unsafe for handover/takeover.
1862  */
nbcon_device_try_acquire(struct console * con)1863 bool nbcon_device_try_acquire(struct console *con)
1864 {
1865 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
1866 
1867 	cant_migrate();
1868 
1869 	memset(ctxt, 0, sizeof(*ctxt));
1870 	ctxt->console	= con;
1871 	ctxt->prio	= NBCON_PRIO_NORMAL;
1872 
1873 	if (!nbcon_context_try_acquire(ctxt, false))
1874 		return false;
1875 
1876 	if (!nbcon_context_enter_unsafe(ctxt))
1877 		return false;
1878 
1879 	return true;
1880 }
1881 EXPORT_SYMBOL_GPL(nbcon_device_try_acquire);
1882 
1883 /**
1884  * nbcon_device_release - Exit unsafe section and release the nbcon console
1885  * @con:	The nbcon console acquired in nbcon_device_try_acquire()
1886  */
nbcon_device_release(struct console * con)1887 void nbcon_device_release(struct console *con)
1888 {
1889 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
1890 	struct console_flush_type ft;
1891 	int cookie;
1892 
1893 	if (!nbcon_context_exit_unsafe(ctxt))
1894 		return;
1895 
1896 	nbcon_context_release(ctxt);
1897 
1898 	/*
1899 	 * This context must flush any new records added while the console
1900 	 * was locked if the printer thread is not available to do it. The
1901 	 * console_srcu_read_lock must be taken to ensure the console is
1902 	 * usable throughout flushing.
1903 	 */
1904 	cookie = console_srcu_read_lock();
1905 	printk_get_console_flush_type(&ft);
1906 	if (console_is_usable(con, console_srcu_read_flags(con), true) &&
1907 	    !ft.nbcon_offload &&
1908 	    prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
1909 		/*
1910 		 * If nbcon_atomic flushing is not available, fallback to
1911 		 * using the legacy loop.
1912 		 */
1913 		if (ft.nbcon_atomic) {
1914 			__nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb));
1915 		} else if (ft.legacy_direct) {
1916 			if (console_trylock())
1917 				console_unlock();
1918 		} else if (ft.legacy_offload) {
1919 			defer_console_output();
1920 		}
1921 	}
1922 	console_srcu_read_unlock(cookie);
1923 }
1924 EXPORT_SYMBOL_GPL(nbcon_device_release);
1925 
1926 /**
1927  * nbcon_kdb_try_acquire - Try to acquire nbcon console and enter unsafe
1928  *			   section
1929  * @con:	The nbcon console to acquire
1930  * @wctxt:	The nbcon write context to be used on success
1931  *
1932  * Context:	Under console_srcu_read_lock() for emitting a single kdb message
1933  *		using the given con->write_atomic() callback. Can be called
1934  *		only when the console is usable at the moment.
1935  *
1936  * Return:	True if the console was acquired. False otherwise.
1937  *
1938  * kdb emits messages on consoles registered for printk() without
1939  * storing them into the ring buffer. It has to acquire the console
1940  * ownerhip so that it could call con->write_atomic() callback a safe way.
1941  *
1942  * This function acquires the nbcon console using priority NBCON_PRIO_EMERGENCY
1943  * and marks it unsafe for handover/takeover.
1944  */
nbcon_kdb_try_acquire(struct console * con,struct nbcon_write_context * wctxt)1945 bool nbcon_kdb_try_acquire(struct console *con,
1946 			   struct nbcon_write_context *wctxt)
1947 {
1948 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
1949 
1950 	memset(ctxt, 0, sizeof(*ctxt));
1951 	ctxt->console = con;
1952 	ctxt->prio    = NBCON_PRIO_EMERGENCY;
1953 
1954 	if (!nbcon_context_try_acquire(ctxt, false))
1955 		return false;
1956 
1957 	if (!nbcon_context_enter_unsafe(ctxt))
1958 		return false;
1959 
1960 	return true;
1961 }
1962 
1963 /**
1964  * nbcon_kdb_release - Exit unsafe section and release the nbcon console
1965  *
1966  * @wctxt:	The nbcon write context initialized by a successful
1967  *		nbcon_kdb_try_acquire()
1968  */
nbcon_kdb_release(struct nbcon_write_context * wctxt)1969 void nbcon_kdb_release(struct nbcon_write_context *wctxt)
1970 {
1971 	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
1972 
1973 	if (!nbcon_context_exit_unsafe(ctxt))
1974 		return;
1975 
1976 	nbcon_context_release(ctxt);
1977 
1978 	/*
1979 	 * Flush any new printk() messages added when the console was blocked.
1980 	 * Only the console used by the given write context was	blocked.
1981 	 * The console was locked only when the write_atomic() callback
1982 	 * was usable.
1983 	 */
1984 	__nbcon_atomic_flush_pending_con(ctxt->console, prb_next_reserve_seq(prb));
1985 }
1986