Lines Matching refs:con (references to the struct console pointer `con` in the nbcon printk code)
133 static inline void nbcon_state_set(struct console *con, struct nbcon_state *new) in nbcon_state_set() argument
135 atomic_set(&ACCESS_PRIVATE(con, nbcon_state), new->atom); in nbcon_state_set()
143 static inline void nbcon_state_read(struct console *con, struct nbcon_state *state) in nbcon_state_read() argument
145 state->atom = atomic_read(&ACCESS_PRIVATE(con, nbcon_state)); in nbcon_state_read()
156 static inline bool nbcon_state_try_cmpxchg(struct console *con, struct nbcon_state *cur, in nbcon_state_try_cmpxchg() argument
159 return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom); in nbcon_state_try_cmpxchg()
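The three helpers above keep the whole nbcon ownership state in one atomic word: a plain set, a plain read, and a try-cmpxchg that callers wrap in retry loops. Below is a minimal userspace sketch of that pattern, using C11 stdatomic instead of the kernel's atomic_t/ACCESS_PRIVATE; all demo_* names are hypothetical.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-in for struct nbcon_state: bitfields unioned with one word. */
    struct demo_state {
        union {
            uint32_t atom;
            struct {
                unsigned int prio   : 2;
                unsigned int cpu    : 24;
                unsigned int unsafe : 1;
            };
        };
    };

    struct demo_console {
        _Atomic uint32_t state;    /* all ownership state packed in one atomic word */
    };

    static inline void demo_state_set(struct demo_console *con, struct demo_state *new)
    {
        atomic_store(&con->state, new->atom);
    }

    static inline void demo_state_read(struct demo_console *con, struct demo_state *state)
    {
        state->atom = atomic_load(&con->state);
    }

    /* On failure, *cur is refreshed with the latest value, like atomic_try_cmpxchg(). */
    static inline bool demo_state_try_cmpxchg(struct demo_console *con,
                                              struct demo_state *cur,
                                              struct demo_state *new)
    {
        return atomic_compare_exchange_strong(&con->state, &cur->atom, new->atom);
    }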
168 u64 nbcon_seq_read(struct console *con) in nbcon_seq_read() argument
170 unsigned long nbcon_seq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_seq)); in nbcon_seq_read()
183 void nbcon_seq_force(struct console *con, u64 seq) in nbcon_seq_force() argument
193 atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __u64seq_to_ulseq(valid_seq)); in nbcon_seq_force()
210 struct console *con = ctxt->console; in nbcon_seq_try_update() local
212 if (atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_seq), &nbcon_seq, in nbcon_seq_try_update()
216 ctxt->seq = nbcon_seq_read(con); in nbcon_seq_try_update()
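nbcon_seq_read(), nbcon_seq_force() and nbcon_seq_try_update() keep the per-console "next record to print" position in a single atomic_long_t, folding the 64-bit sequence into an unsigned long on 32-bit systems via __u64seq_to_ulseq(). A rough sketch of the try-update step, ignoring that folding and using hypothetical demo_* names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct demo_console {
        _Atomic uint64_t seq;    /* next sequence number this console will print */
    };

    struct demo_ctxt {
        struct demo_console *console;
        uint64_t seq;            /* sequence this context believes it owns */
    };

    /* Advance con->seq from ctxt->seq to new_seq; on a race, resync ctxt->seq. */
    static bool demo_seq_try_update(struct demo_ctxt *ctxt, uint64_t new_seq)
    {
        struct demo_console *con = ctxt->console;
        uint64_t expected = ctxt->seq;

        if (atomic_compare_exchange_strong(&con->seq, &expected, new_seq)) {
            ctxt->seq = new_seq;
            return true;
        }

        /* Someone else printed meanwhile: adopt the console's current position. */
        ctxt->seq = atomic_load(&con->seq);
        return false;
    }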
247 struct console *con = ctxt->console; in nbcon_context_try_acquire_direct() local
288 } while (!nbcon_state_try_cmpxchg(con, cur, &new)); in nbcon_context_try_acquire_direct()
355 struct console *con = ctxt->console; in nbcon_context_try_acquire_requested() local
385 if (!nbcon_state_try_cmpxchg(con, cur, &new)) { in nbcon_context_try_acquire_requested()
438 struct console *con = ctxt->console; in nbcon_context_try_acquire_handover() local
481 if (!nbcon_state_try_cmpxchg(con, cur, &new)) in nbcon_context_try_acquire_handover()
503 nbcon_state_read(con, cur); in nbcon_context_try_acquire_handover()
519 if (nbcon_state_try_cmpxchg(con, cur, &new)) { in nbcon_context_try_acquire_handover()
554 struct console *con = ctxt->console; in nbcon_context_try_acquire_hostile() local
578 } while (!nbcon_state_try_cmpxchg(con, cur, &new)); in nbcon_context_try_acquire_hostile()
600 struct console *con = ctxt->console; in nbcon_context_try_acquire() local
604 nbcon_state_read(con, &cur); in nbcon_context_try_acquire()
627 ctxt->pbufs = con->pbufs; in nbcon_context_try_acquire()
680 struct console *con = ctxt->console; in nbcon_context_release() local
684 nbcon_state_read(con, &cur); in nbcon_context_release()
699 } while (!nbcon_state_try_cmpxchg(con, &cur, &new)); in nbcon_context_release()
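All of the acquire paths listed above (direct, requested, handover, hostile) and nbcon_context_release() share one shape: read the packed state, compute the desired successor, and loop on nbcon_state_try_cmpxchg() until the transition lands or its precondition disappears. A simplified sketch of that shape with a hypothetical owner/priority encoding (not the real nbcon_state layout):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    enum demo_prio { DEMO_PRIO_NONE, DEMO_PRIO_NORMAL, DEMO_PRIO_EMERGENCY, DEMO_PRIO_PANIC };

    #define OWNER_MASK  0xffffu
    #define PRIO_SHIFT  16

    struct demo_console {
        _Atomic uint32_t state;  /* low 16 bits: owner CPU + 1 (0 = unowned); upper bits: prio */
    };

    /* Try to take ownership for `cpu` at `prio`; back off if an equal/higher owner holds it. */
    static bool demo_try_acquire(struct demo_console *con, unsigned int cpu, enum demo_prio prio)
    {
        uint32_t cur = atomic_load(&con->state);
        uint32_t new;

        do {
            if ((cur & OWNER_MASK) && (cur >> PRIO_SHIFT) >= (uint32_t)prio)
                return false;    /* already owned at equal or higher priority */
            new = ((uint32_t)prio << PRIO_SHIFT) | ((cpu + 1) & OWNER_MASK);
        } while (!atomic_compare_exchange_weak(&con->state, &cur, new));

        return true;
    }

    /* Release only if we still own the console; a handover may have taken it already. */
    static void demo_release(struct demo_console *con, unsigned int cpu)
    {
        uint32_t cur = atomic_load(&con->state);

        do {
            if ((cur & OWNER_MASK) != ((cpu + 1) & OWNER_MASK))
                return;
        } while (!atomic_compare_exchange_weak(&con->state, &cur, 0));
    }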
801 struct console *con = ctxt->console; in nbcon_can_proceed() local
804 nbcon_state_read(con, &cur); in nbcon_can_proceed()
834 struct console *con = ctxt->console; in __nbcon_context_update_unsafe() local
838 nbcon_state_read(con, &cur); in __nbcon_context_update_unsafe()
853 } while (!nbcon_state_try_cmpxchg(con, &cur, &new)); in __nbcon_context_update_unsafe()
864 struct console *con = ctxt->console; in nbcon_write_context_set_buf() local
869 nbcon_state_read(con, &cur); in nbcon_write_context_set_buf()
969 struct console *con = ctxt->console; in nbcon_emit_next_record() local
970 bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED; in nbcon_emit_next_record()
988 if (WARN_ON_ONCE((use_atomic && !con->write_atomic) || in nbcon_emit_next_record()
989 !(console_srcu_read_flags(con) & CON_NBCON))) { in nbcon_emit_next_record()
1012 con_dropped = data_race(READ_ONCE(con->dropped)); in nbcon_emit_next_record()
1023 ulseq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_prev_seq)); in nbcon_emit_next_record()
1033 nbcon_state_read(con, &cur); in nbcon_emit_next_record()
1037 atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_prev_seq), &ulseq, in nbcon_emit_next_record()
1052 con->write_atomic(con, wctxt); in nbcon_emit_next_record()
1054 con->write_thread(con, wctxt); in nbcon_emit_next_record()
1088 WRITE_ONCE(con->dropped, dropped); in nbcon_emit_next_record()
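nbcon_emit_next_record() formats one ringbuffer record and then hands it to either con->write_atomic() or con->write_thread(), depending on whether the caller is flushing atomically or running in the per-console kthread; the dropped-record count is folded back into con->dropped afterwards. A stripped-down sketch of that dispatch with hypothetical demo_* callback types:

    #include <stdbool.h>
    #include <stddef.h>

    struct demo_wctxt {
        char *outbuf;    /* formatted text to emit */
        size_t len;
    };

    struct demo_console {
        /* Callable from any context, including NMI and panic. */
        void (*write_atomic)(struct demo_console *con, struct demo_wctxt *wctxt);
        /* Only called from the dedicated printing kthread. */
        void (*write_thread)(struct demo_console *con, struct demo_wctxt *wctxt);
        unsigned long dropped;    /* records lost while the console was unavailable */
    };

    static bool demo_emit_next_record(struct demo_console *con, struct demo_wctxt *wctxt,
                                      bool use_atomic)
    {
        if (use_atomic && !con->write_atomic)
            return false;    /* atomic flush requested but no atomic callback exists */

        if (use_atomic)
            con->write_atomic(con, wctxt);
        else
            con->write_thread(con, wctxt);

        return true;
    }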
1116 struct console *con = ctxt->console; in nbcon_emit_one() local
1121 con->device_lock(con, &flags); in nbcon_emit_one()
1149 con->device_unlock(con, flags); in nbcon_emit_one()
1164 static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_context *ctxt) in nbcon_kthread_should_wakeup() argument
1186 flags = console_srcu_read_flags(con); in nbcon_kthread_should_wakeup()
1187 if (console_is_usable(con, flags, false)) { in nbcon_kthread_should_wakeup()
1189 ctxt->seq = nbcon_seq_read(con); in nbcon_kthread_should_wakeup()
1206 struct console *con = __console; in nbcon_kthread_func() local
1208 .ctxt.console = con, in nbcon_kthread_func()
1227 rcuwait_wait_event(&con->rcuwait, in nbcon_kthread_func()
1228 nbcon_kthread_should_wakeup(con, ctxt), in nbcon_kthread_func()
1252 con_flags = console_srcu_read_flags(con); in nbcon_kthread_func()
1254 if (console_is_usable(con, con_flags, false)) in nbcon_kthread_func()
1272 struct console *con = container_of(irq_work, struct console, irq_work); in nbcon_irq_work() local
1274 nbcon_kthread_wake(con); in nbcon_irq_work()
1299 struct console *con; in nbcon_kthreads_wake() local
1313 for_each_console_srcu(con) { in nbcon_kthreads_wake()
1314 if (!(console_srcu_read_flags(con) & CON_NBCON)) in nbcon_kthreads_wake()
1322 if (rcuwait_has_sleeper(&con->rcuwait)) in nbcon_kthreads_wake()
1323 irq_work_queue(&con->irq_work); in nbcon_kthreads_wake()
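nbcon_kthread_func() is the body of the per-console printer thread: it sleeps on con->rcuwait until nbcon_kthread_should_wakeup() reports pending work (or a stop request), prints while the console stays usable, and goes back to sleep. nbcon_irq_work() and nbcon_kthreads_wake() exist so wake-ups can be requested from contexts that must not wake a task directly; irq_work is only queued when rcuwait_has_sleeper() says someone is actually waiting. A loose userspace analogue of that sleep/wake loop using pthreads in place of rcuwait and irq_work (all demo_* names hypothetical):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct demo_console {
        pthread_mutex_t lock;
        pthread_cond_t wake;      /* stands in for con->rcuwait */
        unsigned long pending;    /* records waiting to be printed */
        bool stop;                /* set by the equivalent of kthread_stop() */
    };

    /* Printer thread: sleep until there is work or a stop request, then flush. */
    static void *demo_kthread_func(void *arg)
    {
        struct demo_console *con = arg;

        pthread_mutex_lock(&con->lock);
        for (;;) {
            while (!con->pending && !con->stop)
                pthread_cond_wait(&con->wake, &con->lock);

            if (con->stop)
                break;

            while (con->pending) {
                con->pending--;
                pthread_mutex_unlock(&con->lock);
                printf("emit one record\n");    /* write_thread() would run here */
                pthread_mutex_lock(&con->lock);
            }
        }
        pthread_mutex_unlock(&con->lock);
        return NULL;
    }

    /* Equivalent of nbcon_kthread_wake(): called after new records are stored. */
    static void demo_kthread_wake(struct demo_console *con)
    {
        pthread_mutex_lock(&con->lock);
        con->pending++;
        pthread_cond_signal(&con->wake);
        pthread_mutex_unlock(&con->lock);
    }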
1332 void nbcon_kthread_stop(struct console *con) in nbcon_kthread_stop() argument
1336 if (!con->kthread) in nbcon_kthread_stop()
1339 kthread_stop(con->kthread); in nbcon_kthread_stop()
1340 con->kthread = NULL; in nbcon_kthread_stop()
1359 bool nbcon_kthread_create(struct console *con) in nbcon_kthread_create() argument
1365 if (con->kthread) in nbcon_kthread_create()
1368 kt = kthread_run(nbcon_kthread_func, con, "pr/%s%d", con->name, con->index); in nbcon_kthread_create()
1370 con_printk(KERN_ERR, con, "failed to start printing thread\n"); in nbcon_kthread_create()
1374 con->kthread = kt; in nbcon_kthread_create()
1380 sched_set_normal(con->kthread, -20); in nbcon_kthread_create()
1490 bool nbcon_legacy_emit_next_record(struct console *con, bool *handover, in nbcon_legacy_emit_next_record() argument
1498 ctxt->console = con; in nbcon_legacy_emit_next_record()
1548 static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq) in __nbcon_atomic_flush_pending_con() argument
1554 ctxt->console = con; in __nbcon_atomic_flush_pending_con()
1559 while (nbcon_seq_read(con) < stop_seq) { in __nbcon_atomic_flush_pending_con()
1575 if (nbcon_seq_read(con) < stop_seq) in __nbcon_atomic_flush_pending_con()
1595 static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq) in nbcon_atomic_flush_pending_con() argument
1610 err = __nbcon_atomic_flush_pending_con(con, stop_seq); in nbcon_atomic_flush_pending_con()
1632 prb_read_valid(prb, nbcon_seq_read(con), NULL)) { in nbcon_atomic_flush_pending_con()
1645 struct console *con; in __nbcon_atomic_flush_pending() local
1649 for_each_console_srcu(con) { in __nbcon_atomic_flush_pending()
1650 short flags = console_srcu_read_flags(con); in __nbcon_atomic_flush_pending()
1655 if (!console_is_usable(con, flags, true)) in __nbcon_atomic_flush_pending()
1658 if (nbcon_seq_read(con) >= stop_seq) in __nbcon_atomic_flush_pending()
1661 nbcon_atomic_flush_pending_con(con, stop_seq); in __nbcon_atomic_flush_pending()
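__nbcon_atomic_flush_pending_con() keeps emitting records while the console's sequence lags behind stop_seq, and __nbcon_atomic_flush_pending() walks every registered console, skipping those that are not NBCON, not currently usable, or already caught up. A compressed sketch of that flush loop, with a trivial stub standing in for the real acquire/emit machinery (demo_* names are hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    struct demo_console {
        uint64_t seq;    /* next record this console will print */
        bool usable;
    };

    /* Stub for the real emit step: print one record and advance the sequence. */
    static bool demo_emit_one(struct demo_console *con)
    {
        con->seq++;
        return true;
    }

    /* Flush a single console until it has printed everything below stop_seq. */
    static int demo_flush_pending_con(struct demo_console *con, uint64_t stop_seq)
    {
        while (con->seq < stop_seq) {
            if (!demo_emit_one(con))
                return -1;    /* ownership was lost or emit failed; caller may retry */
        }
        return 0;
    }

    /* Flush every usable console up to stop_seq. */
    static void demo_flush_pending(struct demo_console **consoles, int nr, uint64_t stop_seq)
    {
        for (int i = 0; i < nr; i++) {
            struct demo_console *con = consoles[i];

            if (!con->usable)
                continue;
            if (con->seq >= stop_seq)
                continue;    /* already caught up */

            demo_flush_pending_con(con, stop_seq);
        }
    }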
1756 bool nbcon_alloc(struct console *con) in nbcon_alloc() argument
1764 if (WARN_ON(!con->write_thread)) in nbcon_alloc()
1767 rcuwait_init(&con->rcuwait); in nbcon_alloc()
1768 init_irq_work(&con->irq_work, nbcon_irq_work); in nbcon_alloc()
1769 atomic_long_set(&ACCESS_PRIVATE(con, nbcon_prev_seq), -1UL); in nbcon_alloc()
1770 nbcon_state_set(con, &state); in nbcon_alloc()
1777 atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), ULSEQ_MAX(prb)); in nbcon_alloc()
1779 if (con->flags & CON_BOOT) { in nbcon_alloc()
1785 con->pbufs = &printk_shared_pbufs; in nbcon_alloc()
1787 con->pbufs = kmalloc(sizeof(*con->pbufs), GFP_KERNEL); in nbcon_alloc()
1788 if (!con->pbufs) { in nbcon_alloc()
1789 con_printk(KERN_ERR, con, "failed to allocate printing buffer\n"); in nbcon_alloc()
1794 if (!nbcon_kthread_create(con)) { in nbcon_alloc()
1795 kfree(con->pbufs); in nbcon_alloc()
1796 con->pbufs = NULL; in nbcon_alloc()
1816 void nbcon_free(struct console *con) in nbcon_free() argument
1824 nbcon_kthread_stop(con); in nbcon_free()
1835 nbcon_state_set(con, &state); in nbcon_free()
1838 if (!(con->flags & CON_BOOT)) in nbcon_free()
1839 kfree(con->pbufs); in nbcon_free()
1841 con->pbufs = NULL; in nbcon_free()
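nbcon_alloc() initializes the wait and irq_work machinery, points boot consoles at the shared printk_shared_pbufs buffer, kmallocs a private buffer for everything else, and unwinds that allocation if the printer kthread cannot be started; nbcon_free() stops the kthread and frees only buffers it actually allocated. A small sketch of the allocate-or-share plus unwind-on-failure pattern, with hypothetical demo_* names and plain malloc in place of kmalloc:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct demo_pbufs { char outbuf[2048]; };

    /* One static buffer shared by boot consoles, mirroring printk_shared_pbufs. */
    static struct demo_pbufs demo_shared_pbufs;

    struct demo_console {
        bool is_boot;
        struct demo_pbufs *pbufs;
    };

    /* Assumed stand-in for nbcon_kthread_create(); pretend thread start may fail. */
    static bool demo_kthread_create(struct demo_console *con)
    {
        (void)con;
        return true;
    }

    static bool demo_alloc(struct demo_console *con)
    {
        if (con->is_boot) {
            /* Boot consoles share a static buffer instead of allocating. */
            con->pbufs = &demo_shared_pbufs;
            return true;
        }

        con->pbufs = malloc(sizeof(*con->pbufs));
        if (!con->pbufs) {
            fprintf(stderr, "failed to allocate printing buffer\n");
            return false;
        }

        if (!demo_kthread_create(con)) {
            /* Unwind the private buffer if the printer thread cannot start. */
            free(con->pbufs);
            con->pbufs = NULL;
            return false;
        }

        return true;
    }

    static void demo_free(struct demo_console *con)
    {
        /* The real nbcon_free() also stops the printer kthread first. */
        if (!con->is_boot)
            free(con->pbufs);    /* never free the shared boot buffer */
        con->pbufs = NULL;
    }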
1863 bool nbcon_device_try_acquire(struct console *con) in nbcon_device_try_acquire() argument
1865 struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt); in nbcon_device_try_acquire()
1870 ctxt->console = con; in nbcon_device_try_acquire()
1887 void nbcon_device_release(struct console *con) in nbcon_device_release() argument
1889 struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt); in nbcon_device_release()
1906 if (console_is_usable(con, console_srcu_read_flags(con), true) && in nbcon_device_release()
1908 prb_read_valid(prb, nbcon_seq_read(con), NULL)) { in nbcon_device_release()
1914 __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb)); in nbcon_device_release()
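nbcon_device_try_acquire() and nbcon_device_release() let a driver take nbcon ownership around direct hardware access; on release, any records stored in the meantime are flushed through __nbcon_atomic_flush_pending_con() if the console is usable. A hedged sketch of that bracket-the-hardware-access pattern (hypothetical demo_* names, not the in-tree driver wrappers):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct demo_console {
        _Atomic int owned;      /* nonzero while some context owns the console */
        uint64_t seq;           /* next record this console will print */
        uint64_t stored_seq;    /* newest record stored in the ring buffer */
    };

    static bool demo_device_try_acquire(struct demo_console *con)
    {
        int expected = 0;

        /* Take ownership only if nobody holds it; never spin or hand over here. */
        return atomic_compare_exchange_strong(&con->owned, &expected, 1);
    }

    static void demo_device_release(struct demo_console *con)
    {
        atomic_store(&con->owned, 0);

        /* Records may have arrived while the driver held the hardware: catch up now. */
        while (con->seq < con->stored_seq)
            con->seq++;    /* the real code emits each pending record here */
    }

    /* Typical usage: bracket direct hardware pokes with acquire/release. */
    static void demo_touch_hardware(struct demo_console *con)
    {
        if (!demo_device_try_acquire(con))
            return;
        /* ... reprogram the device ... */
        demo_device_release(con);
    }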
1945 bool nbcon_kdb_try_acquire(struct console *con, in nbcon_kdb_try_acquire() argument
1951 ctxt->console = con; in nbcon_kdb_try_acquire()