Lines Matching +full:low +full:- +full:side (excerpts from kernel/context_tracking.c, listed with their source line numbers and enclosing function)

1 // SPDX-License-Identifier: GPL-2.0-only
13 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
41 /* Record the current task on exiting RCU-tasks (dyntick-idle entry). */
45 WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id()); in rcu_task_exit()
49 /* Record no current task on entering RCU-tasks (dyntick-idle exit). */
53 WRITE_ONCE(current->rcu_tasks_idle_cpu, -1); in rcu_task_enter()
62 current->trc_reader_special.b.need_mb = true; in rcu_task_trace_heavyweight_enter()
71 current->trc_reader_special.b.need_mb = false; in rcu_task_trace_heavyweight_exit()
86 * CPUs seeing atomic_add_return() must see prior RCU read-side in ct_kernel_exit_state()
107 * and we also must force ordering with the next RCU read-side in ct_kernel_enter_state()
118 * idle loop or adaptive-tickless usermode execution.
120 * We crowbar the ->nmi_nesting field to zero to allow for
129 WRITE_ONCE(ct->nmi_nesting, 0); in ct_kernel_exit()
134 ct->nesting--; in ct_kernel_exit()
145 instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_kernel_exit()
148 WRITE_ONCE(ct->nesting, 0); /* Avoid irq-access tearing. */ in ct_kernel_exit()
157 * idle loop or adaptive-tickless usermode execution.
159 * We crowbar the ->nmi_nesting field to CT_NESTING_IRQ_NONIDLE to
173 ct->nesting++; in ct_kernel_enter()
183 instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_kernel_enter()
187 WRITE_ONCE(ct->nesting, 1); in ct_kernel_enter()
189 WRITE_ONCE(ct->nmi_nesting, CT_NESTING_IRQ_NONIDLE); in ct_kernel_enter()
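The ct_kernel_exit()/ct_kernel_enter() excerpts above describe paired bookkeeping: exit crowbars ->nmi_nesting to zero, lets nested calls just decrement ->nesting, and only the outermost exit marks the CPU RCU-idle; enter mirrors this, counting ->nesting back up and, on the outermost entry, marking the CPU non-idle and crowbarring ->nmi_nesting to the CT_NESTING_IRQ_NONIDLE sentinel. Below is a minimal userspace model of that counting scheme; the names, the sentinel value, and the boolean watching flag are illustrative stand-ins, and the real code's per-CPU state, memory ordering, and instrumentation are omitted.

#include <stdbool.h>
#include <stdio.h>

#define NESTING_IRQ_NONIDLE  (1L << 40)   /* stand-in for CT_NESTING_IRQ_NONIDLE */

static long nesting;          /* models ct->nesting (process-level nesting) */
static long nmi_nesting;      /* models ct->nmi_nesting (irq/NMI nesting) */
static bool rcu_watching = true;

static void model_kernel_exit(void)
{
	nmi_nesting = 0;                         /* "crowbar" the irq/NMI count first */
	if (nesting != 1) {
		nesting--;                       /* still nested: just count down */
		return;
	}
	nesting = 0;
	rcu_watching = false;                    /* outermost exit: CPU becomes RCU-idle */
}

static void model_kernel_enter(void)
{
	if (nesting != 0) {
		nesting++;                       /* already non-idle: just count up */
		return;
	}
	rcu_watching = true;                     /* outermost entry: CPU leaves RCU-idle */
	nesting = 1;
	nmi_nesting = NESTING_IRQ_NONIDLE;       /* "crowbar" to the non-idle sentinel */
}

int main(void)
{
	nesting = 1;
	model_kernel_exit();                     /* e.g. entering the idle loop */
	printf("watching=%d nesting=%ld\n", rcu_watching, nesting);
	model_kernel_enter();                    /* back into the kernel */
	printf("watching=%d nmi_nesting=%ld\n", rcu_watching, nmi_nesting);
	return 0;
}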
194 * ct_nmi_exit - inform RCU of exit from NMI context
197 * RCU-idle period, update ct->state and ct->nmi_nesting
198 * to let the RCU grace-period handling know that the CPU is back to
199 * being RCU-idle.
210 * Check for ->nmi_nesting underflow and bad CT state. in ct_nmi_exit()
218 * If the nesting level is not 1, the CPU wasn't RCU-idle, so in ct_nmi_exit()
219 * leave it in non-RCU-idle state. in ct_nmi_exit()
222 trace_rcu_watching(TPS("--="), ct_nmi_nesting(), ct_nmi_nesting() - 2, in ct_nmi_exit()
224 WRITE_ONCE(ct->nmi_nesting, /* No store tearing. */ in ct_nmi_exit()
225 ct_nmi_nesting() - 2); in ct_nmi_exit()
230 /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ in ct_nmi_exit()
232 WRITE_ONCE(ct->nmi_nesting, 0); /* Avoid store tearing. */ in ct_nmi_exit()
235 instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_nmi_exit()
247 * ct_nmi_enter - inform RCU of entry to NMI context
249 * If the CPU was idle from RCU's viewpoint, update ct->state and
250 * ct->nmi_nesting to let the RCU grace-period handling know
268 * to mark non-idle and increment ->nmi_nesting by one. in ct_nmi_enter()
269 * Otherwise, increment ->nmi_nesting by two. This means in ct_nmi_enter()
270 * if ->nmi_nesting is equal to one, we are guaranteed in ct_nmi_enter()
271 * to be in the outermost NMI handler that interrupted an RCU-idle in ct_nmi_enter()
285 instrument_atomic_read(&ct->state, sizeof(ct->state)); in ct_nmi_enter()
287 instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_nmi_enter()
301 WRITE_ONCE(ct->nmi_nesting, /* Prevent store tearing. */ in ct_nmi_enter()
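The comments above spell out the trick behind ct_nmi_enter()/ct_nmi_exit(): the outermost NMI that interrupts an RCU-idle CPU first marks it non-idle and bumps ->nmi_nesting by one, while every other entry bumps it by two, so a count of exactly one on exit identifies the handler that must restore RCU-idleness. A toy model of that arithmetic follows; the names are illustrative and the real code's store-tearing and instrumentation concerns are left out.

#include <assert.h>
#include <stdbool.h>

static long nmi_nesting;          /* models ct->nmi_nesting */
static bool rcu_watching;         /* models "RCU is watching this CPU" */

static void model_nmi_enter(void)
{
	if (!rcu_watching) {
		rcu_watching = true;      /* mark non-idle ... */
		nmi_nesting += 1;         /* ... and remember it via the odd step */
	} else {
		nmi_nesting += 2;         /* nested or non-idle entry: even step */
	}
}

static void model_nmi_exit(void)
{
	assert(nmi_nesting > 0);
	if (nmi_nesting != 1) {
		nmi_nesting -= 2;         /* not the outermost idle-interrupting NMI */
		return;
	}
	nmi_nesting = 0;
	rcu_watching = false;             /* restore RCU-idleness */
}

int main(void)
{
	rcu_watching = false;             /* CPU starts out RCU-idle */
	model_nmi_enter();                /* outermost NMI: nmi_nesting == 1 */
	model_nmi_enter();                /* nested NMI:    nmi_nesting == 3 */
	model_nmi_exit();                 /* back to 1 */
	model_nmi_exit();                 /* back to 0, RCU-idle again */
	assert(nmi_nesting == 0 && !rcu_watching);
	return 0;
}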
307 * ct_idle_enter - inform RCU that current CPU is entering idle
309 * Enter idle mode, in other words, -leave- the mode in which RCU
310 * read-side critical sections can occur. (Though RCU read-side
325 * ct_idle_exit - inform RCU that current CPU is leaving idle
327 * Exit idle mode, in other words, -enter- the mode in which RCU
328 * read-side critical sections can occur.
338 ct_kernel_enter(false, CT_RCU_WATCHING - CT_STATE_IDLE); in ct_idle_exit()
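The offset passed by ct_idle_exit() above is worth unpacking. Assuming the layout this file uses for ct->state (the low bits hold a CT_STATE_* value with CT_STATE_KERNEL encoded as zero, and CT_RCU_WATCHING is the increment of the watching counter kept in the bits above), a single atomic add of CT_RCU_WATCHING - CT_STATE_IDLE bumps the counter and moves the state bits from IDLE back to KERNEL at the same time. The check below uses made-up stand-in constants to illustrate the arithmetic; they are not the kernel's definitions.

#include <assert.h>

enum { STATE_KERNEL = 0, STATE_IDLE = 1 };     /* stand-ins for CT_STATE_* */
#define RCU_WATCHING 4                          /* stand-in for CT_RCU_WATCHING */

int main(void)
{
	int state = 6 * RCU_WATCHING + STATE_IDLE;    /* idle CPU, counter at 6 */

	/* The single add performed on idle exit. */
	state += RCU_WATCHING - STATE_IDLE;

	assert(state % RCU_WATCHING == STATE_KERNEL); /* state bits back to KERNEL */
	assert(state / RCU_WATCHING == 7);            /* watching counter bumped */
	return 0;
}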
344 * ct_irq_enter - inform RCU that current CPU is entering irq away from idle
347 * idle mode, in other words, entering the mode in which read-side critical
372 * ct_irq_exit - inform RCU that current CPU is exiting irq towards idle
375 * idle mode, in other words, leaving the mode in which read-side critical
458 * __ct_user_enter - Inform the context tracking that the CPU is going
461 * @state: userspace context-tracking state to enter.
465 * instructions to execute won't use any RCU read side critical section
474 WARN_ON_ONCE(!current->mm); in __ct_user_enter()
480 if (ct->active) { in __ct_user_enter()
482 * At this stage, only low level arch entry code remains and in __ct_user_enter()
484 * any RCU read-side critical section until the next call to in __ct_user_enter()
510 * Special case if we only track user <-> kernel transitions for tickless in __ct_user_enter()
515 raw_atomic_set(&ct->state, state); in __ct_user_enter()
532 raw_atomic_set(&ct->state, state); in __ct_user_enter()
540 raw_atomic_add(state, &ct->state); in __ct_user_enter()
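The excerpts above update ct->state two different ways: raw_atomic_set() in the special cases where only the context state needs recording, and raw_atomic_add() where the same word also carries the RCU watching counter, whose bits must survive the transition. Because CT_STATE_KERNEL is encoded as zero, adding the user or guest state value on entry, and subtracting it again on exit as __ct_user_exit() does further below, flips only the state bits. The userspace model below illustrates that property; its constants and helpers are assumptions for illustration only.

#include <assert.h>
#include <stdatomic.h>

enum { STATE_KERNEL = 0, STATE_USER = 2 };     /* stand-ins for CT_STATE_* */
#define RCU_WATCHING 4                          /* stand-in for CT_RCU_WATCHING */

static atomic_int ct_state;                     /* models ct->state */

static void model_user_enter(int state)
{
	/* KERNEL is 0, so adding the target state only changes the state bits. */
	atomic_fetch_add(&ct_state, state);
}

static void model_user_exit(int state)
{
	/* The matching subtract restores KERNEL without touching the counter. */
	atomic_fetch_sub(&ct_state, state);
}

int main(void)
{
	atomic_store(&ct_state, 9 * RCU_WATCHING + STATE_KERNEL);

	model_user_enter(STATE_USER);
	assert(atomic_load(&ct_state) % RCU_WATCHING == STATE_USER);
	assert(atomic_load(&ct_state) / RCU_WATCHING == 9);   /* counter preserved */

	model_user_exit(STATE_USER);
	assert(atomic_load(&ct_state) % RCU_WATCHING == STATE_KERNEL);
	return 0;
}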
580 * user_enter_callable() - Unfortunate ASM callable version of user_enter() for
582 * static key from low level code.
597 * __ct_user_exit - Inform the context tracking that the CPU is
600 * @state: userspace context-tracking state being exited from.
603 * guest space before any use of RCU read side critical section. This
607 * This call supports re-entrancy. This way it can be called from any exception
618 if (ct->active) { in __ct_user_exit()
621 * run a RCU read side critical section anytime. in __ct_user_exit()
623 ct_kernel_enter(true, CT_RCU_WATCHING - state); in __ct_user_exit()
632 * Special case if we only track user <-> kernel transitions for tickless in __ct_user_exit()
637 raw_atomic_set(&ct->state, CT_STATE_KERNEL); in __ct_user_exit()
642 raw_atomic_set(&ct->state, CT_STATE_KERNEL); in __ct_user_exit()
650 raw_atomic_sub(state, &ct->state); in __ct_user_exit()
682 * user_exit_callable() - Unfortunate ASM callable version of user_exit() for
684 * static key from low level code.