
Searched refs:rcu_data (Results 1 – 10 of 10) sorted by relevance

/linux/kernel/rcu/
tree_nocb.h
20 static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp) in rcu_current_is_nocb_kthread()
92 static void rcu_nocb_bypass_lock(struct rcu_data *rdp) in rcu_nocb_bypass_lock()
110 static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp) in rcu_nocb_bypass_trylock()
119 static void rcu_nocb_bypass_unlock(struct rcu_data *rdp) in rcu_nocb_bypass_unlock()
130 static void rcu_nocb_lock(struct rcu_data *rdp) in rcu_nocb_lock()
142 static void rcu_nocb_unlock(struct rcu_data *rdp) in rcu_nocb_unlock()
154 static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp, in rcu_nocb_unlock_irqrestore()
166 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp) in rcu_lockdep_assert_cblist_protected()
193 static bool __wake_nocb_gp(struct rcu_data *rdp_gp, in __wake_nocb_gp()
194 struct rcu_data *rdp, in __wake_nocb_gp()
[all …]
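
The tree_nocb.h hits are thin wrappers that acquire and release a per-CPU lock on behalf of callers. A minimal sketch of that wrapper pattern follows; struct demo_data and its fields are hypothetical stand-ins, and the real rcu_nocb_lock()/rcu_nocb_unlock() additionally handle the case where a CPU's callbacks are not offloaded:

    #include <linux/spinlock.h>

    /* Hypothetical per-CPU state standing in for struct rcu_data. */
    struct demo_data {
            raw_spinlock_t lock;    /* guards the bookkeeping below */
            long n_queued;
    };

    /* Callers go through wrappers rather than open-coding the locking,
     * mirroring the shape of rcu_nocb_lock()/rcu_nocb_unlock_irqrestore(). */
    static void demo_lock_irqsave(struct demo_data *dp, unsigned long *flags)
    {
            raw_spin_lock_irqsave(&dp->lock, *flags);
    }

    static void demo_unlock_irqrestore(struct demo_data *dp, unsigned long flags)
    {
            raw_spin_unlock_irqrestore(&dp->lock, flags);
    }
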
tree.h
178 struct rcu_data {
254 struct rcu_data *nocb_toggling_rdp; /* rdp queued for (de-)offloading */
257 struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
481 static bool rcu_is_callbacks_kthread(struct rcu_data *rdp);
486 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
490 static bool wake_nocb_gp(struct rcu_data *rdp, bool force);
491 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
493 static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
495 static void __maybe_unused __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
497 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
[all …]
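
tree.h line 257 pads nocb_gp_rdp out to its own cacheline with ____cacheline_internodealigned_in_smp, so that writes from other CPUs do not false-share with the CPU-private fields around it. A hedged sketch of the same annotation on a hypothetical per-CPU struct:

    #include <linux/cache.h>
    #include <linux/percpu.h>

    struct demo_pcpu {
            unsigned long gp_seq;           /* written only by the owning CPU */
            struct demo_pcpu *leader        /* written by other CPUs: isolate it */
                    ____cacheline_internodealigned_in_smp;
    };

    static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu);
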
tree_plugin.h
16 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp) in rcu_rdp_is_offloaded()
33 rdp == this_cpu_ptr(&rcu_data)) || in rcu_rdp_is_offloaded()
162 static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_preempt_ctxt_queue()
300 if (__this_cpu_read(rcu_data.cpu_no_qs.b.norm)) { in rcu_qs()
302 __this_cpu_read(rcu_data.gp_seq), in rcu_qs()
304 __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false); in rcu_qs()
326 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_note_context_switch()
484 struct rcu_data *rdp; in rcu_preempt_deferred_qs_irqrestore()
494 rdp = this_cpu_ptr(&rcu_data); in rcu_preempt_deferred_qs_irqrestore()
599 return (__this_cpu_read(rcu_data.cpu_no_qs.b.exp) || in rcu_preempt_need_deferred_qs()
[all …]
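
The tree_plugin.h hits show the two current-CPU access idioms: __this_cpu_read()/__this_cpu_write() on a field of the per-CPU variable, and this_cpu_ptr() to get a pointer to the whole structure. A minimal sketch, assuming a hypothetical per-CPU struct demo_qs; the __this_cpu_*() forms require that the caller is already pinned to one CPU, e.g. by disabling preemption:

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    struct demo_qs {
            bool need_qs;
            unsigned long gp_seq;
    };

    static DEFINE_PER_CPU(struct demo_qs, demo_qs);

    static void demo_note_qs(void)
    {
            struct demo_qs *dqp;

            preempt_disable();
            /* Field access, as in rcu_qs() above. */
            if (__this_cpu_read(demo_qs.need_qs))
                    __this_cpu_write(demo_qs.need_qs, false);
            /* Pointer form, as in rcu_note_context_switch() above. */
            dqp = this_cpu_ptr(&demo_qs);
            dqp->gp_seq++;
            preempt_enable();
    }
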
tree.c
80 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
154 static void rcu_report_exp_rdp(struct rcu_data *rdp);
156 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
157 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
158 static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
243 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_get_n_cbs_cpu()
315 * @rdp: The rcu_data corresponding to the CPU for which to check EQS.
324 static bool rcu_watching_snap_stopped_since(struct rcu_data *rdp) in rcu_watching_snap_stopped_since()
[all …]
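
tree.c line 80 is the definition itself: rcu_data is a per-CPU variable, so per_cpu_ptr(&rcu_data, cpu) in rcu_get_n_cbs_cpu() yields CPU cpu's private instance. A sketch of that define-then-read-remotely pattern, with hypothetical names; READ_ONCE() is used because the owning CPU may update the field concurrently:

    #include <linux/compiler.h>
    #include <linux/percpu.h>

    struct demo_cbs {
            long n_cbs;
    };

    static DEFINE_PER_CPU_SHARED_ALIGNED(struct demo_cbs, demo_cbs);

    static long demo_get_n_cbs_cpu(int cpu)
    {
            struct demo_cbs *dcp = per_cpu_ptr(&demo_cbs, cpu);

            return READ_ONCE(dcp->n_cbs);
    }
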
tree_exp.h
237 struct rcu_data *rdp; in rcu_report_exp_cpu_mult()
246 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_report_exp_cpu_mult()
258 static void rcu_report_exp_rdp(struct rcu_data *rdp) in rcu_report_exp_rdp()
289 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id()); in exp_funnel_lock()
358 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in __sync_rcu_exp_select_node_cpus()
400 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in __sync_rcu_exp_select_node_cpus()
566 struct rcu_data *rdp; in synchronize_rcu_expedited_stall()
572 rdp = per_cpu_ptr(&rcu_data, cpu); in synchronize_rcu_expedited_stall()
618 struct rcu_data *rdp; in synchronize_rcu_expedited_wait()
632 rdp = per_cpu_ptr(&rcu_data, cpu); in synchronize_rcu_expedited_wait()
[all …]
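
The tree_exp.h hits repeatedly resolve per_cpu_ptr(&rcu_data, cpu) inside loops over CPUs. The generic shape of that loop, sketched with a hypothetical per-CPU counter:

    #include <linux/compiler.h>
    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/printk.h>

    static DEFINE_PER_CPU(unsigned long, demo_seq);

    static void demo_scan_cpus(void)
    {
            int cpu;

            for_each_online_cpu(cpu) {
                    unsigned long *seqp = per_cpu_ptr(&demo_seq, cpu);

                    pr_info("cpu %d: seq %lu\n", cpu, READ_ONCE(*seqp));
            }
    }
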
tree_stall.h
188 static void zero_cpu_stall_ticks(struct rcu_data *rdp) in zero_cpu_stall_ticks()
223 struct rcu_data *rdp; in rcu_iw_handler()
226 rdp = container_of(iwp, struct rcu_data, rcu_iw); in rcu_iw_handler()
421 static bool rcu_is_rcuc_kthread_starving(struct rcu_data *rdp, unsigned long *jp) in rcu_is_rcuc_kthread_starving()
445 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in print_cpu_stat_info()
486 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in print_cpu_stall_info()
549 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_check_gp_kthread_starvation()
677 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in print_cpu_stall()
737 static void check_cpu_stall(struct rcu_data *rdp) in check_cpu_stall()
903 struct rcu_data *rdp; in show_rcu_gp_kthreads()
[all …]
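
tree_stall.h line 226 recovers the enclosing rcu_data from a pointer to its embedded rcu_iw member via container_of(). The same idiom, sketched with a hypothetical outer structure:

    #include <linux/container_of.h>
    #include <linux/irq_work.h>
    #include <linux/smp.h>

    struct demo_outer {
            int last_cpu;
            struct irq_work iw;     /* embedded, like rcu_data.rcu_iw */
    };

    /* The irq_work core hands us only &outer->iw; recover the outer struct. */
    static void demo_iw_handler(struct irq_work *iwp)
    {
            struct demo_outer *dop = container_of(iwp, struct demo_outer, iw);

            dop->last_cpu = smp_processor_id();
    }
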
/linux/Documentation/RCU/Design/Data-Structures/
Data-Structures.rst
29 to 16 ``rcu_data`` structures associated with it, so that there are
30 ``NR_CPUS`` number of ``rcu_data`` structures, one for each possible CPU.
42 Quiescent states are recorded by the per-CPU ``rcu_data`` structures,
124 ``rcu_head`` structures, which are queued on ``rcu_data`` structures
140 #. The fields in ``rcu_data`` are private to the corresponding CPU,
155 ``rcu_node`` and ``rcu_data`` structures, tracks grace periods,
173 #. ``rcu_data``: This per-CPU structure is the focus of quiescent-state
190 and ``rcu_data`` data structures.
197 between the ``rcu_node`` and ``rcu_data`` structures, tracks grace
206 Relationship to rcu_node and rcu_data Structures
[all …]
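
The Data-Structures.rst passage describes a combining tree: each possible CPU owns one rcu_data, up to 16 of which report into one leaf rcu_node, which in turn reports toward the root. A purely illustrative sketch of that leaf association, with hypothetical types rather than the kernel's actual layout:

    struct demo_node {
            unsigned long qsmask;           /* children still owing a quiescent state */
            struct demo_node *parent;       /* NULL at the root */
    };

    struct demo_cpu_data {
            struct demo_node *mynode;       /* the leaf this CPU reports into */
    };
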
/linux/Documentation/RCU/
rcubarrier.rst
238 4 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
246 Lines 3 and 4 locate RCU's internal per-CPU rcu_data structure,
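
The quoted rcubarrier.rst snippet uses the lvalue form per_cpu(rcu_data, cpu) and takes its address; &per_cpu(v, cpu) and per_cpu_ptr(&v, cpu) name the same object. A one-function sketch with a hypothetical variable:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(int, demo_var);

    static int *demo_get(int cpu)
    {
            return &per_cpu(demo_var, cpu); /* same as per_cpu_ptr(&demo_var, cpu) */
    }
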
/linux/Documentation/RCU/Design/Memory-Ordering/
Tree-RCU-Memory-Ordering.rst
205 4 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
/linux/tools/memory-model/Documentation/
simple.txt
127 within its instance of the per-CPU rcu_data structure, and then uses data