Lines matching refs:rcu_data
Each entry below pairs the source line number with the matching line; a short per-CPU accessor sketch follows the listing.
80 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
154 static void rcu_report_exp_rdp(struct rcu_data *rdp);
156 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
157 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
158 static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
243 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
315 * @rdp: The rcu_data corresponding to the CPU for which to check EQS.
324 static bool rcu_watching_snap_stopped_since(struct rcu_data *rdp, int snap)
374 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
520 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
586 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
649 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
697 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
698 !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
706 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
758 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
764 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
768 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
783 static int rcu_watching_snap_save(struct rcu_data *rdp)
815 static int rcu_watching_snap_recheck(struct rcu_data *rdp)
952 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
963 * @rdp: The rcu_data corresponding to the CPU from which to start.
976 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
1037 /* Push furthest requested GP to leaf node and rcu_data structure. */
1054 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1135 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1177 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1182 struct rcu_data *rdp)
1211 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1235 struct rcu_data *rdp)
1260 * Update CPU-local rcu_data state to record the beginnings and ends of
1265 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1314 static void note_gp_changes(struct rcu_data *rdp)
1801 struct rcu_data *rdp;
1923 rdp = this_cpu_ptr(&rcu_data);
2107 struct rcu_data *rdp;
2148 rdp = this_cpu_ptr(&rcu_data);
2156 rdp = per_cpu_ptr(&rcu_data, cpu);
2175 rdp = this_cpu_ptr(&rcu_data);
2389 * Record a quiescent state for the specified CPU to that CPU's rcu_data
2393 rcu_report_qs_rdp(struct rcu_data *rdp)
2443 * is not yet aware, and if so, set up local rcu_data state for it.
2448 rcu_check_quiescent_state(struct rcu_data *rdp)
2490 static void rcu_do_batch(struct rcu_data *rdp)
2652 WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
2653 __this_cpu_write(rcu_data.last_sched_clock, j);
2657 raw_cpu_inc(rcu_data.ticks_this_gp);
2659 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2665 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2684 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2714 struct rcu_data *rdp;
2717 rdp = per_cpu_ptr(&rcu_data, cpu);
2753 rnp = raw_cpu_read(rcu_data.mynode);
2790 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2859 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2860 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2862 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2881 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2886 return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2896 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2897 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2898 unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2928 .store = &rcu_data.rcu_cpu_kthread_task,
2944 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2952 static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func)
2968 static void call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
3024 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
3047 static void check_cb_ovld(struct rcu_data *rdp)
3066 struct rcu_data *rdp;
3088 rdp = this_cpu_ptr(&rcu_data);
3261 * the rcu_data structure is to permit this code to be extracted from
4117 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
4194 struct rcu_data *rdp;
4199 rdp = this_cpu_ptr(&rcu_data);
4412 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4493 static void rcu_barrier_entrain(struct rcu_data *rdp)
4533 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4556 struct rcu_data *rdp;
4595 rdp = per_cpu_ptr(&rcu_data, cpu);
4637 rdp = per_cpu_ptr(&rcu_data, cpu);
4735 * Is the CPU corresponding to the specified rcu_data structure online
4739 static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
4746 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4768 struct rcu_data *rdp;
4774 rdp = this_cpu_ptr(&rcu_data);
4878 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4969 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
5028 struct rcu_data *rdp;
5032 rdp = per_cpu_ptr(&rcu_data, cpu);
5077 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
5089 struct rcu_data *rdp;
5092 rdp = per_cpu_ptr(&rcu_data, cpu);
5124 struct rcu_data *rdp;
5129 rdp = per_cpu_ptr(&rcu_data, cpu);
5182 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
5222 struct rcu_data *my_rdp;
5224 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
5238 my_rdp = this_cpu_ptr(&rcu_data);
5292 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
5308 struct rcu_data *rdp;
5311 rdp = per_cpu_ptr(&rcu_data, cpu);
5358 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
5502 per_cpu_ptr(&rcu_data, i)->mynode = rnp;
5503 per_cpu_ptr(&rcu_data, i)->barrier_head.next =
5504 &per_cpu_ptr(&rcu_data, i)->barrier_head;
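The matches above cycle through a small set of per-CPU accessor idioms on the rcu_data variable: per_cpu_ptr() for a named CPU's instance, this_cpu_ptr() for the running CPU's instance, and the field-granular raw_cpu_*/__this_cpu_* helpers that read or write one member without ever forming a pointer. The following is a minimal sketch of the same idioms, using a hypothetical struct demo_data and demo_accessors() rather than the real RCU code:

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical per-CPU structure standing in for struct rcu_data. */
struct demo_data {
        unsigned long ticks;    /* cf. rcu_data.ticks_this_gp (line 2657) */
        bool urgent;            /* cf. rcu_data.rcu_urgent_qs (line 2659) */
};

/* One instance per possible CPU, as DEFINE_PER_CPU_SHARED_ALIGNED
 * does for rcu_data at line 80 above (minus the cacheline alignment). */
static DEFINE_PER_CPU(struct demo_data, demo_data);

static void demo_accessors(int cpu)
{
        struct demo_data *dp;

        /* A specific CPU's instance: per_cpu_ptr(&var, cpu), as at
         * lines 243, 2717, and 4533 above.  Safe from any CPU, so
         * cross-CPU stores use WRITE_ONCE() against concurrent readers. */
        dp = per_cpu_ptr(&demo_data, cpu);
        WRITE_ONCE(dp->urgent, true);

        /* The running CPU's instance: this_cpu_ptr(&var), as at lines
         * 586, 1923, and 3088.  Preemption must be off for the pointer
         * to keep matching the CPU this task is running on. */
        preempt_disable();
        dp = this_cpu_ptr(&demo_data);
        dp->ticks++;

        /* Field-granular accessors touch one member directly, as at
         * lines 374, 2657, and 2859; the __this_cpu_* forms assume the
         * caller has already disabled preemption, while the raw_cpu_*
         * forms skip that debug check. */
        __this_cpu_write(demo_data.urgent, false);
        preempt_enable();
}

The split between whole-structure pointers and field accessors mirrors the listing itself: hot paths such as the scheduler-tick handling (lines 2652-2665) touch single fields with raw_cpu_*/__this_cpu_* operations, while code that walks several members first materializes an rdp pointer with this_cpu_ptr() or per_cpu_ptr().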