Lines matching refs:rnp — references to the struct rcu_node pointer rnp in the kernel's tree-based RCU implementation. The number at the start of each matched line is its line number in the source file.
150 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
152 static struct task_struct *rcu_boost_task(struct rcu_node *rnp);
156 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
768 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
770 raw_lockdep_assert_held_rcu_node(rnp);
772 rnp->gp_seq))
774 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
775 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
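The rcu_gpnum_ovf() fragment above (lines 768-775) guards against ->gp_seq counter wrap by comparing free-running sequence numbers circularly rather than numerically. A minimal user-space sketch of that wrap-safe comparison idiom, in the spirit of the kernel's ULONG_CMP_LT() but reimplemented here with illustrative names, not the kernel macro itself:

```c
#include <stdio.h>
#include <limits.h>

/* Wrap-safe "a < b" for free-running unsigned counters: the subtraction is
 * done modulo ULONG_MAX+1 and the sign of the result decides the ordering. */
static int ulong_cmp_lt(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

int main(void)
{
	unsigned long gp_seq = 8;		/* node's current sequence   */
	unsigned long old = gp_seq - 12;	/* wraps below zero          */

	/* Even though 'old' wrapped to a huge value, the circular compare
	 * still sees it as older than gp_seq. */
	printf("old=%lu gp_seq=%lu lt=%d\n", old, gp_seq,
	       ulong_cmp_lt(old, gp_seq));

	/* The overflow check above asks whether the CPU's snapshot has
	 * fallen more than ULONG_MAX/4 behind the node's ->gp_seq. */
	unsigned long snap = gp_seq - ULONG_MAX / 4 - 1;
	printf("wrapped? %d\n", ulong_cmp_lt(snap + ULONG_MAX / 4, gp_seq));
	return 0;
}
```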
790 * rnp locking tree since rcu_gp_init() and up to the current leaf rnp
819 struct rcu_node *rnp = rdp->mynode;
831 rcu_gpnum_ovf(rnp, rdp);
857 __func__, rnp->grplo, rnp->grphi, rnp->level,
858 (long)rnp->gp_seq, (long)rnp->completedqs);
859 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
922 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
923 (rnp->ffmask & rdp->grpmask)) {
925 rdp->rcu_iw_gp_seq = rnp->gp_seq;
952 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
955 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
956 gp_seq_req, rnp->level,
957 rnp->grplo, rnp->grphi, s);
980 struct rcu_node *rnp;
993 for (rnp = rnp_start; 1; rnp = rnp->parent) {
994 if (rnp != rnp_start)
995 raw_spin_lock_rcu_node(rnp);
996 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
997 rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
998 (rnp != rnp_start &&
999 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
1000 trace_rcu_this_gp(rnp, rdp, gp_seq_req,
1004 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
1005 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
1016 if (rnp != rnp_start && rnp->parent != NULL)
1017 raw_spin_unlock_rcu_node(rnp);
1018 if (!rnp->parent)
1024 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1027 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1031 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1038 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1039 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1040 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1042 if (rnp != rnp_start)
1043 raw_spin_unlock_rcu_node(rnp);
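The rcu_start_this_gp() loop above (lines 993-1043) is a funnel walk: starting at a leaf, it takes each node's lock in turn on the way toward the root and stops as soon as some node already records the requested grace period; only a request that reaches the root with news triggers a new grace period. A simplified, lock-free user-space sketch of that walk over a parent-linked tree (all structure and function names here are illustrative, not kernel APIs):

```c
#include <stdio.h>

/* Toy rcu_node: only the fields the funnel walk needs. */
struct node {
	unsigned long gp_seq_needed;	/* furthest GP anyone asked this node for */
	struct node *parent;		/* NULL at the root */
};

/* Walk leaf -> root, recording the request at each level.  Stop early if a
 * node already covers the request; return 1 only if the request reached the
 * root and the root had not yet seen it (i.e. a new GP must be started).
 * The kernel version additionally locks each node on the way up. */
static int funnel_record(struct node *leaf, unsigned long gp_seq_req)
{
	struct node *np;

	for (np = leaf; np; np = np->parent) {
		if (np->gp_seq_needed >= gp_seq_req)
			return 0;	/* someone else already asked */
		np->gp_seq_needed = gp_seq_req;
	}
	return 1;			/* reached the root: start a GP */
}

int main(void)
{
	struct node root = { .gp_seq_needed = 4, .parent = NULL };
	struct node leaf = { .gp_seq_needed = 4, .parent = &root };

	printf("first request:  start GP? %d\n", funnel_record(&leaf, 8));
	printf("second request: start GP? %d\n", funnel_record(&leaf, 8));
	return 0;
}
```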
1051 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1056 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1058 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1059 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1133 * The caller must hold rnp->lock with interrupts disabled.
1135 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1141 raw_lockdep_assert_held_rcu_node(rnp);
1161 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1181 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1194 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1195 needwake = rcu_accelerate_cbs(rnp, rdp);
1196 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1209 * The caller must hold rnp->lock with interrupts disabled.
1211 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1214 raw_lockdep_assert_held_rcu_node(rnp);
1224 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1227 return rcu_accelerate_cbs(rnp, rdp);
1234 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1238 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1241 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1242 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1243 raw_spin_unlock_rcu_node(rnp);
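rcu_accelerate_cbs() and rcu_advance_cbs() above (lines 1135-1243) manage per-CPU callbacks tagged with the grace-period number they must wait for: "accelerate" assigns a number to newly queued callbacks, "advance" moves callbacks whose number has completed to the done segment. A compact user-space model of that bookkeeping, assuming a plain array instead of the kernel's rcu_segcblist (everything below is a toy, not the kernel data structure):

```c
#include <stdio.h>

#define MAX_CBS 8

/* Toy callback list: each slot holds the grace-period number the callback
 * waits for; 0 means "not yet assigned" (freshly queued). */
struct toy_cblist {
	unsigned long wait_gp[MAX_CBS];
	int n;
};

/* "Accelerate": give every unassigned callback the next GP it can wait for
 * (current sequence + 1 in this toy model). */
static void accelerate(struct toy_cblist *cl, unsigned long cur_gp)
{
	for (int i = 0; i < cl->n; i++)
		if (!cl->wait_gp[i])
			cl->wait_gp[i] = cur_gp + 1;
}

/* "Advance": invoke (here: print) every callback whose GP has completed. */
static void advance(struct toy_cblist *cl, unsigned long completed_gp)
{
	for (int i = 0; i < cl->n; i++)
		if (cl->wait_gp[i] && cl->wait_gp[i] <= completed_gp) {
			printf("callback %d done (waited for GP %lu)\n",
			       i, cl->wait_gp[i]);
			cl->wait_gp[i] = 0;
			/* a real segcblist would move it to the DONE segment */
		}
}

int main(void)
{
	struct toy_cblist cl = { .wait_gp = {0}, .n = 3 };

	accelerate(&cl, 5);	/* callbacks now wait for GP 6 */
	advance(&cl, 5);	/* nothing ready yet           */
	advance(&cl, 6);	/* all three become ready      */
	return 0;
}
```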
1265 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1271 raw_lockdep_assert_held_rcu_node(rnp);
1273 if (rdp->gp_seq == rnp->gp_seq)
1277 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1280 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1285 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1287 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1291 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1298 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1299 need_qs = !!(rnp->qsmask & rdp->grpmask);
1304 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
1305 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1306 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1310 rcu_gpnum_ovf(rnp, rdp);
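__note_gp_changes() above (lines 1265-1310) compares the CPU's cached ->gp_seq against its leaf node's to detect both the end of an old grace period and the start of a new one. The kernel packs a counter and an "in progress" state into one sequence word; the sketch below models that encoding and the two tests in simplified form (names are borrowed from the rcu_seq_*() helpers but the code is a reimplementation, and the wrap-safe comparison is omitted for brevity):

```c
#include <stdio.h>

/* Simplified grace-period sequence encoding: the low two bits carry the
 * "grace period in progress" state, the remaining bits count periods. */
#define SEQ_CTR_SHIFT	2
#define SEQ_STATE_MASK	((1UL << SEQ_CTR_SHIFT) - 1)

static unsigned long seq_start(unsigned long s) { return s + 1; }
static unsigned long seq_end(unsigned long s)   { return (s | SEQ_STATE_MASK) + 1; }
static unsigned long seq_state(unsigned long s) { return s & SEQ_STATE_MASK; }

/* Has a full grace period elapsed since the snapshot 'old'? */
static int seq_completed_gp(unsigned long old, unsigned long cur)
{
	return old < (cur & ~SEQ_STATE_MASK);
}

/* Has a new grace period started since the snapshot 'old'? */
static int seq_new_gp(unsigned long old, unsigned long cur)
{
	return ((old + SEQ_STATE_MASK) & ~SEQ_STATE_MASK) < cur;
}

int main(void)
{
	unsigned long rnp_seq = 0, rdp_seq;

	rdp_seq = rnp_seq;		/* CPU notes the idle state */
	rnp_seq = seq_start(rnp_seq);	/* a grace period starts    */
	printf("new gp seen: %d, state: %lu\n",
	       seq_new_gp(rdp_seq, rnp_seq), seq_state(rnp_seq));

	rdp_seq = rnp_seq;		/* CPU catches up ("cpustart") */
	rnp_seq = seq_end(rnp_seq);	/* the grace period ends       */
	printf("gp completed: %d, counter now %lu\n",
	       seq_completed_gp(rdp_seq, rnp_seq), rnp_seq >> SEQ_CTR_SHIFT);
	return 0;
}
```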
1318 struct rcu_node *rnp;
1321 rnp = rdp->mynode;
1322 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1324 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1328 needwake = __note_gp_changes(rnp, rdp);
1329 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1406 struct rcu_node *rnp = rcu_get_root();
1409 raw_lockdep_assert_held_rcu_node(rnp);
1422 struct rcu_node *rnp = rcu_get_root();
1425 raw_lockdep_assert_held_rcu_node(rnp);
1444 struct rcu_node *rnp = rcu_get_root();
1449 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1453 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1461 struct rcu_node *rnp = rcu_get_root();
1466 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1470 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1802 struct rcu_node *rnp = rcu_get_root();
1806 raw_spin_lock_irq_rcu_node(rnp);
1809 raw_spin_unlock_irq_rcu_node(rnp);
1819 raw_spin_unlock_irq_rcu_node(rnp);
1831 raw_spin_unlock_irq_rcu_node(rnp);
1857 rcu_for_each_leaf_node(rnp) {
1860 raw_spin_lock_rcu_node(rnp);
1861 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1862 !rnp->wait_blkd_tasks) {
1864 raw_spin_unlock_rcu_node(rnp);
1871 oldmask = rnp->qsmaskinit;
1872 rnp->qsmaskinit = rnp->qsmaskinitnext;
1875 if (!oldmask != !rnp->qsmaskinit) {
1877 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1878 rcu_init_new_rnp(rnp);
1879 } else if (rcu_preempt_has_tasks(rnp)) {
1880 rnp->wait_blkd_tasks = true; /* blocked tasks */
1882 rcu_cleanup_dead_rnp(rnp);
1894 if (rnp->wait_blkd_tasks &&
1895 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1896 rnp->wait_blkd_tasks = false;
1897 if (!rnp->qsmaskinit)
1898 rcu_cleanup_dead_rnp(rnp);
1901 raw_spin_unlock_rcu_node(rnp);
1920 rcu_for_each_node_breadth_first(rnp) {
1922 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1924 rcu_preempt_check_blocked_tasks(rnp);
1925 rnp->qsmask = rnp->qsmaskinit;
1926 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1927 if (rnp == rdp->mynode)
1928 (void)__note_gp_changes(rnp, rdp);
1929 rcu_preempt_boost_start_gp(rnp);
1930 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1931 rnp->level, rnp->grplo,
1932 rnp->grphi, rnp->qsmask);
1934 mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1935 rnp->rcu_gp_init_mask = mask;
1936 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1937 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1939 raw_spin_unlock_irq_rcu_node(rnp);
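The rcu_gp_init() sweep above (lines 1920-1939) visits the node tree breadth first, root first, copying each node's online mask into ->qsmask and publishing the new ->gp_seq, so a parent is always armed before any of its children can start reporting into it. A user-space sketch of that initialization over a tiny two-level tree (the layout and field names are simplified):

```c
#include <stdio.h>

struct tnode {
	unsigned long qsmaskinit;	/* children/CPUs that are online */
	unsigned long qsmask;		/* still owe a quiescent state   */
	unsigned long gp_seq;
	int level;
};

int main(void)
{
	/* Breadth-first array layout, root first, like rcu_state.level[]. */
	struct tnode tree[] = {
		{ .qsmaskinit = 0x3, .level = 0 },	/* root: two leaves */
		{ .qsmaskinit = 0x5, .level = 1 },	/* leaf 0: CPUs 0,2 */
		{ .qsmaskinit = 0x2, .level = 1 },	/* leaf 1: CPU 1    */
	};
	unsigned long new_gp_seq = 5;	/* sequence chosen for this GP */

	/* Root-to-leaf order guarantees a parent has its qsmask set before
	 * any of its children can report a quiescent state upward. */
	for (int i = 0; i < 3; i++) {
		tree[i].qsmask = tree[i].qsmaskinit;
		tree[i].gp_seq = new_gp_seq;
		printf("level %d node %d: qsmask=%#lx gp_seq=%lu\n",
		       tree[i].level, i, tree[i].qsmask, tree[i].gp_seq);
	}
	return 0;
}
```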
1957 struct rcu_node *rnp = rcu_get_root();
1969 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1981 struct rcu_node *rnp = rcu_get_root();
2005 raw_spin_lock_irq_rcu_node(rnp);
2007 raw_spin_unlock_irq_rcu_node(rnp);
2020 struct rcu_node *rnp = rcu_get_root();
2052 * has ended, leave the loop. The rcu_preempt_blocked_readers_cgp(rnp) check
2059 if (!READ_ONCE(rnp->qsmask) &&
2060 !rcu_preempt_blocked_readers_cgp(rnp))
2108 struct rcu_node *rnp = rcu_get_root();
2112 raw_spin_lock_irq_rcu_node(rnp);
2127 raw_spin_unlock_irq_rcu_node(rnp);
2140 rcu_for_each_node_breadth_first(rnp) {
2141 raw_spin_lock_irq_rcu_node(rnp);
2142 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2143 dump_blkd_tasks(rnp, 10);
2144 WARN_ON_ONCE(rnp->qsmask);
2145 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2146 if (!rnp->parent)
2149 if (rnp == rdp->mynode)
2150 needgp = __note_gp_changes(rnp, rdp) || needgp;
2152 needgp = rcu_future_gp_cleanup(rnp) || needgp;
2154 if (rcu_is_leaf_node(rnp))
2155 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2157 check_cb_ovld_locked(rdp, rnp);
2159 sq = rcu_nocb_gp_get(rnp);
2160 raw_spin_unlock_irq_rcu_node(rnp);
2166 rnp = rcu_get_root();
2167 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2176 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2177 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2183 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2208 raw_spin_unlock_irq_rcu_node(rnp);
2262 * period. Note that the caller must hold rnp->lock, which is released
2282 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
2289 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2291 __releases(rnp->lock)
2296 raw_lockdep_assert_held_rcu_node(rnp);
2300 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2306 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2310 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2311 rcu_preempt_blocked_readers_cgp(rnp));
2312 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2313 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2314 mask, rnp->qsmask, rnp->level,
2315 rnp->grplo, rnp->grphi,
2316 !!rnp->gp_tasks);
2317 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2320 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2323 rnp->completedqs = rnp->gp_seq;
2324 mask = rnp->grpmask;
2325 if (rnp->parent == NULL) {
2331 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2332 rnp_c = rnp;
2333 rnp = rnp->parent;
2334 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2343 rcu_report_qs_rsp(flags); /* releases rnp->lock. */
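rcu_report_qs_rnp() above (lines 2289-2343) clears the reporting CPU's bit in its leaf's ->qsmask and, whenever a node's mask drops to zero with no blocked readers, moves to the parent and clears this node's ->grpmask bit there, repeating until either some bits remain or the root empties and the grace period can end. A simplified user-space sketch of that upward propagation (toy structures, no locking and no blocked-reader handling):

```c
#include <stdio.h>

struct qnode {
	unsigned long qsmask;	/* children still owing a quiescent state */
	unsigned long grpmask;	/* this node's bit in its parent's qsmask  */
	struct qnode *parent;
};

/* Clear 'mask' in 'np' and keep walking up while nodes empty out.
 * Returns 1 when the root's mask reaches zero (the GP may end). */
static int report_qs(struct qnode *np, unsigned long mask)
{
	while (np) {
		np->qsmask &= ~mask;
		if (np->qsmask)
			return 0;	/* others still pending at this level */
		mask = np->grpmask;	/* propagate our bit to the parent    */
		np = np->parent;
	}
	return 1;			/* root emptied: GP can complete */
}

int main(void)
{
	struct qnode root  = { .qsmask = 0x3 };
	struct qnode leaf0 = { .qsmask = 0x5, .grpmask = 0x1, .parent = &root };
	struct qnode leaf1 = { .qsmask = 0x2, .grpmask = 0x2, .parent = &root };

	printf("cpu0 reports: end? %d\n", report_qs(&leaf0, 0x1));
	printf("cpu2 reports: end? %d\n", report_qs(&leaf0, 0x4));
	printf("cpu1 reports: end? %d\n", report_qs(&leaf1, 0x2));
	return 0;
}
```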
2349 * RCU grace period. The caller must hold the corresponding rnp->lock with
2354 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2355 __releases(rnp->lock)
2361 raw_lockdep_assert_held_rcu_node(rnp);
2363 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2364 rnp->qsmask != 0) {
2365 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2369 rnp->completedqs = rnp->gp_seq;
2370 rnp_p = rnp->parent;
2381 gps = rnp->gp_seq;
2382 mask = rnp->grpmask;
2383 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2397 struct rcu_node *rnp;
2400 rnp = rdp->mynode;
2401 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2402 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2412 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2417 if ((rnp->qsmask & mask) == 0) {
2418 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2432 WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp));
2436 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2437 /* ^^^ Released rnp->lock */
2521 * completion (materialized by rnp->gp_seq update) thanks to the
2688 struct rcu_node *rnp;
2692 rcu_for_each_leaf_node(rnp) {
2697 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2698 rcu_state.cbovldnext |= !!rnp->cbovldmask;
2699 if (rnp->qsmask == 0) {
2700 if (rcu_preempt_blocked_readers_cgp(rnp)) {
2706 rcu_initiate_boost(rnp, flags);
2707 /* rcu_initiate_boost() releases rnp->lock */
2710 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2713 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2727 /* Idle/offline CPUs, report (releases rnp->lock). */
2728 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2731 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2734 for_each_leaf_node_cpu_mask(rnp, cpu, rsmask)
2747 struct rcu_node *rnp;
2753 rnp = raw_cpu_read(rcu_data.mynode);
2754 for (; rnp != NULL; rnp = rnp->parent) {
2756 !raw_spin_trylock(&rnp->fqslock);
2761 rnp_old = rnp;
2763 /* rnp_old == rcu_get_root(), rnp == NULL. */
2791 struct rcu_node *rnp = rdp->mynode;
2814 rcu_accelerate_cbs_unlocked(rnp, rdp);
2818 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
3024 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
3026 raw_lockdep_assert_held_rcu_node(rnp);
3030 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
3032 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
3049 struct rcu_node *const rnp = rdp->mynode;
3053 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
3055 raw_spin_lock_rcu_node(rnp);
3056 check_cb_ovld_locked(rdp, rnp);
3057 raw_spin_unlock_rcu_node(rnp);
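check_cb_ovld_locked() and its caller above (lines 3024-3057) set or clear this CPU's bit in its leaf's ->cbovldmask depending on whether the CPU's callback count crossed a threshold, so that grace-period processing can react to callback flooding. A minimal sketch of the same set/clear pattern; the threshold value and names below are illustrative, not the kernel's tunables:

```c
#include <stdio.h>

#define CB_OVLD_THRESHOLD 10000		/* illustrative, not a kernel value */

struct leaf {
	unsigned long cbovldmask;	/* CPUs with too many pending callbacks */
};

static void check_cb_ovld(struct leaf *lp, unsigned long grpmask,
			  unsigned long n_cbs)
{
	if (n_cbs > CB_OVLD_THRESHOLD)
		lp->cbovldmask |= grpmask;	/* mark this CPU overloaded */
	else
		lp->cbovldmask &= ~grpmask;	/* back under the threshold */
}

int main(void)
{
	struct leaf lf = { .cbovldmask = 0 };

	check_cb_ovld(&lf, 0x1, 25000);	/* CPU 0 floods callbacks */
	check_cb_ovld(&lf, 0x2, 12);	/* CPU 1 is fine          */
	printf("cbovldmask=%#lx\n", lf.cbovldmask);

	check_cb_ovld(&lf, 0x1, 12);	/* CPU 0 drains its list  */
	printf("cbovldmask=%#lx\n", lf.cbovldmask);
	return 0;
}
```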
4087 struct rcu_node *rnp;
4117 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
4118 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
4174 struct rcu_node *rnp = rcu_get_root();
4181 rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
4195 struct rcu_node *rnp;
4200 rnp = rdp->mynode;
4201 raw_spin_lock_rcu_node(rnp); // irqs already disabled.
4208 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
4209 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4336 struct rcu_node *rnp = rcu_get_root();
4340 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
4413 struct rcu_node *rnp = rdp->mynode;
4449 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
4729 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
4731 return READ_ONCE(rnp->qsmaskinitnext);
4818 struct rcu_node *rnp = rnp_leaf;
4826 mask = rnp->grpmask;
4827 rnp = rnp->parent;
4828 if (!rnp)
4830 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4831 rnp->qsmaskinit &= ~mask;
4833 WARN_ON_ONCE(rnp->qsmask);
4834 if (rnp->qsmaskinit) {
4835 raw_spin_unlock_rcu_node(rnp);
4839 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4853 struct rcu_node *rnp = rnp_leaf;
4856 WARN_ON_ONCE(rnp->wait_blkd_tasks);
4858 mask = rnp->grpmask;
4859 rnp = rnp->parent;
4860 if (rnp == NULL)
4862 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4863 oldmask = rnp->qsmaskinit;
4864 rnp->qsmaskinit |= mask;
4865 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4897 static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
4902 int rnp_index = rnp - rcu_get_root();
4904 if (rnp->exp_kworker)
4910 rnp->grplo, rnp->grphi);
4913 WRITE_ONCE(rnp->exp_kworker, kworker);
4919 static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
4921 struct kthread_worker *kworker = READ_ONCE(rnp->exp_kworker);
4945 static void rcu_spawn_rnp_kthreads(struct rcu_node *rnp)
4948 mutex_lock(&rnp->kthread_mutex);
4949 rcu_spawn_one_boost_kthread(rnp);
4950 rcu_spawn_exp_par_gp_kworker(rnp);
4951 mutex_unlock(&rnp->kthread_mutex);
4970 struct rcu_node *rnp = rcu_get_root();
4973 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4978 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4992 rnp = rdp->mynode;
4993 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4994 rdp->gp_seq = READ_ONCE(rnp->gp_seq);
5002 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
5003 rcu_spawn_rnp_kthreads(rnp);
5016 * held, so the value of rnp->qsmaskinit will be stable.
5029 struct rcu_node *rnp;
5033 rnp = rdp->mynode;
5035 task_boost = rcu_boost_task(rnp);
5036 task_exp = rcu_exp_par_gp_task(rnp);
5048 mutex_lock(&rnp->kthread_mutex);
5049 mask = rcu_rnp_online_cpus(rnp);
5050 for_each_leaf_node_possible_cpu(rnp, cpu)
5051 if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
5067 mutex_unlock(&rnp->kthread_mutex);
5090 struct rcu_node *rnp;
5093 rnp = rdp->mynode;
5094 raw_spin_lock_irqsave_rcu_node(rnp, flags);
5095 rnp->ffmask |= rdp->grpmask;
5096 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
5125 struct rcu_node *rnp;
5134 rnp = rdp->mynode;
5139 raw_spin_lock_rcu_node(rnp);
5140 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
5142 newcpu = !(rnp->expmaskinitnext & mask);
5143 rnp->expmaskinitnext |= mask;
5147 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
5152 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
5159 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
5161 raw_spin_unlock_rcu_node(rnp);
5183 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
5198 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
5201 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
5204 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
5205 raw_spin_lock_irqsave_rcu_node(rnp, flags);
5207 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
5208 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
5293 struct rcu_node *rnp = rdp->mynode;
5295 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
5296 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
5309 struct rcu_node *rnp;
5312 rnp = rdp->mynode;
5313 raw_spin_lock_irqsave_rcu_node(rnp, flags);
5314 rnp->ffmask &= ~rdp->grpmask;
5315 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
5355 struct rcu_node *rnp;
5368 rnp = rcu_get_root();
5369 raw_spin_lock_irqsave_rcu_node(rnp, flags);
5374 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
5404 struct rcu_node *rnp;
5412 rcu_for_each_node_breadth_first(rnp)
5413 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
5435 struct rcu_node *rnp;
5454 rnp = rcu_state.level[i];
5455 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
5456 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
5457 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
5459 raw_spin_lock_init(&rnp->fqslock);
5460 lockdep_set_class_and_name(&rnp->fqslock,
5462 rnp->gp_seq = rcu_state.gp_seq;
5463 rnp->gp_seq_needed = rcu_state.gp_seq;
5464 rnp->completedqs = rcu_state.gp_seq;
5465 rnp->qsmask = 0;
5466 rnp->qsmaskinit = 0;
5467 rnp->grplo = j * cpustride;
5468 rnp->grphi = (j + 1) * cpustride - 1;
5469 if (rnp->grphi >= nr_cpu_ids)
5470 rnp->grphi = nr_cpu_ids - 1;
5472 rnp->grpnum = 0;
5473 rnp->grpmask = 0;
5474 rnp->parent = NULL;
5476 rnp->grpnum = j % levelspread[i - 1];
5477 rnp->grpmask = BIT(rnp->grpnum);
5478 rnp->parent = rcu_state.level[i - 1] +
5481 rnp->level = i;
5482 INIT_LIST_HEAD(&rnp->blkd_tasks);
5483 rcu_init_one_nocb(rnp);
5484 init_waitqueue_head(&rnp->exp_wq[0]);
5485 init_waitqueue_head(&rnp->exp_wq[1]);
5486 init_waitqueue_head(&rnp->exp_wq[2]);
5487 init_waitqueue_head(&rnp->exp_wq[3]);
5488 spin_lock_init(&rnp->exp_lock);
5489 mutex_init(&rnp->kthread_mutex);
5490 raw_spin_lock_init(&rnp->exp_poll_lock);
5491 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
5492 INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
5498 rnp = rcu_first_leaf_node();
5500 while (i > rnp->grphi)
5501 rnp++;
5502 per_cpu_ptr(&rcu_data, i)->mynode = rnp;
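rcu_init_one() above (lines 5435-5502) lays the nodes out level by level: each node covers a stride of CPUs via ->grplo/->grphi, gets a bit ->grpmask in its parent's masks, and finally every CPU's rcu_data is pointed at the leaf whose range contains it. A user-space sketch of that geometry computation for a small two-level configuration (the fanout and CPU count below are arbitrary, and the single-root case lets grpnum equal the leaf index):

```c
#include <stdio.h>

#define NR_CPUS	6
#define FANOUT	4	/* leaf fanout; gives two leaves for six CPUs */
#define NLEAVES	((NR_CPUS + FANOUT - 1) / FANOUT)

struct geo { int grplo, grphi, grpnum; unsigned long grpmask; };

int main(void)
{
	struct geo leaf[NLEAVES];
	int mynode[NR_CPUS];

	/* Carve the CPU space into per-leaf ranges, as rcu_init_one() does
	 * with cpustride, clamping the last leaf to the highest CPU. */
	for (int j = 0; j < NLEAVES; j++) {
		leaf[j].grplo = j * FANOUT;
		leaf[j].grphi = (j + 1) * FANOUT - 1;
		if (leaf[j].grphi >= NR_CPUS)
			leaf[j].grphi = NR_CPUS - 1;
		leaf[j].grpnum = j;		/* index within the parent */
		leaf[j].grpmask = 1UL << j;	/* bit in the root's masks */
		printf("leaf %d: CPUs %d-%d, grpmask=%#lx\n",
		       j, leaf[j].grplo, leaf[j].grphi, leaf[j].grpmask);
	}

	/* Point each CPU at its leaf, mirroring the ->mynode assignment. */
	for (int cpu = 0, j = 0; cpu < NR_CPUS; cpu++) {
		while (cpu > leaf[j].grphi)
			j++;
		mynode[cpu] = j;
		printf("cpu %d -> leaf %d\n", cpu, mynode[cpu]);
	}
	return 0;
}
```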
5632 struct rcu_node *rnp;
5636 rcu_for_each_node_breadth_first(rnp) {
5637 if (rnp->level != level) {
5640 level = rnp->level;
5642 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);