Lines matching refs:rcu_state — references to the rcu_state structure in the Linux kernel's tree-RCU implementation (kernel/rcu/tree.c); the number opening each line is that line's position in the source file.

83 static struct rcu_state rcu_state = {
84 .level = { &rcu_state.node[0] },
87 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
88 .barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
91 .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
92 .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
94 .srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
98 .nocb_mutex = __MUTEX_INITIALIZER(rcu_state.nocb_mutex),
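
Lines 83-98 statically initialize the singleton rcu_state, including the self-referential .level = { &rcu_state.node[0] } entry. That pattern is legal C because the address of a static object is an address constant. A minimal user-space sketch of the same idiom, with made-up types (struct state here is hypothetical, not the kernel's):

/* Minimal user-space model of a self-referential static initializer,
 * as used for rcu_state.level[0] = &rcu_state.node[0].  Types are
 * invented for illustration; only the initializer pattern matches. */
#include <stdio.h>

struct node { int qsmask; };

struct state {
	struct node node[4];     /* flattened tree of nodes */
	struct node *level[2];   /* pointer to first node of each level */
};

/* &state_obj.node[0] is an address constant, so it is legal in a
 * static initializer, just like &rcu_state.node[0] in the kernel. */
static struct state state_obj = {
	.level = { &state_obj.node[0] },
};

int main(void)
{
	printf("level[0] points at node[0]? %s\n",
	       state_obj.level[0] == &state_obj.node[0] ? "yes" : "no");
	return 0;
}
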
213 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
507 return READ_ONCE(rcu_state.gp_seq);
519 return rcu_state.expedited_sequence;
524 * Return the root node of the rcu_state structure.
528 return &rcu_state.node[0];
536 *flags = READ_ONCE(rcu_state.gp_flags);
537 *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
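
Lines 213-537 read the grace-period sequence number. gp_seq packs a small state field into its low bits and a counter above them, which is what rcu_seq_state() and rcu_seq_current() decode. A simplified user-space model of that encoding follows; the two-bit shift and the start/end arithmetic reflect one reading of kernel/rcu/rcu.h and should be treated as assumptions rather than quoted kernel code:

/* Simplified model of the gp_seq encoding: the low two bits carry
 * grace-period state, the remaining bits a wrapping counter. */
#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT  2
#define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

static unsigned long rcu_seq_state(unsigned long s) { return s & RCU_SEQ_STATE_MASK; }
static unsigned long rcu_seq_ctr(unsigned long s)   { return s >> RCU_SEQ_CTR_SHIFT; }
static void rcu_seq_start(unsigned long *sp) { *sp += 1; }  /* mark GP in progress */
static void rcu_seq_end(unsigned long *sp)   { *sp = (*sp | RCU_SEQ_STATE_MASK) + 1; }

int main(void)
{
	unsigned long gp_seq = 0;

	rcu_seq_start(&gp_seq);  /* state becomes nonzero: GP active */
	printf("ctr=%lu state=%lu\n", rcu_seq_ctr(gp_seq), rcu_seq_state(gp_seq));
	rcu_seq_end(&gp_seq);    /* state cleared, counter advanced */
	printf("ctr=%lu state=%lu\n", rcu_seq_ctr(gp_seq), rcu_seq_state(gp_seq));
	return 0;
}
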
777 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
809 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
861 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
862 time_after(jiffies, rcu_state.jiffies_resched) ||
863 rcu_state.cbovld)) {
867 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
881 rcu_state.cbovld)) {
894 if (time_after(jiffies, rcu_state.jiffies_resched)) {
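
Lines 861-894 compare jiffies against deadlines with time_after(), which stays correct across counter wraparound by comparing via signed subtraction rather than directly. A self-contained demonstration of why the naive comparison breaks at the wrap point and the kernel's idiom does not:

/* User-space model of the kernel's wrap-safe jiffies comparison.
 * time_after(a, b) is true iff a is after b, even across counter
 * wraparound, because the signed difference remains meaningful as
 * long as the two timestamps are under half the range apart. */
#include <stdio.h>

#define time_after(a, b)  ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long near_wrap = (unsigned long)-10;  /* jiffies close to wrap */
	unsigned long wrapped   = near_wrap + 20;      /* wrapped past zero */

	/* Naive "wrapped > near_wrap" is false; time_after() is not. */
	printf("naive: %d, time_after: %d\n",
	       wrapped > near_wrap, time_after(wrapped, near_wrap));
	return 0;
}
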
934 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
1007 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1008 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1009 if (!READ_ONCE(rcu_state.gp_kthread)) {
1013 trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1092 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1095 !READ_ONCE(rcu_state.gp_flags) || !t)
1097 WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1098 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1099 swake_up_one_online(&rcu_state.gp_wq);
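
Lines 1092-1099 implement the flag-then-wake handshake with the grace-period kthread: publish the wake time and sequence, then wake gp_wq; the sleeping side appears at lines 2210-2211. A user-space analogue using a mutex and condition variable (the names and the condvar machinery are illustrative only, not the kernel's swait implementation):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
static unsigned long   gp_flags;   /* models rcu_state.gp_flags */

static void *gp_kthread(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!gp_flags)                 /* models swait_event_idle_exclusive() */
		pthread_cond_wait(&wq, &lock);
	pthread_mutex_unlock(&lock);
	printf("GP kthread woke, flags=%#lx\n", gp_flags);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, gp_kthread, NULL);
	pthread_mutex_lock(&lock);
	gp_flags |= 0x1;                  /* models RCU_GP_FLAG_INIT */
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&wq);         /* models swake_up_one_online() */
	pthread_join(t, NULL);
	return 0;
}
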
1138 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1144 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1146 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1167 c = rcu_seq_snap(&rcu_state.gp_seq);
1261 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1277 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1344 !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1391 if (!rcu_seq_state(rcu_state.gp_seq_polled))
1392 rcu_seq_start(&rcu_state.gp_seq_polled);
1395 *snap = rcu_state.gp_seq_polled;
1409 if (*snap && *snap == rcu_state.gp_seq_polled) {
1410 rcu_seq_end(&rcu_state.gp_seq_polled);
1411 rcu_state.gp_seq_polled_snap = 0;
1412 rcu_state.gp_seq_polled_exp_snap = 0;
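
Lines 1391-1412 maintain gp_seq_polled so the polled grace-period interfaces can hand out cookies: take a snapshot guaranteed to lie beyond a full grace period, then later check whether the sequence has reached it. A user-space model of the cookie arithmetic; the exact rounding follows one reading of rcu_seq_snap()/rcu_seq_done() and is an assumption:

#include <stdio.h>

#define MASK 3UL  /* low bits = GP state, per the gp_seq encoding above */

static unsigned long seq_snap(unsigned long cur)
{
	/* Cookie for "a full grace period after now": round past the
	 * end of any in-progress GP, then one complete GP further. */
	return (cur + 2 * MASK + 1) & ~MASK;
}

static int seq_done(unsigned long cur, unsigned long cookie)
{
	return (long)(cur - cookie) >= 0;  /* wrap-safe "cur >= cookie" */
}

int main(void)
{
	unsigned long gp_seq = 4;              /* idle: state bits zero */
	unsigned long cookie = seq_snap(gp_seq);

	printf("done now? %d\n", seq_done(gp_seq, cookie));      /* 0 */
	gp_seq += 1;                    /* GP starts (state -> 1)      */
	gp_seq = (gp_seq | MASK) + 1;   /* GP ends   (counter advances) */
	printf("done after GP? %d\n", seq_done(gp_seq, cookie)); /* 1 */
	return 0;
}
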
1580 return &(rcu_state.srs_wait_nodes)[0].node <= node &&
1581 node <= &(rcu_state.srs_wait_nodes)[SR_NORMAL_GP_WAIT_HEAD_MAX - 1].node;
1590 sr_wn = &(rcu_state.srs_wait_nodes)[i];
1641 done = smp_load_acquire(&rcu_state.srs_done_tail);
1668 atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
1679 wait_tail = rcu_state.srs_wait_tail;
1683 rcu_state.srs_wait_tail = NULL;
1684 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1711 !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
1717 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
1718 smp_store_release(&rcu_state.srs_done_tail, wait_tail);
1726 atomic_inc(&rcu_state.srs_cleanups_pending);
1727 if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
1728 atomic_dec(&rcu_state.srs_cleanups_pending);
1741 first = READ_ONCE(rcu_state.srs_next.first);
1753 llist_add(wait_head, &rcu_state.srs_next);
1760 WARN_ON_ONCE(rcu_state.srs_wait_tail != NULL);
1761 rcu_state.srs_wait_tail = wait_head;
1762 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1769 llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next);
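
Lines 1741-1769 enqueue synchronize_rcu() waiters onto rcu_state.srs_next with llist_add(), a lock-free singly-linked-list push. A C11-atomics model of just the push operation (the kernel's llist also provides delete-all and iteration helpers, not shown here):

/* Lock-free push onto a singly linked stack via compare-exchange,
 * modelling llist_add() on the srs_next waiter list. */
#include <stdatomic.h>
#include <stdio.h>

struct lnode {
	struct lnode *next;
};

struct lhead {
	_Atomic(struct lnode *) first;
};

static int llist_add_model(struct lnode *n, struct lhead *h)
{
	struct lnode *first = atomic_load(&h->first);

	do {
		n->next = first;  /* retried with the updated head on CAS failure */
	} while (!atomic_compare_exchange_weak(&h->first, &first, n));

	return first == NULL;  /* true if the list was previously empty */
}

int main(void)
{
	struct lhead head = { NULL };
	struct lnode a, b;

	printf("was empty: %d\n", llist_add_model(&a, &head));  /* 1 */
	printf("was empty: %d\n", llist_add_model(&b, &head));  /* 0 */
	return 0;
}
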
1784 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1786 if (!rcu_state.gp_flags) {
1791 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1805 rcu_seq_start(&rcu_state.gp_seq);
1806 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1808 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1809 rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1834 WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1838 arch_spin_lock(&rcu_state.ofl_lock);
1844 arch_spin_unlock(&rcu_state.ofl_lock);
1881 arch_spin_unlock(&rcu_state.ofl_lock);
1890 * layout of the tree within the rcu_state.node[] array. Note that
1898 WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1905 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1909 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1920 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1943 *gfp = READ_ONCE(rcu_state.gp_flags);
1959 int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
1962 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1963 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1969 WRITE_ONCE(rcu_state.jiffies_stall,
1972 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
1983 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1985 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & ~RCU_GP_FLAG_FQS);
2002 if (rcu_state.cbovld)
2006 if (rcu_state.cbovld) {
2011 if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) {
2012 WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
2018 WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
2021 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2023 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
2024 (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
2027 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
2042 if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
2044 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2050 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
2052 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2055 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2061 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2063 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2067 if (time_after(jiffies, rcu_state.jiffies_force_qs))
2070 j = rcu_state.jiffies_force_qs - j;
2090 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2092 rcu_state.gp_end = jiffies;
2093 gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2094 if (gp_duration > rcu_state.gp_max)
2095 rcu_state.gp_max = gp_duration;
2105 rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
2117 new_gp_seq = rcu_state.gp_seq;
2142 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2149 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2150 rcu_seq_end(&rcu_state.gp_seq);
2151 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2152 WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
2174 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2175 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2176 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
2185 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2207 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2209 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
2210 swait_event_idle_exclusive(rcu_state.gp_wq,
2211 READ_ONCE(rcu_state.gp_flags) &
2214 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
2219 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2221 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2229 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
2231 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
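
Lines 1834-2231 chart the grace-period kthread's progress by writing RCU_GP_* values into rcu_state.gp_state. The sketch below merely narrates one trip through those phases in the order suggested by the line numbers above; the ordering is reconstructed from this listing and is approximate, and the enum is illustrative rather than a copy of the kernel's:

#include <stdio.h>

enum gp_state {
	RCU_GP_IDLE, RCU_GP_WAIT_GPS, RCU_GP_DONE_GPS, RCU_GP_ONOFF,
	RCU_GP_INIT, RCU_GP_WAIT_FQS, RCU_GP_DOING_FQS,
	RCU_GP_CLEANUP, RCU_GP_CLEANED,
};

static const char *names[] = {
	"IDLE", "WAIT_GPS", "DONE_GPS", "ONOFF", "INIT",
	"WAIT_FQS", "DOING_FQS", "CLEANUP", "CLEANED",
};

int main(void)
{
	/* One pass: wait for a request, initialize the new GP, run the
	 * force-quiescent-state loop (WAIT_FQS/DOING_FQS repeat until
	 * the GP completes), then clean up and go idle. */
	enum gp_state walk[] = {
		RCU_GP_WAIT_GPS, RCU_GP_DONE_GPS, RCU_GP_ONOFF, RCU_GP_INIT,
		RCU_GP_WAIT_FQS, RCU_GP_DOING_FQS, RCU_GP_CLEANUP,
		RCU_GP_IDLE, RCU_GP_CLEANED,
	};
	for (unsigned int i = 0; i < sizeof(walk) / sizeof(walk[0]); i++)
		printf("gp_state = RCU_GP_%s\n", names[walk[i]]);
	return 0;
}
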
2236 * Report a full set of quiescent states to the rcu_state data structure.
2249 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS);
2292 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2485 trace_rcu_batch_start(rcu_state.name,
2487 trace_rcu_batch_end(rcu_state.name, 0,
2521 trace_rcu_batch_start(rcu_state.name,
2541 trace_rcu_invoke_callback(rcu_state.name, rhp);
2582 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2597 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2669 rcu_state.cbovld = rcu_state.cbovldnext;
2670 rcu_state.cbovldnext = false;
2677 rcu_state.cbovldnext |= !!rnp->cbovldmask;
2734 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2747 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2751 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS);
2935 trace_rcu_kvfree_callback(rcu_state.name, head,
2939 trace_rcu_callback(rcu_state.name, head,
2981 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2984 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
3202 trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("request"));
3229 trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("complete"));
3293 rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
3294 rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
3302 rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
3304 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
3338 return rcu_seq_snap(&rcu_state.gp_seq_polled);
3368 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
3393 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3472 rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) {
3505 * ->gp_seq field be checked instead of that of the rcu_state structure.
3508 * field is updated and the time that the rcu_state structure's ->gp_seq
3521 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
3607 time_before(jiffies, READ_ONCE(rcu_state.gp_start) +
3642 trace_rcu_barrier(rcu_state.name, s, cpu,
3643 atomic_read(&rcu_state.barrier_cpu_count), done);
3650 * Note that the value of rcu_state.barrier_sequence must be captured
3658 unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3661 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3663 complete(&rcu_state.barrier_completion);
3674 unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
3679 lockdep_assert_held(&rcu_state.barrier_lock);
3682 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3695 atomic_inc(&rcu_state.barrier_cpu_count);
3698 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
3717 raw_spin_lock(&rcu_state.barrier_lock);
3719 raw_spin_unlock(&rcu_state.barrier_lock);
3736 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3741 mutex_lock(&rcu_state.barrier_mutex);
3744 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3745 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
3747 mutex_unlock(&rcu_state.barrier_mutex);
3752 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
3753 rcu_seq_start(&rcu_state.barrier_sequence);
3754 gseq = rcu_state.barrier_sequence;
3755 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3764 init_completion(&rcu_state.barrier_completion);
3765 atomic_set(&rcu_state.barrier_cpu_count, 2);
3766 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3778 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
3781 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3782 rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
3788 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3789 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
3792 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3798 rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
3805 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
3806 complete(&rcu_state.barrier_completion);
3809 wait_for_completion(&rcu_state.barrier_completion);
3812 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
3813 rcu_seq_end(&rcu_state.barrier_sequence);
3814 gseq = rcu_state.barrier_sequence;
3822 mutex_unlock(&rcu_state.barrier_mutex);
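
Lines 3661-3806 show the rcu_barrier() counting scheme: barrier_cpu_count starts at 2 (line 3765) so that callbacks completing while others are still being queued cannot fire barrier_completion early; each entrained callback increments the count (line 3695), each invocation decrements it (line 3661), and the orchestrator finally subtracts the bias of 2 (line 3805). A user-space model with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int barrier_cpu_count;
static bool completed;

static void complete_barrier(void) { completed = true; }

static void queue_barrier_cb(void)   /* models entraining one callback */
{
	atomic_fetch_add(&barrier_cpu_count, 1);
}

static void barrier_cb_fired(void)   /* models rcu_barrier_callback() */
{
	if (atomic_fetch_sub(&barrier_cpu_count, 1) == 1)
		complete_barrier();
}

int main(void)
{
	atomic_store(&barrier_cpu_count, 2);  /* bias: queuing in progress */

	queue_barrier_cb();
	barrier_cb_fired();      /* count 3 -> 2: no premature completion */
	queue_barrier_cb();
	barrier_cb_fired();      /* count 3 -> 2 again */

	/* Queuing done: drop the bias; completes only once all cbs fired. */
	if (atomic_fetch_sub(&barrier_cpu_count, 2) == 2)
		complete_barrier();

	printf("completed=%d count=%d\n", completed,
	       atomic_load(&barrier_cpu_count));
	return 0;
}
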
3851 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3856 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3961 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
3974 return !!READ_ONCE(rcu_state.n_online_cpus);
4064 rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4065 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4067 rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4163 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4189 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4193 ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus);
4194 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4262 arch_spin_lock(&rcu_state.ofl_lock);
4264 raw_spin_lock(&rcu_state.barrier_lock);
4267 raw_spin_unlock(&rcu_state.barrier_lock);
4271 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4272 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4274 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4275 rdp->rcu_onl_gp_state = READ_ONCE(rcu_state.gp_state);
4289 arch_spin_unlock(&rcu_state.ofl_lock);
4323 arch_spin_lock(&rcu_state.ofl_lock);
4325 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4326 rdp->rcu_ofl_gp_state = READ_ONCE(rcu_state.gp_state);
4335 arch_spin_unlock(&rcu_state.ofl_lock);
4356 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4358 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4373 raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
4404 ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus);
4405 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
4422 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
4485 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4494 WRITE_ONCE(rcu_state.gp_activity, jiffies);
4495 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4497 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */
4537 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
4546 * Helper function for rcu_init() that initializes the rcu_state structure.
4570 rcu_state.level[i] =
4571 rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4578 rnp = rcu_state.level[i];
4586 rnp->gp_seq = rcu_state.gp_seq;
4587 rnp->gp_seq_needed = rcu_state.gp_seq;
4588 rnp->completedqs = rcu_state.gp_seq;
4602 rnp->parent = rcu_state.level[i - 1] +
4620 init_swait_queue_head(&rcu_state.gp_wq);
4621 init_swait_queue_head(&rcu_state.expedited_wq);
4658 * the ->node array in the rcu_state structure.
4750 * with the rcu_state structure.
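
Lines 4570-4602 lay the rcu_node tree out in the flat node[] array, pointing level[i] at the first node of each level and wiring parent links upward. A user-space model of the same layout arithmetic; the fanout and level counts are arbitrary example values, not the kernel's computed geometry:

#include <stdio.h>

#define NUM_LVLS 2
#define FANOUT   4

struct node { struct node *parent; };

static int          num_lvl[NUM_LVLS] = { 1, 4 };  /* root, then leaves */
static struct node  node[5];                       /* 1 + 4, flattened  */
static struct node *level[NUM_LVLS];

int main(void)
{
	/* Each level starts where the previous one ended. */
	level[0] = &node[0];
	for (int i = 1; i < NUM_LVLS; i++)
		level[i] = level[i - 1] + num_lvl[i - 1];

	/* Link each node to its parent on the level above; the root
	 * has no parent. */
	for (int i = NUM_LVLS - 1; i >= 0; i--) {
		for (int j = 0; j < num_lvl[i]; j++) {
			struct node *rnp = &level[i][j];
			rnp->parent = i ? level[i - 1] + j / FANOUT : NULL;
		}
	}

	printf("leaf 3's parent is root? %s\n",
	       node[1 + 3].parent == &node[0] ? "yes" : "no");
	return 0;
}
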