/* Excerpts from kernel/rcu/srcutree.c -- Sleepable Read-Copy Update (SRCU). */

// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 */

/* Holdoff in nanoseconds for auto-expediting. */

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */

/*
 * Control conversion to SRCU_SIZE_BIG:
 * 1: Convert at init_srcu_struct() time.
 * 3: Decide at boot time based on system shape (default).
 */

/* Number of CPUs to trigger init_srcu_struct()-time transition to big. */

/* Early-boot callback management, so early that no lock is required! */

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
/*
 * Initialize SRCU per-CPU data.  Note that statically allocated
 * ...
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */

/* In init_srcu_struct_data(): */
        /*
         * Initialize the per-CPU srcu_data array, which feeds into the
         * leaves of the srcu_node tree.
         */
        BUILD_BUG_ON(ARRAY_SIZE(sdp->srcu_lock_count) !=
                     ARRAY_SIZE(sdp->srcu_unlock_count));
        sdp = per_cpu_ptr(ssp->sda, cpu);
        rcu_segcblist_init(&sdp->srcu_cblist);
        sdp->srcu_cblist_invoking = false;
        sdp->srcu_gp_seq_needed = ssp->srcu_sup->srcu_gp_seq;
        sdp->srcu_gp_seq_needed_exp = ssp->srcu_sup->srcu_gp_seq;
        sdp->srcu_barrier_head.next = &sdp->srcu_barrier_head;
        sdp->mynode = NULL;
        sdp->cpu = cpu;
        INIT_WORK(&sdp->work, srcu_invoke_callbacks);
        timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
        sdp->ssp = ssp;
/* In init_srcu_struct_nodes(): */
        ssp->srcu_sup->node = kcalloc(rcu_num_nodes, sizeof(*ssp->srcu_sup->node), gfp_flags);
        if (!ssp->srcu_sup->node)
                /* ... */
        ssp->srcu_sup->level[0] = &ssp->srcu_sup->node[0];
        ssp->srcu_sup->level[i] = ssp->srcu_sup->level[i - 1] + num_rcu_lvl[i - 1];

        BUILD_BUG_ON(ARRAY_SIZE(snp->srcu_have_cbs) !=
                     ARRAY_SIZE(snp->srcu_data_have_cbs));
        for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
                snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
                snp->srcu_data_have_cbs[i] = 0;
        }
        snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
        snp->grplo = -1;
        snp->grphi = -1;
        if (snp == &ssp->srcu_sup->node[0]) {
                snp->srcu_parent = NULL;
                /* ... */
        }
        /* Non-root node. */
        if (snp == ssp->srcu_sup->level[level + 1])
                level++;
        snp->srcu_parent = ssp->srcu_sup->level[level - 1] +
                           (snp - ssp->srcu_sup->level[level]) /
                           levelspread[level - 1];

        /*
         * Initialize the per-CPU srcu_data array, which feeds into the
         * leaves of the srcu_node tree.
         */
        level = rcu_num_lvls - 1;
        snp_first = ssp->srcu_sup->level[level];
        sdp = per_cpu_ptr(ssp->sda, cpu);
        sdp->mynode = &snp_first[cpu / levelspread[level]];
        for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
                if (snp->grplo < 0)
                        snp->grplo = cpu;
                snp->grphi = cpu;
        }
        sdp->grpmask = 1UL << (cpu - sdp->mynode->grplo);
        smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static parameter
 * tells us that ->sda has already been wired up to srcu_data.
 */

/* In init_srcu_struct_fields(): */
        ssp->srcu_sup = kzalloc(sizeof(*ssp->srcu_sup), GFP_KERNEL);
        if (!ssp->srcu_sup)
                return -ENOMEM;
        spin_lock_init(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
        ssp->srcu_sup->srcu_size_state = SRCU_SIZE_SMALL;
        ssp->srcu_sup->node = NULL;
        mutex_init(&ssp->srcu_sup->srcu_cb_mutex);
        mutex_init(&ssp->srcu_sup->srcu_gp_mutex);
        ssp->srcu_idx = 0;
        ssp->srcu_sup->srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL;
        ssp->srcu_sup->srcu_barrier_seq = 0;
        mutex_init(&ssp->srcu_sup->srcu_barrier_mutex);
        atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 0);
        INIT_DELAYED_WORK(&ssp->srcu_sup->work, process_srcu);
        ssp->srcu_sup->sda_is_static = is_static;
        ssp->sda = alloc_percpu(struct srcu_data);
        if (!ssp->sda)
                goto err_free_sup;
        ssp->srcu_sup->srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL;
        ssp->srcu_sup->srcu_last_gp_end = ktime_get_mono_fast_ns();
        if (READ_ONCE(ssp->srcu_sup->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
                /* ... */
                WRITE_ONCE(ssp->srcu_sup->srcu_size_state, SRCU_SIZE_BIG);
        }
        ssp->srcu_sup->srcu_ssp = ssp;
        smp_store_release(&ssp->srcu_sup->srcu_gp_seq_needed,
                          0); /* Init done. */

err_free_sda:
        free_percpu(ssp->sda);
        ssp->sda = NULL;
err_free_sup:
        kfree(ssp->srcu_sup);
        ssp->srcu_sup = NULL;
        return -ENOMEM;
/* In __init_srcu_struct(): */
        /* Don't re-initialize a lock while it is held. */
        lockdep_init_map(&ssp->dep_map, name, key, 0);

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 */
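/*
 * A minimal usage sketch (not from this file; "my_srcu" and the callers
 * are illustrative names).  init_srcu_struct() pairs with
 * cleanup_srcu_struct(), and srcu_read_lock() returns an index that must
 * be passed back to srcu_read_unlock():
 */
static struct srcu_struct my_srcu;

static int __init my_setup(void)
{
        return init_srcu_struct(&my_srcu);      /* 0 on success, -ENOMEM otherwise. */
}

static void my_reader(void)
{
        int idx;

        idx = srcu_read_lock(&my_srcu);         /* Enter read-side critical section. */
        /* ... dereference SRCU-protected pointers here ... */
        srcu_read_unlock(&my_srcu, idx);        /* Exit, passing the same index back. */
}

static void my_updater(void)
{
        /* ... unlink old data so new readers cannot find it ... */
        synchronize_srcu(&my_srcu);             /* Wait for pre-existing readers. */
        /* ... now safe to free the old data ... */
}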
/* In __srcu_transition_to_big(): */
        lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
        smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_ALLOC);

/* In srcu_transition_to_big(): */
        /* Double-checked locking on ->srcu_size_state. */
        if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL)
                return;
        spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags);
        if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL) {
                spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
                return;
        }
        __srcu_transition_to_big(ssp);
        spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
/* Check to see if the just-encountered contention event justifies ... */

/* In spin_lock_irqsave_check_contention(): */
        if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_sup->srcu_size_state)
                return;
        j = jiffies;
        if (ssp->srcu_sup->srcu_size_jiffies != j) {
                ssp->srcu_sup->srcu_size_jiffies = j;
                ssp->srcu_sup->srcu_n_lock_retries = 0;
        }
        if (++ssp->srcu_sup->srcu_n_lock_retries <= small_contention_lim)
                return;
        __srcu_transition_to_big(ssp);

/* Acquire the specified srcu_data structure's ->lock, but check for ... */

/* In spin_lock_irqsave_sdp_contention(): */
        struct srcu_struct *ssp = sdp->ssp;

        spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags);
        /* ... */
        spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, *flags);

/* Acquire the specified srcu_struct structure's ->lock, but check for ... */

/* In spin_lock_irqsave_ssp_contention(): */
        if (spin_trylock_irqsave_rcu_node(ssp->srcu_sup, *flags))
                return;
        spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags);
/*
 * First-use initialization of statically allocated srcu_struct
 * ...
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */

/* In check_init_srcu_struct(): */
        if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed))) /*^^^*/
                return; /* Already initialized. */
        spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags);
        if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq_needed)) {
                spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
                return;
        }
        init_srcu_struct_fields(ssp, true);
        spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
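/*
 * Statically allocated srcu_struct structures, by contrast, are created
 * with DEFINE_SRCU() or DEFINE_STATIC_SRCU(), and rely on the first-use
 * initialization above for their dynamically allocated parts.  A sketch
 * (illustrative name):
 */
DEFINE_STATIC_SRCU(my_static_srcu);
/* Readers and updaters then pass &my_static_srcu exactly as with the dynamic form. */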
/* In srcu_gp_is_expedited(): */
        struct srcu_usage *sup = ssp->srcu_sup;

        return ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp));

/*
 * Computes approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx, and returns true if
 * ...
 */

/* In srcu_readers_lock_idx(): */
        struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

        sum += atomic_long_read(&sdp->srcu_lock_count[idx]);
        mask = mask | READ_ONCE(sdp->srcu_reader_flavor);
        WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && (mask & (mask - 1)),
                  "Mixed reader flavors for srcu_struct at %ps.\n", ssp);

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */

/* In srcu_readers_unlock_idx(): */
        struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

        sum += atomic_long_read(&sdp->srcu_unlock_count[idx]);
        mask = mask | READ_ONCE(sdp->srcu_reader_flavor);
        WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && (mask & (mask - 1)),
                  "Mixed reader flavors for srcu_struct at %ps.\n", ssp);
/* Return true if the number of pre-existing readers is determined to ... */

/* From the comment in srcu_readers_active_idx_check(): */
        /*
         * ... the current ->srcu_idx but not yet have incremented its CPU's
         * ->srcu_lock_count[idx] counter.  In fact, it is possible
         * ... ->srcu_idx and incrementing ->srcu_lock_count[idx].  And there
         * ...
         * code for a long time.  That now-preempted updater has already
         * flipped ->srcu_idx (possibly during the preceding grace period),
         * ... period), and summed up the ->srcu_unlock_count[idx] counters.
         * ... increment the old ->srcu_idx value's ->srcu_lock_count[idx]
         * ...
         * the old value of ->srcu_idx and is just about to use that value
         * to index its increment of ->srcu_lock_count[idx].  But as soon as
         * it leaves that SRCU read-side critical section, it will increment
         * ->srcu_unlock_count[idx], which must follow the updater's above
         * ... an smp_mb() and a later fetch from ->srcu_idx, that task will be
         * ... ->srcu_unlock_count[idx] in __srcu_read_unlock() is after the
         * smp_mb(), and the fetch from ->srcu_idx in __srcu_read_lock()
         * ... value of ->srcu_idx until the -second- __srcu_read_lock(),
         * ... ->srcu_lock_count[idx] for the old value of ->srcu_idx twice,
         * ...
         * ->srcu_lock_count[idx] for the old index, where Nc is the number
         * ...
         * comfortably beyond excessive.  Especially on 64-bit systems,
         */
/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise.
 * ...
 * can be useful as an error check at cleanup time.
 */

/* In srcu_readers_active(): */
        struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

        sum += atomic_long_read(&sdp->srcu_lock_count[0]);
        sum += atomic_long_read(&sdp->srcu_lock_count[1]);
        sum -= atomic_long_read(&sdp->srcu_unlock_count[0]);
        sum -= atomic_long_read(&sdp->srcu_unlock_count[1]);
/*
 * ... synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below, boot time configurable) to allow SRCU readers to exit
 * their read-side critical sections.  If there are still some readers
 * after one jiffy, we repeatedly block for one-jiffy time periods.
 * The blocking time is increased as the grace-period age increases,
 * with max blocking time capped at 10 jiffies.
 */

#define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO 3UL    // Lowmark on default per-GP-phase
                                                 // no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI 1000UL // Highmark on default per-GP-phase
                                                 // no-delay instances.

// per-GP-phase no-delay instances adjusted to allow non-sleeping poll of up
// to one jiffy.  The multiply by 2 factors in the srcu_get_delay()
// ...

// Maximum per-GP-phase consecutive no-delay instances.

// Maximum consecutive no-delay instances.
/* Return grace-period delay, zero if there are expedited grace ... */

/* In srcu_get_delay(): */
        struct srcu_usage *sup = ssp->srcu_sup;

        if (rcu_seq_state(READ_ONCE(sup->srcu_gp_seq))) {
                j = jiffies - 1;
                gpstart = READ_ONCE(sup->srcu_gp_start);
                if (time_after(j, gpstart))
                        jbase += j - gpstart;
                if (!jbase) {
                        ASSERT_EXCLUSIVE_WRITER(sup->srcu_n_exp_nodelay);
                        WRITE_ONCE(sup->srcu_n_exp_nodelay, READ_ONCE(sup->srcu_n_exp_nodelay) + 1);
                        if (READ_ONCE(sup->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
                                jbase = 1;
                }
        }
/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 */

/* In cleanup_srcu_struct(): */
        struct srcu_usage *sup = ssp->srcu_sup;

        flush_delayed_work(&sup->work);
        for_each_possible_cpu(cpu) {
                struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

                del_timer_sync(&sdp->delay_work);
                flush_work(&sdp->work);
                if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
                        return; /* Forgot srcu_barrier(), so just leak it! */
        }
        if (WARN_ON(rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
            WARN_ON(rcu_seq_current(&sup->srcu_gp_seq) != sup->srcu_gp_seq_needed) ||
            WARN_ON(srcu_readers_active(ssp))) {
                pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
                        __func__, ssp, rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)),
                        rcu_seq_current(&sup->srcu_gp_seq), sup->srcu_gp_seq_needed);
                return; /* Caller forgot to stop doing call_srcu()? */
        }
        kfree(sup->node);
        sup->node = NULL;
        sup->srcu_size_state = SRCU_SIZE_SMALL;
        if (!sup->sda_is_static) {
                free_percpu(ssp->sda);
                ssp->sda = NULL;
                kfree(sup);
                ssp->srcu_sup = NULL;
        }
/* In __srcu_check_read_flavor(): */
        /* NMI-unsafe use in NMI is a bad sign, as is multi-bit read_flavor values. */
        WARN_ON_ONCE(read_flavor & (read_flavor - 1));

        sdp = raw_cpu_ptr(ssp->sda);
        old_read_flavor = READ_ONCE(sdp->srcu_reader_flavor);
        if (!old_read_flavor) {
                old_read_flavor = cmpxchg(&sdp->srcu_reader_flavor, 0, read_flavor);
                /* ... */
        }
        WARN_ONCE(old_read_flavor != read_flavor, "CPU %d old state %d new state %d\n",
                  sdp->cpu, old_read_flavor, read_flavor);
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 */

/* In __srcu_read_lock(): */
        idx = READ_ONCE(ssp->srcu_idx) & 0x1;
        this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.
 */

/* In __srcu_read_unlock(): */
        this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct, but in an NMI-safe manner using RMW atomics.
 */

/* In __srcu_read_lock_nmisafe(): */
        struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

        idx = READ_ONCE(ssp->srcu_idx) & 0x1;
        atomic_long_inc(&sdp->srcu_lock_count[idx]);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct, but in an NMI-safe manner using RMW atomics.
 */

/* In __srcu_read_unlock_nmisafe(): */
        struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

        atomic_long_inc(&sdp->srcu_unlock_count[idx]);
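/*
 * Sketch of the NMI-safe reader flavor (illustrative function; "my_srcu"
 * as in the earlier sketch).  A given srcu_struct must use either the
 * plain or the NMI-safe flavor consistently:
 */
static void my_nmi_safe_reader(void)
{
        int idx;

        idx = srcu_read_lock_nmisafe(&my_srcu); /* Safe even from NMI handlers. */
        /* ... read SRCU-protected data ... */
        srcu_read_unlock_nmisafe(&my_srcu, idx);
}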
/* In srcu_gp_start(): */
        lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
        WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed));
        WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
        WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
        smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
        rcu_seq_start(&ssp->srcu_sup->srcu_gp_seq);
        state = rcu_seq_state(ssp->srcu_sup->srcu_gp_seq);

/* In srcu_delay_timer(): */
        queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);

/* In srcu_queue_delayed_work_on(): */
        if (!delay) {
                queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
                return;
        }
        timer_reduce(&sdp->delay_work, jiffies + delay);
/* ... just-completed grace period, the one corresponding to idx.  If possible, ... */

/* In srcu_schedule_cbs_snp(): */
        for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
                if (!(mask & (1UL << (cpu - snp->grplo))))
                        continue;
                srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
        }
/*
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */

/* In srcu_gp_end(): */
        struct srcu_usage *sup = ssp->srcu_sup;

        /* Prevent more than one additional grace period. */
        mutex_lock(&sup->srcu_cb_mutex);

        /* End the current grace period. */
        idx = rcu_seq_state(sup->srcu_gp_seq);
        WRITE_ONCE(sup->srcu_last_gp_end, ktime_get_mono_fast_ns());
        rcu_seq_end(&sup->srcu_gp_seq);
        gpseq = rcu_seq_current(&sup->srcu_gp_seq);
        if (ULONG_CMP_LT(sup->srcu_gp_seq_needed_exp, gpseq))
                WRITE_ONCE(sup->srcu_gp_seq_needed_exp, gpseq);
        mutex_unlock(&sup->srcu_gp_mutex); /* A new GP can now start. */

        /* Initiate callback invocation as needed. */
        ss_state = smp_load_acquire(&sup->srcu_size_state);
        if (ss_state < SRCU_SIZE_WAIT_BARRIER)
                srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, get_boot_cpu_id()),
                                      cbdelay);
        /* ... otherwise, walk the srcu_node tree: */
        idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
        last_lvl = snp >= sup->level[rcu_num_lvls - 1];
        cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
        snp->srcu_have_cbs[idx] = gpseq;
        rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
        sgsne = snp->srcu_gp_seq_needed_exp;
        if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_LT(sgsne, gpseq))
                WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
        mask = snp->srcu_data_have_cbs[idx];
        snp->srcu_data_have_cbs[idx] = 0;

        /* Occasionally prevent srcu_data counter wrap: */
        sdp = per_cpu_ptr(ssp->sda, cpu);
        if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
                sdp->srcu_gp_seq_needed = gpseq;
        if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
                sdp->srcu_gp_seq_needed_exp = gpseq;

        /* Callback initiation done, allow grace periods after next. */
        mutex_unlock(&sup->srcu_cb_mutex);

        /* Start a new grace period if needed: */
        gpseq = rcu_seq_current(&sup->srcu_gp_seq);
        if (!rcu_seq_state(gpseq) &&
            ULONG_CMP_LT(gpseq, sup->srcu_gp_seq_needed)) {
                srcu_gp_start(ssp);
                /* ... */
        }

        /* Transition to big if needed: */
        smp_store_release(&sup->srcu_size_state, ss_state + 1);
/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known ...
 */

/* In srcu_funnel_exp_start(): */
        for (; snp != NULL; snp = snp->srcu_parent) {
                sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
                if (WARN_ON_ONCE(rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, s)) ||
                    (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
                        return;
                /* ... */
                sgsne = snp->srcu_gp_seq_needed_exp;
                /* ... */
                WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
                /* ... */
        }
        /* ... */
        if (ULONG_CMP_LT(ssp->srcu_sup->srcu_gp_seq_needed_exp, s))
                WRITE_ONCE(ssp->srcu_sup->srcu_gp_seq_needed_exp, s);
        spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 */

/* In srcu_funnel_gp_start(): */
        int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
        struct srcu_usage *sup = ssp->srcu_sup;

        if (smp_load_acquire(&sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
                snp_leaf = NULL;
        else
                snp_leaf = sdp->mynode;

        /* Each pass through the loop does one level of the srcu_node tree. */
        for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
                if (WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) && snp != snp_leaf)
                        return; /* GP already done and CBs recorded. */
                snp_seq = snp->srcu_have_cbs[idx];
                if (snp == snp_leaf && snp_seq == s)
                        snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
                /* ... */
                snp->srcu_have_cbs[idx] = s;
                if (snp == snp_leaf)
                        snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
                sgsne = snp->srcu_gp_seq_needed_exp;
                if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
                        WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
                /* ... */
        }

        /* Top of tree, must ensure the grace period will be started. */
        if (ULONG_CMP_LT(sup->srcu_gp_seq_needed, s)) {
                /*
                 * Record need for grace period s.  Pair with load
                 * acquire setting up for initialization.
                 */
                smp_store_release(&sup->srcu_gp_seq_needed, s); /*^^^*/
        }
        if (!do_norm && ULONG_CMP_LT(sup->srcu_gp_seq_needed_exp, s))
                WRITE_ONCE(sup->srcu_gp_seq_needed_exp, s);

        /* If grace period not already in progress, start it. */
        if (!WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) &&
            rcu_seq_state(sup->srcu_gp_seq) == SRCU_STATE_IDLE) {
                WARN_ON_ONCE(ULONG_CMP_GE(sup->srcu_gp_seq, sup->srcu_gp_seq_needed));
                srcu_gp_start(ssp);
        }
        if (likely(srcu_init_done))
                queue_delayed_work(rcu_gp_wq, &sup->work,
                                   !!srcu_get_delay(ssp));
        else if (list_empty(&sup->work.work.entry))
                list_add(&sup->work.work.entry, &srcu_boot_list);
/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */

/* In try_check_zero(): */
        if ((--trycount + curdelay) <= 0)
                return false;
/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */

/* In srcu_flip(): */
        /*
         * Because the flip of ->srcu_idx is executed only if the
         * preceding call to srcu_readers_active_idx_check() found that
         * the ->srcu_unlock_count[] and ->srcu_lock_count[] sums matched
         * ...
         * __srcu_read_lock(), that reader was using a value of ->srcu_idx
         * ...
         * value of ->srcu_idx.
         *
         * This sum-equality check and ordering also ensures that if
         * ...
         * ->srcu_idx, this updater's earlier scans cannot have seen
         * ...
         */
        WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); // Flip the counter.
/*
 * Note that it is OK for several current from-idle requests for a new
 * ...
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether the counters are exactly
 * accurate.  This wrapping is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */

/* In srcu_should_expedite(): */
        if (this_cpu_read(ssp->sda->srcu_reader_flavor) & SRCU_READ_FLAVOR_LITE)
                return false;
        /* If the local srcu_data structure has callbacks, not idle. */
        sdp = raw_cpu_ptr(ssp->sda);
        if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
                return false; /* Callbacks already present, so not idle. */
        }

        /* First, see if enough time has passed since the last GP. */
        t = ktime_get_mono_fast_ns();
        tlast = READ_ONCE(ssp->srcu_sup->srcu_last_gp_end);
        if (exp_holdoff == 0 ||
            time_in_range_open(t, tlast, tlast + exp_holdoff))
                return false; /* Too soon after last GP. */

        /* Next, check for probable idleness. */
        curseq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq);
        smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
        if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_sup->srcu_gp_seq_needed)))
                return false; /* Grace period in progress, so not idle. */
        smp_mb(); /* Order ->srcu_gp_seq with prior access. */
        if (curseq != rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq))
                return false; /* GP # changed, so not idle. */
        return true; /* With reasonable probability, idle! */
/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 */

/* In srcu_gp_start_if_needed(): */
        /*
         * While starting a new grace period, make sure we are in an
         * SRCU read-side critical section so that the grace-period
         * sequence number cannot wrap around in the meantime.
         */
        idx = __srcu_read_lock_nmisafe(ssp);
        ss_state = smp_load_acquire(&ssp->srcu_sup->srcu_size_state);
        if (ss_state < SRCU_SIZE_WAIT_CALL)
                sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
        else
                sdp = raw_cpu_ptr(ssp->sda);
        if (rhp)
                rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
        /* ... */
        s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
        rcu_segcblist_advance(&sdp->srcu_cblist,
                              rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
        /* ... */
        WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s));
        if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
                sdp->srcu_gp_seq_needed = s;
                needgp = true;
        }
        if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
                sdp->srcu_gp_seq_needed_exp = s;
                needexp = true;
        }
        /* ... */
        sdp_mynode = sdp->mynode;
/*
 * ... initiating grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * ...
 * its last corresponding SRCU read-side critical section whose beginning
 * ...
 * an SRCU read-side critical section that continues beyond the start of
 * ...
 * but before the beginning of that SRCU read-side critical section.
 * ...
 * B are guaranteed to execute a full memory barrier during the time
 * ...
 */

/* In __call_srcu(): */
        if (debug_rcu_head_queue(rhp)) {
                /* Probable double call_srcu(), so leak the callback. */
                WRITE_ONCE(rhp->func, srcu_leak_callback);
                WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
                return;
        }
        rhp->func = func;
/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 */
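/*
 * Sketch of call_srcu() use (illustrative type and function names; "my_srcu"
 * as in the earlier sketch).  The rcu_head is embedded in the protected
 * object, and the callback frees it once a full SRCU grace period has
 * elapsed:
 */
struct my_obj {
        struct rcu_head rh;
        /* ... payload ... */
};

static void my_free_cb(struct rcu_head *rhp)
{
        struct my_obj *p = container_of(rhp, struct my_obj, rh);

        kfree(p);       /* All pre-existing readers are done with p. */
}

static void my_retire(struct my_obj *p)
{
        /* ... make p unreachable to new readers ... */
        call_srcu(&my_srcu, &p->rh, my_free_cb);        /* Does not block. */
}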
/* In __synchronize_srcu(): */
        srcu_lock_sync(&ssp->dep_map);

        RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
                         lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * ...
 * memory-ordering properties as does synchronize_srcu().
 */
/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * ...
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
 * ...
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * ...
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * Implementation of these memory-ordering guarantees is similar to
 * that of synchronize_rcu().
 */
/**
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 * ...
 */

/* In get_state_synchronize_srcu(): */
        // Any prior manipulation of SRCU-protected data must happen
        // before the load from ->srcu_gp_seq.
        smp_mb();
        return rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
/**
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 * ...
 */

/**
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 * ...
 * returns @true if an SRCU grace period elapsed since the time that the
 * cookie was created.
 * ...
 * This is more pronounced on 32-bit systems where cookies are 32 bits,
 * ...
 * 25-microsecond expedited SRCU grace periods.  However, a more likely
 * ...
 * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
 * ...
 * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
 * few minutes.
 */

/* In poll_state_synchronize_srcu(): */
        if (cookie != SRCU_GET_STATE_COMPLETED &&
            !rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, cookie))
                return false;
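/*
 * Sketch of the polling interface (illustrative names; "my_srcu" as in the
 * earlier sketch): capture a cookie, then later ask whether the
 * corresponding grace period has elapsed.  start_poll_synchronize_srcu()
 * also starts a grace period if needed, whereas
 * get_state_synchronize_srcu() merely snapshots the state:
 */
static unsigned long my_cookie;

static void my_begin(void)
{
        my_cookie = start_poll_synchronize_srcu(&my_srcu);
}

static bool my_gp_elapsed(void)
{
        return poll_state_synchronize_srcu(&my_srcu, my_cookie);
}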
/* In srcu_barrier_cb(): */
        rhp->next = rhp; // Mark the callback as having been invoked.
        sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
        ssp = sdp->ssp;
        if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
                complete(&ssp->srcu_sup->srcu_barrier_completion);

/*
 * Enqueue an srcu_barrier() callback on the specified srcu_data
 * structure's ->cblist, but only if that ->cblist already has at least one
 * callback enqueued.
 */

/* In srcu_barrier_one_cpu(): */
        atomic_inc(&ssp->srcu_sup->srcu_barrier_cpu_cnt);
        sdp->srcu_barrier_head.func = srcu_barrier_cb;
        debug_rcu_head_queue(&sdp->srcu_barrier_head);
        if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
                                   &sdp->srcu_barrier_head)) {
                debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
                atomic_dec(&ssp->srcu_sup->srcu_barrier_cpu_cnt);
        }
/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */

/* In srcu_barrier(): */
        unsigned long s = rcu_seq_snap(&ssp->srcu_sup->srcu_barrier_seq);

        mutex_lock(&ssp->srcu_sup->srcu_barrier_mutex);
        if (rcu_seq_done(&ssp->srcu_sup->srcu_barrier_seq, s)) {
                smp_mb(); /* Force ordering following return. */
                mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex);
                return; /* Someone else did our work for us. */
        }
        rcu_seq_start(&ssp->srcu_sup->srcu_barrier_seq);
        init_completion(&ssp->srcu_sup->srcu_barrier_completion);

        /* Initial count prevents reaching zero until all CBs are posted. */
        atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 1);

        if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
                srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, get_boot_cpu_id()));
        else
                for_each_possible_cpu(cpu)
                        srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));

        /* Remove the initial count, at which point reaching zero can happen. */
        if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
                complete(&ssp->srcu_sup->srcu_barrier_completion);
        wait_for_completion(&ssp->srcu_sup->srcu_barrier_completion);

        rcu_seq_end(&ssp->srcu_sup->srcu_barrier_seq);
        mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex);
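/*
 * Sketch of srcu_barrier() use at teardown (illustrative name; "my_srcu"
 * as in the earlier sketch): once no further call_srcu() invocations are
 * possible, wait for the already-queued callbacks, then deconstruct:
 */
static void my_teardown(void)
{
        srcu_barrier(&my_srcu);         /* Wait for all in-flight callbacks. */
        cleanup_srcu_struct(&my_srcu);  /* Now safe to deconstruct. */
}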
/**
 * srcu_batches_completed - return batches completed.
 * ...
 */

/* In srcu_batches_completed(): */
        return READ_ONCE(ssp->srcu_idx);
/* Core SRCU state machine.  Push state bits of ->srcu_gp_seq ... */

/* In srcu_advance_state(): */
        mutex_lock(&ssp->srcu_sup->srcu_gp_mutex);

        /*
         * ... fetching ->srcu_idx for their index, at any point in time there
         * might well be readers using both idx=0 and idx=1. ...
         * The load-acquire ensures that we see the accesses performed
         * by the prior grace period.
         */
        idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq)); /* ^^^ */
        if (idx == SRCU_STATE_IDLE) {
                spin_lock_irq_rcu_node(ssp->srcu_sup);
                if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) {
                        WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq));
                        spin_unlock_irq_rcu_node(ssp->srcu_sup);
                        mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
                        return;
                }
                idx = rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq));
                if (idx == SRCU_STATE_IDLE)
                        srcu_gp_start(ssp);
                spin_unlock_irq_rcu_node(ssp->srcu_sup);
                if (idx != SRCU_STATE_IDLE) {
                        mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
                        return; /* Someone else started the grace period. */
                }
        }

        if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
                idx = 1 ^ (ssp->srcu_idx & 1);
                if (!try_check_zero(ssp, idx, 1)) {
                        mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
                        return; /* readers present, retry later. */
                }
                srcu_flip(ssp);
                spin_lock_irq_rcu_node(ssp->srcu_sup);
                rcu_seq_set_state(&ssp->srcu_sup->srcu_gp_seq, SRCU_STATE_SCAN2);
                ssp->srcu_sup->srcu_n_exp_nodelay = 0;
                spin_unlock_irq_rcu_node(ssp->srcu_sup);
        }

        if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
                /*
                 * SRCU read-side critical sections are normally short,
                 * so check at least twice in quick succession after a flip.
                 */
                idx = 1 ^ (ssp->srcu_idx & 1);
                if (!try_check_zero(ssp, idx, 2)) {
                        mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
                        return; /* readers present, retry later. */
                }
                ssp->srcu_sup->srcu_n_exp_nodelay = 0;
                srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
        }
/* In srcu_invoke_callbacks(): */
        ssp = sdp->ssp;
        WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
        rcu_segcblist_advance(&sdp->srcu_cblist,
                              rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
        /*
         * Although this function is theoretically re-entrant, concurrent
         * ...
         */
        if (sdp->srcu_cblist_invoking ||
            !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
                /* ... */
                return; /* Someone else on the job or nothing to do. */
        }
        sdp->srcu_cblist_invoking = true;
        rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
        /* ... invoke each ready callback: */
        rhp->func(rhp);
        /* ... */
        rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
        sdp->srcu_cblist_invoking = false;
        more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */

/* In srcu_reschedule(): */
        spin_lock_irq_rcu_node(ssp->srcu_sup);
        if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) {
                if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq))) {
                        /* All requests fulfilled, time to go idle. */
                        pushgp = false;
                }
        } else if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq)) {
                /* Outstanding request and no GP.  Start one. */
                srcu_gp_start(ssp);
        }
        spin_unlock_irq_rcu_node(ssp->srcu_sup);

        if (pushgp)
                queue_delayed_work(rcu_gp_wq, &ssp->srcu_sup->work, delay);
/*
 * This is the work-queue function that handles SRCU grace periods.
 */

/* In process_srcu(): */
        ssp = sup->srcu_ssp;

        srcu_advance_state(ssp);
        curdelay = srcu_get_delay(ssp);
        if (curdelay) {
                WRITE_ONCE(sup->reschedule_count, 0);
        } else {
                j = jiffies;
                if (READ_ONCE(sup->reschedule_jiffies) == j) {
                        ASSERT_EXCLUSIVE_WRITER(sup->reschedule_count);
                        WRITE_ONCE(sup->reschedule_count, READ_ONCE(sup->reschedule_count) + 1);
                        if (READ_ONCE(sup->reschedule_count) > srcu_max_nodelay)
                                curdelay = 1;
                } else {
                        WRITE_ONCE(sup->reschedule_count, 1);
                        WRITE_ONCE(sup->reschedule_jiffies, j);
                }
        }
        srcu_reschedule(ssp, curdelay);
/* In srcutorture_get_gp_data(): */
        *gp_seq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq);
/* In srcu_torture_stats_print(): */
        int ss_state = READ_ONCE(ssp->srcu_sup->srcu_size_state);

        idx = ssp->srcu_idx & 0x1;
        if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
                ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
        /* ... */
                 tt, tf, rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq), ss_state,
        /* ... */
        if (!ssp->sda) {
                /* The srcu_struct is not yet initialized. */
                pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
        } else {
                pr_cont(" per-CPU(idx=%d):", idx);
                /* ... */
                sdp = per_cpu_ptr(ssp->sda, cpu);
                u0 = data_race(atomic_long_read(&sdp->srcu_unlock_count[!idx]));
                u1 = data_race(atomic_long_read(&sdp->srcu_unlock_count[idx]));
                /* ... */
                l0 = data_race(atomic_long_read(&sdp->srcu_lock_count[!idx]));
                l1 = data_race(atomic_long_read(&sdp->srcu_lock_count[idx]));
                c0 = l0 - u0;
                c1 = l1 - u1;
                /* ... */
                        "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
        }
1948 pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff); in srcu_bootup_announce()
1950 pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay); in srcu_bootup_announce()
1952 pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay); in srcu_bootup_announce()
1953 pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase); in srcu_bootup_announce()
/* In srcu_init(): */
        /* Decide on srcu_struct-size strategy. */
        /* ... */
        list_del_init(&sup->work.work.entry);
        if (SRCU_SIZING_IS(SRCU_SIZING_INIT) &&
            sup->srcu_size_state == SRCU_SIZE_SMALL)
                sup->srcu_size_state = SRCU_SIZE_ALLOC;
        queue_work(rcu_gp_wq, &sup->work.work);
/* Initialize any global-scope srcu_struct structures used by this module. */

/* In srcu_module_coming(): */
        struct srcu_struct **sspp = mod->srcu_struct_ptrs;

        for (i = 0; i < mod->num_srcu_structs; i++) {
                ssp = *(sspp++);
                ssp->sda = alloc_percpu(struct srcu_data);
                if (WARN_ON_ONCE(!ssp->sda))
                        return -ENOMEM;
        }
/* Clean up any global-scope srcu_struct structures used by this module. */

/* In srcu_module_going(): */
        struct srcu_struct **sspp = mod->srcu_struct_ptrs;

        for (i = 0; i < mod->num_srcu_structs; i++) {
                ssp = *(sspp++);
                if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed)) &&
                    !WARN_ON_ONCE(!ssp->srcu_sup->sda_is_static))
                        cleanup_srcu_struct(ssp);
                if (!ssp->srcu_sup->sda_is_static)
                        free_percpu(ssp->sda);
        }