Lines matching defs:ssp in kernel/rcu/srcutree.c

76 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
123 static void init_srcu_struct_data(struct srcu_struct *ssp)
133 sdp = per_cpu_ptr(ssp->sda, cpu);
137 sdp->srcu_gp_seq_needed = ssp->srcu_sup->srcu_gp_seq;
138 sdp->srcu_gp_seq_needed_exp = ssp->srcu_sup->srcu_gp_seq;
144 sdp->ssp = ssp;
164 static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
176 ssp->srcu_sup->node = kcalloc(rcu_num_nodes, sizeof(*ssp->srcu_sup->node), gfp_flags);
177 if (!ssp->srcu_sup->node)
181 ssp->srcu_sup->level[0] = &ssp->srcu_sup->node[0];
183 ssp->srcu_sup->level[i] = ssp->srcu_sup->level[i - 1] + num_rcu_lvl[i - 1];
187 srcu_for_each_node_breadth_first(ssp, snp) {
198 if (snp == &ssp->srcu_sup->node[0]) {
205 if (snp == ssp->srcu_sup->level[level + 1])
207 snp->srcu_parent = ssp->srcu_sup->level[level - 1] +
208 (snp - ssp->srcu_sup->level[level]) /
217 snp_first = ssp->srcu_sup->level[level];
219 sdp = per_cpu_ptr(ssp->sda, cpu);
228 smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
237 static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
240 ssp->srcu_sup = kzalloc(sizeof(*ssp->srcu_sup), GFP_KERNEL);
241 if (!ssp->srcu_sup)
244 spin_lock_init(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
245 ssp->srcu_sup->srcu_size_state = SRCU_SIZE_SMALL;
246 ssp->srcu_sup->node = NULL;
247 mutex_init(&ssp->srcu_sup->srcu_cb_mutex);
248 mutex_init(&ssp->srcu_sup->srcu_gp_mutex);
249 ssp->srcu_sup->srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL;
250 ssp->srcu_sup->srcu_barrier_seq = 0;
251 mutex_init(&ssp->srcu_sup->srcu_barrier_mutex);
252 atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 0);
253 INIT_DELAYED_WORK(&ssp->srcu_sup->work, process_srcu);
254 ssp->srcu_sup->sda_is_static = is_static;
256 ssp->sda = alloc_percpu(struct srcu_data);
257 ssp->srcu_ctrp = &ssp->sda->srcu_ctrs[0];
259 if (!ssp->sda)
261 init_srcu_struct_data(ssp);
262 ssp->srcu_sup->srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL;
263 ssp->srcu_sup->srcu_last_gp_end = ktime_get_mono_fast_ns();
264 if (READ_ONCE(ssp->srcu_sup->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
265 if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC))
267 WRITE_ONCE(ssp->srcu_sup->srcu_size_state, SRCU_SIZE_BIG);
269 ssp->srcu_sup->srcu_ssp = ssp;
270 smp_store_release(&ssp->srcu_sup->srcu_gp_seq_needed,
276 free_percpu(ssp->sda);
277 ssp->sda = NULL;
281 kfree(ssp->srcu_sup);
282 ssp->srcu_sup = NULL;
289 int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
293 debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
294 lockdep_init_map(&ssp->dep_map, name, key, 0);
295 return init_srcu_struct_fields(ssp, false);
303 * @ssp: structure to initialize.
309 int init_srcu_struct(struct srcu_struct *ssp)
311 return init_srcu_struct_fields(ssp, false);
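
For dynamically allocated srcu_struct instances, init_srcu_struct() above pairs with cleanup_srcu_struct() further down in this listing. A minimal sketch of that pairing, assuming a hypothetical my_dev container that is not part of this file:

#include <linux/slab.h>
#include <linux/srcu.h>

struct my_dev {				/* hypothetical container object */
	struct srcu_struct srcu;
	/* ... state protected by ->srcu ... */
};

static struct my_dev *my_dev_create(void)
{
	struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	if (init_srcu_struct(&dev->srcu)) {	/* allocates ->sda and ->srcu_sup */
		kfree(dev);
		return NULL;
	}
	return dev;
}

static void my_dev_destroy(struct my_dev *dev)
{
	cleanup_srcu_struct(&dev->srcu);	/* readers and callbacks must be done */
	kfree(dev);
}
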
320 static void __srcu_transition_to_big(struct srcu_struct *ssp)
322 lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
323 smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_ALLOC);
329 static void srcu_transition_to_big(struct srcu_struct *ssp)
334 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL)
336 spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags);
337 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL) {
338 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
341 __srcu_transition_to_big(ssp);
342 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
349 static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
353 if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_sup->srcu_size_state)
356 if (ssp->srcu_sup->srcu_size_jiffies != j) {
357 ssp->srcu_sup->srcu_size_jiffies = j;
358 ssp->srcu_sup->srcu_n_lock_retries = 0;
360 if (++ssp->srcu_sup->srcu_n_lock_retries <= small_contention_lim)
362 __srcu_transition_to_big(ssp);
373 struct srcu_struct *ssp = sdp->ssp;
377 spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags);
378 spin_lock_irqsave_check_contention(ssp);
379 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, *flags);
389 static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
391 if (spin_trylock_irqsave_rcu_node(ssp->srcu_sup, *flags))
393 spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags);
394 spin_lock_irqsave_check_contention(ssp);
401 * to each update-side SRCU primitive. Use ssp->srcu_sup->lock, which -is-
405 static void check_init_srcu_struct(struct srcu_struct *ssp)
410 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed))) /*^^^*/
412 spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags);
413 if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq_needed)) {
414 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
417 init_srcu_struct_fields(ssp, true);
418 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
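
check_init_srcu_struct() exists because a statically allocated srcu_struct cannot be fully wired up at compile time; the first update-side call finishes initialization under the ->srcu_sup lock. A minimal sketch, assuming a hypothetical my_srcu:

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);		/* compile-time initialization */

static void my_first_update(void)
{
	/*
	 * The first update-side primitive invokes check_init_srcu_struct(),
	 * which finishes the wiring that DEFINE_STATIC_SRCU() cannot do.
	 */
	synchronize_srcu(&my_srcu);
}
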
424 static bool srcu_gp_is_expedited(struct srcu_struct *ssp)
426 struct srcu_usage *sup = ssp->srcu_sup;
437 static bool srcu_readers_lock_idx(struct srcu_struct *ssp, int idx, bool gp, unsigned long unlocks)
444 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
451 "Mixed reader flavors for srcu_struct at %ps.\n", ssp);
461 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx, unsigned long *rdm)
468 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
474 "Mixed reader flavors for srcu_struct at %ps.\n", ssp);
483 static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
489 unlocks = srcu_readers_unlock_idx(ssp, idx, &rdm);
505 else if (srcu_gp_is_expedited(ssp))
567 return srcu_readers_lock_idx(ssp, idx, did_gp, unlocks);
573 * @ssp: srcu_struct in which to count active readers (holding srcu_read_lock).
579 static bool srcu_readers_active(struct srcu_struct *ssp)
585 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
646 static unsigned long srcu_get_delay(struct srcu_struct *ssp)
651 struct srcu_usage *sup = ssp->srcu_sup;
653 lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
654 if (srcu_gp_is_expedited(ssp))
673 * @ssp: structure to clean up.
678 void cleanup_srcu_struct(struct srcu_struct *ssp)
682 struct srcu_usage *sup = ssp->srcu_sup;
684 spin_lock_irq_rcu_node(ssp->srcu_sup);
685 delay = srcu_get_delay(ssp);
686 spin_unlock_irq_rcu_node(ssp->srcu_sup);
689 if (WARN_ON(srcu_readers_active(ssp)))
693 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
702 WARN_ON(srcu_readers_active(ssp))) {
704 __func__, ssp, rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)),
715 free_percpu(ssp->sda);
716 ssp->sda = NULL;
718 ssp->srcu_sup = NULL;
726 void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor)
735 sdp = raw_cpu_ptr(ssp->sda);
752 int __srcu_read_lock(struct srcu_struct *ssp)
754 struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
758 return __srcu_ptr_to_ctr(ssp, scp);
767 void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
770 this_cpu_inc(__srcu_ctr_to_ptr(ssp, idx)->srcu_unlocks.counter);
781 int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
783 struct srcu_ctr __percpu *scpp = READ_ONCE(ssp->srcu_ctrp);
788 return __srcu_ptr_to_ctr(ssp, scpp);
797 void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
800 atomic_long_inc(&raw_cpu_ptr(__srcu_ctr_to_ptr(ssp, idx))->srcu_unlocks);
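
__srcu_read_lock() and its NMI-safe variant above are normally reached through the public srcu_read_lock()/srcu_read_unlock() wrappers in include/linux/srcu.h. A reader-side sketch, with my_srcu, my_ptr, and struct my_data as illustrative assumptions:

#include <linux/srcu.h>

struct my_data {
	int val;
};

static struct my_data __rcu *my_ptr;
DEFINE_STATIC_SRCU(my_srcu);

static int my_reader(void)
{
	struct my_data *p;
	int idx, ret = 0;

	idx = srcu_read_lock(&my_srcu);		/* returns an index cookie */
	p = srcu_dereference(my_ptr, &my_srcu);	/* lockdep-checked load */
	if (p)
		ret = p->val;			/* may block here, unlike plain RCU */
	srcu_read_unlock(&my_srcu, idx);	/* pass the cookie back */
	return ret;
}
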
809 static void srcu_gp_start(struct srcu_struct *ssp)
813 lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
814 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed));
815 WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
816 WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
818 rcu_seq_start(&ssp->srcu_sup->srcu_gp_seq);
819 state = rcu_seq_state(ssp->srcu_sup->srcu_gp_seq);
857 static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
865 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
878 static void srcu_gp_end(struct srcu_struct *ssp)
891 struct srcu_usage *sup = ssp->srcu_sup;
900 if (srcu_gp_is_expedited(ssp))
915 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, get_boot_cpu_id()),
919 srcu_for_each_node_breadth_first(ssp, snp) {
937 srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
944 sdp = per_cpu_ptr(ssp->sda, cpu);
961 srcu_gp_start(ssp);
963 srcu_reschedule(ssp, 0);
971 init_srcu_struct_nodes(ssp, GFP_KERNEL);
984 static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
993 if (WARN_ON_ONCE(rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, s)) ||
1005 spin_lock_irqsave_ssp_contention(ssp, &flags);
1006 if (ULONG_CMP_LT(ssp->srcu_sup->srcu_gp_seq_needed_exp, s))
1007 WRITE_ONCE(ssp->srcu_sup->srcu_gp_seq_needed_exp, s);
1008 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
1024 static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
1033 struct srcu_usage *sup = ssp->srcu_sup;
1057 srcu_funnel_exp_start(ssp, snp, s);
1070 spin_lock_irqsave_ssp_contention(ssp, &flags);
1084 srcu_gp_start(ssp);
1093 !!srcu_get_delay(ssp));
1105 static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
1109 spin_lock_irq_rcu_node(ssp->srcu_sup);
1110 curdelay = !srcu_get_delay(ssp);
1111 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1114 if (srcu_readers_active_idx_check(ssp, idx))
1127 static void srcu_flip(struct srcu_struct *ssp)
1161 WRITE_ONCE(ssp->srcu_ctrp,
1162 &ssp->sda->srcu_ctrs[!(ssp->srcu_ctrp - &ssp->sda->srcu_ctrs[0])]);
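
The store at lines 1161-1162 toggles ->srcu_ctrp between the two elements of ->srcu_ctrs[]: subtracting the array base yields the current index (0 or 1), and '!' selects the other element. A standalone sketch of the same arithmetic, with the __percpu machinery stripped for illustration:

struct ctr { long locks, unlocks; };	/* simplified stand-in for srcu_ctr */

static struct ctr ctrs[2];
static struct ctr *ctrp = &ctrs[0];

static void flip(void)
{
	ctrp = &ctrs[!(ctrp - &ctrs[0])];	/* 0 -> 1, 1 -> 0 */
}
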
1208 static bool srcu_should_expedite(struct srcu_struct *ssp)
1216 check_init_srcu_struct(ssp);
1218 if (this_cpu_read(ssp->sda->srcu_reader_flavor) & SRCU_READ_FLAVOR_SLOWGP)
1221 sdp = raw_cpu_ptr(ssp->sda);
1237 tlast = READ_ONCE(ssp->srcu_sup->srcu_last_gp_end);
1243 curseq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq);
1245 if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_sup->srcu_gp_seq_needed)))
1248 if (curseq != rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq))
1263 static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
1275 check_init_srcu_struct(ssp);
1281 idx = __srcu_read_lock_nmisafe(ssp);
1282 ss_state = smp_load_acquire(&ssp->srcu_sup->srcu_size_state);
1284 sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
1286 sdp = raw_cpu_ptr(ssp->sda);
1326 s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
1329 rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
1356 srcu_funnel_gp_start(ssp, sdp, s, do_norm);
1358 srcu_funnel_exp_start(ssp, sdp_mynode, s);
1359 __srcu_read_unlock_nmisafe(ssp, idx);
1391 static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
1401 (void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
1406 * @ssp: srcu_struct on which to queue the callback
1425 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
1428 __call_srcu(ssp, rhp, func, true);
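
A callback-queuing sketch for call_srcu(), assuming a hypothetical my_obj that embeds the rcu_head:

#include <linux/slab.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);

struct my_obj {
	struct rcu_head rh;
	/* ... payload ... */
};

static void my_obj_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_obj, rh));
}

static void my_obj_retire(struct my_obj *obj)
{
	/* Invoked once a full my_srcu grace period has elapsed. */
	call_srcu(&my_srcu, &obj->rh, my_obj_free_cb);
}
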
1435 static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
1439 srcu_lock_sync(&ssp->dep_map);
1441 RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
1450 check_init_srcu_struct(ssp);
1453 __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
1469 * @ssp: srcu_struct with which to synchronize.
1477 void synchronize_srcu_expedited(struct srcu_struct *ssp)
1479 __synchronize_srcu(ssp, rcu_gp_is_normal());
1485 * @ssp: srcu_struct with which to synchronize.
1489 * the index=!(ssp->srcu_ctrp - &ssp->sda->srcu_ctrs[0]) to drain to zero
1532 void synchronize_srcu(struct srcu_struct *ssp)
1534 if (srcu_should_expedite(ssp) || rcu_gp_is_expedited())
1535 synchronize_srcu_expedited(ssp);
1537 __synchronize_srcu(ssp, true);
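
A typical update-side pattern for synchronize_srcu(): unpublish, wait for pre-existing readers, then free. my_ptr and struct my_data are the same illustrative assumptions as in the reader sketch above:

#include <linux/slab.h>
#include <linux/srcu.h>

struct my_data {
	int val;
};

static struct my_data __rcu *my_ptr;
DEFINE_STATIC_SRCU(my_srcu);

static void my_replace(struct my_data *newp)
{
	struct my_data *oldp;

	oldp = rcu_replace_pointer(my_ptr, newp, true);
	synchronize_srcu(&my_srcu);	/* waits only for my_srcu readers */
	kfree(oldp);
}
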
1543 * @ssp: srcu_struct to provide cookie for.
1551 unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
1556 return rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
1562 * @ssp: srcu_struct to provide cookie for.
1570 unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
1572 return srcu_gp_start_if_needed(ssp, NULL, true);
1578 * @ssp: srcu_struct to provide cookie for.
1601 bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
1604 !rcu_seq_done_exact(&ssp->srcu_sup->srcu_gp_seq, cookie))
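
The polling interfaces above combine as follows; a sketch assuming a hypothetical caller that cannot block:

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);

static unsigned long my_retire_begin(void)
{
	/* Starts a grace period if one is needed; returns its cookie. */
	return start_poll_synchronize_srcu(&my_srcu);
}

static bool my_retire_poll(unsigned long cookie)
{
	/* True once a full grace period has elapsed since the cookie. */
	return poll_state_synchronize_srcu(&my_srcu, cookie);
}
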
1619 struct srcu_struct *ssp;
1623 ssp = sdp->ssp;
1624 if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
1625 complete(&ssp->srcu_sup->srcu_barrier_completion);
1636 static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
1639 atomic_inc(&ssp->srcu_sup->srcu_barrier_cpu_cnt);
1645 atomic_dec(&ssp->srcu_sup->srcu_barrier_cpu_cnt);
1652 * @ssp: srcu_struct on which to wait for in-flight callbacks.
1654 void srcu_barrier(struct srcu_struct *ssp)
1658 unsigned long s = rcu_seq_snap(&ssp->srcu_sup->srcu_barrier_seq);
1660 check_init_srcu_struct(ssp);
1661 mutex_lock(&ssp->srcu_sup->srcu_barrier_mutex);
1662 if (rcu_seq_done(&ssp->srcu_sup->srcu_barrier_seq, s)) {
1664 mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex);
1667 rcu_seq_start(&ssp->srcu_sup->srcu_barrier_seq);
1668 init_completion(&ssp->srcu_sup->srcu_barrier_completion);
1671 atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 1);
1673 idx = __srcu_read_lock_nmisafe(ssp);
1674 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
1675 srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, get_boot_cpu_id()));
1678 srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
1679 __srcu_read_unlock_nmisafe(ssp, idx);
1682 if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
1683 complete(&ssp->srcu_sup->srcu_barrier_completion);
1684 wait_for_completion(&ssp->srcu_sup->srcu_barrier_completion);
1686 rcu_seq_end(&ssp->srcu_sup->srcu_barrier_seq);
1687 mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex);
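
srcu_barrier() waits for all previously queued call_srcu() callbacks to be invoked, which must happen before cleanup_srcu_struct() can succeed in the dynamic-allocation case. A teardown sketch:

#include <linux/srcu.h>

static void my_teardown(struct srcu_struct *ssp)
{
	srcu_barrier(ssp);		/* flush in-flight call_srcu() callbacks */
	cleanup_srcu_struct(ssp);	/* now safe to tear down */
}
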
1693 * @ssp: srcu_struct on which to report batch completion.
1698 unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1700 return READ_ONCE(ssp->srcu_sup->srcu_gp_seq);
1709 static void srcu_advance_state(struct srcu_struct *ssp)
1713 mutex_lock(&ssp->srcu_sup->srcu_gp_mutex);
1725 idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq)); /* ^^^ */
1727 spin_lock_irq_rcu_node(ssp->srcu_sup);
1728 if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) {
1729 WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq));
1730 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1731 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1734 idx = rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq));
1736 srcu_gp_start(ssp);
1737 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1739 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1744 if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1745 idx = !(ssp->srcu_ctrp - &ssp->sda->srcu_ctrs[0]);
1746 if (!try_check_zero(ssp, idx, 1)) {
1747 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1750 srcu_flip(ssp);
1751 spin_lock_irq_rcu_node(ssp->srcu_sup);
1752 rcu_seq_set_state(&ssp->srcu_sup->srcu_gp_seq, SRCU_STATE_SCAN2);
1753 ssp->srcu_sup->srcu_n_exp_nodelay = 0;
1754 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1757 if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1763 idx = !(ssp->srcu_ctrp - &ssp->sda->srcu_ctrs[0]);
1764 if (!try_check_zero(ssp, idx, 2)) {
1765 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1768 ssp->srcu_sup->srcu_n_exp_nodelay = 0;
1769 srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
1786 struct srcu_struct *ssp;
1790 ssp = sdp->ssp;
1795 rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
1840 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1844 spin_lock_irq_rcu_node(ssp->srcu_sup);
1845 if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) {
1846 if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq))) {
1850 } else if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq)) {
1852 srcu_gp_start(ssp);
1854 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1857 queue_delayed_work(rcu_gp_wq, &ssp->srcu_sup->work, delay);
1867 struct srcu_struct *ssp;
1871 ssp = sup->srcu_ssp;
1873 srcu_advance_state(ssp);
1874 spin_lock_irq_rcu_node(ssp->srcu_sup);
1875 curdelay = srcu_get_delay(ssp);
1876 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1891 srcu_reschedule(ssp, curdelay);
1894 void srcutorture_get_gp_data(struct srcu_struct *ssp, int *flags,
1898 *gp_seq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq);
1915 void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1920 int ss_state = READ_ONCE(ssp->srcu_sup->srcu_size_state);
1923 idx = ssp->srcu_ctrp - &ssp->sda->srcu_ctrs[0];
1927 tt, tf, rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq), ss_state,
1929 if (!ssp->sda) {
1940 sdp = per_cpu_ptr(ssp->sda, cpu);
1964 srcu_transition_to_big(ssp);
2020 struct srcu_struct *ssp;
2024 ssp = *(sspp++);
2025 ssp->sda = alloc_percpu(struct srcu_data);
2026 if (WARN_ON_ONCE(!ssp->sda))
2028 ssp->srcu_ctrp = &ssp->sda->srcu_ctrs[0];
2037 struct srcu_struct *ssp;
2041 ssp = *(sspp++);
2042 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed)) &&
2043 !WARN_ON_ONCE(!ssp->srcu_sup->sda_is_static))
2044 cleanup_srcu_struct(ssp);
2045 if (!WARN_ON(srcu_readers_active(ssp)))
2046 free_percpu(ssp->sda);