// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/*.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/*
 * Control conversion to SRCU_SIZE_BIG:
 *    0: Don't convert at all.
 *    1: Convert at init_srcu_struct() time.
 *    2: Convert when rcutorture invokes srcu_torture_stats_print().
 *    3: Decide at boot time based on system shape (default).
 * 0x1x: Convert when excessive contention is encountered.
 */
#define SRCU_SIZING_NONE	0
#define SRCU_SIZING_INIT	1
#define SRCU_SIZING_TORTURE	2
#define SRCU_SIZING_AUTO	3
#define SRCU_SIZING_CONTEND	0x10
#define SRCU_SIZING_IS(x) ((convert_to_big & ~SRCU_SIZING_CONTEND) == x)
#define SRCU_SIZING_IS_NONE() (SRCU_SIZING_IS(SRCU_SIZING_NONE))
#define SRCU_SIZING_IS_INIT() (SRCU_SIZING_IS(SRCU_SIZING_INIT))
#define SRCU_SIZING_IS_TORTURE() (SRCU_SIZING_IS(SRCU_SIZING_TORTURE))
#define SRCU_SIZING_IS_CONTEND() (convert_to_big & SRCU_SIZING_CONTEND)
static int convert_to_big = SRCU_SIZING_AUTO;
module_param(convert_to_big, int, 0444);

/* Number of CPUs to trigger init_srcu_struct()-time transition to big. */
static int big_cpu_lim __read_mostly = 128;
module_param(big_cpu_lim, int, 0444);

/* Contention events per jiffy to initiate transition to big. */
static int small_contention_lim __read_mostly = 100;
module_param(small_contention_lim, int, 0444);
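
/*
 * Editor's sketch, not in the original file: how the sizing knobs above
 * combine.  Booting with, for example,
 *
 *	srcutree.convert_to_big=0x13
 *
 * sets both the 0x10 contention bit and SRCU_SIZING_AUTO in the low bits,
 * so SRCU_SIZING_IS(SRCU_SIZING_AUTO) and SRCU_SIZING_IS_CONTEND() are
 * then both true:
 *
 *	(0x13 & ~SRCU_SIZING_CONTEND) == 0x03 == SRCU_SIZING_AUTO
 *	(0x13 &  SRCU_SIZING_CONTEND) == 0x10 != 0
 */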

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)							\
do {										\
	spin_lock(&ACCESS_PRIVATE(p, lock));					\
	smp_mb__after_unlock_lock();						\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)						\
do {										\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();						\
} while (0)

#define spin_unlock_irq_rcu_node(p)						\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)					\
do {										\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);			\
	smp_mb__after_unlock_lock();						\
} while (0)

#define spin_trylock_irqsave_rcu_node(p, flags)					\
({										\
	bool ___locked = spin_trylock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
										\
	if (___locked)								\
		smp_mb__after_unlock_lock();					\
	___locked;								\
})

#define spin_unlock_irqrestore_rcu_node(p, flags)				\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

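/*
 * Editor's sketch, not in the original file: typical use of the wrappers
 * above.  The smp_mb__after_unlock_lock() in each acquisition upgrades a
 * prior unlock followed by this lock to a full memory barrier, which the
 * grace-period code below depends on:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave_rcu_node(sdp, flags);
 *	// ... update grace-period bookkeeping in *sdp ...
 *	spin_unlock_irqrestore_rcu_node(sdp, flags);
 */
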
/*
 * Initialize SRCU per-CPU data.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static
 * parameter of init_srcu_struct_fields() is set, don't initialize
 * ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_data(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_sup->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_sup->srcu_gp_seq;
		sdp->mynode = NULL;
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
	}
}

/* Invalid seq state, used during snp node initialization */
#define SRCU_SNP_INIT_SEQ		0x2

/*
 * Check whether the sequence number assigned to the snp node
 * is invalid.
 */
static inline bool srcu_invl_snp_seq(unsigned long s)
{
	return s == SRCU_SNP_INIT_SEQ;
}

/*
 * Allocate and initialize the SRCU combining tree.  Returns @true if
 * allocation succeeded and @false otherwise.
 */
static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Initialize geometry if it has not already been initialized. */
	rcu_init_geometry();
	ssp->srcu_sup->node = kcalloc(rcu_num_nodes, sizeof(*ssp->srcu_sup->node), gfp_flags);
	if (!ssp->srcu_sup->node)
		return false;

	/* Work out the overall tree geometry. */
	ssp->srcu_sup->level[0] = &ssp->srcu_sup->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->srcu_sup->level[i] = ssp->srcu_sup->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->srcu_sup->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->srcu_sup->level[level + 1])
			level++;
		snp->srcu_parent = ssp->srcu_sup->level[level - 1] +
				   (snp - ssp->srcu_sup->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	level = rcu_num_lvls - 1;
	snp_first = ssp->srcu_sup->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->grpmask = 1UL << (cpu - sdp->mynode->grplo);
	}
	smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
	return true;
}
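
/*
 * Editor's worked example, not in the original file, assuming the default
 * fanout of 16: on a 64-CPU system, rcu_init_geometry() yields a two-level
 * tree with one root srcu_node and four leaves, and levelspread[1] == 16
 * CPUs per leaf.  CPU 37's srcu_data then maps to leaf 37 / 16 == 2
 * (covering CPUs 32-47), so its ->grpmask is 1UL << (37 - 32) == 0x20.
 */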

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static parameter
 * tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	if (!is_static)
		ssp->srcu_sup = kzalloc(sizeof(*ssp->srcu_sup), GFP_KERNEL);
	if (!ssp->srcu_sup)
		return -ENOMEM;
	if (!is_static)
		spin_lock_init(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
	ssp->srcu_sup->srcu_size_state = SRCU_SIZE_SMALL;
	ssp->srcu_sup->node = NULL;
	mutex_init(&ssp->srcu_sup->srcu_cb_mutex);
	mutex_init(&ssp->srcu_sup->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_sup->srcu_gp_seq = 0;
	ssp->srcu_sup->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_sup->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->srcu_sup->work, process_srcu);
	ssp->srcu_sup->sda_is_static = is_static;
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		goto err_free_sup;
	init_srcu_struct_data(ssp);
	ssp->srcu_sup->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_sup->srcu_last_gp_end = ktime_get_mono_fast_ns();
	if (READ_ONCE(ssp->srcu_sup->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
		if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC))
			goto err_free_sda;
		WRITE_ONCE(ssp->srcu_sup->srcu_size_state, SRCU_SIZE_BIG);
	}
	ssp->srcu_sup->srcu_ssp = ssp;
	smp_store_release(&ssp->srcu_sup->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;

err_free_sda:
	if (!is_static) {
		free_percpu(ssp->sda);
		ssp->sda = NULL;
	}
err_free_sup:
	if (!is_static) {
		kfree(ssp->srcu_sup);
		ssp->srcu_sup = NULL;
	}
	return -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
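
/*
 * Editor's sketch, not in the original file: the two usual ways an SRCU
 * domain comes into being.  Dynamic initialization pairs with
 * cleanup_srcu_struct() (my_srcu is a hypothetical name):
 *
 *	struct srcu_struct my_srcu;
 *
 *	if (init_srcu_struct(&my_srcu))
 *		return -ENOMEM;
 *	// ... use the domain ...
 *	cleanup_srcu_struct(&my_srcu);
 *
 * Static allocation instead uses DEFINE_SRCU(my_srcu) or
 * DEFINE_STATIC_SRCU(my_srcu), whose remaining run-time setup is
 * performed on first use by check_init_srcu_struct() below.
 */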

/*
 * Initiate a transition to SRCU_SIZE_BIG with lock held.
 */
static void __srcu_transition_to_big(struct srcu_struct *ssp)
{
	lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
	smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_ALLOC);
}

/*
 * Initiate an idempotent transition to SRCU_SIZE_BIG.
 */
static void srcu_transition_to_big(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* Double-checked locking on ->srcu_size_state. */
	if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL)
		return;
	spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags);
	if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL) {
		spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
		return;
	}
	__srcu_transition_to_big(ssp);
	spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
}

/*
 * Check to see if the just-encountered contention event justifies
 * a transition to SRCU_SIZE_BIG.
 */
static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
{
	unsigned long j;

	if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_sup->srcu_size_state)
		return;
	j = jiffies;
	if (ssp->srcu_sup->srcu_size_jiffies != j) {
		ssp->srcu_sup->srcu_size_jiffies = j;
		ssp->srcu_sup->srcu_n_lock_retries = 0;
	}
	if (++ssp->srcu_sup->srcu_n_lock_retries <= small_contention_lim)
		return;
	__srcu_transition_to_big(ssp);
}

/*
 * Acquire the specified srcu_data structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_sdp_contention(struct srcu_data *sdp, unsigned long *flags)
{
	struct srcu_struct *ssp = sdp->ssp;

	if (spin_trylock_irqsave_rcu_node(sdp, *flags))
		return;
	spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags);
	spin_lock_irqsave_check_contention(ssp);
	spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, *flags);
	spin_lock_irqsave_rcu_node(sdp, *flags);
}

/*
 * Acquire the specified srcu_struct structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
{
	if (spin_trylock_irqsave_rcu_node(ssp->srcu_sup, *flags))
		return;
	spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags);
	spin_lock_irqsave_check_contention(ssp);
}

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->srcu_sup->lock,
 * which -is- compile-time initialized, to resolve races involving
 * multiple CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags);
	if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += atomic_long_read(&cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long mask = 0;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += atomic_long_read(&cpuc->srcu_unlock_count[idx]);
		if (IS_ENABLED(CONFIG_PROVE_RCU))
			mask = mask | READ_ONCE(cpuc->srcu_nmi_safety);
	}
	WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && (mask & (mask >> 1)),
		  "Mixed NMI-safe readers for srcu_struct at %ps.\n", ssp);
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted. Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side. In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some point in this function.
	 * But there might be more readers, as a task might have read
	 * the current ->srcu_idx but not yet have incremented its CPU's
	 * ->srcu_lock_count[idx] counter.  In fact, it is possible
	 * that most of the tasks have been preempted between fetching
	 * ->srcu_idx and incrementing ->srcu_lock_count[idx].  And there
	 * could be almost (ULONG_MAX / sizeof(struct task_struct)) tasks
	 * in a system whose address space was fully populated with memory.
	 * Call this quantity Nt.
	 *
	 * So suppose that the updater is preempted at this point in the
	 * code for a long time.  That now-preempted updater has already
	 * flipped ->srcu_idx (possibly during the preceding grace period),
	 * done an smp_mb() (again, possibly during the preceding grace
	 * period), and summed up the ->srcu_unlock_count[idx] counters.
	 * How many times can a given one of the aforementioned Nt tasks
	 * increment the old ->srcu_idx value's ->srcu_lock_count[idx]
	 * counter, in the absence of nesting?
	 *
	 * It can clearly do so once, given that it has already fetched
	 * the old value of ->srcu_idx and is just about to use that value
	 * to index its increment of ->srcu_lock_count[idx].  But as soon as
	 * it leaves that SRCU read-side critical section, it will increment
	 * ->srcu_unlock_count[idx], which must follow the updater's above
	 * read from that same value.  Thus, as soon as the reading task does
	 * an smp_mb() and a later fetch from ->srcu_idx, that task will be
	 * guaranteed to get the new index.  Except that the increment of
	 * ->srcu_unlock_count[idx] in __srcu_read_unlock() is after the
	 * smp_mb(), and the fetch from ->srcu_idx in __srcu_read_lock()
	 * is before the smp_mb().  Thus, that task might not see the new
	 * value of ->srcu_idx until the -second- __srcu_read_lock(),
	 * which in turn means that this task might well increment
	 * ->srcu_lock_count[idx] for the old value of ->srcu_idx twice,
	 * not just once.
	 *
	 * However, it is important to note that a given smp_mb() takes
	 * effect not just for the task executing it, but also for any
	 * later task running on that same CPU.
	 *
	 * That is, there can be almost Nt + Nc further increments of
	 * ->srcu_lock_count[idx] for the old index, where Nc is the number
	 * of CPUs.  But this is OK because the size of the task_struct
	 * structure limits the value of Nt and current systems limit Nc
	 * to a few thousand.
	 *
	 * OK, but what about nesting?  This does impose a limit on
	 * nesting of half of the size of the task_struct structure
	 * (measured in bytes), which should be sufficient.  A late 2022
	 * TREE01 rcutorture run reported this size to be no less than
	 * 9408 bytes, allowing up to 4704 levels of nesting, which is
	 * comfortably beyond excessive.  Especially on 64-bit systems,
	 * which are unlikely to be configured with an address space fully
	 * populated with memory, at least not anytime soon.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}
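
/*
 * Editor's worked example, not in the original file: suppose idx == 0,
 * the summed ->srcu_unlock_count[0] is 40, and one reader's
 * __srcu_read_lock() increment was already visible when the sums were
 * taken but its matching __srcu_read_unlock() has not yet happened.
 * The barrier pairing described above guarantees that every counted
 * unlock has its lock counted too, so the ->srcu_lock_count[0] sum is
 * at least 41, the == comparison fails, and the pre-existing reader is
 * correctly reported.
 */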

/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += atomic_long_read(&cpuc->srcu_lock_count[0]);
		sum += atomic_long_read(&cpuc->srcu_lock_count[1]);
		sum -= atomic_long_read(&cpuc->srcu_unlock_count[0]);
		sum -= atomic_long_read(&cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below, boot-time configurable) to allow SRCU readers to exit
 * their read-side critical sections.  If there are still some readers
 * after one jiffy, we repeatedly block in one-jiffy intervals.
 * The blocking time is increased as the grace-period age increases,
 * with max blocking time capped at 10 jiffies.
 */
#define SRCU_DEFAULT_RETRY_CHECK_DELAY		5

static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
module_param(srcu_retry_check_delay, ulong, 0444);

#define SRCU_INTERVAL		1		// Base delay if no expedited GPs pending.
#define SRCU_MAX_INTERVAL	10		// Maximum incremental delay from slow readers.

#define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO	3UL	// Lowmark on default per-GP-phase
							// no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI	1000UL	// Highmark on default per-GP-phase
							// no-delay instances.

#define SRCU_UL_CLAMP_LO(val, low)	((val) > (low) ? (val) : (low))
#define SRCU_UL_CLAMP_HI(val, high)	((val) < (high) ? (val) : (high))
#define SRCU_UL_CLAMP(val, low, high)	SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))
// Per-GP-phase no-delay instances, adjusted to allow a non-sleeping poll of
// up to one jiffy's duration.  The multiplication by 2 factors in the extra
// srcu_get_delay() call from process_srcu().
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED	\
	(2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)

// Maximum per-GP-phase consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE	\
	SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED,	\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_LO,	\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)

static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
module_param(srcu_max_nodelay_phase, ulong, 0444);

// Maximum consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY	(SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ?	\
					 SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)

static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
module_param(srcu_max_nodelay, ulong, 0444);
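
// Editor's worked example, not in the original file: with HZ=1000 (one
// jiffy is 1000 microseconds) and the default SRCU_DEFAULT_RETRY_CHECK_DELAY
// of 5, the adjusted value is 2 * 1000000 / 1000 / 5 == 400, which
// SRCU_UL_CLAMP() leaves untouched since 3 <= 400 <= 1000.  So
// srcu_max_nodelay_phase defaults to 400, and srcu_max_nodelay to 400 as
// well, 400 being above the 100 floor.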

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	unsigned long gpstart;
	unsigned long j;
	unsigned long jbase = SRCU_INTERVAL;
	struct srcu_usage *sup = ssp->srcu_sup;

	if (ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp)))
		jbase = 0;
	if (rcu_seq_state(READ_ONCE(sup->srcu_gp_seq))) {
		j = jiffies - 1;
		gpstart = READ_ONCE(sup->srcu_gp_start);
		if (time_after(j, gpstart))
			jbase += j - gpstart;
		if (!jbase) {
			WRITE_ONCE(sup->srcu_n_exp_nodelay, READ_ONCE(sup->srcu_n_exp_nodelay) + 1);
			if (READ_ONCE(sup->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
				jbase = 1;
		}
	}
	return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
}
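
/*
 * Editor's worked example, not in the original file: with no expedited
 * request pending, jbase starts at SRCU_INTERVAL (1), and a grace period
 * that has been running for 15 jiffies pushes it to roughly 16, which the
 * final line caps at SRCU_MAX_INTERVAL (10).  With an expedited request
 * pending, jbase starts at 0 and stays there until more than
 * srcu_max_nodelay_phase consecutive no-delay passes have occurred, after
 * which it becomes 1.
 */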

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_usage *sup = ssp->srcu_sup;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&sup->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(rcu_seq_current(&sup->srcu_gp_seq) != sup->srcu_gp_seq_needed) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)),
			rcu_seq_current(&sup->srcu_gp_seq), sup->srcu_gp_seq_needed);
		return; // Caller forgot to stop doing call_srcu()?
			// Or caller invoked start_poll_synchronize_srcu()
			// and then cleanup_srcu_struct() before that grace
			// period ended?
	}
	kfree(sup->node);
	sup->node = NULL;
	sup->srcu_size_state = SRCU_SIZE_SMALL;
	if (!sup->sda_is_static) {
		free_percpu(ssp->sda);
		ssp->sda = NULL;
		kfree(sup);
		ssp->srcu_sup = NULL;
	}
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
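
/*
 * Editor's sketch, not in the original file: the checks above mean that a
 * correct teardown must quiesce the domain first (my_srcu, p, and my_cb
 * are hypothetical names):
 *
 *	call_srcu(&my_srcu, &p->rh, my_cb);	// Outstanding callback.
 *	...
 *	srcu_barrier(&my_srcu);			// Wait for my_cb to run.
 *	cleanup_srcu_struct(&my_srcu);		// Now safe.
 *
 * Skipping the srcu_barrier() trips the rcu_segcblist_n_cbs() warning
 * above and leaks the structure.
 */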

#ifdef CONFIG_PROVE_RCU
/*
 * Check for consistent NMI safety.
 */
void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
{
	int nmi_safe_mask = 1 << nmi_safe;
	int old_nmi_safe_mask;
	struct srcu_data *sdp;

	/* NMI-unsafe use in NMI is a bad sign */
	WARN_ON_ONCE(!nmi_safe && in_nmi());
	sdp = raw_cpu_ptr(ssp->sda);
	old_nmi_safe_mask = READ_ONCE(sdp->srcu_nmi_safety);
	if (!old_nmi_safe_mask) {
		WRITE_ONCE(sdp->srcu_nmi_safety, nmi_safe_mask);
		return;
	}
	WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
}
EXPORT_SYMBOL_GPL(srcu_check_nmi_safety);
#endif /* CONFIG_PROVE_RCU */

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
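
/*
 * Editor's sketch, not in the original file: __srcu_read_lock() and
 * __srcu_read_unlock() are normally reached through the srcu_read_lock()
 * and srcu_read_unlock() wrappers in include/linux/srcu.h (my_srcu and gp
 * are hypothetical names for an SRCU domain and an SRCU-protected pointer):
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		do_something(p);	// May block: this is sleepable RCU.
 *	srcu_read_unlock(&my_srcu, idx);
 */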

#ifdef CONFIG_NEED_SRCU_NMI_SAFE

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct, but in an NMI-safe manner using RMW atomics.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
{
	int idx;
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	atomic_long_inc(&sdp->srcu_lock_count[idx]);
	smp_mb__after_atomic(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
{
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	smp_mb__before_atomic(); /* C */  /* Avoid leaking the critical section. */
	atomic_long_inc(&sdp->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);

#endif // CONFIG_NEED_SRCU_NMI_SAFE
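
/*
 * Editor's sketch, not in the original file: the _nmisafe variants above
 * back srcu_read_lock_nmisafe() and srcu_read_unlock_nmisafe(), which may
 * be used from NMI handlers at the cost of RMW atomics:
 *
 *	int idx;
 *
 *	idx = srcu_read_lock_nmisafe(&my_srcu);
 *	// ... read-side critical section, safe against NMI reentry ...
 *	srcu_read_unlock_nmisafe(&my_srcu, idx);
 *
 * A given srcu_struct must not mix nmisafe and plain readers, which is
 * what srcu_check_nmi_safety() above enforces under CONFIG_PROVE_RCU.
 */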

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed));
	WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
	WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_sup->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_sup->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}
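
/*
 * Editor's note, not in the original file: ->srcu_gp_seq uses the rcu_seq
 * format, with the low two bits holding the phase (SRCU_STATE_IDLE,
 * SRCU_STATE_SCAN1, or SRCU_STATE_SCAN2) and the remaining bits counting
 * grace periods.  For example, the value 0x1d decodes as grace-period
 * counter 0x1d >> 2 == 7 in phase 0x1d & 0x3 == 1 == SRCU_STATE_SCAN1,
 * which is why rcu_seq_start() above must leave the sequence in SCAN1.
 */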
789dad81a20SPaul E. McKenney 
790da915ad5SPaul E. McKenney 
791e81baf4cSSebastian Andrzej Siewior static void srcu_delay_timer(struct timer_list *t)
792da915ad5SPaul E. McKenney {
793e81baf4cSSebastian Andrzej Siewior 	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
794e81baf4cSSebastian Andrzej Siewior 
795e81baf4cSSebastian Andrzej Siewior 	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
796da915ad5SPaul E. McKenney }
797da915ad5SPaul E. McKenney 
798e81baf4cSSebastian Andrzej Siewior static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
799da915ad5SPaul E. McKenney 				       unsigned long delay)
800da915ad5SPaul E. McKenney {
801e81baf4cSSebastian Andrzej Siewior 	if (!delay) {
802e81baf4cSSebastian Andrzej Siewior 		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
803e81baf4cSSebastian Andrzej Siewior 		return;
804e81baf4cSSebastian Andrzej Siewior 	}
805da915ad5SPaul E. McKenney 
806e81baf4cSSebastian Andrzej Siewior 	timer_reduce(&sdp->delay_work, jiffies + delay);
807da915ad5SPaul E. McKenney }
808da915ad5SPaul E. McKenney 
809da915ad5SPaul E. McKenney /*
810da915ad5SPaul E. McKenney  * Schedule callback invocation for the specified srcu_data structure,
811da915ad5SPaul E. McKenney  * if possible, on the corresponding CPU.
812da915ad5SPaul E. McKenney  */
813da915ad5SPaul E. McKenney static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
814da915ad5SPaul E. McKenney {
815e81baf4cSSebastian Andrzej Siewior 	srcu_queue_delayed_work_on(sdp, delay);
816da915ad5SPaul E. McKenney }
817da915ad5SPaul E. McKenney 
818da915ad5SPaul E. McKenney /*
819da915ad5SPaul E. McKenney  * Schedule callback invocation for all srcu_data structures associated
820c7e88067SPaul E. McKenney  * with the specified srcu_node structure that have callbacks for the
821c7e88067SPaul E. McKenney  * just-completed grace period, the one corresponding to idx.  If possible,
822c7e88067SPaul E. McKenney  * schedule this invocation on the corresponding CPUs.
823da915ad5SPaul E. McKenney  */
824aacb5d91SPaul E. McKenney static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
8251e9a038bSPaul E. McKenney 				  unsigned long mask, unsigned long delay)
826da915ad5SPaul E. McKenney {
827da915ad5SPaul E. McKenney 	int cpu;
828da915ad5SPaul E. McKenney 
829c7e88067SPaul E. McKenney 	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
830d8d5b7bfSDenis Arefev 		if (!(mask & (1UL << (cpu - snp->grplo))))
831c7e88067SPaul E. McKenney 			continue;
832aacb5d91SPaul E. McKenney 		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
833da915ad5SPaul E. McKenney 	}
834c7e88067SPaul E. McKenney }
835da915ad5SPaul E. McKenney 
836da915ad5SPaul E. McKenney /*
837da915ad5SPaul E. McKenney  * Note the end of an SRCU grace period.  Initiates callback invocation
838da915ad5SPaul E. McKenney  * and starts a new grace period if needed.
839da915ad5SPaul E. McKenney  *
840da915ad5SPaul E. McKenney  * The ->srcu_cb_mutex acquisition does not protect any data, but
841da915ad5SPaul E. McKenney  * instead prevents more than one grace period from starting while we
842da915ad5SPaul E. McKenney  * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
843da915ad5SPaul E. McKenney  * array to have a finite number of elements.
844da915ad5SPaul E. McKenney  */
845aacb5d91SPaul E. McKenney static void srcu_gp_end(struct srcu_struct *ssp)
846da915ad5SPaul E. McKenney {
8474f2bfd94SNeeraj Upadhyay 	unsigned long cbdelay = 1;
848da915ad5SPaul E. McKenney 	bool cbs;
8498ddbd883SIldar Ismagilov 	bool last_lvl;
850c350c008SPaul E. McKenney 	int cpu;
851c350c008SPaul E. McKenney 	unsigned long flags;
852da915ad5SPaul E. McKenney 	unsigned long gpseq;
853da915ad5SPaul E. McKenney 	int idx;
854c7e88067SPaul E. McKenney 	unsigned long mask;
855c350c008SPaul E. McKenney 	struct srcu_data *sdp;
856cbdc98e9SPaul E. McKenney 	unsigned long sgsne;
857da915ad5SPaul E. McKenney 	struct srcu_node *snp;
858e2f63836SPaul E. McKenney 	int ss_state;
8596c366522SPaul E. McKenney 	struct srcu_usage *sup = ssp->srcu_sup;
860da915ad5SPaul E. McKenney 
861da915ad5SPaul E. McKenney 	/* Prevent more than one additional grace period. */
8626c366522SPaul E. McKenney 	mutex_lock(&sup->srcu_cb_mutex);
863da915ad5SPaul E. McKenney 
864da915ad5SPaul E. McKenney 	/* End the current grace period. */
8656c366522SPaul E. McKenney 	spin_lock_irq_rcu_node(sup);
8666c366522SPaul E. McKenney 	idx = rcu_seq_state(sup->srcu_gp_seq);
867da915ad5SPaul E. McKenney 	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
8686c366522SPaul E. McKenney 	if (ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp)))
8694f2bfd94SNeeraj Upadhyay 		cbdelay = 0;
8704f2bfd94SNeeraj Upadhyay 
8716c366522SPaul E. McKenney 	WRITE_ONCE(sup->srcu_last_gp_end, ktime_get_mono_fast_ns());
8726c366522SPaul E. McKenney 	rcu_seq_end(&sup->srcu_gp_seq);
8736c366522SPaul E. McKenney 	gpseq = rcu_seq_current(&sup->srcu_gp_seq);
8746c366522SPaul E. McKenney 	if (ULONG_CMP_LT(sup->srcu_gp_seq_needed_exp, gpseq))
8756c366522SPaul E. McKenney 		WRITE_ONCE(sup->srcu_gp_seq_needed_exp, gpseq);
8766c366522SPaul E. McKenney 	spin_unlock_irq_rcu_node(sup);
8776c366522SPaul E. McKenney 	mutex_unlock(&sup->srcu_gp_mutex);
878da915ad5SPaul E. McKenney 	/* A new grace period can start at this point.  But only one. */
879da915ad5SPaul E. McKenney 
880da915ad5SPaul E. McKenney 	/* Initiate callback invocation as needed. */
8816c366522SPaul E. McKenney 	ss_state = smp_load_acquire(&sup->srcu_size_state);
882c69a00a1SPaul E. McKenney 	if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
8837f24626dSPingfan Liu 		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, get_boot_cpu_id()),
8847f24626dSPingfan Liu 					cbdelay);
885994f7068SPaul E. McKenney 	} else {
886da915ad5SPaul E. McKenney 		idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
887aacb5d91SPaul E. McKenney 		srcu_for_each_node_breadth_first(ssp, snp) {
888d6331980SPaul E. McKenney 			spin_lock_irq_rcu_node(snp);
889da915ad5SPaul E. McKenney 			cbs = false;
8906c366522SPaul E. McKenney 			last_lvl = snp >= sup->level[rcu_num_lvls - 1];
8918ddbd883SIldar Ismagilov 			if (last_lvl)
892c69a00a1SPaul E. McKenney 				cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
893da915ad5SPaul E. McKenney 			snp->srcu_have_cbs[idx] = gpseq;
894da915ad5SPaul E. McKenney 			rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
895cbdc98e9SPaul E. McKenney 			sgsne = snp->srcu_gp_seq_needed_exp;
896cbdc98e9SPaul E. McKenney 			if (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, gpseq))
8977ff8b450SPaul E. McKenney 				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
898c69a00a1SPaul E. McKenney 			if (ss_state < SRCU_SIZE_BIG)
899c69a00a1SPaul E. McKenney 				mask = ~0;
900c69a00a1SPaul E. McKenney 			else
901c7e88067SPaul E. McKenney 				mask = snp->srcu_data_have_cbs[idx];
902c7e88067SPaul E. McKenney 			snp->srcu_data_have_cbs[idx] = 0;
903d6331980SPaul E. McKenney 			spin_unlock_irq_rcu_node(snp);
904a3883df3SPaul E. McKenney 			if (cbs)
905aacb5d91SPaul E. McKenney 				srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
906994f7068SPaul E. McKenney 		}
907994f7068SPaul E. McKenney 	}
908c350c008SPaul E. McKenney 
909c350c008SPaul E. McKenney 	/* Occasionally prevent srcu_data counter wrap. */
910994f7068SPaul E. McKenney 	if (!(gpseq & counter_wrap_check))
911994f7068SPaul E. McKenney 		for_each_possible_cpu(cpu) {
912aacb5d91SPaul E. McKenney 			sdp = per_cpu_ptr(ssp->sda, cpu);
913d6331980SPaul E. McKenney 			spin_lock_irqsave_rcu_node(sdp, flags);
914994f7068SPaul E. McKenney 			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
915c350c008SPaul E. McKenney 				sdp->srcu_gp_seq_needed = gpseq;
916994f7068SPaul E. McKenney 			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
917a35d13ecSIldar Ismagilov 				sdp->srcu_gp_seq_needed_exp = gpseq;
918d6331980SPaul E. McKenney 			spin_unlock_irqrestore_rcu_node(sdp, flags);
919c350c008SPaul E. McKenney 		}
920da915ad5SPaul E. McKenney 
921da915ad5SPaul E. McKenney 	/* Callback initiation done, allow grace periods after next. */
9226c366522SPaul E. McKenney 	mutex_unlock(&sup->srcu_cb_mutex);
923da915ad5SPaul E. McKenney 
924da915ad5SPaul E. McKenney 	/* Start a new grace period if needed. */
9256c366522SPaul E. McKenney 	spin_lock_irq_rcu_node(sup);
9266c366522SPaul E. McKenney 	gpseq = rcu_seq_current(&sup->srcu_gp_seq);
927da915ad5SPaul E. McKenney 	if (!rcu_seq_state(gpseq) &&
9286c366522SPaul E. McKenney 	    ULONG_CMP_LT(gpseq, sup->srcu_gp_seq_needed)) {
929aacb5d91SPaul E. McKenney 		srcu_gp_start(ssp);
9306c366522SPaul E. McKenney 		spin_unlock_irq_rcu_node(sup);
931aacb5d91SPaul E. McKenney 		srcu_reschedule(ssp, 0);
932da915ad5SPaul E. McKenney 	} else {
9336c366522SPaul E. McKenney 		spin_unlock_irq_rcu_node(sup);
934da915ad5SPaul E. McKenney 	}
935e2f63836SPaul E. McKenney 
936e2f63836SPaul E. McKenney 	/* Transition to big if needed. */
937e2f63836SPaul E. McKenney 	if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
938e2f63836SPaul E. McKenney 		if (ss_state == SRCU_SIZE_ALLOC)
939c69a00a1SPaul E. McKenney 			init_srcu_struct_nodes(ssp, GFP_KERNEL);
940e2f63836SPaul E. McKenney 		else
9416c366522SPaul E. McKenney 			smp_store_release(&sup->srcu_size_state, ss_state + 1);
942e2f63836SPaul E. McKenney 	}
943da915ad5SPaul E. McKenney }
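
/*
 * For reference when reading the sequence-number manipulations above:
 * ->srcu_gp_seq packs a grace-period counter in its upper bits and a
 * two-bit phase in its low-order bits (see rcu_seq_ctr() and
 * rcu_seq_state() in rcu.h).  A minimal worked example, assuming that
 * encoding:
 *
 *	unsigned long seq = 0;				(ctr 0, SRCU_STATE_IDLE)
 *
 *	rcu_seq_start(&seq);				(seq == 0x1: ctr 0, SCAN1)
 *	rcu_seq_set_state(&seq, SRCU_STATE_SCAN2);	(seq == 0x2: ctr 0, SCAN2)
 *	rcu_seq_end(&seq);				(seq == 0x4: ctr 1, IDLE)
 *
 * The idx computed from rcu_seq_ctr(gpseq) in srcu_gp_end() therefore
 * alternates between 0 and 1 on successive grace periods.
 */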
944da915ad5SPaul E. McKenney 
945da915ad5SPaul E. McKenney /*
9461e9a038bSPaul E. McKenney  * Funnel-locking scheme to scalably mediate many concurrent expedited
9471e9a038bSPaul E. McKenney  * grace-period requests.  This function is invoked for the first known
9481e9a038bSPaul E. McKenney  * expedited request for a grace period that has already been requested,
9491e9a038bSPaul E. McKenney  * but without expediting.  To start a completely new grace period,
9501e9a038bSPaul E. McKenney  * whether expedited or not, use srcu_funnel_gp_start() instead.
9511e9a038bSPaul E. McKenney  */
952aacb5d91SPaul E. McKenney static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
9531e9a038bSPaul E. McKenney 				  unsigned long s)
9541e9a038bSPaul E. McKenney {
9551e9a038bSPaul E. McKenney 	unsigned long flags;
956cbdc98e9SPaul E. McKenney 	unsigned long sgsne;
9571e9a038bSPaul E. McKenney 
958994f7068SPaul E. McKenney 	if (snp)
9591e9a038bSPaul E. McKenney 		for (; snp != NULL; snp = snp->srcu_parent) {
960cbdc98e9SPaul E. McKenney 			sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
96103200b5cSPaul E. McKenney 			if (WARN_ON_ONCE(rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, s)) ||
962cbdc98e9SPaul E. McKenney 			    (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
9631e9a038bSPaul E. McKenney 				return;
964d6331980SPaul E. McKenney 			spin_lock_irqsave_rcu_node(snp, flags);
965cbdc98e9SPaul E. McKenney 			sgsne = snp->srcu_gp_seq_needed_exp;
966cbdc98e9SPaul E. McKenney 			if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)) {
967d6331980SPaul E. McKenney 				spin_unlock_irqrestore_rcu_node(snp, flags);
9681e9a038bSPaul E. McKenney 				return;
9691e9a038bSPaul E. McKenney 			}
9701e9a038bSPaul E. McKenney 			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
971d6331980SPaul E. McKenney 			spin_unlock_irqrestore_rcu_node(snp, flags);
9721e9a038bSPaul E. McKenney 		}
9739f2e91d9SPaul E. McKenney 	spin_lock_irqsave_ssp_contention(ssp, &flags);
97403200b5cSPaul E. McKenney 	if (ULONG_CMP_LT(ssp->srcu_sup->srcu_gp_seq_needed_exp, s))
97503200b5cSPaul E. McKenney 		WRITE_ONCE(ssp->srcu_sup->srcu_gp_seq_needed_exp, s);
976b3fb11f7SPaul E. McKenney 	spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
9771e9a038bSPaul E. McKenney }
9781e9a038bSPaul E. McKenney 
9791e9a038bSPaul E. McKenney /*
980da915ad5SPaul E. McKenney  * Funnel-locking scheme to scalably mediate many concurrent grace-period
981da915ad5SPaul E. McKenney  * requests.  The winner has to do the work of actually starting grace
982da915ad5SPaul E. McKenney  * period s.  Losers must either ensure that their desired grace-period
983da915ad5SPaul E. McKenney  * number is recorded on at least their leaf srcu_node structure, or they
984da915ad5SPaul E. McKenney  * must take steps to invoke their own callbacks.
98517294ce6SPaul E. McKenney  *
98617294ce6SPaul E. McKenney  * Note that this function also does the work of srcu_funnel_exp_start(),
98717294ce6SPaul E. McKenney  * in some cases by directly invoking it.
9881bafbfb3SPingfan Liu  *
9891bafbfb3SPingfan Liu  * The SRCU read lock should be held around this function, and s is a
9901bafbfb3SPingfan Liu  * grace-period sequence snapshot taken after acquiring that lock.
991da915ad5SPaul E. McKenney  */
992aacb5d91SPaul E. McKenney static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
9931e9a038bSPaul E. McKenney 				 unsigned long s, bool do_norm)
994da915ad5SPaul E. McKenney {
995da915ad5SPaul E. McKenney 	unsigned long flags;
996da915ad5SPaul E. McKenney 	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
997cbdc98e9SPaul E. McKenney 	unsigned long sgsne;
9987b9e9b58SPaul E. McKenney 	struct srcu_node *snp;
9990b56f953SNeeraj Upadhyay 	struct srcu_node *snp_leaf;
1000da915ad5SPaul E. McKenney 	unsigned long snp_seq;
1001cefc0a59SPaul E. McKenney 	struct srcu_usage *sup = ssp->srcu_sup;
1002da915ad5SPaul E. McKenney 
10030b56f953SNeeraj Upadhyay 	/* Ensure that snp node tree is fully initialized before traversing it */
1004cefc0a59SPaul E. McKenney 	if (smp_load_acquire(&sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
10050b56f953SNeeraj Upadhyay 		snp_leaf = NULL;
10060b56f953SNeeraj Upadhyay 	else
10070b56f953SNeeraj Upadhyay 		snp_leaf = sdp->mynode;
10080b56f953SNeeraj Upadhyay 
1009994f7068SPaul E. McKenney 	if (snp_leaf)
1010da915ad5SPaul E. McKenney 		/* Each pass through the loop does one level of the srcu_node tree. */
10117b9e9b58SPaul E. McKenney 		for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
1012cefc0a59SPaul E. McKenney 			if (WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) && snp != snp_leaf)
1013da915ad5SPaul E. McKenney 				return; /* GP already done and CBs recorded. */
1014d6331980SPaul E. McKenney 			spin_lock_irqsave_rcu_node(snp, flags);
1015da915ad5SPaul E. McKenney 			snp_seq = snp->srcu_have_cbs[idx];
1016cbdc98e9SPaul E. McKenney 			if (!srcu_invl_snp_seq(snp_seq) && ULONG_CMP_GE(snp_seq, s)) {
10177b9e9b58SPaul E. McKenney 				if (snp == snp_leaf && snp_seq == s)
1018c7e88067SPaul E. McKenney 					snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
1019d6331980SPaul E. McKenney 				spin_unlock_irqrestore_rcu_node(snp, flags);
10207b9e9b58SPaul E. McKenney 				if (snp == snp_leaf && snp_seq != s) {
1021aeb9b39bSPaul E. McKenney 					srcu_schedule_cbs_sdp(sdp, do_norm ? SRCU_INTERVAL : 0);
10221e9a038bSPaul E. McKenney 					return;
1023da915ad5SPaul E. McKenney 				}
10241e9a038bSPaul E. McKenney 				if (!do_norm)
1025aacb5d91SPaul E. McKenney 					srcu_funnel_exp_start(ssp, snp, s);
1026da915ad5SPaul E. McKenney 				return;
1027da915ad5SPaul E. McKenney 			}
1028da915ad5SPaul E. McKenney 			snp->srcu_have_cbs[idx] = s;
10297b9e9b58SPaul E. McKenney 			if (snp == snp_leaf)
1030c7e88067SPaul E. McKenney 				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
1031cbdc98e9SPaul E. McKenney 			sgsne = snp->srcu_gp_seq_needed_exp;
1032cbdc98e9SPaul E. McKenney 			if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
10337ff8b450SPaul E. McKenney 				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
1034d6331980SPaul E. McKenney 			spin_unlock_irqrestore_rcu_node(snp, flags);
1035da915ad5SPaul E. McKenney 		}
1036da915ad5SPaul E. McKenney 
1037da915ad5SPaul E. McKenney 	/* Top of tree, must ensure the grace period will be started. */
10389f2e91d9SPaul E. McKenney 	spin_lock_irqsave_ssp_contention(ssp, &flags);
1039cefc0a59SPaul E. McKenney 	if (ULONG_CMP_LT(sup->srcu_gp_seq_needed, s)) {
1040da915ad5SPaul E. McKenney 		/*
1041da915ad5SPaul E. McKenney 		 * Record need for grace period s.  Pair with load
1042da915ad5SPaul E. McKenney 		 * acquire setting up for initialization.
1043da915ad5SPaul E. McKenney 		 */
1044cefc0a59SPaul E. McKenney 		smp_store_release(&sup->srcu_gp_seq_needed, s); /*^^^*/
1045da915ad5SPaul E. McKenney 	}
1046cefc0a59SPaul E. McKenney 	if (!do_norm && ULONG_CMP_LT(sup->srcu_gp_seq_needed_exp, s))
1047cefc0a59SPaul E. McKenney 		WRITE_ONCE(sup->srcu_gp_seq_needed_exp, s);
1048da915ad5SPaul E. McKenney 
10491bafbfb3SPingfan Liu 	/* If grace period not already in progress, start it. */
1050cefc0a59SPaul E. McKenney 	if (!WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) &&
1051cefc0a59SPaul E. McKenney 	    rcu_seq_state(sup->srcu_gp_seq) == SRCU_STATE_IDLE) {
1052cefc0a59SPaul E. McKenney 		WARN_ON_ONCE(ULONG_CMP_GE(sup->srcu_gp_seq, sup->srcu_gp_seq_needed));
1053aacb5d91SPaul E. McKenney 		srcu_gp_start(ssp);
1054ee5e2448SPaul E. McKenney 
1055ee5e2448SPaul E. McKenney 		// And how can that list_add() in the "else" clause
1056ee5e2448SPaul E. McKenney 		// possibly be safe for concurrent execution?  Well,
1057ee5e2448SPaul E. McKenney 		// it isn't.  And it does not have to be.  After all, it
1058ee5e2448SPaul E. McKenney 		// can only be executed during early boot when there is only
1059ee5e2448SPaul E. McKenney 		// the one boot CPU running with interrupts still disabled.
1060e0fcba9aSPaul E. McKenney 		if (likely(srcu_init_done))
1061cefc0a59SPaul E. McKenney 			queue_delayed_work(rcu_gp_wq, &sup->work,
1062282d8998SPaul E. McKenney 					   !!srcu_get_delay(ssp));
1063cefc0a59SPaul E. McKenney 		else if (list_empty(&sup->work.work.entry))
1064cefc0a59SPaul E. McKenney 			list_add(&sup->work.work.entry, &srcu_boot_list);
1065da915ad5SPaul E. McKenney 	}
1066cefc0a59SPaul E. McKenney 	spin_unlock_irqrestore_rcu_node(sup, flags);
1067da915ad5SPaul E. McKenney }
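
/*
 * To see the funnel above in action, suppose that two CPUs sharing a
 * leaf srcu_node both request grace period s.  The first records s in
 * the leaf's ->srcu_have_cbs[idx], repeats that at each level on the
 * way up, and starts the grace period while holding the root lock.
 * The second finds s already recorded at the leaf, ORs its ->grpmask
 * into ->srcu_data_have_cbs[idx], and (in the non-expedited case)
 * returns without ever touching the root, which is what keeps heavy
 * call_srcu() loads from piling up on a single lock.
 */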
1068da915ad5SPaul E. McKenney 
1069da915ad5SPaul E. McKenney /*
1070dad81a20SPaul E. McKenney  * Wait until all readers counted by array index idx complete, but
1071dad81a20SPaul E. McKenney  * loop an additional time if there is an expedited grace period pending.
1072da915ad5SPaul E. McKenney  * The caller must ensure that ->srcu_idx is not changed while checking.
1073dad81a20SPaul E. McKenney  */
1074aacb5d91SPaul E. McKenney static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
1075dad81a20SPaul E. McKenney {
10764f2bfd94SNeeraj Upadhyay 	unsigned long curdelay;
10774f2bfd94SNeeraj Upadhyay 
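	/*
	 * A zero return from srcu_get_delay() means that an expedited
	 * grace period is pending, in which case the loop below permits
	 * one scan beyond the trycount budget.
	 */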
10784f2bfd94SNeeraj Upadhyay 	curdelay = !srcu_get_delay(ssp);
10794f2bfd94SNeeraj Upadhyay 
1080dad81a20SPaul E. McKenney 	for (;;) {
1081aacb5d91SPaul E. McKenney 		if (srcu_readers_active_idx_check(ssp, idx))
1082dad81a20SPaul E. McKenney 			return true;
10834f2bfd94SNeeraj Upadhyay 		if ((--trycount + curdelay) <= 0)
1084dad81a20SPaul E. McKenney 			return false;
10854f2bfd94SNeeraj Upadhyay 		udelay(srcu_retry_check_delay);
1086dad81a20SPaul E. McKenney 	}
1087dad81a20SPaul E. McKenney }
1088dad81a20SPaul E. McKenney 
1089dad81a20SPaul E. McKenney /*
1090da915ad5SPaul E. McKenney  * Increment the ->srcu_idx counter so that future SRCU readers will
1091da915ad5SPaul E. McKenney  * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
1092dad81a20SPaul E. McKenney  * us to wait for pre-existing readers in a starvation-free manner.
1093dad81a20SPaul E. McKenney  */
1094aacb5d91SPaul E. McKenney static void srcu_flip(struct srcu_struct *ssp)
1095dad81a20SPaul E. McKenney {
1096881ec9d2SPaul E. McKenney 	/*
1097754aa642SJoel Fernandes (Google) 	 * Because the flip of ->srcu_idx is executed only if the
1098754aa642SJoel Fernandes (Google) 	 * preceding call to srcu_readers_active_idx_check() found that
1099754aa642SJoel Fernandes (Google) 	 * the ->srcu_unlock_count[] and ->srcu_lock_count[] sums matched
1100754aa642SJoel Fernandes (Google) 	 * and because that summing uses atomic_long_read(), there is
1101754aa642SJoel Fernandes (Google) 	 * ordering due to a control dependency between that summing and
1102754aa642SJoel Fernandes (Google) 	 * the WRITE_ONCE() in this call to srcu_flip().  This ordering
1103754aa642SJoel Fernandes (Google) 	 * ensures that if this updater saw a given reader's increment from
1104754aa642SJoel Fernandes (Google) 	 * __srcu_read_lock(), that reader was using a value of ->srcu_idx
1105754aa642SJoel Fernandes (Google) 	 * from before the previous call to srcu_flip(), which should be
1106754aa642SJoel Fernandes (Google) 	 * quite rare.  This ordering thus helps forward progress because
1107754aa642SJoel Fernandes (Google) 	 * the grace period could otherwise be delayed by additional
1108754aa642SJoel Fernandes (Google) 	 * calls to __srcu_read_lock() using that old (soon to be new)
1109754aa642SJoel Fernandes (Google) 	 * value of ->srcu_idx.
1110754aa642SJoel Fernandes (Google) 	 *
1111754aa642SJoel Fernandes (Google) 	 * This sum-equality check and ordering also ensures that if
1112754aa642SJoel Fernandes (Google) 	 * a given call to __srcu_read_lock() uses the new value of
1113754aa642SJoel Fernandes (Google) 	 * ->srcu_idx, this updater's earlier scans cannot have seen
1114754aa642SJoel Fernandes (Google) 	 * that reader's increments, which is all to the good, because
1115754aa642SJoel Fernandes (Google) 	 * this grace period need not wait on that reader.  After all,
1116754aa642SJoel Fernandes (Google) 	 * if those earlier scans had seen that reader, there would have
1117754aa642SJoel Fernandes (Google) 	 * been a sum mismatch and this code would not be reached.
1118754aa642SJoel Fernandes (Google) 	 *
1119754aa642SJoel Fernandes (Google) 	 * This means that the following smp_mb() is redundant, but
1120754aa642SJoel Fernandes (Google) 	 * it stays until either (1) Compilers learn about this sort of
1121754aa642SJoel Fernandes (Google) 	 * control dependency or (2) Some production workload running on
1122754aa642SJoel Fernandes (Google) 	 * a production system is unduly delayed by this slowpath smp_mb().
1123881ec9d2SPaul E. McKenney 	 */
1124881ec9d2SPaul E. McKenney 	smp_mb(); /* E */  /* Pairs with B and C. */
1125881ec9d2SPaul E. McKenney 
1126754aa642SJoel Fernandes (Google) 	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); // Flip the counter.
1127dad81a20SPaul E. McKenney 
1128dad81a20SPaul E. McKenney 	/*
1129dad81a20SPaul E. McKenney 	 * Ensure that if the updater misses an __srcu_read_unlock()
1130dafc4d16SPaul E. McKenney 	 * increment, that task's __srcu_read_lock() following its next
1131dafc4d16SPaul E. McKenney 	 * __srcu_read_lock() or __srcu_read_unlock() will see the above
1132dafc4d16SPaul E. McKenney 	 * counter update.  Note that both this memory barrier and the
1133dafc4d16SPaul E. McKenney 	 * one in srcu_readers_active_idx_check() provide the guarantee
1134dafc4d16SPaul E. McKenney 	 * for __srcu_read_lock().
1135dad81a20SPaul E. McKenney 	 */
1136dad81a20SPaul E. McKenney 	smp_mb(); /* D */  /* Pairs with C. */
1137dad81a20SPaul E. McKenney }
1138dad81a20SPaul E. McKenney 
1139dad81a20SPaul E. McKenney /*
11402da4b2a7SPaul E. McKenney  * If SRCU is likely idle, return true, otherwise return false.
11412da4b2a7SPaul E. McKenney  *
11422da4b2a7SPaul E. McKenney  * Note that it is OK for several concurrent from-idle requests for a new
11432da4b2a7SPaul E. McKenney  * grace period to specify expediting because they will all end up
11442da4b2a7SPaul E. McKenney  * requesting the same grace period anyhow.  So no loss.
11452da4b2a7SPaul E. McKenney  *
11462da4b2a7SPaul E. McKenney  * Note also that if any CPU (including the current one) is still invoking
11472da4b2a7SPaul E. McKenney  * callbacks, this function will nevertheless say "idle".  This is not
11482da4b2a7SPaul E. McKenney  * ideal, but the overhead of checking all CPUs' callback lists is even
11492da4b2a7SPaul E. McKenney  * less ideal, especially on large systems.  Furthermore, the wakeup
11502da4b2a7SPaul E. McKenney  * can happen before the callback is fully removed, so we have no choice
11512da4b2a7SPaul E. McKenney  * but to accept this type of error.
11522da4b2a7SPaul E. McKenney  *
11532da4b2a7SPaul E. McKenney  * This function is also subject to counter-wrap errors, but let's face
11542da4b2a7SPaul E. McKenney  * it, if this function was preempted for enough time for the counters
11552da4b2a7SPaul E. McKenney  * to wrap, it really doesn't matter whether or not we expedite the grace
11562da4b2a7SPaul E. McKenney  * period.  The extra overhead of a needlessly expedited grace period is
11577fef6cffSEthon Paul  * negligible when amortized over that time period, and the extra latency
11582da4b2a7SPaul E. McKenney  * of a needlessly non-expedited grace period is similarly negligible.
11592da4b2a7SPaul E. McKenney  */
1160aacb5d91SPaul E. McKenney static bool srcu_might_be_idle(struct srcu_struct *ssp)
11612da4b2a7SPaul E. McKenney {
116222607d66SPaul E. McKenney 	unsigned long curseq;
11632da4b2a7SPaul E. McKenney 	unsigned long flags;
11642da4b2a7SPaul E. McKenney 	struct srcu_data *sdp;
116522607d66SPaul E. McKenney 	unsigned long t;
1166844a378dSPaul E. McKenney 	unsigned long tlast;
11672da4b2a7SPaul E. McKenney 
1168bde50d8fSSebastian Andrzej Siewior 	check_init_srcu_struct(ssp);
11692da4b2a7SPaul E. McKenney 	/* If the local srcu_data structure has callbacks, not idle.  */
1170bde50d8fSSebastian Andrzej Siewior 	sdp = raw_cpu_ptr(ssp->sda);
1171bde50d8fSSebastian Andrzej Siewior 	spin_lock_irqsave_rcu_node(sdp, flags);
11722da4b2a7SPaul E. McKenney 	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
1173bde50d8fSSebastian Andrzej Siewior 		spin_unlock_irqrestore_rcu_node(sdp, flags);
11742da4b2a7SPaul E. McKenney 		return false; /* Callbacks already present, so not idle. */
11752da4b2a7SPaul E. McKenney 	}
1176bde50d8fSSebastian Andrzej Siewior 	spin_unlock_irqrestore_rcu_node(sdp, flags);
11772da4b2a7SPaul E. McKenney 
11782da4b2a7SPaul E. McKenney 	/*
1179a616aec9SIngo Molnar 	 * No local callbacks, so probabilistically probe global state.
11802da4b2a7SPaul E. McKenney 	 * Exact information would require acquiring locks, which would
1181a616aec9SIngo Molnar 	 * kill scalability, hence the probabilistic nature of the probe.
11822da4b2a7SPaul E. McKenney 	 */
118322607d66SPaul E. McKenney 
118422607d66SPaul E. McKenney 	/* First, see if enough time has passed since the last GP. */
118522607d66SPaul E. McKenney 	t = ktime_get_mono_fast_ns();
118603200b5cSPaul E. McKenney 	tlast = READ_ONCE(ssp->srcu_sup->srcu_last_gp_end);
118722607d66SPaul E. McKenney 	if (exp_holdoff == 0 ||
1188844a378dSPaul E. McKenney 	    time_in_range_open(t, tlast, tlast + exp_holdoff))
118922607d66SPaul E. McKenney 		return false; /* Too soon after last GP. */
119022607d66SPaul E. McKenney 
119122607d66SPaul E. McKenney 	/* Next, check for probable idleness. */
119203200b5cSPaul E. McKenney 	curseq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq);
11932da4b2a7SPaul E. McKenney 	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
119403200b5cSPaul E. McKenney 	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_sup->srcu_gp_seq_needed)))
11952da4b2a7SPaul E. McKenney 		return false; /* Grace period in progress, so not idle. */
11962da4b2a7SPaul E. McKenney 	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
119703200b5cSPaul E. McKenney 	if (curseq != rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq))
11982da4b2a7SPaul E. McKenney 		return false; /* GP # changed, so not idle. */
11992da4b2a7SPaul E. McKenney 	return true; /* With reasonable probability, idle! */
12002da4b2a7SPaul E. McKenney }
12012da4b2a7SPaul E. McKenney 
12022da4b2a7SPaul E. McKenney /*
1203a602538eSPaul E. McKenney  * SRCU callback function to leak a callback.
1204a602538eSPaul E. McKenney  */
1205a602538eSPaul E. McKenney static void srcu_leak_callback(struct rcu_head *rhp)
1206a602538eSPaul E. McKenney {
1207a602538eSPaul E. McKenney }
1208a602538eSPaul E. McKenney 
1209a602538eSPaul E. McKenney /*
121029d2bb94SPaul E. McKenney  * Start an SRCU grace period, and also queue the callback if non-NULL.
121129d2bb94SPaul E. McKenney  */
12125358c9faSPaul E. McKenney static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
12135358c9faSPaul E. McKenney 					     struct rcu_head *rhp, bool do_norm)
121429d2bb94SPaul E. McKenney {
121529d2bb94SPaul E. McKenney 	unsigned long flags;
121629d2bb94SPaul E. McKenney 	int idx;
121729d2bb94SPaul E. McKenney 	bool needexp = false;
121829d2bb94SPaul E. McKenney 	bool needgp = false;
121929d2bb94SPaul E. McKenney 	unsigned long s;
122029d2bb94SPaul E. McKenney 	struct srcu_data *sdp;
12210b56f953SNeeraj Upadhyay 	struct srcu_node *sdp_mynode;
12220b56f953SNeeraj Upadhyay 	int ss_state;
122329d2bb94SPaul E. McKenney 
12245358c9faSPaul E. McKenney 	check_init_srcu_struct(ssp);
1225ae3c0706SFrederic Weisbecker 	/*
1226ae3c0706SFrederic Weisbecker 	 * While starting a new grace period, make sure we are in an
1227ae3c0706SFrederic Weisbecker 	 * SRCU read-side critical section so that the grace-period
1228ae3c0706SFrederic Weisbecker 	 * sequence number cannot wrap around in the meantime.
1229ae3c0706SFrederic Weisbecker 	 */
1230e29a4915SFrederic Weisbecker 	idx = __srcu_read_lock_nmisafe(ssp);
1231a0d8cbd3SPaul E. McKenney 	ss_state = smp_load_acquire(&ssp->srcu_sup->srcu_size_state);
12320b56f953SNeeraj Upadhyay 	if (ss_state < SRCU_SIZE_WAIT_CALL)
12337f24626dSPingfan Liu 		sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
1234994f7068SPaul E. McKenney 	else
123529d2bb94SPaul E. McKenney 		sdp = raw_cpu_ptr(ssp->sda);
1236c2445d38SPaul E. McKenney 	spin_lock_irqsave_sdp_contention(sdp, &flags);
12375358c9faSPaul E. McKenney 	if (rhp)
123829d2bb94SPaul E. McKenney 		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
12394a8e65b0SFrederic Weisbecker 	/*
124067050837SJoel Fernandes (Google) 	 * It's crucial to capture the snapshot 's' for acceleration before
124167050837SJoel Fernandes (Google) 	 * reading the current gp_seq that is used for advancing. This is
124267050837SJoel Fernandes (Google) 	 * essential because if the acceleration snapshot is taken after a
124367050837SJoel Fernandes (Google) 	 * failed advancement attempt, there's a risk that a grace period may
124467050837SJoel Fernandes (Google) 	 * conclude and a new one may start in the interim. If the snapshot is
124567050837SJoel Fernandes (Google) 	 * captured after this sequence of events, the acceleration snapshot 's'
124667050837SJoel Fernandes (Google) 	 * could be excessively advanced, leading to acceleration failure.
124767050837SJoel Fernandes (Google) 	 * In such a scenario, an 'acceleration leak' can occur, where new
124867050837SJoel Fernandes (Google) 	 * callbacks become indefinitely stuck in the RCU_NEXT_TAIL segment.
124967050837SJoel Fernandes (Google) 	 * Also note that encountering advancing failures is a normal
125067050837SJoel Fernandes (Google) 	 * occurrence when the grace period for RCU_WAIT_TAIL is in progress.
12514a8e65b0SFrederic Weisbecker 	 *
125267050837SJoel Fernandes (Google) 	 * To see this, consider the following events which occur if
125367050837SJoel Fernandes (Google) 	 * rcu_seq_snap() were to be called after advance:
12544a8e65b0SFrederic Weisbecker 	 *
12554a8e65b0SFrederic Weisbecker 	 *  1) The RCU_WAIT_TAIL segment has callbacks (gp_num = X + 4) and the
12564a8e65b0SFrederic Weisbecker 	 *     RCU_NEXT_READY_TAIL also has callbacks (gp_num = X + 8).
12574a8e65b0SFrederic Weisbecker 	 *
12584a8e65b0SFrederic Weisbecker 	 *  2) The grace period for RCU_WAIT_TAIL is seen as started but not
12594a8e65b0SFrederic Weisbecker 	 *     completed so rcu_seq_current() returns X + SRCU_STATE_SCAN1.
12604a8e65b0SFrederic Weisbecker 	 *
12614a8e65b0SFrederic Weisbecker 	 *  3) This value is passed to rcu_segcblist_advance() which can't move
12624a8e65b0SFrederic Weisbecker 	 *     any segment forward and fails.
12634a8e65b0SFrederic Weisbecker 	 *
12644a8e65b0SFrederic Weisbecker 	 *  4) srcu_gp_start_if_needed() still proceeds with callback acceleration.
12654a8e65b0SFrederic Weisbecker 	 *     But then the call to rcu_seq_snap() observes the grace period for the
12664a8e65b0SFrederic Weisbecker 	 *     RCU_WAIT_TAIL segment as completed and the subsequent one for the
12674a8e65b0SFrederic Weisbecker 	 *     RCU_NEXT_READY_TAIL segment as started (ie: X + 4 + SRCU_STATE_SCAN1)
12684a8e65b0SFrederic Weisbecker 	 *     so it returns a snapshot of the next grace period, which is X + 12.
12694a8e65b0SFrederic Weisbecker 	 *
12704a8e65b0SFrederic Weisbecker 	 *  5) The value of X + 12 is passed to rcu_segcblist_accelerate() but the
12714a8e65b0SFrederic Weisbecker 	 *     freshly enqueued callback in RCU_NEXT_TAIL can't move to
12724a8e65b0SFrederic Weisbecker 	 *     RCU_NEXT_READY_TAIL which already has callbacks for a previous grace
12734a8e65b0SFrederic Weisbecker 	 *     period (gp_num = X + 8). So acceleration fails.
12744a8e65b0SFrederic Weisbecker 	 */
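	/*
	 * Worked arithmetic for the scenario above, assuming the rcu.h
	 * encoding in which the low two bits of the sequence hold the
	 * phase: with gp_seq == X + SRCU_STATE_SCAN1 (X a multiple of 4),
	 * rcu_seq_snap() returns (X + 1 + 7) & ~3 == X + 8, the end of
	 * the next grace period.  Were the snapshot instead taken at
	 * step 4 above, with gp_seq == X + 4 + SRCU_STATE_SCAN1, it
	 * would return X + 12.
	 */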
12754a8e65b0SFrederic Weisbecker 	s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
127694c55b9eSFrederic Weisbecker 	if (rhp) {
127729d2bb94SPaul E. McKenney 		rcu_segcblist_advance(&sdp->srcu_cblist,
127803200b5cSPaul E. McKenney 				      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
127967050837SJoel Fernandes (Google) 		/*
128067050837SJoel Fernandes (Google) 		 * Acceleration can never fail because the base current gp_seq
128167050837SJoel Fernandes (Google) 		 * used for acceleration is <= the value of gp_seq used for
128267050837SJoel Fernandes (Google) 		 * advancing. This means that RCU_NEXT_TAIL segment will
128367050837SJoel Fernandes (Google) 		 * always be able to be emptied by the acceleration into the
128467050837SJoel Fernandes (Google) 		 * RCU_NEXT_READY_TAIL or RCU_WAIT_TAIL segments.
128567050837SJoel Fernandes (Google) 		 */
128694c55b9eSFrederic Weisbecker 		WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s));
128794c55b9eSFrederic Weisbecker 	}
128829d2bb94SPaul E. McKenney 	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
128929d2bb94SPaul E. McKenney 		sdp->srcu_gp_seq_needed = s;
129029d2bb94SPaul E. McKenney 		needgp = true;
129129d2bb94SPaul E. McKenney 	}
129229d2bb94SPaul E. McKenney 	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
129329d2bb94SPaul E. McKenney 		sdp->srcu_gp_seq_needed_exp = s;
129429d2bb94SPaul E. McKenney 		needexp = true;
129529d2bb94SPaul E. McKenney 	}
129629d2bb94SPaul E. McKenney 	spin_unlock_irqrestore_rcu_node(sdp, flags);
12970b56f953SNeeraj Upadhyay 
12980b56f953SNeeraj Upadhyay 	/* Ensure that snp node tree is fully initialized before traversing it */
12990b56f953SNeeraj Upadhyay 	if (ss_state < SRCU_SIZE_WAIT_BARRIER)
13000b56f953SNeeraj Upadhyay 		sdp_mynode = NULL;
13010b56f953SNeeraj Upadhyay 	else
13020b56f953SNeeraj Upadhyay 		sdp_mynode = sdp->mynode;
13030b56f953SNeeraj Upadhyay 
130429d2bb94SPaul E. McKenney 	if (needgp)
130529d2bb94SPaul E. McKenney 		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
130629d2bb94SPaul E. McKenney 	else if (needexp)
13070b56f953SNeeraj Upadhyay 		srcu_funnel_exp_start(ssp, sdp_mynode, s);
1308e29a4915SFrederic Weisbecker 	__srcu_read_unlock_nmisafe(ssp, idx);
13095358c9faSPaul E. McKenney 	return s;
131029d2bb94SPaul E. McKenney }
131129d2bb94SPaul E. McKenney 
131229d2bb94SPaul E. McKenney /*
1313da915ad5SPaul E. McKenney  * Enqueue an SRCU callback on the srcu_data structure associated with
1314da915ad5SPaul E. McKenney  * the current CPU and the specified srcu_struct structure, initiating
1315da915ad5SPaul E. McKenney  * grace-period processing if it is not already running.
1316dad81a20SPaul E. McKenney  *
1317dad81a20SPaul E. McKenney  * Note that all CPUs must agree that the grace period extended beyond
1318dad81a20SPaul E. McKenney  * all pre-existing SRCU read-side critical sections.  On systems with
1319dad81a20SPaul E. McKenney  * more than one CPU, this means that when "func()" is invoked, each CPU
1320dad81a20SPaul E. McKenney  * is guaranteed to have executed a full memory barrier since the end of
1321dad81a20SPaul E. McKenney  * its last corresponding SRCU read-side critical section whose beginning
13225ef98a63SPaul E. McKenney  * preceded the call to call_srcu().  It also means that each CPU executing
1323dad81a20SPaul E. McKenney  * an SRCU read-side critical section that continues beyond the start of
13245ef98a63SPaul E. McKenney  * "func()" must have executed a memory barrier after the call_srcu()
1325dad81a20SPaul E. McKenney  * but before the beginning of that SRCU read-side critical section.
1326dad81a20SPaul E. McKenney  * Note that these guarantees include CPUs that are offline, idle, or
1327dad81a20SPaul E. McKenney  * executing in user mode, as well as CPUs that are executing in the kernel.
1328dad81a20SPaul E. McKenney  *
13295ef98a63SPaul E. McKenney  * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
1330dad81a20SPaul E. McKenney  * resulting SRCU callback function "func()", then both CPU A and CPU
1331dad81a20SPaul E. McKenney  * B are guaranteed to execute a full memory barrier during the time
13325ef98a63SPaul E. McKenney  * interval between the call to call_srcu() and the invocation of "func()".
1333dad81a20SPaul E. McKenney  * This guarantee applies even if CPU A and CPU B are the same CPU (but
1334dad81a20SPaul E. McKenney  * again only if the system has more than one CPU).
1335dad81a20SPaul E. McKenney  *
1336dad81a20SPaul E. McKenney  * Of course, these guarantees apply only for invocations of call_srcu(),
1337dad81a20SPaul E. McKenney  * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
1338dad81a20SPaul E. McKenney  * srcu_struct structure.
1339dad81a20SPaul E. McKenney  */
134011b00045SJiang Biao static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
13411e9a038bSPaul E. McKenney 			rcu_callback_t func, bool do_norm)
1342dad81a20SPaul E. McKenney {
1343a602538eSPaul E. McKenney 	if (debug_rcu_head_queue(rhp)) {
1344a602538eSPaul E. McKenney 		/* Probable double call_srcu(), so leak the callback. */
1345a602538eSPaul E. McKenney 		WRITE_ONCE(rhp->func, srcu_leak_callback);
1346a602538eSPaul E. McKenney 		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
1347a602538eSPaul E. McKenney 		return;
1348a602538eSPaul E. McKenney 	}
1349da915ad5SPaul E. McKenney 	rhp->func = func;
13505358c9faSPaul E. McKenney 	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
13511e9a038bSPaul E. McKenney }
13521e9a038bSPaul E. McKenney 
13535a0465e1SPaul E. McKenney /**
13545a0465e1SPaul E. McKenney  * call_srcu() - Queue a callback for invocation after an SRCU grace period
1355aacb5d91SPaul E. McKenney  * @ssp: srcu_struct in which to queue the callback
135627fdb35fSPaul E. McKenney  * @rhp: structure to be used for queueing the SRCU callback.
13575a0465e1SPaul E. McKenney  * @func: function to be invoked after the SRCU grace period
13585a0465e1SPaul E. McKenney  *
13595a0465e1SPaul E. McKenney  * The callback function will be invoked some time after a full SRCU
13605a0465e1SPaul E. McKenney  * grace period elapses, in other words after all pre-existing SRCU
13615a0465e1SPaul E. McKenney  * read-side critical sections have completed.  However, the callback
13625a0465e1SPaul E. McKenney  * function might well execute concurrently with other SRCU read-side
13635a0465e1SPaul E. McKenney  * critical sections that started after call_srcu() was invoked.  SRCU
13645a0465e1SPaul E. McKenney  * read-side critical sections are delimited by srcu_read_lock() and
13655a0465e1SPaul E. McKenney  * srcu_read_unlock(), and may be nested.
13665a0465e1SPaul E. McKenney  *
13675a0465e1SPaul E. McKenney  * The callback will be invoked from process context, but must nevertheless
13685a0465e1SPaul E. McKenney  * be fast and must not block.
13695a0465e1SPaul E. McKenney  */
1370aacb5d91SPaul E. McKenney void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
13711e9a038bSPaul E. McKenney 	       rcu_callback_t func)
13721e9a038bSPaul E. McKenney {
1373aacb5d91SPaul E. McKenney 	__call_srcu(ssp, rhp, func, true);
1374dad81a20SPaul E. McKenney }
1375dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(call_srcu);
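
/*
 * For illustration, a minimal call_srcu() usage sketch, in which
 * struct foo, foo_reclaim(), and my_srcu are hypothetical names:
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 * Then, after unlinking a struct foo from its SRCU-protected structure:
 *
 *	call_srcu(&my_srcu, &p->rh, foo_reclaim);
 *
 * All pre-existing SRCU readers that might still hold a reference to
 * *p are guaranteed to have completed before foo_reclaim() is invoked.
 */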
1376dad81a20SPaul E. McKenney 
1377dad81a20SPaul E. McKenney /*
1378dad81a20SPaul E. McKenney  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
1379dad81a20SPaul E. McKenney  */
1380aacb5d91SPaul E. McKenney static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
1381dad81a20SPaul E. McKenney {
1382dad81a20SPaul E. McKenney 	struct rcu_synchronize rcu;
1383dad81a20SPaul E. McKenney 
1384f0f44752SBoqun Feng 	srcu_lock_sync(&ssp->dep_map);
1385f0f44752SBoqun Feng 
1386f505d434SJakub Kicinski 	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
1387dad81a20SPaul E. McKenney 			 lock_is_held(&rcu_bh_lock_map) ||
1388dad81a20SPaul E. McKenney 			 lock_is_held(&rcu_lock_map) ||
1389dad81a20SPaul E. McKenney 			 lock_is_held(&rcu_sched_lock_map),
1390dad81a20SPaul E. McKenney 			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
1391dad81a20SPaul E. McKenney 
1392dad81a20SPaul E. McKenney 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
1393dad81a20SPaul E. McKenney 		return;
1394dad81a20SPaul E. McKenney 	might_sleep();
1395aacb5d91SPaul E. McKenney 	check_init_srcu_struct(ssp);
1396dad81a20SPaul E. McKenney 	init_completion(&rcu.completion);
1397da915ad5SPaul E. McKenney 	init_rcu_head_on_stack(&rcu.head);
1398aacb5d91SPaul E. McKenney 	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
1399dad81a20SPaul E. McKenney 	wait_for_completion(&rcu.completion);
1400da915ad5SPaul E. McKenney 	destroy_rcu_head_on_stack(&rcu.head);
140135732cf9SPaul E. McKenney 
140235732cf9SPaul E. McKenney 	/*
140335732cf9SPaul E. McKenney 	 * Make sure that later code is ordered after the SRCU grace
1404d6331980SPaul E. McKenney 	 * period.  This pairs with the spin_lock_irq_rcu_node()
140535732cf9SPaul E. McKenney 	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
140635732cf9SPaul E. McKenney 	 * because the current CPU might have been totally uninvolved with
140735732cf9SPaul E. McKenney 	 * (and thus unordered against) that grace period.
140835732cf9SPaul E. McKenney 	 */
140935732cf9SPaul E. McKenney 	smp_mb();
1410dad81a20SPaul E. McKenney }
1411dad81a20SPaul E. McKenney 
1412dad81a20SPaul E. McKenney /**
1413dad81a20SPaul E. McKenney  * synchronize_srcu_expedited - Brute-force SRCU grace period
1414aacb5d91SPaul E. McKenney  * @ssp: srcu_struct with which to synchronize.
1415dad81a20SPaul E. McKenney  *
1416dad81a20SPaul E. McKenney  * Wait for an SRCU grace period to elapse, but be more aggressive about
1417dad81a20SPaul E. McKenney  * spinning rather than blocking when waiting.
1418dad81a20SPaul E. McKenney  *
1419dad81a20SPaul E. McKenney  * Note that synchronize_srcu_expedited() has the same deadlock and
1420dad81a20SPaul E. McKenney  * memory-ordering properties as does synchronize_srcu().
1421dad81a20SPaul E. McKenney  */
1422aacb5d91SPaul E. McKenney void synchronize_srcu_expedited(struct srcu_struct *ssp)
1423dad81a20SPaul E. McKenney {
1424aacb5d91SPaul E. McKenney 	__synchronize_srcu(ssp, rcu_gp_is_normal());
1425dad81a20SPaul E. McKenney }
1426dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
1427dad81a20SPaul E. McKenney 
1428dad81a20SPaul E. McKenney /**
1429dad81a20SPaul E. McKenney  * synchronize_srcu - wait for prior SRCU read-side critical-section completion
1430aacb5d91SPaul E. McKenney  * @ssp: srcu_struct with which to synchronize.
1431dad81a20SPaul E. McKenney  *
1432dad81a20SPaul E. McKenney  * Wait for the counts of both indexes to drain to zero.  To avoid
1433dad81a20SPaul E. McKenney  * possible starvation of synchronize_srcu(), it first waits for the
1434da915ad5SPaul E. McKenney  * count of index=((->srcu_idx & 1) ^ 1) to drain to zero, and then
1435da915ad5SPaul E. McKenney  * flips ->srcu_idx and waits for the count of the other index.
1436dad81a20SPaul E. McKenney  *
1437dad81a20SPaul E. McKenney  * Can block; must be called from process context.
1438dad81a20SPaul E. McKenney  *
1439dad81a20SPaul E. McKenney  * Note that it is illegal to call synchronize_srcu() from the corresponding
1440dad81a20SPaul E. McKenney  * SRCU read-side critical section; doing so will result in deadlock.
1441dad81a20SPaul E. McKenney  * However, it is perfectly legal to call synchronize_srcu() on one
1442dad81a20SPaul E. McKenney  * srcu_struct from some other srcu_struct's read-side critical section,
1443dad81a20SPaul E. McKenney  * as long as the resulting graph of srcu_structs is acyclic.
1444dad81a20SPaul E. McKenney  *
1445dad81a20SPaul E. McKenney  * There are memory-ordering constraints implied by synchronize_srcu().
1446dad81a20SPaul E. McKenney  * On systems with more than one CPU, when synchronize_srcu() returns,
1447dad81a20SPaul E. McKenney  * each CPU is guaranteed to have executed a full memory barrier since
14486eb95cc4SPaul E. McKenney  * the end of its last corresponding SRCU read-side critical section
1449dad81a20SPaul E. McKenney  * whose beginning preceded the call to synchronize_srcu().  In addition,
1450dad81a20SPaul E. McKenney  * each CPU having an SRCU read-side critical section that extends beyond
1451dad81a20SPaul E. McKenney  * the return from synchronize_srcu() is guaranteed to have executed a
1452dad81a20SPaul E. McKenney  * full memory barrier after the beginning of synchronize_srcu() and before
1453dad81a20SPaul E. McKenney  * the beginning of that SRCU read-side critical section.  Note that these
1454dad81a20SPaul E. McKenney  * guarantees include CPUs that are offline, idle, or executing in user mode,
1455dad81a20SPaul E. McKenney  * as well as CPUs that are executing in the kernel.
1456dad81a20SPaul E. McKenney  *
1457dad81a20SPaul E. McKenney  * Furthermore, if CPU A invoked synchronize_srcu(), which returned
1458dad81a20SPaul E. McKenney  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
1459dad81a20SPaul E. McKenney  * to have executed a full memory barrier during the execution of
1460dad81a20SPaul E. McKenney  * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
1461dad81a20SPaul E. McKenney  * are the same CPU, but again only if the system has more than one CPU.
1462dad81a20SPaul E. McKenney  *
1463dad81a20SPaul E. McKenney  * Of course, these memory-ordering guarantees apply only when
1464dad81a20SPaul E. McKenney  * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
1465dad81a20SPaul E. McKenney  * passed the same srcu_struct structure.
14662da4b2a7SPaul E. McKenney  *
14673d3a0d1bSPaul E. McKenney  * Implementation of these memory-ordering guarantees is similar to
14683d3a0d1bSPaul E. McKenney  * that of synchronize_rcu().
14693d3a0d1bSPaul E. McKenney  *
14702da4b2a7SPaul E. McKenney  * If SRCU is likely idle, expedite the first request.  This semantic
14712da4b2a7SPaul E. McKenney  * was provided by Classic SRCU, and is relied upon by its users, so TREE
14722da4b2a7SPaul E. McKenney  * SRCU must also provide it.  Note that detecting idleness is heuristic
14732da4b2a7SPaul E. McKenney  * and subject to both false positives and negatives.
1474dad81a20SPaul E. McKenney  */
1475aacb5d91SPaul E. McKenney void synchronize_srcu(struct srcu_struct *ssp)
1476dad81a20SPaul E. McKenney {
1477aacb5d91SPaul E. McKenney 	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
1478aacb5d91SPaul E. McKenney 		synchronize_srcu_expedited(ssp);
1479dad81a20SPaul E. McKenney 	else
1480aacb5d91SPaul E. McKenney 		__synchronize_srcu(ssp, true);
1481dad81a20SPaul E. McKenney }
1482dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(synchronize_srcu);
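
/*
 * For illustration, the classic update-side pattern built on
 * synchronize_srcu(), in which gp (a pointer to a hypothetical struct
 * foo protected by my_srcu) and do_something_with() are made-up names:
 *
 *	Reader:
 *
 *		int idx = srcu_read_lock(&my_srcu);
 *		struct foo *p = srcu_dereference(gp, &my_srcu);
 *
 *		if (p)
 *			do_something_with(p->data);
 *		srcu_read_unlock(&my_srcu, idx);
 *
 *	Updater:
 *
 *		struct foo *old = gp;
 *
 *		rcu_assign_pointer(gp, new);
 *		synchronize_srcu(&my_srcu);
 *		kfree(old);
 *
 * Once synchronize_srcu() returns, no reader can still hold a
 * reference obtained from the old value of gp, so freeing it is safe.
 */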
1483dad81a20SPaul E. McKenney 
14845358c9faSPaul E. McKenney /**
14855358c9faSPaul E. McKenney  * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
14865358c9faSPaul E. McKenney  * @ssp: srcu_struct to provide cookie for.
14875358c9faSPaul E. McKenney  *
14885358c9faSPaul E. McKenney  * This function returns a cookie that can be passed to
14895358c9faSPaul E. McKenney  * poll_state_synchronize_srcu(), which will return true if a full grace
14905358c9faSPaul E. McKenney  * period has elapsed in the meantime.  It is the caller's responsibility
14915358c9faSPaul E. McKenney  * to make sure that grace period happens, for example, by invoking
14925358c9faSPaul E. McKenney  * call_srcu() after return from get_state_synchronize_srcu().
14935358c9faSPaul E. McKenney  */
14945358c9faSPaul E. McKenney unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
14955358c9faSPaul E. McKenney {
14965358c9faSPaul E. McKenney 	// Any prior manipulation of SRCU-protected data must happen
14975358c9faSPaul E. McKenney 	// before the load from ->srcu_gp_seq.
14985358c9faSPaul E. McKenney 	smp_mb();
149903200b5cSPaul E. McKenney 	return rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
15005358c9faSPaul E. McKenney }
15015358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
15025358c9faSPaul E. McKenney 
15035358c9faSPaul E. McKenney /**
15045358c9faSPaul E. McKenney  * start_poll_synchronize_srcu - Provide cookie and start grace period
15055358c9faSPaul E. McKenney  * @ssp: srcu_struct to provide cookie for.
15065358c9faSPaul E. McKenney  *
15075358c9faSPaul E. McKenney  * This function returns a cookie that can be passed to
15085358c9faSPaul E. McKenney  * poll_state_synchronize_srcu(), which will return true if a full grace
15095358c9faSPaul E. McKenney  * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
15105358c9faSPaul E. McKenney  * this function also ensures that any needed SRCU grace period will be
15115358c9faSPaul E. McKenney  * started.  This convenience does come at a cost in terms of CPU overhead.
15125358c9faSPaul E. McKenney  */
15135358c9faSPaul E. McKenney unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
15145358c9faSPaul E. McKenney {
15155358c9faSPaul E. McKenney 	return srcu_gp_start_if_needed(ssp, NULL, true);
15165358c9faSPaul E. McKenney }
15175358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
15185358c9faSPaul E. McKenney 
15195358c9faSPaul E. McKenney /**
15205358c9faSPaul E. McKenney  * poll_state_synchronize_srcu - Has cookie's grace period ended?
15215358c9faSPaul E. McKenney  * @ssp: srcu_struct to provide cookie for.
15225358c9faSPaul E. McKenney  * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
15235358c9faSPaul E. McKenney  *
15245358c9faSPaul E. McKenney  * This function takes the cookie that was returned from either
15255358c9faSPaul E. McKenney  * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
15265358c9faSPaul E. McKenney  * returns @true if an SRCU grace period elapsed since the time that the
15275358c9faSPaul E. McKenney  * returns true if an SRCU grace period has elapsed since the time that the
15284e7ccfaeSPaul E. McKenney  *
15294e7ccfaeSPaul E. McKenney  * Because cookies are finite in size, wrapping/overflow is possible.
15304e7ccfaeSPaul E. McKenney  * This is more pronounced on 32-bit systems where cookies are 32 bits,
15314e7ccfaeSPaul E. McKenney  * where in theory wrapping could happen in about 14 hours assuming
15324e7ccfaeSPaul E. McKenney  * 25-microsecond expedited SRCU grace periods.  However, a more likely
15334e7ccfaeSPaul E. McKenney  * overflow lower bound is on the order of 24 days in the case of
15344e7ccfaeSPaul E. McKenney  * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
15354e7ccfaeSPaul E. McKenney  * system requires geologic timespans, as in more than seven million years
15364e7ccfaeSPaul E. McKenney  * even for expedited SRCU grace periods.
15374e7ccfaeSPaul E. McKenney  *
15384e7ccfaeSPaul E. McKenney  * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
15394e7ccfaeSPaul E. McKenney  * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU.  This uses
15404e7ccfaeSPaul E. McKenney  * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
15414e7ccfaeSPaul E. McKenney  * few minutes.  If this proves to be a problem, this counter will be
15424e7ccfaeSPaul E. McKenney  * expanded to the same size as for Tree SRCU.
15435358c9faSPaul E. McKenney  */
15445358c9faSPaul E. McKenney bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
15455358c9faSPaul E. McKenney {
1546*e206f33eSPaul E. McKenney 	if (cookie != SRCU_GET_STATE_COMPLETED &&
1547*e206f33eSPaul E. McKenney 	    !rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, cookie))
15485358c9faSPaul E. McKenney 		return false;
15495358c9faSPaul E. McKenney 	// Ensure that the end of the SRCU grace period happens before
15505358c9faSPaul E. McKenney 	// any subsequent code that the caller might execute.
15515358c9faSPaul E. McKenney 	smp_mb(); // ^^^
15525358c9faSPaul E. McKenney 	return true;
15535358c9faSPaul E. McKenney }
15545358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
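
/*
 * For illustration, a polled-grace-period sketch using the three
 * functions above, with my_srcu being a hypothetical srcu_struct:
 *
 *	unsigned long cookie = start_poll_synchronize_srcu(&my_srcu);
 *
 *	(do other work)
 *
 *	if (poll_state_synchronize_srcu(&my_srcu, cookie))
 *		(a full grace period has elapsed, reclamation is safe)
 *	else
 *		synchronize_srcu(&my_srcu);  (or retry the poll later)
 *
 * Substituting get_state_synchronize_srcu() for the first call yields
 * the same cookie semantics, but leaves starting the grace period to
 * the caller.
 */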
15555358c9faSPaul E. McKenney 
1556da915ad5SPaul E. McKenney /*
1557da915ad5SPaul E. McKenney  * Callback function for srcu_barrier() use.
1558da915ad5SPaul E. McKenney  */
1559da915ad5SPaul E. McKenney static void srcu_barrier_cb(struct rcu_head *rhp)
1560da915ad5SPaul E. McKenney {
1561da915ad5SPaul E. McKenney 	struct srcu_data *sdp;
1562aacb5d91SPaul E. McKenney 	struct srcu_struct *ssp;
1563da915ad5SPaul E. McKenney 
1564da915ad5SPaul E. McKenney 	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
1565aacb5d91SPaul E. McKenney 	ssp = sdp->ssp;
1566d20162e0SPaul E. McKenney 	if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
1567d20162e0SPaul E. McKenney 		complete(&ssp->srcu_sup->srcu_barrier_completion);
1568da915ad5SPaul E. McKenney }
1569da915ad5SPaul E. McKenney 
1570994f7068SPaul E. McKenney /*
1571994f7068SPaul E. McKenney  * Enqueue an srcu_barrier() callback on the specified srcu_data
1572994f7068SPaul E. McKenney  * structure's ->cblist, but only if that ->cblist already has at least one
1573994f7068SPaul E. McKenney  * callback enqueued.  Note that if a CPU already has callbacks enqueued,
1574994f7068SPaul E. McKenney  * it must have already registered the need for a future grace period,
1575994f7068SPaul E. McKenney  * so all we need do is enqueue a callback that will use the same grace
1576994f7068SPaul E. McKenney  * period as the last callback already in the queue.
1577994f7068SPaul E. McKenney  */
1578994f7068SPaul E. McKenney static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
1579994f7068SPaul E. McKenney {
1580994f7068SPaul E. McKenney 	spin_lock_irq_rcu_node(sdp);
1581d20162e0SPaul E. McKenney 	atomic_inc(&ssp->srcu_sup->srcu_barrier_cpu_cnt);
1582994f7068SPaul E. McKenney 	sdp->srcu_barrier_head.func = srcu_barrier_cb;
1583994f7068SPaul E. McKenney 	debug_rcu_head_queue(&sdp->srcu_barrier_head);
1584994f7068SPaul E. McKenney 	if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
1585994f7068SPaul E. McKenney 				   &sdp->srcu_barrier_head)) {
1586994f7068SPaul E. McKenney 		debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
1587d20162e0SPaul E. McKenney 		atomic_dec(&ssp->srcu_sup->srcu_barrier_cpu_cnt);
1588994f7068SPaul E. McKenney 	}
1589994f7068SPaul E. McKenney 	spin_unlock_irq_rcu_node(sdp);
1590994f7068SPaul E. McKenney }
1591994f7068SPaul E. McKenney 
1592dad81a20SPaul E. McKenney /**
1593dad81a20SPaul E. McKenney  * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1594aacb5d91SPaul E. McKenney  * @ssp: srcu_struct on which to wait for in-flight callbacks.
1595dad81a20SPaul E. McKenney  */
1596aacb5d91SPaul E. McKenney void srcu_barrier(struct srcu_struct *ssp)
1597dad81a20SPaul E. McKenney {
1598da915ad5SPaul E. McKenney 	int cpu;
1599e2f63836SPaul E. McKenney 	int idx;
1600d20162e0SPaul E. McKenney 	unsigned long s = rcu_seq_snap(&ssp->srcu_sup->srcu_barrier_seq);
1601da915ad5SPaul E. McKenney 
1602aacb5d91SPaul E. McKenney 	check_init_srcu_struct(ssp);
1603d20162e0SPaul E. McKenney 	mutex_lock(&ssp->srcu_sup->srcu_barrier_mutex);
1604d20162e0SPaul E. McKenney 	if (rcu_seq_done(&ssp->srcu_sup->srcu_barrier_seq, s)) {
1605da915ad5SPaul E. McKenney 		smp_mb(); /* Force ordering following return. */
1606d20162e0SPaul E. McKenney 		mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex);
1607da915ad5SPaul E. McKenney 		return; /* Someone else did our work for us. */
1608da915ad5SPaul E. McKenney 	}
1609d20162e0SPaul E. McKenney 	rcu_seq_start(&ssp->srcu_sup->srcu_barrier_seq);
1610d20162e0SPaul E. McKenney 	init_completion(&ssp->srcu_sup->srcu_barrier_completion);
1611da915ad5SPaul E. McKenney 
1612da915ad5SPaul E. McKenney 	/* Initial count prevents reaching zero until all CBs are posted. */
1613d20162e0SPaul E. McKenney 	atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 1);
1614da915ad5SPaul E. McKenney 
1615e29a4915SFrederic Weisbecker 	idx = __srcu_read_lock_nmisafe(ssp);
1616a0d8cbd3SPaul E. McKenney 	if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
16177f24626dSPingfan Liu 		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda,	get_boot_cpu_id()));
1618994f7068SPaul E. McKenney 	else
1619994f7068SPaul E. McKenney 		for_each_possible_cpu(cpu)
1620994f7068SPaul E. McKenney 			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
1621e29a4915SFrederic Weisbecker 	__srcu_read_unlock_nmisafe(ssp, idx);
1622da915ad5SPaul E. McKenney 
1623da915ad5SPaul E. McKenney 	/* Remove the initial count, at which point reaching zero can happen. */
1624d20162e0SPaul E. McKenney 	if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
1625d20162e0SPaul E. McKenney 		complete(&ssp->srcu_sup->srcu_barrier_completion);
1626d20162e0SPaul E. McKenney 	wait_for_completion(&ssp->srcu_sup->srcu_barrier_completion);
1627da915ad5SPaul E. McKenney 
1628d20162e0SPaul E. McKenney 	rcu_seq_end(&ssp->srcu_sup->srcu_barrier_seq);
1629d20162e0SPaul E. McKenney 	mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex);
1630dad81a20SPaul E. McKenney }
1631dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_barrier);
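
/*
 * For illustration, the usual srcu_barrier() pattern on a cleanup
 * path, with my_srcu being a hypothetical srcu_struct:
 *
 *	(first prevent any further call_srcu() invocations)
 *	srcu_barrier(&my_srcu);		(wait for callbacks in flight)
 *	cleanup_srcu_struct(&my_srcu);	(then tear down the srcu_struct)
 *
 * Note that srcu_barrier() waits only for callbacks posted before it
 * was called, hence the need to first cut off new call_srcu() calls.
 */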
1632dad81a20SPaul E. McKenney 
1633dad81a20SPaul E. McKenney /**
1634dad81a20SPaul E. McKenney  * srcu_batches_completed - return batches completed.
1635aacb5d91SPaul E. McKenney  * @ssp: srcu_struct on which to report batch completion.
1636dad81a20SPaul E. McKenney  *
1637dad81a20SPaul E. McKenney  * Report the number of batches, correlated with, but not necessarily
1638dad81a20SPaul E. McKenney  * precisely the same as, the number of grace periods that have elapsed.
1639dad81a20SPaul E. McKenney  */
1640aacb5d91SPaul E. McKenney unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1641dad81a20SPaul E. McKenney {
164239f91504SPaul E. McKenney 	return READ_ONCE(ssp->srcu_idx);
1643dad81a20SPaul E. McKenney }
1644dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_batches_completed);
1645dad81a20SPaul E. McKenney 
1646dad81a20SPaul E. McKenney /*
1647da915ad5SPaul E. McKenney  * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
1648da915ad5SPaul E. McKenney  * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
1649da915ad5SPaul E. McKenney  * completed in that state.
1650dad81a20SPaul E. McKenney  */
1651aacb5d91SPaul E. McKenney static void srcu_advance_state(struct srcu_struct *ssp)
1652dad81a20SPaul E. McKenney {
1653dad81a20SPaul E. McKenney 	int idx;
1654dad81a20SPaul E. McKenney 
1655e3a6ab25SPaul E. McKenney 	mutex_lock(&ssp->srcu_sup->srcu_gp_mutex);
1656da915ad5SPaul E. McKenney 
1657dad81a20SPaul E. McKenney 	/*
1658dad81a20SPaul E. McKenney 	 * Because readers might be delayed for an extended period after
1659da915ad5SPaul E. McKenney 	 * fetching ->srcu_idx for their index, at any point in time there
1660dad81a20SPaul E. McKenney 	 * might well be readers using both idx=0 and idx=1.  We therefore
1661dad81a20SPaul E. McKenney 	 * need to wait for readers to clear from both index values before
1662dad81a20SPaul E. McKenney 	 * invoking a callback.
1663dad81a20SPaul E. McKenney 	 *
1664dad81a20SPaul E. McKenney 	 * The load-acquire ensures that we see the accesses performed
1665dad81a20SPaul E. McKenney 	 * by the prior grace period.
1666dad81a20SPaul E. McKenney 	 */
166703200b5cSPaul E. McKenney 	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq)); /* ^^^ */
1668dad81a20SPaul E. McKenney 	if (idx == SRCU_STATE_IDLE) {
1669b3fb11f7SPaul E. McKenney 		spin_lock_irq_rcu_node(ssp->srcu_sup);
167003200b5cSPaul E. McKenney 		if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) {
167103200b5cSPaul E. McKenney 			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq));
1672b3fb11f7SPaul E. McKenney 			spin_unlock_irq_rcu_node(ssp->srcu_sup);
1673e3a6ab25SPaul E. McKenney 			mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1674dad81a20SPaul E. McKenney 			return;
1675dad81a20SPaul E. McKenney 		}
167603200b5cSPaul E. McKenney 		idx = rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq));
1677dad81a20SPaul E. McKenney 		if (idx == SRCU_STATE_IDLE)
1678aacb5d91SPaul E. McKenney 			srcu_gp_start(ssp);
1679b3fb11f7SPaul E. McKenney 		spin_unlock_irq_rcu_node(ssp->srcu_sup);
1680da915ad5SPaul E. McKenney 		if (idx != SRCU_STATE_IDLE) {
1681e3a6ab25SPaul E. McKenney 			mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1682dad81a20SPaul E. McKenney 			return; /* Someone else started the grace period. */
1683dad81a20SPaul E. McKenney 		}
1684da915ad5SPaul E. McKenney 	}
1685dad81a20SPaul E. McKenney 
168603200b5cSPaul E. McKenney 	if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1687aacb5d91SPaul E. McKenney 		idx = 1 ^ (ssp->srcu_idx & 1);
1688aacb5d91SPaul E. McKenney 		if (!try_check_zero(ssp, idx, 1)) {
1689e3a6ab25SPaul E. McKenney 			mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1690dad81a20SPaul E. McKenney 			return; /* readers present, retry later. */
1691da915ad5SPaul E. McKenney 		}
1692aacb5d91SPaul E. McKenney 		srcu_flip(ssp);
1693b3fb11f7SPaul E. McKenney 		spin_lock_irq_rcu_node(ssp->srcu_sup);
169403200b5cSPaul E. McKenney 		rcu_seq_set_state(&ssp->srcu_sup->srcu_gp_seq, SRCU_STATE_SCAN2);
16953b46679cSPaul E. McKenney 		ssp->srcu_sup->srcu_n_exp_nodelay = 0;
1696b3fb11f7SPaul E. McKenney 		spin_unlock_irq_rcu_node(ssp->srcu_sup);
1697dad81a20SPaul E. McKenney 	}
1698dad81a20SPaul E. McKenney 
169903200b5cSPaul E. McKenney 	if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1700dad81a20SPaul E. McKenney 
1701dad81a20SPaul E. McKenney 		/*
1702dad81a20SPaul E. McKenney 		 * SRCU read-side critical sections are normally short,
1703dad81a20SPaul E. McKenney 		 * so check at least twice in quick succession after a flip.
1704dad81a20SPaul E. McKenney 		 */
1705aacb5d91SPaul E. McKenney 		idx = 1 ^ (ssp->srcu_idx & 1);
1706aacb5d91SPaul E. McKenney 		if (!try_check_zero(ssp, idx, 2)) {
1707e3a6ab25SPaul E. McKenney 			mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1708da915ad5SPaul E. McKenney 			return; /* readers present, retry later. */
1709da915ad5SPaul E. McKenney 		}
17103b46679cSPaul E. McKenney 		ssp->srcu_sup->srcu_n_exp_nodelay = 0;
1711aacb5d91SPaul E. McKenney 		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
1712dad81a20SPaul E. McKenney 	}
1713dad81a20SPaul E. McKenney }
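
/*
 * In summary, srcu_advance_state() drives each grace period through
 * IDLE -> SCAN1 -> SCAN2 -> IDLE: srcu_gp_start() enters SCAN1, a
 * successful try_check_zero() on the old index permits srcu_flip()
 * and entry to SCAN2, and a second successful try_check_zero() on the
 * newly-old index lets srcu_gp_end() complete the grace period, with
 * early returns forcing a later retry whenever readers remain.
 */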
1714dad81a20SPaul E. McKenney 
1715dad81a20SPaul E. McKenney /*
1716dad81a20SPaul E. McKenney  * Invoke a limited number of SRCU callbacks that have passed through
1717dad81a20SPaul E. McKenney  * their grace period.  If there are more to do, SRCU will reschedule
1718dad81a20SPaul E. McKenney  * the workqueue.  Note that needed memory barriers have been executed
1719dad81a20SPaul E. McKenney  * in this task's context by srcu_readers_active_idx_check().
1720dad81a20SPaul E. McKenney  */
1721da915ad5SPaul E. McKenney static void srcu_invoke_callbacks(struct work_struct *work)
1722dad81a20SPaul E. McKenney {
1723ae5c2341SJoel Fernandes (Google) 	long len;
1724da915ad5SPaul E. McKenney 	bool more;
1725dad81a20SPaul E. McKenney 	struct rcu_cblist ready_cbs;
1726dad81a20SPaul E. McKenney 	struct rcu_head *rhp;
1727da915ad5SPaul E. McKenney 	struct srcu_data *sdp;
1728aacb5d91SPaul E. McKenney 	struct srcu_struct *ssp;
1729dad81a20SPaul E. McKenney 
1730e81baf4cSSebastian Andrzej Siewior 	sdp = container_of(work, struct srcu_data, work);
1731e81baf4cSSebastian Andrzej Siewior 
1732aacb5d91SPaul E. McKenney 	ssp = sdp->ssp;
1733dad81a20SPaul E. McKenney 	rcu_cblist_init(&ready_cbs);
1734d6331980SPaul E. McKenney 	spin_lock_irq_rcu_node(sdp);
17358a77f38bSFrederic Weisbecker 	WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
1736da915ad5SPaul E. McKenney 	rcu_segcblist_advance(&sdp->srcu_cblist,
173703200b5cSPaul E. McKenney 			      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
1738c21357e4SFrederic Weisbecker 	/*
1739c21357e4SFrederic Weisbecker 	 * Although this function is theoretically re-entrant, concurrent
1740c21357e4SFrederic Weisbecker 	 * callback invocation is disallowed to avoid executing an SRCU barrier
1741c21357e4SFrederic Weisbecker 	 * too early.
1742c21357e4SFrederic Weisbecker 	 */
1743da915ad5SPaul E. McKenney 	if (sdp->srcu_cblist_invoking ||
1744da915ad5SPaul E. McKenney 	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
1745d6331980SPaul E. McKenney 		spin_unlock_irq_rcu_node(sdp);
1746da915ad5SPaul E. McKenney 		return;  /* Someone else on the job or nothing to do. */
1747da915ad5SPaul E. McKenney 	}
1748da915ad5SPaul E. McKenney 
1749da915ad5SPaul E. McKenney 	/* We are on the job!  Extract and invoke ready callbacks. */
1750da915ad5SPaul E. McKenney 	sdp->srcu_cblist_invoking = true;
1751da915ad5SPaul E. McKenney 	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
1752ae5c2341SJoel Fernandes (Google) 	len = ready_cbs.len;
1753d6331980SPaul E. McKenney 	spin_unlock_irq_rcu_node(sdp);
1754dad81a20SPaul E. McKenney 	rhp = rcu_cblist_dequeue(&ready_cbs);
1755dad81a20SPaul E. McKenney 	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
1756a602538eSPaul E. McKenney 		debug_rcu_head_unqueue(rhp);
17572cbc482dSZhen Lei 		debug_rcu_head_callback(rhp);
1758dad81a20SPaul E. McKenney 		local_bh_disable();
1759dad81a20SPaul E. McKenney 		rhp->func(rhp);
1760dad81a20SPaul E. McKenney 		local_bh_enable();
1761dad81a20SPaul E. McKenney 	}
1762ae5c2341SJoel Fernandes (Google) 	WARN_ON_ONCE(ready_cbs.len);
1763da915ad5SPaul E. McKenney 
1764da915ad5SPaul E. McKenney 	/*
1765da915ad5SPaul E. McKenney 	 * Update counts, accelerate new callbacks, and if needed,
1766da915ad5SPaul E. McKenney 	 * schedule another round of callback invocation.
1767da915ad5SPaul E. McKenney 	 */
1768d6331980SPaul E. McKenney 	spin_lock_irq_rcu_node(sdp);
1769ae5c2341SJoel Fernandes (Google) 	rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
1770da915ad5SPaul E. McKenney 	sdp->srcu_cblist_invoking = false;
1771da915ad5SPaul E. McKenney 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
1772d6331980SPaul E. McKenney 	spin_unlock_irq_rcu_node(sdp);
1773c21357e4SFrederic Weisbecker 	/* An SRCU barrier or callbacks from a previous round of work are pending. */
1774da915ad5SPaul E. McKenney 	if (more)
1775da915ad5SPaul E. McKenney 		srcu_schedule_cbs_sdp(sdp, 0);
1776dad81a20SPaul E. McKenney }
1777dad81a20SPaul E. McKenney 
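/*
 * Example (illustrative sketch, not a definitive usage pattern): the
 * callbacks invoked above arrive via call_srcu().  Given a hypothetical
 * srcu_struct my_srcu and element type my_data:
 *
 *	struct my_data {
 *		struct rcu_head rh;
 *		int payload;
 *	};
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_data, rh));
 *	}
 *
 *	// With p already unlinked from all reader-visible structures:
 *	call_srcu(&my_srcu, &p->rh, my_free_cb);
 *
 * After a grace period elapses, my_free_cb() runs from this workqueue
 * handler, with bottom halves disabled around the invocation as in the
 * dequeue loop above.
 */
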
1778dad81a20SPaul E. McKenney /*
1779dad81a20SPaul E. McKenney  * Finished one round of SRCU grace-period processing.  Start another if
1780dad81a20SPaul E. McKenney  * more SRCU callbacks are queued, otherwise put SRCU into not-running state.
1781dad81a20SPaul E. McKenney  */
1782aacb5d91SPaul E. McKenney static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1783dad81a20SPaul E. McKenney {
1784da915ad5SPaul E. McKenney 	bool pushgp = true;
1785dad81a20SPaul E. McKenney 
1786b3fb11f7SPaul E. McKenney 	spin_lock_irq_rcu_node(ssp->srcu_sup);
178703200b5cSPaul E. McKenney 	if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) {
178803200b5cSPaul E. McKenney 		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq))) {
1789da915ad5SPaul E. McKenney 			/* All requests fulfilled, time to go idle. */
1790da915ad5SPaul E. McKenney 			pushgp = false;
1791dad81a20SPaul E. McKenney 		}
179203200b5cSPaul E. McKenney 	} else if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq)) {
1793da915ad5SPaul E. McKenney 		/* Outstanding request and no GP.  Start one. */
1794aacb5d91SPaul E. McKenney 		srcu_gp_start(ssp);
1795da915ad5SPaul E. McKenney 	}
1796b3fb11f7SPaul E. McKenney 	spin_unlock_irq_rcu_node(ssp->srcu_sup);
1797dad81a20SPaul E. McKenney 
1798da915ad5SPaul E. McKenney 	if (pushgp)
1799fd1b3f8eSPaul E. McKenney 		queue_delayed_work(rcu_gp_wq, &ssp->srcu_sup->work, delay);
1800dad81a20SPaul E. McKenney }
1801dad81a20SPaul E. McKenney 
1802dad81a20SPaul E. McKenney /*
1803dad81a20SPaul E. McKenney  * This is the work-queue function that handles SRCU grace periods.
1804dad81a20SPaul E. McKenney  */
18050d8a1e83SPaul E. McKenney static void process_srcu(struct work_struct *work)
1806dad81a20SPaul E. McKenney {
1807282d8998SPaul E. McKenney 	unsigned long curdelay;
1808282d8998SPaul E. McKenney 	unsigned long j;
1809aacb5d91SPaul E. McKenney 	struct srcu_struct *ssp;
1810fd1b3f8eSPaul E. McKenney 	struct srcu_usage *sup;
1811dad81a20SPaul E. McKenney 
1812fd1b3f8eSPaul E. McKenney 	sup = container_of(work, struct srcu_usage, work.work);
1813fd1b3f8eSPaul E. McKenney 	ssp = sup->srcu_ssp;
1814dad81a20SPaul E. McKenney 
1815aacb5d91SPaul E. McKenney 	srcu_advance_state(ssp);
1816282d8998SPaul E. McKenney 	curdelay = srcu_get_delay(ssp);
1817282d8998SPaul E. McKenney 	if (curdelay) {
1818fd1b3f8eSPaul E. McKenney 		WRITE_ONCE(sup->reschedule_count, 0);
1819282d8998SPaul E. McKenney 	} else {
1820282d8998SPaul E. McKenney 		j = jiffies;
1821fd1b3f8eSPaul E. McKenney 		if (READ_ONCE(sup->reschedule_jiffies) == j) {
1822fd1b3f8eSPaul E. McKenney 			WRITE_ONCE(sup->reschedule_count, READ_ONCE(sup->reschedule_count) + 1);
1823fd1b3f8eSPaul E. McKenney 			if (READ_ONCE(sup->reschedule_count) > srcu_max_nodelay)
1824282d8998SPaul E. McKenney 				curdelay = 1;
1825282d8998SPaul E. McKenney 		} else {
1826fd1b3f8eSPaul E. McKenney 			WRITE_ONCE(sup->reschedule_count, 1);
1827fd1b3f8eSPaul E. McKenney 			WRITE_ONCE(sup->reschedule_jiffies, j);
1828282d8998SPaul E. McKenney 		}
1829282d8998SPaul E. McKenney 	}
1830282d8998SPaul E. McKenney 	srcu_reschedule(ssp, curdelay);
1831dad81a20SPaul E. McKenney }
18327f6733c3SPaul E. McKenney 
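/*
 * Worked example of the anti-livelock heuristic above, with
 * hypothetical numbers: suppose srcu_max_nodelay is 100 and
 * srcu_get_delay() keeps returning zero.  If process_srcu() is then
 * re-invoked more than 100 times within the same jiffy,
 * ->reschedule_count exceeds srcu_max_nodelay and curdelay is forced
 * to one jiffy, keeping a busy grace-period workqueue from
 * monopolizing a CPU.  The count is reset whenever jiffies advances
 * or a nonzero delay is in effect.
 */
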
1833dddcddefSZqiang void srcutorture_get_gp_data(struct srcu_struct *ssp, int *flags,
1834aebc8264SPaul E. McKenney 			     unsigned long *gp_seq)
18357f6733c3SPaul E. McKenney {
18367f6733c3SPaul E. McKenney 	*flags = 0;
183703200b5cSPaul E. McKenney 	*gp_seq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq);
18387f6733c3SPaul E. McKenney }
18397f6733c3SPaul E. McKenney EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
18401f4f6da1SPaul E. McKenney 
18413bedebcfSPaul E. McKenney static const char * const srcu_size_state_name[] = {
18423bedebcfSPaul E. McKenney 	"SRCU_SIZE_SMALL",
18433bedebcfSPaul E. McKenney 	"SRCU_SIZE_ALLOC",
18443bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_BARRIER",
18453bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CALL",
18463bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CBS1",
18473bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CBS2",
18483bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CBS3",
18493bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CBS4",
18503bedebcfSPaul E. McKenney 	"SRCU_SIZE_BIG",
18513bedebcfSPaul E. McKenney 	"SRCU_SIZE_???",
18523bedebcfSPaul E. McKenney };
18533bedebcfSPaul E. McKenney 
1854aacb5d91SPaul E. McKenney void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1855115a1a52SPaul E. McKenney {
1856115a1a52SPaul E. McKenney 	int cpu;
1857115a1a52SPaul E. McKenney 	int idx;
1858ac3748c6SPaul E. McKenney 	unsigned long s0 = 0, s1 = 0;
1859a0d8cbd3SPaul E. McKenney 	int ss_state = READ_ONCE(ssp->srcu_sup->srcu_size_state);
18603bedebcfSPaul E. McKenney 	int ss_state_idx = ss_state;
1861115a1a52SPaul E. McKenney 
1862aacb5d91SPaul E. McKenney 	idx = ssp->srcu_idx & 0x1;
18633bedebcfSPaul E. McKenney 	if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
18643bedebcfSPaul E. McKenney 		ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
18654a230f80SPaul E. McKenney 	pr_alert("%s%s Tree SRCU g%ld state %d (%s)",
186603200b5cSPaul E. McKenney 		 tt, tf, rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq), ss_state,
18674a230f80SPaul E. McKenney 		 srcu_size_state_name[ss_state_idx]);
18684a230f80SPaul E. McKenney 	if (!ssp->sda) {
18694a230f80SPaul E. McKenney 		// Called after cleanup_srcu_struct(), perhaps.
18704a230f80SPaul E. McKenney 		pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
18714a230f80SPaul E. McKenney 	} else {
18724a230f80SPaul E. McKenney 		pr_cont(" per-CPU(idx=%d):", idx);
1873115a1a52SPaul E. McKenney 		for_each_possible_cpu(cpu) {
1874115a1a52SPaul E. McKenney 			unsigned long l0, l1;
1875115a1a52SPaul E. McKenney 			unsigned long u0, u1;
1876115a1a52SPaul E. McKenney 			long c0, c1;
18775ab07a8dSPaul E. McKenney 			struct srcu_data *sdp;
1878115a1a52SPaul E. McKenney 
1879aacb5d91SPaul E. McKenney 			sdp = per_cpu_ptr(ssp->sda, cpu);
18805d0f5953SPaul E. McKenney 			u0 = data_race(atomic_long_read(&sdp->srcu_unlock_count[!idx]));
18815d0f5953SPaul E. McKenney 			u1 = data_race(atomic_long_read(&sdp->srcu_unlock_count[idx]));
1882115a1a52SPaul E. McKenney 
1883115a1a52SPaul E. McKenney 			/*
1884115a1a52SPaul E. McKenney 			 * Make sure that a lock is always counted if the corresponding
1885115a1a52SPaul E. McKenney 			 * unlock is counted.
1886115a1a52SPaul E. McKenney 			 */
1887115a1a52SPaul E. McKenney 			smp_rmb();
1888115a1a52SPaul E. McKenney 
18895d0f5953SPaul E. McKenney 			l0 = data_race(atomic_long_read(&sdp->srcu_lock_count[!idx]));
18905d0f5953SPaul E. McKenney 			l1 = data_race(atomic_long_read(&sdp->srcu_lock_count[idx]));
1891115a1a52SPaul E. McKenney 
1892115a1a52SPaul E. McKenney 			c0 = l0 - u0;
1893115a1a52SPaul E. McKenney 			c1 = l1 - u1;
18947e210a65SPaul E. McKenney 			pr_cont(" %d(%ld,%ld %c)",
18957e210a65SPaul E. McKenney 				cpu, c0, c1,
18967e210a65SPaul E. McKenney 				"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
1897ac3748c6SPaul E. McKenney 			s0 += c0;
1898ac3748c6SPaul E. McKenney 			s1 += c1;
1899115a1a52SPaul E. McKenney 		}
1900ac3748c6SPaul E. McKenney 		pr_cont(" T(%ld,%ld)\n", s0, s1);
19014a230f80SPaul E. McKenney 	}
19029f2e91d9SPaul E. McKenney 	if (SRCU_SIZING_IS_TORTURE())
190399659f64SPaul E. McKenney 		srcu_transition_to_big(ssp);
1904115a1a52SPaul E. McKenney }
1905115a1a52SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
1906115a1a52SPaul E. McKenney 
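/*
 * Example (hypothetical values, assembled from the format strings
 * above): on a two-CPU system one statistics line might read
 *
 *	rcu: srcu- Tree SRCU g120 state 8 (SRCU_SIZE_BIG) per-CPU(idx=0): 0(0,1 .) 1(0,0 C) T(0,1)
 *
 * where each "cpu(c0,c1 flag)" entry gives the lock-minus-unlock
 * counts for the two index slots, '.' marks an empty per-CPU callback
 * list, 'C' marks a non-empty one, and T(s0,s1) gives the totals.
 */
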
19071f4f6da1SPaul E. McKenney static int __init srcu_bootup_announce(void)
19081f4f6da1SPaul E. McKenney {
19091f4f6da1SPaul E. McKenney 	pr_info("Hierarchical SRCU implementation.\n");
19100c8e0e3cSPaul E. McKenney 	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
19110c8e0e3cSPaul E. McKenney 		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
19124f2bfd94SNeeraj Upadhyay 	if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
19134f2bfd94SNeeraj Upadhyay 		pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
19144f2bfd94SNeeraj Upadhyay 	if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
19154f2bfd94SNeeraj Upadhyay 		pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
19164f2bfd94SNeeraj Upadhyay 	pr_info("\tMax number of phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
19171f4f6da1SPaul E. McKenney 	return 0;
19181f4f6da1SPaul E. McKenney }
19191f4f6da1SPaul E. McKenney early_initcall(srcu_bootup_announce);
1920e0fcba9aSPaul E. McKenney 
1921e0fcba9aSPaul E. McKenney void __init srcu_init(void)
1922e0fcba9aSPaul E. McKenney {
1923fd1b3f8eSPaul E. McKenney 	struct srcu_usage *sup;
1924e0fcba9aSPaul E. McKenney 
1925a57ffb3cSPaul E. McKenney 	/* Decide on srcu_struct-size strategy. */
1926a57ffb3cSPaul E. McKenney 	if (SRCU_SIZING_IS(SRCU_SIZING_AUTO)) {
1927a57ffb3cSPaul E. McKenney 		if (nr_cpu_ids >= big_cpu_lim) {
1928a57ffb3cSPaul E. McKenney 			convert_to_big = SRCU_SIZING_INIT; // Don't bother waiting for contention.
1929a57ffb3cSPaul E. McKenney 			pr_info("%s: Setting srcu_struct sizes to big.\n", __func__);
1930a57ffb3cSPaul E. McKenney 		} else {
1931a57ffb3cSPaul E. McKenney 			convert_to_big = SRCU_SIZING_NONE | SRCU_SIZING_CONTEND;
1932a57ffb3cSPaul E. McKenney 			pr_info("%s: Setting srcu_struct sizes based on contention.\n", __func__);
1933a57ffb3cSPaul E. McKenney 		}
1934a57ffb3cSPaul E. McKenney 	}
1935a57ffb3cSPaul E. McKenney 
19368e9c01c7SFrederic Weisbecker 	/*
19378e9c01c7SFrederic Weisbecker 	 * Once srcu_init_done is set, call_srcu() can follow the normal
19388e9c01c7SFrederic Weisbecker 	 * path and queue delayed work.  This must therefore follow both
19398e9c01c7SFrederic Weisbecker 	 * RCU workqueue creation and timer initialization.
19408e9c01c7SFrederic Weisbecker 	 */
1941e0fcba9aSPaul E. McKenney 	srcu_init_done = true;
1942e0fcba9aSPaul E. McKenney 	while (!list_empty(&srcu_boot_list)) {
1943fd1b3f8eSPaul E. McKenney 		sup = list_first_entry(&srcu_boot_list, struct srcu_usage,
19444e6ea4efSPaul E. McKenney 				      work.work.entry);
1945fd1b3f8eSPaul E. McKenney 		list_del_init(&sup->work.work.entry);
1946a0d8cbd3SPaul E. McKenney 		if (SRCU_SIZING_IS(SRCU_SIZING_INIT) &&
1947fd1b3f8eSPaul E. McKenney 		    sup->srcu_size_state == SRCU_SIZE_SMALL)
1948fd1b3f8eSPaul E. McKenney 			sup->srcu_size_state = SRCU_SIZE_ALLOC;
1949fd1b3f8eSPaul E. McKenney 		queue_work(rcu_gp_wq, &sup->work.work);
1950e0fcba9aSPaul E. McKenney 	}
1951e0fcba9aSPaul E. McKenney }
1952fe15b50cSPaul E. McKenney 
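/*
 * Example (illustrative, assuming convert_to_big is exposed as the
 * srcutree.convert_to_big module parameter): the sizing decision made
 * in srcu_init() can be forced from the kernel command line, e.g.:
 *
 *	srcutree.convert_to_big=1
 *
 * which makes the loop above move boot-time srcu_struct structures
 * from SRCU_SIZE_SMALL to SRCU_SIZE_ALLOC, instead of relying on the
 * nr_cpu_ids >= big_cpu_lim comparison.
 */
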
1953fe15b50cSPaul E. McKenney #ifdef CONFIG_MODULES
1954fe15b50cSPaul E. McKenney 
1955fe15b50cSPaul E. McKenney /* Initialize any global-scope srcu_struct structures used by this module. */
1956fe15b50cSPaul E. McKenney static int srcu_module_coming(struct module *mod)
1957fe15b50cSPaul E. McKenney {
1958fe15b50cSPaul E. McKenney 	int i;
1959f4d01a25SPaul E. McKenney 	struct srcu_struct *ssp;
1960fe15b50cSPaul E. McKenney 	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1961fe15b50cSPaul E. McKenney 
1962fe15b50cSPaul E. McKenney 	for (i = 0; i < mod->num_srcu_structs; i++) {
1963f4d01a25SPaul E. McKenney 		ssp = *(sspp++);
1964f4d01a25SPaul E. McKenney 		ssp->sda = alloc_percpu(struct srcu_data);
1965f4d01a25SPaul E. McKenney 		if (WARN_ON_ONCE(!ssp->sda))
1966f4d01a25SPaul E. McKenney 			return -ENOMEM;
1967fe15b50cSPaul E. McKenney 	}
1968fe15b50cSPaul E. McKenney 	return 0;
1969fe15b50cSPaul E. McKenney }
1970fe15b50cSPaul E. McKenney 
1971fe15b50cSPaul E. McKenney /* Clean up any global-scope srcu_struct structures used by this module. */
1972fe15b50cSPaul E. McKenney static void srcu_module_going(struct module *mod)
1973fe15b50cSPaul E. McKenney {
1974fe15b50cSPaul E. McKenney 	int i;
1975f4d01a25SPaul E. McKenney 	struct srcu_struct *ssp;
1976fe15b50cSPaul E. McKenney 	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1977fe15b50cSPaul E. McKenney 
1978f4d01a25SPaul E. McKenney 	for (i = 0; i < mod->num_srcu_structs; i++) {
1979f4d01a25SPaul E. McKenney 		ssp = *(sspp++);
198003200b5cSPaul E. McKenney 		if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed)) &&
1981660349acSPaul E. McKenney 		    !WARN_ON_ONCE(!ssp->srcu_sup->sda_is_static))
1982f4d01a25SPaul E. McKenney 			cleanup_srcu_struct(ssp);
1983a7bf4d7cSPaul E. McKenney 		if (!WARN_ON(srcu_readers_active(ssp)))
1984f4d01a25SPaul E. McKenney 			free_percpu(ssp->sda);
1985f4d01a25SPaul E. McKenney 	}
1986fe15b50cSPaul E. McKenney }
1987fe15b50cSPaul E. McKenney 
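/*
 * Example (illustrative): the coming/going hooks above serve
 * srcu_struct instances declared at module scope, for instance in a
 * hypothetical module:
 *
 *	DEFINE_STATIC_SRCU(my_srcu);
 *
 *	static int my_reader(void)
 *	{
 *		int idx = srcu_read_lock(&my_srcu);
 *		// ... dereference module data protected by my_srcu ...
 *		srcu_read_unlock(&my_srcu, idx);
 *		return 0;
 *	}
 *
 * srcu_module_coming() allocates the per-CPU ->sda array for each such
 * instance at load time, and srcu_module_going() cleans it up when the
 * module is removed.
 */
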
1988fe15b50cSPaul E. McKenney /* Handle one module, either coming or going. */
1989fe15b50cSPaul E. McKenney static int srcu_module_notify(struct notifier_block *self,
1990fe15b50cSPaul E. McKenney 			      unsigned long val, void *data)
1991fe15b50cSPaul E. McKenney {
1992fe15b50cSPaul E. McKenney 	struct module *mod = data;
1993fe15b50cSPaul E. McKenney 	int ret = 0;
1994fe15b50cSPaul E. McKenney 
1995fe15b50cSPaul E. McKenney 	switch (val) {
1996fe15b50cSPaul E. McKenney 	case MODULE_STATE_COMING:
1997fe15b50cSPaul E. McKenney 		ret = srcu_module_coming(mod);
1998fe15b50cSPaul E. McKenney 		break;
1999fe15b50cSPaul E. McKenney 	case MODULE_STATE_GOING:
2000fe15b50cSPaul E. McKenney 		srcu_module_going(mod);
2001fe15b50cSPaul E. McKenney 		break;
2002fe15b50cSPaul E. McKenney 	default:
2003fe15b50cSPaul E. McKenney 		break;
2004fe15b50cSPaul E. McKenney 	}
2005fe15b50cSPaul E. McKenney 	return ret;
2006fe15b50cSPaul E. McKenney }
2007fe15b50cSPaul E. McKenney 
2008fe15b50cSPaul E. McKenney static struct notifier_block srcu_module_nb = {
2009fe15b50cSPaul E. McKenney 	.notifier_call = srcu_module_notify,
2010fe15b50cSPaul E. McKenney 	.priority = 0,
2011fe15b50cSPaul E. McKenney };
2012fe15b50cSPaul E. McKenney 
2013fe15b50cSPaul E. McKenney static __init int init_srcu_module_notifier(void)
2014fe15b50cSPaul E. McKenney {
2015fe15b50cSPaul E. McKenney 	int ret;
2016fe15b50cSPaul E. McKenney 
2017fe15b50cSPaul E. McKenney 	ret = register_module_notifier(&srcu_module_nb);
2018fe15b50cSPaul E. McKenney 	if (ret)
2019fe15b50cSPaul E. McKenney 		pr_warn("Failed to register srcu module notifier\n");
2020fe15b50cSPaul E. McKenney 	return ret;
2021fe15b50cSPaul E. McKenney }
2022fe15b50cSPaul E. McKenney late_initcall(init_srcu_module_notifier);
2023fe15b50cSPaul E. McKenney 
2024fe15b50cSPaul E. McKenney #endif /* #ifdef CONFIG_MODULES */
2025