// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/*
 * Control conversion to SRCU_SIZE_BIG:
 * 0: Don't convert at all (default).
 * 1: Convert at init_srcu_struct() time.
 * 2: Convert when rcutorture invokes srcu_torture_stats_print().
 */
static int convert_to_big;
module_param(convert_to_big, int, 0444);
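
/*
 * Usage note: because this code is normally built into the kernel, the
 * module parameters above are typically set on the kernel command line
 * with the "srcutree." prefix, for example "srcutree.convert_to_big=1"
 * or "srcutree.exp_holdoff=0".
 */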

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)	\

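/*
 * Usage sketch for the wrappers above (hypothetical caller, for
 * illustration only):
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave_rcu_node(sdp, flags);
 *	...update fields protected by sdp's ->lock...
 *	spin_unlock_irqrestore_rcu_node(sdp, flags);
 *
 * The smp_mb__after_unlock_lock() in the acquisition wrappers upgrades
 * an UNLOCK on one CPU followed by a LOCK on another to a full memory
 * barrier, which the grace-period machinery below relies on when work
 * is handed off between CPUs.
 */
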
/*
 * Initialize SRCU per-CPU data.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  This is why the
 * ->srcu_lock_count[] and ->srcu_unlock_count[] arrays are never
 * initialized here (see the is_static parameter of
 * init_srcu_struct_fields()).
 */
static void init_srcu_struct_data(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = NULL;
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
	}
}
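
/*
 * Note that the loop above leaves ->mynode NULL and does not set
 * ->grpmask: both are filled in by init_srcu_struct_nodes() if and when
 * this srcu_struct is converted to SRCU_SIZE_BIG and the combining tree
 * is built.
 */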

/* Invalid seq state, used during snp node initialization */
#define SRCU_SNP_INIT_SEQ		0x2

/*
 * Check whether the sequence number corresponding to a snp node
 * is invalid.
 */
static inline bool srcu_invl_snp_seq(unsigned long s)
{
	return rcu_seq_state(s) == SRCU_SNP_INIT_SEQ;
}

/*
 * Allocate and initialize the SRCU combining tree.  Returns @true if
 * allocation succeeded and @false otherwise.
 */
static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Initialize geometry if it has not already been initialized. */
	rcu_init_geometry();
	ssp->node = kcalloc(rcu_num_nodes, sizeof(*ssp->node), gfp_flags);
	if (!ssp->node)
		return false;

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
	}
	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
	return true;
}
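
/*
 * The smp_store_release() of ->srcu_size_state above pairs with the
 * smp_load_acquire() calls in srcu_gp_start(), srcu_funnel_gp_start(),
 * and srcu_gp_start_if_needed(), so that those readers see a fully
 * initialized combining tree before they see SRCU_SIZE_WAIT_BARRIER.
 */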

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static parameter
 * tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	ssp->srcu_size_state = SRCU_SIZE_SMALL;
	ssp->node = NULL;
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		return -ENOMEM;
	init_srcu_struct_data(ssp);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	if (READ_ONCE(ssp->srcu_size_state) == SRCU_SIZE_SMALL && convert_to_big == 1) {
		if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) {
			if (!is_static) {
				free_percpu(ssp->sda);
				ssp->sda = NULL;
				return -ENOMEM;
			}
		} else {
			WRITE_ONCE(ssp->srcu_size_state, SRCU_SIZE_BIG);
		}
	}
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
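
/*
 * Typical dynamic-initialization lifecycle (hypothetical caller, for
 * illustration only):
 *
 *	static struct srcu_struct my_srcu;
 *
 *	ret = init_srcu_struct(&my_srcu);
 *	...
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	...read-side access to *p...
 *	srcu_read_unlock(&my_srcu, idx);
 *	...
 *	synchronize_srcu(&my_srcu);  (or call_srcu() followed by srcu_barrier())
 *	cleanup_srcu_struct(&my_srcu);
 *
 * Statically allocated domains can instead use DEFINE_SRCU() or
 * DEFINE_STATIC_SRCU(), in which case check_init_srcu_struct() below
 * finishes the job on first update-side use.
 */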

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted. Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side. In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between. This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

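/*
 * Rough sketch of the store-buffering pattern referred to in
 * srcu_readers_active_idx_check() above (illustrative only, with "x"
 * standing in for some SRCU-protected datum):
 *
 *	Reader				Updater
 *	------				-------
 *	srcu_lock_count[idx]++		write x
 *	smp_mb()  (B)			smp_mb()  (A)
 *	read x				read srcu_lock_count[idx]
 *
 * With both full barriers present, the outcome in which the updater
 * misses the reader's increment while the reader also misses the
 * updater's store to x is forbidden, which is what lets a matching
 * lock/unlock sum imply "no pre-existing readers".
 */
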
/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}
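
/*
 * Note that a zero return from srcu_get_delay() doubles as "an expedited
 * grace period is pending", which is how cleanup_srcu_struct() and
 * try_check_zero() below interpret it.
 */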

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(rcu_seq_current(&ssp->srcu_gp_seq) != ssp->srcu_gp_seq_needed) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)),
			rcu_seq_current(&ssp->srcu_gp_seq), ssp->srcu_gp_seq_needed);
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(ssp->sda);
	ssp->sda = NULL;
	kfree(ssp->node);
	ssp->node = NULL;
	ssp->srcu_size_state = SRCU_SIZE_SMALL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
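
/*
 * Usage note (sketch): if call_srcu() was used on this srcu_struct, wait
 * for the callbacks, typically via srcu_barrier(), before calling
 * cleanup_srcu_struct(); otherwise the rcu_segcblist_n_cbs() check above
 * fires and the structure is intentionally leaked rather than being freed
 * out from under the pending callbacks.
 */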

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
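
/*
 * Readers normally reach the two functions above via srcu_read_lock()
 * and srcu_read_unlock(), which add the lockdep annotations.  The index
 * returned by __srcu_read_lock() must be passed back unchanged:
 * srcu_flip() may change ->srcu_idx while the critical section runs,
 * and the unlock must be counted against the same bank of counters
 * that the lock incremented.
 */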

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
	int state;

	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		sdp = per_cpu_ptr(ssp->sda, 0);
	else
		sdp = this_cpu_ptr(ssp->sda);
	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}


static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

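	/* Shorten any already-pending delay, but never lengthen it. */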
	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	unsigned long sgsne;
	struct srcu_node *snp;
	int ss_state;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(ssp);
	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	ss_state = smp_load_acquire(&ssp->srcu_size_state);
	if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, 0), cbdelay);
	} else {
		idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
		srcu_for_each_node_breadth_first(ssp, snp) {
			spin_lock_irq_rcu_node(snp);
			cbs = false;
			last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
			if (last_lvl)
				cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
			snp->srcu_have_cbs[idx] = gpseq;
			rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, gpseq))
				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
			if (ss_state < SRCU_SIZE_BIG)
				mask = ~0;
			else
				mask = snp->srcu_data_have_cbs[idx];
			snp->srcu_data_have_cbs[idx] = 0;
			spin_unlock_irq_rcu_node(snp);
			if (cbs)
				srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
		}
	}

	/* Occasionally prevent srcu_data counter wrap. */
	if (!(gpseq & counter_wrap_check))
		for_each_possible_cpu(cpu) {
			sdp = per_cpu_ptr(ssp->sda, cpu);
			spin_lock_irqsave_rcu_node(sdp, flags);
			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
				sdp->srcu_gp_seq_needed = gpseq;
			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
				sdp->srcu_gp_seq_needed_exp = gpseq;
			spin_unlock_irqrestore_rcu_node(sdp, flags);
		}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}

	/* Transition to big if needed. */
	if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
		if (ss_state == SRCU_SIZE_ALLOC)
			init_srcu_struct_nodes(ssp, GFP_KERNEL);
		else
			smp_store_release(&ssp->srcu_size_state, ss_state + 1);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;
	unsigned long sgsne;

	if (snp)
		for (; snp != NULL; snp = snp->srcu_parent) {
			sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
			if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
			    (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
				return;
			spin_lock_irqsave_rcu_node(snp, flags);
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)) {
				spin_unlock_irqrestore_rcu_node(snp, flags);
				return;
			}
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
			spin_unlock_irqrestore_rcu_node(snp, flags);
		}
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	unsigned long sgsne;
	struct srcu_node *snp;
	struct srcu_node *snp_leaf;
	unsigned long snp_seq;

	/* Ensure that snp node tree is fully initialized before traversing it */
	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		snp_leaf = NULL;
	else
		snp_leaf = sdp->mynode;

	if (snp_leaf)
		/* Each pass through the loop does one level of the srcu_node tree. */
		for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
			if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != snp_leaf)
				return; /* GP already done and CBs recorded. */
			spin_lock_irqsave_rcu_node(snp, flags);
			snp_seq = snp->srcu_have_cbs[idx];
			if (!srcu_invl_snp_seq(snp_seq) && ULONG_CMP_GE(snp_seq, s)) {
				if (snp == snp_leaf && snp_seq == s)
					snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
				spin_unlock_irqrestore_rcu_node(snp, flags);
				if (snp == snp_leaf && snp_seq != s) {
					srcu_schedule_cbs_sdp(sdp, do_norm ? SRCU_INTERVAL : 0);
					return;
				}
				if (!do_norm)
					srcu_funnel_exp_start(ssp, snp, s);
				return;
			}
			snp->srcu_have_cbs[idx] = s;
			if (snp == snp_leaf)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
			spin_unlock_irqrestore_rcu_node(snp, flags);
		}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle.  */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}
899a602538eSPaul E. McKenney 
900a602538eSPaul E. McKenney /*
90129d2bb94SPaul E. McKenney  * Start an SRCU grace period, and also queue the callback if non-NULL.
90229d2bb94SPaul E. McKenney  */
9035358c9faSPaul E. McKenney static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
9045358c9faSPaul E. McKenney 					     struct rcu_head *rhp, bool do_norm)
90529d2bb94SPaul E. McKenney {
90629d2bb94SPaul E. McKenney 	unsigned long flags;
90729d2bb94SPaul E. McKenney 	int idx;
90829d2bb94SPaul E. McKenney 	bool needexp = false;
90929d2bb94SPaul E. McKenney 	bool needgp = false;
91029d2bb94SPaul E. McKenney 	unsigned long s;
91129d2bb94SPaul E. McKenney 	struct srcu_data *sdp;
9120b56f953SNeeraj Upadhyay 	struct srcu_node *sdp_mynode;
9130b56f953SNeeraj Upadhyay 	int ss_state;
91429d2bb94SPaul E. McKenney 
9155358c9faSPaul E. McKenney 	check_init_srcu_struct(ssp);
91629d2bb94SPaul E. McKenney 	idx = srcu_read_lock(ssp);
9170b56f953SNeeraj Upadhyay 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
9180b56f953SNeeraj Upadhyay 	if (ss_state < SRCU_SIZE_WAIT_CALL)
919994f7068SPaul E. McKenney 		sdp = per_cpu_ptr(ssp->sda, 0);
920994f7068SPaul E. McKenney 	else
92129d2bb94SPaul E. McKenney 		sdp = raw_cpu_ptr(ssp->sda);
92229d2bb94SPaul E. McKenney 	spin_lock_irqsave_rcu_node(sdp, flags);
9235358c9faSPaul E. McKenney 	if (rhp)
92429d2bb94SPaul E. McKenney 		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
92529d2bb94SPaul E. McKenney 	rcu_segcblist_advance(&sdp->srcu_cblist,
92629d2bb94SPaul E. McKenney 			      rcu_seq_current(&ssp->srcu_gp_seq));
92729d2bb94SPaul E. McKenney 	s = rcu_seq_snap(&ssp->srcu_gp_seq);
92829d2bb94SPaul E. McKenney 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
92929d2bb94SPaul E. McKenney 	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
93029d2bb94SPaul E. McKenney 		sdp->srcu_gp_seq_needed = s;
93129d2bb94SPaul E. McKenney 		needgp = true;
93229d2bb94SPaul E. McKenney 	}
93329d2bb94SPaul E. McKenney 	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
93429d2bb94SPaul E. McKenney 		sdp->srcu_gp_seq_needed_exp = s;
93529d2bb94SPaul E. McKenney 		needexp = true;
93629d2bb94SPaul E. McKenney 	}
93729d2bb94SPaul E. McKenney 	spin_unlock_irqrestore_rcu_node(sdp, flags);
9380b56f953SNeeraj Upadhyay 
9390b56f953SNeeraj Upadhyay 	/* Ensure that the snp node tree is fully initialized before traversing it. */
9400b56f953SNeeraj Upadhyay 	if (ss_state < SRCU_SIZE_WAIT_BARRIER)
9410b56f953SNeeraj Upadhyay 		sdp_mynode = NULL;
9420b56f953SNeeraj Upadhyay 	else
9430b56f953SNeeraj Upadhyay 		sdp_mynode = sdp->mynode;
9440b56f953SNeeraj Upadhyay 
94529d2bb94SPaul E. McKenney 	if (needgp)
94629d2bb94SPaul E. McKenney 		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
94729d2bb94SPaul E. McKenney 	else if (needexp)
9480b56f953SNeeraj Upadhyay 		srcu_funnel_exp_start(ssp, sdp_mynode, s);
94929d2bb94SPaul E. McKenney 	srcu_read_unlock(ssp, idx);
9505358c9faSPaul E. McKenney 	return s;
95129d2bb94SPaul E. McKenney }
95229d2bb94SPaul E. McKenney 
95329d2bb94SPaul E. McKenney /*
954da915ad5SPaul E. McKenney  * Enqueue an SRCU callback on the srcu_data structure associated with
955da915ad5SPaul E. McKenney  * the current CPU and the specified srcu_struct structure, initiating
956da915ad5SPaul E. McKenney  * grace-period processing if it is not already running.
957dad81a20SPaul E. McKenney  *
958dad81a20SPaul E. McKenney  * Note that all CPUs must agree that the grace period extended beyond
959dad81a20SPaul E. McKenney  * all pre-existing SRCU read-side critical sections.  On systems with
960dad81a20SPaul E. McKenney  * more than one CPU, this means that when "func()" is invoked, each CPU
961dad81a20SPaul E. McKenney  * is guaranteed to have executed a full memory barrier since the end of
962dad81a20SPaul E. McKenney  * its last corresponding SRCU read-side critical section whose beginning
9635ef98a63SPaul E. McKenney  * preceded the call to call_srcu().  It also means that each CPU executing
964dad81a20SPaul E. McKenney  * an SRCU read-side critical section that continues beyond the start of
9655ef98a63SPaul E. McKenney  * "func()" must have executed a memory barrier after the call_srcu()
966dad81a20SPaul E. McKenney  * but before the beginning of that SRCU read-side critical section.
967dad81a20SPaul E. McKenney  * Note that these guarantees include CPUs that are offline, idle, or
968dad81a20SPaul E. McKenney  * executing in user mode, as well as CPUs that are executing in the kernel.
969dad81a20SPaul E. McKenney  *
9705ef98a63SPaul E. McKenney  * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
971dad81a20SPaul E. McKenney  * resulting SRCU callback function "func()", then both CPU A and CPU
972dad81a20SPaul E. McKenney  * B are guaranteed to execute a full memory barrier during the time
9735ef98a63SPaul E. McKenney  * interval between the call to call_srcu() and the invocation of "func()".
974dad81a20SPaul E. McKenney  * This guarantee applies even if CPU A and CPU B are the same CPU (but
975dad81a20SPaul E. McKenney  * again only if the system has more than one CPU).
976dad81a20SPaul E. McKenney  *
977dad81a20SPaul E. McKenney  * Of course, these guarantees apply only for invocations of call_srcu(),
978dad81a20SPaul E. McKenney  * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
979dad81a20SPaul E. McKenney  * srcu_struct structure.
980dad81a20SPaul E. McKenney  */
98111b00045SJiang Biao static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
9821e9a038bSPaul E. McKenney 			rcu_callback_t func, bool do_norm)
983dad81a20SPaul E. McKenney {
984a602538eSPaul E. McKenney 	if (debug_rcu_head_queue(rhp)) {
985a602538eSPaul E. McKenney 		/* Probable double call_srcu(), so leak the callback. */
986a602538eSPaul E. McKenney 		WRITE_ONCE(rhp->func, srcu_leak_callback);
987a602538eSPaul E. McKenney 		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
988a602538eSPaul E. McKenney 		return;
989a602538eSPaul E. McKenney 	}
990da915ad5SPaul E. McKenney 	rhp->func = func;
9915358c9faSPaul E. McKenney 	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
9921e9a038bSPaul E. McKenney }
9931e9a038bSPaul E. McKenney 
9945a0465e1SPaul E. McKenney /**
9955a0465e1SPaul E. McKenney  * call_srcu() - Queue a callback for invocation after an SRCU grace period
996aacb5d91SPaul E. McKenney  * @ssp: srcu_struct in which to queue the callback
99727fdb35fSPaul E. McKenney  * @rhp: structure to be used for queueing the SRCU callback.
9985a0465e1SPaul E. McKenney  * @func: function to be invoked after the SRCU grace period
9995a0465e1SPaul E. McKenney  *
10005a0465e1SPaul E. McKenney  * The callback function will be invoked some time after a full SRCU
10015a0465e1SPaul E. McKenney  * grace period elapses, in other words after all pre-existing SRCU
10025a0465e1SPaul E. McKenney  * read-side critical sections have completed.  However, the callback
10035a0465e1SPaul E. McKenney  * function might well execute concurrently with other SRCU read-side
10045a0465e1SPaul E. McKenney  * critical sections that started after call_srcu() was invoked.  SRCU
10055a0465e1SPaul E. McKenney  * read-side critical sections are delimited by srcu_read_lock() and
10065a0465e1SPaul E. McKenney  * srcu_read_unlock(), and may be nested.
10075a0465e1SPaul E. McKenney  *
10085a0465e1SPaul E. McKenney  * The callback will be invoked from process context, but must nevertheless
10095a0465e1SPaul E. McKenney  * be fast and must not block.
10105a0465e1SPaul E. McKenney  */
1011aacb5d91SPaul E. McKenney void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
10121e9a038bSPaul E. McKenney 	       rcu_callback_t func)
10131e9a038bSPaul E. McKenney {
1014aacb5d91SPaul E. McKenney 	__call_srcu(ssp, rhp, func, true);
1015dad81a20SPaul E. McKenney }
1016dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(call_srcu);
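
/*
 * Editorial usage sketch (not part of this file): the typical call_srcu()
 * pattern embeds an rcu_head in the protected structure and frees that
 * structure from the callback.  The names my_srcu, struct my_node,
 * my_node_free(), and my_node_retire() are hypothetical.
 *
 *	#include <linux/slab.h>
 *	#include <linux/srcu.h>
 *
 *	DEFINE_STATIC_SRCU(my_srcu);
 *
 *	struct my_node {
 *		int key;
 *		struct rcu_head rh;
 *	};
 *
 *	static void my_node_free(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_node, rh));
 *	}
 *
 *	static void my_node_retire(struct my_node *np)
 *	{
 *		// Caller has already unpublished np.  Defer the kfree()
 *		// until all pre-existing my_srcu readers have finished.
 *		call_srcu(&my_srcu, &np->rh, my_node_free);
 *	}
 */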
1017dad81a20SPaul E. McKenney 
1018dad81a20SPaul E. McKenney /*
1019dad81a20SPaul E. McKenney  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
1020dad81a20SPaul E. McKenney  */
1021aacb5d91SPaul E. McKenney static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
1022dad81a20SPaul E. McKenney {
1023dad81a20SPaul E. McKenney 	struct rcu_synchronize rcu;
1024dad81a20SPaul E. McKenney 
1025f505d434SJakub Kicinski 	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
1026dad81a20SPaul E. McKenney 			 lock_is_held(&rcu_bh_lock_map) ||
1027dad81a20SPaul E. McKenney 			 lock_is_held(&rcu_lock_map) ||
1028dad81a20SPaul E. McKenney 			 lock_is_held(&rcu_sched_lock_map),
1029dad81a20SPaul E. McKenney 			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
1030dad81a20SPaul E. McKenney 
1031dad81a20SPaul E. McKenney 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
1032dad81a20SPaul E. McKenney 		return;
1033dad81a20SPaul E. McKenney 	might_sleep();
1034aacb5d91SPaul E. McKenney 	check_init_srcu_struct(ssp);
1035dad81a20SPaul E. McKenney 	init_completion(&rcu.completion);
1036da915ad5SPaul E. McKenney 	init_rcu_head_on_stack(&rcu.head);
1037aacb5d91SPaul E. McKenney 	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
1038dad81a20SPaul E. McKenney 	wait_for_completion(&rcu.completion);
1039da915ad5SPaul E. McKenney 	destroy_rcu_head_on_stack(&rcu.head);
104035732cf9SPaul E. McKenney 
104135732cf9SPaul E. McKenney 	/*
104235732cf9SPaul E. McKenney 	 * Make sure that later code is ordered after the SRCU grace
1043d6331980SPaul E. McKenney 	 * period.  This pairs with the spin_lock_irq_rcu_node()
104435732cf9SPaul E. McKenney 	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
104535732cf9SPaul E. McKenney 	 * because the current CPU might have been totally uninvolved with
104635732cf9SPaul E. McKenney 	 * (and thus unordered against) that grace period.
104735732cf9SPaul E. McKenney 	 */
104835732cf9SPaul E. McKenney 	smp_mb();
1049dad81a20SPaul E. McKenney }
1050dad81a20SPaul E. McKenney 
1051dad81a20SPaul E. McKenney /**
1052dad81a20SPaul E. McKenney  * synchronize_srcu_expedited - Brute-force SRCU grace period
1053aacb5d91SPaul E. McKenney  * @ssp: srcu_struct with which to synchronize.
1054dad81a20SPaul E. McKenney  *
1055dad81a20SPaul E. McKenney  * Wait for an SRCU grace period to elapse, but be more aggressive about
1056dad81a20SPaul E. McKenney  * spinning rather than blocking when waiting.
1057dad81a20SPaul E. McKenney  *
1058dad81a20SPaul E. McKenney  * Note that synchronize_srcu_expedited() has the same deadlock and
1059dad81a20SPaul E. McKenney  * memory-ordering properties as does synchronize_srcu().
1060dad81a20SPaul E. McKenney  */
1061aacb5d91SPaul E. McKenney void synchronize_srcu_expedited(struct srcu_struct *ssp)
1062dad81a20SPaul E. McKenney {
1063aacb5d91SPaul E. McKenney 	__synchronize_srcu(ssp, rcu_gp_is_normal());
1064dad81a20SPaul E. McKenney }
1065dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
1066dad81a20SPaul E. McKenney 
1067dad81a20SPaul E. McKenney /**
1068dad81a20SPaul E. McKenney  * synchronize_srcu - wait for prior SRCU read-side critical-section completion
1069aacb5d91SPaul E. McKenney  * @ssp: srcu_struct with which to synchronize.
1070dad81a20SPaul E. McKenney  *
1071dad81a20SPaul E. McKenney  * Wait for the counts of both indexes to drain to zero.  To avoid
1072dad81a20SPaul E. McKenney  * possible starvation of synchronize_srcu(), it first waits for the
1073da915ad5SPaul E. McKenney  * count of the index=((->srcu_idx & 1) ^ 1) to drain to zero,
1074da915ad5SPaul E. McKenney  * and then flips ->srcu_idx and waits for the count of the other index.
1075dad81a20SPaul E. McKenney  *
1076dad81a20SPaul E. McKenney  * Can block; must be called from process context.
1077dad81a20SPaul E. McKenney  *
1078dad81a20SPaul E. McKenney  * Note that it is illegal to call synchronize_srcu() from the corresponding
1079dad81a20SPaul E. McKenney  * SRCU read-side critical section; doing so will result in deadlock.
1080dad81a20SPaul E. McKenney  * However, it is perfectly legal to call synchronize_srcu() on one
1081dad81a20SPaul E. McKenney  * srcu_struct from some other srcu_struct's read-side critical section,
1082dad81a20SPaul E. McKenney  * as long as the resulting graph of srcu_structs is acyclic.
1083dad81a20SPaul E. McKenney  *
1084dad81a20SPaul E. McKenney  * There are memory-ordering constraints implied by synchronize_srcu().
1085dad81a20SPaul E. McKenney  * On systems with more than one CPU, when synchronize_srcu() returns,
1086dad81a20SPaul E. McKenney  * each CPU is guaranteed to have executed a full memory barrier since
10876eb95cc4SPaul E. McKenney  * the end of its last corresponding SRCU read-side critical section
1088dad81a20SPaul E. McKenney  * whose beginning preceded the call to synchronize_srcu().  In addition,
1089dad81a20SPaul E. McKenney  * each CPU having an SRCU read-side critical section that extends beyond
1090dad81a20SPaul E. McKenney  * the return from synchronize_srcu() is guaranteed to have executed a
1091dad81a20SPaul E. McKenney  * full memory barrier after the beginning of synchronize_srcu() and before
1092dad81a20SPaul E. McKenney  * the beginning of that SRCU read-side critical section.  Note that these
1093dad81a20SPaul E. McKenney  * guarantees include CPUs that are offline, idle, or executing in user mode,
1094dad81a20SPaul E. McKenney  * as well as CPUs that are executing in the kernel.
1095dad81a20SPaul E. McKenney  *
1096dad81a20SPaul E. McKenney  * Furthermore, if CPU A invoked synchronize_srcu(), which returned
1097dad81a20SPaul E. McKenney  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
1098dad81a20SPaul E. McKenney  * to have executed a full memory barrier during the execution of
1099dad81a20SPaul E. McKenney  * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
1100dad81a20SPaul E. McKenney  * are the same CPU, but again only if the system has more than one CPU.
1101dad81a20SPaul E. McKenney  *
1102dad81a20SPaul E. McKenney  * Of course, these memory-ordering guarantees apply only when
1103dad81a20SPaul E. McKenney  * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
1104dad81a20SPaul E. McKenney  * passed the same srcu_struct structure.
11052da4b2a7SPaul E. McKenney  *
11063d3a0d1bSPaul E. McKenney  * Implementation of these memory-ordering guarantees is similar to
11073d3a0d1bSPaul E. McKenney  * that of synchronize_rcu().
11083d3a0d1bSPaul E. McKenney  *
11092da4b2a7SPaul E. McKenney  * If SRCU is likely idle, expedite the first request.  This semantic
11102da4b2a7SPaul E. McKenney  * was provided by Classic SRCU, and is relied upon by its users, so TREE
11112da4b2a7SPaul E. McKenney  * SRCU must also provide it.  Note that detecting idleness is heuristic
11122da4b2a7SPaul E. McKenney  * and subject to both false positives and negatives.
1113dad81a20SPaul E. McKenney  */
1114aacb5d91SPaul E. McKenney void synchronize_srcu(struct srcu_struct *ssp)
1115dad81a20SPaul E. McKenney {
1116aacb5d91SPaul E. McKenney 	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
1117aacb5d91SPaul E. McKenney 		synchronize_srcu_expedited(ssp);
1118dad81a20SPaul E. McKenney 	else
1119aacb5d91SPaul E. McKenney 		__synchronize_srcu(ssp, true);
1120dad81a20SPaul E. McKenney }
1121dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(synchronize_srcu);
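
/*
 * Editorial usage sketch (not part of this file): a hypothetical reader
 * and updater pair illustrating the guarantees documented above.  All of
 * my_srcu, my_lock, my_ptr, struct my_data, my_reader(), and my_update()
 * are made-up names.
 *
 *	#include <linux/mutex.h>
 *	#include <linux/slab.h>
 *	#include <linux/srcu.h>
 *
 *	struct my_data {
 *		int val;
 *	};
 *
 *	DEFINE_STATIC_SRCU(my_srcu);
 *	static DEFINE_MUTEX(my_lock);
 *	static struct my_data __rcu *my_ptr;
 *
 *	static int my_reader(void)
 *	{
 *		int idx;
 *		int val = -1;
 *		struct my_data *p;
 *
 *		idx = srcu_read_lock(&my_srcu);
 *		p = srcu_dereference(my_ptr, &my_srcu);
 *		if (p)
 *			val = p->val;	// Sleeping is legal here; this is SRCU.
 *		srcu_read_unlock(&my_srcu, idx);
 *		// Calling synchronize_srcu(&my_srcu) between the lock and
 *		// unlock above would have deadlocked, per the rules above.
 *		return val;
 *	}
 *
 *	static void my_update(struct my_data *newp)
 *	{
 *		struct my_data *oldp;
 *
 *		mutex_lock(&my_lock);
 *		oldp = rcu_replace_pointer(my_ptr, newp,
 *					   lockdep_is_held(&my_lock));
 *		mutex_unlock(&my_lock);
 *		synchronize_srcu(&my_srcu);	// Or synchronize_srcu_expedited().
 *		kfree(oldp);	// Safe: no pre-existing reader can still see oldp.
 *	}
 */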
1122dad81a20SPaul E. McKenney 
11235358c9faSPaul E. McKenney /**
11245358c9faSPaul E. McKenney  * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
11255358c9faSPaul E. McKenney  * @ssp: srcu_struct to provide cookie for.
11265358c9faSPaul E. McKenney  *
11275358c9faSPaul E. McKenney  * This function returns a cookie that can be passed to
11285358c9faSPaul E. McKenney  * poll_state_synchronize_srcu(), which will return true if a full grace
11295358c9faSPaul E. McKenney  * period has elapsed in the meantime.  It is the caller's responsibility
11305358c9faSPaul E. McKenney  * to make sure that grace period happens, for example, by invoking
11315358c9faSPaul E. McKenney  * call_srcu() after return from get_state_synchronize_srcu().
11325358c9faSPaul E. McKenney  */
11335358c9faSPaul E. McKenney unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
11345358c9faSPaul E. McKenney {
11355358c9faSPaul E. McKenney 	// Any prior manipulation of SRCU-protected data must happen
11365358c9faSPaul E. McKenney 	// before the load from ->srcu_gp_seq.
11375358c9faSPaul E. McKenney 	smp_mb();
11385358c9faSPaul E. McKenney 	return rcu_seq_snap(&ssp->srcu_gp_seq);
11395358c9faSPaul E. McKenney }
11405358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
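
/*
 * Editorial sketch (not part of this file): get_state_synchronize_srcu()
 * only snapshots the cookie, so, as noted above, the caller must separately
 * ensure that a grace period actually starts, for example via call_srcu().
 * Here my_srcu, struct my_obj (which embeds an rcu_head named rh), and
 * my_obj_free() are hypothetical, along the lines of the call_srcu()
 * sketch earlier in this file.
 *
 *	static bool my_obj_retire(struct my_obj *p)
 *	{
 *		unsigned long cookie;
 *
 *		cookie = get_state_synchronize_srcu(&my_srcu);
 *		call_srcu(&my_srcu, &p->rh, my_obj_free); // Starts any needed GP.
 *		// Returns true only if a full grace period has already elapsed
 *		// since the snapshot, which is unlikely this soon; callers
 *		// normally stash the cookie and re-poll later.
 *		return poll_state_synchronize_srcu(&my_srcu, cookie);
 *	}
 */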
11415358c9faSPaul E. McKenney 
11425358c9faSPaul E. McKenney /**
11435358c9faSPaul E. McKenney  * start_poll_synchronize_srcu - Provide cookie and start grace period
11445358c9faSPaul E. McKenney  * @ssp: srcu_struct to provide cookie for.
11455358c9faSPaul E. McKenney  *
11465358c9faSPaul E. McKenney  * This function returns a cookie that can be passed to
11475358c9faSPaul E. McKenney  * poll_state_synchronize_srcu(), which will return true if a full grace
11485358c9faSPaul E. McKenney  * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
11495358c9faSPaul E. McKenney  * this function also ensures that any needed SRCU grace period will be
11505358c9faSPaul E. McKenney  * started.  This convenience does come at a cost in terms of CPU overhead.
11515358c9faSPaul E. McKenney  */
11525358c9faSPaul E. McKenney unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
11535358c9faSPaul E. McKenney {
11545358c9faSPaul E. McKenney 	return srcu_gp_start_if_needed(ssp, NULL, true);
11555358c9faSPaul E. McKenney }
11565358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
11575358c9faSPaul E. McKenney 
11585358c9faSPaul E. McKenney /**
11595358c9faSPaul E. McKenney  * poll_state_synchronize_srcu - Has cookie's grace period ended?
11605358c9faSPaul E. McKenney  * @ssp: srcu_struct to provide cookie for.
11615358c9faSPaul E. McKenney  * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
11625358c9faSPaul E. McKenney  *
11635358c9faSPaul E. McKenney  * This function takes the cookie that was returned from either
11645358c9faSPaul E. McKenney  * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
11655358c9faSPaul E. McKenney  * returns @true if an SRCU grace period elapsed since the time that the
11665358c9faSPaul E. McKenney  * cookie was created.
11674e7ccfaeSPaul E. McKenney  *
11684e7ccfaeSPaul E. McKenney  * Because cookies are finite in size, wrapping/overflow is possible.
11694e7ccfaeSPaul E. McKenney  * This is more pronounced on 32-bit systems, where cookies are 32 bits
11704e7ccfaeSPaul E. McKenney  * and wrapping could in theory happen in about 14 hours assuming
11714e7ccfaeSPaul E. McKenney  * 25-microsecond expedited SRCU grace periods.  However, a more likely
11724e7ccfaeSPaul E. McKenney  * overflow lower bound is on the order of 24 days in the case of
11734e7ccfaeSPaul E. McKenney  * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
11744e7ccfaeSPaul E. McKenney  * system requires geologic timespans, as in more than seven million years
11754e7ccfaeSPaul E. McKenney  * even for expedited SRCU grace periods.
11764e7ccfaeSPaul E. McKenney  *
11774e7ccfaeSPaul E. McKenney  * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
11784e7ccfaeSPaul E. McKenney  * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU.  Tiny SRCU uses
11794e7ccfaeSPaul E. McKenney  * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
11804e7ccfaeSPaul E. McKenney  * few minutes.  If this proves to be a problem, this counter will be
11814e7ccfaeSPaul E. McKenney  * expanded to the same size as for Tree SRCU.
11825358c9faSPaul E. McKenney  */
11835358c9faSPaul E. McKenney bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
11845358c9faSPaul E. McKenney {
11855358c9faSPaul E. McKenney 	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
11865358c9faSPaul E. McKenney 		return false;
11875358c9faSPaul E. McKenney 	// Ensure that the end of the SRCU grace period happens before
11885358c9faSPaul E. McKenney 	// any subsequent code that the caller might execute.
11895358c9faSPaul E. McKenney 	smp_mb(); // ^^^
11905358c9faSPaul E. McKenney 	return true;
11915358c9faSPaul E. McKenney }
11925358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
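
/*
 * Editorial sketch (not part of this file): a hypothetical polled
 * deferred-free scheme that uses start_poll_synchronize_srcu() to obtain a
 * cookie and kick off any needed grace period, then later checks the cookie
 * instead of blocking in synchronize_srcu().  The names my_srcu, my_gc_list,
 * struct my_obj, my_obj_defer_free(), and my_gc() are made up, and some
 * update-side lock is assumed to serialize both functions.
 *
 *	#include <linux/list.h>
 *	#include <linux/slab.h>
 *	#include <linux/srcu.h>
 *
 *	struct my_obj {
 *		struct list_head gc_entry;
 *		unsigned long gp_cookie;
 *	};
 *
 *	static LIST_HEAD(my_gc_list);
 *
 *	static void my_obj_defer_free(struct my_obj *p)
 *	{
 *		p->gp_cookie = start_poll_synchronize_srcu(&my_srcu);
 *		list_add_tail(&p->gc_entry, &my_gc_list);
 *	}
 *
 *	static void my_gc(void)		// Invoked periodically.
 *	{
 *		struct my_obj *p, *tmp;
 *
 *		list_for_each_entry_safe(p, tmp, &my_gc_list, gc_entry) {
 *			// Entries were added in cookie order, so stop at the
 *			// first one whose grace period has not yet elapsed.
 *			if (!poll_state_synchronize_srcu(&my_srcu, p->gp_cookie))
 *				break;
 *			list_del(&p->gc_entry);
 *			kfree(p);
 *		}
 *	}
 */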
11935358c9faSPaul E. McKenney 
1194da915ad5SPaul E. McKenney /*
1195da915ad5SPaul E. McKenney  * Callback function for srcu_barrier() use.
1196da915ad5SPaul E. McKenney  */
1197da915ad5SPaul E. McKenney static void srcu_barrier_cb(struct rcu_head *rhp)
1198da915ad5SPaul E. McKenney {
1199da915ad5SPaul E. McKenney 	struct srcu_data *sdp;
1200aacb5d91SPaul E. McKenney 	struct srcu_struct *ssp;
1201da915ad5SPaul E. McKenney 
1202da915ad5SPaul E. McKenney 	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
1203aacb5d91SPaul E. McKenney 	ssp = sdp->ssp;
1204aacb5d91SPaul E. McKenney 	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1205aacb5d91SPaul E. McKenney 		complete(&ssp->srcu_barrier_completion);
1206da915ad5SPaul E. McKenney }
1207da915ad5SPaul E. McKenney 
1208994f7068SPaul E. McKenney /*
1209994f7068SPaul E. McKenney  * Enqueue an srcu_barrier() callback on the specified srcu_data
1210994f7068SPaul E. McKenney  * structure's ->cblist, but only if that ->cblist already has at least one
1211994f7068SPaul E. McKenney  * callback enqueued.  Note that if a CPU already has callbacks enqueued,
1212994f7068SPaul E. McKenney  * it must have already registered the need for a future grace period,
1213994f7068SPaul E. McKenney  * so all we need do is enqueue a callback that will use the same grace
1214994f7068SPaul E. McKenney  * period as the last callback already in the queue.
1215994f7068SPaul E. McKenney  */
1216994f7068SPaul E. McKenney static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
1217994f7068SPaul E. McKenney {
1218994f7068SPaul E. McKenney 	spin_lock_irq_rcu_node(sdp);
1219994f7068SPaul E. McKenney 	atomic_inc(&ssp->srcu_barrier_cpu_cnt);
1220994f7068SPaul E. McKenney 	sdp->srcu_barrier_head.func = srcu_barrier_cb;
1221994f7068SPaul E. McKenney 	debug_rcu_head_queue(&sdp->srcu_barrier_head);
1222994f7068SPaul E. McKenney 	if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
1223994f7068SPaul E. McKenney 				   &sdp->srcu_barrier_head)) {
1224994f7068SPaul E. McKenney 		debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
1225994f7068SPaul E. McKenney 		atomic_dec(&ssp->srcu_barrier_cpu_cnt);
1226994f7068SPaul E. McKenney 	}
1227994f7068SPaul E. McKenney 	spin_unlock_irq_rcu_node(sdp);
1228994f7068SPaul E. McKenney }
1229994f7068SPaul E. McKenney 
1230dad81a20SPaul E. McKenney /**
1231dad81a20SPaul E. McKenney  * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1232aacb5d91SPaul E. McKenney  * @ssp: srcu_struct on which to wait for in-flight callbacks.
1233dad81a20SPaul E. McKenney  */
1234aacb5d91SPaul E. McKenney void srcu_barrier(struct srcu_struct *ssp)
1235dad81a20SPaul E. McKenney {
1236da915ad5SPaul E. McKenney 	int cpu;
1237e2f63836SPaul E. McKenney 	int idx;
1238aacb5d91SPaul E. McKenney 	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
1239da915ad5SPaul E. McKenney 
1240aacb5d91SPaul E. McKenney 	check_init_srcu_struct(ssp);
1241aacb5d91SPaul E. McKenney 	mutex_lock(&ssp->srcu_barrier_mutex);
1242aacb5d91SPaul E. McKenney 	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
1243da915ad5SPaul E. McKenney 		smp_mb(); /* Force ordering following return. */
1244aacb5d91SPaul E. McKenney 		mutex_unlock(&ssp->srcu_barrier_mutex);
1245da915ad5SPaul E. McKenney 		return; /* Someone else did our work for us. */
1246da915ad5SPaul E. McKenney 	}
1247aacb5d91SPaul E. McKenney 	rcu_seq_start(&ssp->srcu_barrier_seq);
1248aacb5d91SPaul E. McKenney 	init_completion(&ssp->srcu_barrier_completion);
1249da915ad5SPaul E. McKenney 
1250da915ad5SPaul E. McKenney 	/* Initial count prevents reaching zero until all CBs are posted. */
1251aacb5d91SPaul E. McKenney 	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
1252da915ad5SPaul E. McKenney 
1253e2f63836SPaul E. McKenney 	idx = srcu_read_lock(ssp);
1254994f7068SPaul E. McKenney 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
1255994f7068SPaul E. McKenney 		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
1256994f7068SPaul E. McKenney 	else
1257994f7068SPaul E. McKenney 		for_each_possible_cpu(cpu)
1258994f7068SPaul E. McKenney 			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
1259e2f63836SPaul E. McKenney 	srcu_read_unlock(ssp, idx);
1260da915ad5SPaul E. McKenney 
1261da915ad5SPaul E. McKenney 	/* Remove the initial count, at which point reaching zero can happen. */
1262aacb5d91SPaul E. McKenney 	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1263aacb5d91SPaul E. McKenney 		complete(&ssp->srcu_barrier_completion);
1264aacb5d91SPaul E. McKenney 	wait_for_completion(&ssp->srcu_barrier_completion);
1265da915ad5SPaul E. McKenney 
1266aacb5d91SPaul E. McKenney 	rcu_seq_end(&ssp->srcu_barrier_seq);
1267aacb5d91SPaul E. McKenney 	mutex_unlock(&ssp->srcu_barrier_mutex);
1268dad81a20SPaul E. McKenney }
1269dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_barrier);
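
/*
 * Editorial sketch (not part of this file): a module that uses call_srcu()
 * typically needs srcu_barrier() on its teardown path so that no SRCU
 * callback can still be queued or running once the module's callback
 * functions and data go away.  my_srcu and my_exit() are hypothetical.
 *
 *	static void __exit my_exit(void)
 *	{
 *		// By this point, nothing can invoke call_srcu(&my_srcu, ...).
 *		srcu_barrier(&my_srcu);		// Wait out queued callbacks.
 *		cleanup_srcu_struct(&my_srcu);	// Only if my_srcu was set up
 *						// with init_srcu_struct().
 *	}
 */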
1270dad81a20SPaul E. McKenney 
1271dad81a20SPaul E. McKenney /**
1272dad81a20SPaul E. McKenney  * srcu_batches_completed - return batches completed.
1273aacb5d91SPaul E. McKenney  * @ssp: srcu_struct on which to report batch completion.
1274dad81a20SPaul E. McKenney  *
1275dad81a20SPaul E. McKenney  * Report the number of batches, correlated with, but not necessarily
1276dad81a20SPaul E. McKenney  * precisely the same as, the number of grace periods that have elapsed.
1277dad81a20SPaul E. McKenney  */
1278aacb5d91SPaul E. McKenney unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1279dad81a20SPaul E. McKenney {
128039f91504SPaul E. McKenney 	return READ_ONCE(ssp->srcu_idx);
1281dad81a20SPaul E. McKenney }
1282dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_batches_completed);
1283dad81a20SPaul E. McKenney 
1284dad81a20SPaul E. McKenney /*
1285da915ad5SPaul E. McKenney  * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
1286da915ad5SPaul E. McKenney  * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when the scan has
1287da915ad5SPaul E. McKenney  * completed in that state.
1288dad81a20SPaul E. McKenney  */
1289aacb5d91SPaul E. McKenney static void srcu_advance_state(struct srcu_struct *ssp)
1290dad81a20SPaul E. McKenney {
1291dad81a20SPaul E. McKenney 	int idx;
1292dad81a20SPaul E. McKenney 
1293aacb5d91SPaul E. McKenney 	mutex_lock(&ssp->srcu_gp_mutex);
1294da915ad5SPaul E. McKenney 
1295dad81a20SPaul E. McKenney 	/*
1296dad81a20SPaul E. McKenney 	 * Because readers might be delayed for an extended period after
1297da915ad5SPaul E. McKenney 	 * fetching ->srcu_idx for their index, at any point in time there
1298dad81a20SPaul E. McKenney 	 * might well be readers using both idx=0 and idx=1.  We therefore
1299dad81a20SPaul E. McKenney 	 * need to wait for readers to clear from both index values before
1300dad81a20SPaul E. McKenney 	 * invoking a callback.
1301dad81a20SPaul E. McKenney 	 *
1302dad81a20SPaul E. McKenney 	 * The load-acquire ensures that we see the accesses performed
1303dad81a20SPaul E. McKenney 	 * by the prior grace period.
1304dad81a20SPaul E. McKenney 	 */
1305aacb5d91SPaul E. McKenney 	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
1306dad81a20SPaul E. McKenney 	if (idx == SRCU_STATE_IDLE) {
1307aacb5d91SPaul E. McKenney 		spin_lock_irq_rcu_node(ssp);
1308aacb5d91SPaul E. McKenney 		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1309aacb5d91SPaul E. McKenney 			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
1310aacb5d91SPaul E. McKenney 			spin_unlock_irq_rcu_node(ssp);
1311aacb5d91SPaul E. McKenney 			mutex_unlock(&ssp->srcu_gp_mutex);
1312dad81a20SPaul E. McKenney 			return;
1313dad81a20SPaul E. McKenney 		}
1314aacb5d91SPaul E. McKenney 		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
1315dad81a20SPaul E. McKenney 		if (idx == SRCU_STATE_IDLE)
1316aacb5d91SPaul E. McKenney 			srcu_gp_start(ssp);
1317aacb5d91SPaul E. McKenney 		spin_unlock_irq_rcu_node(ssp);
1318da915ad5SPaul E. McKenney 		if (idx != SRCU_STATE_IDLE) {
1319aacb5d91SPaul E. McKenney 			mutex_unlock(&ssp->srcu_gp_mutex);
1320dad81a20SPaul E. McKenney 			return; /* Someone else started the grace period. */
1321dad81a20SPaul E. McKenney 		}
1322da915ad5SPaul E. McKenney 	}
1323dad81a20SPaul E. McKenney 
1324aacb5d91SPaul E. McKenney 	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1325aacb5d91SPaul E. McKenney 		idx = 1 ^ (ssp->srcu_idx & 1);
1326aacb5d91SPaul E. McKenney 		if (!try_check_zero(ssp, idx, 1)) {
1327aacb5d91SPaul E. McKenney 			mutex_unlock(&ssp->srcu_gp_mutex);
1328dad81a20SPaul E. McKenney 			return; /* readers present, retry later. */
1329da915ad5SPaul E. McKenney 		}
1330aacb5d91SPaul E. McKenney 		srcu_flip(ssp);
133171042606SPaul E. McKenney 		spin_lock_irq_rcu_node(ssp);
1332aacb5d91SPaul E. McKenney 		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
133371042606SPaul E. McKenney 		spin_unlock_irq_rcu_node(ssp);
1334dad81a20SPaul E. McKenney 	}
1335dad81a20SPaul E. McKenney 
1336aacb5d91SPaul E. McKenney 	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1337dad81a20SPaul E. McKenney 
1338dad81a20SPaul E. McKenney 		/*
1339dad81a20SPaul E. McKenney 		 * SRCU read-side critical sections are normally short,
1340dad81a20SPaul E. McKenney 		 * so check at least twice in quick succession after a flip.
1341dad81a20SPaul E. McKenney 		 */
1342aacb5d91SPaul E. McKenney 		idx = 1 ^ (ssp->srcu_idx & 1);
1343aacb5d91SPaul E. McKenney 		if (!try_check_zero(ssp, idx, 2)) {
1344aacb5d91SPaul E. McKenney 			mutex_unlock(&ssp->srcu_gp_mutex);
1345da915ad5SPaul E. McKenney 			return; /* readers present, retry later. */
1346da915ad5SPaul E. McKenney 		}
1347aacb5d91SPaul E. McKenney 		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
1348dad81a20SPaul E. McKenney 	}
1349dad81a20SPaul E. McKenney }
1350dad81a20SPaul E. McKenney 
1351dad81a20SPaul E. McKenney /*
1352dad81a20SPaul E. McKenney  * Invoke a limited number of SRCU callbacks that have passed through
1353dad81a20SPaul E. McKenney  * their grace period.  If there are more to do, SRCU will reschedule
1354dad81a20SPaul E. McKenney  * this work item.  Note that needed memory barriers have been executed
1355dad81a20SPaul E. McKenney  * in this task's context by srcu_readers_active_idx_check().
1356dad81a20SPaul E. McKenney  */
1357da915ad5SPaul E. McKenney static void srcu_invoke_callbacks(struct work_struct *work)
1358dad81a20SPaul E. McKenney {
1359ae5c2341SJoel Fernandes (Google) 	long len;
1360da915ad5SPaul E. McKenney 	bool more;
1361dad81a20SPaul E. McKenney 	struct rcu_cblist ready_cbs;
1362dad81a20SPaul E. McKenney 	struct rcu_head *rhp;
1363da915ad5SPaul E. McKenney 	struct srcu_data *sdp;
1364aacb5d91SPaul E. McKenney 	struct srcu_struct *ssp;
1365dad81a20SPaul E. McKenney 
1366e81baf4cSSebastian Andrzej Siewior 	sdp = container_of(work, struct srcu_data, work);
1367e81baf4cSSebastian Andrzej Siewior 
1368aacb5d91SPaul E. McKenney 	ssp = sdp->ssp;
1369dad81a20SPaul E. McKenney 	rcu_cblist_init(&ready_cbs);
1370d6331980SPaul E. McKenney 	spin_lock_irq_rcu_node(sdp);
1371da915ad5SPaul E. McKenney 	rcu_segcblist_advance(&sdp->srcu_cblist,
1372aacb5d91SPaul E. McKenney 			      rcu_seq_current(&ssp->srcu_gp_seq));
1373da915ad5SPaul E. McKenney 	if (sdp->srcu_cblist_invoking ||
1374da915ad5SPaul E. McKenney 	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
1375d6331980SPaul E. McKenney 		spin_unlock_irq_rcu_node(sdp);
1376da915ad5SPaul E. McKenney 		return;  /* Someone else on the job or nothing to do. */
1377da915ad5SPaul E. McKenney 	}
1378da915ad5SPaul E. McKenney 
1379da915ad5SPaul E. McKenney 	/* We are on the job!  Extract and invoke ready callbacks. */
1380da915ad5SPaul E. McKenney 	sdp->srcu_cblist_invoking = true;
1381da915ad5SPaul E. McKenney 	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
1382ae5c2341SJoel Fernandes (Google) 	len = ready_cbs.len;
1383d6331980SPaul E. McKenney 	spin_unlock_irq_rcu_node(sdp);
1384dad81a20SPaul E. McKenney 	rhp = rcu_cblist_dequeue(&ready_cbs);
1385dad81a20SPaul E. McKenney 	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
1386a602538eSPaul E. McKenney 		debug_rcu_head_unqueue(rhp);
1387dad81a20SPaul E. McKenney 		local_bh_disable();
1388dad81a20SPaul E. McKenney 		rhp->func(rhp);
1389dad81a20SPaul E. McKenney 		local_bh_enable();
1390dad81a20SPaul E. McKenney 	}
1391ae5c2341SJoel Fernandes (Google) 	WARN_ON_ONCE(ready_cbs.len);
1392da915ad5SPaul E. McKenney 
1393da915ad5SPaul E. McKenney 	/*
1394da915ad5SPaul E. McKenney 	 * Update counts, accelerate new callbacks, and if needed,
1395da915ad5SPaul E. McKenney 	 * schedule another round of callback invocation.
1396da915ad5SPaul E. McKenney 	 */
1397d6331980SPaul E. McKenney 	spin_lock_irq_rcu_node(sdp);
1398ae5c2341SJoel Fernandes (Google) 	rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
1399da915ad5SPaul E. McKenney 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
1400aacb5d91SPaul E. McKenney 				       rcu_seq_snap(&ssp->srcu_gp_seq));
1401da915ad5SPaul E. McKenney 	sdp->srcu_cblist_invoking = false;
1402da915ad5SPaul E. McKenney 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
1403d6331980SPaul E. McKenney 	spin_unlock_irq_rcu_node(sdp);
1404da915ad5SPaul E. McKenney 	if (more)
1405da915ad5SPaul E. McKenney 		srcu_schedule_cbs_sdp(sdp, 0);
1406dad81a20SPaul E. McKenney }
1407dad81a20SPaul E. McKenney 
1408dad81a20SPaul E. McKenney /*
1409dad81a20SPaul E. McKenney  * Finished one round of SRCU grace-period processing.  Start another if there
1410dad81a20SPaul E. McKenney  * are more SRCU callbacks queued; otherwise put SRCU into a not-running state.
1411dad81a20SPaul E. McKenney  */
1412aacb5d91SPaul E. McKenney static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1413dad81a20SPaul E. McKenney {
1414da915ad5SPaul E. McKenney 	bool pushgp = true;
1415dad81a20SPaul E. McKenney 
1416aacb5d91SPaul E. McKenney 	spin_lock_irq_rcu_node(ssp);
1417aacb5d91SPaul E. McKenney 	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1418aacb5d91SPaul E. McKenney 		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
1419da915ad5SPaul E. McKenney 			/* All requests fulfilled, time to go idle. */
1420da915ad5SPaul E. McKenney 			pushgp = false;
1421dad81a20SPaul E. McKenney 		}
1422aacb5d91SPaul E. McKenney 	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
1423da915ad5SPaul E. McKenney 		/* Outstanding request and no GP.  Start one. */
1424aacb5d91SPaul E. McKenney 		srcu_gp_start(ssp);
1425da915ad5SPaul E. McKenney 	}
1426aacb5d91SPaul E. McKenney 	spin_unlock_irq_rcu_node(ssp);
1427dad81a20SPaul E. McKenney 
1428da915ad5SPaul E. McKenney 	if (pushgp)
1429aacb5d91SPaul E. McKenney 		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
1430dad81a20SPaul E. McKenney }
1431dad81a20SPaul E. McKenney 
1432dad81a20SPaul E. McKenney /*
1433dad81a20SPaul E. McKenney  * This is the work-queue function that handles SRCU grace periods.
1434dad81a20SPaul E. McKenney  */
14350d8a1e83SPaul E. McKenney static void process_srcu(struct work_struct *work)
1436dad81a20SPaul E. McKenney {
1437aacb5d91SPaul E. McKenney 	struct srcu_struct *ssp;
1438dad81a20SPaul E. McKenney 
1439aacb5d91SPaul E. McKenney 	ssp = container_of(work, struct srcu_struct, work.work);
1440dad81a20SPaul E. McKenney 
1441aacb5d91SPaul E. McKenney 	srcu_advance_state(ssp);
1442aacb5d91SPaul E. McKenney 	srcu_reschedule(ssp, srcu_get_delay(ssp));
1443dad81a20SPaul E. McKenney }
14447f6733c3SPaul E. McKenney 
14457f6733c3SPaul E. McKenney void srcutorture_get_gp_data(enum rcutorture_type test_type,
1446aacb5d91SPaul E. McKenney 			     struct srcu_struct *ssp, int *flags,
1447aebc8264SPaul E. McKenney 			     unsigned long *gp_seq)
14487f6733c3SPaul E. McKenney {
14497f6733c3SPaul E. McKenney 	if (test_type != SRCU_FLAVOR)
14507f6733c3SPaul E. McKenney 		return;
14517f6733c3SPaul E. McKenney 	*flags = 0;
1452aacb5d91SPaul E. McKenney 	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
14537f6733c3SPaul E. McKenney }
14547f6733c3SPaul E. McKenney EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
14551f4f6da1SPaul E. McKenney 
14563bedebcfSPaul E. McKenney static const char * const srcu_size_state_name[] = {
14573bedebcfSPaul E. McKenney 	"SRCU_SIZE_SMALL",
14583bedebcfSPaul E. McKenney 	"SRCU_SIZE_ALLOC",
14593bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_BARRIER",
14603bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CALL",
14613bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CBS1",
14623bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CBS2",
14633bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CBS3",
14643bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CBS4",
14653bedebcfSPaul E. McKenney 	"SRCU_SIZE_BIG",
14663bedebcfSPaul E. McKenney 	"SRCU_SIZE_???",
14673bedebcfSPaul E. McKenney };
14683bedebcfSPaul E. McKenney 
1469aacb5d91SPaul E. McKenney void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1470115a1a52SPaul E. McKenney {
1471115a1a52SPaul E. McKenney 	int cpu;
1472115a1a52SPaul E. McKenney 	int idx;
1473ac3748c6SPaul E. McKenney 	unsigned long s0 = 0, s1 = 0;
14743bedebcfSPaul E. McKenney 	int ss_state = READ_ONCE(ssp->srcu_size_state);
14753bedebcfSPaul E. McKenney 	int ss_state_idx = ss_state;
1476115a1a52SPaul E. McKenney 
1477aacb5d91SPaul E. McKenney 	idx = ssp->srcu_idx & 0x1;
14783bedebcfSPaul E. McKenney 	if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
14793bedebcfSPaul E. McKenney 		ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
1480*4a230f80SPaul E. McKenney 	pr_alert("%s%s Tree SRCU g%ld state %d (%s)",
14813bedebcfSPaul E. McKenney 		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), ss_state,
1482*4a230f80SPaul E. McKenney 		 srcu_size_state_name[ss_state_idx]);
1483*4a230f80SPaul E. McKenney 	if (!ssp->sda) {
1484*4a230f80SPaul E. McKenney 		// Called after cleanup_srcu_struct(), perhaps.
1485*4a230f80SPaul E. McKenney 		pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
1486*4a230f80SPaul E. McKenney 	} else {
1487*4a230f80SPaul E. McKenney 		pr_cont(" per-CPU(idx=%d):", idx);
1488115a1a52SPaul E. McKenney 		for_each_possible_cpu(cpu) {
1489115a1a52SPaul E. McKenney 			unsigned long l0, l1;
1490115a1a52SPaul E. McKenney 			unsigned long u0, u1;
1491115a1a52SPaul E. McKenney 			long c0, c1;
14925ab07a8dSPaul E. McKenney 			struct srcu_data *sdp;
1493115a1a52SPaul E. McKenney 
1494aacb5d91SPaul E. McKenney 			sdp = per_cpu_ptr(ssp->sda, cpu);
1495b68c6146SPaul E. McKenney 			u0 = data_race(sdp->srcu_unlock_count[!idx]);
1496b68c6146SPaul E. McKenney 			u1 = data_race(sdp->srcu_unlock_count[idx]);
1497115a1a52SPaul E. McKenney 
1498115a1a52SPaul E. McKenney 			/*
1499115a1a52SPaul E. McKenney 			 * Make sure that a lock is always counted if the corresponding
1500115a1a52SPaul E. McKenney 			 * unlock is counted.
1501115a1a52SPaul E. McKenney 			 */
1502115a1a52SPaul E. McKenney 			smp_rmb();
1503115a1a52SPaul E. McKenney 
1504b68c6146SPaul E. McKenney 			l0 = data_race(sdp->srcu_lock_count[!idx]);
1505b68c6146SPaul E. McKenney 			l1 = data_race(sdp->srcu_lock_count[idx]);
1506115a1a52SPaul E. McKenney 
1507115a1a52SPaul E. McKenney 			c0 = l0 - u0;
1508115a1a52SPaul E. McKenney 			c1 = l1 - u1;
15097e210a65SPaul E. McKenney 			pr_cont(" %d(%ld,%ld %c)",
15107e210a65SPaul E. McKenney 				cpu, c0, c1,
15117e210a65SPaul E. McKenney 				"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
1512ac3748c6SPaul E. McKenney 			s0 += c0;
1513ac3748c6SPaul E. McKenney 			s1 += c1;
1514115a1a52SPaul E. McKenney 		}
1515ac3748c6SPaul E. McKenney 		pr_cont(" T(%ld,%ld)\n", s0, s1);
1516*4a230f80SPaul E. McKenney 	}
1517c69a00a1SPaul E. McKenney 	if (READ_ONCE(ssp->srcu_size_state) == SRCU_SIZE_SMALL && convert_to_big == 2)
1518c69a00a1SPaul E. McKenney 		WRITE_ONCE(ssp->srcu_size_state, SRCU_SIZE_ALLOC);
1519115a1a52SPaul E. McKenney }
1520115a1a52SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
1521115a1a52SPaul E. McKenney 
15221f4f6da1SPaul E. McKenney static int __init srcu_bootup_announce(void)
15231f4f6da1SPaul E. McKenney {
15241f4f6da1SPaul E. McKenney 	pr_info("Hierarchical SRCU implementation.\n");
15250c8e0e3cSPaul E. McKenney 	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
15260c8e0e3cSPaul E. McKenney 		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
15271f4f6da1SPaul E. McKenney 	return 0;
15281f4f6da1SPaul E. McKenney }
15291f4f6da1SPaul E. McKenney early_initcall(srcu_bootup_announce);
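
/*
 * Editorial note (not part of this file): the non-default exp_holdoff value
 * reported above can be supplied on the kernel command line using the usual
 * module-parameter syntax, assuming the parameter keeps its current name and
 * "srcutree." prefix, for example:
 *
 *	srcutree.exp_holdoff=0		// Disable auto-expedite holdoff.
 *	srcutree.exp_holdoff=50000	// 50-microsecond holdoff (value in ns).
 */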
1530e0fcba9aSPaul E. McKenney 
1531e0fcba9aSPaul E. McKenney void __init srcu_init(void)
1532e0fcba9aSPaul E. McKenney {
1533aacb5d91SPaul E. McKenney 	struct srcu_struct *ssp;
1534e0fcba9aSPaul E. McKenney 
15358e9c01c7SFrederic Weisbecker 	/*
15368e9c01c7SFrederic Weisbecker 	 * Once srcu_init_done is set, call_srcu() can follow the normal
15378e9c01c7SFrederic Weisbecker 	 * path and queue delayed work.  This must therefore follow the
15388e9c01c7SFrederic Weisbecker 	 * creation of the RCU workqueues and timer initialization.
15398e9c01c7SFrederic Weisbecker 	 */
1540e0fcba9aSPaul E. McKenney 	srcu_init_done = true;
1541e0fcba9aSPaul E. McKenney 	while (!list_empty(&srcu_boot_list)) {
1542aacb5d91SPaul E. McKenney 		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
15434e6ea4efSPaul E. McKenney 				      work.work.entry);
1544aacb5d91SPaul E. McKenney 		list_del_init(&ssp->work.work.entry);
1545aacb5d91SPaul E. McKenney 		queue_work(rcu_gp_wq, &ssp->work.work);
1546e0fcba9aSPaul E. McKenney 	}
1547e0fcba9aSPaul E. McKenney }
1548fe15b50cSPaul E. McKenney 
1549fe15b50cSPaul E. McKenney #ifdef CONFIG_MODULES
1550fe15b50cSPaul E. McKenney 
1551fe15b50cSPaul E. McKenney /* Initialize any global-scope srcu_struct structures used by this module. */
1552fe15b50cSPaul E. McKenney static int srcu_module_coming(struct module *mod)
1553fe15b50cSPaul E. McKenney {
1554fe15b50cSPaul E. McKenney 	int i;
1555fe15b50cSPaul E. McKenney 	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1556fe15b50cSPaul E. McKenney 	int ret;
1557fe15b50cSPaul E. McKenney 
1558fe15b50cSPaul E. McKenney 	for (i = 0; i < mod->num_srcu_structs; i++) {
1559fe15b50cSPaul E. McKenney 		ret = init_srcu_struct(*(sspp++));
1560fe15b50cSPaul E. McKenney 		if (WARN_ON_ONCE(ret))
1561fe15b50cSPaul E. McKenney 			return ret;
1562fe15b50cSPaul E. McKenney 	}
1563fe15b50cSPaul E. McKenney 	return 0;
1564fe15b50cSPaul E. McKenney }
1565fe15b50cSPaul E. McKenney 
1566fe15b50cSPaul E. McKenney /* Clean up any global-scope srcu_struct structures used by this module. */
1567fe15b50cSPaul E. McKenney static void srcu_module_going(struct module *mod)
1568fe15b50cSPaul E. McKenney {
1569fe15b50cSPaul E. McKenney 	int i;
1570fe15b50cSPaul E. McKenney 	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1571fe15b50cSPaul E. McKenney 
1572fe15b50cSPaul E. McKenney 	for (i = 0; i < mod->num_srcu_structs; i++)
1573fe15b50cSPaul E. McKenney 		cleanup_srcu_struct(*(sspp++));
1574fe15b50cSPaul E. McKenney }
1575fe15b50cSPaul E. McKenney 
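
/*
 * Editorial note (not part of this file): the two helpers above are what
 * allow a module to simply declare a file-scope srcu_struct and rely on the
 * module notifier for setup and teardown, for example (my_srcu is a
 * hypothetical name):
 *
 *	DEFINE_STATIC_SRCU(my_srcu);
 *
 *	// No init_srcu_struct()/cleanup_srcu_struct() calls are needed in the
 *	// module's init/exit functions for my_srcu: srcu_module_coming() and
 *	// srcu_module_going() handle that via mod->srcu_struct_ptrs.
 */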
1576fe15b50cSPaul E. McKenney /* Handle one module, either coming or going. */
1577fe15b50cSPaul E. McKenney static int srcu_module_notify(struct notifier_block *self,
1578fe15b50cSPaul E. McKenney 			      unsigned long val, void *data)
1579fe15b50cSPaul E. McKenney {
1580fe15b50cSPaul E. McKenney 	struct module *mod = data;
1581fe15b50cSPaul E. McKenney 	int ret = 0;
1582fe15b50cSPaul E. McKenney 
1583fe15b50cSPaul E. McKenney 	switch (val) {
1584fe15b50cSPaul E. McKenney 	case MODULE_STATE_COMING:
1585fe15b50cSPaul E. McKenney 		ret = srcu_module_coming(mod);
1586fe15b50cSPaul E. McKenney 		break;
1587fe15b50cSPaul E. McKenney 	case MODULE_STATE_GOING:
1588fe15b50cSPaul E. McKenney 		srcu_module_going(mod);
1589fe15b50cSPaul E. McKenney 		break;
1590fe15b50cSPaul E. McKenney 	default:
1591fe15b50cSPaul E. McKenney 		break;
1592fe15b50cSPaul E. McKenney 	}
1593fe15b50cSPaul E. McKenney 	return ret;
1594fe15b50cSPaul E. McKenney }
1595fe15b50cSPaul E. McKenney 
1596fe15b50cSPaul E. McKenney static struct notifier_block srcu_module_nb = {
1597fe15b50cSPaul E. McKenney 	.notifier_call = srcu_module_notify,
1598fe15b50cSPaul E. McKenney 	.priority = 0,
1599fe15b50cSPaul E. McKenney };
1600fe15b50cSPaul E. McKenney 
1601fe15b50cSPaul E. McKenney static __init int init_srcu_module_notifier(void)
1602fe15b50cSPaul E. McKenney {
1603fe15b50cSPaul E. McKenney 	int ret;
1604fe15b50cSPaul E. McKenney 
1605fe15b50cSPaul E. McKenney 	ret = register_module_notifier(&srcu_module_nb);
1606fe15b50cSPaul E. McKenney 	if (ret)
1607fe15b50cSPaul E. McKenney 		pr_warn("Failed to register srcu module notifier\n");
1608fe15b50cSPaul E. McKenney 	return ret;
1609fe15b50cSPaul E. McKenney }
1610fe15b50cSPaul E. McKenney late_initcall(init_srcu_module_notifier);
1611fe15b50cSPaul E. McKenney 
1612fe15b50cSPaul E. McKenney #endif /* #ifdef CONFIG_MODULES */
1613