// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);
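
/*
 * Illustrative arithmetic (an assumed example, not code from this file):
 * with the default value of ULONG_MAX >> 2, the "!(gpseq & counter_wrap_check)"
 * test in srcu_gp_end() succeeds only when the low 62 bits of ->srcu_gp_seq
 * are all zero on a 64-bit system, so the wrap check runs roughly every
 * 2^62 sequence values.
 */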

/*
 * Control conversion to SRCU_SIZE_BIG:
 *    0: Don't convert at all.
 *    1: Convert at init_srcu_struct() time.
 *    2: Convert when rcutorture invokes srcu_torture_stats_print().
 *    3: Decide at boot time based on system shape (default).
 * 0x1x: Convert when excessive contention encountered.
 */
#define SRCU_SIZING_NONE	0
#define SRCU_SIZING_INIT	1
#define SRCU_SIZING_TORTURE	2
#define SRCU_SIZING_AUTO	3
#define SRCU_SIZING_CONTEND	0x10
#define SRCU_SIZING_IS(x) ((convert_to_big & ~SRCU_SIZING_CONTEND) == x)
#define SRCU_SIZING_IS_NONE() (SRCU_SIZING_IS(SRCU_SIZING_NONE))
#define SRCU_SIZING_IS_INIT() (SRCU_SIZING_IS(SRCU_SIZING_INIT))
#define SRCU_SIZING_IS_TORTURE() (SRCU_SIZING_IS(SRCU_SIZING_TORTURE))
#define SRCU_SIZING_IS_CONTEND() (convert_to_big & SRCU_SIZING_CONTEND)
static int convert_to_big = SRCU_SIZING_AUTO;
module_param(convert_to_big, int, 0444);
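
/*
 * For example (illustrative): booting with srcutree.convert_to_big=2
 * selects SRCU_SIZING_TORTURE, while srcutree.convert_to_big=0x12 also
 * sets the SRCU_SIZING_CONTEND bit, so SRCU_SIZING_IS_TORTURE() and
 * SRCU_SIZING_IS_CONTEND() are then both true.
 */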

/* Number of CPUs to trigger init_srcu_struct()-time transition to big. */
static int big_cpu_lim __read_mostly = 128;
module_param(big_cpu_lim, int, 0444);

/* Contention events per jiffy to initiate transition to big. */
static int small_contention_lim __read_mostly = 100;
module_param(small_contention_lim, int, 0444);

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)							\
do {										\
	spin_lock(&ACCESS_PRIVATE(p, lock));					\
	smp_mb__after_unlock_lock();						\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)						\
do {										\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();						\
} while (0)

#define spin_unlock_irq_rcu_node(p)						\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)					\
do {										\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);			\
	smp_mb__after_unlock_lock();						\
} while (0)

#define spin_trylock_irqsave_rcu_node(p, flags)					\
({										\
	bool ___locked = spin_trylock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
										\
	if (___locked)								\
		smp_mb__after_unlock_lock();					\
	___locked;								\
})

#define spin_unlock_irqrestore_rcu_node(p, flags)				\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/*
 * Initialize SRCU per-CPU data.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_data(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = NULL;
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
	}
}

/* Invalid seq state, used during snp node initialization */
#define SRCU_SNP_INIT_SEQ		0x2

/*
 * Check whether the sequence number corresponding to the snp node
 * is invalid.
 */
static inline bool srcu_invl_snp_seq(unsigned long s)
{
	return rcu_seq_state(s) == SRCU_SNP_INIT_SEQ;
}

/*
 * Allocate and initialize the SRCU combining tree.  Returns @true if
 * allocation succeeded and @false otherwise.
 */
static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Initialize geometry if it has not already been initialized. */
	rcu_init_geometry();
	ssp->node = kcalloc(rcu_num_nodes, sizeof(*ssp->node), gfp_flags);
	if (!ssp->node)
		return false;

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
	}
	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
	return true;
}

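/*
 * Illustrative geometry (depends on CONFIG_RCU_FANOUT, CONFIG_RCU_FANOUT_LEAF,
 * and boot parameters, so treat these numbers as an assumed example): with a
 * leaf fanout of 16 and 64 possible CPUs, rcu_init_geometry() yields a
 * two-level tree in which ssp->level[0] points at the single root srcu_node,
 * ssp->level[1] at four leaves, each leaf covering a block of 16 CPUs via
 * ->grplo/->grphi, and each sdp->grpmask being one bit in 0x1..0x8000.
 */
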
/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static parameter
 * tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	ssp->srcu_size_state = SRCU_SIZE_SMALL;
	ssp->node = NULL;
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	ssp->sda_is_static = is_static;
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		return -ENOMEM;
	init_srcu_struct_data(ssp);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	if (READ_ONCE(ssp->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
		if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) {
			if (!ssp->sda_is_static) {
				free_percpu(ssp->sda);
				ssp->sda = NULL;
				return -ENOMEM;
			}
		} else {
			WRITE_ONCE(ssp->srcu_size_state, SRCU_SIZE_BIG);
		}
	}
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
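
/*
 * Illustrative usage sketch (my_srcu is a hypothetical domain):
 *
 *	static struct srcu_struct my_srcu;
 *
 *	WARN_ON(init_srcu_struct(&my_srcu));	// Before any other use.
 *	...
 *	synchronize_srcu(&my_srcu);		// Update-side grace period.
 *	...
 *	cleanup_srcu_struct(&my_srcu);		// After readers and callbacks are done.
 *
 * Statically allocated domains can instead be declared with DEFINE_SRCU()
 * or DEFINE_STATIC_SRCU(), avoiding run-time initialization.
 */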

/*
 * Initiate a transition to SRCU_SIZE_BIG with lock held.
 */
static void __srcu_transition_to_big(struct srcu_struct *ssp)
{
	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_ALLOC);
}

/*
 * Initiate an idempotent transition to SRCU_SIZE_BIG.
 */
static void srcu_transition_to_big(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* Double-checked locking on ->srcu_size_state. */
	if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL)
		return;
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	__srcu_transition_to_big(ssp);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Check to see if the just-encountered contention event justifies
 * a transition to SRCU_SIZE_BIG.
 */
static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
{
	unsigned long j;

	if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_size_state)
		return;
	j = jiffies;
	if (ssp->srcu_size_jiffies != j) {
		ssp->srcu_size_jiffies = j;
		ssp->srcu_n_lock_retries = 0;
	}
	if (++ssp->srcu_n_lock_retries <= small_contention_lim)
		return;
	__srcu_transition_to_big(ssp);
}

/*
 * Acquire the specified srcu_data structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_sdp_contention(struct srcu_data *sdp, unsigned long *flags)
{
	struct srcu_struct *ssp = sdp->ssp;

	if (spin_trylock_irqsave_rcu_node(sdp, *flags))
		return;
	spin_lock_irqsave_rcu_node(ssp, *flags);
	spin_lock_irqsave_check_contention(ssp);
	spin_unlock_irqrestore_rcu_node(ssp, *flags);
	spin_lock_irqsave_rcu_node(sdp, *flags);
}

/*
 * Acquire the specified srcu_struct structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
{
	if (spin_trylock_irqsave_rcu_node(ssp, *flags))
		return;
	spin_lock_irqsave_rcu_node(ssp, *flags);
	spin_lock_irqsave_check_contention(ssp);
}

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += atomic_long_read(&cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long mask = 0;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += atomic_long_read(&cpuc->srcu_unlock_count[idx]);
		if (IS_ENABLED(CONFIG_PROVE_RCU))
			mask = mask | READ_ONCE(cpuc->srcu_nmi_safety);
	}
	WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && (mask & (mask >> 1)),
		  "Mixed NMI-safe readers for srcu_struct at %ps.\n", ssp);
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted. Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side. In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between. This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

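/*
 * Illustrative store-buffering scenario (an assumed example, not code from
 * this file): an updater does "WRITE_ONCE(x, 1); synchronize_srcu()" while
 * a reader does "srcu_read_lock(); r1 = READ_ONCE(x)".  The smp_mb() A
 * above pairs with smp_mb() B in __srcu_read_lock() so that the outcome
 * in which the updater misses the reader's lock count and the reader also
 * misses the write to x is forbidden.
 */
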
/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += atomic_long_read(&cpuc->srcu_lock_count[0]);
		sum += atomic_long_read(&cpuc->srcu_lock_count[1]);
		sum -= atomic_long_read(&cpuc->srcu_unlock_count[0]);
		sum -= atomic_long_read(&cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below, boot time configurable) to allow SRCU readers to exit
 * their read-side critical sections.  If there are still some readers
 * after one jiffy, we repeatedly block for one-jiffy time periods.
 * The blocking time is increased as the grace-period age increases,
 * with max blocking time capped at 10 jiffies.
 */
#define SRCU_DEFAULT_RETRY_CHECK_DELAY		5

static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
module_param(srcu_retry_check_delay, ulong, 0444);

#define SRCU_INTERVAL		1		// Base delay if no expedited GPs pending.
#define SRCU_MAX_INTERVAL	10		// Maximum incremental delay from slow readers.

#define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO	3UL	// Lowmark on default per-GP-phase
							// no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI	1000UL	// Highmark on default per-GP-phase
							// no-delay instances.

#define SRCU_UL_CLAMP_LO(val, low)	((val) > (low) ? (val) : (low))
#define SRCU_UL_CLAMP_HI(val, high)	((val) < (high) ? (val) : (high))
#define SRCU_UL_CLAMP(val, low, high)	SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))
// Per-GP-phase no-delay instances adjusted to allow a non-sleeping poll of
// up to one jiffy.  The multiplication by 2 factors in the additional
// srcu_get_delay() call made from process_srcu().
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED	\
	(2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)

// Maximum per-GP-phase consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE	\
	SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED,	\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_LO,	\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)

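/*
 * Worked example of the clamping above (assuming HZ=1000):
 * SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED = 2 * 1000000 / 1000 / 5 = 400,
 * which already lies in [3, 1000], so SRCU_DEFAULT_MAX_NODELAY_PHASE is 400.
 * With HZ=100 the raw value would be 4000, which the clamp caps at 1000.
 */
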
static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
module_param(srcu_max_nodelay_phase, ulong, 0444);

// Maximum consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY	(SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ?	\
					 SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)

static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
module_param(srcu_max_nodelay, ulong, 0444);

/*
 * Return the grace-period delay in jiffies: zero if expedited grace
 * periods are pending (and the no-delay budget is not yet exhausted),
 * otherwise SRCU_INTERVAL plus the age of the current grace period,
 * capped at SRCU_MAX_INTERVAL.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	unsigned long gpstart;
	unsigned long j;
	unsigned long jbase = SRCU_INTERVAL;

	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		jbase = 0;
	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))) {
		j = jiffies - 1;
		gpstart = READ_ONCE(ssp->srcu_gp_start);
		if (time_after(j, gpstart))
			jbase += j - gpstart;
		if (!jbase) {
			WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
			if (READ_ONCE(ssp->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
				jbase = 1;
		}
	}
	return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
}

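/*
 * Worked example (illustrative): with no expedited requests pending,
 * jbase starts at SRCU_INTERVAL (1) and is increased by roughly the age
 * in jiffies of any in-flight grace period, so an older grace period
 * yields a correspondingly larger delay, capped at SRCU_MAX_INTERVAL (10).
 * With expedited requests pending, the delay stays zero until more than
 * srcu_max_nodelay_phase consecutive no-delay decisions have been made.
 */
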
/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(rcu_seq_current(&ssp->srcu_gp_seq) != ssp->srcu_gp_seq_needed) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)),
			rcu_seq_current(&ssp->srcu_gp_seq), ssp->srcu_gp_seq_needed);
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	if (!ssp->sda_is_static) {
		free_percpu(ssp->sda);
		ssp->sda = NULL;
	}
	kfree(ssp->node);
	ssp->node = NULL;
	ssp->srcu_size_state = SRCU_SIZE_SMALL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Check for consistent NMI safety.
 */
static void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
{
	int nmi_safe_mask = 1 << nmi_safe;
	int old_nmi_safe_mask;
	struct srcu_data *sdp;

	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	sdp = raw_cpu_ptr(ssp->sda);
	old_nmi_safe_mask = READ_ONCE(sdp->srcu_nmi_safety);
	if (!old_nmi_safe_mask) {
		WRITE_ONCE(sdp->srcu_nmi_safety, nmi_safe_mask);
		return;
	}
	WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
}

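/*
 * For example (illustrative): under CONFIG_PROVE_RCU, a CPU that reaches
 * this srcu_struct first via srcu_read_lock() and later via
 * srcu_read_lock_nmisafe() records masks 0x1 and then 0x2 in
 * ->srcu_nmi_safety, so the WARN_ONCE() above fires.  Cross-CPU mixing is
 * instead caught by the "Mixed NMI-safe readers" check in
 * srcu_readers_unlock_idx().
 */
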
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	srcu_check_nmi_safety(ssp, false);
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
	srcu_check_nmi_safety(ssp, false);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

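/*
 * Illustrative read-side usage via the srcu_read_lock() wrappers
 * (my_srcu and gp are hypothetical):
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);	// Access SRCU-protected data.
 *	...
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * The returned index selects the counter bank in use at lock time and
 * must be passed to the matching srcu_read_unlock().
 */
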
#ifdef CONFIG_NEED_SRCU_NMI_SAFE

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct, but in an NMI-safe manner using RMW atomics.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe)
{
	int idx;
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	atomic_long_inc(&sdp->srcu_lock_count[idx]);
	smp_mb__after_atomic(); /* B */  /* Avoid leaking the critical section. */
	if (chknmisafe)
		srcu_check_nmi_safety(ssp, true);
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe)
{
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	smp_mb__before_atomic(); /* C */  /* Avoid leaking the critical section. */
	atomic_long_inc(&sdp->srcu_unlock_count[idx]);
	if (chknmisafe)
		srcu_check_nmi_safety(ssp, true);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);

#endif // CONFIG_NEED_SRCU_NMI_SAFE

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp;
	int state;

	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		sdp = per_cpu_ptr(ssp->sda, 0);
	else
		sdp = this_cpu_ptr(ssp->sda);
	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	WRITE_ONCE(ssp->srcu_gp_start, jiffies);
	WRITE_ONCE(ssp->srcu_n_exp_nodelay, 0);
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

752e81baf4cSSebastian Andrzej Siewior static void srcu_delay_timer(struct timer_list *t)
753da915ad5SPaul E. McKenney {
754e81baf4cSSebastian Andrzej Siewior 	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
755e81baf4cSSebastian Andrzej Siewior 
756e81baf4cSSebastian Andrzej Siewior 	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
757da915ad5SPaul E. McKenney }
758da915ad5SPaul E. McKenney 
759e81baf4cSSebastian Andrzej Siewior static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
760da915ad5SPaul E. McKenney 				       unsigned long delay)
761da915ad5SPaul E. McKenney {
762e81baf4cSSebastian Andrzej Siewior 	if (!delay) {
763e81baf4cSSebastian Andrzej Siewior 		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
764e81baf4cSSebastian Andrzej Siewior 		return;
765e81baf4cSSebastian Andrzej Siewior 	}
766da915ad5SPaul E. McKenney 
767e81baf4cSSebastian Andrzej Siewior 	timer_reduce(&sdp->delay_work, jiffies + delay);
768da915ad5SPaul E. McKenney }
769da915ad5SPaul E. McKenney 
770da915ad5SPaul E. McKenney /*
771da915ad5SPaul E. McKenney  * Schedule callback invocation for the specified srcu_data structure,
772da915ad5SPaul E. McKenney  * if possible, on the corresponding CPU.
773da915ad5SPaul E. McKenney  */
774da915ad5SPaul E. McKenney static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
775da915ad5SPaul E. McKenney {
776e81baf4cSSebastian Andrzej Siewior 	srcu_queue_delayed_work_on(sdp, delay);
777da915ad5SPaul E. McKenney }
778da915ad5SPaul E. McKenney 
779da915ad5SPaul E. McKenney /*
780da915ad5SPaul E. McKenney  * Schedule callback invocation for all srcu_data structures associated
781c7e88067SPaul E. McKenney  * with the specified srcu_node structure that have callbacks for the
782c7e88067SPaul E. McKenney  * just-completed grace period, the one corresponding to idx.  If possible,
783c7e88067SPaul E. McKenney  * schedule this invocation on the corresponding CPUs.
784da915ad5SPaul E. McKenney  */
785aacb5d91SPaul E. McKenney static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
7861e9a038bSPaul E. McKenney 				  unsigned long mask, unsigned long delay)
787da915ad5SPaul E. McKenney {
788da915ad5SPaul E. McKenney 	int cpu;
789da915ad5SPaul E. McKenney 
790c7e88067SPaul E. McKenney 	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
791c7e88067SPaul E. McKenney 		if (!(mask & (1 << (cpu - snp->grplo))))
792c7e88067SPaul E. McKenney 			continue;
793aacb5d91SPaul E. McKenney 		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
794da915ad5SPaul E. McKenney 	}
795c7e88067SPaul E. McKenney }
796da915ad5SPaul E. McKenney 
797da915ad5SPaul E. McKenney /*
798da915ad5SPaul E. McKenney  * Note the end of an SRCU grace period.  Initiates callback invocation
799da915ad5SPaul E. McKenney  * and starts a new grace period if needed.
800da915ad5SPaul E. McKenney  *
801da915ad5SPaul E. McKenney  * The ->srcu_cb_mutex acquisition does not protect any data, but
802da915ad5SPaul E. McKenney  * instead prevents more than one grace period from starting while we
803da915ad5SPaul E. McKenney  * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
804da915ad5SPaul E. McKenney  * array to have a finite number of elements.
805da915ad5SPaul E. McKenney  */
806aacb5d91SPaul E. McKenney static void srcu_gp_end(struct srcu_struct *ssp)
807da915ad5SPaul E. McKenney {
8084f2bfd94SNeeraj Upadhyay 	unsigned long cbdelay = 1;
809da915ad5SPaul E. McKenney 	bool cbs;
8108ddbd883SIldar Ismagilov 	bool last_lvl;
811c350c008SPaul E. McKenney 	int cpu;
812c350c008SPaul E. McKenney 	unsigned long flags;
813da915ad5SPaul E. McKenney 	unsigned long gpseq;
814da915ad5SPaul E. McKenney 	int idx;
815c7e88067SPaul E. McKenney 	unsigned long mask;
816c350c008SPaul E. McKenney 	struct srcu_data *sdp;
817cbdc98e9SPaul E. McKenney 	unsigned long sgsne;
818da915ad5SPaul E. McKenney 	struct srcu_node *snp;
819e2f63836SPaul E. McKenney 	int ss_state;
820da915ad5SPaul E. McKenney 
821da915ad5SPaul E. McKenney 	/* Prevent more than one additional grace period. */
822aacb5d91SPaul E. McKenney 	mutex_lock(&ssp->srcu_cb_mutex);
823da915ad5SPaul E. McKenney 
824da915ad5SPaul E. McKenney 	/* End the current grace period. */
825aacb5d91SPaul E. McKenney 	spin_lock_irq_rcu_node(ssp);
826aacb5d91SPaul E. McKenney 	idx = rcu_seq_state(ssp->srcu_gp_seq);
827da915ad5SPaul E. McKenney 	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
8284f2bfd94SNeeraj Upadhyay 	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
8294f2bfd94SNeeraj Upadhyay 		cbdelay = 0;
8304f2bfd94SNeeraj Upadhyay 
831844a378dSPaul E. McKenney 	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
832aacb5d91SPaul E. McKenney 	rcu_seq_end(&ssp->srcu_gp_seq);
833aacb5d91SPaul E. McKenney 	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
834aacb5d91SPaul E. McKenney 	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
8358c9e0cb3SPaul E. McKenney 		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
836aacb5d91SPaul E. McKenney 	spin_unlock_irq_rcu_node(ssp);
837aacb5d91SPaul E. McKenney 	mutex_unlock(&ssp->srcu_gp_mutex);
838da915ad5SPaul E. McKenney 	/* A new grace period can start at this point.  But only one. */
839da915ad5SPaul E. McKenney 
840da915ad5SPaul E. McKenney 	/* Initiate callback invocation as needed. */
841c69a00a1SPaul E. McKenney 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
842c69a00a1SPaul E. McKenney 	if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
843994f7068SPaul E. McKenney 		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, 0), cbdelay);
844994f7068SPaul E. McKenney 	} else {
845da915ad5SPaul E. McKenney 		idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
846aacb5d91SPaul E. McKenney 		srcu_for_each_node_breadth_first(ssp, snp) {
847d6331980SPaul E. McKenney 			spin_lock_irq_rcu_node(snp);
848da915ad5SPaul E. McKenney 			cbs = false;
849aacb5d91SPaul E. McKenney 			last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
8508ddbd883SIldar Ismagilov 			if (last_lvl)
851c69a00a1SPaul E. McKenney 				cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
852da915ad5SPaul E. McKenney 			snp->srcu_have_cbs[idx] = gpseq;
853da915ad5SPaul E. McKenney 			rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
854cbdc98e9SPaul E. McKenney 			sgsne = snp->srcu_gp_seq_needed_exp;
855cbdc98e9SPaul E. McKenney 			if (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, gpseq))
8567ff8b450SPaul E. McKenney 				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
857c69a00a1SPaul E. McKenney 			if (ss_state < SRCU_SIZE_BIG)
858c69a00a1SPaul E. McKenney 				mask = ~0;
859c69a00a1SPaul E. McKenney 			else
860c7e88067SPaul E. McKenney 				mask = snp->srcu_data_have_cbs[idx];
861c7e88067SPaul E. McKenney 			snp->srcu_data_have_cbs[idx] = 0;
862d6331980SPaul E. McKenney 			spin_unlock_irq_rcu_node(snp);
863a3883df3SPaul E. McKenney 			if (cbs)
864aacb5d91SPaul E. McKenney 				srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
865994f7068SPaul E. McKenney 		}
866994f7068SPaul E. McKenney 	}
867c350c008SPaul E. McKenney 
868c350c008SPaul E. McKenney 	/* Occasionally prevent srcu_data counter wrap. */
869994f7068SPaul E. McKenney 	if (!(gpseq & counter_wrap_check))
870994f7068SPaul E. McKenney 		for_each_possible_cpu(cpu) {
871aacb5d91SPaul E. McKenney 			sdp = per_cpu_ptr(ssp->sda, cpu);
872d6331980SPaul E. McKenney 			spin_lock_irqsave_rcu_node(sdp, flags);
873994f7068SPaul E. McKenney 			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
874c350c008SPaul E. McKenney 				sdp->srcu_gp_seq_needed = gpseq;
875994f7068SPaul E. McKenney 			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
876a35d13ecSIldar Ismagilov 				sdp->srcu_gp_seq_needed_exp = gpseq;
877d6331980SPaul E. McKenney 			spin_unlock_irqrestore_rcu_node(sdp, flags);
878c350c008SPaul E. McKenney 		}
879da915ad5SPaul E. McKenney 
880da915ad5SPaul E. McKenney 	/* Callback initiation done, allow grace periods after next. */
881aacb5d91SPaul E. McKenney 	mutex_unlock(&ssp->srcu_cb_mutex);
882da915ad5SPaul E. McKenney 
883da915ad5SPaul E. McKenney 	/* Start a new grace period if needed. */
884aacb5d91SPaul E. McKenney 	spin_lock_irq_rcu_node(ssp);
885aacb5d91SPaul E. McKenney 	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
886da915ad5SPaul E. McKenney 	if (!rcu_seq_state(gpseq) &&
887aacb5d91SPaul E. McKenney 	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
888aacb5d91SPaul E. McKenney 		srcu_gp_start(ssp);
889aacb5d91SPaul E. McKenney 		spin_unlock_irq_rcu_node(ssp);
890aacb5d91SPaul E. McKenney 		srcu_reschedule(ssp, 0);
891da915ad5SPaul E. McKenney 	} else {
892aacb5d91SPaul E. McKenney 		spin_unlock_irq_rcu_node(ssp);
893da915ad5SPaul E. McKenney 	}
894e2f63836SPaul E. McKenney 
895e2f63836SPaul E. McKenney 	/* Transition to big if needed. */
896e2f63836SPaul E. McKenney 	if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
897e2f63836SPaul E. McKenney 		if (ss_state == SRCU_SIZE_ALLOC)
898c69a00a1SPaul E. McKenney 			init_srcu_struct_nodes(ssp, GFP_KERNEL);
899e2f63836SPaul E. McKenney 		else
900e2f63836SPaul E. McKenney 			smp_store_release(&ssp->srcu_size_state, ss_state + 1);
901e2f63836SPaul E. McKenney 	}
902da915ad5SPaul E. McKenney }
903da915ad5SPaul E. McKenney 
904da915ad5SPaul E. McKenney /*
9051e9a038bSPaul E. McKenney  * Funnel-locking scheme to scalably mediate many concurrent expedited
9061e9a038bSPaul E. McKenney  * grace-period requests.  This function is invoked for the first known
9071e9a038bSPaul E. McKenney  * expedited request for a grace period that has already been requested,
9081e9a038bSPaul E. McKenney  * but without expediting.  To start a completely new grace period,
9091e9a038bSPaul E. McKenney  * whether expedited or not, use srcu_funnel_gp_start() instead.
9101e9a038bSPaul E. McKenney  */
911aacb5d91SPaul E. McKenney static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
9121e9a038bSPaul E. McKenney 				  unsigned long s)
9131e9a038bSPaul E. McKenney {
9141e9a038bSPaul E. McKenney 	unsigned long flags;
915cbdc98e9SPaul E. McKenney 	unsigned long sgsne;
9161e9a038bSPaul E. McKenney 
917994f7068SPaul E. McKenney 	if (snp)
9181e9a038bSPaul E. McKenney 		for (; snp != NULL; snp = snp->srcu_parent) {
919cbdc98e9SPaul E. McKenney 			sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
920aacb5d91SPaul E. McKenney 			if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
921cbdc98e9SPaul E. McKenney 			    (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
9221e9a038bSPaul E. McKenney 				return;
923d6331980SPaul E. McKenney 			spin_lock_irqsave_rcu_node(snp, flags);
924cbdc98e9SPaul E. McKenney 			sgsne = snp->srcu_gp_seq_needed_exp;
925cbdc98e9SPaul E. McKenney 			if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)) {
926d6331980SPaul E. McKenney 				spin_unlock_irqrestore_rcu_node(snp, flags);
9271e9a038bSPaul E. McKenney 				return;
9281e9a038bSPaul E. McKenney 			}
9291e9a038bSPaul E. McKenney 			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
930d6331980SPaul E. McKenney 			spin_unlock_irqrestore_rcu_node(snp, flags);
9311e9a038bSPaul E. McKenney 		}
9329f2e91d9SPaul E. McKenney 	spin_lock_irqsave_ssp_contention(ssp, &flags);
933aacb5d91SPaul E. McKenney 	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
9348c9e0cb3SPaul E. McKenney 		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
935aacb5d91SPaul E. McKenney 	spin_unlock_irqrestore_rcu_node(ssp, flags);
9361e9a038bSPaul E. McKenney }
9371e9a038bSPaul E. McKenney 
9381e9a038bSPaul E. McKenney /*
939da915ad5SPaul E. McKenney  * Funnel-locking scheme to scalably mediate many concurrent grace-period
940da915ad5SPaul E. McKenney  * requests.  The winner has to do the work of actually starting grace
941da915ad5SPaul E. McKenney  * period s.  Losers must either ensure that their desired grace-period
942da915ad5SPaul E. McKenney  * number is recorded on at least their leaf srcu_node structure, or they
943da915ad5SPaul E. McKenney  * must take steps to invoke their own callbacks.
94417294ce6SPaul E. McKenney  *
94517294ce6SPaul E. McKenney  * Note that this function also does the work of srcu_funnel_exp_start(),
94617294ce6SPaul E. McKenney  * in some cases by directly invoking it.
947da915ad5SPaul E. McKenney  */
948aacb5d91SPaul E. McKenney static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
9491e9a038bSPaul E. McKenney 				 unsigned long s, bool do_norm)
950da915ad5SPaul E. McKenney {
951da915ad5SPaul E. McKenney 	unsigned long flags;
952da915ad5SPaul E. McKenney 	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
953cbdc98e9SPaul E. McKenney 	unsigned long sgsne;
9547b9e9b58SPaul E. McKenney 	struct srcu_node *snp;
9550b56f953SNeeraj Upadhyay 	struct srcu_node *snp_leaf;
956da915ad5SPaul E. McKenney 	unsigned long snp_seq;
957da915ad5SPaul E. McKenney 
9580b56f953SNeeraj Upadhyay 	/* Ensure that the snp node tree is fully initialized before traversing it */
9590b56f953SNeeraj Upadhyay 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
9600b56f953SNeeraj Upadhyay 		snp_leaf = NULL;
9610b56f953SNeeraj Upadhyay 	else
9620b56f953SNeeraj Upadhyay 		snp_leaf = sdp->mynode;
9630b56f953SNeeraj Upadhyay 
964994f7068SPaul E. McKenney 	if (snp_leaf)
965da915ad5SPaul E. McKenney 		/* Each pass through the loop does one level of the srcu_node tree. */
9667b9e9b58SPaul E. McKenney 		for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
9677b9e9b58SPaul E. McKenney 			if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != snp_leaf)
968da915ad5SPaul E. McKenney 				return; /* GP already done and CBs recorded. */
969d6331980SPaul E. McKenney 			spin_lock_irqsave_rcu_node(snp, flags);
970da915ad5SPaul E. McKenney 			snp_seq = snp->srcu_have_cbs[idx];
971cbdc98e9SPaul E. McKenney 			if (!srcu_invl_snp_seq(snp_seq) && ULONG_CMP_GE(snp_seq, s)) {
9727b9e9b58SPaul E. McKenney 				if (snp == snp_leaf && snp_seq == s)
973c7e88067SPaul E. McKenney 					snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
974d6331980SPaul E. McKenney 				spin_unlock_irqrestore_rcu_node(snp, flags);
9757b9e9b58SPaul E. McKenney 				if (snp == snp_leaf && snp_seq != s) {
976aeb9b39bSPaul E. McKenney 					srcu_schedule_cbs_sdp(sdp, do_norm ? SRCU_INTERVAL : 0);
9771e9a038bSPaul E. McKenney 					return;
978da915ad5SPaul E. McKenney 				}
9791e9a038bSPaul E. McKenney 				if (!do_norm)
980aacb5d91SPaul E. McKenney 					srcu_funnel_exp_start(ssp, snp, s);
981da915ad5SPaul E. McKenney 				return;
982da915ad5SPaul E. McKenney 			}
983da915ad5SPaul E. McKenney 			snp->srcu_have_cbs[idx] = s;
9847b9e9b58SPaul E. McKenney 			if (snp == snp_leaf)
985c7e88067SPaul E. McKenney 				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
986cbdc98e9SPaul E. McKenney 			sgsne = snp->srcu_gp_seq_needed_exp;
987cbdc98e9SPaul E. McKenney 			if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
9887ff8b450SPaul E. McKenney 				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
989d6331980SPaul E. McKenney 			spin_unlock_irqrestore_rcu_node(snp, flags);
990da915ad5SPaul E. McKenney 		}
991da915ad5SPaul E. McKenney 
992da915ad5SPaul E. McKenney 	/* Top of tree, must ensure the grace period will be started. */
9939f2e91d9SPaul E. McKenney 	spin_lock_irqsave_ssp_contention(ssp, &flags);
994aacb5d91SPaul E. McKenney 	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
995da915ad5SPaul E. McKenney 		/*
996da915ad5SPaul E. McKenney 		 * Record need for grace period s.  Pair with load
997da915ad5SPaul E. McKenney 		 * acquire setting up for initialization.
998da915ad5SPaul E. McKenney 		 */
999aacb5d91SPaul E. McKenney 		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
1000da915ad5SPaul E. McKenney 	}
1001aacb5d91SPaul E. McKenney 	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
10028c9e0cb3SPaul E. McKenney 		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
1003da915ad5SPaul E. McKenney 
1004da915ad5SPaul E. McKenney 	/* If grace period not already done and none in progress, start it. */
1005aacb5d91SPaul E. McKenney 	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
1006aacb5d91SPaul E. McKenney 	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
1007aacb5d91SPaul E. McKenney 		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
1008aacb5d91SPaul E. McKenney 		srcu_gp_start(ssp);
1009ee5e2448SPaul E. McKenney 
1010ee5e2448SPaul E. McKenney 		// And how can that list_add() in the "else" clause
1011ee5e2448SPaul E. McKenney 		// possibly be safe for concurrent execution?  Well,
1012ee5e2448SPaul E. McKenney 		// it isn't.  And it does not have to be.  After all, it
1013ee5e2448SPaul E. McKenney 		// can only be executed during early boot when there is only
1014ee5e2448SPaul E. McKenney 		// the one boot CPU running with interrupts still disabled.
1015e0fcba9aSPaul E. McKenney 		if (likely(srcu_init_done))
1016aacb5d91SPaul E. McKenney 			queue_delayed_work(rcu_gp_wq, &ssp->work,
1017282d8998SPaul E. McKenney 					   !!srcu_get_delay(ssp));
1018aacb5d91SPaul E. McKenney 		else if (list_empty(&ssp->work.work.entry))
1019aacb5d91SPaul E. McKenney 			list_add(&ssp->work.work.entry, &srcu_boot_list);
1020da915ad5SPaul E. McKenney 	}
1021aacb5d91SPaul E. McKenney 	spin_unlock_irqrestore_rcu_node(ssp, flags);
1022da915ad5SPaul E. McKenney }
1023da915ad5SPaul E. McKenney 
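/*
 * Simplified sketch of the funnel idea implemented by the two functions
 * above: walk from the leaf toward the root, stopping as soon as some
 * ancestor already records a request at least as new as sequence number
 * s.  The helper name is hypothetical, and this sketch deliberately
 * ignores the srcu_invl_snp_seq() encoding and the per-node locking
 * that the real code must handle.
 */
static bool __maybe_unused srcu_funnel_sketch(struct srcu_node *snp, unsigned long s)
{
	for (; snp != NULL; snp = snp->srcu_parent)
		if (ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return true;	/* Already requested; nothing to do. */
	return false;	/* Caller must record the request at the root. */
}
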
1024da915ad5SPaul E. McKenney /*
1025dad81a20SPaul E. McKenney  * Wait until all readers counted by array index idx complete, but
1026dad81a20SPaul E. McKenney  * loop an additional time if there is an expedited grace period pending.
1027da915ad5SPaul E. McKenney  * The caller must ensure that ->srcu_idx is not changed while checking.
1028dad81a20SPaul E. McKenney  */
1029aacb5d91SPaul E. McKenney static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
1030dad81a20SPaul E. McKenney {
10314f2bfd94SNeeraj Upadhyay 	unsigned long curdelay;
10324f2bfd94SNeeraj Upadhyay 
10334f2bfd94SNeeraj Upadhyay 	curdelay = !srcu_get_delay(ssp);
10344f2bfd94SNeeraj Upadhyay 
1035dad81a20SPaul E. McKenney 	for (;;) {
1036aacb5d91SPaul E. McKenney 		if (srcu_readers_active_idx_check(ssp, idx))
1037dad81a20SPaul E. McKenney 			return true;
10384f2bfd94SNeeraj Upadhyay 		if ((--trycount + curdelay) <= 0)
1039dad81a20SPaul E. McKenney 			return false;
10404f2bfd94SNeeraj Upadhyay 		udelay(srcu_retry_check_delay);
1041dad81a20SPaul E. McKenney 	}
1042dad81a20SPaul E. McKenney }
1043dad81a20SPaul E. McKenney 
1044dad81a20SPaul E. McKenney /*
1045da915ad5SPaul E. McKenney  * Increment the ->srcu_idx counter so that future SRCU readers will
1046da915ad5SPaul E. McKenney  * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
1047dad81a20SPaul E. McKenney  * us to wait for pre-existing readers in a starvation-free manner.
1048dad81a20SPaul E. McKenney  */
1049aacb5d91SPaul E. McKenney static void srcu_flip(struct srcu_struct *ssp)
1050dad81a20SPaul E. McKenney {
1051881ec9d2SPaul E. McKenney 	/*
1052881ec9d2SPaul E. McKenney 	 * Ensure that if this updater saw a given reader's increment
1053881ec9d2SPaul E. McKenney 	 * from __srcu_read_lock(), that reader was using an old value
1054881ec9d2SPaul E. McKenney 	 * of ->srcu_idx.  Also ensure that if a given reader sees the
1055881ec9d2SPaul E. McKenney 	 * new value of ->srcu_idx, this updater's earlier scans cannot
1056881ec9d2SPaul E. McKenney 	 * have seen that reader's increments (which is OK, because this
1057881ec9d2SPaul E. McKenney 	 * grace period need not wait on that reader).
1058881ec9d2SPaul E. McKenney 	 */
1059881ec9d2SPaul E. McKenney 	smp_mb(); /* E */  /* Pairs with B and C. */
1060881ec9d2SPaul E. McKenney 
1061aacb5d91SPaul E. McKenney 	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
1062dad81a20SPaul E. McKenney 
1063dad81a20SPaul E. McKenney 	/*
1064dad81a20SPaul E. McKenney 	 * Ensure that if the updater misses an __srcu_read_unlock()
1065dad81a20SPaul E. McKenney 	 * increment, that task's next __srcu_read_lock() will see the
1066dad81a20SPaul E. McKenney 	 * above counter update.  Note that both this memory barrier
1067dad81a20SPaul E. McKenney 	 * and the one in srcu_readers_active_idx_check() provide the
1068dad81a20SPaul E. McKenney 	 * guarantee for __srcu_read_lock().
1069dad81a20SPaul E. McKenney 	 */
1070dad81a20SPaul E. McKenney 	smp_mb(); /* D */  /* Pairs with C. */
1071dad81a20SPaul E. McKenney }
1072dad81a20SPaul E. McKenney 
1073dad81a20SPaul E. McKenney /*
10742da4b2a7SPaul E. McKenney  * If SRCU is likely idle, return true, otherwise return false.
10752da4b2a7SPaul E. McKenney  *
10762da4b2a7SPaul E. McKenney  * Note that it is OK for several concurrent from-idle requests for a new
10772da4b2a7SPaul E. McKenney  * grace period to specify expediting, because they will all end
10782da4b2a7SPaul E. McKenney  * up requesting the same grace period anyhow.  So no loss.
10792da4b2a7SPaul E. McKenney  *
10802da4b2a7SPaul E. McKenney  * Note also that if any CPU (including the current one) is still invoking
10812da4b2a7SPaul E. McKenney  * callbacks, this function will nevertheless say "idle".  This is not
10822da4b2a7SPaul E. McKenney  * ideal, but the overhead of checking all CPUs' callback lists is even
10832da4b2a7SPaul E. McKenney  * less ideal, especially on large systems.  Furthermore, the wakeup
10842da4b2a7SPaul E. McKenney  * can happen before the callback is fully removed, so we have no choice
10852da4b2a7SPaul E. McKenney  * but to accept this type of error.
10862da4b2a7SPaul E. McKenney  *
10872da4b2a7SPaul E. McKenney  * This function is also subject to counter-wrap errors, but let's face
10882da4b2a7SPaul E. McKenney  * it, if this function was preempted for enough time for the counters
10892da4b2a7SPaul E. McKenney  * to wrap, it really doesn't matter whether or not we expedite the grace
10902da4b2a7SPaul E. McKenney  * period.  The extra overhead of a needlessly expedited grace period is
10917fef6cffSEthon Paul  * negligible when amortized over that time period, and the extra latency
10922da4b2a7SPaul E. McKenney  * of a needlessly non-expedited grace period is similarly negligible.
10932da4b2a7SPaul E. McKenney  */
1094aacb5d91SPaul E. McKenney static bool srcu_might_be_idle(struct srcu_struct *ssp)
10952da4b2a7SPaul E. McKenney {
109622607d66SPaul E. McKenney 	unsigned long curseq;
10972da4b2a7SPaul E. McKenney 	unsigned long flags;
10982da4b2a7SPaul E. McKenney 	struct srcu_data *sdp;
109922607d66SPaul E. McKenney 	unsigned long t;
1100844a378dSPaul E. McKenney 	unsigned long tlast;
11012da4b2a7SPaul E. McKenney 
1102bde50d8fSSebastian Andrzej Siewior 	check_init_srcu_struct(ssp);
11032da4b2a7SPaul E. McKenney 	/* If the local srcu_data structure has callbacks, not idle.  */
1104bde50d8fSSebastian Andrzej Siewior 	sdp = raw_cpu_ptr(ssp->sda);
1105bde50d8fSSebastian Andrzej Siewior 	spin_lock_irqsave_rcu_node(sdp, flags);
11062da4b2a7SPaul E. McKenney 	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
1107bde50d8fSSebastian Andrzej Siewior 		spin_unlock_irqrestore_rcu_node(sdp, flags);
11082da4b2a7SPaul E. McKenney 		return false; /* Callbacks already present, so not idle. */
11092da4b2a7SPaul E. McKenney 	}
1110bde50d8fSSebastian Andrzej Siewior 	spin_unlock_irqrestore_rcu_node(sdp, flags);
11112da4b2a7SPaul E. McKenney 
11122da4b2a7SPaul E. McKenney 	/*
1113a616aec9SIngo Molnar 	 * No local callbacks, so probabilistically probe global state.
11142da4b2a7SPaul E. McKenney 	 * Exact information would require acquiring locks, which would
1115a616aec9SIngo Molnar 	 * kill scalability, hence the probabilistic nature of the probe.
11162da4b2a7SPaul E. McKenney 	 */
111722607d66SPaul E. McKenney 
111822607d66SPaul E. McKenney 	/* First, see if enough time has passed since the last GP. */
111922607d66SPaul E. McKenney 	t = ktime_get_mono_fast_ns();
1120844a378dSPaul E. McKenney 	tlast = READ_ONCE(ssp->srcu_last_gp_end);
112122607d66SPaul E. McKenney 	if (exp_holdoff == 0 ||
1122844a378dSPaul E. McKenney 	    time_in_range_open(t, tlast, tlast + exp_holdoff))
112322607d66SPaul E. McKenney 		return false; /* Too soon after last GP. */
112422607d66SPaul E. McKenney 
112522607d66SPaul E. McKenney 	/* Next, check for probable idleness. */
1126aacb5d91SPaul E. McKenney 	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
11272da4b2a7SPaul E. McKenney 	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
1128aacb5d91SPaul E. McKenney 	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
11292da4b2a7SPaul E. McKenney 		return false; /* Grace period in progress, so not idle. */
11302da4b2a7SPaul E. McKenney 	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
1131aacb5d91SPaul E. McKenney 	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
11322da4b2a7SPaul E. McKenney 		return false; /* GP # changed, so not idle. */
11332da4b2a7SPaul E. McKenney 	return true; /* With reasonable probability, idle! */
11342da4b2a7SPaul E. McKenney }
11352da4b2a7SPaul E. McKenney 
11362da4b2a7SPaul E. McKenney /*
1137a602538eSPaul E. McKenney  * SRCU callback function to leak a callback.
1138a602538eSPaul E. McKenney  */
1139a602538eSPaul E. McKenney static void srcu_leak_callback(struct rcu_head *rhp)
1140a602538eSPaul E. McKenney {
1141a602538eSPaul E. McKenney }
1142a602538eSPaul E. McKenney 
1143a602538eSPaul E. McKenney /*
114429d2bb94SPaul E. McKenney  * Start an SRCU grace period, and also queue the callback if non-NULL.
114529d2bb94SPaul E. McKenney  */
11465358c9faSPaul E. McKenney static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
11475358c9faSPaul E. McKenney 					     struct rcu_head *rhp, bool do_norm)
114829d2bb94SPaul E. McKenney {
114929d2bb94SPaul E. McKenney 	unsigned long flags;
115029d2bb94SPaul E. McKenney 	int idx;
115129d2bb94SPaul E. McKenney 	bool needexp = false;
115229d2bb94SPaul E. McKenney 	bool needgp = false;
115329d2bb94SPaul E. McKenney 	unsigned long s;
115429d2bb94SPaul E. McKenney 	struct srcu_data *sdp;
11550b56f953SNeeraj Upadhyay 	struct srcu_node *sdp_mynode;
11560b56f953SNeeraj Upadhyay 	int ss_state;
115729d2bb94SPaul E. McKenney 
11585358c9faSPaul E. McKenney 	check_init_srcu_struct(ssp);
115927120e7dSPaul E. McKenney 	idx = __srcu_read_lock_nmisafe(ssp, false);
11600b56f953SNeeraj Upadhyay 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
11610b56f953SNeeraj Upadhyay 	if (ss_state < SRCU_SIZE_WAIT_CALL)
1162994f7068SPaul E. McKenney 		sdp = per_cpu_ptr(ssp->sda, 0);
1163994f7068SPaul E. McKenney 	else
116429d2bb94SPaul E. McKenney 		sdp = raw_cpu_ptr(ssp->sda);
1165c2445d38SPaul E. McKenney 	spin_lock_irqsave_sdp_contention(sdp, &flags);
11665358c9faSPaul E. McKenney 	if (rhp)
116729d2bb94SPaul E. McKenney 		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
116829d2bb94SPaul E. McKenney 	rcu_segcblist_advance(&sdp->srcu_cblist,
116929d2bb94SPaul E. McKenney 			      rcu_seq_current(&ssp->srcu_gp_seq));
117029d2bb94SPaul E. McKenney 	s = rcu_seq_snap(&ssp->srcu_gp_seq);
117129d2bb94SPaul E. McKenney 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
117229d2bb94SPaul E. McKenney 	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
117329d2bb94SPaul E. McKenney 		sdp->srcu_gp_seq_needed = s;
117429d2bb94SPaul E. McKenney 		needgp = true;
117529d2bb94SPaul E. McKenney 	}
117629d2bb94SPaul E. McKenney 	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
117729d2bb94SPaul E. McKenney 		sdp->srcu_gp_seq_needed_exp = s;
117829d2bb94SPaul E. McKenney 		needexp = true;
117929d2bb94SPaul E. McKenney 	}
118029d2bb94SPaul E. McKenney 	spin_unlock_irqrestore_rcu_node(sdp, flags);
11810b56f953SNeeraj Upadhyay 
11820b56f953SNeeraj Upadhyay 	/* Ensure that the snp node tree is fully initialized before traversing it */
11830b56f953SNeeraj Upadhyay 	if (ss_state < SRCU_SIZE_WAIT_BARRIER)
11840b56f953SNeeraj Upadhyay 		sdp_mynode = NULL;
11850b56f953SNeeraj Upadhyay 	else
11860b56f953SNeeraj Upadhyay 		sdp_mynode = sdp->mynode;
11870b56f953SNeeraj Upadhyay 
118829d2bb94SPaul E. McKenney 	if (needgp)
118929d2bb94SPaul E. McKenney 		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
119029d2bb94SPaul E. McKenney 	else if (needexp)
11910b56f953SNeeraj Upadhyay 		srcu_funnel_exp_start(ssp, sdp_mynode, s);
119227120e7dSPaul E. McKenney 	__srcu_read_unlock_nmisafe(ssp, idx, false);
11935358c9faSPaul E. McKenney 	return s;
119429d2bb94SPaul E. McKenney }
119529d2bb94SPaul E. McKenney 
119629d2bb94SPaul E. McKenney /*
1197da915ad5SPaul E. McKenney  * Enqueue an SRCU callback on the srcu_data structure associated with
1198da915ad5SPaul E. McKenney  * the current CPU and the specified srcu_struct structure, initiating
1199da915ad5SPaul E. McKenney  * grace-period processing if it is not already running.
1200dad81a20SPaul E. McKenney  *
1201dad81a20SPaul E. McKenney  * Note that all CPUs must agree that the grace period extended beyond
1202dad81a20SPaul E. McKenney  * all pre-existing SRCU read-side critical sections.  On systems with
1203dad81a20SPaul E. McKenney  * more than one CPU, this means that when "func()" is invoked, each CPU
1204dad81a20SPaul E. McKenney  * is guaranteed to have executed a full memory barrier since the end of
1205dad81a20SPaul E. McKenney  * its last corresponding SRCU read-side critical section whose beginning
12065ef98a63SPaul E. McKenney  * preceded the call to call_srcu().  It also means that each CPU executing
1207dad81a20SPaul E. McKenney  * an SRCU read-side critical section that continues beyond the start of
12085ef98a63SPaul E. McKenney  * "func()" must have executed a memory barrier after the call_srcu()
1209dad81a20SPaul E. McKenney  * but before the beginning of that SRCU read-side critical section.
1210dad81a20SPaul E. McKenney  * Note that these guarantees include CPUs that are offline, idle, or
1211dad81a20SPaul E. McKenney  * executing in user mode, as well as CPUs that are executing in the kernel.
1212dad81a20SPaul E. McKenney  *
12135ef98a63SPaul E. McKenney  * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
1214dad81a20SPaul E. McKenney  * resulting SRCU callback function "func()", then both CPU A and CPU
1215dad81a20SPaul E. McKenney  * B are guaranteed to execute a full memory barrier during the time
12165ef98a63SPaul E. McKenney  * interval between the call to call_srcu() and the invocation of "func()".
1217dad81a20SPaul E. McKenney  * This guarantee applies even if CPU A and CPU B are the same CPU (but
1218dad81a20SPaul E. McKenney  * again only if the system has more than one CPU).
1219dad81a20SPaul E. McKenney  *
1220dad81a20SPaul E. McKenney  * Of course, these guarantees apply only for invocations of call_srcu(),
1221dad81a20SPaul E. McKenney  * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
1222dad81a20SPaul E. McKenney  * srcu_struct structure.
1223dad81a20SPaul E. McKenney  */
122411b00045SJiang Biao static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
12251e9a038bSPaul E. McKenney 			rcu_callback_t func, bool do_norm)
1226dad81a20SPaul E. McKenney {
1227a602538eSPaul E. McKenney 	if (debug_rcu_head_queue(rhp)) {
1228a602538eSPaul E. McKenney 		/* Probable double call_srcu(), so leak the callback. */
1229a602538eSPaul E. McKenney 		WRITE_ONCE(rhp->func, srcu_leak_callback);
1230a602538eSPaul E. McKenney 		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
1231a602538eSPaul E. McKenney 		return;
1232a602538eSPaul E. McKenney 	}
1233da915ad5SPaul E. McKenney 	rhp->func = func;
12345358c9faSPaul E. McKenney 	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
12351e9a038bSPaul E. McKenney }
12361e9a038bSPaul E. McKenney 
12375a0465e1SPaul E. McKenney /**
12385a0465e1SPaul E. McKenney  * call_srcu() - Queue a callback for invocation after an SRCU grace period
1239aacb5d91SPaul E. McKenney  * @ssp: srcu_struct on which to queue the callback
124027fdb35fSPaul E. McKenney  * @rhp: structure to be used for queueing the SRCU callback.
12415a0465e1SPaul E. McKenney  * @func: function to be invoked after the SRCU grace period
12425a0465e1SPaul E. McKenney  *
12435a0465e1SPaul E. McKenney  * The callback function will be invoked some time after a full SRCU
12445a0465e1SPaul E. McKenney  * grace period elapses, in other words after all pre-existing SRCU
12455a0465e1SPaul E. McKenney  * read-side critical sections have completed.  However, the callback
12465a0465e1SPaul E. McKenney  * function might well execute concurrently with other SRCU read-side
12475a0465e1SPaul E. McKenney  * critical sections that started after call_srcu() was invoked.  SRCU
12485a0465e1SPaul E. McKenney  * read-side critical sections are delimited by srcu_read_lock() and
12495a0465e1SPaul E. McKenney  * srcu_read_unlock(), and may be nested.
12505a0465e1SPaul E. McKenney  *
12515a0465e1SPaul E. McKenney  * The callback will be invoked from process context, but must nevertheless
12525a0465e1SPaul E. McKenney  * be fast and must not block.
12535a0465e1SPaul E. McKenney  */
1254aacb5d91SPaul E. McKenney void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
12551e9a038bSPaul E. McKenney 	       rcu_callback_t func)
12561e9a038bSPaul E. McKenney {
1257aacb5d91SPaul E. McKenney 	__call_srcu(ssp, rhp, func, true);
1258dad81a20SPaul E. McKenney }
1259dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(call_srcu);
1260dad81a20SPaul E. McKenney 
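/*
 * Illustrative sketch of the typical call_srcu() usage pattern that the
 * guarantees above describe.  The srcu_struct, structure, and function
 * names here are hypothetical; only the documented API calls are assumed.
 */
DEFINE_STATIC_SRCU(example_srcu);

struct example_node {
	int value;
	struct rcu_head rh;
};

/* Invoked only after a full SRCU grace period; safe to free the node. */
static void example_node_free(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_node, rh));
}

/* Updater: @enp has already been unlinked from all readers' paths. */
static void __maybe_unused example_node_retire(struct example_node *enp)
{
	call_srcu(&example_srcu, &enp->rh, example_node_free);
}
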
1261dad81a20SPaul E. McKenney /*
1262dad81a20SPaul E. McKenney  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
1263dad81a20SPaul E. McKenney  */
1264aacb5d91SPaul E. McKenney static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
1265dad81a20SPaul E. McKenney {
1266dad81a20SPaul E. McKenney 	struct rcu_synchronize rcu;
1267dad81a20SPaul E. McKenney 
1268f505d434SJakub Kicinski 	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
1269dad81a20SPaul E. McKenney 			 lock_is_held(&rcu_bh_lock_map) ||
1270dad81a20SPaul E. McKenney 			 lock_is_held(&rcu_lock_map) ||
1271dad81a20SPaul E. McKenney 			 lock_is_held(&rcu_sched_lock_map),
1272dad81a20SPaul E. McKenney 			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
1273dad81a20SPaul E. McKenney 
1274dad81a20SPaul E. McKenney 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
1275dad81a20SPaul E. McKenney 		return;
1276dad81a20SPaul E. McKenney 	might_sleep();
1277aacb5d91SPaul E. McKenney 	check_init_srcu_struct(ssp);
1278dad81a20SPaul E. McKenney 	init_completion(&rcu.completion);
1279da915ad5SPaul E. McKenney 	init_rcu_head_on_stack(&rcu.head);
1280aacb5d91SPaul E. McKenney 	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
1281dad81a20SPaul E. McKenney 	wait_for_completion(&rcu.completion);
1282da915ad5SPaul E. McKenney 	destroy_rcu_head_on_stack(&rcu.head);
128335732cf9SPaul E. McKenney 
128435732cf9SPaul E. McKenney 	/*
128535732cf9SPaul E. McKenney 	 * Make sure that later code is ordered after the SRCU grace
1286d6331980SPaul E. McKenney 	 * period.  This pairs with the spin_lock_irq_rcu_node()
128735732cf9SPaul E. McKenney 	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
128835732cf9SPaul E. McKenney 	 * because the current CPU might have been totally uninvolved with
128935732cf9SPaul E. McKenney 	 * (and thus unordered against) that grace period.
129035732cf9SPaul E. McKenney 	 */
129135732cf9SPaul E. McKenney 	smp_mb();
1292dad81a20SPaul E. McKenney }
1293dad81a20SPaul E. McKenney 
1294dad81a20SPaul E. McKenney /**
1295dad81a20SPaul E. McKenney  * synchronize_srcu_expedited - Brute-force SRCU grace period
1296aacb5d91SPaul E. McKenney  * @ssp: srcu_struct with which to synchronize.
1297dad81a20SPaul E. McKenney  *
1298dad81a20SPaul E. McKenney  * Wait for an SRCU grace period to elapse, but be more aggressive about
1299dad81a20SPaul E. McKenney  * spinning rather than blocking when waiting.
1300dad81a20SPaul E. McKenney  *
1301dad81a20SPaul E. McKenney  * Note that synchronize_srcu_expedited() has the same deadlock and
1302dad81a20SPaul E. McKenney  * memory-ordering properties as does synchronize_srcu().
1303dad81a20SPaul E. McKenney  */
1304aacb5d91SPaul E. McKenney void synchronize_srcu_expedited(struct srcu_struct *ssp)
1305dad81a20SPaul E. McKenney {
1306aacb5d91SPaul E. McKenney 	__synchronize_srcu(ssp, rcu_gp_is_normal());
1307dad81a20SPaul E. McKenney }
1308dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
1309dad81a20SPaul E. McKenney 
1310dad81a20SPaul E. McKenney /**
1311dad81a20SPaul E. McKenney  * synchronize_srcu - wait for prior SRCU read-side critical-section completion
1312aacb5d91SPaul E. McKenney  * @ssp: srcu_struct with which to synchronize.
1313dad81a20SPaul E. McKenney  *
1314dad81a20SPaul E. McKenney  * Wait for the counts of both indexes to drain to zero.  To avoid
1315dad81a20SPaul E. McKenney  * possible starvation of synchronize_srcu(), it first waits for the
1316da915ad5SPaul E. McKenney  * count of index=((->srcu_idx & 1) ^ 1) to drain to zero,
1317da915ad5SPaul E. McKenney  * then flips ->srcu_idx and waits for the count of the other index.
1318dad81a20SPaul E. McKenney  *
1319dad81a20SPaul E. McKenney  * Can block; must be called from process context.
1320dad81a20SPaul E. McKenney  *
1321dad81a20SPaul E. McKenney  * Note that it is illegal to call synchronize_srcu() from the corresponding
1322dad81a20SPaul E. McKenney  * SRCU read-side critical section; doing so will result in deadlock.
1323dad81a20SPaul E. McKenney  * However, it is perfectly legal to call synchronize_srcu() on one
1324dad81a20SPaul E. McKenney  * srcu_struct from some other srcu_struct's read-side critical section,
1325dad81a20SPaul E. McKenney  * as long as the resulting graph of srcu_structs is acyclic.
1326dad81a20SPaul E. McKenney  *
1327dad81a20SPaul E. McKenney  * There are memory-ordering constraints implied by synchronize_srcu().
1328dad81a20SPaul E. McKenney  * On systems with more than one CPU, when synchronize_srcu() returns,
1329dad81a20SPaul E. McKenney  * each CPU is guaranteed to have executed a full memory barrier since
13306eb95cc4SPaul E. McKenney  * the end of its last corresponding SRCU read-side critical section
1331dad81a20SPaul E. McKenney  * whose beginning preceded the call to synchronize_srcu().  In addition,
1332dad81a20SPaul E. McKenney  * each CPU having an SRCU read-side critical section that extends beyond
1333dad81a20SPaul E. McKenney  * the return from synchronize_srcu() is guaranteed to have executed a
1334dad81a20SPaul E. McKenney  * full memory barrier after the beginning of synchronize_srcu() and before
1335dad81a20SPaul E. McKenney  * the beginning of that SRCU read-side critical section.  Note that these
1336dad81a20SPaul E. McKenney  * guarantees include CPUs that are offline, idle, or executing in user mode,
1337dad81a20SPaul E. McKenney  * as well as CPUs that are executing in the kernel.
1338dad81a20SPaul E. McKenney  *
1339dad81a20SPaul E. McKenney  * Furthermore, if CPU A invoked synchronize_srcu(), which returned
1340dad81a20SPaul E. McKenney  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
1341dad81a20SPaul E. McKenney  * to have executed a full memory barrier during the execution of
1342dad81a20SPaul E. McKenney  * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
1343dad81a20SPaul E. McKenney  * are the same CPU, but again only if the system has more than one CPU.
1344dad81a20SPaul E. McKenney  *
1345dad81a20SPaul E. McKenney  * Of course, these memory-ordering guarantees apply only when
1346dad81a20SPaul E. McKenney  * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
1347dad81a20SPaul E. McKenney  * passed the same srcu_struct structure.
13482da4b2a7SPaul E. McKenney  *
13493d3a0d1bSPaul E. McKenney  * Implementation of these memory-ordering guarantees is similar to
13503d3a0d1bSPaul E. McKenney  * that of synchronize_rcu().
13513d3a0d1bSPaul E. McKenney  *
13522da4b2a7SPaul E. McKenney  * If SRCU is likely idle, expedite the first request.  This semantic
13532da4b2a7SPaul E. McKenney  * was provided by Classic SRCU, and is relied upon by its users, so TREE
13542da4b2a7SPaul E. McKenney  * SRCU must also provide it.  Note that detecting idleness is heuristic
13552da4b2a7SPaul E. McKenney  * and subject to both false positives and negatives.
1356dad81a20SPaul E. McKenney  */
1357aacb5d91SPaul E. McKenney void synchronize_srcu(struct srcu_struct *ssp)
1358dad81a20SPaul E. McKenney {
1359aacb5d91SPaul E. McKenney 	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
1360aacb5d91SPaul E. McKenney 		synchronize_srcu_expedited(ssp);
1361dad81a20SPaul E. McKenney 	else
1362aacb5d91SPaul E. McKenney 		__synchronize_srcu(ssp, true);
1363dad81a20SPaul E. McKenney }
1364dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(synchronize_srcu);
1365dad81a20SPaul E. McKenney 
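/*
 * Illustrative sketch of the classic reader/updater pairing that
 * synchronize_srcu()'s guarantees support, reusing the hypothetical
 * example_srcu and example_node from the call_srcu() sketch above.
 */
static struct example_node __rcu *example_global;

static int __maybe_unused example_read_value(void)
{
	int idx;
	int val = -1;
	struct example_node *enp;

	idx = srcu_read_lock(&example_srcu);
	enp = srcu_dereference(example_global, &example_srcu);
	if (enp)
		val = enp->value;
	srcu_read_unlock(&example_srcu, idx);
	return val;
}

static void __maybe_unused example_replace(struct example_node *newp)
{
	struct example_node *oldp;

	oldp = rcu_dereference_protected(example_global, 1);
	rcu_assign_pointer(example_global, newp);
	synchronize_srcu(&example_srcu);	/* Wait out pre-existing readers. */
	kfree(oldp);
}
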
13665358c9faSPaul E. McKenney /**
13675358c9faSPaul E. McKenney  * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
13685358c9faSPaul E. McKenney  * @ssp: srcu_struct to provide cookie for.
13695358c9faSPaul E. McKenney  *
13705358c9faSPaul E. McKenney  * This function returns a cookie that can be passed to
13715358c9faSPaul E. McKenney  * poll_state_synchronize_srcu(), which will return true if a full grace
13725358c9faSPaul E. McKenney  * period has elapsed in the meantime.  It is the caller's responsibility
13735358c9faSPaul E. McKenney  * to make sure that the grace period happens, for example, by invoking
13745358c9faSPaul E. McKenney  * call_srcu() after return from get_state_synchronize_srcu().
13755358c9faSPaul E. McKenney  */
13765358c9faSPaul E. McKenney unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
13775358c9faSPaul E. McKenney {
13785358c9faSPaul E. McKenney 	// Any prior manipulation of SRCU-protected data must happen
13795358c9faSPaul E. McKenney 	// before the load from ->srcu_gp_seq.
13805358c9faSPaul E. McKenney 	smp_mb();
13815358c9faSPaul E. McKenney 	return rcu_seq_snap(&ssp->srcu_gp_seq);
13825358c9faSPaul E. McKenney }
13835358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
13845358c9faSPaul E. McKenney 
13855358c9faSPaul E. McKenney /**
13865358c9faSPaul E. McKenney  * start_poll_synchronize_srcu - Provide cookie and start grace period
13875358c9faSPaul E. McKenney  * @ssp: srcu_struct to provide cookie for.
13885358c9faSPaul E. McKenney  *
13895358c9faSPaul E. McKenney  * This function returns a cookie that can be passed to
13905358c9faSPaul E. McKenney  * poll_state_synchronize_srcu(), which will return true if a full grace
13915358c9faSPaul E. McKenney  * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
13925358c9faSPaul E. McKenney  * this function also ensures that any needed SRCU grace period will be
13935358c9faSPaul E. McKenney  * started.  This convenience does come at a cost in terms of CPU overhead.
13945358c9faSPaul E. McKenney  */
13955358c9faSPaul E. McKenney unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
13965358c9faSPaul E. McKenney {
13975358c9faSPaul E. McKenney 	return srcu_gp_start_if_needed(ssp, NULL, true);
13985358c9faSPaul E. McKenney }
13995358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
14005358c9faSPaul E. McKenney 
14015358c9faSPaul E. McKenney /**
14025358c9faSPaul E. McKenney  * poll_state_synchronize_srcu - Has cookie's grace period ended?
14035358c9faSPaul E. McKenney  * @ssp: srcu_struct to provide cookie for.
14045358c9faSPaul E. McKenney  * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
14055358c9faSPaul E. McKenney  *
14065358c9faSPaul E. McKenney  * This function takes the cookie that was returned from either
14075358c9faSPaul E. McKenney  * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
14085358c9faSPaul E. McKenney  * returns @true if an SRCU grace period elapsed since the time that the
14095358c9faSPaul E. McKenney  * cookie was created.
14104e7ccfaeSPaul E. McKenney  *
14114e7ccfaeSPaul E. McKenney  * Because cookies are finite in size, wrapping/overflow is possible.
14124e7ccfaeSPaul E. McKenney  * This is more pronounced on 32-bit systems, where cookies are 32 bits
14134e7ccfaeSPaul E. McKenney  * and wrapping could in theory happen in about 14 hours assuming
14144e7ccfaeSPaul E. McKenney  * 25-microsecond expedited SRCU grace periods.  However, a more likely
14154e7ccfaeSPaul E. McKenney  * overflow lower bound is on the order of 24 days in the case of
14164e7ccfaeSPaul E. McKenney  * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
14174e7ccfaeSPaul E. McKenney  * system requires geologic timespans, as in more than seven million years
14184e7ccfaeSPaul E. McKenney  * even for expedited SRCU grace periods.
14194e7ccfaeSPaul E. McKenney  *
14204e7ccfaeSPaul E. McKenney  * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
14214e7ccfaeSPaul E. McKenney  * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU.  This uses
14224e7ccfaeSPaul E. McKenney  * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
14234e7ccfaeSPaul E. McKenney  * few minutes.  If this proves to be a problem, this counter will be
14244e7ccfaeSPaul E. McKenney  * expanded to the same size as for Tree SRCU.
14255358c9faSPaul E. McKenney  */
14265358c9faSPaul E. McKenney bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
14275358c9faSPaul E. McKenney {
14285358c9faSPaul E. McKenney 	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
14295358c9faSPaul E. McKenney 		return false;
14305358c9faSPaul E. McKenney 	// Ensure that the end of the SRCU grace period happens before
14315358c9faSPaul E. McKenney 	// any subsequent code that the caller might execute.
14325358c9faSPaul E. McKenney 	smp_mb(); // ^^^
14335358c9faSPaul E. McKenney 	return true;
14345358c9faSPaul E. McKenney }
14355358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
14365358c9faSPaul E. McKenney 
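/*
 * Illustrative sketch pairing the polling functions above so that an
 * updater never blocks, again reusing the hypothetical example_srcu.
 * get_state_synchronize_srcu() could replace start_poll_synchronize_srcu()
 * here if the caller itself guaranteed a later grace period, for
 * example, via call_srcu().
 */
static unsigned long example_gp_cookie;

static void __maybe_unused example_begin_wait(void)
{
	/* Snapshot the cookie, starting a grace period if needed. */
	example_gp_cookie = start_poll_synchronize_srcu(&example_srcu);
}

static bool __maybe_unused example_wait_over(void)
{
	/* True once a full grace period has elapsed since the snapshot. */
	return poll_state_synchronize_srcu(&example_srcu, example_gp_cookie);
}
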
1437da915ad5SPaul E. McKenney /*
1438da915ad5SPaul E. McKenney  * Callback function for srcu_barrier() use.
1439da915ad5SPaul E. McKenney  */
1440da915ad5SPaul E. McKenney static void srcu_barrier_cb(struct rcu_head *rhp)
1441da915ad5SPaul E. McKenney {
1442da915ad5SPaul E. McKenney 	struct srcu_data *sdp;
1443aacb5d91SPaul E. McKenney 	struct srcu_struct *ssp;
1444da915ad5SPaul E. McKenney 
1445da915ad5SPaul E. McKenney 	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
1446aacb5d91SPaul E. McKenney 	ssp = sdp->ssp;
1447aacb5d91SPaul E. McKenney 	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1448aacb5d91SPaul E. McKenney 		complete(&ssp->srcu_barrier_completion);
1449da915ad5SPaul E. McKenney }
1450da915ad5SPaul E. McKenney 
1451994f7068SPaul E. McKenney /*
1452994f7068SPaul E. McKenney  * Enqueue an srcu_barrier() callback on the specified srcu_data
1453994f7068SPaul E. McKenney  * structure's ->cblist, but only if that ->cblist already has at least one
1454994f7068SPaul E. McKenney  * callback enqueued.  Note that if a CPU already has callbacks enqueued,
1455994f7068SPaul E. McKenney  * it must have already registered the need for a future grace period,
1456994f7068SPaul E. McKenney  * so all we need do is enqueue a callback that will use the same grace
1457994f7068SPaul E. McKenney  * period as the last callback already in the queue.
1458994f7068SPaul E. McKenney  */
1459994f7068SPaul E. McKenney static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
1460994f7068SPaul E. McKenney {
1461994f7068SPaul E. McKenney 	spin_lock_irq_rcu_node(sdp);
1462994f7068SPaul E. McKenney 	atomic_inc(&ssp->srcu_barrier_cpu_cnt);
1463994f7068SPaul E. McKenney 	sdp->srcu_barrier_head.func = srcu_barrier_cb;
1464994f7068SPaul E. McKenney 	debug_rcu_head_queue(&sdp->srcu_barrier_head);
1465994f7068SPaul E. McKenney 	if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
1466994f7068SPaul E. McKenney 				   &sdp->srcu_barrier_head)) {
1467994f7068SPaul E. McKenney 		debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
1468994f7068SPaul E. McKenney 		atomic_dec(&ssp->srcu_barrier_cpu_cnt);
1469994f7068SPaul E. McKenney 	}
1470994f7068SPaul E. McKenney 	spin_unlock_irq_rcu_node(sdp);
1471994f7068SPaul E. McKenney }
1472994f7068SPaul E. McKenney 
1473dad81a20SPaul E. McKenney /**
1474dad81a20SPaul E. McKenney  * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1475aacb5d91SPaul E. McKenney  * @ssp: srcu_struct on which to wait for in-flight callbacks.
1476dad81a20SPaul E. McKenney  */
1477aacb5d91SPaul E. McKenney void srcu_barrier(struct srcu_struct *ssp)
1478dad81a20SPaul E. McKenney {
1479da915ad5SPaul E. McKenney 	int cpu;
1480e2f63836SPaul E. McKenney 	int idx;
1481aacb5d91SPaul E. McKenney 	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
1482da915ad5SPaul E. McKenney 
1483aacb5d91SPaul E. McKenney 	check_init_srcu_struct(ssp);
1484aacb5d91SPaul E. McKenney 	mutex_lock(&ssp->srcu_barrier_mutex);
1485aacb5d91SPaul E. McKenney 	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
1486da915ad5SPaul E. McKenney 		smp_mb(); /* Force ordering following return. */
1487aacb5d91SPaul E. McKenney 		mutex_unlock(&ssp->srcu_barrier_mutex);
1488da915ad5SPaul E. McKenney 		return; /* Someone else did our work for us. */
1489da915ad5SPaul E. McKenney 	}
1490aacb5d91SPaul E. McKenney 	rcu_seq_start(&ssp->srcu_barrier_seq);
1491aacb5d91SPaul E. McKenney 	init_completion(&ssp->srcu_barrier_completion);
1492da915ad5SPaul E. McKenney 
1493da915ad5SPaul E. McKenney 	/* Initial count prevents reaching zero until all CBs are posted. */
1494aacb5d91SPaul E. McKenney 	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
1495da915ad5SPaul E. McKenney 
149627120e7dSPaul E. McKenney 	idx = __srcu_read_lock_nmisafe(ssp, false);
1497994f7068SPaul E. McKenney 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
1498994f7068SPaul E. McKenney 		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
1499994f7068SPaul E. McKenney 	else
1500994f7068SPaul E. McKenney 		for_each_possible_cpu(cpu)
1501994f7068SPaul E. McKenney 			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
150227120e7dSPaul E. McKenney 	__srcu_read_unlock_nmisafe(ssp, idx, false);
1503da915ad5SPaul E. McKenney 
1504da915ad5SPaul E. McKenney 	/* Remove the initial count, at which point reaching zero can happen. */
1505aacb5d91SPaul E. McKenney 	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1506aacb5d91SPaul E. McKenney 		complete(&ssp->srcu_barrier_completion);
1507aacb5d91SPaul E. McKenney 	wait_for_completion(&ssp->srcu_barrier_completion);
1508da915ad5SPaul E. McKenney 
1509aacb5d91SPaul E. McKenney 	rcu_seq_end(&ssp->srcu_barrier_seq);
1510aacb5d91SPaul E. McKenney 	mutex_unlock(&ssp->srcu_barrier_mutex);
1511dad81a20SPaul E. McKenney }
1512dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_barrier);
1513dad81a20SPaul E. McKenney 
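/*
 * Illustrative sketch of the usual teardown ordering for a dynamically
 * allocated srcu_struct.  The function name is hypothetical;
 * srcu_barrier() and cleanup_srcu_struct() are the documented API.
 */
static void __maybe_unused example_teardown(struct srcu_struct *ssp)
{
	/* Caller must first ensure no further call_srcu() invocations. */
	srcu_barrier(ssp);		/* Wait for in-flight callbacks. */
	cleanup_srcu_struct(ssp);	/* Now safe to release the structure. */
}
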
1514dad81a20SPaul E. McKenney /**
1515dad81a20SPaul E. McKenney  * srcu_batches_completed - return batches completed.
1516aacb5d91SPaul E. McKenney  * @ssp: srcu_struct on which to report batch completion.
1517dad81a20SPaul E. McKenney  *
1518dad81a20SPaul E. McKenney  * Report the number of batches, correlated with, but not necessarily
1519dad81a20SPaul E. McKenney  * precisely the same as, the number of grace periods that have elapsed.
1520dad81a20SPaul E. McKenney  */
1521aacb5d91SPaul E. McKenney unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1522dad81a20SPaul E. McKenney {
152339f91504SPaul E. McKenney 	return READ_ONCE(ssp->srcu_idx);
1524dad81a20SPaul E. McKenney }
1525dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_batches_completed);
1526dad81a20SPaul E. McKenney 
1527dad81a20SPaul E. McKenney /*
1528da915ad5SPaul E. McKenney  * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
1529da915ad5SPaul E. McKenney  * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
1530da915ad5SPaul E. McKenney  * completed in that state.
1531dad81a20SPaul E. McKenney  */
1532aacb5d91SPaul E. McKenney static void srcu_advance_state(struct srcu_struct *ssp)
1533dad81a20SPaul E. McKenney {
1534dad81a20SPaul E. McKenney 	int idx;
1535dad81a20SPaul E. McKenney 
1536aacb5d91SPaul E. McKenney 	mutex_lock(&ssp->srcu_gp_mutex);
1537da915ad5SPaul E. McKenney 
1538dad81a20SPaul E. McKenney 	/*
1539dad81a20SPaul E. McKenney 	 * Because readers might be delayed for an extended period after
1540da915ad5SPaul E. McKenney 	 * fetching ->srcu_idx for their index, at any point in time there
1541dad81a20SPaul E. McKenney 	 * might well be readers using both idx=0 and idx=1.  We therefore
1542dad81a20SPaul E. McKenney 	 * need to wait for readers to clear from both index values before
1543dad81a20SPaul E. McKenney 	 * invoking a callback.
1544dad81a20SPaul E. McKenney 	 *
1545dad81a20SPaul E. McKenney 	 * The load-acquire ensures that we see the accesses performed
1546dad81a20SPaul E. McKenney 	 * by the prior grace period.
1547dad81a20SPaul E. McKenney 	 */
1548aacb5d91SPaul E. McKenney 	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
1549dad81a20SPaul E. McKenney 	if (idx == SRCU_STATE_IDLE) {
1550aacb5d91SPaul E. McKenney 		spin_lock_irq_rcu_node(ssp);
1551aacb5d91SPaul E. McKenney 		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1552aacb5d91SPaul E. McKenney 			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
1553aacb5d91SPaul E. McKenney 			spin_unlock_irq_rcu_node(ssp);
1554aacb5d91SPaul E. McKenney 			mutex_unlock(&ssp->srcu_gp_mutex);
1555dad81a20SPaul E. McKenney 			return;
1556dad81a20SPaul E. McKenney 		}
1557aacb5d91SPaul E. McKenney 		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
1558dad81a20SPaul E. McKenney 		if (idx == SRCU_STATE_IDLE)
1559aacb5d91SPaul E. McKenney 			srcu_gp_start(ssp);
1560aacb5d91SPaul E. McKenney 		spin_unlock_irq_rcu_node(ssp);
1561da915ad5SPaul E. McKenney 		if (idx != SRCU_STATE_IDLE) {
1562aacb5d91SPaul E. McKenney 			mutex_unlock(&ssp->srcu_gp_mutex);
1563dad81a20SPaul E. McKenney 			return; /* Someone else started the grace period. */
1564dad81a20SPaul E. McKenney 		}
1565da915ad5SPaul E. McKenney 	}
1566dad81a20SPaul E. McKenney 
1567aacb5d91SPaul E. McKenney 	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1568aacb5d91SPaul E. McKenney 		idx = 1 ^ (ssp->srcu_idx & 1);
1569aacb5d91SPaul E. McKenney 		if (!try_check_zero(ssp, idx, 1)) {
1570aacb5d91SPaul E. McKenney 			mutex_unlock(&ssp->srcu_gp_mutex);
1571dad81a20SPaul E. McKenney 			return; /* readers present, retry later. */
1572da915ad5SPaul E. McKenney 		}
1573aacb5d91SPaul E. McKenney 		srcu_flip(ssp);
157471042606SPaul E. McKenney 		spin_lock_irq_rcu_node(ssp);
1575aacb5d91SPaul E. McKenney 		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
1576282d8998SPaul E. McKenney 		ssp->srcu_n_exp_nodelay = 0;
157771042606SPaul E. McKenney 		spin_unlock_irq_rcu_node(ssp);
1578dad81a20SPaul E. McKenney 	}
1579dad81a20SPaul E. McKenney 
1580aacb5d91SPaul E. McKenney 	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1581dad81a20SPaul E. McKenney 
1582dad81a20SPaul E. McKenney 		/*
1583dad81a20SPaul E. McKenney 		 * SRCU read-side critical sections are normally short,
1584dad81a20SPaul E. McKenney 		 * so check at least twice in quick succession after a flip.
1585dad81a20SPaul E. McKenney 		 */
1586aacb5d91SPaul E. McKenney 		idx = 1 ^ (ssp->srcu_idx & 1);
1587aacb5d91SPaul E. McKenney 		if (!try_check_zero(ssp, idx, 2)) {
1588aacb5d91SPaul E. McKenney 			mutex_unlock(&ssp->srcu_gp_mutex);
1589da915ad5SPaul E. McKenney 			return; /* readers present, retry later. */
1590da915ad5SPaul E. McKenney 		}
1591282d8998SPaul E. McKenney 		ssp->srcu_n_exp_nodelay = 0;
1592aacb5d91SPaul E. McKenney 		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
1593dad81a20SPaul E. McKenney 	}
1594dad81a20SPaul E. McKenney }
1595dad81a20SPaul E. McKenney 
1596dad81a20SPaul E. McKenney /*
1597dad81a20SPaul E. McKenney  * Invoke a limited number of SRCU callbacks that have passed through
1598dad81a20SPaul E. McKenney  * their grace period.  If there are more to do, SRCU will reschedule
1599dad81a20SPaul E. McKenney  * the workqueue.  Note that needed memory barriers have been executed
1600dad81a20SPaul E. McKenney  * in this task's context by srcu_readers_active_idx_check().
1601dad81a20SPaul E. McKenney  */
1602da915ad5SPaul E. McKenney static void srcu_invoke_callbacks(struct work_struct *work)
1603dad81a20SPaul E. McKenney {
1604ae5c2341SJoel Fernandes (Google) 	long len;
1605da915ad5SPaul E. McKenney 	bool more;
1606dad81a20SPaul E. McKenney 	struct rcu_cblist ready_cbs;
1607dad81a20SPaul E. McKenney 	struct rcu_head *rhp;
1608da915ad5SPaul E. McKenney 	struct srcu_data *sdp;
1609aacb5d91SPaul E. McKenney 	struct srcu_struct *ssp;
1610dad81a20SPaul E. McKenney 
1611e81baf4cSSebastian Andrzej Siewior 	sdp = container_of(work, struct srcu_data, work);
1612e81baf4cSSebastian Andrzej Siewior 
1613aacb5d91SPaul E. McKenney 	ssp = sdp->ssp;
1614dad81a20SPaul E. McKenney 	rcu_cblist_init(&ready_cbs);
1615d6331980SPaul E. McKenney 	spin_lock_irq_rcu_node(sdp);
1616da915ad5SPaul E. McKenney 	rcu_segcblist_advance(&sdp->srcu_cblist,
1617aacb5d91SPaul E. McKenney 			      rcu_seq_current(&ssp->srcu_gp_seq));
1618da915ad5SPaul E. McKenney 	if (sdp->srcu_cblist_invoking ||
1619da915ad5SPaul E. McKenney 	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
1620d6331980SPaul E. McKenney 		spin_unlock_irq_rcu_node(sdp);
1621da915ad5SPaul E. McKenney 		return;  /* Someone else on the job or nothing to do. */
1622da915ad5SPaul E. McKenney 	}
1623da915ad5SPaul E. McKenney 
1624da915ad5SPaul E. McKenney 	/* We are on the job!  Extract and invoke ready callbacks. */
1625da915ad5SPaul E. McKenney 	sdp->srcu_cblist_invoking = true;
1626da915ad5SPaul E. McKenney 	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
1627ae5c2341SJoel Fernandes (Google) 	len = ready_cbs.len;
1628d6331980SPaul E. McKenney 	spin_unlock_irq_rcu_node(sdp);
1629dad81a20SPaul E. McKenney 	rhp = rcu_cblist_dequeue(&ready_cbs);
1630dad81a20SPaul E. McKenney 	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
1631a602538eSPaul E. McKenney 		debug_rcu_head_unqueue(rhp);
1632dad81a20SPaul E. McKenney 		local_bh_disable();
1633dad81a20SPaul E. McKenney 		rhp->func(rhp);
1634dad81a20SPaul E. McKenney 		local_bh_enable();
1635dad81a20SPaul E. McKenney 	}
1636ae5c2341SJoel Fernandes (Google) 	WARN_ON_ONCE(ready_cbs.len);
1637da915ad5SPaul E. McKenney 
1638da915ad5SPaul E. McKenney 	/*
1639da915ad5SPaul E. McKenney 	 * Update counts, accelerate new callbacks, and if needed,
1640da915ad5SPaul E. McKenney 	 * schedule another round of callback invocation.
1641da915ad5SPaul E. McKenney 	 */
1642d6331980SPaul E. McKenney 	spin_lock_irq_rcu_node(sdp);
1643ae5c2341SJoel Fernandes (Google) 	rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
1644da915ad5SPaul E. McKenney 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
1645aacb5d91SPaul E. McKenney 				       rcu_seq_snap(&ssp->srcu_gp_seq));
1646da915ad5SPaul E. McKenney 	sdp->srcu_cblist_invoking = false;
1647da915ad5SPaul E. McKenney 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
1648d6331980SPaul E. McKenney 	spin_unlock_irq_rcu_node(sdp);
1649da915ad5SPaul E. McKenney 	if (more)
1650da915ad5SPaul E. McKenney 		srcu_schedule_cbs_sdp(sdp, 0);
1651dad81a20SPaul E. McKenney }
1652dad81a20SPaul E. McKenney 
1653dad81a20SPaul E. McKenney /*
1654dad81a20SPaul E. McKenney  * Finished one round of SRCU grace-period processing.  Start another if there are
1655dad81a20SPaul E. McKenney  * more SRCU callbacks queued, otherwise put SRCU into not-running state.
1656dad81a20SPaul E. McKenney  */
1657aacb5d91SPaul E. McKenney static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1658dad81a20SPaul E. McKenney {
1659da915ad5SPaul E. McKenney 	bool pushgp = true;
1660dad81a20SPaul E. McKenney 
1661aacb5d91SPaul E. McKenney 	spin_lock_irq_rcu_node(ssp);
1662aacb5d91SPaul E. McKenney 	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1663aacb5d91SPaul E. McKenney 		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
1664da915ad5SPaul E. McKenney 			/* All requests fulfilled, time to go idle. */
1665da915ad5SPaul E. McKenney 			pushgp = false;
1666dad81a20SPaul E. McKenney 		}
1667aacb5d91SPaul E. McKenney 	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
1668da915ad5SPaul E. McKenney 		/* Outstanding request and no GP.  Start one. */
1669aacb5d91SPaul E. McKenney 		srcu_gp_start(ssp);
1670da915ad5SPaul E. McKenney 	}
1671aacb5d91SPaul E. McKenney 	spin_unlock_irq_rcu_node(ssp);
1672dad81a20SPaul E. McKenney 
1673da915ad5SPaul E. McKenney 	if (pushgp)
1674aacb5d91SPaul E. McKenney 		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
1675dad81a20SPaul E. McKenney }
1676dad81a20SPaul E. McKenney 
1677dad81a20SPaul E. McKenney /*
1678dad81a20SPaul E. McKenney  * This is the work-queue function that handles SRCU grace periods.
1679dad81a20SPaul E. McKenney  */
16800d8a1e83SPaul E. McKenney static void process_srcu(struct work_struct *work)
1681dad81a20SPaul E. McKenney {
1682282d8998SPaul E. McKenney 	unsigned long curdelay;
1683282d8998SPaul E. McKenney 	unsigned long j;
1684aacb5d91SPaul E. McKenney 	struct srcu_struct *ssp;
1685dad81a20SPaul E. McKenney 
1686aacb5d91SPaul E. McKenney 	ssp = container_of(work, struct srcu_struct, work.work);
1687dad81a20SPaul E. McKenney 
1688aacb5d91SPaul E. McKenney 	srcu_advance_state(ssp);
1689282d8998SPaul E. McKenney 	curdelay = srcu_get_delay(ssp);
1690282d8998SPaul E. McKenney 	if (curdelay) {
1691282d8998SPaul E. McKenney 		WRITE_ONCE(ssp->reschedule_count, 0);
1692282d8998SPaul E. McKenney 	} else {
1693282d8998SPaul E. McKenney 		j = jiffies;
1694282d8998SPaul E. McKenney 		if (READ_ONCE(ssp->reschedule_jiffies) == j) {
1695282d8998SPaul E. McKenney 			WRITE_ONCE(ssp->reschedule_count, READ_ONCE(ssp->reschedule_count) + 1);
16964f2bfd94SNeeraj Upadhyay 			if (READ_ONCE(ssp->reschedule_count) > srcu_max_nodelay)
1697282d8998SPaul E. McKenney 				curdelay = 1;
1698282d8998SPaul E. McKenney 		} else {
1699282d8998SPaul E. McKenney 			WRITE_ONCE(ssp->reschedule_count, 1);
1700282d8998SPaul E. McKenney 			WRITE_ONCE(ssp->reschedule_jiffies, j);
1701282d8998SPaul E. McKenney 		}
1702282d8998SPaul E. McKenney 	}
1703282d8998SPaul E. McKenney 	srcu_reschedule(ssp, curdelay);
1704dad81a20SPaul E. McKenney }
17057f6733c3SPaul E. McKenney 
17067f6733c3SPaul E. McKenney void srcutorture_get_gp_data(enum rcutorture_type test_type,
1707aacb5d91SPaul E. McKenney 			     struct srcu_struct *ssp, int *flags,
1708aebc8264SPaul E. McKenney 			     unsigned long *gp_seq)
17097f6733c3SPaul E. McKenney {
17107f6733c3SPaul E. McKenney 	if (test_type != SRCU_FLAVOR)
17117f6733c3SPaul E. McKenney 		return;
17127f6733c3SPaul E. McKenney 	*flags = 0;
1713aacb5d91SPaul E. McKenney 	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
17147f6733c3SPaul E. McKenney }
17157f6733c3SPaul E. McKenney EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
17161f4f6da1SPaul E. McKenney 
17173bedebcfSPaul E. McKenney static const char * const srcu_size_state_name[] = {
17183bedebcfSPaul E. McKenney 	"SRCU_SIZE_SMALL",
17193bedebcfSPaul E. McKenney 	"SRCU_SIZE_ALLOC",
17203bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_BARRIER",
17213bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CALL",
17223bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CBS1",
17233bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CBS2",
17243bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CBS3",
17253bedebcfSPaul E. McKenney 	"SRCU_SIZE_WAIT_CBS4",
17263bedebcfSPaul E. McKenney 	"SRCU_SIZE_BIG",
17273bedebcfSPaul E. McKenney 	"SRCU_SIZE_???",
17283bedebcfSPaul E. McKenney };
17293bedebcfSPaul E. McKenney 
1730aacb5d91SPaul E. McKenney void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1731115a1a52SPaul E. McKenney {
1732115a1a52SPaul E. McKenney 	int cpu;
1733115a1a52SPaul E. McKenney 	int idx;
1734ac3748c6SPaul E. McKenney 	unsigned long s0 = 0, s1 = 0;
17353bedebcfSPaul E. McKenney 	int ss_state = READ_ONCE(ssp->srcu_size_state);
17363bedebcfSPaul E. McKenney 	int ss_state_idx = ss_state;
1737115a1a52SPaul E. McKenney 
1738aacb5d91SPaul E. McKenney 	idx = ssp->srcu_idx & 0x1;
17393bedebcfSPaul E. McKenney 	if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
17403bedebcfSPaul E. McKenney 		ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
17414a230f80SPaul E. McKenney 	pr_alert("%s%s Tree SRCU g%ld state %d (%s)",
17423bedebcfSPaul E. McKenney 		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), ss_state,
17434a230f80SPaul E. McKenney 		 srcu_size_state_name[ss_state_idx]);
17444a230f80SPaul E. McKenney 	if (!ssp->sda) {
17454a230f80SPaul E. McKenney 		// Called after cleanup_srcu_struct(), perhaps.
17464a230f80SPaul E. McKenney 		pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
17474a230f80SPaul E. McKenney 	} else {
17484a230f80SPaul E. McKenney 		pr_cont(" per-CPU(idx=%d):", idx);
1749115a1a52SPaul E. McKenney 		for_each_possible_cpu(cpu) {
1750115a1a52SPaul E. McKenney 			unsigned long l0, l1;
1751115a1a52SPaul E. McKenney 			unsigned long u0, u1;
1752115a1a52SPaul E. McKenney 			long c0, c1;
17535ab07a8dSPaul E. McKenney 			struct srcu_data *sdp;
1754115a1a52SPaul E. McKenney 
1755aacb5d91SPaul E. McKenney 			sdp = per_cpu_ptr(ssp->sda, cpu);
17565d0f5953SPaul E. McKenney 			u0 = data_race(atomic_long_read(&sdp->srcu_unlock_count[!idx]));
17575d0f5953SPaul E. McKenney 			u1 = data_race(atomic_long_read(&sdp->srcu_unlock_count[idx]));
1758115a1a52SPaul E. McKenney 
1759115a1a52SPaul E. McKenney 			/*
1760115a1a52SPaul E. McKenney 			 * Make sure that a lock is always counted if the corresponding
1761115a1a52SPaul E. McKenney 			 * unlock is counted.
1762115a1a52SPaul E. McKenney 			 */
1763115a1a52SPaul E. McKenney 			smp_rmb();
1764115a1a52SPaul E. McKenney 
17655d0f5953SPaul E. McKenney 			l0 = data_race(atomic_long_read(&sdp->srcu_lock_count[!idx]));
17665d0f5953SPaul E. McKenney 			l1 = data_race(atomic_long_read(&sdp->srcu_lock_count[idx]));
1767115a1a52SPaul E. McKenney 
1768115a1a52SPaul E. McKenney 			c0 = l0 - u0;
1769115a1a52SPaul E. McKenney 			c1 = l1 - u1;
17707e210a65SPaul E. McKenney 			pr_cont(" %d(%ld,%ld %c)",
17717e210a65SPaul E. McKenney 				cpu, c0, c1,
17727e210a65SPaul E. McKenney 				"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
1773ac3748c6SPaul E. McKenney 			s0 += c0;
1774ac3748c6SPaul E. McKenney 			s1 += c1;
1775115a1a52SPaul E. McKenney 		}
1776ac3748c6SPaul E. McKenney 		pr_cont(" T(%ld,%ld)\n", s0, s1);
17774a230f80SPaul E. McKenney 	}
17789f2e91d9SPaul E. McKenney 	if (SRCU_SIZING_IS_TORTURE())
177999659f64SPaul E. McKenney 		srcu_transition_to_big(ssp);
1780115a1a52SPaul E. McKenney }
1781115a1a52SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
1782115a1a52SPaul E. McKenney 
17831f4f6da1SPaul E. McKenney static int __init srcu_bootup_announce(void)
17841f4f6da1SPaul E. McKenney {
17851f4f6da1SPaul E. McKenney 	pr_info("Hierarchical SRCU implementation.\n");
17860c8e0e3cSPaul E. McKenney 	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
17870c8e0e3cSPaul E. McKenney 		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
17884f2bfd94SNeeraj Upadhyay 	if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
17894f2bfd94SNeeraj Upadhyay 		pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
17904f2bfd94SNeeraj Upadhyay 	if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
17914f2bfd94SNeeraj Upadhyay 		pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
17924f2bfd94SNeeraj Upadhyay 	pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
17931f4f6da1SPaul E. McKenney 	return 0;
17941f4f6da1SPaul E. McKenney }
17951f4f6da1SPaul E. McKenney early_initcall(srcu_bootup_announce);
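
/*
 * Example (illustrative): because this code is built in, the module
 * parameters reported above are set on the kernel command line using
 * the "srcutree." prefix, for instance:
 *
 *	srcutree.exp_holdoff=50000 srcutree.srcu_max_nodelay=100
 *
 * The numeric values here are arbitrary examples, not recommendations;
 * any non-default settings are then announced by srcu_bootup_announce().
 */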
1796e0fcba9aSPaul E. McKenney 
1797e0fcba9aSPaul E. McKenney void __init srcu_init(void)
1798e0fcba9aSPaul E. McKenney {
1799aacb5d91SPaul E. McKenney 	struct srcu_struct *ssp;
1800e0fcba9aSPaul E. McKenney 
1801a57ffb3cSPaul E. McKenney 	/* Decide on srcu_struct-size strategy. */
1802a57ffb3cSPaul E. McKenney 	if (SRCU_SIZING_IS(SRCU_SIZING_AUTO)) {
1803a57ffb3cSPaul E. McKenney 		if (nr_cpu_ids >= big_cpu_lim) {
1804a57ffb3cSPaul E. McKenney 			convert_to_big = SRCU_SIZING_INIT; // Don't bother waiting for contention.
1805a57ffb3cSPaul E. McKenney 			pr_info("%s: Setting srcu_struct sizes to big.\n", __func__);
1806a57ffb3cSPaul E. McKenney 		} else {
1807a57ffb3cSPaul E. McKenney 			convert_to_big = SRCU_SIZING_NONE | SRCU_SIZING_CONTEND;
1808a57ffb3cSPaul E. McKenney 			pr_info("%s: Setting srcu_struct sizes based on contention.\n", __func__);
1809a57ffb3cSPaul E. McKenney 		}
1810a57ffb3cSPaul E. McKenney 	}
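
	/*
	 * Example (illustrative): on a system whose nr_cpu_ids is at or
	 * above the srcutree.big_cpu_lim boot parameter, the first branch
	 * above sizes every srcu_struct big at initialization time, while
	 * smaller systems fall back to contention-driven conversion.
	 * Booting with, say, srcutree.convert_to_big=1 bypasses this
	 * AUTO-mode decision entirely.
	 */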
1811a57ffb3cSPaul E. McKenney 
18128e9c01c7SFrederic Weisbecker 	/*
18138e9c01c7SFrederic Weisbecker 	 * Once srcu_init_done is set, call_srcu() can follow the normal
18148e9c01c7SFrederic Weisbecker 	 * path and queue delayed work.  This must follow the creation of
18158e9c01c7SFrederic Weisbecker 	 * the RCU workqueues and the initialization of timers.
18168e9c01c7SFrederic Weisbecker 	 */
1817e0fcba9aSPaul E. McKenney 	srcu_init_done = true;
1818e0fcba9aSPaul E. McKenney 	while (!list_empty(&srcu_boot_list)) {
1819aacb5d91SPaul E. McKenney 		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
18204e6ea4efSPaul E. McKenney 				      work.work.entry);
1821aacb5d91SPaul E. McKenney 		list_del_init(&ssp->work.work.entry);
1822a57ffb3cSPaul E. McKenney 		if (SRCU_SIZING_IS(SRCU_SIZING_INIT) && ssp->srcu_size_state == SRCU_SIZE_SMALL)
1823a57ffb3cSPaul E. McKenney 			ssp->srcu_size_state = SRCU_SIZE_ALLOC;
1824aacb5d91SPaul E. McKenney 		queue_work(rcu_gp_wq, &ssp->work.work);
1825e0fcba9aSPaul E. McKenney 	}
1826e0fcba9aSPaul E. McKenney }
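
/*
 * Illustrative sketch (not part of the upstream file) of the deferral
 * that srcu_init() drains above: a call_srcu() issued before
 * srcu_init_done is set cannot queue delayed work, because workqueues
 * and timers are not yet available, so the srcu_struct is instead
 * placed on srcu_boot_list and queued onto rcu_gp_wq here.  The names
 * early_srcu, early_cb(), and early_boot_user() are hypothetical.
 */
DEFINE_STATIC_SRCU(early_srcu);	/* Statically initialized, usable early. */

static void early_cb(struct rcu_head *rhp)
{
	pr_info("early SRCU callback invoked after srcu_init()\n");
}

static void __maybe_unused early_boot_user(void)
{
	static struct rcu_head rh;

	/* Before srcu_init_done is set, this lands on srcu_boot_list. */
	call_srcu(&early_srcu, &rh, early_cb);
}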
1827fe15b50cSPaul E. McKenney 
1828fe15b50cSPaul E. McKenney #ifdef CONFIG_MODULES
1829fe15b50cSPaul E. McKenney 
1830fe15b50cSPaul E. McKenney /* Initialize any global-scope srcu_struct structures used by this module. */
1831fe15b50cSPaul E. McKenney static int srcu_module_coming(struct module *mod)
1832fe15b50cSPaul E. McKenney {
1833fe15b50cSPaul E. McKenney 	int i;
1834fe15b50cSPaul E. McKenney 	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1835fe15b50cSPaul E. McKenney 	int ret;
1836fe15b50cSPaul E. McKenney 
1837fe15b50cSPaul E. McKenney 	for (i = 0; i < mod->num_srcu_structs; i++) {
1838fe15b50cSPaul E. McKenney 		ret = init_srcu_struct(*(sspp++));
1839fe15b50cSPaul E. McKenney 		if (WARN_ON_ONCE(ret))
1840fe15b50cSPaul E. McKenney 			return ret;
1841fe15b50cSPaul E. McKenney 	}
1842fe15b50cSPaul E. McKenney 	return 0;
1843fe15b50cSPaul E. McKenney }
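
/*
 * Example (illustrative) of what srcu_module_coming() just initialized:
 * a module-global srcu_struct defined with DEFINE_SRCU() has a pointer
 * recorded in the module's srcu_struct_ptrs array, so the module can
 * use it without calling init_srcu_struct() itself.  The names my_srcu
 * and my_reader() are hypothetical.
 */
DEFINE_SRCU(my_srcu);

static void __maybe_unused my_reader(void)
{
	int idx;

	idx = srcu_read_lock(&my_srcu);
	/* ...read-side accesses protected by my_srcu... */
	srcu_read_unlock(&my_srcu, idx);
}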
1844fe15b50cSPaul E. McKenney 
1845fe15b50cSPaul E. McKenney /* Clean up any global-scope srcu_struct structures used by this module. */
1846fe15b50cSPaul E. McKenney static void srcu_module_going(struct module *mod)
1847fe15b50cSPaul E. McKenney {
1848fe15b50cSPaul E. McKenney 	int i;
1849fe15b50cSPaul E. McKenney 	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1850fe15b50cSPaul E. McKenney 
1851fe15b50cSPaul E. McKenney 	for (i = 0; i < mod->num_srcu_structs; i++)
1852fe15b50cSPaul E. McKenney 		cleanup_srcu_struct(*(sspp++));
1853fe15b50cSPaul E. McKenney }
1854fe15b50cSPaul E. McKenney 
1855fe15b50cSPaul E. McKenney /* Handle one module, either coming or going. */
1856fe15b50cSPaul E. McKenney static int srcu_module_notify(struct notifier_block *self,
1857fe15b50cSPaul E. McKenney 			      unsigned long val, void *data)
1858fe15b50cSPaul E. McKenney {
1859fe15b50cSPaul E. McKenney 	struct module *mod = data;
1860fe15b50cSPaul E. McKenney 	int ret = 0;
1861fe15b50cSPaul E. McKenney 
1862fe15b50cSPaul E. McKenney 	switch (val) {
1863fe15b50cSPaul E. McKenney 	case MODULE_STATE_COMING:
1864fe15b50cSPaul E. McKenney 		ret = srcu_module_coming(mod);
1865fe15b50cSPaul E. McKenney 		break;
1866fe15b50cSPaul E. McKenney 	case MODULE_STATE_GOING:
1867fe15b50cSPaul E. McKenney 		srcu_module_going(mod);
1868fe15b50cSPaul E. McKenney 		break;
1869fe15b50cSPaul E. McKenney 	default:
1870fe15b50cSPaul E. McKenney 		break;
1871fe15b50cSPaul E. McKenney 	}
1872fe15b50cSPaul E. McKenney 	return ret;
1873fe15b50cSPaul E. McKenney }
1874fe15b50cSPaul E. McKenney 
1875fe15b50cSPaul E. McKenney static struct notifier_block srcu_module_nb = {
1876fe15b50cSPaul E. McKenney 	.notifier_call = srcu_module_notify,
1877fe15b50cSPaul E. McKenney 	.priority = 0,
1878fe15b50cSPaul E. McKenney };
1879fe15b50cSPaul E. McKenney 
1880fe15b50cSPaul E. McKenney static __init int init_srcu_module_notifier(void)
1881fe15b50cSPaul E. McKenney {
1882fe15b50cSPaul E. McKenney 	int ret;
1883fe15b50cSPaul E. McKenney 
1884fe15b50cSPaul E. McKenney 	ret = register_module_notifier(&srcu_module_nb);
1885fe15b50cSPaul E. McKenney 	if (ret)
1886fe15b50cSPaul E. McKenney 		pr_warn("Failed to register srcu module notifier\n");
1887fe15b50cSPaul E. McKenney 	return ret;
1888fe15b50cSPaul E. McKenney }
1889fe15b50cSPaul E. McKenney late_initcall(init_srcu_module_notifier);
1890fe15b50cSPaul E. McKenney 
1891fe15b50cSPaul E. McKenney #endif /* #ifdef CONFIG_MODULES */
1892