/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);
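/*
 * Both parameters above are read-only at runtime (mode 0444) but may be
 * set at boot.  A hypothetical command line, for illustration only (the
 * "srcutree." prefix assumes this file is built in as srcutree.o):
 *
 *	srcutree.exp_holdoff=50000 srcutree.counter_wrap_check=4095
 *
 * Note that srcu_might_be_idle() below treats exp_holdoff == 0 as
 * disabling idle-based auto-expediting altogether.
 */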
/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)					\
do {								\
	spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)				\
do {								\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));		\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_irq_rcu_node(p)				\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)			\
do {								\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)		\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
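/*
 * Illustrative expansion (not additional API): a critical section built
 * on the wrappers above is an ordinary spinlock critical section on the
 * structure's private ->lock, except that the added
 * smp_mb__after_unlock_lock() makes each unlock+lock pair act as a full
 * memory barrier:
 *
 *	spin_lock_irqsave_rcu_node(sdp, flags);
 *	... fully ordered against prior critical sections on ->lock ...
 *	spin_unlock_irqrestore_rcu_node(sdp, flags);
 */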
/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->ssp = ssp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}
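/*
 * Worked example of the leaf mapping above (illustrative only, assuming
 * a two-level tree with levelspread[level] == 16, as can result from
 * CONFIG_RCU_FANOUT_LEAF=16): CPU 37 maps to leaf &snp_first[37 / 16],
 * that is, &snp_first[2], whose leaf spans CPUs 32..47, so that leaf
 * gets grplo == 32 and CPU 37's ->grpmask is 1 << (37 - 32).
 */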
/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(ssp, is_static);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return ssp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
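/*
 * Typical dynamic-initialization usage (a minimal sketch, not part of
 * this file; "my_srcu" is hypothetical, and statically allocated domains
 * would instead use DEFINE_SRCU() or DEFINE_STATIC_SRCU()):
 *
 *	static struct srcu_struct my_srcu;
 *
 *	if (init_srcu_struct(&my_srcu))		// Before first use.
 *		return -ENOMEM;
 *	...
 *	cleanup_srcu_struct(&my_srcu);		// After readers and callbacks are done.
 */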
/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}
/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}
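/*
 * The "store buffering" pattern mentioned above, spelled out as a
 * litmus-style sketch (illustrative only; "x" stands for any variable
 * written before the updater's synchronize_srcu()):
 *
 *	Updater				Reader
 *	-------				------
 *	WRITE_ONCE(x, 1);		this_cpu_inc(->srcu_lock_count[idx]);
 *	smp_mb();  (A)			smp_mb();  (B)
 *	reads lock/unlock counts;	r1 = READ_ONCE(x);
 *
 * The pair of full barriers forbids the outcome in which the scan misses
 * the reader's increment and the reader nevertheless gets r1 == 0, so
 * any reader that the grace period does not wait on is guaranteed to see
 * the pre-grace-period store.
 */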
/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */
void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	if (quiesced) {
		if (WARN_ON(delayed_work_pending(&ssp->work)))
			return; /* Just leak it! */
	} else {
		flush_delayed_work(&ssp->work);
	}
	for_each_possible_cpu(cpu)
		if (quiesced) {
			if (WARN_ON(delayed_work_pending(&per_cpu_ptr(ssp->sda, cpu)->work)))
				return; /* Just leak it! */
		} else {
			flush_delayed_work(&per_cpu_ptr(ssp->sda, cpu)->work);
		}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(ssp->sda);
	ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
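/*
 * Typical read-side usage built on the above (a minimal sketch, not part
 * of this file; "gp" is a hypothetical pointer protected by the equally
 * hypothetical "my_srcu" domain):
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	... use p; the critical section may block, hence "sleepable" ...
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * The returned index tells srcu_read_unlock() which rank of counters the
 * reader started on, that is, which ->srcu_unlock_count[] to increment.
 */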
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

/*
 * Track online CPUs to guide callback workqueue placement.
 */
DEFINE_PER_CPU(bool, srcu_online);

void srcu_online_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

void srcu_offline_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}

/*
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever.  This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	bool ret;

	preempt_disable();
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	preempt_enable();
	return ret;
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}
/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(ssp);
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		ssp->srcu_gp_seq_needed_exp = gpseq;
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(ssp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}
}
/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		ssp->srcu_gp_seq_needed_exp = s;
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}
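/*
 * A note on grace-period sequence numbers, on which the funnel-locking
 * code below leans heavily (illustrative; the encoding itself lives in
 * rcu.h): the low-order bits of ->srcu_gp_seq hold the phase
 * (SRCU_STATE_IDLE, SRCU_STATE_SCAN1, or SRCU_STATE_SCAN2) and the
 * remaining bits count grace periods, so rcu_seq_ctr(s) extracts the
 * count and rcu_seq_ctr(s) % 2 selects which ->srcu_have_cbs[] and
 * ->srcu_data_have_cbs[] element covers grace period s.
 */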
/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(ssp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		ssp->srcu_gp_seq_needed_exp = s;
	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}
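/*
 * Illustrative two-phase sequence built on try_check_zero() and
 * srcu_flip() (a sketch only; the state machine that actually drives
 * this sequence is srcu_advance_state(), later in this file):
 *
 *	idx = 1 ^ (ssp->srcu_idx & 1);	// The currently inactive rank.
 *	try_check_zero(ssp, idx, ...);	// Drain readers left from prior GP.
 *	srcu_flip(ssp);			// New readers now start on rank idx.
 *	try_check_zero(ssp, 1 ^ (ssp->srcu_idx & 1), ...); // Drain old active rank.
 */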
/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;

	/* If the local srcu_data structure has callbacks, not idle. */
	local_irq_save(flags);
	sdp = this_cpu_ptr(ssp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, ssp->srcu_last_gp_end,
			       ssp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */
	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(ssp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	idx = srcu_read_lock(ssp);
	local_irq_save(flags);
	sdp = this_cpu_ptr(ssp->sda);
	spin_lock_rcu_node(sdp);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp->mynode, s);
	srcu_read_unlock(ssp, idx);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
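/*
 * Typical call_srcu() usage (a minimal sketch, not part of this file;
 * "struct foo", "foo_reclaim()", and "my_srcu" are hypothetical):
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *	...
 *	// After removing p from all reader-visible structures:
 *	call_srcu(&my_srcu, &p->rh, foo_reclaim);
 */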

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
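
/*
 * Illustrative sketch only (hypothetical function and pointers):
 * expediting trades CPU cycles for latency, so reserve it for rare,
 * latency-sensitive updates.
 *
 *	static void foo_reconfig(struct foo *newfp)
 *	{
 *		struct foo *old = global_foo;
 *
 *		rcu_assign_pointer(global_foo, newfp);
 *		synchronize_srcu_expedited(&foo_srcu);
 *		kfree(old);  // no reader can still hold old
 *	}
 *
 * Also note the rcu_gp_is_normal() check above: the rcupdate.rcu_normal
 * boot parameter can force even "expedited" requests to use normal
 * grace periods.
 */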

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both index values to drain to zero.  To avoid
 * starving synchronize_srcu(), this first waits for the count of
 * index=((->srcu_idx & 1) ^ 1) to drain to zero, then flips ->srcu_idx
 * and waits for the count of the other index to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(ssp);
	else
		__synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
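
/*
 * Reader-side sketch for the guarantees above (hypothetical names):
 *
 *	int idx;
 *	struct foo *fp;
 *
 *	idx = srcu_read_lock(&foo_srcu);
 *	fp = srcu_dereference(global_foo, &foo_srcu);
 *	if (fp)
 *		use(fp->data);  // must not call synchronize_srcu(&foo_srcu)!
 *	srcu_read_unlock(&foo_srcu, idx);
 *
 * A pre-existing reader like this is what synchronize_srcu() waits out.
 * Calling synchronize_srcu() on foo_srcu from within this section would
 * self-deadlock, but waiting on a *different* srcu_struct here is legal
 * as long as the resulting dependency graph stays acyclic.
 */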

/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	ssp = sdp->ssp;
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

	check_init_srcu_struct(ssp);
	mutex_lock(&ssp->srcu_barrier_mutex);
	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&ssp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&ssp->srcu_barrier_seq);
	init_completion(&ssp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
		atomic_inc(&ssp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&ssp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
	wait_for_completion(&ssp->srcu_barrier_completion);

	rcu_seq_end(&ssp->srcu_barrier_seq);
	mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
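
/*
 * Typical teardown sketch (hypothetical, assuming foo_srcu was set up
 * with init_srcu_struct()): make sure no callbacks are still in flight
 * before cleaning up.
 *
 *	static void foo_exit(void)
 *	{
 *		// stop posting new callbacks, then:
 *		srcu_barrier(&foo_srcu);        // wait for queued CBs
 *		cleanup_srcu_struct(&foo_srcu); // release per-CPU state
 *	}
 *
 * Note that srcu_barrier() waits only for callbacks already queued;
 * it does not by itself wait for a full grace period.
 */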

/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
	return ssp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *ssp)
{
	int idx;

	mutex_lock(&ssp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(ssp);
		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(ssp);
			mutex_unlock(&ssp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 1)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(ssp);
		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 2)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
	}
}
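
/*
 * For readers of the state machine above: ->srcu_gp_seq packs the
 * grace-period counter and the state into one unsigned long.  A worked
 * example, assuming the rcu_seq helpers in kernel/rcu/rcu.h (two
 * low-order state bits):
 *
 *	seq = 0x15;                 // binary 10101
 *	rcu_seq_state(seq);         // 0x15 & 0x3 == SRCU_STATE_SCAN1 (1)
 *	seq >> RCU_SEQ_CTR_SHIFT;   // 0x15 >> 2 == 5 completed periods
 *
 * So a single load-acquire of ->srcu_gp_seq tells srcu_advance_state()
 * both where the current grace period stands and how many have elapsed.
 */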

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(work, struct srcu_data, work.work);
	ssp = sdp->ssp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return; /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(ssp);
	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(ssp);
	}
	spin_unlock_irq_rcu_node(ssp);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *ssp;

	ssp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(ssp);
	srcu_reschedule(ssp, srcu_get_delay(ssp));
}

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *ssp, int *flags,
			     unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;

	idx = ssp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *sdp;

		sdp = per_cpu_ptr(ssp->sda, cpu);
		u0 = sdp->srcu_unlock_count[!idx];
		u1 = sdp->srcu_unlock_count[idx];

		/*
		 * Make sure that a lock is always counted if the corresponding
		 * unlock is counted.
		 */
		smp_rmb();

		l0 = sdp->srcu_lock_count[!idx];
		l1 = sdp->srcu_lock_count[idx];

		c0 = l0 - u0;
		c1 = l1 - u1;
		pr_cont(" %d(%ld,%ld %1p)",
			cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
		s0 += c0;
		s1 += c1;
	}
	pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);

void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
				       work.work.entry);
		check_init_srcu_struct(ssp);
		list_del_init(&ssp->work.work.entry);
		queue_work(rcu_gp_wq, &ssp->work.work);
	}
}
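
/*
 * Initialization sketch (hypothetical names).  An srcu_struct can be
 * created either statically or dynamically:
 *
 *	DEFINE_SRCU(foo_srcu);               // static, usable at boot
 *
 *	static struct srcu_struct bar_srcu;  // dynamic
 *	ret = init_srcu_struct(&bar_srcu);   // 0 on success, else -ENOMEM
 *
 * Grace-period work requested before srcu_init() runs is parked on
 * srcu_boot_list and released by srcu_init() above once the workqueue
 * machinery is available.
 */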