1dad81a20SPaul E. McKenney /* 2dad81a20SPaul E. McKenney * Sleepable Read-Copy Update mechanism for mutual exclusion. 3dad81a20SPaul E. McKenney * 4dad81a20SPaul E. McKenney * This program is free software; you can redistribute it and/or modify 5dad81a20SPaul E. McKenney * it under the terms of the GNU General Public License as published by 6dad81a20SPaul E. McKenney * the Free Software Foundation; either version 2 of the License, or 7dad81a20SPaul E. McKenney * (at your option) any later version. 8dad81a20SPaul E. McKenney * 9dad81a20SPaul E. McKenney * This program is distributed in the hope that it will be useful, 10dad81a20SPaul E. McKenney * but WITHOUT ANY WARRANTY; without even the implied warranty of 11dad81a20SPaul E. McKenney * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12dad81a20SPaul E. McKenney * GNU General Public License for more details. 13dad81a20SPaul E. McKenney * 14dad81a20SPaul E. McKenney * You should have received a copy of the GNU General Public License 15dad81a20SPaul E. McKenney * along with this program; if not, you can access it online at 16dad81a20SPaul E. McKenney * http://www.gnu.org/licenses/gpl-2.0.html. 17dad81a20SPaul E. McKenney * 18dad81a20SPaul E. McKenney * Copyright (C) IBM Corporation, 2006 19dad81a20SPaul E. McKenney * Copyright (C) Fujitsu, 2012 20dad81a20SPaul E. McKenney * 21dad81a20SPaul E. McKenney * Author: Paul McKenney <paulmck@us.ibm.com> 22dad81a20SPaul E. McKenney * Lai Jiangshan <laijs@cn.fujitsu.com> 23dad81a20SPaul E. McKenney * 24dad81a20SPaul E. McKenney * For detailed explanation of Read-Copy Update mechanism see - 25dad81a20SPaul E. McKenney * Documentation/RCU/ *.txt 26dad81a20SPaul E. McKenney * 27dad81a20SPaul E. McKenney */ 28dad81a20SPaul E. McKenney 29dad81a20SPaul E. McKenney #include <linux/export.h> 30dad81a20SPaul E. McKenney #include <linux/mutex.h> 31dad81a20SPaul E. McKenney #include <linux/percpu.h> 32dad81a20SPaul E. McKenney #include <linux/preempt.h> 33dad81a20SPaul E. McKenney #include <linux/rcupdate_wait.h> 34dad81a20SPaul E. McKenney #include <linux/sched.h> 35dad81a20SPaul E. McKenney #include <linux/smp.h> 36dad81a20SPaul E. McKenney #include <linux/delay.h> 3722607d66SPaul E. McKenney #include <linux/module.h> 38dad81a20SPaul E. McKenney #include <linux/srcu.h> 39dad81a20SPaul E. McKenney 40dad81a20SPaul E. McKenney #include "rcu.h" 4145753c5fSIngo Molnar #include "rcu_segcblist.h" 42dad81a20SPaul E. McKenney 430c8e0e3cSPaul E. McKenney /* Holdoff in nanoseconds for auto-expediting. */ 440c8e0e3cSPaul E. McKenney #define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000) 450c8e0e3cSPaul E. McKenney static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF; 4622607d66SPaul E. McKenney module_param(exp_holdoff, ulong, 0444); 4722607d66SPaul E. McKenney 48c350c008SPaul E. McKenney /* Overflow-check frequency. N bits roughly says every 2**N grace periods. */ 49c350c008SPaul E. McKenney static ulong counter_wrap_check = (ULONG_MAX >> 2); 50c350c008SPaul E. McKenney module_param(counter_wrap_check, ulong, 0444); 51c350c008SPaul E. McKenney 52da915ad5SPaul E. McKenney static void srcu_invoke_callbacks(struct work_struct *work); 53da915ad5SPaul E. McKenney static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay); 540d8a1e83SPaul E. McKenney static void process_srcu(struct work_struct *work); 55da915ad5SPaul E. McKenney 56da915ad5SPaul E. McKenney /* 57da915ad5SPaul E. McKenney * Initialize SRCU combining tree. Note that statically allocated 58da915ad5SPaul E. McKenney * srcu_struct structures might already have srcu_read_lock() and 59da915ad5SPaul E. McKenney * srcu_read_unlock() running against them. So if the is_static parameter 60da915ad5SPaul E. McKenney * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[]. 61da915ad5SPaul E. McKenney */ 62da915ad5SPaul E. McKenney static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) 63dad81a20SPaul E. McKenney { 64da915ad5SPaul E. McKenney int cpu; 65da915ad5SPaul E. McKenney int i; 66da915ad5SPaul E. McKenney int level = 0; 67da915ad5SPaul E. McKenney int levelspread[RCU_NUM_LVLS]; 68da915ad5SPaul E. McKenney struct srcu_data *sdp; 69da915ad5SPaul E. McKenney struct srcu_node *snp; 70da915ad5SPaul E. McKenney struct srcu_node *snp_first; 71da915ad5SPaul E. McKenney 72da915ad5SPaul E. McKenney /* Work out the overall tree geometry. */ 73da915ad5SPaul E. McKenney sp->level[0] = &sp->node[0]; 74da915ad5SPaul E. McKenney for (i = 1; i < rcu_num_lvls; i++) 75da915ad5SPaul E. McKenney sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1]; 76da915ad5SPaul E. McKenney rcu_init_levelspread(levelspread, num_rcu_lvl); 77da915ad5SPaul E. McKenney 78da915ad5SPaul E. McKenney /* Each pass through this loop initializes one srcu_node structure. */ 79da915ad5SPaul E. McKenney rcu_for_each_node_breadth_first(sp, snp) { 80a3883df3SPaul E. McKenney raw_spin_lock_init(&ACCESS_PRIVATE(snp, lock)); 81c7e88067SPaul E. McKenney WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) != 82c7e88067SPaul E. McKenney ARRAY_SIZE(snp->srcu_data_have_cbs)); 83c7e88067SPaul E. McKenney for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) { 84da915ad5SPaul E. McKenney snp->srcu_have_cbs[i] = 0; 85c7e88067SPaul E. McKenney snp->srcu_data_have_cbs[i] = 0; 86c7e88067SPaul E. McKenney } 871e9a038bSPaul E. McKenney snp->srcu_gp_seq_needed_exp = 0; 88da915ad5SPaul E. McKenney snp->grplo = -1; 89da915ad5SPaul E. McKenney snp->grphi = -1; 90da915ad5SPaul E. McKenney if (snp == &sp->node[0]) { 91da915ad5SPaul E. McKenney /* Root node, special case. */ 92da915ad5SPaul E. McKenney snp->srcu_parent = NULL; 93da915ad5SPaul E. McKenney continue; 94da915ad5SPaul E. McKenney } 95da915ad5SPaul E. McKenney 96da915ad5SPaul E. McKenney /* Non-root node. */ 97da915ad5SPaul E. McKenney if (snp == sp->level[level + 1]) 98da915ad5SPaul E. McKenney level++; 99da915ad5SPaul E. McKenney snp->srcu_parent = sp->level[level - 1] + 100da915ad5SPaul E. McKenney (snp - sp->level[level]) / 101da915ad5SPaul E. McKenney levelspread[level - 1]; 102da915ad5SPaul E. McKenney } 103da915ad5SPaul E. McKenney 104da915ad5SPaul E. McKenney /* 105da915ad5SPaul E. McKenney * Initialize the per-CPU srcu_data array, which feeds into the 106da915ad5SPaul E. McKenney * leaves of the srcu_node tree. 107da915ad5SPaul E. McKenney */ 108da915ad5SPaul E. McKenney WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) != 109da915ad5SPaul E. McKenney ARRAY_SIZE(sdp->srcu_unlock_count)); 110da915ad5SPaul E. McKenney level = rcu_num_lvls - 1; 111da915ad5SPaul E. McKenney snp_first = sp->level[level]; 112da915ad5SPaul E. McKenney for_each_possible_cpu(cpu) { 113da915ad5SPaul E. McKenney sdp = per_cpu_ptr(sp->sda, cpu); 114a3883df3SPaul E. McKenney raw_spin_lock_init(&ACCESS_PRIVATE(sdp, lock)); 115da915ad5SPaul E. McKenney rcu_segcblist_init(&sdp->srcu_cblist); 116da915ad5SPaul E. McKenney sdp->srcu_cblist_invoking = false; 117da915ad5SPaul E. McKenney sdp->srcu_gp_seq_needed = sp->srcu_gp_seq; 1181e9a038bSPaul E. McKenney sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq; 119da915ad5SPaul E. McKenney sdp->mynode = &snp_first[cpu / levelspread[level]]; 120da915ad5SPaul E. McKenney for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) { 121da915ad5SPaul E. McKenney if (snp->grplo < 0) 122da915ad5SPaul E. McKenney snp->grplo = cpu; 123da915ad5SPaul E. McKenney snp->grphi = cpu; 124da915ad5SPaul E. McKenney } 125da915ad5SPaul E. McKenney sdp->cpu = cpu; 126da915ad5SPaul E. McKenney INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks); 127da915ad5SPaul E. McKenney sdp->sp = sp; 128c7e88067SPaul E. McKenney sdp->grpmask = 1 << (cpu - sdp->mynode->grplo); 129da915ad5SPaul E. McKenney if (is_static) 130da915ad5SPaul E. McKenney continue; 131da915ad5SPaul E. McKenney 132da915ad5SPaul E. McKenney /* Dynamically allocated, better be no srcu_read_locks()! */ 133da915ad5SPaul E. McKenney for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) { 134da915ad5SPaul E. McKenney sdp->srcu_lock_count[i] = 0; 135da915ad5SPaul E. McKenney sdp->srcu_unlock_count[i] = 0; 136da915ad5SPaul E. McKenney } 137da915ad5SPaul E. McKenney } 138da915ad5SPaul E. McKenney } 139da915ad5SPaul E. McKenney 140da915ad5SPaul E. McKenney /* 141da915ad5SPaul E. McKenney * Initialize non-compile-time initialized fields, including the 142da915ad5SPaul E. McKenney * associated srcu_node and srcu_data structures. The is_static 143da915ad5SPaul E. McKenney * parameter is passed through to init_srcu_struct_nodes(), and 144da915ad5SPaul E. McKenney * also tells us that ->sda has already been wired up to srcu_data. 145da915ad5SPaul E. McKenney */ 146da915ad5SPaul E. McKenney static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static) 147da915ad5SPaul E. McKenney { 148da915ad5SPaul E. McKenney mutex_init(&sp->srcu_cb_mutex); 149da915ad5SPaul E. McKenney mutex_init(&sp->srcu_gp_mutex); 150da915ad5SPaul E. McKenney sp->srcu_idx = 0; 151dad81a20SPaul E. McKenney sp->srcu_gp_seq = 0; 152da915ad5SPaul E. McKenney sp->srcu_barrier_seq = 0; 153da915ad5SPaul E. McKenney mutex_init(&sp->srcu_barrier_mutex); 154da915ad5SPaul E. McKenney atomic_set(&sp->srcu_barrier_cpu_cnt, 0); 155dad81a20SPaul E. McKenney INIT_DELAYED_WORK(&sp->work, process_srcu); 156da915ad5SPaul E. McKenney if (!is_static) 157da915ad5SPaul E. McKenney sp->sda = alloc_percpu(struct srcu_data); 158da915ad5SPaul E. McKenney init_srcu_struct_nodes(sp, is_static); 1591e9a038bSPaul E. McKenney sp->srcu_gp_seq_needed_exp = 0; 16022607d66SPaul E. McKenney sp->srcu_last_gp_end = ktime_get_mono_fast_ns(); 161da915ad5SPaul E. McKenney smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */ 162da915ad5SPaul E. McKenney return sp->sda ? 0 : -ENOMEM; 163dad81a20SPaul E. McKenney } 164dad81a20SPaul E. McKenney 165dad81a20SPaul E. McKenney #ifdef CONFIG_DEBUG_LOCK_ALLOC 166dad81a20SPaul E. McKenney 167dad81a20SPaul E. McKenney int __init_srcu_struct(struct srcu_struct *sp, const char *name, 168dad81a20SPaul E. McKenney struct lock_class_key *key) 169dad81a20SPaul E. McKenney { 170dad81a20SPaul E. McKenney /* Don't re-initialize a lock while it is held. */ 171dad81a20SPaul E. McKenney debug_check_no_locks_freed((void *)sp, sizeof(*sp)); 172dad81a20SPaul E. McKenney lockdep_init_map(&sp->dep_map, name, key, 0); 173a3883df3SPaul E. McKenney raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock)); 174da915ad5SPaul E. McKenney return init_srcu_struct_fields(sp, false); 175dad81a20SPaul E. McKenney } 176dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(__init_srcu_struct); 177dad81a20SPaul E. McKenney 178dad81a20SPaul E. McKenney #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 179dad81a20SPaul E. McKenney 180dad81a20SPaul E. McKenney /** 181dad81a20SPaul E. McKenney * init_srcu_struct - initialize a sleep-RCU structure 182dad81a20SPaul E. McKenney * @sp: structure to initialize. 183dad81a20SPaul E. McKenney * 184dad81a20SPaul E. McKenney * Must invoke this on a given srcu_struct before passing that srcu_struct 185dad81a20SPaul E. McKenney * to any other function. Each srcu_struct represents a separate domain 186dad81a20SPaul E. McKenney * of SRCU protection. 187dad81a20SPaul E. McKenney */ 188dad81a20SPaul E. McKenney int init_srcu_struct(struct srcu_struct *sp) 189dad81a20SPaul E. McKenney { 190a3883df3SPaul E. McKenney raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock)); 191da915ad5SPaul E. McKenney return init_srcu_struct_fields(sp, false); 192dad81a20SPaul E. McKenney } 193dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(init_srcu_struct); 194dad81a20SPaul E. McKenney 195dad81a20SPaul E. McKenney #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 196dad81a20SPaul E. McKenney 197dad81a20SPaul E. McKenney /* 198da915ad5SPaul E. McKenney * First-use initialization of statically allocated srcu_struct 199da915ad5SPaul E. McKenney * structure. Wiring up the combining tree is more than can be 200da915ad5SPaul E. McKenney * done with compile-time initialization, so this check is added 201a3883df3SPaul E. McKenney * to each update-side SRCU primitive. Use sp->lock, which -is- 202da915ad5SPaul E. McKenney * compile-time initialized, to resolve races involving multiple 203da915ad5SPaul E. McKenney * CPUs trying to garner first-use privileges. 204da915ad5SPaul E. McKenney */ 205da915ad5SPaul E. McKenney static void check_init_srcu_struct(struct srcu_struct *sp) 206da915ad5SPaul E. McKenney { 207da915ad5SPaul E. McKenney unsigned long flags; 208da915ad5SPaul E. McKenney 209da915ad5SPaul E. McKenney WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT); 210da915ad5SPaul E. McKenney /* The smp_load_acquire() pairs with the smp_store_release(). */ 211da915ad5SPaul E. McKenney if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/ 212da915ad5SPaul E. McKenney return; /* Already initialized. */ 213a3883df3SPaul E. McKenney raw_spin_lock_irqsave_rcu_node(sp, flags); 214da915ad5SPaul E. McKenney if (!rcu_seq_state(sp->srcu_gp_seq_needed)) { 215a3883df3SPaul E. McKenney raw_spin_unlock_irqrestore_rcu_node(sp, flags); 216da915ad5SPaul E. McKenney return; 217da915ad5SPaul E. McKenney } 218da915ad5SPaul E. McKenney init_srcu_struct_fields(sp, true); 219a3883df3SPaul E. McKenney raw_spin_unlock_irqrestore_rcu_node(sp, flags); 220da915ad5SPaul E. McKenney } 221da915ad5SPaul E. McKenney 222da915ad5SPaul E. McKenney /* 223da915ad5SPaul E. McKenney * Returns approximate total of the readers' ->srcu_lock_count[] values 224da915ad5SPaul E. McKenney * for the rank of per-CPU counters specified by idx. 225dad81a20SPaul E. McKenney */ 226dad81a20SPaul E. McKenney static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx) 227dad81a20SPaul E. McKenney { 228dad81a20SPaul E. McKenney int cpu; 229dad81a20SPaul E. McKenney unsigned long sum = 0; 230dad81a20SPaul E. McKenney 231dad81a20SPaul E. McKenney for_each_possible_cpu(cpu) { 232da915ad5SPaul E. McKenney struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); 233dad81a20SPaul E. McKenney 234da915ad5SPaul E. McKenney sum += READ_ONCE(cpuc->srcu_lock_count[idx]); 235dad81a20SPaul E. McKenney } 236dad81a20SPaul E. McKenney return sum; 237dad81a20SPaul E. McKenney } 238dad81a20SPaul E. McKenney 239dad81a20SPaul E. McKenney /* 240da915ad5SPaul E. McKenney * Returns approximate total of the readers' ->srcu_unlock_count[] values 241da915ad5SPaul E. McKenney * for the rank of per-CPU counters specified by idx. 242dad81a20SPaul E. McKenney */ 243dad81a20SPaul E. McKenney static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx) 244dad81a20SPaul E. McKenney { 245dad81a20SPaul E. McKenney int cpu; 246dad81a20SPaul E. McKenney unsigned long sum = 0; 247dad81a20SPaul E. McKenney 248dad81a20SPaul E. McKenney for_each_possible_cpu(cpu) { 249da915ad5SPaul E. McKenney struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); 250dad81a20SPaul E. McKenney 251da915ad5SPaul E. McKenney sum += READ_ONCE(cpuc->srcu_unlock_count[idx]); 252dad81a20SPaul E. McKenney } 253dad81a20SPaul E. McKenney return sum; 254dad81a20SPaul E. McKenney } 255dad81a20SPaul E. McKenney 256dad81a20SPaul E. McKenney /* 257dad81a20SPaul E. McKenney * Return true if the number of pre-existing readers is determined to 258dad81a20SPaul E. McKenney * be zero. 259dad81a20SPaul E. McKenney */ 260dad81a20SPaul E. McKenney static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx) 261dad81a20SPaul E. McKenney { 262dad81a20SPaul E. McKenney unsigned long unlocks; 263dad81a20SPaul E. McKenney 264dad81a20SPaul E. McKenney unlocks = srcu_readers_unlock_idx(sp, idx); 265dad81a20SPaul E. McKenney 266dad81a20SPaul E. McKenney /* 267dad81a20SPaul E. McKenney * Make sure that a lock is always counted if the corresponding 268dad81a20SPaul E. McKenney * unlock is counted. Needs to be a smp_mb() as the read side may 269dad81a20SPaul E. McKenney * contain a read from a variable that is written to before the 270dad81a20SPaul E. McKenney * synchronize_srcu() in the write side. In this case smp_mb()s 271dad81a20SPaul E. McKenney * A and B act like the store buffering pattern. 272dad81a20SPaul E. McKenney * 273dad81a20SPaul E. McKenney * This smp_mb() also pairs with smp_mb() C to prevent accesses 274dad81a20SPaul E. McKenney * after the synchronize_srcu() from being executed before the 275dad81a20SPaul E. McKenney * grace period ends. 276dad81a20SPaul E. McKenney */ 277dad81a20SPaul E. McKenney smp_mb(); /* A */ 278dad81a20SPaul E. McKenney 279dad81a20SPaul E. McKenney /* 280dad81a20SPaul E. McKenney * If the locks are the same as the unlocks, then there must have 281dad81a20SPaul E. McKenney * been no readers on this index at some time in between. This does 282dad81a20SPaul E. McKenney * not mean that there are no more readers, as one could have read 283dad81a20SPaul E. McKenney * the current index but not have incremented the lock counter yet. 284dad81a20SPaul E. McKenney * 285881ec9d2SPaul E. McKenney * So suppose that the updater is preempted here for so long 286881ec9d2SPaul E. McKenney * that more than ULONG_MAX non-nested readers come and go in 287881ec9d2SPaul E. McKenney * the meantime. It turns out that this cannot result in overflow 288881ec9d2SPaul E. McKenney * because if a reader modifies its unlock count after we read it 289881ec9d2SPaul E. McKenney * above, then that reader's next load of ->srcu_idx is guaranteed 290881ec9d2SPaul E. McKenney * to get the new value, which will cause it to operate on the 291881ec9d2SPaul E. McKenney * other bank of counters, where it cannot contribute to the 292881ec9d2SPaul E. McKenney * overflow of these counters. This means that there is a maximum 293881ec9d2SPaul E. McKenney * of 2*NR_CPUS increments, which cannot overflow given current 294881ec9d2SPaul E. McKenney * systems, especially not on 64-bit systems. 295881ec9d2SPaul E. McKenney * 296881ec9d2SPaul E. McKenney * OK, how about nesting? This does impose a limit on nesting 297881ec9d2SPaul E. McKenney * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient, 298881ec9d2SPaul E. McKenney * especially on 64-bit systems. 299dad81a20SPaul E. McKenney */ 300dad81a20SPaul E. McKenney return srcu_readers_lock_idx(sp, idx) == unlocks; 301dad81a20SPaul E. McKenney } 302dad81a20SPaul E. McKenney 303dad81a20SPaul E. McKenney /** 304dad81a20SPaul E. McKenney * srcu_readers_active - returns true if there are readers. and false 305dad81a20SPaul E. McKenney * otherwise 306dad81a20SPaul E. McKenney * @sp: which srcu_struct to count active readers (holding srcu_read_lock). 307dad81a20SPaul E. McKenney * 308dad81a20SPaul E. McKenney * Note that this is not an atomic primitive, and can therefore suffer 309dad81a20SPaul E. McKenney * severe errors when invoked on an active srcu_struct. That said, it 310dad81a20SPaul E. McKenney * can be useful as an error check at cleanup time. 311dad81a20SPaul E. McKenney */ 312dad81a20SPaul E. McKenney static bool srcu_readers_active(struct srcu_struct *sp) 313dad81a20SPaul E. McKenney { 314dad81a20SPaul E. McKenney int cpu; 315dad81a20SPaul E. McKenney unsigned long sum = 0; 316dad81a20SPaul E. McKenney 317dad81a20SPaul E. McKenney for_each_possible_cpu(cpu) { 318da915ad5SPaul E. McKenney struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); 319dad81a20SPaul E. McKenney 320da915ad5SPaul E. McKenney sum += READ_ONCE(cpuc->srcu_lock_count[0]); 321da915ad5SPaul E. McKenney sum += READ_ONCE(cpuc->srcu_lock_count[1]); 322da915ad5SPaul E. McKenney sum -= READ_ONCE(cpuc->srcu_unlock_count[0]); 323da915ad5SPaul E. McKenney sum -= READ_ONCE(cpuc->srcu_unlock_count[1]); 324dad81a20SPaul E. McKenney } 325dad81a20SPaul E. McKenney return sum; 326dad81a20SPaul E. McKenney } 327dad81a20SPaul E. McKenney 328dad81a20SPaul E. McKenney #define SRCU_INTERVAL 1 329dad81a20SPaul E. McKenney 3301e9a038bSPaul E. McKenney /* 3311e9a038bSPaul E. McKenney * Return grace-period delay, zero if there are expedited grace 3321e9a038bSPaul E. McKenney * periods pending, SRCU_INTERVAL otherwise. 3331e9a038bSPaul E. McKenney */ 3341e9a038bSPaul E. McKenney static unsigned long srcu_get_delay(struct srcu_struct *sp) 3351e9a038bSPaul E. McKenney { 3361e9a038bSPaul E. McKenney if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq), 3371e9a038bSPaul E. McKenney READ_ONCE(sp->srcu_gp_seq_needed_exp))) 3381e9a038bSPaul E. McKenney return 0; 3391e9a038bSPaul E. McKenney return SRCU_INTERVAL; 3401e9a038bSPaul E. McKenney } 3411e9a038bSPaul E. McKenney 342dad81a20SPaul E. McKenney /** 343dad81a20SPaul E. McKenney * cleanup_srcu_struct - deconstruct a sleep-RCU structure 344dad81a20SPaul E. McKenney * @sp: structure to clean up. 345dad81a20SPaul E. McKenney * 346dad81a20SPaul E. McKenney * Must invoke this after you are finished using a given srcu_struct that 347dad81a20SPaul E. McKenney * was initialized via init_srcu_struct(), else you leak memory. 348dad81a20SPaul E. McKenney */ 349dad81a20SPaul E. McKenney void cleanup_srcu_struct(struct srcu_struct *sp) 350dad81a20SPaul E. McKenney { 351da915ad5SPaul E. McKenney int cpu; 352da915ad5SPaul E. McKenney 3531e9a038bSPaul E. McKenney if (WARN_ON(!srcu_get_delay(sp))) 3541e9a038bSPaul E. McKenney return; /* Leakage unless caller handles error. */ 355dad81a20SPaul E. McKenney if (WARN_ON(srcu_readers_active(sp))) 356dad81a20SPaul E. McKenney return; /* Leakage unless caller handles error. */ 357dad81a20SPaul E. McKenney flush_delayed_work(&sp->work); 358da915ad5SPaul E. McKenney for_each_possible_cpu(cpu) 359da915ad5SPaul E. McKenney flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work); 360da915ad5SPaul E. McKenney if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) || 361da915ad5SPaul E. McKenney WARN_ON(srcu_readers_active(sp))) { 362da915ad5SPaul E. McKenney pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq))); 363dad81a20SPaul E. McKenney return; /* Caller forgot to stop doing call_srcu()? */ 364dad81a20SPaul E. McKenney } 365da915ad5SPaul E. McKenney free_percpu(sp->sda); 366da915ad5SPaul E. McKenney sp->sda = NULL; 367dad81a20SPaul E. McKenney } 368dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(cleanup_srcu_struct); 369dad81a20SPaul E. McKenney 370dad81a20SPaul E. McKenney /* 371dad81a20SPaul E. McKenney * Counts the new reader in the appropriate per-CPU element of the 372cdf7abc4SPaolo Bonzini * srcu_struct. 373dad81a20SPaul E. McKenney * Returns an index that must be passed to the matching srcu_read_unlock(). 374dad81a20SPaul E. McKenney */ 375dad81a20SPaul E. McKenney int __srcu_read_lock(struct srcu_struct *sp) 376dad81a20SPaul E. McKenney { 377dad81a20SPaul E. McKenney int idx; 378dad81a20SPaul E. McKenney 379da915ad5SPaul E. McKenney idx = READ_ONCE(sp->srcu_idx) & 0x1; 380cdf7abc4SPaolo Bonzini this_cpu_inc(sp->sda->srcu_lock_count[idx]); 381dad81a20SPaul E. McKenney smp_mb(); /* B */ /* Avoid leaking the critical section. */ 382dad81a20SPaul E. McKenney return idx; 383dad81a20SPaul E. McKenney } 384dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(__srcu_read_lock); 385dad81a20SPaul E. McKenney 386dad81a20SPaul E. McKenney /* 387dad81a20SPaul E. McKenney * Removes the count for the old reader from the appropriate per-CPU 388dad81a20SPaul E. McKenney * element of the srcu_struct. Note that this may well be a different 389dad81a20SPaul E. McKenney * CPU than that which was incremented by the corresponding srcu_read_lock(). 390dad81a20SPaul E. McKenney */ 391dad81a20SPaul E. McKenney void __srcu_read_unlock(struct srcu_struct *sp, int idx) 392dad81a20SPaul E. McKenney { 393dad81a20SPaul E. McKenney smp_mb(); /* C */ /* Avoid leaking the critical section. */ 394da915ad5SPaul E. McKenney this_cpu_inc(sp->sda->srcu_unlock_count[idx]); 395dad81a20SPaul E. McKenney } 396dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(__srcu_read_unlock); 397dad81a20SPaul E. McKenney 398dad81a20SPaul E. McKenney /* 399dad81a20SPaul E. McKenney * We use an adaptive strategy for synchronize_srcu() and especially for 400dad81a20SPaul E. McKenney * synchronize_srcu_expedited(). We spin for a fixed time period 401dad81a20SPaul E. McKenney * (defined below) to allow SRCU readers to exit their read-side critical 402dad81a20SPaul E. McKenney * sections. If there are still some readers after a few microseconds, 403dad81a20SPaul E. McKenney * we repeatedly block for 1-millisecond time periods. 404dad81a20SPaul E. McKenney */ 405dad81a20SPaul E. McKenney #define SRCU_RETRY_CHECK_DELAY 5 406dad81a20SPaul E. McKenney 407dad81a20SPaul E. McKenney /* 408dad81a20SPaul E. McKenney * Start an SRCU grace period. 409dad81a20SPaul E. McKenney */ 410dad81a20SPaul E. McKenney static void srcu_gp_start(struct srcu_struct *sp) 411dad81a20SPaul E. McKenney { 412da915ad5SPaul E. McKenney struct srcu_data *sdp = this_cpu_ptr(sp->sda); 413dad81a20SPaul E. McKenney int state; 414dad81a20SPaul E. McKenney 415a3883df3SPaul E. McKenney lockdep_assert_held(&sp->lock); 416da915ad5SPaul E. McKenney WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); 417da915ad5SPaul E. McKenney rcu_segcblist_advance(&sdp->srcu_cblist, 418da915ad5SPaul E. McKenney rcu_seq_current(&sp->srcu_gp_seq)); 419da915ad5SPaul E. McKenney (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, 420dad81a20SPaul E. McKenney rcu_seq_snap(&sp->srcu_gp_seq)); 4212da4b2a7SPaul E. McKenney smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */ 422dad81a20SPaul E. McKenney rcu_seq_start(&sp->srcu_gp_seq); 423dad81a20SPaul E. McKenney state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); 424dad81a20SPaul E. McKenney WARN_ON_ONCE(state != SRCU_STATE_SCAN1); 425dad81a20SPaul E. McKenney } 426dad81a20SPaul E. McKenney 427dad81a20SPaul E. McKenney /* 428da915ad5SPaul E. McKenney * Track online CPUs to guide callback workqueue placement. 429da915ad5SPaul E. McKenney */ 430da915ad5SPaul E. McKenney DEFINE_PER_CPU(bool, srcu_online); 431da915ad5SPaul E. McKenney 432da915ad5SPaul E. McKenney void srcu_online_cpu(unsigned int cpu) 433da915ad5SPaul E. McKenney { 434da915ad5SPaul E. McKenney WRITE_ONCE(per_cpu(srcu_online, cpu), true); 435da915ad5SPaul E. McKenney } 436da915ad5SPaul E. McKenney 437da915ad5SPaul E. McKenney void srcu_offline_cpu(unsigned int cpu) 438da915ad5SPaul E. McKenney { 439da915ad5SPaul E. McKenney WRITE_ONCE(per_cpu(srcu_online, cpu), false); 440da915ad5SPaul E. McKenney } 441da915ad5SPaul E. McKenney 442da915ad5SPaul E. McKenney /* 443da915ad5SPaul E. McKenney * Place the workqueue handler on the specified CPU if online, otherwise 444da915ad5SPaul E. McKenney * just run it whereever. This is useful for placing workqueue handlers 445da915ad5SPaul E. McKenney * that are to invoke the specified CPU's callbacks. 446da915ad5SPaul E. McKenney */ 447da915ad5SPaul E. McKenney static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 448da915ad5SPaul E. McKenney struct delayed_work *dwork, 449da915ad5SPaul E. McKenney unsigned long delay) 450da915ad5SPaul E. McKenney { 451da915ad5SPaul E. McKenney bool ret; 452da915ad5SPaul E. McKenney 453da915ad5SPaul E. McKenney preempt_disable(); 454da915ad5SPaul E. McKenney if (READ_ONCE(per_cpu(srcu_online, cpu))) 455da915ad5SPaul E. McKenney ret = queue_delayed_work_on(cpu, wq, dwork, delay); 456da915ad5SPaul E. McKenney else 457da915ad5SPaul E. McKenney ret = queue_delayed_work(wq, dwork, delay); 458da915ad5SPaul E. McKenney preempt_enable(); 459da915ad5SPaul E. McKenney return ret; 460da915ad5SPaul E. McKenney } 461da915ad5SPaul E. McKenney 462da915ad5SPaul E. McKenney /* 463da915ad5SPaul E. McKenney * Schedule callback invocation for the specified srcu_data structure, 464da915ad5SPaul E. McKenney * if possible, on the corresponding CPU. 465da915ad5SPaul E. McKenney */ 466da915ad5SPaul E. McKenney static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay) 467da915ad5SPaul E. McKenney { 468da915ad5SPaul E. McKenney srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq, 469da915ad5SPaul E. McKenney &sdp->work, delay); 470da915ad5SPaul E. McKenney } 471da915ad5SPaul E. McKenney 472da915ad5SPaul E. McKenney /* 473da915ad5SPaul E. McKenney * Schedule callback invocation for all srcu_data structures associated 474c7e88067SPaul E. McKenney * with the specified srcu_node structure that have callbacks for the 475c7e88067SPaul E. McKenney * just-completed grace period, the one corresponding to idx. If possible, 476c7e88067SPaul E. McKenney * schedule this invocation on the corresponding CPUs. 477da915ad5SPaul E. McKenney */ 478c7e88067SPaul E. McKenney static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp, 4791e9a038bSPaul E. McKenney unsigned long mask, unsigned long delay) 480da915ad5SPaul E. McKenney { 481da915ad5SPaul E. McKenney int cpu; 482da915ad5SPaul E. McKenney 483c7e88067SPaul E. McKenney for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { 484c7e88067SPaul E. McKenney if (!(mask & (1 << (cpu - snp->grplo)))) 485c7e88067SPaul E. McKenney continue; 4861e9a038bSPaul E. McKenney srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay); 487da915ad5SPaul E. McKenney } 488c7e88067SPaul E. McKenney } 489da915ad5SPaul E. McKenney 490da915ad5SPaul E. McKenney /* 491da915ad5SPaul E. McKenney * Note the end of an SRCU grace period. Initiates callback invocation 492da915ad5SPaul E. McKenney * and starts a new grace period if needed. 493da915ad5SPaul E. McKenney * 494da915ad5SPaul E. McKenney * The ->srcu_cb_mutex acquisition does not protect any data, but 495da915ad5SPaul E. McKenney * instead prevents more than one grace period from starting while we 496da915ad5SPaul E. McKenney * are initiating callback invocation. This allows the ->srcu_have_cbs[] 497da915ad5SPaul E. McKenney * array to have a finite number of elements. 498da915ad5SPaul E. McKenney */ 499da915ad5SPaul E. McKenney static void srcu_gp_end(struct srcu_struct *sp) 500da915ad5SPaul E. McKenney { 5011e9a038bSPaul E. McKenney unsigned long cbdelay; 502da915ad5SPaul E. McKenney bool cbs; 503c350c008SPaul E. McKenney int cpu; 504c350c008SPaul E. McKenney unsigned long flags; 505da915ad5SPaul E. McKenney unsigned long gpseq; 506da915ad5SPaul E. McKenney int idx; 507da915ad5SPaul E. McKenney int idxnext; 508c7e88067SPaul E. McKenney unsigned long mask; 509c350c008SPaul E. McKenney struct srcu_data *sdp; 510da915ad5SPaul E. McKenney struct srcu_node *snp; 511da915ad5SPaul E. McKenney 512da915ad5SPaul E. McKenney /* Prevent more than one additional grace period. */ 513da915ad5SPaul E. McKenney mutex_lock(&sp->srcu_cb_mutex); 514da915ad5SPaul E. McKenney 515da915ad5SPaul E. McKenney /* End the current grace period. */ 516a3883df3SPaul E. McKenney raw_spin_lock_irq_rcu_node(sp); 517da915ad5SPaul E. McKenney idx = rcu_seq_state(sp->srcu_gp_seq); 518da915ad5SPaul E. McKenney WARN_ON_ONCE(idx != SRCU_STATE_SCAN2); 5191e9a038bSPaul E. McKenney cbdelay = srcu_get_delay(sp); 52022607d66SPaul E. McKenney sp->srcu_last_gp_end = ktime_get_mono_fast_ns(); 521da915ad5SPaul E. McKenney rcu_seq_end(&sp->srcu_gp_seq); 522da915ad5SPaul E. McKenney gpseq = rcu_seq_current(&sp->srcu_gp_seq); 5231e9a038bSPaul E. McKenney if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq)) 5241e9a038bSPaul E. McKenney sp->srcu_gp_seq_needed_exp = gpseq; 525a3883df3SPaul E. McKenney raw_spin_unlock_irq_rcu_node(sp); 526da915ad5SPaul E. McKenney mutex_unlock(&sp->srcu_gp_mutex); 527da915ad5SPaul E. McKenney /* A new grace period can start at this point. But only one. */ 528da915ad5SPaul E. McKenney 529da915ad5SPaul E. McKenney /* Initiate callback invocation as needed. */ 530da915ad5SPaul E. McKenney idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs); 531da915ad5SPaul E. McKenney idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs); 532da915ad5SPaul E. McKenney rcu_for_each_node_breadth_first(sp, snp) { 533a3883df3SPaul E. McKenney raw_spin_lock_irq_rcu_node(snp); 534da915ad5SPaul E. McKenney cbs = false; 535da915ad5SPaul E. McKenney if (snp >= sp->level[rcu_num_lvls - 1]) 536da915ad5SPaul E. McKenney cbs = snp->srcu_have_cbs[idx] == gpseq; 537da915ad5SPaul E. McKenney snp->srcu_have_cbs[idx] = gpseq; 538da915ad5SPaul E. McKenney rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1); 5391e9a038bSPaul E. McKenney if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq)) 5401e9a038bSPaul E. McKenney snp->srcu_gp_seq_needed_exp = gpseq; 541c7e88067SPaul E. McKenney mask = snp->srcu_data_have_cbs[idx]; 542c7e88067SPaul E. McKenney snp->srcu_data_have_cbs[idx] = 0; 543a3883df3SPaul E. McKenney raw_spin_unlock_irq_rcu_node(snp); 544a3883df3SPaul E. McKenney if (cbs) 5451e9a038bSPaul E. McKenney srcu_schedule_cbs_snp(sp, snp, mask, cbdelay); 546c350c008SPaul E. McKenney 547c350c008SPaul E. McKenney /* Occasionally prevent srcu_data counter wrap. */ 548c350c008SPaul E. McKenney if (!(gpseq & counter_wrap_check)) 549c350c008SPaul E. McKenney for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { 550c350c008SPaul E. McKenney sdp = per_cpu_ptr(sp->sda, cpu); 551a3883df3SPaul E. McKenney raw_spin_lock_irqsave_rcu_node(sdp, flags); 552c350c008SPaul E. McKenney if (ULONG_CMP_GE(gpseq, 553c350c008SPaul E. McKenney sdp->srcu_gp_seq_needed + 100)) 554c350c008SPaul E. McKenney sdp->srcu_gp_seq_needed = gpseq; 555a3883df3SPaul E. McKenney raw_spin_unlock_irqrestore_rcu_node(sdp, flags); 556c350c008SPaul E. McKenney } 557da915ad5SPaul E. McKenney } 558da915ad5SPaul E. McKenney 559da915ad5SPaul E. McKenney /* Callback initiation done, allow grace periods after next. */ 560da915ad5SPaul E. McKenney mutex_unlock(&sp->srcu_cb_mutex); 561da915ad5SPaul E. McKenney 562da915ad5SPaul E. McKenney /* Start a new grace period if needed. */ 563a3883df3SPaul E. McKenney raw_spin_lock_irq_rcu_node(sp); 564da915ad5SPaul E. McKenney gpseq = rcu_seq_current(&sp->srcu_gp_seq); 565da915ad5SPaul E. McKenney if (!rcu_seq_state(gpseq) && 566da915ad5SPaul E. McKenney ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) { 567da915ad5SPaul E. McKenney srcu_gp_start(sp); 568a3883df3SPaul E. McKenney raw_spin_unlock_irq_rcu_node(sp); 569da915ad5SPaul E. McKenney /* Throttle expedited grace periods: Should be rare! */ 5701e9a038bSPaul E. McKenney srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff 5711e9a038bSPaul E. McKenney ? 0 : SRCU_INTERVAL); 572da915ad5SPaul E. McKenney } else { 573a3883df3SPaul E. McKenney raw_spin_unlock_irq_rcu_node(sp); 574da915ad5SPaul E. McKenney } 575da915ad5SPaul E. McKenney } 576da915ad5SPaul E. McKenney 577da915ad5SPaul E. McKenney /* 5781e9a038bSPaul E. McKenney * Funnel-locking scheme to scalably mediate many concurrent expedited 5791e9a038bSPaul E. McKenney * grace-period requests. This function is invoked for the first known 5801e9a038bSPaul E. McKenney * expedited request for a grace period that has already been requested, 5811e9a038bSPaul E. McKenney * but without expediting. To start a completely new grace period, 5821e9a038bSPaul E. McKenney * whether expedited or not, use srcu_funnel_gp_start() instead. 5831e9a038bSPaul E. McKenney */ 5841e9a038bSPaul E. McKenney static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp, 5851e9a038bSPaul E. McKenney unsigned long s) 5861e9a038bSPaul E. McKenney { 5871e9a038bSPaul E. McKenney unsigned long flags; 5881e9a038bSPaul E. McKenney 5891e9a038bSPaul E. McKenney for (; snp != NULL; snp = snp->srcu_parent) { 5901e9a038bSPaul E. McKenney if (rcu_seq_done(&sp->srcu_gp_seq, s) || 5911e9a038bSPaul E. McKenney ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s)) 5921e9a038bSPaul E. McKenney return; 593a3883df3SPaul E. McKenney raw_spin_lock_irqsave_rcu_node(snp, flags); 5941e9a038bSPaul E. McKenney if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) { 595a3883df3SPaul E. McKenney raw_spin_unlock_irqrestore_rcu_node(snp, flags); 5961e9a038bSPaul E. McKenney return; 5971e9a038bSPaul E. McKenney } 5981e9a038bSPaul E. McKenney WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s); 599a3883df3SPaul E. McKenney raw_spin_unlock_irqrestore_rcu_node(snp, flags); 6001e9a038bSPaul E. McKenney } 601a3883df3SPaul E. McKenney raw_spin_lock_irqsave_rcu_node(sp, flags); 6021e9a038bSPaul E. McKenney if (!ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s)) 6031e9a038bSPaul E. McKenney sp->srcu_gp_seq_needed_exp = s; 604a3883df3SPaul E. McKenney raw_spin_unlock_irqrestore_rcu_node(sp, flags); 6051e9a038bSPaul E. McKenney } 6061e9a038bSPaul E. McKenney 6071e9a038bSPaul E. McKenney /* 608da915ad5SPaul E. McKenney * Funnel-locking scheme to scalably mediate many concurrent grace-period 609da915ad5SPaul E. McKenney * requests. The winner has to do the work of actually starting grace 610da915ad5SPaul E. McKenney * period s. Losers must either ensure that their desired grace-period 611da915ad5SPaul E. McKenney * number is recorded on at least their leaf srcu_node structure, or they 612da915ad5SPaul E. McKenney * must take steps to invoke their own callbacks. 613da915ad5SPaul E. McKenney */ 6141e9a038bSPaul E. McKenney static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp, 6151e9a038bSPaul E. McKenney unsigned long s, bool do_norm) 616da915ad5SPaul E. McKenney { 617da915ad5SPaul E. McKenney unsigned long flags; 618da915ad5SPaul E. McKenney int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs); 619da915ad5SPaul E. McKenney struct srcu_node *snp = sdp->mynode; 620da915ad5SPaul E. McKenney unsigned long snp_seq; 621da915ad5SPaul E. McKenney 622da915ad5SPaul E. McKenney /* Each pass through the loop does one level of the srcu_node tree. */ 623da915ad5SPaul E. McKenney for (; snp != NULL; snp = snp->srcu_parent) { 624da915ad5SPaul E. McKenney if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode) 625da915ad5SPaul E. McKenney return; /* GP already done and CBs recorded. */ 626a3883df3SPaul E. McKenney raw_spin_lock_irqsave_rcu_node(snp, flags); 627da915ad5SPaul E. McKenney if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) { 628da915ad5SPaul E. McKenney snp_seq = snp->srcu_have_cbs[idx]; 629c7e88067SPaul E. McKenney if (snp == sdp->mynode && snp_seq == s) 630c7e88067SPaul E. McKenney snp->srcu_data_have_cbs[idx] |= sdp->grpmask; 631a3883df3SPaul E. McKenney raw_spin_unlock_irqrestore_rcu_node(snp, flags); 632da915ad5SPaul E. McKenney if (snp == sdp->mynode && snp_seq != s) { 6331e9a038bSPaul E. McKenney srcu_schedule_cbs_sdp(sdp, do_norm 6341e9a038bSPaul E. McKenney ? SRCU_INTERVAL 6351e9a038bSPaul E. McKenney : 0); 6361e9a038bSPaul E. McKenney return; 637da915ad5SPaul E. McKenney } 6381e9a038bSPaul E. McKenney if (!do_norm) 6391e9a038bSPaul E. McKenney srcu_funnel_exp_start(sp, snp, s); 640da915ad5SPaul E. McKenney return; 641da915ad5SPaul E. McKenney } 642da915ad5SPaul E. McKenney snp->srcu_have_cbs[idx] = s; 643c7e88067SPaul E. McKenney if (snp == sdp->mynode) 644c7e88067SPaul E. McKenney snp->srcu_data_have_cbs[idx] |= sdp->grpmask; 6451e9a038bSPaul E. McKenney if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s)) 6461e9a038bSPaul E. McKenney snp->srcu_gp_seq_needed_exp = s; 647a3883df3SPaul E. McKenney raw_spin_unlock_irqrestore_rcu_node(snp, flags); 648da915ad5SPaul E. McKenney } 649da915ad5SPaul E. McKenney 650da915ad5SPaul E. McKenney /* Top of tree, must ensure the grace period will be started. */ 651a3883df3SPaul E. McKenney raw_spin_lock_irqsave_rcu_node(sp, flags); 652da915ad5SPaul E. McKenney if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) { 653da915ad5SPaul E. McKenney /* 654da915ad5SPaul E. McKenney * Record need for grace period s. Pair with load 655da915ad5SPaul E. McKenney * acquire setting up for initialization. 656da915ad5SPaul E. McKenney */ 657da915ad5SPaul E. McKenney smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/ 658da915ad5SPaul E. McKenney } 6591e9a038bSPaul E. McKenney if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s)) 6601e9a038bSPaul E. McKenney sp->srcu_gp_seq_needed_exp = s; 661da915ad5SPaul E. McKenney 662da915ad5SPaul E. McKenney /* If grace period not already done and none in progress, start it. */ 663da915ad5SPaul E. McKenney if (!rcu_seq_done(&sp->srcu_gp_seq, s) && 664da915ad5SPaul E. McKenney rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) { 665da915ad5SPaul E. McKenney WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); 666da915ad5SPaul E. McKenney srcu_gp_start(sp); 667da915ad5SPaul E. McKenney queue_delayed_work(system_power_efficient_wq, &sp->work, 6681e9a038bSPaul E. McKenney srcu_get_delay(sp)); 669da915ad5SPaul E. McKenney } 670a3883df3SPaul E. McKenney raw_spin_unlock_irqrestore_rcu_node(sp, flags); 671da915ad5SPaul E. McKenney } 672da915ad5SPaul E. McKenney 673da915ad5SPaul E. McKenney /* 674dad81a20SPaul E. McKenney * Wait until all readers counted by array index idx complete, but 675dad81a20SPaul E. McKenney * loop an additional time if there is an expedited grace period pending. 676da915ad5SPaul E. McKenney * The caller must ensure that ->srcu_idx is not changed while checking. 677dad81a20SPaul E. McKenney */ 678dad81a20SPaul E. McKenney static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount) 679dad81a20SPaul E. McKenney { 680dad81a20SPaul E. McKenney for (;;) { 681dad81a20SPaul E. McKenney if (srcu_readers_active_idx_check(sp, idx)) 682dad81a20SPaul E. McKenney return true; 6831e9a038bSPaul E. McKenney if (--trycount + !srcu_get_delay(sp) <= 0) 684dad81a20SPaul E. McKenney return false; 685dad81a20SPaul E. McKenney udelay(SRCU_RETRY_CHECK_DELAY); 686dad81a20SPaul E. McKenney } 687dad81a20SPaul E. McKenney } 688dad81a20SPaul E. McKenney 689dad81a20SPaul E. McKenney /* 690da915ad5SPaul E. McKenney * Increment the ->srcu_idx counter so that future SRCU readers will 691da915ad5SPaul E. McKenney * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows 692dad81a20SPaul E. McKenney * us to wait for pre-existing readers in a starvation-free manner. 693dad81a20SPaul E. McKenney */ 694dad81a20SPaul E. McKenney static void srcu_flip(struct srcu_struct *sp) 695dad81a20SPaul E. McKenney { 696881ec9d2SPaul E. McKenney /* 697881ec9d2SPaul E. McKenney * Ensure that if this updater saw a given reader's increment 698881ec9d2SPaul E. McKenney * from __srcu_read_lock(), that reader was using an old value 699881ec9d2SPaul E. McKenney * of ->srcu_idx. Also ensure that if a given reader sees the 700881ec9d2SPaul E. McKenney * new value of ->srcu_idx, this updater's earlier scans cannot 701881ec9d2SPaul E. McKenney * have seen that reader's increments (which is OK, because this 702881ec9d2SPaul E. McKenney * grace period need not wait on that reader). 703881ec9d2SPaul E. McKenney */ 704881ec9d2SPaul E. McKenney smp_mb(); /* E */ /* Pairs with B and C. */ 705881ec9d2SPaul E. McKenney 706da915ad5SPaul E. McKenney WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1); 707dad81a20SPaul E. McKenney 708dad81a20SPaul E. McKenney /* 709dad81a20SPaul E. McKenney * Ensure that if the updater misses an __srcu_read_unlock() 710dad81a20SPaul E. McKenney * increment, that task's next __srcu_read_lock() will see the 711dad81a20SPaul E. McKenney * above counter update. Note that both this memory barrier 712dad81a20SPaul E. McKenney * and the one in srcu_readers_active_idx_check() provide the 713dad81a20SPaul E. McKenney * guarantee for __srcu_read_lock(). 714dad81a20SPaul E. McKenney */ 715dad81a20SPaul E. McKenney smp_mb(); /* D */ /* Pairs with C. */ 716dad81a20SPaul E. McKenney } 717dad81a20SPaul E. McKenney 718dad81a20SPaul E. McKenney /* 7192da4b2a7SPaul E. McKenney * If SRCU is likely idle, return true, otherwise return false. 7202da4b2a7SPaul E. McKenney * 7212da4b2a7SPaul E. McKenney * Note that it is OK for several current from-idle requests for a new 7222da4b2a7SPaul E. McKenney * grace period from idle to specify expediting because they will all end 7232da4b2a7SPaul E. McKenney * up requesting the same grace period anyhow. So no loss. 7242da4b2a7SPaul E. McKenney * 7252da4b2a7SPaul E. McKenney * Note also that if any CPU (including the current one) is still invoking 7262da4b2a7SPaul E. McKenney * callbacks, this function will nevertheless say "idle". This is not 7272da4b2a7SPaul E. McKenney * ideal, but the overhead of checking all CPUs' callback lists is even 7282da4b2a7SPaul E. McKenney * less ideal, especially on large systems. Furthermore, the wakeup 7292da4b2a7SPaul E. McKenney * can happen before the callback is fully removed, so we have no choice 7302da4b2a7SPaul E. McKenney * but to accept this type of error. 7312da4b2a7SPaul E. McKenney * 7322da4b2a7SPaul E. McKenney * This function is also subject to counter-wrap errors, but let's face 7332da4b2a7SPaul E. McKenney * it, if this function was preempted for enough time for the counters 7342da4b2a7SPaul E. McKenney * to wrap, it really doesn't matter whether or not we expedite the grace 7352da4b2a7SPaul E. McKenney * period. The extra overhead of a needlessly expedited grace period is 7362da4b2a7SPaul E. McKenney * negligible when amoritized over that time period, and the extra latency 7372da4b2a7SPaul E. McKenney * of a needlessly non-expedited grace period is similarly negligible. 7382da4b2a7SPaul E. McKenney */ 7392da4b2a7SPaul E. McKenney static bool srcu_might_be_idle(struct srcu_struct *sp) 7402da4b2a7SPaul E. McKenney { 74122607d66SPaul E. McKenney unsigned long curseq; 7422da4b2a7SPaul E. McKenney unsigned long flags; 7432da4b2a7SPaul E. McKenney struct srcu_data *sdp; 74422607d66SPaul E. McKenney unsigned long t; 7452da4b2a7SPaul E. McKenney 7462da4b2a7SPaul E. McKenney /* If the local srcu_data structure has callbacks, not idle. */ 7472da4b2a7SPaul E. McKenney local_irq_save(flags); 7482da4b2a7SPaul E. McKenney sdp = this_cpu_ptr(sp->sda); 7492da4b2a7SPaul E. McKenney if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) { 7502da4b2a7SPaul E. McKenney local_irq_restore(flags); 7512da4b2a7SPaul E. McKenney return false; /* Callbacks already present, so not idle. */ 7522da4b2a7SPaul E. McKenney } 7532da4b2a7SPaul E. McKenney local_irq_restore(flags); 7542da4b2a7SPaul E. McKenney 7552da4b2a7SPaul E. McKenney /* 7562da4b2a7SPaul E. McKenney * No local callbacks, so probabalistically probe global state. 7572da4b2a7SPaul E. McKenney * Exact information would require acquiring locks, which would 7582da4b2a7SPaul E. McKenney * kill scalability, hence the probabalistic nature of the probe. 7592da4b2a7SPaul E. McKenney */ 76022607d66SPaul E. McKenney 76122607d66SPaul E. McKenney /* First, see if enough time has passed since the last GP. */ 76222607d66SPaul E. McKenney t = ktime_get_mono_fast_ns(); 76322607d66SPaul E. McKenney if (exp_holdoff == 0 || 76422607d66SPaul E. McKenney time_in_range_open(t, sp->srcu_last_gp_end, 76522607d66SPaul E. McKenney sp->srcu_last_gp_end + exp_holdoff)) 76622607d66SPaul E. McKenney return false; /* Too soon after last GP. */ 76722607d66SPaul E. McKenney 76822607d66SPaul E. McKenney /* Next, check for probable idleness. */ 7692da4b2a7SPaul E. McKenney curseq = rcu_seq_current(&sp->srcu_gp_seq); 7702da4b2a7SPaul E. McKenney smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */ 7712da4b2a7SPaul E. McKenney if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed))) 7722da4b2a7SPaul E. McKenney return false; /* Grace period in progress, so not idle. */ 7732da4b2a7SPaul E. McKenney smp_mb(); /* Order ->srcu_gp_seq with prior access. */ 7742da4b2a7SPaul E. McKenney if (curseq != rcu_seq_current(&sp->srcu_gp_seq)) 7752da4b2a7SPaul E. McKenney return false; /* GP # changed, so not idle. */ 7762da4b2a7SPaul E. McKenney return true; /* With reasonable probability, idle! */ 7772da4b2a7SPaul E. McKenney } 7782da4b2a7SPaul E. McKenney 7792da4b2a7SPaul E. McKenney /* 780a602538eSPaul E. McKenney * SRCU callback function to leak a callback. 781a602538eSPaul E. McKenney */ 782a602538eSPaul E. McKenney static void srcu_leak_callback(struct rcu_head *rhp) 783a602538eSPaul E. McKenney { 784a602538eSPaul E. McKenney } 785a602538eSPaul E. McKenney 786a602538eSPaul E. McKenney /* 787da915ad5SPaul E. McKenney * Enqueue an SRCU callback on the srcu_data structure associated with 788da915ad5SPaul E. McKenney * the current CPU and the specified srcu_struct structure, initiating 789da915ad5SPaul E. McKenney * grace-period processing if it is not already running. 790dad81a20SPaul E. McKenney * 791dad81a20SPaul E. McKenney * Note that all CPUs must agree that the grace period extended beyond 792dad81a20SPaul E. McKenney * all pre-existing SRCU read-side critical section. On systems with 793dad81a20SPaul E. McKenney * more than one CPU, this means that when "func()" is invoked, each CPU 794dad81a20SPaul E. McKenney * is guaranteed to have executed a full memory barrier since the end of 795dad81a20SPaul E. McKenney * its last corresponding SRCU read-side critical section whose beginning 796dad81a20SPaul E. McKenney * preceded the call to call_rcu(). It also means that each CPU executing 797dad81a20SPaul E. McKenney * an SRCU read-side critical section that continues beyond the start of 798dad81a20SPaul E. McKenney * "func()" must have executed a memory barrier after the call_rcu() 799dad81a20SPaul E. McKenney * but before the beginning of that SRCU read-side critical section. 800dad81a20SPaul E. McKenney * Note that these guarantees include CPUs that are offline, idle, or 801dad81a20SPaul E. McKenney * executing in user mode, as well as CPUs that are executing in the kernel. 802dad81a20SPaul E. McKenney * 803dad81a20SPaul E. McKenney * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the 804dad81a20SPaul E. McKenney * resulting SRCU callback function "func()", then both CPU A and CPU 805dad81a20SPaul E. McKenney * B are guaranteed to execute a full memory barrier during the time 806dad81a20SPaul E. McKenney * interval between the call to call_rcu() and the invocation of "func()". 807dad81a20SPaul E. McKenney * This guarantee applies even if CPU A and CPU B are the same CPU (but 808dad81a20SPaul E. McKenney * again only if the system has more than one CPU). 809dad81a20SPaul E. McKenney * 810dad81a20SPaul E. McKenney * Of course, these guarantees apply only for invocations of call_srcu(), 811dad81a20SPaul E. McKenney * srcu_read_lock(), and srcu_read_unlock() that are all passed the same 812dad81a20SPaul E. McKenney * srcu_struct structure. 813dad81a20SPaul E. McKenney */ 8141e9a038bSPaul E. McKenney void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, 8151e9a038bSPaul E. McKenney rcu_callback_t func, bool do_norm) 816dad81a20SPaul E. McKenney { 817dad81a20SPaul E. McKenney unsigned long flags; 8181e9a038bSPaul E. McKenney bool needexp = false; 819da915ad5SPaul E. McKenney bool needgp = false; 820da915ad5SPaul E. McKenney unsigned long s; 821da915ad5SPaul E. McKenney struct srcu_data *sdp; 822dad81a20SPaul E. McKenney 823da915ad5SPaul E. McKenney check_init_srcu_struct(sp); 824a602538eSPaul E. McKenney if (debug_rcu_head_queue(rhp)) { 825a602538eSPaul E. McKenney /* Probable double call_srcu(), so leak the callback. */ 826a602538eSPaul E. McKenney WRITE_ONCE(rhp->func, srcu_leak_callback); 827a602538eSPaul E. McKenney WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n"); 828a602538eSPaul E. McKenney return; 829a602538eSPaul E. McKenney } 830da915ad5SPaul E. McKenney rhp->func = func; 831da915ad5SPaul E. McKenney local_irq_save(flags); 832da915ad5SPaul E. McKenney sdp = this_cpu_ptr(sp->sda); 833a3883df3SPaul E. McKenney raw_spin_lock_rcu_node(sdp); 834da915ad5SPaul E. McKenney rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false); 835da915ad5SPaul E. McKenney rcu_segcblist_advance(&sdp->srcu_cblist, 836da915ad5SPaul E. McKenney rcu_seq_current(&sp->srcu_gp_seq)); 837da915ad5SPaul E. McKenney s = rcu_seq_snap(&sp->srcu_gp_seq); 838da915ad5SPaul E. McKenney (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); 839da915ad5SPaul E. McKenney if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { 840da915ad5SPaul E. McKenney sdp->srcu_gp_seq_needed = s; 841da915ad5SPaul E. McKenney needgp = true; 842dad81a20SPaul E. McKenney } 8431e9a038bSPaul E. McKenney if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) { 8441e9a038bSPaul E. McKenney sdp->srcu_gp_seq_needed_exp = s; 8451e9a038bSPaul E. McKenney needexp = true; 8461e9a038bSPaul E. McKenney } 847a3883df3SPaul E. McKenney raw_spin_unlock_irqrestore_rcu_node(sdp, flags); 848da915ad5SPaul E. McKenney if (needgp) 8491e9a038bSPaul E. McKenney srcu_funnel_gp_start(sp, sdp, s, do_norm); 8501e9a038bSPaul E. McKenney else if (needexp) 8511e9a038bSPaul E. McKenney srcu_funnel_exp_start(sp, sdp->mynode, s); 8521e9a038bSPaul E. McKenney } 8531e9a038bSPaul E. McKenney 8545a0465e1SPaul E. McKenney /** 8555a0465e1SPaul E. McKenney * call_srcu() - Queue a callback for invocation after an SRCU grace period 8565a0465e1SPaul E. McKenney * @sp: srcu_struct in queue the callback 857*27fdb35fSPaul E. McKenney * @rhp: structure to be used for queueing the SRCU callback. 8585a0465e1SPaul E. McKenney * @func: function to be invoked after the SRCU grace period 8595a0465e1SPaul E. McKenney * 8605a0465e1SPaul E. McKenney * The callback function will be invoked some time after a full SRCU 8615a0465e1SPaul E. McKenney * grace period elapses, in other words after all pre-existing SRCU 8625a0465e1SPaul E. McKenney * read-side critical sections have completed. However, the callback 8635a0465e1SPaul E. McKenney * function might well execute concurrently with other SRCU read-side 8645a0465e1SPaul E. McKenney * critical sections that started after call_srcu() was invoked. SRCU 8655a0465e1SPaul E. McKenney * read-side critical sections are delimited by srcu_read_lock() and 8665a0465e1SPaul E. McKenney * srcu_read_unlock(), and may be nested. 8675a0465e1SPaul E. McKenney * 8685a0465e1SPaul E. McKenney * The callback will be invoked from process context, but must nevertheless 8695a0465e1SPaul E. McKenney * be fast and must not block. 8705a0465e1SPaul E. McKenney */ 8711e9a038bSPaul E. McKenney void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, 8721e9a038bSPaul E. McKenney rcu_callback_t func) 8731e9a038bSPaul E. McKenney { 8741e9a038bSPaul E. McKenney __call_srcu(sp, rhp, func, true); 875dad81a20SPaul E. McKenney } 876dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(call_srcu); 877dad81a20SPaul E. McKenney 878dad81a20SPaul E. McKenney /* 879dad81a20SPaul E. McKenney * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). 880dad81a20SPaul E. McKenney */ 8811e9a038bSPaul E. McKenney static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm) 882dad81a20SPaul E. McKenney { 883dad81a20SPaul E. McKenney struct rcu_synchronize rcu; 884dad81a20SPaul E. McKenney 885dad81a20SPaul E. McKenney RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) || 886dad81a20SPaul E. McKenney lock_is_held(&rcu_bh_lock_map) || 887dad81a20SPaul E. McKenney lock_is_held(&rcu_lock_map) || 888dad81a20SPaul E. McKenney lock_is_held(&rcu_sched_lock_map), 889dad81a20SPaul E. McKenney "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section"); 890dad81a20SPaul E. McKenney 891dad81a20SPaul E. McKenney if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) 892dad81a20SPaul E. McKenney return; 893dad81a20SPaul E. McKenney might_sleep(); 894da915ad5SPaul E. McKenney check_init_srcu_struct(sp); 895dad81a20SPaul E. McKenney init_completion(&rcu.completion); 896da915ad5SPaul E. McKenney init_rcu_head_on_stack(&rcu.head); 8971e9a038bSPaul E. McKenney __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm); 898dad81a20SPaul E. McKenney wait_for_completion(&rcu.completion); 899da915ad5SPaul E. McKenney destroy_rcu_head_on_stack(&rcu.head); 90035732cf9SPaul E. McKenney 90135732cf9SPaul E. McKenney /* 90235732cf9SPaul E. McKenney * Make sure that later code is ordered after the SRCU grace 90335732cf9SPaul E. McKenney * period. This pairs with the raw_spin_lock_irq_rcu_node() 90435732cf9SPaul E. McKenney * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed 90535732cf9SPaul E. McKenney * because the current CPU might have been totally uninvolved with 90635732cf9SPaul E. McKenney * (and thus unordered against) that grace period. 90735732cf9SPaul E. McKenney */ 90835732cf9SPaul E. McKenney smp_mb(); 909dad81a20SPaul E. McKenney } 910dad81a20SPaul E. McKenney 911dad81a20SPaul E. McKenney /** 912dad81a20SPaul E. McKenney * synchronize_srcu_expedited - Brute-force SRCU grace period 913dad81a20SPaul E. McKenney * @sp: srcu_struct with which to synchronize. 914dad81a20SPaul E. McKenney * 915dad81a20SPaul E. McKenney * Wait for an SRCU grace period to elapse, but be more aggressive about 916dad81a20SPaul E. McKenney * spinning rather than blocking when waiting. 917dad81a20SPaul E. McKenney * 918dad81a20SPaul E. McKenney * Note that synchronize_srcu_expedited() has the same deadlock and 919dad81a20SPaul E. McKenney * memory-ordering properties as does synchronize_srcu(). 920dad81a20SPaul E. McKenney */ 921dad81a20SPaul E. McKenney void synchronize_srcu_expedited(struct srcu_struct *sp) 922dad81a20SPaul E. McKenney { 9231e9a038bSPaul E. McKenney __synchronize_srcu(sp, rcu_gp_is_normal()); 924dad81a20SPaul E. McKenney } 925dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); 926dad81a20SPaul E. McKenney 927dad81a20SPaul E. McKenney /** 928dad81a20SPaul E. McKenney * synchronize_srcu - wait for prior SRCU read-side critical-section completion 929dad81a20SPaul E. McKenney * @sp: srcu_struct with which to synchronize. 930dad81a20SPaul E. McKenney * 931dad81a20SPaul E. McKenney * Wait for the count to drain to zero of both indexes. To avoid the 932dad81a20SPaul E. McKenney * possible starvation of synchronize_srcu(), it waits for the count of 933da915ad5SPaul E. McKenney * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first, 934da915ad5SPaul E. McKenney * and then flip the srcu_idx and wait for the count of the other index. 935dad81a20SPaul E. McKenney * 936dad81a20SPaul E. McKenney * Can block; must be called from process context. 937dad81a20SPaul E. McKenney * 938dad81a20SPaul E. McKenney * Note that it is illegal to call synchronize_srcu() from the corresponding 939dad81a20SPaul E. McKenney * SRCU read-side critical section; doing so will result in deadlock. 940dad81a20SPaul E. McKenney * However, it is perfectly legal to call synchronize_srcu() on one 941dad81a20SPaul E. McKenney * srcu_struct from some other srcu_struct's read-side critical section, 942dad81a20SPaul E. McKenney * as long as the resulting graph of srcu_structs is acyclic. 943dad81a20SPaul E. McKenney * 944dad81a20SPaul E. McKenney * There are memory-ordering constraints implied by synchronize_srcu(). 945dad81a20SPaul E. McKenney * On systems with more than one CPU, when synchronize_srcu() returns, 946dad81a20SPaul E. McKenney * each CPU is guaranteed to have executed a full memory barrier since 947dad81a20SPaul E. McKenney * the end of its last corresponding SRCU-sched read-side critical section 948dad81a20SPaul E. McKenney * whose beginning preceded the call to synchronize_srcu(). In addition, 949dad81a20SPaul E. McKenney * each CPU having an SRCU read-side critical section that extends beyond 950dad81a20SPaul E. McKenney * the return from synchronize_srcu() is guaranteed to have executed a 951dad81a20SPaul E. McKenney * full memory barrier after the beginning of synchronize_srcu() and before 952dad81a20SPaul E. McKenney * the beginning of that SRCU read-side critical section. Note that these 953dad81a20SPaul E. McKenney * guarantees include CPUs that are offline, idle, or executing in user mode, 954dad81a20SPaul E. McKenney * as well as CPUs that are executing in the kernel. 955dad81a20SPaul E. McKenney * 956dad81a20SPaul E. McKenney * Furthermore, if CPU A invoked synchronize_srcu(), which returned 957dad81a20SPaul E. McKenney * to its caller on CPU B, then both CPU A and CPU B are guaranteed 958dad81a20SPaul E. McKenney * to have executed a full memory barrier during the execution of 959dad81a20SPaul E. McKenney * synchronize_srcu(). This guarantee applies even if CPU A and CPU B 960dad81a20SPaul E. McKenney * are the same CPU, but again only if the system has more than one CPU. 961dad81a20SPaul E. McKenney * 962dad81a20SPaul E. McKenney * Of course, these memory-ordering guarantees apply only when 963dad81a20SPaul E. McKenney * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are 964dad81a20SPaul E. McKenney * passed the same srcu_struct structure. 9652da4b2a7SPaul E. McKenney * 9662da4b2a7SPaul E. McKenney * If SRCU is likely idle, expedite the first request. This semantic 9672da4b2a7SPaul E. McKenney * was provided by Classic SRCU, and is relied upon by its users, so TREE 9682da4b2a7SPaul E. McKenney * SRCU must also provide it. Note that detecting idleness is heuristic 9692da4b2a7SPaul E. McKenney * and subject to both false positives and negatives. 970dad81a20SPaul E. McKenney */ 971dad81a20SPaul E. McKenney void synchronize_srcu(struct srcu_struct *sp) 972dad81a20SPaul E. McKenney { 9732da4b2a7SPaul E. McKenney if (srcu_might_be_idle(sp) || rcu_gp_is_expedited()) 974dad81a20SPaul E. McKenney synchronize_srcu_expedited(sp); 975dad81a20SPaul E. McKenney else 9761e9a038bSPaul E. McKenney __synchronize_srcu(sp, true); 977dad81a20SPaul E. McKenney } 978dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(synchronize_srcu); 979dad81a20SPaul E. McKenney 980da915ad5SPaul E. McKenney /* 981da915ad5SPaul E. McKenney * Callback function for srcu_barrier() use. 982da915ad5SPaul E. McKenney */ 983da915ad5SPaul E. McKenney static void srcu_barrier_cb(struct rcu_head *rhp) 984da915ad5SPaul E. McKenney { 985da915ad5SPaul E. McKenney struct srcu_data *sdp; 986da915ad5SPaul E. McKenney struct srcu_struct *sp; 987da915ad5SPaul E. McKenney 988da915ad5SPaul E. McKenney sdp = container_of(rhp, struct srcu_data, srcu_barrier_head); 989da915ad5SPaul E. McKenney sp = sdp->sp; 990da915ad5SPaul E. McKenney if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) 991da915ad5SPaul E. McKenney complete(&sp->srcu_barrier_completion); 992da915ad5SPaul E. McKenney } 993da915ad5SPaul E. McKenney 994dad81a20SPaul E. McKenney /** 995dad81a20SPaul E. McKenney * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. 996dad81a20SPaul E. McKenney * @sp: srcu_struct on which to wait for in-flight callbacks. 997dad81a20SPaul E. McKenney */ 998dad81a20SPaul E. McKenney void srcu_barrier(struct srcu_struct *sp) 999dad81a20SPaul E. McKenney { 1000da915ad5SPaul E. McKenney int cpu; 1001da915ad5SPaul E. McKenney struct srcu_data *sdp; 1002da915ad5SPaul E. McKenney unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq); 1003da915ad5SPaul E. McKenney 1004da915ad5SPaul E. McKenney check_init_srcu_struct(sp); 1005da915ad5SPaul E. McKenney mutex_lock(&sp->srcu_barrier_mutex); 1006da915ad5SPaul E. McKenney if (rcu_seq_done(&sp->srcu_barrier_seq, s)) { 1007da915ad5SPaul E. McKenney smp_mb(); /* Force ordering following return. */ 1008da915ad5SPaul E. McKenney mutex_unlock(&sp->srcu_barrier_mutex); 1009da915ad5SPaul E. McKenney return; /* Someone else did our work for us. */ 1010da915ad5SPaul E. McKenney } 1011da915ad5SPaul E. McKenney rcu_seq_start(&sp->srcu_barrier_seq); 1012da915ad5SPaul E. McKenney init_completion(&sp->srcu_barrier_completion); 1013da915ad5SPaul E. McKenney 1014da915ad5SPaul E. McKenney /* Initial count prevents reaching zero until all CBs are posted. */ 1015da915ad5SPaul E. McKenney atomic_set(&sp->srcu_barrier_cpu_cnt, 1); 1016da915ad5SPaul E. McKenney 1017da915ad5SPaul E. McKenney /* 1018da915ad5SPaul E. McKenney * Each pass through this loop enqueues a callback, but only 1019da915ad5SPaul E. McKenney * on CPUs already having callbacks enqueued. Note that if 1020da915ad5SPaul E. McKenney * a CPU already has callbacks enqueue, it must have already 1021da915ad5SPaul E. McKenney * registered the need for a future grace period, so all we 1022da915ad5SPaul E. McKenney * need do is enqueue a callback that will use the same 1023da915ad5SPaul E. McKenney * grace period as the last callback already in the queue. 1024da915ad5SPaul E. McKenney */ 1025da915ad5SPaul E. McKenney for_each_possible_cpu(cpu) { 1026da915ad5SPaul E. McKenney sdp = per_cpu_ptr(sp->sda, cpu); 1027a3883df3SPaul E. McKenney raw_spin_lock_irq_rcu_node(sdp); 1028da915ad5SPaul E. McKenney atomic_inc(&sp->srcu_barrier_cpu_cnt); 1029da915ad5SPaul E. McKenney sdp->srcu_barrier_head.func = srcu_barrier_cb; 1030a602538eSPaul E. McKenney debug_rcu_head_queue(&sdp->srcu_barrier_head); 1031da915ad5SPaul E. McKenney if (!rcu_segcblist_entrain(&sdp->srcu_cblist, 1032a602538eSPaul E. McKenney &sdp->srcu_barrier_head, 0)) { 1033a602538eSPaul E. McKenney debug_rcu_head_unqueue(&sdp->srcu_barrier_head); 1034da915ad5SPaul E. McKenney atomic_dec(&sp->srcu_barrier_cpu_cnt); 1035a602538eSPaul E. McKenney } 1036a3883df3SPaul E. McKenney raw_spin_unlock_irq_rcu_node(sdp); 1037da915ad5SPaul E. McKenney } 1038da915ad5SPaul E. McKenney 1039da915ad5SPaul E. McKenney /* Remove the initial count, at which point reaching zero can happen. */ 1040da915ad5SPaul E. McKenney if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) 1041da915ad5SPaul E. McKenney complete(&sp->srcu_barrier_completion); 1042da915ad5SPaul E. McKenney wait_for_completion(&sp->srcu_barrier_completion); 1043da915ad5SPaul E. McKenney 1044da915ad5SPaul E. McKenney rcu_seq_end(&sp->srcu_barrier_seq); 1045da915ad5SPaul E. McKenney mutex_unlock(&sp->srcu_barrier_mutex); 1046dad81a20SPaul E. McKenney } 1047dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_barrier); 1048dad81a20SPaul E. McKenney 1049dad81a20SPaul E. McKenney /** 1050dad81a20SPaul E. McKenney * srcu_batches_completed - return batches completed. 1051dad81a20SPaul E. McKenney * @sp: srcu_struct on which to report batch completion. 1052dad81a20SPaul E. McKenney * 1053dad81a20SPaul E. McKenney * Report the number of batches, correlated with, but not necessarily 1054dad81a20SPaul E. McKenney * precisely the same as, the number of grace periods that have elapsed. 1055dad81a20SPaul E. McKenney */ 1056dad81a20SPaul E. McKenney unsigned long srcu_batches_completed(struct srcu_struct *sp) 1057dad81a20SPaul E. McKenney { 1058da915ad5SPaul E. McKenney return sp->srcu_idx; 1059dad81a20SPaul E. McKenney } 1060dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_batches_completed); 1061dad81a20SPaul E. McKenney 1062dad81a20SPaul E. McKenney /* 1063da915ad5SPaul E. McKenney * Core SRCU state machine. Push state bits of ->srcu_gp_seq 1064da915ad5SPaul E. McKenney * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has 1065da915ad5SPaul E. McKenney * completed in that state. 1066dad81a20SPaul E. McKenney */ 1067da915ad5SPaul E. McKenney static void srcu_advance_state(struct srcu_struct *sp) 1068dad81a20SPaul E. McKenney { 1069dad81a20SPaul E. McKenney int idx; 1070dad81a20SPaul E. McKenney 1071da915ad5SPaul E. McKenney mutex_lock(&sp->srcu_gp_mutex); 1072da915ad5SPaul E. McKenney 1073dad81a20SPaul E. McKenney /* 1074dad81a20SPaul E. McKenney * Because readers might be delayed for an extended period after 1075da915ad5SPaul E. McKenney * fetching ->srcu_idx for their index, at any point in time there 1076dad81a20SPaul E. McKenney * might well be readers using both idx=0 and idx=1. We therefore 1077dad81a20SPaul E. McKenney * need to wait for readers to clear from both index values before 1078dad81a20SPaul E. McKenney * invoking a callback. 1079dad81a20SPaul E. McKenney * 1080dad81a20SPaul E. McKenney * The load-acquire ensures that we see the accesses performed 1081dad81a20SPaul E. McKenney * by the prior grace period. 1082dad81a20SPaul E. McKenney */ 1083dad81a20SPaul E. McKenney idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */ 1084dad81a20SPaul E. McKenney if (idx == SRCU_STATE_IDLE) { 1085a3883df3SPaul E. McKenney raw_spin_lock_irq_rcu_node(sp); 1086da915ad5SPaul E. McKenney if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { 1087da915ad5SPaul E. McKenney WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq)); 1088a3883df3SPaul E. McKenney raw_spin_unlock_irq_rcu_node(sp); 1089da915ad5SPaul E. McKenney mutex_unlock(&sp->srcu_gp_mutex); 1090dad81a20SPaul E. McKenney return; 1091dad81a20SPaul E. McKenney } 1092dad81a20SPaul E. McKenney idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); 1093dad81a20SPaul E. McKenney if (idx == SRCU_STATE_IDLE) 1094dad81a20SPaul E. McKenney srcu_gp_start(sp); 1095a3883df3SPaul E. McKenney raw_spin_unlock_irq_rcu_node(sp); 1096da915ad5SPaul E. McKenney if (idx != SRCU_STATE_IDLE) { 1097da915ad5SPaul E. McKenney mutex_unlock(&sp->srcu_gp_mutex); 1098dad81a20SPaul E. McKenney return; /* Someone else started the grace period. */ 1099dad81a20SPaul E. McKenney } 1100da915ad5SPaul E. McKenney } 1101dad81a20SPaul E. McKenney 1102dad81a20SPaul E. McKenney if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { 1103da915ad5SPaul E. McKenney idx = 1 ^ (sp->srcu_idx & 1); 1104da915ad5SPaul E. McKenney if (!try_check_zero(sp, idx, 1)) { 1105da915ad5SPaul E. McKenney mutex_unlock(&sp->srcu_gp_mutex); 1106dad81a20SPaul E. McKenney return; /* readers present, retry later. */ 1107da915ad5SPaul E. McKenney } 1108dad81a20SPaul E. McKenney srcu_flip(sp); 1109dad81a20SPaul E. McKenney rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2); 1110dad81a20SPaul E. McKenney } 1111dad81a20SPaul E. McKenney 1112dad81a20SPaul E. McKenney if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { 1113dad81a20SPaul E. McKenney 1114dad81a20SPaul E. McKenney /* 1115dad81a20SPaul E. McKenney * SRCU read-side critical sections are normally short, 1116dad81a20SPaul E. McKenney * so check at least twice in quick succession after a flip. 1117dad81a20SPaul E. McKenney */ 1118da915ad5SPaul E. McKenney idx = 1 ^ (sp->srcu_idx & 1); 1119da915ad5SPaul E. McKenney if (!try_check_zero(sp, idx, 2)) { 1120da915ad5SPaul E. McKenney mutex_unlock(&sp->srcu_gp_mutex); 1121da915ad5SPaul E. McKenney return; /* readers present, retry later. */ 1122da915ad5SPaul E. McKenney } 1123da915ad5SPaul E. McKenney srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */ 1124dad81a20SPaul E. McKenney } 1125dad81a20SPaul E. McKenney } 1126dad81a20SPaul E. McKenney 1127dad81a20SPaul E. McKenney /* 1128dad81a20SPaul E. McKenney * Invoke a limited number of SRCU callbacks that have passed through 1129dad81a20SPaul E. McKenney * their grace period. If there are more to do, SRCU will reschedule 1130dad81a20SPaul E. McKenney * the workqueue. Note that needed memory barriers have been executed 1131dad81a20SPaul E. McKenney * in this task's context by srcu_readers_active_idx_check(). 1132dad81a20SPaul E. McKenney */ 1133da915ad5SPaul E. McKenney static void srcu_invoke_callbacks(struct work_struct *work) 1134dad81a20SPaul E. McKenney { 1135da915ad5SPaul E. McKenney bool more; 1136dad81a20SPaul E. McKenney struct rcu_cblist ready_cbs; 1137dad81a20SPaul E. McKenney struct rcu_head *rhp; 1138da915ad5SPaul E. McKenney struct srcu_data *sdp; 1139da915ad5SPaul E. McKenney struct srcu_struct *sp; 1140dad81a20SPaul E. McKenney 1141da915ad5SPaul E. McKenney sdp = container_of(work, struct srcu_data, work.work); 1142da915ad5SPaul E. McKenney sp = sdp->sp; 1143dad81a20SPaul E. McKenney rcu_cblist_init(&ready_cbs); 1144a3883df3SPaul E. McKenney raw_spin_lock_irq_rcu_node(sdp); 1145da915ad5SPaul E. McKenney rcu_segcblist_advance(&sdp->srcu_cblist, 1146da915ad5SPaul E. McKenney rcu_seq_current(&sp->srcu_gp_seq)); 1147da915ad5SPaul E. McKenney if (sdp->srcu_cblist_invoking || 1148da915ad5SPaul E. McKenney !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { 1149a3883df3SPaul E. McKenney raw_spin_unlock_irq_rcu_node(sdp); 1150da915ad5SPaul E. McKenney return; /* Someone else on the job or nothing to do. */ 1151da915ad5SPaul E. McKenney } 1152da915ad5SPaul E. McKenney 1153da915ad5SPaul E. McKenney /* We are on the job! Extract and invoke ready callbacks. */ 1154da915ad5SPaul E. McKenney sdp->srcu_cblist_invoking = true; 1155da915ad5SPaul E. McKenney rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs); 1156a3883df3SPaul E. McKenney raw_spin_unlock_irq_rcu_node(sdp); 1157dad81a20SPaul E. McKenney rhp = rcu_cblist_dequeue(&ready_cbs); 1158dad81a20SPaul E. McKenney for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { 1159a602538eSPaul E. McKenney debug_rcu_head_unqueue(rhp); 1160dad81a20SPaul E. McKenney local_bh_disable(); 1161dad81a20SPaul E. McKenney rhp->func(rhp); 1162dad81a20SPaul E. McKenney local_bh_enable(); 1163dad81a20SPaul E. McKenney } 1164da915ad5SPaul E. McKenney 1165da915ad5SPaul E. McKenney /* 1166da915ad5SPaul E. McKenney * Update counts, accelerate new callbacks, and if needed, 1167da915ad5SPaul E. McKenney * schedule another round of callback invocation. 1168da915ad5SPaul E. McKenney */ 1169a3883df3SPaul E. McKenney raw_spin_lock_irq_rcu_node(sdp); 1170da915ad5SPaul E. McKenney rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs); 1171da915ad5SPaul E. McKenney (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, 1172da915ad5SPaul E. McKenney rcu_seq_snap(&sp->srcu_gp_seq)); 1173da915ad5SPaul E. McKenney sdp->srcu_cblist_invoking = false; 1174da915ad5SPaul E. McKenney more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); 1175a3883df3SPaul E. McKenney raw_spin_unlock_irq_rcu_node(sdp); 1176da915ad5SPaul E. McKenney if (more) 1177da915ad5SPaul E. McKenney srcu_schedule_cbs_sdp(sdp, 0); 1178dad81a20SPaul E. McKenney } 1179dad81a20SPaul E. McKenney 1180dad81a20SPaul E. McKenney /* 1181dad81a20SPaul E. McKenney * Finished one round of SRCU grace period. Start another if there are 1182dad81a20SPaul E. McKenney * more SRCU callbacks queued, otherwise put SRCU into not-running state. 1183dad81a20SPaul E. McKenney */ 1184dad81a20SPaul E. McKenney static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay) 1185dad81a20SPaul E. McKenney { 1186da915ad5SPaul E. McKenney bool pushgp = true; 1187dad81a20SPaul E. McKenney 1188a3883df3SPaul E. McKenney raw_spin_lock_irq_rcu_node(sp); 1189da915ad5SPaul E. McKenney if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { 1190da915ad5SPaul E. McKenney if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) { 1191da915ad5SPaul E. McKenney /* All requests fulfilled, time to go idle. */ 1192da915ad5SPaul E. McKenney pushgp = false; 1193dad81a20SPaul E. McKenney } 1194da915ad5SPaul E. McKenney } else if (!rcu_seq_state(sp->srcu_gp_seq)) { 1195da915ad5SPaul E. McKenney /* Outstanding request and no GP. Start one. */ 1196da915ad5SPaul E. McKenney srcu_gp_start(sp); 1197da915ad5SPaul E. McKenney } 1198a3883df3SPaul E. McKenney raw_spin_unlock_irq_rcu_node(sp); 1199dad81a20SPaul E. McKenney 1200da915ad5SPaul E. McKenney if (pushgp) 1201dad81a20SPaul E. McKenney queue_delayed_work(system_power_efficient_wq, &sp->work, delay); 1202dad81a20SPaul E. McKenney } 1203dad81a20SPaul E. McKenney 1204dad81a20SPaul E. McKenney /* 1205dad81a20SPaul E. McKenney * This is the work-queue function that handles SRCU grace periods. 1206dad81a20SPaul E. McKenney */ 12070d8a1e83SPaul E. McKenney static void process_srcu(struct work_struct *work) 1208dad81a20SPaul E. McKenney { 1209dad81a20SPaul E. McKenney struct srcu_struct *sp; 1210dad81a20SPaul E. McKenney 1211dad81a20SPaul E. McKenney sp = container_of(work, struct srcu_struct, work.work); 1212dad81a20SPaul E. McKenney 1213da915ad5SPaul E. McKenney srcu_advance_state(sp); 12141e9a038bSPaul E. McKenney srcu_reschedule(sp, srcu_get_delay(sp)); 1215dad81a20SPaul E. McKenney } 12167f6733c3SPaul E. McKenney 12177f6733c3SPaul E. McKenney void srcutorture_get_gp_data(enum rcutorture_type test_type, 12187f6733c3SPaul E. McKenney struct srcu_struct *sp, int *flags, 12191e9a038bSPaul E. McKenney unsigned long *gpnum, unsigned long *completed) 12207f6733c3SPaul E. McKenney { 12217f6733c3SPaul E. McKenney if (test_type != SRCU_FLAVOR) 12227f6733c3SPaul E. McKenney return; 12237f6733c3SPaul E. McKenney *flags = 0; 12247f6733c3SPaul E. McKenney *completed = rcu_seq_ctr(sp->srcu_gp_seq); 12257f6733c3SPaul E. McKenney *gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed); 12267f6733c3SPaul E. McKenney } 12277f6733c3SPaul E. McKenney EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); 12281f4f6da1SPaul E. McKenney 1229115a1a52SPaul E. McKenney void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf) 1230115a1a52SPaul E. McKenney { 1231115a1a52SPaul E. McKenney int cpu; 1232115a1a52SPaul E. McKenney int idx; 1233ac3748c6SPaul E. McKenney unsigned long s0 = 0, s1 = 0; 1234115a1a52SPaul E. McKenney 1235115a1a52SPaul E. McKenney idx = sp->srcu_idx & 0x1; 1236115a1a52SPaul E. McKenney pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", tt, tf, idx); 1237115a1a52SPaul E. McKenney for_each_possible_cpu(cpu) { 1238115a1a52SPaul E. McKenney unsigned long l0, l1; 1239115a1a52SPaul E. McKenney unsigned long u0, u1; 1240115a1a52SPaul E. McKenney long c0, c1; 1241115a1a52SPaul E. McKenney struct srcu_data *counts; 1242115a1a52SPaul E. McKenney 1243115a1a52SPaul E. McKenney counts = per_cpu_ptr(sp->sda, cpu); 1244115a1a52SPaul E. McKenney u0 = counts->srcu_unlock_count[!idx]; 1245115a1a52SPaul E. McKenney u1 = counts->srcu_unlock_count[idx]; 1246115a1a52SPaul E. McKenney 1247115a1a52SPaul E. McKenney /* 1248115a1a52SPaul E. McKenney * Make sure that a lock is always counted if the corresponding 1249115a1a52SPaul E. McKenney * unlock is counted. 1250115a1a52SPaul E. McKenney */ 1251115a1a52SPaul E. McKenney smp_rmb(); 1252115a1a52SPaul E. McKenney 1253115a1a52SPaul E. McKenney l0 = counts->srcu_lock_count[!idx]; 1254115a1a52SPaul E. McKenney l1 = counts->srcu_lock_count[idx]; 1255115a1a52SPaul E. McKenney 1256115a1a52SPaul E. McKenney c0 = l0 - u0; 1257115a1a52SPaul E. McKenney c1 = l1 - u1; 1258115a1a52SPaul E. McKenney pr_cont(" %d(%ld,%ld)", cpu, c0, c1); 1259ac3748c6SPaul E. McKenney s0 += c0; 1260ac3748c6SPaul E. McKenney s1 += c1; 1261115a1a52SPaul E. McKenney } 1262ac3748c6SPaul E. McKenney pr_cont(" T(%ld,%ld)\n", s0, s1); 1263115a1a52SPaul E. McKenney } 1264115a1a52SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_torture_stats_print); 1265115a1a52SPaul E. McKenney 12661f4f6da1SPaul E. McKenney static int __init srcu_bootup_announce(void) 12671f4f6da1SPaul E. McKenney { 12681f4f6da1SPaul E. McKenney pr_info("Hierarchical SRCU implementation.\n"); 12690c8e0e3cSPaul E. McKenney if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF) 12700c8e0e3cSPaul E. McKenney pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff); 12711f4f6da1SPaul E. McKenney return 0; 12721f4f6da1SPaul E. McKenney } 12731f4f6da1SPaul E. McKenney early_initcall(srcu_bootup_announce); 1274