// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
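/*
 * Illustrative sketch (not used by this file): these _rcu_node lock
 * wrappers pair exactly like the underlying spinlock primitives, but
 * add smp_mb__after_unlock_lock() so that acquiring the lock also
 * provides the full ordering that grace-period detection relies on,
 * on architectures where UNLOCK+LOCK is not already a full barrier.
 * Here "sdp" stands for any structure with a private ->lock, such as
 * struct srcu_data:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave_rcu_node(sdp, flags);
 *	... update fields protected by sdp->lock ...
 *	spin_unlock_irqrestore_rcu_node(sdp, flags);
 */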
/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them, so the per-CPU
 * ->srcu_lock_count[] and ->srcu_unlock_count[] arrays are left
 * untouched here.
 */
static void init_srcu_struct_nodes(struct srcu_struct *ssp)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Initialize geometry if it has not already been initialized. */
	rcu_init_geometry();

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
	}
	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static parameter
 * tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	ssp->srcu_size_state = SRCU_SIZE_SMALL;
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		return -ENOMEM;
	init_srcu_struct_nodes(ssp);
	ssp->srcu_size_state = SRCU_SIZE_BIG;
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}
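/*
 * Usage sketch (illustrative only; "my_srcu" and "my_ss" are made-up
 * names): an srcu_struct may be defined statically, in which case the
 * combining tree is wired up lazily by check_init_srcu_struct() above,
 * or allocated dynamically and initialized explicitly:
 *
 *	DEFINE_STATIC_SRCU(my_srcu);		// static definition
 *
 *	struct srcu_struct my_ss;		// dynamic definition
 *	int ret = init_srcu_struct(&my_ss);	// check for -ENOMEM!
 *	...
 *	cleanup_srcu_struct(&my_ss);	// after all readers and CBs finish
 */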
/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}
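/*
 * Sketch of the store-buffering pattern mentioned above (illustrative;
 * X stands for some reader-visible variable; smp_mb() A is in
 * srcu_readers_active_idx_check() and smp_mb() B is in
 * __srcu_read_lock()):
 *
 *	Updater					Reader
 *	-------					------
 *	WRITE_ONCE(X, 1);			this_cpu_inc(->srcu_lock_count[idx]);
 *	smp_mb();  // A				smp_mb();  // B
 *	reads ->srcu_lock_count[]		reads X
 *
 * Without A and B, both sides could miss the other's store, allowing
 * the updater to declare the index quiescent while a reader that can
 * still see the old value of X is running within it.
 */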
/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL 1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(rcu_seq_current(&ssp->srcu_gp_seq) != ssp->srcu_gp_seq_needed) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)),
			rcu_seq_current(&ssp->srcu_gp_seq), ssp->srcu_gp_seq_needed);
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(ssp->sda);
	ssp->sda = NULL;
	ssp->srcu_size_state = SRCU_SIZE_SMALL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
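/*
 * Reader-side usage sketch (illustrative; "my_srcu" and "gp" are
 * made-up names).  Readers normally go through srcu_read_lock() and
 * srcu_read_unlock(), which wrap the __srcu_read_lock() and
 * __srcu_read_unlock() functions above with lockdep bookkeeping:
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	... use p; the reader may block, hence "sleepable" RCU ...
 *	srcu_read_unlock(&my_srcu, idx);
 */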
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY 5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp;
	int state;

	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		sdp = per_cpu_ptr(ssp->sda, 0);
	else
		sdp = this_cpu_ptr(ssp->sda);
	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}


static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}
/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(ssp);
	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER) {
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, 0), cbdelay);
	} else {
		idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
		srcu_for_each_node_breadth_first(ssp, snp) {
			spin_lock_irq_rcu_node(snp);
			cbs = false;
			last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
			if (last_lvl)
				cbs = snp->srcu_have_cbs[idx] == gpseq;
			snp->srcu_have_cbs[idx] = gpseq;
			rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
			if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
			mask = snp->srcu_data_have_cbs[idx];
			snp->srcu_data_have_cbs[idx] = 0;
			spin_unlock_irq_rcu_node(snp);
			if (cbs)
				srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
		}
	}

	/* Occasionally prevent srcu_data counter wrap. */
	if (!(gpseq & counter_wrap_check))
		for_each_possible_cpu(cpu) {
			sdp = per_cpu_ptr(ssp->sda, cpu);
			spin_lock_irqsave_rcu_node(sdp, flags);
			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
				sdp->srcu_gp_seq_needed = gpseq;
			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
				sdp->srcu_gp_seq_needed_exp = gpseq;
			spin_unlock_irqrestore_rcu_node(sdp, flags);
		}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	if (snp)
		for (; snp != NULL; snp = snp->srcu_parent) {
			if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
			    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
				return;
			spin_lock_irqsave_rcu_node(snp, flags);
			if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
				spin_unlock_irqrestore_rcu_node(snp, flags);
				return;
			}
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
			spin_unlock_irqrestore_rcu_node(snp, flags);
		}
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}
/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp;
	struct srcu_node *snp_leaf = smp_load_acquire(&sdp->mynode);
	unsigned long snp_seq;

	if (snp_leaf)
		/* Each pass through the loop does one level of the srcu_node tree. */
		for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
			if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != snp_leaf)
				return; /* GP already done and CBs recorded. */
			spin_lock_irqsave_rcu_node(snp, flags);
			if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
				snp_seq = snp->srcu_have_cbs[idx];
				if (snp == snp_leaf && snp_seq == s)
					snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
				spin_unlock_irqrestore_rcu_node(snp, flags);
				if (snp == snp_leaf && snp_seq != s) {
					srcu_schedule_cbs_sdp(sdp, do_norm
							      ? SRCU_INTERVAL
							      : 0);
					return;
				}
				if (!do_norm)
					srcu_funnel_exp_start(ssp, snp, s);
				return;
			}
			snp->srcu_have_cbs[idx] = s;
			if (snp == snp_leaf)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
			spin_unlock_irqrestore_rcu_node(snp, flags);
		}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}
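/*
 * Summary sketch (no new mechanism): the grace-period state machine
 * (driven by process_srcu(), which is not shown in this excerpt) uses
 * the helpers above roughly as follows.  In SRCU_STATE_SCAN1 it waits
 * via try_check_zero() for readers on the inactive index, then calls
 * srcu_flip() so that new readers use the other index, and then in
 * SRCU_STATE_SCAN2 waits via try_check_zero() for any readers that may
 * still be using the previously active index.
 */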
/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period from idle to specify expediting because they will all end
 * up requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle. */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}
/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
					     struct rcu_head *rhp, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(ssp);
	idx = srcu_read_lock(ssp);
	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_CALL)
		sdp = per_cpu_ptr(ssp->sda, 0);
	else
		sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rhp)
		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, smp_load_acquire(&sdp->mynode), s);
	srcu_read_unlock(ssp, idx);
	return s;
}
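/*
 * Note: srcu_gp_start_if_needed() above is the common back end for both
 * callback queuing and polling.  __call_srcu() below passes a real rhp,
 * while start_poll_synchronize_srcu() passes rhp == NULL purely to
 * ensure a future grace period, with the returned sequence number then
 * serving as a cookie for poll_state_synchronize_srcu().
 */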
On systems with 869dad81a20SPaul E. McKenney * more than one CPU, this means that when "func()" is invoked, each CPU 870dad81a20SPaul E. McKenney * is guaranteed to have executed a full memory barrier since the end of 871dad81a20SPaul E. McKenney * its last corresponding SRCU read-side critical section whose beginning 8725ef98a63SPaul E. McKenney * preceded the call to call_srcu(). It also means that each CPU executing 873dad81a20SPaul E. McKenney * an SRCU read-side critical section that continues beyond the start of 8745ef98a63SPaul E. McKenney * "func()" must have executed a memory barrier after the call_srcu() 875dad81a20SPaul E. McKenney * but before the beginning of that SRCU read-side critical section. 876dad81a20SPaul E. McKenney * Note that these guarantees include CPUs that are offline, idle, or 877dad81a20SPaul E. McKenney * executing in user mode, as well as CPUs that are executing in the kernel. 878dad81a20SPaul E. McKenney * 8795ef98a63SPaul E. McKenney * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the 880dad81a20SPaul E. McKenney * resulting SRCU callback function "func()", then both CPU A and CPU 881dad81a20SPaul E. McKenney * B are guaranteed to execute a full memory barrier during the time 8825ef98a63SPaul E. McKenney * interval between the call to call_srcu() and the invocation of "func()". 883dad81a20SPaul E. McKenney * This guarantee applies even if CPU A and CPU B are the same CPU (but 884dad81a20SPaul E. McKenney * again only if the system has more than one CPU). 885dad81a20SPaul E. McKenney * 886dad81a20SPaul E. McKenney * Of course, these guarantees apply only for invocations of call_srcu(), 887dad81a20SPaul E. McKenney * srcu_read_lock(), and srcu_read_unlock() that are all passed the same 888dad81a20SPaul E. McKenney * srcu_struct structure. 889dad81a20SPaul E. McKenney */ 89011b00045SJiang Biao static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, 8911e9a038bSPaul E. McKenney rcu_callback_t func, bool do_norm) 892dad81a20SPaul E. McKenney { 893a602538eSPaul E. McKenney if (debug_rcu_head_queue(rhp)) { 894a602538eSPaul E. McKenney /* Probable double call_srcu(), so leak the callback. */ 895a602538eSPaul E. McKenney WRITE_ONCE(rhp->func, srcu_leak_callback); 896a602538eSPaul E. McKenney WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n"); 897a602538eSPaul E. McKenney return; 898a602538eSPaul E. McKenney } 899da915ad5SPaul E. McKenney rhp->func = func; 9005358c9faSPaul E. McKenney (void)srcu_gp_start_if_needed(ssp, rhp, do_norm); 9011e9a038bSPaul E. McKenney } 9021e9a038bSPaul E. McKenney 9035a0465e1SPaul E. McKenney /** 9045a0465e1SPaul E. McKenney * call_srcu() - Queue a callback for invocation after an SRCU grace period 905aacb5d91SPaul E. McKenney * @ssp: srcu_struct in queue the callback 90627fdb35fSPaul E. McKenney * @rhp: structure to be used for queueing the SRCU callback. 9075a0465e1SPaul E. McKenney * @func: function to be invoked after the SRCU grace period 9085a0465e1SPaul E. McKenney * 9095a0465e1SPaul E. McKenney * The callback function will be invoked some time after a full SRCU 9105a0465e1SPaul E. McKenney * grace period elapses, in other words after all pre-existing SRCU 9115a0465e1SPaul E. McKenney * read-side critical sections have completed. However, the callback 9125a0465e1SPaul E. McKenney * function might well execute concurrently with other SRCU read-side 9135a0465e1SPaul E. McKenney * critical sections that started after call_srcu() was invoked. SRCU 9145a0465e1SPaul E. 
McKenney * read-side critical sections are delimited by srcu_read_lock() and 9155a0465e1SPaul E. McKenney * srcu_read_unlock(), and may be nested. 9165a0465e1SPaul E. McKenney * 9175a0465e1SPaul E. McKenney * The callback will be invoked from process context, but must nevertheless 9185a0465e1SPaul E. McKenney * be fast and must not block. 9195a0465e1SPaul E. McKenney */ 920aacb5d91SPaul E. McKenney void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, 9211e9a038bSPaul E. McKenney rcu_callback_t func) 9221e9a038bSPaul E. McKenney { 923aacb5d91SPaul E. McKenney __call_srcu(ssp, rhp, func, true); 924dad81a20SPaul E. McKenney } 925dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(call_srcu); 926dad81a20SPaul E. McKenney 927dad81a20SPaul E. McKenney /* 928dad81a20SPaul E. McKenney * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). 929dad81a20SPaul E. McKenney */ 930aacb5d91SPaul E. McKenney static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm) 931dad81a20SPaul E. McKenney { 932dad81a20SPaul E. McKenney struct rcu_synchronize rcu; 933dad81a20SPaul E. McKenney 934f505d434SJakub Kicinski RCU_LOCKDEP_WARN(lockdep_is_held(ssp) || 935dad81a20SPaul E. McKenney lock_is_held(&rcu_bh_lock_map) || 936dad81a20SPaul E. McKenney lock_is_held(&rcu_lock_map) || 937dad81a20SPaul E. McKenney lock_is_held(&rcu_sched_lock_map), 938dad81a20SPaul E. McKenney "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section"); 939dad81a20SPaul E. McKenney 940dad81a20SPaul E. McKenney if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) 941dad81a20SPaul E. McKenney return; 942dad81a20SPaul E. McKenney might_sleep(); 943aacb5d91SPaul E. McKenney check_init_srcu_struct(ssp); 944dad81a20SPaul E. McKenney init_completion(&rcu.completion); 945da915ad5SPaul E. McKenney init_rcu_head_on_stack(&rcu.head); 946aacb5d91SPaul E. McKenney __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm); 947dad81a20SPaul E. McKenney wait_for_completion(&rcu.completion); 948da915ad5SPaul E. McKenney destroy_rcu_head_on_stack(&rcu.head); 94935732cf9SPaul E. McKenney 95035732cf9SPaul E. McKenney /* 95135732cf9SPaul E. McKenney * Make sure that later code is ordered after the SRCU grace 952d6331980SPaul E. McKenney * period. This pairs with the spin_lock_irq_rcu_node() 95335732cf9SPaul E. McKenney * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed 95435732cf9SPaul E. McKenney * because the current CPU might have been totally uninvolved with 95535732cf9SPaul E. McKenney * (and thus unordered against) that grace period. 95635732cf9SPaul E. McKenney */ 95735732cf9SPaul E. McKenney smp_mb(); 958dad81a20SPaul E. McKenney } 959dad81a20SPaul E. McKenney 960dad81a20SPaul E. McKenney /** 961dad81a20SPaul E. McKenney * synchronize_srcu_expedited - Brute-force SRCU grace period 962aacb5d91SPaul E. McKenney * @ssp: srcu_struct with which to synchronize. 963dad81a20SPaul E. McKenney * 964dad81a20SPaul E. McKenney * Wait for an SRCU grace period to elapse, but be more aggressive about 965dad81a20SPaul E. McKenney * spinning rather than blocking when waiting. 966dad81a20SPaul E. McKenney * 967dad81a20SPaul E. McKenney * Note that synchronize_srcu_expedited() has the same deadlock and 968dad81a20SPaul E. McKenney * memory-ordering properties as does synchronize_srcu(). 969dad81a20SPaul E. McKenney */ 970aacb5d91SPaul E. McKenney void synchronize_srcu_expedited(struct srcu_struct *ssp) 971dad81a20SPaul E. McKenney { 972aacb5d91SPaul E. 
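/*
 * Usage sketch for call_srcu() (illustrative; "struct foo", "foo_cb",
 * "fp", and "my_srcu" are made-up names).  The rcu_head is typically
 * embedded in the structure being protected, and the callback uses
 * container_of() to recover the enclosing structure:
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_cb(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rh);
 *
 *		kfree(fp);
 *	}
 *
 *	...
 *	call_srcu(&my_srcu, &fp->rh, foo_cb);
 */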
McKenney __synchronize_srcu(ssp, rcu_gp_is_normal()); 973dad81a20SPaul E. McKenney } 974dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); 975dad81a20SPaul E. McKenney 976dad81a20SPaul E. McKenney /** 977dad81a20SPaul E. McKenney * synchronize_srcu - wait for prior SRCU read-side critical-section completion 978aacb5d91SPaul E. McKenney * @ssp: srcu_struct with which to synchronize. 979dad81a20SPaul E. McKenney * 980dad81a20SPaul E. McKenney * Wait for the counts of both indexes to drain to zero. To avoid 981dad81a20SPaul E. McKenney * possible starvation of synchronize_srcu(), it first waits for the count 982da915ad5SPaul E. McKenney * of index ((->srcu_idx & 1) ^ 1) to drain to zero, 983da915ad5SPaul E. McKenney * and then flips ->srcu_idx and waits for the count of the other index. 984dad81a20SPaul E. McKenney * 985dad81a20SPaul E. McKenney * Can block; must be called from process context. 986dad81a20SPaul E. McKenney * 987dad81a20SPaul E. McKenney * Note that it is illegal to call synchronize_srcu() from the corresponding 988dad81a20SPaul E. McKenney * SRCU read-side critical section; doing so will result in deadlock. 989dad81a20SPaul E. McKenney * However, it is perfectly legal to call synchronize_srcu() on one 990dad81a20SPaul E. McKenney * srcu_struct from some other srcu_struct's read-side critical section, 991dad81a20SPaul E. McKenney * as long as the resulting graph of srcu_structs is acyclic. 992dad81a20SPaul E. McKenney * 993dad81a20SPaul E. McKenney * There are memory-ordering constraints implied by synchronize_srcu(). 994dad81a20SPaul E. McKenney * On systems with more than one CPU, when synchronize_srcu() returns, 995dad81a20SPaul E. McKenney * each CPU is guaranteed to have executed a full memory barrier since 9966eb95cc4SPaul E. McKenney * the end of its last corresponding SRCU read-side critical section 997dad81a20SPaul E. McKenney * whose beginning preceded the call to synchronize_srcu(). In addition, 998dad81a20SPaul E. McKenney * each CPU having an SRCU read-side critical section that extends beyond 999dad81a20SPaul E. McKenney * the return from synchronize_srcu() is guaranteed to have executed a 1000dad81a20SPaul E. McKenney * full memory barrier after the beginning of synchronize_srcu() and before 1001dad81a20SPaul E. McKenney * the beginning of that SRCU read-side critical section. Note that these 1002dad81a20SPaul E. McKenney * guarantees include CPUs that are offline, idle, or executing in user mode, 1003dad81a20SPaul E. McKenney * as well as CPUs that are executing in the kernel. 1004dad81a20SPaul E. McKenney * 1005dad81a20SPaul E. McKenney * Furthermore, if CPU A invoked synchronize_srcu(), which returned 1006dad81a20SPaul E. McKenney * to its caller on CPU B, then both CPU A and CPU B are guaranteed 1007dad81a20SPaul E. McKenney * to have executed a full memory barrier during the execution of 1008dad81a20SPaul E. McKenney * synchronize_srcu(). This guarantee applies even if CPU A and CPU B 1009dad81a20SPaul E. McKenney * are the same CPU, but again only if the system has more than one CPU. 1010dad81a20SPaul E. McKenney * 1011dad81a20SPaul E. McKenney * Of course, these memory-ordering guarantees apply only when 1012dad81a20SPaul E. McKenney * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are 1013dad81a20SPaul E. McKenney * passed the same srcu_struct structure. 10142da4b2a7SPaul E. McKenney * 10153d3a0d1bSPaul E. McKenney * Implementation of these memory-ordering guarantees is similar to 10163d3a0d1bSPaul E. McKenney * that of synchronize_rcu(). 10173d3a0d1bSPaul E. McKenney * 10182da4b2a7SPaul E. McKenney * If SRCU is likely idle, expedite the first request. This semantic 10192da4b2a7SPaul E. McKenney * was provided by Classic SRCU, and is relied upon by its users, so TREE 10202da4b2a7SPaul E. McKenney * SRCU must also provide it. Note that detecting idleness is heuristic 10212da4b2a7SPaul E. McKenney * and subject to both false positives and negatives. 1022dad81a20SPaul E. McKenney */ 1023aacb5d91SPaul E. McKenney void synchronize_srcu(struct srcu_struct *ssp) 1024dad81a20SPaul E. McKenney { 1025aacb5d91SPaul E. McKenney if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited()) 1026aacb5d91SPaul E. McKenney synchronize_srcu_expedited(ssp); 1027dad81a20SPaul E. McKenney else 1028aacb5d91SPaul E. McKenney __synchronize_srcu(ssp, true); 1029dad81a20SPaul E. McKenney } 1030dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(synchronize_srcu); 1031dad81a20SPaul E. McKenney
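/*
 * Illustrative sketch of the classical update-side pattern built on
 * synchronize_srcu(): unpublish, wait, then free.  The names my_srcu,
 * my_lock, my_ptr, and struct foo are hypothetical, with my_ptr assumed
 * to be loaded by readers via srcu_dereference().
 *
 *	static void foo_replace(struct foo *newp)
 *	{
 *		struct foo *oldp;
 *
 *		spin_lock(&my_lock);
 *		oldp = rcu_dereference_protected(my_ptr,
 *						 lockdep_is_held(&my_lock));
 *		rcu_assign_pointer(my_ptr, newp);	// Unpublish oldp.
 *		spin_unlock(&my_lock);
 *		synchronize_srcu(&my_srcu);		// Wait for readers.
 *		kfree(oldp);				// Now safe to free.
 *	}
 *
 * Because synchronize_srcu() blocks for a full grace period, it must not
 * be called from within a matching SRCU read-side critical section, as
 * the kernel-doc above explains.
 */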
10325358c9faSPaul E. McKenney /** 10335358c9faSPaul E. McKenney * get_state_synchronize_srcu - Provide an end-of-grace-period cookie 10345358c9faSPaul E. McKenney * @ssp: srcu_struct to provide cookie for. 10355358c9faSPaul E. McKenney * 10365358c9faSPaul E. McKenney * This function returns a cookie that can be passed to 10375358c9faSPaul E. McKenney * poll_state_synchronize_srcu(), which will return true if a full grace 10385358c9faSPaul E. McKenney * period has elapsed in the meantime. It is the caller's responsibility 10395358c9faSPaul E. McKenney * to make sure that grace period happens, for example, by invoking 10405358c9faSPaul E. McKenney * call_srcu() after return from get_state_synchronize_srcu(). 10415358c9faSPaul E. McKenney */ 10425358c9faSPaul E. McKenney unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp) 10435358c9faSPaul E. McKenney { 10445358c9faSPaul E. McKenney // Any prior manipulation of SRCU-protected data must happen 10455358c9faSPaul E. McKenney // before the load from ->srcu_gp_seq. 10465358c9faSPaul E. McKenney smp_mb(); 10475358c9faSPaul E. McKenney return rcu_seq_snap(&ssp->srcu_gp_seq); 10485358c9faSPaul E. McKenney } 10495358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(get_state_synchronize_srcu); 10505358c9faSPaul E. McKenney 10515358c9faSPaul E. McKenney /** 10525358c9faSPaul E. McKenney * start_poll_synchronize_srcu - Provide cookie and start grace period 10535358c9faSPaul E. McKenney * @ssp: srcu_struct to provide cookie for. 10545358c9faSPaul E. McKenney * 10555358c9faSPaul E. McKenney * This function returns a cookie that can be passed to 10565358c9faSPaul E. McKenney * poll_state_synchronize_srcu(), which will return true if a full grace 10575358c9faSPaul E. McKenney * period has elapsed in the meantime. Unlike get_state_synchronize_srcu(), 10585358c9faSPaul E. McKenney * this function also ensures that any needed SRCU grace period will be 10595358c9faSPaul E. McKenney * started. This convenience does come at a cost in terms of CPU overhead. 10605358c9faSPaul E. McKenney */ 10615358c9faSPaul E. McKenney unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp) 10625358c9faSPaul E. McKenney { 10635358c9faSPaul E. McKenney return srcu_gp_start_if_needed(ssp, NULL, true); 10645358c9faSPaul E. McKenney } 10655358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu); 10665358c9faSPaul E. McKenney 10675358c9faSPaul E. McKenney /** 10685358c9faSPaul E. McKenney * poll_state_synchronize_srcu - Has cookie's grace period ended? 10695358c9faSPaul E. McKenney
* @ssp: srcu_struct with which the cookie is associated. 10705358c9faSPaul E. McKenney * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu(). 10715358c9faSPaul E. McKenney * 10725358c9faSPaul E. McKenney * This function takes the cookie that was returned from either 10735358c9faSPaul E. McKenney * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and 10745358c9faSPaul E. McKenney * returns true if an SRCU grace period elapsed since the time that the 10755358c9faSPaul E. McKenney * cookie was created. 10764e7ccfaeSPaul E. McKenney * 10774e7ccfaeSPaul E. McKenney * Because cookies are finite in size, wrapping/overflow is possible. 10784e7ccfaeSPaul E. McKenney * This is more pronounced on 32-bit systems where cookies are 32 bits, 10794e7ccfaeSPaul E. McKenney * where in theory wrapping could happen in about 14 hours assuming 10804e7ccfaeSPaul E. McKenney * 25-microsecond expedited SRCU grace periods. However, a more likely 10814e7ccfaeSPaul E. McKenney * overflow lower bound is on the order of 24 days in the case of 10824e7ccfaeSPaul E. McKenney * one-millisecond SRCU grace periods. Of course, wrapping in a 64-bit 10834e7ccfaeSPaul E. McKenney * system requires geologic timespans, as in more than seven million years 10844e7ccfaeSPaul E. McKenney * even for expedited SRCU grace periods. 10854e7ccfaeSPaul E. McKenney * 10864e7ccfaeSPaul E. McKenney * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems 10874e7ccfaeSPaul E. McKenney * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU. This uses 10884e7ccfaeSPaul E. McKenney * a 16-bit cookie, which rcutorture routinely wraps in a matter of a 10894e7ccfaeSPaul E. McKenney * few minutes. If this proves to be a problem, this counter will be 10904e7ccfaeSPaul E. McKenney * expanded to the same size as for Tree SRCU. 10915358c9faSPaul E. McKenney */ 10925358c9faSPaul E. McKenney bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie) 10935358c9faSPaul E. McKenney { 10945358c9faSPaul E. McKenney if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie)) 10955358c9faSPaul E. McKenney return false; 10965358c9faSPaul E. McKenney // Ensure that the end of the SRCU grace period happens before 10975358c9faSPaul E. McKenney // any subsequent code that the caller might execute. 10985358c9faSPaul E. McKenney smp_mb(); // ^^^ 10995358c9faSPaul E. McKenney return true; 11005358c9faSPaul E. McKenney } 11015358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu); 11025358c9faSPaul E. McKenney
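/*
 * Illustrative sketch of the polling interfaces: a caller that cannot
 * block snapshots a cookie, then checks it later.  The name my_srcu is
 * hypothetical, as is the retry-later strategy described in the comments.
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_srcu(&my_srcu);	// GP now pending.
 *	...
 *	if (poll_state_synchronize_srcu(&my_srcu, cookie)) {
 *		// A full grace period has elapsed: safe to reclaim.
 *	} else {
 *		// Not yet: re-arm a timer or workqueue and retry later.
 *	}
 *
 * get_state_synchronize_srcu() is the cheaper variant for callers that
 * will themselves ensure that a grace period starts, for example by
 * invoking call_srcu().
 */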
1103da915ad5SPaul E. McKenney /* 1104da915ad5SPaul E. McKenney * Callback function for srcu_barrier() use. 1105da915ad5SPaul E. McKenney */ 1106da915ad5SPaul E. McKenney static void srcu_barrier_cb(struct rcu_head *rhp) 1107da915ad5SPaul E. McKenney { 1108da915ad5SPaul E. McKenney struct srcu_data *sdp; 1109aacb5d91SPaul E. McKenney struct srcu_struct *ssp; 1110da915ad5SPaul E. McKenney 1111da915ad5SPaul E. McKenney sdp = container_of(rhp, struct srcu_data, srcu_barrier_head); 1112aacb5d91SPaul E. McKenney ssp = sdp->ssp; 1113aacb5d91SPaul E. McKenney if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) 1114aacb5d91SPaul E. McKenney complete(&ssp->srcu_barrier_completion); 1115da915ad5SPaul E. McKenney } 1116da915ad5SPaul E. McKenney 1117*994f7068SPaul E. McKenney /* 1118*994f7068SPaul E. McKenney * Enqueue an srcu_barrier() callback on the specified srcu_data 1119*994f7068SPaul E. McKenney * structure's ->cblist, but only if that ->cblist already has at least one 1120*994f7068SPaul E. McKenney * callback enqueued. Note that if a CPU already has callbacks enqueued, 1121*994f7068SPaul E. McKenney * it must have already registered the need for a future grace period, 1122*994f7068SPaul E. McKenney * so all we need do is enqueue a callback that will use the same grace 1123*994f7068SPaul E. McKenney * period as the last callback already in the queue. 1124*994f7068SPaul E. McKenney */ 1125*994f7068SPaul E. McKenney static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp) 1126*994f7068SPaul E. McKenney { 1127*994f7068SPaul E. McKenney spin_lock_irq_rcu_node(sdp); 1128*994f7068SPaul E. McKenney atomic_inc(&ssp->srcu_barrier_cpu_cnt); 1129*994f7068SPaul E. McKenney sdp->srcu_barrier_head.func = srcu_barrier_cb; 1130*994f7068SPaul E. McKenney debug_rcu_head_queue(&sdp->srcu_barrier_head); 1131*994f7068SPaul E. McKenney if (!rcu_segcblist_entrain(&sdp->srcu_cblist, 1132*994f7068SPaul E. McKenney &sdp->srcu_barrier_head)) { 1133*994f7068SPaul E. McKenney debug_rcu_head_unqueue(&sdp->srcu_barrier_head); 1134*994f7068SPaul E. McKenney atomic_dec(&ssp->srcu_barrier_cpu_cnt); 1135*994f7068SPaul E. McKenney } 1136*994f7068SPaul E. McKenney spin_unlock_irq_rcu_node(sdp); 1137*994f7068SPaul E. McKenney } 1138*994f7068SPaul E. McKenney 1139dad81a20SPaul E. McKenney /** 1140dad81a20SPaul E. McKenney * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. 1141aacb5d91SPaul E. McKenney * @ssp: srcu_struct on which to wait for in-flight callbacks. 1142dad81a20SPaul E. McKenney */ 1143aacb5d91SPaul E. McKenney void srcu_barrier(struct srcu_struct *ssp) 1144dad81a20SPaul E. McKenney { 1145da915ad5SPaul E. McKenney int cpu; 1146aacb5d91SPaul E. McKenney unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq); 1147da915ad5SPaul E. McKenney 1148aacb5d91SPaul E. McKenney check_init_srcu_struct(ssp); 1149aacb5d91SPaul E. McKenney mutex_lock(&ssp->srcu_barrier_mutex); 1150aacb5d91SPaul E. McKenney if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) { 1151da915ad5SPaul E. McKenney smp_mb(); /* Force ordering following return. */ 1152aacb5d91SPaul E. McKenney mutex_unlock(&ssp->srcu_barrier_mutex); 1153da915ad5SPaul E. McKenney return; /* Someone else did our work for us. */ 1154da915ad5SPaul E. McKenney } 1155aacb5d91SPaul E. McKenney rcu_seq_start(&ssp->srcu_barrier_seq); 1156aacb5d91SPaul E. McKenney init_completion(&ssp->srcu_barrier_completion); 1157da915ad5SPaul E. McKenney 1158da915ad5SPaul E. McKenney /* Initial count prevents reaching zero until all CBs are posted. */ 1159aacb5d91SPaul E. McKenney atomic_set(&ssp->srcu_barrier_cpu_cnt, 1); 1160da915ad5SPaul E. McKenney 1161*994f7068SPaul E. McKenney if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER) 1162*994f7068SPaul E. McKenney srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0)); 1163*994f7068SPaul E. McKenney else 1164*994f7068SPaul E. McKenney for_each_possible_cpu(cpu) 1165*994f7068SPaul E. McKenney srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu)); 1166da915ad5SPaul E. McKenney 1167da915ad5SPaul E. McKenney /* Remove the initial count, at which point reaching zero can happen. */ 1168aacb5d91SPaul E. McKenney if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) 1169aacb5d91SPaul E. McKenney complete(&ssp->srcu_barrier_completion); 1170aacb5d91SPaul E. McKenney wait_for_completion(&ssp->srcu_barrier_completion); 1171da915ad5SPaul E. McKenney 1172aacb5d91SPaul E. McKenney rcu_seq_end(&ssp->srcu_barrier_seq); 1173aacb5d91SPaul E. McKenney mutex_unlock(&ssp->srcu_barrier_mutex); 1174dad81a20SPaul E. McKenney } 1175dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_barrier); 1176dad81a20SPaul E. McKenney
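/*
 * Illustrative sketch of srcu_barrier() on a teardown path, which is its
 * typical use: flush previously queued call_srcu() callbacks before their
 * srcu_struct (or the code they run) goes away.  The names my_srcu and
 * foo_exit() are hypothetical.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// First prevent further call_srcu() invocations, then:
 *		srcu_barrier(&my_srcu);		// Wait for queued callbacks.
 *		cleanup_srcu_struct(&my_srcu);	// Only now safe to clean up.
 *	}
 *
 * Note that srcu_barrier() waits only for callbacks queued before it was
 * called; it does nothing to stop concurrent call_srcu() invocations from
 * queueing more.
 */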
1177dad81a20SPaul E. McKenney /** 1178dad81a20SPaul E. McKenney * srcu_batches_completed - return batches completed. 1179aacb5d91SPaul E. McKenney * @ssp: srcu_struct on which to report batch completion. 1180dad81a20SPaul E. McKenney * 1181dad81a20SPaul E. McKenney * Report the number of batches, correlated with, but not necessarily 1182dad81a20SPaul E. McKenney * precisely the same as, the number of grace periods that have elapsed. 1183dad81a20SPaul E. McKenney */ 1184aacb5d91SPaul E. McKenney unsigned long srcu_batches_completed(struct srcu_struct *ssp) 1185dad81a20SPaul E. McKenney { 118639f91504SPaul E. McKenney return READ_ONCE(ssp->srcu_idx); 1187dad81a20SPaul E. McKenney } 1188dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_batches_completed); 1189dad81a20SPaul E. McKenney 1190dad81a20SPaul E. McKenney /* 1191da915ad5SPaul E. McKenney * Core SRCU state machine. Push state bits of ->srcu_gp_seq 1192da915ad5SPaul E. McKenney * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has 1193da915ad5SPaul E. McKenney * completed in that state. 1194dad81a20SPaul E. McKenney */ 1195aacb5d91SPaul E. McKenney static void srcu_advance_state(struct srcu_struct *ssp) 1196dad81a20SPaul E. McKenney { 1197dad81a20SPaul E. McKenney int idx; 1198dad81a20SPaul E. McKenney 1199aacb5d91SPaul E. McKenney mutex_lock(&ssp->srcu_gp_mutex); 1200da915ad5SPaul E. McKenney 1201dad81a20SPaul E. McKenney /* 1202dad81a20SPaul E. McKenney * Because readers might be delayed for an extended period after 1203da915ad5SPaul E. McKenney * fetching ->srcu_idx for their index, at any point in time there 1204dad81a20SPaul E. McKenney * might well be readers using both idx=0 and idx=1. We therefore 1205dad81a20SPaul E. McKenney * need to wait for readers to clear from both index values before 1206dad81a20SPaul E. McKenney * invoking a callback. 1207dad81a20SPaul E. McKenney * 1208dad81a20SPaul E. McKenney * The load-acquire ensures that we see the accesses performed 1209dad81a20SPaul E. McKenney * by the prior grace period. 1210dad81a20SPaul E. McKenney */ 1211aacb5d91SPaul E. McKenney idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */ 1212dad81a20SPaul E. McKenney if (idx == SRCU_STATE_IDLE) { 1213aacb5d91SPaul E. McKenney spin_lock_irq_rcu_node(ssp); 1214aacb5d91SPaul E. McKenney if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { 1215aacb5d91SPaul E. McKenney WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq)); 1216aacb5d91SPaul E. McKenney spin_unlock_irq_rcu_node(ssp); 1217aacb5d91SPaul E. McKenney mutex_unlock(&ssp->srcu_gp_mutex); 1218dad81a20SPaul E. McKenney return; 1219dad81a20SPaul E. McKenney } 1220aacb5d91SPaul E. McKenney idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)); 1221dad81a20SPaul E. McKenney if (idx == SRCU_STATE_IDLE) 1222aacb5d91SPaul E. McKenney srcu_gp_start(ssp); 1223aacb5d91SPaul E. McKenney spin_unlock_irq_rcu_node(ssp); 1224da915ad5SPaul E. McKenney if (idx != SRCU_STATE_IDLE) { 1225aacb5d91SPaul E. McKenney mutex_unlock(&ssp->srcu_gp_mutex); 1226dad81a20SPaul E. McKenney return; /* Someone else started the grace period. */ 1227dad81a20SPaul E. McKenney } 1228da915ad5SPaul E. McKenney } 1229dad81a20SPaul E. McKenney 1230aacb5d91SPaul E.
McKenney if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { 1231aacb5d91SPaul E. McKenney idx = 1 ^ (ssp->srcu_idx & 1); 1232aacb5d91SPaul E. McKenney if (!try_check_zero(ssp, idx, 1)) { 1233aacb5d91SPaul E. McKenney mutex_unlock(&ssp->srcu_gp_mutex); 1234dad81a20SPaul E. McKenney return; /* readers present, retry later. */ 1235da915ad5SPaul E. McKenney } 1236aacb5d91SPaul E. McKenney srcu_flip(ssp); 123771042606SPaul E. McKenney spin_lock_irq_rcu_node(ssp); 1238aacb5d91SPaul E. McKenney rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2); 123971042606SPaul E. McKenney spin_unlock_irq_rcu_node(ssp); 1240dad81a20SPaul E. McKenney } 1241dad81a20SPaul E. McKenney 1242aacb5d91SPaul E. McKenney if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { 1243dad81a20SPaul E. McKenney 1244dad81a20SPaul E. McKenney /* 1245dad81a20SPaul E. McKenney * SRCU read-side critical sections are normally short, 1246dad81a20SPaul E. McKenney * so check at least twice in quick succession after a flip. 1247dad81a20SPaul E. McKenney */ 1248aacb5d91SPaul E. McKenney idx = 1 ^ (ssp->srcu_idx & 1); 1249aacb5d91SPaul E. McKenney if (!try_check_zero(ssp, idx, 2)) { 1250aacb5d91SPaul E. McKenney mutex_unlock(&ssp->srcu_gp_mutex); 1251da915ad5SPaul E. McKenney return; /* readers present, retry later. */ 1252da915ad5SPaul E. McKenney } 1253aacb5d91SPaul E. McKenney srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */ 1254dad81a20SPaul E. McKenney } 1255dad81a20SPaul E. McKenney } 1256dad81a20SPaul E. McKenney 1257dad81a20SPaul E. McKenney /* 1258dad81a20SPaul E. McKenney * Invoke a limited number of SRCU callbacks that have passed through 1259dad81a20SPaul E. McKenney * their grace period. If there are more to do, SRCU will reschedule 1260dad81a20SPaul E. McKenney * the workqueue. Note that needed memory barriers have been executed 1261dad81a20SPaul E. McKenney * in this task's context by srcu_readers_active_idx_check(). 1262dad81a20SPaul E. McKenney */ 1263da915ad5SPaul E. McKenney static void srcu_invoke_callbacks(struct work_struct *work) 1264dad81a20SPaul E. McKenney { 1265ae5c2341SJoel Fernandes (Google) long len; 1266da915ad5SPaul E. McKenney bool more; 1267dad81a20SPaul E. McKenney struct rcu_cblist ready_cbs; 1268dad81a20SPaul E. McKenney struct rcu_head *rhp; 1269da915ad5SPaul E. McKenney struct srcu_data *sdp; 1270aacb5d91SPaul E. McKenney struct srcu_struct *ssp; 1271dad81a20SPaul E. McKenney 1272e81baf4cSSebastian Andrzej Siewior sdp = container_of(work, struct srcu_data, work); 1273e81baf4cSSebastian Andrzej Siewior 1274aacb5d91SPaul E. McKenney ssp = sdp->ssp; 1275dad81a20SPaul E. McKenney rcu_cblist_init(&ready_cbs); 1276d6331980SPaul E. McKenney spin_lock_irq_rcu_node(sdp); 1277da915ad5SPaul E. McKenney rcu_segcblist_advance(&sdp->srcu_cblist, 1278aacb5d91SPaul E. McKenney rcu_seq_current(&ssp->srcu_gp_seq)); 1279da915ad5SPaul E. McKenney if (sdp->srcu_cblist_invoking || 1280da915ad5SPaul E. McKenney !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { 1281d6331980SPaul E. McKenney spin_unlock_irq_rcu_node(sdp); 1282da915ad5SPaul E. McKenney return; /* Someone else on the job or nothing to do. */ 1283da915ad5SPaul E. McKenney } 1284da915ad5SPaul E. McKenney 1285da915ad5SPaul E. McKenney /* We are on the job! Extract and invoke ready callbacks. */ 1286da915ad5SPaul E. McKenney sdp->srcu_cblist_invoking = true; 1287da915ad5SPaul E. McKenney rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs); 1288ae5c2341SJoel Fernandes (Google) len = ready_cbs.len; 1289d6331980SPaul E. 
McKenney spin_unlock_irq_rcu_node(sdp); 1290dad81a20SPaul E. McKenney rhp = rcu_cblist_dequeue(&ready_cbs); 1291dad81a20SPaul E. McKenney for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { 1292a602538eSPaul E. McKenney debug_rcu_head_unqueue(rhp); 1293dad81a20SPaul E. McKenney local_bh_disable(); 1294dad81a20SPaul E. McKenney rhp->func(rhp); 1295dad81a20SPaul E. McKenney local_bh_enable(); 1296dad81a20SPaul E. McKenney } 1297ae5c2341SJoel Fernandes (Google) WARN_ON_ONCE(ready_cbs.len); 1298da915ad5SPaul E. McKenney 1299da915ad5SPaul E. McKenney /* 1300da915ad5SPaul E. McKenney * Update counts, accelerate new callbacks, and if needed, 1301da915ad5SPaul E. McKenney * schedule another round of callback invocation. 1302da915ad5SPaul E. McKenney */ 1303d6331980SPaul E. McKenney spin_lock_irq_rcu_node(sdp); 1304ae5c2341SJoel Fernandes (Google) rcu_segcblist_add_len(&sdp->srcu_cblist, -len); 1305da915ad5SPaul E. McKenney (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, 1306aacb5d91SPaul E. McKenney rcu_seq_snap(&ssp->srcu_gp_seq)); 1307da915ad5SPaul E. McKenney sdp->srcu_cblist_invoking = false; 1308da915ad5SPaul E. McKenney more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); 1309d6331980SPaul E. McKenney spin_unlock_irq_rcu_node(sdp); 1310da915ad5SPaul E. McKenney if (more) 1311da915ad5SPaul E. McKenney srcu_schedule_cbs_sdp(sdp, 0); 1312dad81a20SPaul E. McKenney } 1313dad81a20SPaul E. McKenney 1314dad81a20SPaul E. McKenney /* 1315dad81a20SPaul E. McKenney * Finished one round of SRCU grace period. Start another if there are 1316dad81a20SPaul E. McKenney * more SRCU callbacks queued, otherwise put SRCU into not-running state. 1317dad81a20SPaul E. McKenney */ 1318aacb5d91SPaul E. McKenney static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay) 1319dad81a20SPaul E. McKenney { 1320da915ad5SPaul E. McKenney bool pushgp = true; 1321dad81a20SPaul E. McKenney 1322aacb5d91SPaul E. McKenney spin_lock_irq_rcu_node(ssp); 1323aacb5d91SPaul E. McKenney if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { 1324aacb5d91SPaul E. McKenney if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) { 1325da915ad5SPaul E. McKenney /* All requests fulfilled, time to go idle. */ 1326da915ad5SPaul E. McKenney pushgp = false; 1327dad81a20SPaul E. McKenney } 1328aacb5d91SPaul E. McKenney } else if (!rcu_seq_state(ssp->srcu_gp_seq)) { 1329da915ad5SPaul E. McKenney /* Outstanding request and no GP. Start one. */ 1330aacb5d91SPaul E. McKenney srcu_gp_start(ssp); 1331da915ad5SPaul E. McKenney } 1332aacb5d91SPaul E. McKenney spin_unlock_irq_rcu_node(ssp); 1333dad81a20SPaul E. McKenney 1334da915ad5SPaul E. McKenney if (pushgp) 1335aacb5d91SPaul E. McKenney queue_delayed_work(rcu_gp_wq, &ssp->work, delay); 1336dad81a20SPaul E. McKenney } 1337dad81a20SPaul E. McKenney 1338dad81a20SPaul E. McKenney /* 1339dad81a20SPaul E. McKenney * This is the work-queue function that handles SRCU grace periods. 1340dad81a20SPaul E. McKenney */ 13410d8a1e83SPaul E. McKenney static void process_srcu(struct work_struct *work) 1342dad81a20SPaul E. McKenney { 1343aacb5d91SPaul E. McKenney struct srcu_struct *ssp; 1344dad81a20SPaul E. McKenney 1345aacb5d91SPaul E. McKenney ssp = container_of(work, struct srcu_struct, work.work); 1346dad81a20SPaul E. McKenney 1347aacb5d91SPaul E. McKenney srcu_advance_state(ssp); 1348aacb5d91SPaul E. McKenney srcu_reschedule(ssp, srcu_get_delay(ssp)); 1349dad81a20SPaul E. McKenney } 13507f6733c3SPaul E. McKenney 13517f6733c3SPaul E. McKenney
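/*
 * Illustrative sketch, assuming a single srcu_data structure with newly
 * queued callbacks, of how the functions above cooperate across one
 * grace period.  This is a simplification of the actual control flow.
 *
 *	call_srcu()                      // Queue callback, request a GP.
 *	  srcu_gp_start_if_needed()      // Record need, schedule ssp->work.
 *	process_srcu()                   // Workqueue handler, above.
 *	  srcu_advance_state()
 *	    srcu_gp_start()              // IDLE -> SCAN1.
 *	    try_check_zero()             // Wait for old-index readers.
 *	    srcu_flip()                  // SCAN1 -> SCAN2, flip ->srcu_idx.
 *	    try_check_zero()             // Wait for the other index.
 *	    srcu_gp_end()                // GP complete, schedule sdp->work.
 *	  srcu_reschedule()              // Start another GP if more work.
 *	srcu_invoke_callbacks()          // Per-sdp work: run ready callbacks.
 */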
void srcutorture_get_gp_data(enum rcutorture_type test_type, 1352aacb5d91SPaul E. McKenney struct srcu_struct *ssp, int *flags, 1353aebc8264SPaul E. McKenney unsigned long *gp_seq) 13547f6733c3SPaul E. McKenney { 13557f6733c3SPaul E. McKenney if (test_type != SRCU_FLAVOR) 13567f6733c3SPaul E. McKenney return; 13577f6733c3SPaul E. McKenney *flags = 0; 1358aacb5d91SPaul E. McKenney *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq); 13597f6733c3SPaul E. McKenney } 13607f6733c3SPaul E. McKenney EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); 13611f4f6da1SPaul E. McKenney 1362aacb5d91SPaul E. McKenney void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) 1363115a1a52SPaul E. McKenney { 1364115a1a52SPaul E. McKenney int cpu; 1365115a1a52SPaul E. McKenney int idx; 1366ac3748c6SPaul E. McKenney unsigned long s0 = 0, s1 = 0; 1367115a1a52SPaul E. McKenney 1368aacb5d91SPaul E. McKenney idx = ssp->srcu_idx & 0x1; 136952e17ba1SPaul E. McKenney pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):", 1370aacb5d91SPaul E. McKenney tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx); 1371115a1a52SPaul E. McKenney for_each_possible_cpu(cpu) { 1372115a1a52SPaul E. McKenney unsigned long l0, l1; 1373115a1a52SPaul E. McKenney unsigned long u0, u1; 1374115a1a52SPaul E. McKenney long c0, c1; 13755ab07a8dSPaul E. McKenney struct srcu_data *sdp; 1376115a1a52SPaul E. McKenney 1377aacb5d91SPaul E. McKenney sdp = per_cpu_ptr(ssp->sda, cpu); 1378b68c6146SPaul E. McKenney u0 = data_race(sdp->srcu_unlock_count[!idx]); 1379b68c6146SPaul E. McKenney u1 = data_race(sdp->srcu_unlock_count[idx]); 1380115a1a52SPaul E. McKenney 1381115a1a52SPaul E. McKenney /* 1382115a1a52SPaul E. McKenney * Make sure that a lock is always counted if the corresponding 1383115a1a52SPaul E. McKenney * unlock is counted. 1384115a1a52SPaul E. McKenney */ 1385115a1a52SPaul E. McKenney smp_rmb(); 1386115a1a52SPaul E. McKenney 1387b68c6146SPaul E. McKenney l0 = data_race(sdp->srcu_lock_count[!idx]); 1388b68c6146SPaul E. McKenney l1 = data_race(sdp->srcu_lock_count[idx]); 1389115a1a52SPaul E. McKenney 1390115a1a52SPaul E. McKenney c0 = l0 - u0; 1391115a1a52SPaul E. McKenney c1 = l1 - u1; 13927e210a65SPaul E. McKenney pr_cont(" %d(%ld,%ld %c)", 13937e210a65SPaul E. McKenney cpu, c0, c1, 13947e210a65SPaul E. McKenney "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]); 1395ac3748c6SPaul E. McKenney s0 += c0; 1396ac3748c6SPaul E. McKenney s1 += c1; 1397115a1a52SPaul E. McKenney } 1398ac3748c6SPaul E. McKenney pr_cont(" T(%ld,%ld)\n", s0, s1); 1399115a1a52SPaul E. McKenney } 1400115a1a52SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_torture_stats_print); 1401115a1a52SPaul E. McKenney 14021f4f6da1SPaul E. McKenney static int __init srcu_bootup_announce(void) 14031f4f6da1SPaul E. McKenney { 14041f4f6da1SPaul E. McKenney pr_info("Hierarchical SRCU implementation.\n"); 14050c8e0e3cSPaul E. McKenney if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF) 14060c8e0e3cSPaul E. McKenney pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff); 14071f4f6da1SPaul E. McKenney return 0; 14081f4f6da1SPaul E. McKenney } 14091f4f6da1SPaul E. McKenney early_initcall(srcu_bootup_announce); 1410e0fcba9aSPaul E. McKenney 1411e0fcba9aSPaul E. McKenney void __init srcu_init(void) 1412e0fcba9aSPaul E. McKenney { 1413aacb5d91SPaul E. McKenney struct srcu_struct *ssp; 1414e0fcba9aSPaul E.
McKenney 14158e9c01c7SFrederic Weisbecker /* 14168e9c01c7SFrederic Weisbecker * Once srcu_init_done is set, call_srcu() can follow the normal path and 14178e9c01c7SFrederic Weisbecker * queue delayed work. This must follow the creation of RCU workqueues 14188e9c01c7SFrederic Weisbecker * and the initialization of timers. 14198e9c01c7SFrederic Weisbecker */ 1420e0fcba9aSPaul E. McKenney srcu_init_done = true; 1421e0fcba9aSPaul E. McKenney while (!list_empty(&srcu_boot_list)) { 1422aacb5d91SPaul E. McKenney ssp = list_first_entry(&srcu_boot_list, struct srcu_struct, 14234e6ea4efSPaul E. McKenney work.work.entry); 1424aacb5d91SPaul E. McKenney list_del_init(&ssp->work.work.entry); 1425aacb5d91SPaul E. McKenney queue_work(rcu_gp_wq, &ssp->work.work); 1426e0fcba9aSPaul E. McKenney } 1427e0fcba9aSPaul E. McKenney } 1428fe15b50cSPaul E. McKenney 1429fe15b50cSPaul E. McKenney #ifdef CONFIG_MODULES 1430fe15b50cSPaul E. McKenney 1431fe15b50cSPaul E. McKenney /* Initialize any global-scope srcu_struct structures used by this module. */ 1432fe15b50cSPaul E. McKenney static int srcu_module_coming(struct module *mod) 1433fe15b50cSPaul E. McKenney { 1434fe15b50cSPaul E. McKenney int i; 1435fe15b50cSPaul E. McKenney struct srcu_struct **sspp = mod->srcu_struct_ptrs; 1436fe15b50cSPaul E. McKenney int ret; 1437fe15b50cSPaul E. McKenney 1438fe15b50cSPaul E. McKenney for (i = 0; i < mod->num_srcu_structs; i++) { 1439fe15b50cSPaul E. McKenney ret = init_srcu_struct(*(sspp++)); 1440fe15b50cSPaul E. McKenney if (WARN_ON_ONCE(ret)) 1441fe15b50cSPaul E. McKenney return ret; 1442fe15b50cSPaul E. McKenney } 1443fe15b50cSPaul E. McKenney return 0; 1444fe15b50cSPaul E. McKenney } 1445fe15b50cSPaul E. McKenney 1446fe15b50cSPaul E. McKenney /* Clean up any global-scope srcu_struct structures used by this module. */ 1447fe15b50cSPaul E. McKenney static void srcu_module_going(struct module *mod) 1448fe15b50cSPaul E. McKenney { 1449fe15b50cSPaul E. McKenney int i; 1450fe15b50cSPaul E. McKenney struct srcu_struct **sspp = mod->srcu_struct_ptrs; 1451fe15b50cSPaul E. McKenney 1452fe15b50cSPaul E. McKenney for (i = 0; i < mod->num_srcu_structs; i++) 1453fe15b50cSPaul E. McKenney cleanup_srcu_struct(*(sspp++)); 1454fe15b50cSPaul E. McKenney } 1455fe15b50cSPaul E. McKenney 1456fe15b50cSPaul E. McKenney /* Handle one module, either coming or going. */ 1457fe15b50cSPaul E. McKenney static int srcu_module_notify(struct notifier_block *self, 1458fe15b50cSPaul E. McKenney unsigned long val, void *data) 1459fe15b50cSPaul E. McKenney { 1460fe15b50cSPaul E. McKenney struct module *mod = data; 1461fe15b50cSPaul E. McKenney int ret = 0; 1462fe15b50cSPaul E. McKenney 1463fe15b50cSPaul E. McKenney switch (val) { 1464fe15b50cSPaul E. McKenney case MODULE_STATE_COMING: 1465fe15b50cSPaul E. McKenney ret = srcu_module_coming(mod); 1466fe15b50cSPaul E. McKenney break; 1467fe15b50cSPaul E. McKenney case MODULE_STATE_GOING: 1468fe15b50cSPaul E. McKenney srcu_module_going(mod); 1469fe15b50cSPaul E. McKenney break; 1470fe15b50cSPaul E. McKenney default: 1471fe15b50cSPaul E. McKenney break; 1472fe15b50cSPaul E. McKenney } 1473fe15b50cSPaul E. McKenney return ret; 1474fe15b50cSPaul E. McKenney } 1475fe15b50cSPaul E. McKenney 1476fe15b50cSPaul E. McKenney static struct notifier_block srcu_module_nb = { 1477fe15b50cSPaul E. McKenney .notifier_call = srcu_module_notify, 1478fe15b50cSPaul E. McKenney .priority = 0, 1479fe15b50cSPaul E. McKenney }; 1480fe15b50cSPaul E. McKenney 1481fe15b50cSPaul E. McKenney static __init int init_srcu_module_notifier(void) 1482fe15b50cSPaul E.
McKenney { 1483fe15b50cSPaul E. McKenney int ret; 1484fe15b50cSPaul E. McKenney 1485fe15b50cSPaul E. McKenney ret = register_module_notifier(&srcu_module_nb); 1486fe15b50cSPaul E. McKenney if (ret) 1487fe15b50cSPaul E. McKenney pr_warn("Failed to register srcu module notifier\n"); 1488fe15b50cSPaul E. McKenney return ret; 1489fe15b50cSPaul E. McKenney } 1490fe15b50cSPaul E. McKenney late_initcall(init_srcu_module_notifier); 1491fe15b50cSPaul E. McKenney 1492fe15b50cSPaul E. McKenney #endif /* #ifdef CONFIG_MODULES */ 1493
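/*
 * Illustrative sketch of what the CONFIG_MODULES notifier above enables:
 * a module can declare a global srcu_struct and rely on the notifier for
 * its initialization and cleanup.  The names my_srcu, foo_init(), and
 * foo_exit() are hypothetical.
 *
 *	DEFINE_STATIC_SRCU(my_srcu);	// Pointer lands in srcu_struct_ptrs.
 *
 *	static int __init foo_init(void)
 *	{
 *		// srcu_module_coming() already ran init_srcu_struct() at
 *		// MODULE_STATE_COMING, so my_srcu is ready for use here.
 *		return 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// srcu_module_going() will run cleanup_srcu_struct() at
 *		// MODULE_STATE_GOING; queued callbacks should be flushed
 *		// first, for example with srcu_barrier(&my_srcu).
 *	}
 *
 *	module_init(foo_init);
 *	module_exit(foo_exit);
 */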