// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	    Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them, which is why this function
 * never touches the ->srcu_lock_count[] and ->srcu_unlock_count[]
 * arrays.
 */
static void init_srcu_struct_nodes(struct srcu_struct *ssp)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Initialize geometry if it has not already been initialized. */
	rcu_init_geometry();

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
	}
}

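/*
 * Illustrative geometry sketch (not part of the code): assuming the
 * common defaults of fanout 64 at the root and 16 at the leaves, a
 * 64-CPU system gets a two-level combining tree: one root srcu_node
 * whose four children are leaf srcu_node structures, each leaf
 * covering a grplo..grphi span of 16 CPUs' srcu_data structures.
 * Larger systems add levels rather than widening the leaves.
 */
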
/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter tells us that ->sda has already been wired up to srcu_data,
 * so this function need not allocate it.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		return -ENOMEM;
	init_srcu_struct_nodes(ssp);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

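/*
 * Usage sketch (illustration only, not invoked anywhere in this file;
 * my_srcu is a hypothetical name): an SRCU domain may be created either
 * statically or dynamically:
 *
 *	DEFINE_SRCU(my_srcu);			// static, wired at compile time
 *
 * or:
 *
 *	struct srcu_struct my_srcu;
 *
 *	if (init_srcu_struct(&my_srcu))		// dynamic, may fail with -ENOMEM
 *		return -ENOMEM;
 *	...
 *	cleanup_srcu_struct(&my_srcu);		// after all readers and callbacks
 */
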
/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

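/*
 * Illustrative sketch of the store-buffering pattern mentioned above
 * (pseudocode, not compiled; x, r0, and r1 are hypothetical):
 *
 *	updater				reader
 *	-------				------
 *	WRITE_ONCE(x, 1);		this_cpu_inc(lock_count[idx]);
 *	smp_mb();  // A			smp_mb();  // B
 *	r0 = sum of lock_count[idx];	r1 = READ_ONCE(x);
 *
 * As in the classic SB litmus test, at least one of the two loads must
 * observe the other side's store: either the updater's scan sees the
 * reader's increment, or the reader sees x == 1 and thus runs entirely
 * after the updater's pre-grace-period stores.
 */
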
/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL 1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(rcu_seq_current(&ssp->srcu_gp_seq) != ssp->srcu_gp_seq_needed) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)),
			rcu_seq_current(&ssp->srcu_gp_seq), ssp->srcu_gp_seq_needed);
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(ssp->sda);
	ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

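/*
 * Reader-side usage sketch (illustration only; my_srcu, gp, struct foo,
 * and do_something_with() are hypothetical).  The index returned by
 * srcu_read_lock() must be passed to the matching srcu_read_unlock():
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		do_something_with(p);	// may block, unlike with RCU
 *	srcu_read_unlock(&my_srcu, idx);
 */
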
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY 5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(ssp);
	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(ssp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}
}

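/*
 * For reference (a worked example, not code): the grace-period sequence
 * numbers manipulated above pack the phase into the low-order bits, so
 * rcu_seq_state() extracts SRCU_STATE_IDLE, SRCU_STATE_SCAN1, or
 * SRCU_STATE_SCAN2 from the bottom bits while rcu_seq_ctr() shifts them
 * away to yield the count itself.  For example, assuming the two-bit
 * state field used by this implementation, a ->srcu_gp_seq value of
 * 0x1d decodes as grace period number 7 in phase SRCU_STATE_SCAN1.
 */
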
/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
						      ? SRCU_INTERVAL
						      : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(ssp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period from idle to specify expediting because they will all end
 * up requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle.  */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
					     struct rcu_head *rhp, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(ssp);
	idx = srcu_read_lock(ssp);
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rhp)
		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp->mynode, s);
	srcu_read_unlock(ssp, idx);
	return s;
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);

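/*
 * Usage sketch for call_srcu() (illustration only; struct foo, its
 * ->rcu field, foo_reclaim(), my_srcu, and old_fp are hypothetical):
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *	...
 *	call_srcu(&my_srcu, &old_fp->rcu, foo_reclaim);
 */
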
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

McKenney * 972dad81a20SPaul E. McKenney * Note that it is illegal to call synchronize_srcu() from the corresponding 973dad81a20SPaul E. McKenney * SRCU read-side critical section; doing so will result in deadlock. 974dad81a20SPaul E. McKenney * However, it is perfectly legal to call synchronize_srcu() on one 975dad81a20SPaul E. McKenney * srcu_struct from some other srcu_struct's read-side critical section, 976dad81a20SPaul E. McKenney * as long as the resulting graph of srcu_structs is acyclic. 977dad81a20SPaul E. McKenney * 978dad81a20SPaul E. McKenney * There are memory-ordering constraints implied by synchronize_srcu(). 979dad81a20SPaul E. McKenney * On systems with more than one CPU, when synchronize_srcu() returns, 980dad81a20SPaul E. McKenney * each CPU is guaranteed to have executed a full memory barrier since 9816eb95cc4SPaul E. McKenney * the end of its last corresponding SRCU read-side critical section 982dad81a20SPaul E. McKenney * whose beginning preceded the call to synchronize_srcu(). In addition, 983dad81a20SPaul E. McKenney * each CPU having an SRCU read-side critical section that extends beyond 984dad81a20SPaul E. McKenney * the return from synchronize_srcu() is guaranteed to have executed a 985dad81a20SPaul E. McKenney * full memory barrier after the beginning of synchronize_srcu() and before 986dad81a20SPaul E. McKenney * the beginning of that SRCU read-side critical section. Note that these 987dad81a20SPaul E. McKenney * guarantees include CPUs that are offline, idle, or executing in user mode, 988dad81a20SPaul E. McKenney * as well as CPUs that are executing in the kernel. 989dad81a20SPaul E. McKenney * 990dad81a20SPaul E. McKenney * Furthermore, if CPU A invoked synchronize_srcu(), which returned 991dad81a20SPaul E. McKenney * to its caller on CPU B, then both CPU A and CPU B are guaranteed 992dad81a20SPaul E. McKenney * to have executed a full memory barrier during the execution of 993dad81a20SPaul E. McKenney * synchronize_srcu(). This guarantee applies even if CPU A and CPU B 994dad81a20SPaul E. McKenney * are the same CPU, but again only if the system has more than one CPU. 995dad81a20SPaul E. McKenney * 996dad81a20SPaul E. McKenney * Of course, these memory-ordering guarantees apply only when 997dad81a20SPaul E. McKenney * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are 998dad81a20SPaul E. McKenney * passed the same srcu_struct structure. 9992da4b2a7SPaul E. McKenney * 10003d3a0d1bSPaul E. McKenney * Implementation of these memory-ordering guarantees is similar to 10013d3a0d1bSPaul E. McKenney * that of synchronize_rcu(). 10023d3a0d1bSPaul E. McKenney * 10032da4b2a7SPaul E. McKenney * If SRCU is likely idle, expedite the first request. This semantic 10042da4b2a7SPaul E. McKenney * was provided by Classic SRCU, and is relied upon by its users, so TREE 10052da4b2a7SPaul E. McKenney * SRCU must also provide it. Note that detecting idleness is heuristic 10062da4b2a7SPaul E. McKenney * and subject to both false positives and negatives. 1007dad81a20SPaul E. McKenney */ 1008aacb5d91SPaul E. McKenney void synchronize_srcu(struct srcu_struct *ssp) 1009dad81a20SPaul E. McKenney { 1010aacb5d91SPaul E. McKenney if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited()) 1011aacb5d91SPaul E. McKenney synchronize_srcu_expedited(ssp); 1012dad81a20SPaul E. McKenney else 1013aacb5d91SPaul E. McKenney __synchronize_srcu(ssp, true); 1014dad81a20SPaul E. McKenney } 1015dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(synchronize_srcu); 1016dad81a20SPaul E. 
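/*
 * Usage sketches (illustrative only, not kernel code; the names "my_srcu",
 * "my_lock", "gp", "struct foo", "new_fp", "old_fp", and "foo_reclaim" are
 * hypothetical).  First, the classic update-side pattern built on
 * synchronize_srcu() above:
 *
 *	old_fp = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new_fp);
 *	synchronize_srcu(&my_srcu);	// Wait out all pre-existing readers.
 *	kfree(old_fp);			// No reader can still reference old_fp.
 *
 * When blocking is not an option, call_srcu() above instead defers the
 * free to a callback:
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_srcu(&my_srcu, &old_fp->rh, foo_reclaim);
 *
 * And the polling interfaces defined just below allow the grace-period
 * wait itself to be split up:
 *
 *	unsigned long cookie = start_poll_synchronize_srcu(&my_srcu);
 *	// ...do other work...
 *	if (poll_state_synchronize_srcu(&my_srcu, cookie))
 *		kfree(old_fp);	// Full grace period elapsed; else retry later.
 */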
McKenney 10175358c9faSPaul E. McKenney /** 10185358c9faSPaul E. McKenney * get_state_synchronize_srcu - Provide an end-of-grace-period cookie 10195358c9faSPaul E. McKenney * @ssp: srcu_struct to provide cookie for. 10205358c9faSPaul E. McKenney * 10215358c9faSPaul E. McKenney * This function returns a cookie that can be passed to 10225358c9faSPaul E. McKenney * poll_state_synchronize_srcu(), which will return true if a full grace 10235358c9faSPaul E. McKenney * period has elapsed in the meantime. It is the caller's responsibility 10245358c9faSPaul E. McKenney * to make sure that grace period happens, for example, by invoking 10255358c9faSPaul E. McKenney * call_srcu() after return from get_state_synchronize_srcu(). 10265358c9faSPaul E. McKenney */ 10275358c9faSPaul E. McKenney unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp) 10285358c9faSPaul E. McKenney { 10295358c9faSPaul E. McKenney // Any prior manipulation of SRCU-protected data must happen 10305358c9faSPaul E. McKenney // before the load from ->srcu_gp_seq. 10315358c9faSPaul E. McKenney smp_mb(); 10325358c9faSPaul E. McKenney return rcu_seq_snap(&ssp->srcu_gp_seq); 10335358c9faSPaul E. McKenney } 10345358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(get_state_synchronize_srcu); 10355358c9faSPaul E. McKenney 10365358c9faSPaul E. McKenney /** 10375358c9faSPaul E. McKenney * start_poll_synchronize_srcu - Provide cookie and start grace period 10385358c9faSPaul E. McKenney * @ssp: srcu_struct to provide cookie for. 10395358c9faSPaul E. McKenney * 10405358c9faSPaul E. McKenney * This function returns a cookie that can be passed to 10415358c9faSPaul E. McKenney * poll_state_synchronize_srcu(), which will return true if a full grace 10425358c9faSPaul E. McKenney * period has elapsed in the meantime. Unlike get_state_synchronize_srcu(), 10435358c9faSPaul E. McKenney * this function also ensures that any needed SRCU grace period will be 10445358c9faSPaul E. McKenney * started. This convenience does come at a cost in terms of CPU overhead. 10455358c9faSPaul E. McKenney */ 10465358c9faSPaul E. McKenney unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp) 10475358c9faSPaul E. McKenney { 10485358c9faSPaul E. McKenney return srcu_gp_start_if_needed(ssp, NULL, true); 10495358c9faSPaul E. McKenney } 10505358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu); 10515358c9faSPaul E. McKenney 10525358c9faSPaul E. McKenney /** 10535358c9faSPaul E. McKenney * poll_state_synchronize_srcu - Has cookie's grace period ended? 10545358c9faSPaul E. McKenney * @ssp: srcu_struct to provide cookie for. 10555358c9faSPaul E. McKenney * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu(). 10565358c9faSPaul E. McKenney * 10575358c9faSPaul E. McKenney * This function takes the cookie that was returned from either 10585358c9faSPaul E. McKenney * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and 10595358c9faSPaul E. McKenney * returns @true if an SRCU grace period elapsed since the time that the 10605358c9faSPaul E. McKenney * cookie was created. 10614e7ccfaeSPaul E. McKenney * 10624e7ccfaeSPaul E. McKenney * Because cookies are finite in size, wrapping/overflow is possible. 10634e7ccfaeSPaul E. McKenney * This is more pronounced on 32-bit systems where cookies are 32 bits, 10644e7ccfaeSPaul E. McKenney * where in theory wrapping could happen in about 14 hours assuming 10654e7ccfaeSPaul E. McKenney * 25-microsecond expedited SRCU grace periods. 
However, a more likely 10664e7ccfaeSPaul E. McKenney * overflow lower bound is on the order of 24 days in the case of 10674e7ccfaeSPaul E. McKenney * one-millisecond SRCU grace periods. Of course, wrapping in a 64-bit 10684e7ccfaeSPaul E. McKenney * system requires geologic timespans, as in more than seven million years 10694e7ccfaeSPaul E. McKenney * even for expedited SRCU grace periods. 10704e7ccfaeSPaul E. McKenney * 10714e7ccfaeSPaul E. McKenney * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems 10724e7ccfaeSPaul E. McKenney * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU. This uses 10734e7ccfaeSPaul E. McKenney * a 16-bit cookie, which rcutorture routinely wraps in a matter of a 10744e7ccfaeSPaul E. McKenney * few minutes. If this proves to be a problem, this counter will be 10754e7ccfaeSPaul E. McKenney * expanded to the same size as for Tree SRCU. 10765358c9faSPaul E. McKenney */ 10775358c9faSPaul E. McKenney bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie) 10785358c9faSPaul E. McKenney { 10795358c9faSPaul E. McKenney if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie)) 10805358c9faSPaul E. McKenney return false; 10815358c9faSPaul E. McKenney // Ensure that the end of the SRCU grace period happens before 10825358c9faSPaul E. McKenney // any subsequent code that the caller might execute. 10835358c9faSPaul E. McKenney smp_mb(); // ^^^ 10845358c9faSPaul E. McKenney return true; 10855358c9faSPaul E. McKenney } 10865358c9faSPaul E. McKenney EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu); 10875358c9faSPaul E. McKenney 1088da915ad5SPaul E. McKenney /* 1089da915ad5SPaul E. McKenney * Callback function for srcu_barrier() use. 1090da915ad5SPaul E. McKenney */ 1091da915ad5SPaul E. McKenney static void srcu_barrier_cb(struct rcu_head *rhp) 1092da915ad5SPaul E. McKenney { 1093da915ad5SPaul E. McKenney struct srcu_data *sdp; 1094aacb5d91SPaul E. McKenney struct srcu_struct *ssp; 1095da915ad5SPaul E. McKenney 1096da915ad5SPaul E. McKenney sdp = container_of(rhp, struct srcu_data, srcu_barrier_head); 1097aacb5d91SPaul E. McKenney ssp = sdp->ssp; 1098aacb5d91SPaul E. McKenney if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) 1099aacb5d91SPaul E. McKenney complete(&ssp->srcu_barrier_completion); 1100da915ad5SPaul E. McKenney } 1101da915ad5SPaul E. McKenney 1102dad81a20SPaul E. McKenney /** 1103dad81a20SPaul E. McKenney * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. 1104aacb5d91SPaul E. McKenney * @ssp: srcu_struct on which to wait for in-flight callbacks. 1105dad81a20SPaul E. McKenney */ 1106aacb5d91SPaul E. McKenney void srcu_barrier(struct srcu_struct *ssp) 1107dad81a20SPaul E. McKenney { 1108da915ad5SPaul E. McKenney int cpu; 1109da915ad5SPaul E. McKenney struct srcu_data *sdp; 1110aacb5d91SPaul E. McKenney unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq); 1111da915ad5SPaul E. McKenney 1112aacb5d91SPaul E. McKenney check_init_srcu_struct(ssp); 1113aacb5d91SPaul E. McKenney mutex_lock(&ssp->srcu_barrier_mutex); 1114aacb5d91SPaul E. McKenney if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) { 1115da915ad5SPaul E. McKenney smp_mb(); /* Force ordering following return. */ 1116aacb5d91SPaul E. McKenney mutex_unlock(&ssp->srcu_barrier_mutex); 1117da915ad5SPaul E. McKenney return; /* Someone else did our work for us. */ 1118da915ad5SPaul E. McKenney } 1119aacb5d91SPaul E. McKenney rcu_seq_start(&ssp->srcu_barrier_seq); 1120aacb5d91SPaul E. McKenney init_completion(&ssp->srcu_barrier_completion); 1121da915ad5SPaul E. 
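	/*
	 * Overview of the scheme below (descriptive note): one
	 * srcu_barrier_cb() callback is entrained behind the callbacks
	 * already queued on each CPU that has any, and
	 * ->srcu_barrier_cpu_cnt counts the entrained callbacks not yet
	 * invoked, so ->srcu_barrier_completion can complete only after
	 * every previously queued callback has been invoked.
	 */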
McKenney 1122da915ad5SPaul E. McKenney /* Initial count prevents reaching zero until all CBs are posted. */ 1123aacb5d91SPaul E. McKenney atomic_set(&ssp->srcu_barrier_cpu_cnt, 1); 1124da915ad5SPaul E. McKenney 1125da915ad5SPaul E. McKenney /* 1126da915ad5SPaul E. McKenney * Each pass through this loop enqueues a callback, but only 1127da915ad5SPaul E. McKenney * on CPUs already having callbacks enqueued. Note that if 1128da915ad5SPaul E. McKenney * a CPU already has callbacks enqueued, it must have already 1129da915ad5SPaul E. McKenney * registered the need for a future grace period, so all we 1130da915ad5SPaul E. McKenney * need do is enqueue a callback that will use the same 1131da915ad5SPaul E. McKenney * grace period as the last callback already in the queue. 1132da915ad5SPaul E. McKenney */ 1133da915ad5SPaul E. McKenney for_each_possible_cpu(cpu) { 1134aacb5d91SPaul E. McKenney sdp = per_cpu_ptr(ssp->sda, cpu); 1135d6331980SPaul E. McKenney spin_lock_irq_rcu_node(sdp); 1136aacb5d91SPaul E. McKenney atomic_inc(&ssp->srcu_barrier_cpu_cnt); 1137da915ad5SPaul E. McKenney sdp->srcu_barrier_head.func = srcu_barrier_cb; 1138a602538eSPaul E. McKenney debug_rcu_head_queue(&sdp->srcu_barrier_head); 1139da915ad5SPaul E. McKenney if (!rcu_segcblist_entrain(&sdp->srcu_cblist, 114077a40f97SJoel Fernandes (Google) &sdp->srcu_barrier_head)) { 1141a602538eSPaul E. McKenney debug_rcu_head_unqueue(&sdp->srcu_barrier_head); 1142aacb5d91SPaul E. McKenney atomic_dec(&ssp->srcu_barrier_cpu_cnt); 1143a602538eSPaul E. McKenney } 1144d6331980SPaul E. McKenney spin_unlock_irq_rcu_node(sdp); 1145da915ad5SPaul E. McKenney } 1146da915ad5SPaul E. McKenney 1147da915ad5SPaul E. McKenney /* Remove the initial count, at which point reaching zero can happen. */ 1148aacb5d91SPaul E. McKenney if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) 1149aacb5d91SPaul E. McKenney complete(&ssp->srcu_barrier_completion); 1150aacb5d91SPaul E. McKenney wait_for_completion(&ssp->srcu_barrier_completion); 1151da915ad5SPaul E. McKenney 1152aacb5d91SPaul E. McKenney rcu_seq_end(&ssp->srcu_barrier_seq); 1153aacb5d91SPaul E. McKenney mutex_unlock(&ssp->srcu_barrier_mutex); 1154dad81a20SPaul E. McKenney } 1155dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_barrier); 1156dad81a20SPaul E. McKenney 1157dad81a20SPaul E. McKenney /** 1158dad81a20SPaul E. McKenney * srcu_batches_completed - return batches completed. 1159aacb5d91SPaul E. McKenney * @ssp: srcu_struct on which to report batch completion. 1160dad81a20SPaul E. McKenney * 1161dad81a20SPaul E. McKenney * Report the number of batches, correlated with, but not necessarily 1162dad81a20SPaul E. McKenney * precisely the same as, the number of grace periods that have elapsed. 1163dad81a20SPaul E. McKenney */ 1164aacb5d91SPaul E. McKenney unsigned long srcu_batches_completed(struct srcu_struct *ssp) 1165dad81a20SPaul E. McKenney { 116639f91504SPaul E. McKenney return READ_ONCE(ssp->srcu_idx); 1167dad81a20SPaul E. McKenney } 1168dad81a20SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_batches_completed); 1169dad81a20SPaul E. McKenney 1170dad81a20SPaul E. McKenney /* 1171da915ad5SPaul E. McKenney * Core SRCU state machine. Push state bits of ->srcu_gp_seq 1172da915ad5SPaul E. McKenney * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has 1173da915ad5SPaul E. McKenney * completed in that state. 1174dad81a20SPaul E. McKenney */ 1175aacb5d91SPaul E. McKenney static void srcu_advance_state(struct srcu_struct *ssp) 1176dad81a20SPaul E. McKenney { 1177dad81a20SPaul E.
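	/*
	 * Descriptive note: this function single-steps the grace-period
	 * state machine under ->srcu_gp_mutex, so at most one task
	 * advances the state at any time:
	 *
	 *	SRCU_STATE_IDLE:  start a new grace period if one is needed.
	 *	SRCU_STATE_SCAN1: wait for the inactive index to drain,
	 *			  then flip ->srcu_idx.
	 *	SRCU_STATE_SCAN2: wait for the newly inactive index to
	 *			  drain, then srcu_gp_end().
	 *
	 * The early returns below mean either that readers are still
	 * present (retry from the workqueue later) or that some other
	 * task is already handling this grace period.
	 */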
McKenney int idx; 1178dad81a20SPaul E. McKenney 1179aacb5d91SPaul E. McKenney mutex_lock(&ssp->srcu_gp_mutex); 1180da915ad5SPaul E. McKenney 1181dad81a20SPaul E. McKenney /* 1182dad81a20SPaul E. McKenney * Because readers might be delayed for an extended period after 1183da915ad5SPaul E. McKenney * fetching ->srcu_idx for their index, at any point in time there 1184dad81a20SPaul E. McKenney * might well be readers using both idx=0 and idx=1. We therefore 1185dad81a20SPaul E. McKenney * need to wait for readers to clear from both index values before 1186dad81a20SPaul E. McKenney * invoking a callback. 1187dad81a20SPaul E. McKenney * 1188dad81a20SPaul E. McKenney * The load-acquire ensures that we see the accesses performed 1189dad81a20SPaul E. McKenney * by the prior grace period. 1190dad81a20SPaul E. McKenney */ 1191aacb5d91SPaul E. McKenney idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */ 1192dad81a20SPaul E. McKenney if (idx == SRCU_STATE_IDLE) { 1193aacb5d91SPaul E. McKenney spin_lock_irq_rcu_node(ssp); 1194aacb5d91SPaul E. McKenney if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { 1195aacb5d91SPaul E. McKenney WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq)); 1196aacb5d91SPaul E. McKenney spin_unlock_irq_rcu_node(ssp); 1197aacb5d91SPaul E. McKenney mutex_unlock(&ssp->srcu_gp_mutex); 1198dad81a20SPaul E. McKenney return; 1199dad81a20SPaul E. McKenney } 1200aacb5d91SPaul E. McKenney idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)); 1201dad81a20SPaul E. McKenney if (idx == SRCU_STATE_IDLE) 1202aacb5d91SPaul E. McKenney srcu_gp_start(ssp); 1203aacb5d91SPaul E. McKenney spin_unlock_irq_rcu_node(ssp); 1204da915ad5SPaul E. McKenney if (idx != SRCU_STATE_IDLE) { 1205aacb5d91SPaul E. McKenney mutex_unlock(&ssp->srcu_gp_mutex); 1206dad81a20SPaul E. McKenney return; /* Someone else started the grace period. */ 1207dad81a20SPaul E. McKenney } 1208da915ad5SPaul E. McKenney } 1209dad81a20SPaul E. McKenney 1210aacb5d91SPaul E. McKenney if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { 1211aacb5d91SPaul E. McKenney idx = 1 ^ (ssp->srcu_idx & 1); 1212aacb5d91SPaul E. McKenney if (!try_check_zero(ssp, idx, 1)) { 1213aacb5d91SPaul E. McKenney mutex_unlock(&ssp->srcu_gp_mutex); 1214dad81a20SPaul E. McKenney return; /* readers present, retry later. */ 1215da915ad5SPaul E. McKenney } 1216aacb5d91SPaul E. McKenney srcu_flip(ssp); 121771042606SPaul E. McKenney spin_lock_irq_rcu_node(ssp); 1218aacb5d91SPaul E. McKenney rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2); 121971042606SPaul E. McKenney spin_unlock_irq_rcu_node(ssp); 1220dad81a20SPaul E. McKenney } 1221dad81a20SPaul E. McKenney 1222aacb5d91SPaul E. McKenney if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { 1223dad81a20SPaul E. McKenney 1224dad81a20SPaul E. McKenney /* 1225dad81a20SPaul E. McKenney * SRCU read-side critical sections are normally short, 1226dad81a20SPaul E. McKenney * so check at least twice in quick succession after a flip. 1227dad81a20SPaul E. McKenney */ 1228aacb5d91SPaul E. McKenney idx = 1 ^ (ssp->srcu_idx & 1); 1229aacb5d91SPaul E. McKenney if (!try_check_zero(ssp, idx, 2)) { 1230aacb5d91SPaul E. McKenney mutex_unlock(&ssp->srcu_gp_mutex); 1231da915ad5SPaul E. McKenney return; /* readers present, retry later. */ 1232da915ad5SPaul E. McKenney } 1233aacb5d91SPaul E. McKenney srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */ 1234dad81a20SPaul E. McKenney } 1235dad81a20SPaul E. McKenney } 1236dad81a20SPaul E. McKenney 1237dad81a20SPaul E. 
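/*
 * Worked example of the ->srcu_gp_seq encoding used above (assuming the
 * rcu_seq helpers' two low-order state bits, RCU_SEQ_STATE_MASK == 0x3):
 * a sequence value of 0x9 (binary 1001) decodes to grace-period counter
 * 0x9 >> 2 == 2 and state 0x9 & 0x3 == 1 == SRCU_STATE_SCAN1.
 * rcu_seq_snap() rounds upward so that completion of the returned value
 * implies a full grace period after the snapshot.
 */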
McKenney /* 1238dad81a20SPaul E. McKenney * Invoke a limited number of SRCU callbacks that have passed through 1239dad81a20SPaul E. McKenney * their grace period. If there are more to do, SRCU will reschedule 1240dad81a20SPaul E. McKenney * the workqueue. Note that needed memory barriers have been executed 1241dad81a20SPaul E. McKenney * in this task's context by srcu_readers_active_idx_check(). 1242dad81a20SPaul E. McKenney */ 1243da915ad5SPaul E. McKenney static void srcu_invoke_callbacks(struct work_struct *work) 1244dad81a20SPaul E. McKenney { 1245ae5c2341SJoel Fernandes (Google) long len; 1246da915ad5SPaul E. McKenney bool more; 1247dad81a20SPaul E. McKenney struct rcu_cblist ready_cbs; 1248dad81a20SPaul E. McKenney struct rcu_head *rhp; 1249da915ad5SPaul E. McKenney struct srcu_data *sdp; 1250aacb5d91SPaul E. McKenney struct srcu_struct *ssp; 1251dad81a20SPaul E. McKenney 1252e81baf4cSSebastian Andrzej Siewior sdp = container_of(work, struct srcu_data, work); 1253e81baf4cSSebastian Andrzej Siewior 1254aacb5d91SPaul E. McKenney ssp = sdp->ssp; 1255dad81a20SPaul E. McKenney rcu_cblist_init(&ready_cbs); 1256d6331980SPaul E. McKenney spin_lock_irq_rcu_node(sdp); 1257da915ad5SPaul E. McKenney rcu_segcblist_advance(&sdp->srcu_cblist, 1258aacb5d91SPaul E. McKenney rcu_seq_current(&ssp->srcu_gp_seq)); 1259da915ad5SPaul E. McKenney if (sdp->srcu_cblist_invoking || 1260da915ad5SPaul E. McKenney !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { 1261d6331980SPaul E. McKenney spin_unlock_irq_rcu_node(sdp); 1262da915ad5SPaul E. McKenney return; /* Someone else on the job or nothing to do. */ 1263da915ad5SPaul E. McKenney } 1264da915ad5SPaul E. McKenney 1265da915ad5SPaul E. McKenney /* We are on the job! Extract and invoke ready callbacks. */ 1266da915ad5SPaul E. McKenney sdp->srcu_cblist_invoking = true; 1267da915ad5SPaul E. McKenney rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs); 1268ae5c2341SJoel Fernandes (Google) len = ready_cbs.len; 1269d6331980SPaul E. McKenney spin_unlock_irq_rcu_node(sdp); 1270dad81a20SPaul E. McKenney rhp = rcu_cblist_dequeue(&ready_cbs); 1271dad81a20SPaul E. McKenney for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { 1272a602538eSPaul E. McKenney debug_rcu_head_unqueue(rhp); 1273dad81a20SPaul E. McKenney local_bh_disable(); 1274dad81a20SPaul E. McKenney rhp->func(rhp); 1275dad81a20SPaul E. McKenney local_bh_enable(); 1276dad81a20SPaul E. McKenney } 1277ae5c2341SJoel Fernandes (Google) WARN_ON_ONCE(ready_cbs.len); 1278da915ad5SPaul E. McKenney 1279da915ad5SPaul E. McKenney /* 1280da915ad5SPaul E. McKenney * Update counts, accelerate new callbacks, and if needed, 1281da915ad5SPaul E. McKenney * schedule another round of callback invocation. 1282da915ad5SPaul E. McKenney */ 1283d6331980SPaul E. McKenney spin_lock_irq_rcu_node(sdp); 1284ae5c2341SJoel Fernandes (Google) rcu_segcblist_add_len(&sdp->srcu_cblist, -len); 1285da915ad5SPaul E. McKenney (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, 1286aacb5d91SPaul E. McKenney rcu_seq_snap(&ssp->srcu_gp_seq)); 1287da915ad5SPaul E. McKenney sdp->srcu_cblist_invoking = false; 1288da915ad5SPaul E. McKenney more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); 1289d6331980SPaul E. McKenney spin_unlock_irq_rcu_node(sdp); 1290da915ad5SPaul E. McKenney if (more) 1291da915ad5SPaul E. McKenney srcu_schedule_cbs_sdp(sdp, 0); 1292dad81a20SPaul E. McKenney } 1293dad81a20SPaul E. McKenney 1294dad81a20SPaul E. McKenney /* 1295dad81a20SPaul E. McKenney * Finished one round of SRCU grace period. 
Start another if there are 1296dad81a20SPaul E. McKenney * more SRCU callbacks queued, otherwise put SRCU into not-running state. 1297dad81a20SPaul E. McKenney */ 1298aacb5d91SPaul E. McKenney static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay) 1299dad81a20SPaul E. McKenney { 1300da915ad5SPaul E. McKenney bool pushgp = true; 1301dad81a20SPaul E. McKenney 1302aacb5d91SPaul E. McKenney spin_lock_irq_rcu_node(ssp); 1303aacb5d91SPaul E. McKenney if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { 1304aacb5d91SPaul E. McKenney if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) { 1305da915ad5SPaul E. McKenney /* All requests fulfilled, time to go idle. */ 1306da915ad5SPaul E. McKenney pushgp = false; 1307dad81a20SPaul E. McKenney } 1308aacb5d91SPaul E. McKenney } else if (!rcu_seq_state(ssp->srcu_gp_seq)) { 1309da915ad5SPaul E. McKenney /* Outstanding request and no GP. Start one. */ 1310aacb5d91SPaul E. McKenney srcu_gp_start(ssp); 1311da915ad5SPaul E. McKenney } 1312aacb5d91SPaul E. McKenney spin_unlock_irq_rcu_node(ssp); 1313dad81a20SPaul E. McKenney 1314da915ad5SPaul E. McKenney if (pushgp) 1315aacb5d91SPaul E. McKenney queue_delayed_work(rcu_gp_wq, &ssp->work, delay); 1316dad81a20SPaul E. McKenney } 1317dad81a20SPaul E. McKenney 1318dad81a20SPaul E. McKenney /* 1319dad81a20SPaul E. McKenney * This is the work-queue function that handles SRCU grace periods. 1320dad81a20SPaul E. McKenney */ 13210d8a1e83SPaul E. McKenney static void process_srcu(struct work_struct *work) 1322dad81a20SPaul E. McKenney { 1323aacb5d91SPaul E. McKenney struct srcu_struct *ssp; 1324dad81a20SPaul E. McKenney 1325aacb5d91SPaul E. McKenney ssp = container_of(work, struct srcu_struct, work.work); 1326dad81a20SPaul E. McKenney 1327aacb5d91SPaul E. McKenney srcu_advance_state(ssp); 1328aacb5d91SPaul E. McKenney srcu_reschedule(ssp, srcu_get_delay(ssp)); 1329dad81a20SPaul E. McKenney } 13307f6733c3SPaul E. McKenney 13317f6733c3SPaul E. McKenney void srcutorture_get_gp_data(enum rcutorture_type test_type, 1332aacb5d91SPaul E. McKenney struct srcu_struct *ssp, int *flags, 1333aebc8264SPaul E. McKenney unsigned long *gp_seq) 13347f6733c3SPaul E. McKenney { 13357f6733c3SPaul E. McKenney if (test_type != SRCU_FLAVOR) 13367f6733c3SPaul E. McKenney return; 13377f6733c3SPaul E. McKenney *flags = 0; 1338aacb5d91SPaul E. McKenney *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq); 13397f6733c3SPaul E. McKenney } 13407f6733c3SPaul E. McKenney EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); 13411f4f6da1SPaul E. McKenney 1342aacb5d91SPaul E. McKenney void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) 1343115a1a52SPaul E. McKenney { 1344115a1a52SPaul E. McKenney int cpu; 1345115a1a52SPaul E. McKenney int idx; 1346ac3748c6SPaul E. McKenney unsigned long s0 = 0, s1 = 0; 1347115a1a52SPaul E. McKenney 1348aacb5d91SPaul E. McKenney idx = ssp->srcu_idx & 0x1; 134952e17ba1SPaul E. McKenney pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):", 1350aacb5d91SPaul E. McKenney tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx); 1351115a1a52SPaul E. McKenney for_each_possible_cpu(cpu) { 1352115a1a52SPaul E. McKenney unsigned long l0, l1; 1353115a1a52SPaul E. McKenney unsigned long u0, u1; 1354115a1a52SPaul E. McKenney long c0, c1; 13555ab07a8dSPaul E. McKenney struct srcu_data *sdp; 1356115a1a52SPaul E. McKenney 1357aacb5d91SPaul E. McKenney sdp = per_cpu_ptr(ssp->sda, cpu); 1358b68c6146SPaul E. McKenney u0 = data_race(sdp->srcu_unlock_count[!idx]); 1359b68c6146SPaul E. 
McKenney u1 = data_race(sdp->srcu_unlock_count[idx]); 1360115a1a52SPaul E. McKenney 1361115a1a52SPaul E. McKenney /* 1362115a1a52SPaul E. McKenney * Make sure that a lock is always counted if the corresponding 1363115a1a52SPaul E. McKenney * unlock is counted. 1364115a1a52SPaul E. McKenney */ 1365115a1a52SPaul E. McKenney smp_rmb(); 1366115a1a52SPaul E. McKenney 1367b68c6146SPaul E. McKenney l0 = data_race(sdp->srcu_lock_count[!idx]); 1368b68c6146SPaul E. McKenney l1 = data_race(sdp->srcu_lock_count[idx]); 1369115a1a52SPaul E. McKenney 1370115a1a52SPaul E. McKenney c0 = l0 - u0; 1371115a1a52SPaul E. McKenney c1 = l1 - u1; 13727e210a65SPaul E. McKenney pr_cont(" %d(%ld,%ld %c)", 13737e210a65SPaul E. McKenney cpu, c0, c1, 13747e210a65SPaul E. McKenney "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]); 1375ac3748c6SPaul E. McKenney s0 += c0; 1376ac3748c6SPaul E. McKenney s1 += c1; 1377115a1a52SPaul E. McKenney } 1378ac3748c6SPaul E. McKenney pr_cont(" T(%ld,%ld)\n", s0, s1); 1379115a1a52SPaul E. McKenney } 1380115a1a52SPaul E. McKenney EXPORT_SYMBOL_GPL(srcu_torture_stats_print); 1381115a1a52SPaul E. McKenney 13821f4f6da1SPaul E. McKenney static int __init srcu_bootup_announce(void) 13831f4f6da1SPaul E. McKenney { 13841f4f6da1SPaul E. McKenney pr_info("Hierarchical SRCU implementation.\n"); 13850c8e0e3cSPaul E. McKenney if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF) 13860c8e0e3cSPaul E. McKenney pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff); 13871f4f6da1SPaul E. McKenney return 0; 13881f4f6da1SPaul E. McKenney } 13891f4f6da1SPaul E. McKenney early_initcall(srcu_bootup_announce); 1390e0fcba9aSPaul E. McKenney 1391e0fcba9aSPaul E. McKenney void __init srcu_init(void) 1392e0fcba9aSPaul E. McKenney { 1393aacb5d91SPaul E. McKenney struct srcu_struct *ssp; 1394e0fcba9aSPaul E. McKenney 13958e9c01c7SFrederic Weisbecker /* 13968e9c01c7SFrederic Weisbecker * Once that is set, call_srcu() can follow the normal path and 13978e9c01c7SFrederic Weisbecker * queue delayed work. This must follow RCU workqueues creation 13988e9c01c7SFrederic Weisbecker * and timers initialization. 13998e9c01c7SFrederic Weisbecker */ 1400e0fcba9aSPaul E. McKenney srcu_init_done = true; 1401e0fcba9aSPaul E. McKenney while (!list_empty(&srcu_boot_list)) { 1402aacb5d91SPaul E. McKenney ssp = list_first_entry(&srcu_boot_list, struct srcu_struct, 14034e6ea4efSPaul E. McKenney work.work.entry); 1404aacb5d91SPaul E. McKenney list_del_init(&ssp->work.work.entry); 1405aacb5d91SPaul E. McKenney queue_work(rcu_gp_wq, &ssp->work.work); 1406e0fcba9aSPaul E. McKenney } 1407e0fcba9aSPaul E. McKenney } 1408fe15b50cSPaul E. McKenney 1409fe15b50cSPaul E. McKenney #ifdef CONFIG_MODULES 1410fe15b50cSPaul E. McKenney 1411fe15b50cSPaul E. McKenney /* Initialize any global-scope srcu_struct structures used by this module. */ 1412fe15b50cSPaul E. McKenney static int srcu_module_coming(struct module *mod) 1413fe15b50cSPaul E. McKenney { 1414fe15b50cSPaul E. McKenney int i; 1415fe15b50cSPaul E. McKenney struct srcu_struct **sspp = mod->srcu_struct_ptrs; 1416fe15b50cSPaul E. McKenney int ret; 1417fe15b50cSPaul E. McKenney 1418fe15b50cSPaul E. McKenney for (i = 0; i < mod->num_srcu_structs; i++) { 1419fe15b50cSPaul E. McKenney ret = init_srcu_struct(*(sspp++)); 1420fe15b50cSPaul E. McKenney if (WARN_ON_ONCE(ret)) 1421fe15b50cSPaul E. McKenney return ret; 1422fe15b50cSPaul E. McKenney } 1423fe15b50cSPaul E. McKenney return 0; 1424fe15b50cSPaul E. McKenney } 1425fe15b50cSPaul E. McKenney 1426fe15b50cSPaul E. 
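/*
 * Illustrative sketch (the name "my_module_srcu" is hypothetical):
 * a module declaring
 *
 *	DEFINE_STATIC_SRCU(my_module_srcu);
 *
 * gets a pointer to it recorded in its ->srcu_struct_ptrs[] array, so
 * srcu_module_coming() above runs init_srcu_struct() on it at load time
 * and srcu_module_going() below runs cleanup_srcu_struct() at unload,
 * with no explicit setup or teardown needed in the module itself.
 */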
McKenney /* Clean up any global-scope srcu_struct structures used by this module. */ 1427fe15b50cSPaul E. McKenney static void srcu_module_going(struct module *mod) 1428fe15b50cSPaul E. McKenney { 1429fe15b50cSPaul E. McKenney int i; 1430fe15b50cSPaul E. McKenney struct srcu_struct **sspp = mod->srcu_struct_ptrs; 1431fe15b50cSPaul E. McKenney 1432fe15b50cSPaul E. McKenney for (i = 0; i < mod->num_srcu_structs; i++) 1433fe15b50cSPaul E. McKenney cleanup_srcu_struct(*(sspp++)); 1434fe15b50cSPaul E. McKenney } 1435fe15b50cSPaul E. McKenney 1436fe15b50cSPaul E. McKenney /* Handle one module, either coming or going. */ 1437fe15b50cSPaul E. McKenney static int srcu_module_notify(struct notifier_block *self, 1438fe15b50cSPaul E. McKenney unsigned long val, void *data) 1439fe15b50cSPaul E. McKenney { 1440fe15b50cSPaul E. McKenney struct module *mod = data; 1441fe15b50cSPaul E. McKenney int ret = 0; 1442fe15b50cSPaul E. McKenney 1443fe15b50cSPaul E. McKenney switch (val) { 1444fe15b50cSPaul E. McKenney case MODULE_STATE_COMING: 1445fe15b50cSPaul E. McKenney ret = srcu_module_coming(mod); 1446fe15b50cSPaul E. McKenney break; 1447fe15b50cSPaul E. McKenney case MODULE_STATE_GOING: 1448fe15b50cSPaul E. McKenney srcu_module_going(mod); 1449fe15b50cSPaul E. McKenney break; 1450fe15b50cSPaul E. McKenney default: 1451fe15b50cSPaul E. McKenney break; 1452fe15b50cSPaul E. McKenney } 1453fe15b50cSPaul E. McKenney return ret; 1454fe15b50cSPaul E. McKenney } 1455fe15b50cSPaul E. McKenney 1456fe15b50cSPaul E. McKenney static struct notifier_block srcu_module_nb = { 1457fe15b50cSPaul E. McKenney .notifier_call = srcu_module_notify, 1458fe15b50cSPaul E. McKenney .priority = 0, 1459fe15b50cSPaul E. McKenney }; 1460fe15b50cSPaul E. McKenney 1461fe15b50cSPaul E. McKenney static __init int init_srcu_module_notifier(void) 1462fe15b50cSPaul E. McKenney { 1463fe15b50cSPaul E. McKenney int ret; 1464fe15b50cSPaul E. McKenney 1465fe15b50cSPaul E. McKenney ret = register_module_notifier(&srcu_module_nb); 1466fe15b50cSPaul E. McKenney if (ret) 1467fe15b50cSPaul E. McKenney pr_warn("Failed to register srcu module notifier\n"); 1468fe15b50cSPaul E. McKenney return ret; 1469fe15b50cSPaul E. McKenney } 1470fe15b50cSPaul E. McKenney late_initcall(init_srcu_module_notifier); 1471fe15b50cSPaul E. McKenney 1472fe15b50cSPaul E. McKenney #endif /* #ifdef CONFIG_MODULES */ 1473
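/*
 * For reference, an illustrative read-side sketch of what all of the
 * grace-period machinery above waits on (the names "my_srcu", "gp", and
 * "do_something_with" are hypothetical):
 *
 *	int idx;
 *	struct foo *fp;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	fp = srcu_dereference(gp, &my_srcu);
 *	if (fp)
 *		do_something_with(fp);
 *	srcu_read_unlock(&my_srcu, idx);
 */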