/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/cache.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/rcu_node_tree.h>

#include "rcu_segcblist.h"

/* Communicate arguments to a kthread worker handler. */
struct rcu_exp_work {
	unsigned long rew_s;
	struct kthread_work rew_work;
};

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/*  some rcu_state fields as well as */
					/*  following. */
	unsigned long gp_seq;	/* Track rsp->gp_seq. */
	unsigned long gp_seq_needed; /* Track furthest future GP request. */
	unsigned long completedqs; /* All QSes done for this node. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed. */
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long rcu_gp_init_mask;	/* Mask of offline CPUs at GP init. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
	unsigned long expmask;	/* CPUs or groups that need to check in */
				/*  to allow the current expedited GP */
				/*  to complete. */
	unsigned long expmaskinit;
				/* Per-GP initial values for expmask. */
				/*  Initialized from ->expmaskinitnext at the */
				/*  beginning of each expedited GP. */
	unsigned long expmaskinitnext;
				/* Online CPUs for next expedited GP. */
				/*  Any CPU that has ever been online will */
				/*  have its bit set. */
	struct kthread_worker *exp_kworker;
				/* Workers performing per node expedited GP */
				/*  initialization. */
	unsigned long cbovldmask;
				/* CPUs experiencing callback overload. */
	unsigned long ffmask;	/* Fully functional CPUs. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU here. */
	int	grphi;		/* highest-numbered CPU here. */
	u8	grpnum;		/* group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct mutex kthread_mutex;
				/* Exclusion for thread spawning and affinity */
				/*  manipulation. */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_boosts;	/* Number of boosts for this rcu_node structure. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait for GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
	struct rcu_exp_work rew;
	bool exp_need_flush;	/* Need to flush workitem? */
	raw_spinlock_t exp_poll_lock;
				/* Lock and data for polled expedited grace periods. */
	unsigned long exp_seq_poll_rq;
	struct work_struct exp_poll_wq;
} ____cacheline_internodealigned_in_smp;

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu)	(BIT((cpu) - (rnp)->grplo))

/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};
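
/*
 * Illustrative sketch (not part of the original interface): given a leaf
 * rcu_node and a CPU that it covers, test whether that CPU still owes a
 * quiescent state to the current grace period.  The helper name is invented
 * for this example; the real checks live in tree.c and run under ->lock.
 */
static inline bool rcu_cpu_owes_qs_sketch(struct rcu_node *rnp, int cpu)
{
	/* ->qsmask is indexed relative to ->grplo, hence leaf_node_cpu_bit(). */
	return !!(READ_ONCE(rnp->qsmask) & leaf_node_cpu_bit(rnp, cpu));
}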

/*
 * Record the snapshot of the core stats at half of the first RCU stall timeout.
 * The member gp_seq is used to ensure that all members are updated only once
 * during the sampling period. The snapshot is taken only if this gp_seq is not
 * equal to rdp->gp_seq.
 */
struct rcu_snap_record {
	unsigned long	gp_seq;		/* Track rdp->gp_seq counter */
	u64		cputime_irq;	/* Accumulated cputime of hard irqs */
	u64		cputime_softirq;/* Accumulated cputime of soft irqs */
	u64		cputime_system; /* Accumulated cputime of kernel tasks */
	unsigned long	nr_hardirqs;	/* Accumulated number of hard irqs */
	unsigned int	nr_softirqs;	/* Accumulated number of soft irqs */
	unsigned long long nr_csw;	/* Accumulated number of task switches */
	unsigned long	jiffies;	/* Track jiffies value */
};
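
/*
 * Illustrative sketch: a stall-warning report consumes the snapshot above by
 * diffing it against the current per-CPU statistics.  The helper below only
 * shows the shape of such a delta computation (here for context switches);
 * its name is invented for this example and the real reporting code lives in
 * tree_stall.h.
 */
static inline unsigned long long
rcu_snap_csw_delta_sketch(const struct rcu_snap_record *snap,
			  const struct rcu_snap_record *cur)
{
	/* Context switches seen since the snapshot was taken. */
	return cur->nr_csw - snap->nr_csw;
}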

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	gp_seq;		/* Track rsp->gp_seq counter. */
	unsigned long	gp_seq_needed;	/* Track furthest future GP request. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible ->gp_seq wrap. */
	bool		cpu_started;	/* RCU watching this onlining CPU. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */
	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
	struct work_struct strict_work;	/* Schedule readers for strict GPs. */

	/* 2) batch handling */
	struct rcu_segcblist cblist;	/* Segmented callback list, with */
					/*  different callbacks waiting for */
					/*  different grace periods. */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* # callbacks invoked since boot. */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	int  dynticks_snap;		/* Per-GP tracking for dynticks. */
	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
	bool rcu_urgent_qs;		/* GP old, need light quiescent state. */
	bool rcu_forced_tick;		/* Forced tick to provide QS. */
	bool rcu_forced_tick_exp;	/*  ... provide QS to expedited GP. */

	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
	unsigned long barrier_seq_snap;	/* Snap of rcu_state.barrier_sequence. */
	struct rcu_head barrier_head;
	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 5) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
	struct swait_queue_head nocb_state_wq; /* For offloading state changes */
	struct task_struct *nocb_gp_kthread;
	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
	struct timer_list nocb_timer;	/* Enforce finite deferral. */
	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */
	struct mutex nocb_gp_kthread_mutex; /* Exclusion for nocb gp kthread */
					    /*  spawning */

	/* The following fields are used by call_rcu, hence own cacheline. */
	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
	struct rcu_cblist nocb_bypass;	/* Lock-contention-bypass CB list. */
	unsigned long nocb_bypass_first; /* Time (jiffies) of first enqueue. */
	unsigned long nocb_nobypass_last; /* Last ->cblist enqueue (jiffies). */
	int nocb_nobypass_count;	/* # ->cblist enqueues at ^^^ time. */

	/* The following fields are used by GP kthread, hence own cacheline. */
	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
	u8 nocb_gp_sleep;		/* Is the nocb GP thread asleep? */
	u8 nocb_gp_bypass;		/* Found a bypass on last scan? */
	u8 nocb_gp_gp;			/* GP to wait for on last scan? */
	unsigned long nocb_gp_seq;	/*  If so, ->gp_seq to wait for. */
	unsigned long nocb_gp_loops;	/* # passes through wait code. */
	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
	struct task_struct *nocb_cb_kthread;
	struct list_head nocb_head_rdp; /*
					 * Head of rcu_data list in wakeup chain,
					 * if rdp_gp.
					 */
	struct list_head nocb_entry_rdp; /* rcu_data node in wakeup chain. */
	struct rcu_data *nocb_toggling_rdp; /* rdp queued for (de-)offloading */

	/* The following fields are used by CB kthread, hence new cacheline. */
	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
					/* GP rdp takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 6) RCU priority boosting. */
	struct task_struct *rcu_cpu_kthread_task;
					/* rcuc per-CPU kthread or NULL. */
	unsigned int rcu_cpu_kthread_status;
	char rcu_cpu_has_work;
	unsigned long rcuc_activity;

	/* 7) Diagnostic data, including RCU CPU stall warnings. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
	struct irq_work rcu_iw;		/* Check for non-irq activity. */
	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
	unsigned long rcu_iw_gp_seq;	/* ->gp_seq associated with ->rcu_iw. */
	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
	short rcu_ofl_gp_state;		/* ->gp_state at last offline. */
	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
	short rcu_onl_gp_state;		/* ->gp_state at last online. */
	unsigned long last_fqs_resched;	/* Time of last rcu_resched(). */
	unsigned long last_sched_clock;	/* Jiffies of last rcu_sched_clock_irq(). */
	struct rcu_snap_record snap_record; /* Snapshot of core stats at half of */
					    /*  the first RCU stall timeout */

	long lazy_len;			/* Length of buffered lazy callbacks. */
	int cpu;
};

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOCB_WAKE_NOT	0
#define RCU_NOCB_WAKE_BYPASS	1
#define RCU_NOCB_WAKE_LAZY	2
#define RCU_NOCB_WAKE		3
#define RCU_NOCB_WAKE_FORCE	4

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
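
/*
 * Illustrative sketch of how rcu_wait() above is meant to be used: a kthread
 * (process context only, since the macro calls schedule()) alternates between
 * sleeping and re-checking a condition until that condition becomes true.
 * The flag and helper name are invented for this example.  For scale, note
 * that RCU_JIFFIES_TILL_FORCE_QS above evaluates to 1, 2, or 3 jiffies for
 * HZ <= 250, HZ <= 500, and larger HZ values, respectively.
 */
static inline void rcu_wait_usage_sketch(bool *work_ready)
{
	rcu_wait(READ_ONCE(*work_ready));	/* Sleep until another task sets *work_ready. */
	WRITE_ONCE(*work_ready, false);		/* Consume the event and carry on. */
}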

/*
 * The maximum number of synchronize_rcu() users that are awakened directly
 * by the rcu_gp_kthread().  Any remaining users are deferred to the main
 * worker.
 */
#define SR_MAX_USERS_WAKE_FROM_GP 5
#define SR_NORMAL_GP_WAIT_HEAD_MAX 5

struct sr_wait_node {
	atomic_t inuse;
	struct llist_node node;
};

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/*  shut bogus gcc warning) */
	int ncpus;				/* # CPUs seen so far. */
	int n_online_cpus;			/* # CPUs online for RCU. */

	/* The following fields are guarded by the root rcu_node's lock. */

	unsigned long gp_seq ____cacheline_internodealigned_in_smp;
						/* Grace-period sequence #. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */
	unsigned long gp_wake_time;		/* Last GP kthread wake. */
	unsigned long gp_wake_seq;		/* ->gp_seq at ^^^. */
	unsigned long gp_seq_polled;		/* GP seq for polled API. */
	unsigned long gp_seq_polled_snap;	/* ->gp_seq_polled at normal GP start. */
	unsigned long gp_seq_polled_exp_snap;	/* ->gp_seq_polled at expedited GP start. */

	/* End of fields guarded by root rcu_node's lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/*  rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	raw_spinlock_t barrier_lock;		/* Protects ->barrier_seq_snap. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */
	u8 cbovld;				/* Callback overload now? */
	u8 cbovldnext;				/* ^        ^  next time? */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/*  kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long gp_end;			/* Time last GP ended, again */
						/*  in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long gp_req_activity;		/* Time of last GP request */
						/*  in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	int nr_fqs_jiffies_stall;		/* Number of fqs loops after
						 * which read jiffies and set
						 * jiffies_stall. Stall
						 * warnings disabled if !0. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */

	arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
						/* Synchronize offline with */
						/*  GP pre-initialization. */
	int nocb_is_setup;			/* nocb is set up from boot */

	/* synchronize_rcu() part. */
	struct llist_head srs_next;	/* Users newly requesting a GP. */
	struct llist_node *srs_wait_tail; /* Tail of users waiting for a GP. */
	struct llist_node *srs_done_tail; /* Tail of users whose GP has completed. */
	struct sr_wait_node srs_wait_nodes[SR_NORMAL_GP_WAIT_HEAD_MAX];
	struct work_struct srs_cleanup_work;
	atomic_t srs_cleanups_pending; /* srs inflight worker cleanups. */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
#define RCU_GP_FLAG_OVLD 0x4	/* Experiencing callback overload. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
#define RCU_GP_ONOFF	 3	/* Grace-period initialization hotplug. */
#define RCU_GP_INIT	 4	/* Grace-period initialization. */
#define RCU_GP_WAIT_FQS  5	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 6	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP	 7	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED	 8	/* Grace-period cleanup complete. */

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, and this will allow
 * the tracing userspace tools to be able to decipher the string
 * address to the matching string.
 */
#ifdef CONFIG_PREEMPT_RCU
#define RCU_ABBR 'p'
#define RCU_NAME_RAW "rcu_preempt"
#else /* #ifdef CONFIG_PREEMPT_RCU */
#define RCU_ABBR 's'
#define RCU_NAME_RAW "rcu_sched"
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifndef CONFIG_TRACING
#define RCU_NAME RCU_NAME_RAW
#else /* #ifdef CONFIG_TRACING */
static char rcu_name[] = RCU_NAME_RAW;
static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */
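
/*
 * Illustrative sketch: map the RCU_GP_* sleep states above to strings when
 * eyeballing rcu_state.gp_state in a debugger or crash dump.  The kernel's
 * stall-warning code has its own equivalent; the array and helper below are
 * invented for this example and are not part of the real interface.
 */
static const char * const rcu_gp_state_names_sketch[] = {
	"RCU_GP_IDLE",      "RCU_GP_WAIT_GPS", "RCU_GP_DONE_GPS",
	"RCU_GP_ONOFF",     "RCU_GP_INIT",     "RCU_GP_WAIT_FQS",
	"RCU_GP_DOING_FQS", "RCU_GP_CLEANUP",  "RCU_GP_CLEANED",
};

static inline const char *rcu_gp_state_name_sketch(short gp_state)
{
	if (gp_state < RCU_GP_IDLE || gp_state > RCU_GP_CLEANED)
		return "???";
	return rcu_gp_state_names_sketch[gp_state];
}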

/* Forward declarations for tree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_qs(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_flavor_sched_clock_irq(int user);
static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static bool rcu_is_callbacks_kthread(struct rcu_data *rdp);
static void rcu_cpu_kthread_setup(unsigned int cpu);
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool wake_nocb_gp(struct rcu_data *rdp, bool force);
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j, bool lazy);
static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
			  rcu_callback_t func, unsigned long flags, bool lazy);
static void __maybe_unused __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
						unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_cpu_nocb_kthread(int cpu);
static void show_rcu_nocb_state(struct rcu_data *rdp);
static void rcu_nocb_lock(struct rcu_data *rdp);
static void rcu_nocb_unlock(struct rcu_data *rdp);
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags);
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(void);

/*
 * Disable IRQs before checking offloaded state so that local
 * locking is safe against concurrent de-offloading.
 */
#define rcu_nocb_lock_irqsave(rdp, flags)			\
do {								\
	local_irq_save(flags);					\
	if (rcu_segcblist_is_offloaded(&(rdp)->cblist))		\
		raw_spin_lock(&(rdp)->nocb_lock);		\
} while (0)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);

/* Forward declarations for tree_stall.h */
static void record_gp_stall_check_time(void);
static void rcu_iw_handler(struct irq_work *iwp);
static void check_cpu_stall(struct rcu_data *rdp);
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay);

/* Forward declarations for tree_exp.h. */
static void sync_rcu_do_polled_gp(struct work_struct *wp);
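
/*
 * Illustrative sketch of the rcu_nocb_lock_irqsave() pattern defined above:
 * an offloaded CPU's ->cblist must be inspected with interrupts disabled and,
 * when offloading is enabled, under ->nocb_lock, which the macro and its
 * rcu_nocb_unlock_irqrestore() counterpart arrange.  The helper name is
 * invented for this example; real users live in tree.c and tree_nocb.h.
 */
static inline long rcu_nocb_cblist_len_sketch(struct rcu_data *rdp)
{
	unsigned long flags;
	long len;

	rcu_nocb_lock_irqsave(rdp, flags);		/* IRQs off, plus ->nocb_lock if offloaded. */
	len = rcu_segcblist_n_cbs(&rdp->cblist);	/* Safe to inspect ->cblist here. */
	rcu_nocb_unlock_irqrestore(rdp, flags);		/* Drop the lock and restore IRQs. */
	return len;
}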