// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 * tiny version for non-preemptible single-CPU use.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@linux.ibm.com>
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <linux/rcu_node_tree.h>
#include "rcu_segcblist.h"
#include "rcu.h"

int rcu_scheduler_active __read_mostly;
static LIST_HEAD(srcu_boot_list);
static bool srcu_init_done;

static int init_srcu_struct_fields(struct srcu_struct *ssp)
{
	ssp->srcu_lock_nesting[0] = 0;
	ssp->srcu_lock_nesting[1] = 0;
	init_swait_queue_head(&ssp->srcu_wq);
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
	ssp->srcu_gp_running = false;
	ssp->srcu_gp_waiting = false;
	ssp->srcu_idx = 0;
	ssp->srcu_idx_max = 0;
	INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
	INIT_LIST_HEAD(&ssp->srcu_work.entry);
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
	flush_work(&ssp->srcu_work);
	WARN_ON(ssp->srcu_gp_running);
	WARN_ON(ssp->srcu_gp_waiting);
	WARN_ON(ssp->srcu_cb_head);
	WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
	WARN_ON(ssp->srcu_idx != ssp->srcu_idx_max);
	WARN_ON(ssp->srcu_idx & 0x1);
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Removes the count for the old reader from the appropriate element of
 * the srcu_struct.
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;

	WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
	if (!newval && READ_ONCE(ssp->srcu_gp_waiting) && in_task())
		swake_up_one(&ssp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
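
/*
 * Usage sketch (illustration only, not part of the SRCU implementation):
 * a reader enters a critical section with srcu_read_lock(), which returns
 * the index that must later be passed back to srcu_read_unlock().  The
 * names my_srcu, my_data, and my_reader below are hypothetical.
 *
 *	static struct srcu_struct my_srcu;	// init_srcu_struct(&my_srcu) at setup.
 *	static int __rcu *my_data;
 *
 *	static int my_reader(void)
 *	{
 *		int idx, val = 0;
 *		int *p;
 *
 *		idx = srcu_read_lock(&my_srcu);
 *		p = srcu_dereference(my_data, &my_srcu);
 *		if (p)
 *			val = *p;	// Sleeping is legal here under SRCU.
 *		srcu_read_unlock(&my_srcu, idx);
 *		return val;
 *	}
 *
 * Once all readers and pending callbacks are done, cleanup_srcu_struct()
 * releases the structure's resources, as documented above.
 */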

/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPTION operation
 * means that we get away with murder on synchronization.  ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;
	struct rcu_head *lh;
	struct rcu_head *rhp;
	struct srcu_struct *ssp;

	ssp = container_of(wp, struct srcu_struct, srcu_work);
	if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
		return; /* Already running or nothing to do. */

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(ssp->srcu_gp_running, true);
	local_irq_disable();
	lh = ssp->srcu_cb_head;
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
	local_irq_enable();
	idx = (ssp->srcu_idx & 0x2) / 2;
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
	WRITE_ONCE(ssp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
	WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	WRITE_ONCE(ssp->srcu_gp_running, false);
	if (ULONG_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
		schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);

static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
{
	unsigned long cookie;

	cookie = get_state_synchronize_srcu(ssp);
	if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))
		return;
	WRITE_ONCE(ssp->srcu_idx_max, cookie);
	if (!READ_ONCE(ssp->srcu_gp_running)) {
		if (likely(srcu_init_done))
			schedule_work(&ssp->srcu_work);
		else if (list_empty(&ssp->srcu_work.entry))
			list_add(&ssp->srcu_work.entry, &srcu_boot_list);
	}
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	unsigned long flags;

	rhp->func = func;
	rhp->next = NULL;
	local_irq_save(flags);
	*ssp->srcu_cb_tail = rhp;
	ssp->srcu_cb_tail = &rhp->next;
	local_irq_restore(flags);
	srcu_gp_start_if_needed(ssp);
}
EXPORT_SYMBOL_GPL(call_srcu);

/*
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	struct rcu_synchronize rs;

	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;

	might_sleep();
	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_srcu(ssp, &rs.head, wakeme_after_rcu);
	wait_for_completion(&rs.completion);
	destroy_rcu_head_on_stack(&rs.head);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
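
/*
 * Usage sketch (illustration only): an updater typically unpublishes an
 * object from a reader-visible pointer, then either waits synchronously
 * with synchronize_srcu() or enqueues an asynchronous callback with
 * call_srcu() to free it once pre-existing readers have finished.  The
 * names my_srcu, struct my_obj, my_free_cb, and old are hypothetical.
 *
 *	struct my_obj {
 *		struct rcu_head rh;
 *		int payload;
 *	};
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_obj *obj = container_of(rhp, struct my_obj, rh);
 *
 *		kfree(obj);
 *	}
 *
 * After unpublishing "old" (for example with rcu_assign_pointer()), the
 * updater may block until a grace period elapses:
 *
 *	synchronize_srcu(&my_srcu);
 *	kfree(old);
 *
 * or defer the free to grace-period completion without blocking:
 *
 *	call_srcu(&my_srcu, &old->rh, my_free_cb);
 */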

/*
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret;

	barrier();
	ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;
	barrier();
	return ret;
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);

/*
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 *
 * The difference between this and get_state_synchronize_srcu() is that
 * this function ensures that poll_state_synchronize_srcu() will
 * eventually return the value true.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret = get_state_synchronize_srcu(ssp);

	srcu_gp_start_if_needed(ssp);
	return ret;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/*
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	unsigned long cur_s = READ_ONCE(ssp->srcu_idx);

	barrier();
	return ULONG_CMP_GE(cur_s, cookie) || ULONG_CMP_LT(cur_s, cookie - 3);
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);

/* Lockdep diagnostics. */
void __init rcu_scheduler_starting(void)
{
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}

/*
 * Queue work for srcu_struct structures with early boot callbacks.
 * The work won't actually execute until the workqueue initialization
 * phase that takes place after the scheduler starts.
 */
void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list,
				       struct srcu_struct, srcu_work.entry);
		list_del_init(&ssp->srcu_work.entry);
		schedule_work(&ssp->srcu_work);
	}
}
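
/*
 * Usage sketch for the polled grace-period interface above (illustration
 * only; my_srcu and my_cookie are hypothetical).  An updater records a
 * cookie, optionally kicks off a grace period, and later checks whether
 * that grace period has completed, without ever blocking:
 *
 *	unsigned long my_cookie;
 *
 *	my_cookie = start_poll_synchronize_srcu(&my_srcu);
 *	// ... do other work ...
 *	if (poll_state_synchronize_srcu(&my_srcu, my_cookie)) {
 *		// All pre-existing readers have finished; safe to reclaim.
 *	}
 *
 * get_state_synchronize_srcu() provides the same kind of cookie without
 * starting a grace period, so poll_state_synchronize_srcu() becomes true
 * only once some other activity causes the needed grace period to run.
 */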