// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 * tiny version for non-preemptible single-CPU use.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@linux.ibm.com>
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <linux/rcu_node_tree.h>
#include "rcu_segcblist.h"
#include "rcu.h"

#ifndef CONFIG_TREE_RCU
int rcu_scheduler_active __read_mostly;
#else // #ifndef CONFIG_TREE_RCU
extern int rcu_scheduler_active;
#endif // #else // #ifndef CONFIG_TREE_RCU
static LIST_HEAD(srcu_boot_list);
static bool srcu_init_done;

static int init_srcu_struct_fields(struct srcu_struct *ssp)
{
	ssp->srcu_lock_nesting[0] = 0;
	ssp->srcu_lock_nesting[1] = 0;
	init_swait_queue_head(&ssp->srcu_wq);
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
	ssp->srcu_gp_running = false;
	ssp->srcu_gp_waiting = false;
	ssp->srcu_idx = 0;
	ssp->srcu_idx_max = 0;
	INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
	INIT_LIST_HEAD(&ssp->srcu_work.entry);
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
	flush_work(&ssp->srcu_work);
	WARN_ON(ssp->srcu_gp_running);
	WARN_ON(ssp->srcu_gp_waiting);
	WARN_ON(ssp->srcu_cb_head);
	WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
	WARN_ON(ssp->srcu_idx != ssp->srcu_idx_max);
	WARN_ON(ssp->srcu_idx & 0x1);
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
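
/*
 * Illustrative sketch (not part of the original file) of the lifecycle
 * that init_srcu_struct() and cleanup_srcu_struct() pair up to provide
 * for a dynamically initialized domain.  The names "my_dynamic_srcu",
 * "my_srcu_setup", and "my_srcu_teardown" are hypothetical.
 */
static struct srcu_struct my_dynamic_srcu;

static int __maybe_unused my_srcu_setup(void)
{
	/* Must run before my_dynamic_srcu is passed to anything else. */
	return init_srcu_struct(&my_dynamic_srcu);
}

static void __maybe_unused my_srcu_teardown(void)
{
	/* All readers must be done and all callbacks invoked by now. */
	cleanup_srcu_struct(&my_dynamic_srcu);
}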

/*
 * Removes the count for the old reader from the appropriate element of
 * the srcu_struct.
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	int newval;

	preempt_disable();	// Needed for PREEMPT_LAZY
	newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;
	WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
	preempt_enable();
	if (!newval && READ_ONCE(ssp->srcu_gp_waiting) && in_task() && !irqs_disabled())
		swake_up_one(&ssp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
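
/*
 * Illustrative reader-side sketch (not part of the original file) pairing
 * with __srcu_read_unlock() above.  The "my_srcu" domain, the "my_data"
 * structure, and the "my_ptr" pointer are hypothetical; later sketches
 * below reuse them.
 */
struct my_data {
	int val;
};

static struct my_data __rcu *my_ptr;
DEFINE_STATIC_SRCU(my_srcu);

static int __maybe_unused my_reader(void)
{
	struct my_data *p;
	int ret = 0;
	int idx;

	idx = srcu_read_lock(&my_srcu);	/* Bumps ->srcu_lock_nesting[]. */
	p = srcu_dereference(my_ptr, &my_srcu);
	if (p)
		ret = p->val;	/* Sleeping is legal here, unlike plain RCU. */
	srcu_read_unlock(&my_srcu, idx);	/* May wake a waiting GP. */
	return ret;
}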

/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU operation and preemption
 * disabling mean that we get away with murder on synchronization. ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;
	struct rcu_head *lh;
	struct rcu_head *rhp;
	struct srcu_struct *ssp;

	ssp = container_of(wp, struct srcu_struct, srcu_work);
	preempt_disable();	// Needed for PREEMPT_LAZY
	if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) {
		preempt_enable();
		return; /* Already running or nothing to do. */
	}

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(ssp->srcu_gp_running, true);
	local_irq_disable();
	lh = ssp->srcu_cb_head;
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
	local_irq_enable();
	idx = (ssp->srcu_idx & 0x2) / 2; /* Counter used by pre-existing readers. */
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); /* Flip new readers to the other counter. */
	WRITE_ONCE(ssp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	preempt_enable();
	do {
		// Deadlock issues prevent __srcu_read_unlock() from
		// doing an unconditional wakeup, so polling is required.
		swait_event_timeout_exclusive(ssp->srcu_wq,
					      !READ_ONCE(ssp->srcu_lock_nesting[idx]),
					      HZ / 10);
	} while (READ_ONCE(ssp->srcu_lock_nesting[idx]));
	preempt_disable();	// Needed for PREEMPT_LAZY
	WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
	preempt_enable();

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;
		debug_rcu_head_callback(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	preempt_disable();	// Needed for PREEMPT_LAZY
	WRITE_ONCE(ssp->srcu_gp_running, false);
	idx = ULONG_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max));
	preempt_enable();
	if (idx)
		schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);

static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
{
	unsigned long cookie;

	lockdep_assert_preemption_disabled();	// Needed for PREEMPT_LAZY
	cookie = get_state_synchronize_srcu(ssp);
	if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))
		return;
	WRITE_ONCE(ssp->srcu_idx_max, cookie);
	if (!READ_ONCE(ssp->srcu_gp_running)) {
		if (likely(srcu_init_done))
			schedule_work(&ssp->srcu_work);
		else if (list_empty(&ssp->srcu_work.entry))
			list_add(&ssp->srcu_work.entry, &srcu_boot_list);
	}
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	unsigned long flags;

	rhp->func = func;
	rhp->next = NULL;
	preempt_disable();	// Needed for PREEMPT_LAZY
	local_irq_save(flags);
	*ssp->srcu_cb_tail = rhp;
	ssp->srcu_cb_tail = &rhp->next;
	local_irq_restore(flags);
	srcu_gp_start_if_needed(ssp);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(call_srcu);
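
/*
 * Illustrative sketch (not part of the original file) of asynchronous
 * deferred freeing via call_srcu(), reusing the hypothetical my_srcu
 * domain from the reader sketch above.  The rcu_head must be embedded in
 * the structure being freed; kfree() assumes <linux/slab.h>.
 */
struct my_node {
	int val;
	struct rcu_head rh;
};

static void my_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_node, rh));
}

static void __maybe_unused my_defer_free(struct my_node *p)
{
	/* Returns immediately; my_free_cb() runs after a full grace period. */
	call_srcu(&my_srcu, &p->rh, my_free_cb);
}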

/*
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	struct rcu_synchronize rs;

	srcu_lock_sync(&ssp->dep_map);

	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;

	might_sleep();
	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_srcu(ssp, &rs.head, wakeme_after_rcu);
	wait_for_completion(&rs.completion);
	destroy_rcu_head_on_stack(&rs.head);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
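
/*
 * Illustrative sketch (not part of the original file) of the classic
 * synchronous update pattern, again reusing the hypothetical
 * my_ptr/my_srcu from above.  Must not be invoked from within a my_srcu
 * read-side critical section, which is exactly what the lockdep checks
 * above diagnose.
 */
static void __maybe_unused my_update(struct my_data *newp)
{
	struct my_data *oldp;

	/* A real caller would pass its update-side lock condition, not true. */
	oldp = rcu_replace_pointer(my_ptr, newp, true);
	synchronize_srcu(&my_srcu);	/* Wait out pre-existing readers. */
	kfree(oldp);	/* No reader can still hold a reference. */
}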

/*
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret;

	barrier();
	/*
	 * ->srcu_idx is incremented twice per grace period, so the cookie
	 * is the (even) value that ->srcu_idx will have once the first
	 * full grace period beginning after this call has ended.
	 */
	ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;
	barrier();
	return ret;
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);

/*
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 *
 * The difference between this and get_state_synchronize_srcu() is that
 * this function ensures that the poll_state_synchronize_srcu() will
 * eventually return the value true.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret;

	preempt_disable();	// Needed for PREEMPT_LAZY
	ret = get_state_synchronize_srcu(ssp);
	srcu_gp_start_if_needed(ssp);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/*
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	unsigned long cur_s = READ_ONCE(ssp->srcu_idx);

	barrier();
	/*
	 * The final comparison catches cookies so old that ->srcu_idx has
	 * wrapped around past them: a valid cookie can be at most three
	 * ahead of ->srcu_idx, so any greater apparent gap means wrap.
	 */
	return cookie == SRCU_GET_STATE_COMPLETED ||
	       ULONG_CMP_GE(cur_s, cookie) || ULONG_CMP_LT(cur_s, cookie - 3);
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
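
/*
 * Illustrative sketch (not part of the original file) of the polled
 * grace-period interfaces used together, once more against the
 * hypothetical my_srcu domain.
 */
static void __maybe_unused my_polled_wait(void)
{
	unsigned long cookie;

	cookie = start_poll_synchronize_srcu(&my_srcu);	/* Starts a GP. */
	while (!poll_state_synchronize_srcu(&my_srcu, cookie))
		schedule_timeout_uninterruptible(1);	/* Or do other work. */
	/* A full SRCU grace period has elapsed since the cookie was taken. */
}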

#ifndef CONFIG_TREE_RCU
/* Lockdep diagnostics. */
void __init rcu_scheduler_starting(void)
{
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}
#endif // #ifndef CONFIG_TREE_RCU

/*
 * Queue work for srcu_struct structures with early boot callbacks.
 * The work won't actually execute until the workqueue initialization
 * phase that takes place after the scheduler starts.
 */
void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list,
				       struct srcu_struct, srcu_work.entry);
		list_del_init(&ssp->srcu_work.entry);
		schedule_work(&ssp->srcu_work);
	}
}