xref: /freebsd/sys/compat/linuxkpi/common/src/linux_rcu.c (revision 184c1b943937986c81e1996d999d21626ec7a4ff)
/*-
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017-2020 Hans Petter Selasky (hselasky@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/kdb.h>

#include <ck_epoch.h>

#include <linux/rcupdate.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/compat.h>

/*
 * When CONFIG_NO_RCU_SKIP is defined, LinuxKPI RCU locks and asserts
 * will not be skipped during panic().
 */
#ifdef CONFIG_NO_RCU_SKIP
#define	RCU_SKIP(void) 0
#else
#define	RCU_SKIP(void)	unlikely(SCHEDULER_STOPPED() || kdb_active)
#endif

struct callback_head {
	STAILQ_ENTRY(callback_head) entry;
	rcu_callback_t func;
};

struct linux_epoch_head {
	STAILQ_HEAD(, callback_head) cb_head;
	struct mtx lock;
	struct task task;
} __aligned(CACHE_LINE_SIZE);

struct linux_epoch_record {
	ck_epoch_record_t epoch_record;
	TAILQ_HEAD(, task_struct) ts_head;
	int cpuid;
	int type;
} __aligned(CACHE_LINE_SIZE);

/*
 * Verify that "struct rcu_head" is big enough to hold "struct
 * callback_head". This avoids having to add special compile flags for
 * including ck_epoch.h to all clients of the LinuxKPI.
 */
CTASSERT(sizeof(struct rcu_head) == sizeof(struct callback_head));

/*
 * Verify that "epoch_record" is at the beginning of "struct
 * linux_epoch_record":
 */
CTASSERT(offsetof(struct linux_epoch_record, epoch_record) == 0);

CTASSERT(TS_RCU_TYPE_MAX == RCU_TYPE_MAX);

static ck_epoch_t linux_epoch[RCU_TYPE_MAX];
static struct linux_epoch_head linux_epoch_head[RCU_TYPE_MAX];
DPCPU_DEFINE_STATIC(struct linux_epoch_record, linux_epoch_record[RCU_TYPE_MAX]);

static void linux_rcu_cleaner_func(void *, int);

static void
linux_rcu_runtime_init(void *arg __unused)
{
	struct linux_epoch_head *head;
	int i;
	int j;

	for (j = 0; j != RCU_TYPE_MAX; j++) {
		ck_epoch_init(&linux_epoch[j]);

		head = &linux_epoch_head[j];

		mtx_init(&head->lock, "LRCU-HEAD", NULL, MTX_DEF);
		TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, head);
		STAILQ_INIT(&head->cb_head);

		CPU_FOREACH(i) {
			struct linux_epoch_record *record;

			record = &DPCPU_ID_GET(i, linux_epoch_record[j]);

			record->cpuid = i;
			record->type = j;
			ck_epoch_register(&linux_epoch[j],
			    &record->epoch_record, NULL);
			TAILQ_INIT(&record->ts_head);
		}
	}
}
SYSINIT(linux_rcu_runtime, SI_SUB_CPU, SI_ORDER_ANY, linux_rcu_runtime_init, NULL);

static void
linux_rcu_runtime_uninit(void *arg __unused)
{
	struct linux_epoch_head *head;
	int j;

	for (j = 0; j != RCU_TYPE_MAX; j++) {
		head = &linux_epoch_head[j];

		mtx_destroy(&head->lock);
	}
}
SYSUNINIT(linux_rcu_runtime, SI_SUB_LOCK, SI_ORDER_SECOND, linux_rcu_runtime_uninit, NULL);

static void
linux_rcu_cleaner_func(void *context, int pending __unused)
{
	struct linux_epoch_head *head;
	struct callback_head *rcu;
	STAILQ_HEAD(, callback_head) tmp_head;
	uintptr_t offset;

	linux_set_current(curthread);

	head = context;

	/* move current callbacks into a local queue */
	mtx_lock(&head->lock);
	STAILQ_INIT(&tmp_head);
	STAILQ_CONCAT(&tmp_head, &head->cb_head);
	mtx_unlock(&head->lock);

	/* synchronize */
	linux_synchronize_rcu(head - linux_epoch_head);

	/* dispatch all callbacks, if any */
	while ((rcu = STAILQ_FIRST(&tmp_head)) != NULL) {
		STAILQ_REMOVE_HEAD(&tmp_head, entry);

		/*
		 * A small "func" value is not a function pointer, but
		 * the kfree_rcu() offset of the rcu_head within the
		 * object to be freed.
		 */
		offset = (uintptr_t)rcu->func;

		if (offset < LINUX_KFREE_RCU_OFFSET_MAX)
			kfree((char *)rcu - offset);
		else
			rcu->func((struct rcu_head *)rcu);
	}
}

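/*
 * Illustrative sketch, not part of the original source: this is how a
 * LinuxKPI consumer typically hands memory to the cleaner above.  The
 * kfree_rcu(ptr, field) macro from <linux/rcupdate.h> encodes the byte
 * offset of the embedded rcu_head as the "callback" value, which is
 * what the LINUX_KFREE_RCU_OFFSET_MAX comparison in
 * linux_rcu_cleaner_func() detects.  "struct foo" is a made-up example
 * type.
 */
#if 0	/* example only */
struct foo {
	int value;
	struct rcu_head rcu;	/* embedded head used for deferred free */
};

static void
foo_release(struct foo *fp)
{
	/*
	 * Queues &fp->rcu with func == offsetof(struct foo, rcu); the
	 * cleaner later calls kfree((char *)&fp->rcu - offset), i.e.
	 * kfree(fp), once a grace period has elapsed.
	 */
	kfree_rcu(fp, rcu);
}
#endif
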
void
linux_rcu_read_lock(unsigned type)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	/*
	 * Pin thread to current CPU so that the unlock code gets the
	 * same per-CPU epoch record:
	 */
	sched_pin();

	record = &DPCPU_GET(linux_epoch_record[type]);
	ts = current;

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_begin(). Apart from that, this function supports
	 * recursion.
	 */
	critical_enter();
	ck_epoch_begin(&record->epoch_record, NULL);
	ts->rcu_recurse[type]++;
	if (ts->rcu_recurse[type] == 1)
		TAILQ_INSERT_TAIL(&record->ts_head, ts, rcu_entry[type]);
	critical_exit();
}

void
linux_rcu_read_unlock(unsigned type)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	record = &DPCPU_GET(linux_epoch_record[type]);
	ts = current;

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_end(). Apart from that, this function supports
	 * recursion.
	 */
	critical_enter();
	ck_epoch_end(&record->epoch_record, NULL);
	ts->rcu_recurse[type]--;
	if (ts->rcu_recurse[type] == 0)
		TAILQ_REMOVE(&record->ts_head, ts, rcu_entry[type]);
	critical_exit();

	sched_unpin();
}

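/*
 * Minimal reader sketch, not part of the original source: the usual
 * LinuxKPI entry points rcu_read_lock(), rcu_read_unlock() and
 * rcu_dereference() from <linux/rcupdate.h> map onto the two functions
 * above with RCU_TYPE_REGULAR.  "struct foo" and "gbl_foo" are made-up
 * names.
 */
#if 0	/* example only */
struct foo {
	int value;
};

static struct foo *gbl_foo;	/* hypothetical RCU-protected pointer */

static int
foo_read_value(void)
{
	struct foo *fp;
	int v;

	rcu_read_lock();	/* pins the thread to the current CPU */
	fp = rcu_dereference(gbl_foo);
	v = (fp != NULL) ? fp->value : -1;
	rcu_read_unlock();

	return (v);
}
#endif
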
static void
linux_synchronize_rcu_cb(ck_epoch_t *epoch __unused, ck_epoch_record_t *epoch_record, void *arg __unused)
{
	struct linux_epoch_record *record =
	    container_of(epoch_record, struct linux_epoch_record, epoch_record);
	struct thread *td = curthread;
	struct task_struct *ts;

	/* check if blocked on the current CPU */
	if (record->cpuid == PCPU_GET(cpuid)) {
		bool is_sleeping = false;
		u_char prio = 0;

		/*
		 * Find the lowest priority or sleeping thread which
		 * is blocking synchronization on this CPU core. All
		 * the threads in the queue are CPU-pinned and cannot
		 * go anywhere while the current thread is locked.
		 */
		TAILQ_FOREACH(ts, &record->ts_head, rcu_entry[record->type]) {
			if (ts->task_thread->td_priority > prio)
				prio = ts->task_thread->td_priority;
			is_sleeping |= (ts->task_thread->td_inhibitors != 0);
		}

		if (is_sleeping) {
			thread_unlock(td);
			pause("W", 1);
			thread_lock(td);
		} else {
			/* set new thread priority */
			sched_prio(td, prio);
			/* task switch */
			mi_switch(SW_VOL | SWT_RELINQUISH);
			/*
			 * It is important that the thread lock is
			 * dropped while yielding, to allow other
			 * threads to acquire the lock pointed to by
			 * TDQ_LOCKPTR(td). Currently mi_switch() will
			 * unlock the thread lock before returning.
			 * Otherwise a deadlock-like situation might
			 * happen.
			 */
			thread_lock(td);
		}
	} else {
		/*
		 * To avoid spinning, move execution to the other CPU
		 * which is blocking synchronization. Set the highest
		 * thread priority so that this code gets run. The
		 * thread priority will be restored later.
		 */
		sched_prio(td, 0);
		sched_bind(td, record->cpuid);
	}
}

void
linux_synchronize_rcu(unsigned type)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_synchronize_rcu() can sleep");

	td = curthread;
	DROP_GIANT();

	/*
	 * Synchronizing RCU might change the CPU core this function
	 * is running on. Save current values:
	 */
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&linux_epoch[type],
	    &linux_synchronize_rcu_cb, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);

	PICKUP_GIANT();
}

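/*
 * Sketch of the update side served by linux_synchronize_rcu(), not part
 * of the original source: it assumes the LinuxKPI rcu_assign_pointer()
 * and synchronize_rcu() wrappers and reuses the made-up "gbl_foo"
 * pointer from the reader sketch above.  Serialization of concurrent
 * updaters is assumed to be handled by the caller.
 */
#if 0	/* example only */
static void
foo_replace(struct foo *new_fp)
{
	struct foo *old_fp;

	old_fp = gbl_foo;
	rcu_assign_pointer(gbl_foo, new_fp);	/* publish the new version */
	synchronize_rcu();	/* wait out all pre-existing readers */
	kfree(old_fp);		/* no reader can still hold the old pointer */
}
#endif
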
void
linux_rcu_barrier(unsigned type)
{
	struct linux_epoch_head *head;

	MPASS(type < RCU_TYPE_MAX);

	linux_synchronize_rcu(type);

	head = &linux_epoch_head[type];

	/* wait for callbacks to complete */
	taskqueue_drain(taskqueue_fast, &head->task);
}

void
linux_call_rcu(unsigned type, struct rcu_head *context, rcu_callback_t func)
{
	struct callback_head *rcu;
	struct linux_epoch_head *head;

	MPASS(type < RCU_TYPE_MAX);

	rcu = (struct callback_head *)context;
	head = &linux_epoch_head[type];

	mtx_lock(&head->lock);
	rcu->func = func;
	STAILQ_INSERT_TAIL(&head->cb_head, rcu, entry);
	taskqueue_enqueue(taskqueue_fast, &head->task);
	mtx_unlock(&head->lock);
}

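/*
 * Sketch of the asynchronous reclaim path that feeds linux_call_rcu(),
 * not part of the original source: it assumes the LinuxKPI call_rcu()
 * and rcu_barrier() wrappers from <linux/rcupdate.h>; "struct foo" and
 * the helper names are made up.
 */
#if 0	/* example only */
struct foo {
	int value;
	struct rcu_head rcu;
};

static void
foo_rcu_free(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rcu);

	kfree(fp);
}

static void
foo_retire(struct foo *fp)
{
	/* queued on the per-type callback list and run from taskqueue_fast */
	call_rcu(&fp->rcu, foo_rcu_free);
}

static void
foo_unload(void)
{
	/* wait for a grace period and for all queued callbacks to finish */
	rcu_barrier();
}
#endif
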
int
init_srcu_struct(struct srcu_struct *srcu)
{
	return (0);
}

void
cleanup_srcu_struct(struct srcu_struct *srcu)
{
}

int
srcu_read_lock(struct srcu_struct *srcu)
{
	linux_rcu_read_lock(RCU_TYPE_SLEEPABLE);
	return (0);
}

void
srcu_read_unlock(struct srcu_struct *srcu, int key __unused)
{
	linux_rcu_read_unlock(RCU_TYPE_SLEEPABLE);
}

void
synchronize_srcu(struct srcu_struct *srcu)
{
	linux_synchronize_rcu(RCU_TYPE_SLEEPABLE);
}

void
srcu_barrier(struct srcu_struct *srcu)
{
	linux_rcu_barrier(RCU_TYPE_SLEEPABLE);
}

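/*
 * Sketch of how the sleepable (SRCU) wrappers above are used, not part
 * of the original source: all srcu_struct instances share the single
 * RCU_TYPE_SLEEPABLE domain in this implementation.  "my_srcu" is a
 * made-up name.
 */
#if 0	/* example only */
static struct srcu_struct my_srcu;

static void
foo_srcu_example(void)
{
	int idx;

	(void)init_srcu_struct(&my_srcu);

	idx = srcu_read_lock(&my_srcu);
	/* sleepable read-side section, tracked as RCU_TYPE_SLEEPABLE */
	srcu_read_unlock(&my_srcu, idx);

	synchronize_srcu(&my_srcu);	/* waits only for sleepable readers */

	cleanup_srcu_struct(&my_srcu);
}
#endif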