xref: /freebsd/sys/kern/subr_epoch.c (revision 1ade1c411307f58d4518938883116c31766faaf7)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <ck_epoch.h>

static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 100
#define MAX_EPOCHS 64

CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");

/* Stats. */
static counter_u64_t block_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");

TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

epoch_t	allepochs[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
static __read_mostly int epoch_count;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static uma_zone_t pcpu_zone_record;

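/*
 * epoch_init() allocates the statistics counters, creates the UMA zone used
 * for the per-CPU epoch records, attaches a deferred-callback grouptask to
 * every CPU and finally allocates the two global epochs.  It runs from
 * SYSINIT once the taskqueues are available.
 */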
static void
epoch_init(void *arg __unused)
{
	int cpu;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	epoch_call_count = counter_u64_alloc(M_WAITOK);
	epoch_call_task_count = counter_u64_alloc(M_WAITOK);

	pcpu_zone_record = uma_zcreate("epoch_record pcpu",
	    sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	CPU_FOREACH(cpu) {
		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
		    epoch_call_task, NULL);
		taskqgroup_attach_cpu(qgroup_softirq,
		    DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1,
		    "epoch call task");
	}
	inited = 1;
	global_epoch = epoch_alloc(0);
	global_epoch_preempt = epoch_alloc(EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
	inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif

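/*
 * Initialize the per-CPU state of a newly allocated epoch: every CPU gets a
 * read and a write CK epoch record plus an empty list of threads currently
 * inside a section of this epoch on that CPU.
 */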
static void
epoch_ctor(epoch_t epoch)
{
	epoch_record_t er;
	int cpu;

	epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		bzero(er, sizeof(*er));
		ck_epoch_register(&epoch->e_epoch, &er->er_read_record, NULL);
		ck_epoch_register(&epoch->e_epoch, &er->er_write_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		er->er_cpuid = cpu;
	}
}

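/*
 * Allocate and register a new epoch.  Passing EPOCH_PREEMPT selects the
 * preemptible variant, whose sections may be preempted and are tracked on
 * the per-CPU thread lists consulted by epoch_wait_preempt().
 */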
epoch_t
epoch_alloc(int flags)
{
	epoch_t epoch;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);
	epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK);
	ck_epoch_init(&epoch->e_epoch);
	epoch_ctor(epoch);
	MPASS(epoch_count < MAX_EPOCHS - 2);
	epoch->e_flags = flags;
	epoch->e_idx = epoch_count;
	allepochs[epoch_count++] = epoch;
	return (epoch);
}

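/*
 * Release an epoch.  The caller must ensure that no thread is still inside
 * a section of this epoch; the epoch_wait() on the global epoch guarantees
 * that epoch_call_task() no longer sees the stale allepochs[] slot before
 * the per-CPU records are freed.
 */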
void
epoch_free(epoch_t epoch)
{
#ifdef INVARIANTS
	struct epoch_record *er;
	int cpu;

	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		MPASS(TAILQ_EMPTY(&er->er_tdlist));
	}
#endif
	allepochs[epoch->e_idx] = NULL;
	epoch_wait(global_epoch);
	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
	free(epoch, M_EPOCH);
}

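/*
 * The *_KBI functions below are out-of-line wrappers around the inline
 * enter/exit operations, kept so that the kernel binary interface stays
 * stable for consumers that were not built against the inline versions.
 *
 * Illustrative use of the preemptible API (a sketch, not code in this
 * file): a reader brackets its lockless accesses with an on-stack tracker,
 *
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(epoch, &et);
 *	... read epoch-protected data ...
 *	epoch_exit_preempt(epoch, &et);
 *
 * while writers defer frees with epoch_call() or block in
 * epoch_wait_preempt() until all such readers have drained.
 */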
void
epoch_enter_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
{

	epoch_enter_preempt(epoch, et);
}

void
epoch_exit_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
{

	epoch_exit_preempt(epoch, et);
}

void
epoch_enter_KBI(epoch_t epoch)
{

	epoch_enter(epoch);
}

void
epoch_exit_KBI(epoch_t epoch)
{

	epoch_exit(epoch);
}

/*
 * epoch_block_handler_preempt() is a callback from the CK code when another
 * thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused, ck_epoch_record_t *cr,
    void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *owner, *curwaittd;
	struct epoch_thread *tdwait;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;
	int locksheld __unused;

	record = __containerof(cr, struct epoch_record, er_read_record);
	td = curthread;
	locksheld = td->td_locks;
	spincount = 0;
	counter_u64_add(block_count, 1);
	/*
	 * We lost a race and there are no longer any threads
	 * on the CPU in an epoch section.
	 */
	if (TAILQ_EMPTY(&record->er_tdlist))
		return;

	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		gen = record->er_gen;
		thread_unlock(td);
		/*
		 * We can't actually check if the waiting thread is running,
		 * so we simply poll for it to exit before giving up and
		 * migrating.
		 */
		do {
			cpu_spinwait();
		} while (!TAILQ_EMPTY(&record->er_tdlist) &&
		    gen == record->er_gen &&
		    spincount++ < MAX_ADAPTIVE_SPIN);
		thread_lock(td);
		/*
		 * If the generation has changed we can poll again;
		 * otherwise we need to migrate.
		 */
		if (gen != record->er_gen)
			return;
		/*
		 * Being on the same CPU as that of the record on which
		 * we need to wait allows us access to the thread
		 * list associated with that CPU. We can then examine the
		 * oldest thread in the queue and wait on its turnstile
		 * until it resumes and so on until a grace period
		 * elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the CK code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and raise its
	 * priority to ours so that it can run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them. They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		curwaittd = tdwait->et_td;
		if (!TD_IS_INHIBITED(curwaittd) &&
		    curwaittd->td_priority > td->td_priority) {
			critical_enter();
			thread_unlock(td);
			thread_lock(curwaittd);
			sched_prio(curwaittd, td->td_priority);
			thread_unlock(curwaittd);
			thread_lock(td);
			critical_exit();
		}
		if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
		    ((ts = curwaittd->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock. Before unlocking it we enter a
			 * critical section to prevent preemption after we
			 * reenable interrupts by dropping the thread lock in
			 * order to prevent curwaittd from getting to run.
			 */
			critical_enter();
			thread_unlock(td);
			owner = turnstile_lock(ts, &lock);
			/*
			 * The owner pointer indicates that the lock succeeded.
			 * Only in the case that we hold the lock and the
			 * turnstile we locked is still the one that curwaittd
			 * is blocked on can we continue. Otherwise the
			 * turnstile pointer has been changed out from
			 * underneath us, as in the case where the lock holder
			 * has signalled curwaittd, and we need to move on to
			 * the next thread.
			 */
			if (owner != NULL && ts == curwaittd->td_blocked) {
				MPASS(TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd));
				critical_exit();
				turnstile_wait(ts, owner, curwaittd->td_tsqueue);
				counter_u64_add(turnstile_count, 1);
				thread_lock(td);
				return;
			} else if (owner != NULL)
				turnstile_unlock(ts, lock);
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == locksheld,
			    ("%d extra locks held", td->td_locks - locksheld));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock,
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td). Else a
	 * deadlock-like situation might happen. (HPS)
	 */
	thread_unlock(td);
	thread_lock(td);
}

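/*
 * Wait until every thread that was inside a preemptible section of this
 * epoch when we were called has exited it.  The thread temporarily binds
 * itself to each CPU that still has waiters; epoch_block_handler_preempt()
 * above lends priority to, or turnstile-waits on, the threads holding up
 * the grace period.
 */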
void
epoch_wait_preempt(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;
	int locks __unused;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;
#ifdef INVARIANTS
	locks = curthread->td_locks;
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "epoch_wait() can be long running");
	KASSERT(!in_epoch(epoch),
	    ("epoch_wait_preempt() called in the middle "
	    "of an epoch section of the same epoch"));
#endif
	thread_lock(td);
	DROP_GIANT();

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch,
	    epoch_block_handler_preempt, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}

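/*
 * Non-preemptible sections run inside a critical section, so all a blocked
 * waiter can usefully do is spin until the CPU in question leaves its
 * section.
 */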
static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
	cpu_spinwait();
}

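/*
 * Wait for a grace period on a non-preemptible epoch.  Its sections cannot
 * sleep or be preempted, so the wait is expected to be short and is done
 * entirely within a critical section.
 */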
void
epoch_wait(epoch_t epoch)
{

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags == 0);
	critical_enter();
	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
	critical_exit();
}

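/*
 * Defer a callback until a grace period has elapsed; it will later be run
 * from the per-CPU epoch_cb_task.  A typical caller (illustrative only, the
 * struct and names below are hypothetical) embeds the epoch_context in the
 * object being reclaimed:
 *
 *	struct foo {
 *		...
 *		struct epoch_context f_ctx;
 *	};
 *
 *	static void
 *	foo_free_cb(epoch_context_t ctx)
 *	{
 *		free(__containerof(ctx, struct foo, f_ctx), M_FOO);
 *	}
 *
 *	epoch_call(epoch, &foo->f_ctx, foo_free_cb);
 *
 * Very early in boot, before the epochs exist, the callback is simply run
 * synchronously.
 */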
void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t))
{
	epoch_record_t er;
	ck_epoch_entry_t *cb;

	cb = (void *)ctx;

	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		goto boottime;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		goto boottime;
#endif

	critical_enter();
	*DPCPU_PTR(epoch_cb_count) += 1;
	er = epoch_currecord(epoch);
	ck_epoch_call(&er->er_write_record, cb, (ck_epoch_cb_t *)callback);
	critical_exit();
	return;
boottime:
	callback(ctx);
}

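/*
 * Grouptask handler that drains this CPU's deferred callbacks: for every
 * registered epoch, poll the write record, collect the callbacks whose
 * grace period has expired onto a local stack, and invoke them after
 * leaving the global epoch section.
 */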
static void
epoch_call_task(void *arg __unused)
{
	ck_stack_entry_t *cursor, *head, *next;
	ck_epoch_record_t *record;
	epoch_record_t er;
	epoch_t epoch;
	ck_stack_t cb_stack;
	int i, npending, total;

	ck_stack_init(&cb_stack);
	critical_enter();
	epoch_enter(global_epoch);
	for (total = i = 0; i < epoch_count; i++) {
		if (__predict_false((epoch = allepochs[i]) == NULL))
			continue;
		er = epoch_currecord(epoch);
		record = &er->er_write_record;
		if ((npending = record->n_pending) == 0)
			continue;
		ck_epoch_poll_deferred(record, &cb_stack);
		total += npending - record->n_pending;
	}
	epoch_exit(global_epoch);
	*DPCPU_PTR(epoch_cb_count) -= total;
	critical_exit();

	counter_u64_add(epoch_call_count, total);
	counter_u64_add(epoch_call_task_count, 1);

	head = ck_stack_batch_pop_npsc(&cb_stack);
	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}
}

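/*
 * Report whether curthread is currently inside a section of the given
 * epoch, based on the per-CPU list of threads in preemptible sections.
 * With dump_onfail set (and INVARIANTS), the list is printed on a miss to
 * aid debugging.
 */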
int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
	struct epoch_thread *tdwait;
	struct thread *td;
	epoch_record_t er;

	td = curthread;
	if (td->td_epochnest == 0)
		return (0);
	if (__predict_false((epoch) == NULL))
		return (0);
	critical_enter();
	er = epoch_currecord(epoch);
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == td) {
			critical_exit();
			return (1);
		}
#ifdef INVARIANTS
	if (dump_onfail) {
		MPASS(td->td_pinned);
		printf("cpu: %d id: %d\n", curcpu, td->td_tid);
		TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
			printf("td_tid: %d ", tdwait->et_td->td_tid);
		printf("\n");
	}
#endif
	critical_exit();
	return (0);
}

int
in_epoch(epoch_t epoch)
{
	return (in_epoch_verbose(epoch, 0));
}

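/*
 * Set a thread's priority with the thread lock held.  The epoch exit path
 * uses this to restore the priority saved at section entry after a waiter
 * in epoch_wait_preempt() lent the thread its priority.
 */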
void
epoch_adjust_prio(struct thread *td, u_char prio)
{
	thread_lock(td);
	sched_prio(td, prio);
	thread_unlock(td);
}
557