xref: /freebsd/sys/kern/subr_epoch.c (revision 48d41ef0fb2a73cd29a75544e164a59829c29351)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <ck_epoch.h>

static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 5000

#define EPOCH_EXITING 0x1
#ifdef __amd64__
#define EPOCH_ALIGN (CACHE_LINE_SIZE * 2)
#else
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif

SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");

static int poll_intvl;
SYSCTL_INT(_kern_epoch, OID_AUTO, poll_intvl, CTLFLAG_RWTUN,
    &poll_intvl, 0, "# of ticks to wait between garbage collecting deferred frees");
/* Stats. */
static counter_u64_t block_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");

typedef struct epoch_cb {
	void (*ec_callback)(epoch_context_t);
	STAILQ_ENTRY(epoch_cb) ec_link;
} *epoch_cb_t;

TAILQ_HEAD(threadlist, thread);

typedef struct epoch_record {
	ck_epoch_record_t er_record;
	volatile struct threadlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
} *epoch_record_t;

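/*
 * Per-CPU state: the CK epoch record for this CPU together with the list
 * of callbacks deferred from it, padded to EPOCH_ALIGN to limit false
 * sharing between adjacent CPUs.
 */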
struct epoch_pcpu_state {
	struct epoch_record eps_record;
	STAILQ_HEAD(, epoch_cb) eps_cblist;
} __aligned(EPOCH_ALIGN);

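/*
 * An epoch instance: the CK epoch state, the callout and grouptask used
 * to drain deferred callbacks, and the per-CPU state.  e_pcpu_dom[] holds
 * one allocation per memory domain, while e_pcpu[] indexes the same
 * records by CPU id.
 */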
struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	struct grouptask e_gtask;
	struct callout e_timer;
	struct mtx e_lock;
	int e_flags;
	/* make sure that immutable data doesn't overlap with the gtask, callout, and mutex */
	struct epoch_pcpu_state *e_pcpu_dom[MAXMEMDOM] __aligned(EPOCH_ALIGN);
	counter_u64_t e_frees;
	uint64_t e_free_last;
	struct epoch_pcpu_state *e_pcpu[0];
};

static __read_mostly int domcount[MAXMEMDOM];
static __read_mostly int domoffsets[MAXMEMDOM];
static __read_mostly int inited;
__read_mostly epoch_t global_epoch;

static void epoch_call_task(void *context);

#if defined(__powerpc64__) || defined(__powerpc__) || !defined(NUMA)
static bool usedomains = false;
#else
static bool usedomains = true;
#endif
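/*
 * Boot-time setup: size the per-domain CPU counts and offsets used by the
 * NUMA-aware layout, fall back to the flat layout if any domain reports
 * no CPUs, then allocate the global epoch.
 */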
static void
epoch_init(void *arg __unused)
{
	int domain, count;

	if (poll_intvl == 0)
		poll_intvl = hz;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	if (usedomains == false)
		goto done;
	count = domain = 0;
	domoffsets[0] = 0;
	for (domain = 0; domain < vm_ndomains; domain++) {
		domcount[domain] = CPU_COUNT(&cpuset_domain[domain]);
		if (bootverbose)
			printf("domcount[%d] %d\n", domain, domcount[domain]);
	}
	for (domain = 1; domain < vm_ndomains; domain++)
		domoffsets[domain] = domoffsets[domain-1] + domcount[domain-1];

	for (domain = 0; domain < vm_ndomains; domain++) {
		if (domcount[domain] == 0) {
			usedomains = false;
			break;
		}
	}
 done:
	inited = 1;
	global_epoch = epoch_alloc();
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);

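/*
 * Allocate one epoch_pcpu_state per CPU out of domain-local memory and
 * register each embedded CK record with the epoch.
 */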
static void
epoch_init_numa(epoch_t epoch)
{
	int domain, cpu_offset;
	struct epoch_pcpu_state *eps;
	epoch_record_t er;

	for (domain = 0; domain < vm_ndomains; domain++) {
		eps = malloc_domain(sizeof(*eps)*domcount[domain], M_EPOCH,
		    domain, M_ZERO|M_WAITOK);
		epoch->e_pcpu_dom[domain] = eps;
		cpu_offset = domoffsets[domain];
		for (int i = 0; i < domcount[domain]; i++, eps++) {
			epoch->e_pcpu[cpu_offset + i] = eps;
			er = &eps->eps_record;
			STAILQ_INIT(&eps->eps_cblist);
			ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
			TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
			er->er_cpuid = cpu_offset + i;
		}
	}
}

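/*
 * Non-NUMA fallback: one contiguous array of per-CPU state for all CPUs.
 */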
static void
epoch_init_legacy(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	epoch_record_t er;

	eps = malloc(sizeof(*eps)*mp_ncpus, M_EPOCH, M_ZERO|M_WAITOK);
	epoch->e_pcpu_dom[0] = eps;
	for (int i = 0; i < mp_ncpus; i++, eps++) {
		epoch->e_pcpu[i] = eps;
		er = &eps->eps_record;
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		STAILQ_INIT(&eps->eps_cblist);
		er->er_cpuid = i;
	}
}

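/*
 * Periodic poll: if enough deferred frees have accumulated since the last
 * pass, enqueue the grouptask that runs the callbacks, then rearm unless
 * the epoch is being torn down.
 */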
static void
epoch_callout(void *arg)
{
	epoch_t epoch;
	uint64_t frees;

	epoch = arg;
	frees = counter_u64_fetch(epoch->e_frees);
	/* pick some better value */
	if (frees - epoch->e_free_last > 10) {
		GROUPTASK_ENQUEUE(&epoch->e_gtask);
		epoch->e_free_last = frees;
	}
	if ((epoch->e_flags & EPOCH_EXITING) == 0)
		callout_reset(&epoch->e_timer, poll_intvl, epoch_callout, epoch);
}

epoch_t
epoch_alloc(void)
{
	epoch_t epoch;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);
	epoch = malloc(sizeof(struct epoch) + mp_ncpus*sizeof(void*),
	    M_EPOCH, M_ZERO|M_WAITOK);
	ck_epoch_init(&epoch->e_epoch);
	epoch->e_frees = counter_u64_alloc(M_WAITOK);
	mtx_init(&epoch->e_lock, "epoch callout", NULL, MTX_DEF);
	callout_init_mtx(&epoch->e_timer, &epoch->e_lock, 0);
	taskqgroup_config_gtask_init(epoch, &epoch->e_gtask, epoch_call_task, "epoch call task");
	if (usedomains)
		epoch_init_numa(epoch);
	else
		epoch_init_legacy(epoch);
	callout_reset(&epoch->e_timer, poll_intvl, epoch_callout, epoch);
	return (epoch);
}

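/*
 * Tear down an epoch: mark it as exiting so the callout stops rearming,
 * push any lingering callbacks through the grouptask, then drain and
 * release all resources.
 */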
void
epoch_free(epoch_t epoch)
{
	int domain;
#ifdef INVARIANTS
	struct epoch_pcpu_state *eps;
	int cpu;

	CPU_FOREACH(cpu) {
		eps = epoch->e_pcpu[cpu];
		MPASS(TAILQ_EMPTY(&eps->eps_record.er_tdlist));
	}
#endif
	mtx_lock(&epoch->e_lock);
	epoch->e_flags |= EPOCH_EXITING;
	mtx_unlock(&epoch->e_lock);
	/*
	 * Execute any lingering callbacks
	 */
	GROUPTASK_ENQUEUE(&epoch->e_gtask);
	gtaskqueue_drain(epoch->e_gtask.gt_taskqueue, &epoch->e_gtask.gt_task);
	callout_drain(&epoch->e_timer);
	mtx_destroy(&epoch->e_lock);
	counter_u64_free(epoch->e_frees);
	taskqgroup_config_gtask_deinit(&epoch->e_gtask);
	if (usedomains)
		for (domain = 0; domain < vm_ndomains; domain++)
			free_domain(epoch->e_pcpu_dom[domain], M_EPOCH);
	else
		free(epoch->e_pcpu_dom[0], M_EPOCH);
	free(epoch, M_EPOCH);
}

#define INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)

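/*
 * Enter a read-side section.  An illustrative sketch of the expected use
 * (the list and its readers below are hypothetical):
 *
 *	epoch_enter(global_epoch);
 *	TAILQ_FOREACH(obj, &some_list, obj_link)
 *		inspect(obj);
 *	epoch_exit(global_epoch);
 *
 * Sections may nest (td_epochnest); the thread is pinned to its CPU while
 * inside a section.
 */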
void
epoch_enter(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	struct thread *td;

	INIT_CHECK(epoch);

	td = curthread;
	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	td->td_epochnest++;
	MPASS(td->td_epochnest < UCHAR_MAX - 2);
	if (td->td_epochnest == 1)
		TAILQ_INSERT_TAIL(&eps->eps_record.er_tdlist, td, td_epochq);
#ifdef INVARIANTS
	if (td->td_epochnest > 1) {
		struct thread *curtd;
		int found = 0;

		TAILQ_FOREACH(curtd, &eps->eps_record.er_tdlist, td_epochq)
			if (curtd == td)
				found = 1;
		KASSERT(found, ("recursing on a second epoch"));
	}
#endif
	sched_pin();
	ck_epoch_begin(&eps->eps_record.er_record, NULL);
	critical_exit();
}

void
epoch_exit(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	struct thread *td;

	td = curthread;
	INIT_CHECK(epoch);
	MPASS(td->td_epochnest);
	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	sched_unpin();
	ck_epoch_end(&eps->eps_record.er_record, NULL);
	td->td_epochnest--;
	if (td->td_epochnest == 0)
		TAILQ_REMOVE(&eps->eps_record.er_tdlist, td, td_epochq);
	eps->eps_record.er_gen++;
	critical_exit();
}

335 
336 /*
337  * epoch_block_handler is a callback from the ck code when another thread is
338  * currently in an epoch section.
339  */
340 static void
341 epoch_block_handler(struct ck_epoch *global __unused, ck_epoch_record_t *cr,
342 					void *arg __unused)
343 {
344 	epoch_record_t record;
345 	struct epoch_pcpu_state *eps;
346 	struct thread *td, *tdwait, *owner;
347 	struct turnstile *ts;
348 	struct lock_object *lock;
349 	int spincount, gen;
350 
351 	eps = arg;
352 	record = __containerof(cr, struct epoch_record, er_record);
353 	td = curthread;
354 	spincount = 0;
355 	counter_u64_add(block_count, 1);
	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		if ((tdwait = TAILQ_FIRST(&record->er_tdlist)) != NULL &&
		    TD_IS_RUNNING(tdwait)) {
			gen = record->er_gen;
			thread_unlock(td);
			do {
				cpu_spinwait();
			} while (tdwait == TAILQ_FIRST(&record->er_tdlist) &&
			    gen == record->er_gen && TD_IS_RUNNING(tdwait) &&
			    spincount++ < MAX_ADAPTIVE_SPIN);
			thread_lock(td);
			return;
		}

		/*
		 * Being on the same CPU as that of the record on which
		 * we need to wait allows us access to the thread
		 * list associated with that CPU. We can then examine the
		 * oldest thread in the queue and wait on its turnstile
		 * until it resumes and so on until a grace period
		 * elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, td_epochq) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them. They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		if (!TD_IS_INHIBITED(tdwait) && tdwait->td_priority > td->td_priority) {
			thread_lock(tdwait);
			sched_prio(tdwait, td->td_priority);
			thread_unlock(tdwait);
		}
		if (TD_IS_INHIBITED(tdwait) && TD_ON_LOCK(tdwait) &&
		    ((ts = tdwait->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock. Before unlocking it we enter a
			 * critical section to prevent preemption after we
			 * reenable interrupts by dropping the thread lock in
			 * order to prevent tdwait from getting to run.
			 */
			critical_enter();
			thread_unlock(td);
			owner = turnstile_lock(ts, &lock);
			/*
			 * The owner pointer indicates that the lock succeeded.
			 * Only in case we hold the lock and the turnstile we
			 * locked is still the one that tdwait is blocked on
			 * can we continue. Otherwise the turnstile pointer has
			 * been changed out from underneath us, as in the case
			 * where the lock holder has signalled tdwait, and we
			 * need to continue.
			 */
			if (owner != NULL && ts == tdwait->td_blocked) {
				MPASS(TD_IS_INHIBITED(tdwait) && TD_ON_LOCK(tdwait));
				critical_exit();
				turnstile_wait(ts, owner, tdwait->td_tsqueue);
				counter_u64_add(turnstile_count, 1);
				thread_lock(td);
				return;
			} else if (owner != NULL)
				turnstile_unlock(ts, lock);
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == 0,
			    ("%d locks held", td->td_locks));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td). Else a
	 * deadlock-like situation might happen. (HPS)
	 */
	thread_unlock(td);
	thread_lock(td);
}

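/*
 * Wait for a grace period: block until every thread that was inside an
 * epoch section when we were called has exited it.  May sleep and must
 * not itself be called from within an epoch section.  A typical
 * synchronous reclamation sequence (hypothetical names) is:
 *
 *	unlink obj from the shared structure;
 *	epoch_wait(global_epoch);
 *	free(obj, M_SOMETHING);
 *
 * The block handler above may rebind us to other CPUs while we wait; the
 * original binding, pin count and priority are restored on return.
 */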
void
epoch_wait(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;
#ifdef INVARIANTS
	int locks;

	locks = curthread->td_locks;
#endif
	INIT_CHECK(epoch);

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "epoch_wait() can sleep");

	td = curthread;
	KASSERT(td->td_epochnest == 0, ("epoch_wait() in the middle of an epoch section"));
	thread_lock(td);

	DROP_GIANT();

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}

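/*
 * Asynchronous counterpart to epoch_wait(): defer a callback until a
 * grace period has elapsed.  The context is expected to be embedded in
 * the object being reclaimed.  If the epoch subsystem is not yet set up,
 * the callback is run immediately.
 */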
void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t))
{
	struct epoch_pcpu_state *eps;
	epoch_cb_t cb;

	cb = (void *)ctx;

	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL)) {
		callback(ctx);
		return;
	}
	MPASS(cb->ec_callback == NULL);
	MPASS(cb->ec_link.stqe_next == NULL);
	cb->ec_callback = callback;
	counter_u64_add(epoch->e_frees, 1);

	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	STAILQ_INSERT_HEAD(&eps->eps_cblist, cb, ec_link);
	critical_exit();
}
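/*
 * Deferred-free sketch (illustrative, hypothetical names): embed the
 * epoch context in the object so the callback can recover it.
 *
 *	struct obj {
 *		...
 *		struct epoch_context obj_ctx;
 *	};
 *
 *	static void
 *	obj_destroy(epoch_context_t ctx)
 *	{
 *		struct obj *o = __containerof(ctx, struct obj, obj_ctx);
 *
 *		free(o, M_SOMETHING);
 *	}
 *
 *	epoch_call(global_epoch, &o->obj_ctx, obj_destroy);
 */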

/*
 * Grouptask handler: bind to each CPU in turn to take over its deferred
 * callback list, wait for a grace period, then run the collected
 * callbacks.
 */
static void
epoch_call_task(void *context)
{
	struct epoch_pcpu_state *eps;
	epoch_t epoch;
	epoch_cb_t cb;
	struct thread *td;
	int cpu;
	STAILQ_HEAD(, epoch_cb) tmp_head;

	epoch = context;
	STAILQ_INIT(&tmp_head);
	td = curthread;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
		eps = epoch->e_pcpu[cpu];
		if (!STAILQ_EMPTY(&eps->eps_cblist))
			STAILQ_CONCAT(&tmp_head, &eps->eps_cblist);
	}
	sched_unbind(td);
	thread_unlock(td);
	epoch_wait(epoch);

	while ((cb = STAILQ_FIRST(&tmp_head)) != NULL) {
		STAILQ_REMOVE_HEAD(&tmp_head, ec_link);
		cb->ec_callback((void*)cb);
	}
}

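/*
 * Report whether the current thread is inside an epoch section.
 */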
int
in_epoch(void)
{
	return (curthread->td_epochnest != 0);
}
578