xref: /freebsd/sys/kern/subr_epoch.c (revision 2397aecf28352676c462122ead5ffe9b363b6cd0)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <ck_epoch.h>

static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 1000

#define EPOCH_EXITING 0x1
#ifdef __amd64__
#define EPOCH_ALIGN (CACHE_LINE_SIZE * 2)
#else
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif

CTASSERT(sizeof(epoch_section_t) == sizeof(ck_epoch_section_t));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");

static int poll_intvl;
SYSCTL_INT(_kern_epoch, OID_AUTO, poll_intvl, CTLFLAG_RWTUN,
    &poll_intvl, 0, "# of ticks to wait between garbage collecting deferred frees");
/* Stats. */
static counter_u64_t block_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");

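/*
 * A deferred-free callback record.  It is overlaid on the epoch_context
 * embedded in the client's object and queued on the current CPU's
 * callback list until a grace period has elapsed.
 */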
typedef struct epoch_cb {
	void (*ec_callback)(epoch_context_t);
	STAILQ_ENTRY(epoch_cb) ec_link;
} *epoch_cb_t;

TAILQ_HEAD(threadlist, thread);

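/*
 * Per-CPU record: the underlying ck_epoch record, the list of threads
 * currently in an epoch section on this CPU, and a generation counter
 * that is bumped on every epoch exit so that spinning waiters can
 * detect progress.
 */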
typedef struct epoch_record {
	ck_epoch_record_t er_record;
	volatile struct threadlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
} *epoch_record_t;

struct epoch_pcpu_state {
	struct epoch_record eps_record;
	STAILQ_HEAD(, epoch_cb) eps_cblist;
} __aligned(EPOCH_ALIGN);

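/*
 * An epoch instance: the global ck_epoch state, the deferred-free
 * grouptask and the callout that polls for pending frees, and pointers
 * to the per-CPU state, both per-domain and indexed by CPU id.
 */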
struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	struct grouptask e_gtask;
	struct callout e_timer;
	struct mtx e_lock;
	int e_flags;
	/* make sure that immutable data doesn't overlap with the gtask, callout, and mutex */
	struct epoch_pcpu_state *e_pcpu_dom[MAXMEMDOM] __aligned(EPOCH_ALIGN);
	counter_u64_t e_frees;
	uint64_t e_free_last;
	struct epoch_pcpu_state *e_pcpu[0];
};

static __read_mostly int domcount[MAXMEMDOM];
static __read_mostly int domoffsets[MAXMEMDOM];
static __read_mostly int inited;
__read_mostly epoch_t global_epoch;

static void epoch_call_task(void *context);

#if defined(__powerpc64__) || defined(__powerpc__) || !defined(NUMA)
static bool usedomains = false;
#else
static bool usedomains = true;
#endif
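/*
 * Boot-time initialization: allocate the stat counters, compute the
 * per-domain CPU counts and offsets used for NUMA-aware allocation
 * (falling back to the legacy layout if any domain reports no CPUs),
 * and create the global epoch.
 */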
static void
epoch_init(void *arg __unused)
{
	int domain;

	if (poll_intvl == 0)
		poll_intvl = hz;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	if (usedomains == false)
		goto done;
	domoffsets[0] = 0;
	for (domain = 0; domain < vm_ndomains; domain++) {
		domcount[domain] = CPU_COUNT(&cpuset_domain[domain]);
		if (bootverbose)
			printf("domcount[%d] %d\n", domain, domcount[domain]);
	}
	for (domain = 1; domain < vm_ndomains; domain++)
		domoffsets[domain] = domoffsets[domain-1] + domcount[domain-1];

	for (domain = 0; domain < vm_ndomains; domain++) {
		if (domcount[domain] == 0) {
			usedomains = false;
			break;
		}
	}
 done:
	inited = 1;
	global_epoch = epoch_alloc();
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);

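/*
 * Allocate and register the per-CPU state domain by domain so that
 * each CPU's record lives in memory local to its NUMA domain.
 */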
static void
epoch_init_numa(epoch_t epoch)
{
	int domain, cpu_offset;
	struct epoch_pcpu_state *eps;
	epoch_record_t er;

	for (domain = 0; domain < vm_ndomains; domain++) {
		eps = malloc_domain(sizeof(*eps)*domcount[domain], M_EPOCH,
		    domain, M_ZERO|M_WAITOK);
		epoch->e_pcpu_dom[domain] = eps;
		cpu_offset = domoffsets[domain];
		for (int i = 0; i < domcount[domain]; i++, eps++) {
			epoch->e_pcpu[cpu_offset + i] = eps;
			er = &eps->eps_record;
			STAILQ_INIT(&eps->eps_cblist);
			ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
			TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
			er->er_cpuid = cpu_offset + i;
		}
	}
}

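/*
 * Non-NUMA fallback: allocate all per-CPU state as a single contiguous
 * array, stored under domain 0.
 */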
static void
epoch_init_legacy(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	epoch_record_t er;

	eps = malloc(sizeof(*eps)*mp_ncpus, M_EPOCH, M_ZERO|M_WAITOK);
	epoch->e_pcpu_dom[0] = eps;
	for (int i = 0; i < mp_ncpus; i++, eps++) {
		epoch->e_pcpu[i] = eps;
		er = &eps->eps_record;
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		STAILQ_INIT(&eps->eps_cblist);
		er->er_cpuid = i;
	}
}

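/*
 * Periodic callout: if enough frees have been deferred since the last
 * pass, kick the grouptask that runs the pending callbacks, then
 * reschedule ourselves unless the epoch is being torn down.
 */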
static void
epoch_callout(void *arg)
{
	epoch_t epoch;
	uint64_t frees;

	epoch = arg;
	frees = counter_u64_fetch(epoch->e_frees);
	/* pick some better value */
	if (frees - epoch->e_free_last > 10) {
		GROUPTASK_ENQUEUE(&epoch->e_gtask);
		epoch->e_free_last = frees;
	}
	if ((epoch->e_flags & EPOCH_EXITING) == 0)
		callout_reset(&epoch->e_timer, poll_intvl, epoch_callout, epoch);
}

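/*
 * Allocate and initialize a new epoch: set up the underlying ck_epoch,
 * the free counter, the callout and its mutex, and the callback task,
 * then start the polling callout.
 */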
epoch_t
epoch_alloc(void)
{
	epoch_t epoch;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);
	epoch = malloc(sizeof(struct epoch) + mp_ncpus*sizeof(void*),
	    M_EPOCH, M_ZERO|M_WAITOK);
	ck_epoch_init(&epoch->e_epoch);
	epoch->e_frees = counter_u64_alloc(M_WAITOK);
	mtx_init(&epoch->e_lock, "epoch callout", NULL, MTX_DEF);
	callout_init_mtx(&epoch->e_timer, &epoch->e_lock, 0);
	taskqgroup_config_gtask_init(epoch, &epoch->e_gtask, epoch_call_task, "epoch call task");
	if (usedomains)
		epoch_init_numa(epoch);
	else
		epoch_init_legacy(epoch);
	callout_reset(&epoch->e_timer, poll_intvl, epoch_callout, epoch);
	return (epoch);
}

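/*
 * Tear down an epoch: mark it exiting so the callout stops rearming,
 * run and drain any remaining deferred callbacks, then release the
 * callout, mutex, counter, task, and per-CPU state.
 */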
void
epoch_free(epoch_t epoch)
{
	int domain;
#ifdef INVARIANTS
	struct epoch_pcpu_state *eps;
	int cpu;

	CPU_FOREACH(cpu) {
		eps = epoch->e_pcpu[cpu];
		MPASS(TAILQ_EMPTY(&eps->eps_record.er_tdlist));
	}
#endif
	mtx_lock(&epoch->e_lock);
	epoch->e_flags |= EPOCH_EXITING;
	mtx_unlock(&epoch->e_lock);
	/*
	 * Execute any lingering callbacks.
	 */
	GROUPTASK_ENQUEUE(&epoch->e_gtask);
	gtaskqueue_drain(epoch->e_gtask.gt_taskqueue, &epoch->e_gtask.gt_task);
	callout_drain(&epoch->e_timer);
	mtx_destroy(&epoch->e_lock);
	counter_u64_free(epoch->e_frees);
	taskqgroup_config_gtask_deinit(&epoch->e_gtask);
	if (usedomains)
		for (domain = 0; domain < vm_ndomains; domain++)
			free_domain(epoch->e_pcpu_dom[domain], M_EPOCH);
	else
		free(epoch->e_pcpu_dom[0], M_EPOCH);
	free(epoch, M_EPOCH);
}

#define INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)

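/*
 * Enter an epoch section: record the thread on this CPU's list, pin it
 * to the CPU, and begin a ck epoch section.  Presumably reached via an
 * epoch_enter() wrapper that manages td_epochnest; nested entries just
 * assert that the thread is already on the list.
 */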
void
epoch_enter_internal(epoch_t epoch, struct thread *td)
{
	struct epoch_pcpu_state *eps;

	INIT_CHECK(epoch);
	critical_enter();
	eps = epoch->e_pcpu[curcpu];
#ifdef INVARIANTS
	MPASS(td->td_epochnest < UCHAR_MAX - 2);
	if (td->td_epochnest > 1) {
		struct thread *curtd;
		int found = 0;

		TAILQ_FOREACH(curtd, &eps->eps_record.er_tdlist, td_epochq)
			if (curtd == td)
				found = 1;
		KASSERT(found, ("recursing on a second epoch"));
		critical_exit();
		return;
	}
#endif
	TAILQ_INSERT_TAIL(&eps->eps_record.er_tdlist, td, td_epochq);
	sched_pin();
	ck_epoch_begin(&eps->eps_record.er_record, (ck_epoch_section_t*)&td->td_epoch_section);
	critical_exit();
}

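/*
 * Leave an epoch section: end the ck epoch section, remove the thread
 * from this CPU's list, bump the record generation (so spinning
 * waiters notice the exit), and unpin the thread.
 */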
void
epoch_exit_internal(epoch_t epoch, struct thread *td)
{
	struct epoch_pcpu_state *eps;

	td = curthread;
	MPASS(td->td_epochnest == 0);
	INIT_CHECK(epoch);
	critical_enter();
	eps = epoch->e_pcpu[curcpu];

	ck_epoch_end(&eps->eps_record.er_record, (ck_epoch_section_t*)&td->td_epoch_section);
	TAILQ_REMOVE(&eps->eps_record.er_tdlist, td, td_epochq);
	eps->eps_record.er_gen++;
	sched_unpin();
	critical_exit();
}

/*
 * epoch_block_handler is a callback from the ck code, invoked when another
 * thread is currently in an epoch section and is blocking the grace period.
 * It is called with the thread lock held.
 */
static void
epoch_block_handler(struct ck_epoch *global __unused, ck_epoch_record_t *cr,
    void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *tdwait, *owner;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	spincount = 0;
	counter_u64_add(block_count, 1);
	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		if ((tdwait = TAILQ_FIRST(&record->er_tdlist)) != NULL &&
		    TD_IS_RUNNING(tdwait)) {
			gen = record->er_gen;
			thread_unlock(td);
			do {
				cpu_spinwait();
			} while (tdwait == TAILQ_FIRST(&record->er_tdlist) &&
			    gen == record->er_gen && TD_IS_RUNNING(tdwait) &&
			    spincount++ < MAX_ADAPTIVE_SPIN);
			thread_lock(td);
			return;
		}

		/*
		 * Being on the same CPU as the record we need to wait on
		 * gives us stable access to the thread list associated
		 * with that CPU, so migrate there.  On a later pass we can
		 * then examine the oldest thread in the queue and wait on
		 * its turnstile until it resumes, and so on, until a grace
		 * period elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU that is
	 * blocked on a turnstile and wait on that turnstile ourselves.
	 * Along the way, lend our priority to any lower-priority waiters
	 * so that they can run and exit their sections.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, td_epochq) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them.  They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		if (!TD_IS_INHIBITED(tdwait) && tdwait->td_priority > td->td_priority) {
			thread_lock(tdwait);
			sched_prio(tdwait, td->td_priority);
			thread_unlock(tdwait);
		}
		if (TD_IS_INHIBITED(tdwait) && TD_ON_LOCK(tdwait) &&
		    ((ts = tdwait->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock.  Before unlocking it we enter a
			 * critical section to prevent preemption once we
			 * re-enable interrupts by dropping the thread lock,
			 * so that tdwait doesn't get a chance to run.
			 */
			critical_enter();
			thread_unlock(td);
			owner = turnstile_lock(ts, &lock);
			/*
			 * A non-NULL owner indicates that the turnstile lock
			 * succeeded.  We can only wait on the turnstile if we
			 * hold its lock and it is still the one that tdwait
			 * is blocked on; otherwise the turnstile has been
			 * changed out from underneath us (for example the
			 * lock holder has already signalled tdwait) and we
			 * must move on.
			 */
			if (owner != NULL && ts == tdwait->td_blocked) {
				MPASS(TD_IS_INHIBITED(tdwait) && TD_ON_LOCK(tdwait));
				critical_exit();
				turnstile_wait(ts, owner, tdwait->td_tsqueue);
				counter_u64_add(turnstile_count, 1);
				thread_lock(td);
				return;
			} else if (owner != NULL)
				turnstile_unlock(ts, lock);
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == 0,
			    ("%d locks held", td->td_locks));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock,
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td).  Else a
	 * deadlock-like situation might happen. (HPS)
	 */
	thread_unlock(td);
	thread_lock(td);
}

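/*
 * Wait until every thread that was in an epoch section when we were
 * called has left it, i.e. until a grace period has elapsed.  The
 * calling thread must be able to sleep; its CPU binding, pinning, and
 * priority are saved and restored around the wait.
 */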
void
epoch_wait(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;
#ifdef INVARIANTS
	int locks;

	locks = curthread->td_locks;
#endif
	INIT_CHECK(epoch);

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "epoch_wait() can sleep");

	td = curthread;
	KASSERT(td->td_epochnest == 0, ("epoch_wait() in the middle of an epoch section"));
	thread_lock(td);

	DROP_GIANT();

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* put the thread back on its initial CPU, if it was pinned */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}

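/*
 * Defer a callback until a grace period has elapsed: queue it on the
 * current CPU's callback list, to be run by the callout-driven task.
 * A hypothetical caller would embed the epoch context in the object
 * being freed, along the lines of:
 *
 *	epoch_call(global_epoch, &obj->obj_epoch_ctx, obj_free_cb);
 *
 * where obj, obj_epoch_ctx, and obj_free_cb are the caller's own (the
 * callback recovers the object from its context, e.g. with
 * __containerof(), and frees it).
 */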
void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t))
{
	struct epoch_pcpu_state *eps;
	epoch_cb_t cb;

	cb = (void *)ctx;

	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL)) {
		callback(ctx);
		return;
	}
	MPASS(cb->ec_callback == NULL);
	MPASS(cb->ec_link.stqe_next == NULL);
	cb->ec_callback = callback;
	counter_u64_add(epoch->e_frees, 1);

	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	STAILQ_INSERT_HEAD(&eps->eps_cblist, cb, ec_link);
	critical_exit();
}

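/*
 * Grouptask handler: collect the pending callbacks from every CPU's
 * list (binding to each CPU in turn so the per-CPU lists are accessed
 * locally), wait for a grace period, and then run the callbacks.
 */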
static void
epoch_call_task(void *context)
{
	struct epoch_pcpu_state *eps;
	epoch_t epoch;
	epoch_cb_t cb;
	struct thread *td;
	int cpu;
	STAILQ_HEAD(, epoch_cb) tmp_head;

	epoch = context;
	STAILQ_INIT(&tmp_head);
	td = curthread;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
		eps = epoch->e_pcpu[cpu];
		if (!STAILQ_EMPTY(&eps->eps_cblist))
			STAILQ_CONCAT(&tmp_head, &eps->eps_cblist);
	}
	sched_unbind(td);
	thread_unlock(td);
	epoch_wait(epoch);

	while ((cb = STAILQ_FIRST(&tmp_head)) != NULL) {
		STAILQ_REMOVE_HEAD(&tmp_head, ec_link);
		cb->ec_callback((void*)cb);
	}
}

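/*
 * Return non-zero if the current thread is inside an epoch section.
 */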
int
in_epoch(void)
{
	return (curthread->td_epochnest != 0);
}