xref: /freebsd/sys/kern/subr_epoch.c (revision 4a84c26cfc241ffa113d2e815d61d4b406b937e9)
/*-
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Neither the name of Matthew Macy nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <ck_epoch.h>

MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

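/*
 * Illustrative usage sketch (not part of this file): a consumer allocates
 * an epoch once with epoch_alloc(), brackets read-side accesses with
 * epoch_enter()/epoch_exit(), and a writer either blocks in epoch_wait()
 * before freeing, or defers the free with epoch_call().  The "foo" type,
 * its foo_epoch_ctx member (a struct epoch_context), M_FOO, and the
 * function names below are hypothetical and serve only as an example.
 *
 *	epoch_t foo_epoch;	// set up once: foo_epoch = epoch_alloc();
 *
 *	void
 *	foo_read(struct foo *fp)
 *	{
 *		epoch_enter(foo_epoch);
 *		// dereference epoch-protected data hanging off fp
 *		epoch_exit(foo_epoch);
 *	}
 *
 *	void
 *	foo_destroy_cb(epoch_context_t ctx)
 *	{
 *		free(__containerof(ctx, struct foo, foo_epoch_ctx), M_FOO);
 *	}
 *
 *	void
 *	foo_remove(struct foo *fp)
 *	{
 *		// unlink fp so no new readers can find it, then defer the free
 *		epoch_call(foo_epoch, &fp->foo_epoch_ctx, foo_destroy_cb);
 *	}
 */
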
/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 5000

#define EPOCH_EXITING 0x1
#ifdef __amd64__
#define EPOCH_ALIGN (CACHE_LINE_SIZE * 2)
#else
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif

SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");

static int poll_intvl;
SYSCTL_INT(_kern_epoch, OID_AUTO, poll_intvl, CTLFLAG_RWTUN,
		   &poll_intvl, 0, "# of ticks to wait between garbage collecting deferred frees");
/* Stats. */
static counter_u64_t block_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
				   &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
				   &migrate_count, "# of times a thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
				   &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
				   &switch_count, "# of times a thread voluntarily context switched in epoch_wait");

typedef struct epoch_cb {
	void (*ec_callback)(epoch_context_t);
	STAILQ_ENTRY(epoch_cb) ec_link;
} *epoch_cb_t;

TAILQ_HEAD(threadlist, thread);

typedef struct epoch_record {
	ck_epoch_record_t er_record;
	volatile struct threadlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
} *epoch_record_t;

struct epoch_pcpu_state {
	struct epoch_record eps_record;
	STAILQ_HEAD(, epoch_cb) eps_cblist;
} __aligned(EPOCH_ALIGN);

struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	struct grouptask e_gtask;
	struct callout e_timer;
	struct mtx e_lock;
	int e_flags;
	/* make sure that immutable data doesn't overlap with the gtask, callout, and mutex */
	struct epoch_pcpu_state *e_pcpu_dom[MAXMEMDOM] __aligned(EPOCH_ALIGN);
	counter_u64_t e_frees;
	uint64_t e_free_last;
	struct epoch_pcpu_state *e_pcpu[0];
};

static __read_mostly int domcount[MAXMEMDOM];
static __read_mostly int domoffsets[MAXMEMDOM];
static __read_mostly int inited;

static void epoch_call_task(void *context);

#if defined(__powerpc64__) || defined(__powerpc__)
static bool usedomains = false;
#else
static bool usedomains = true;
#endif
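/*
 * Bootstrap the subsystem: pick a default polling interval, allocate the
 * statistics counters, and record the per-domain CPU counts and offsets
 * used for NUMA-aware allocation.  Falls back to non-domain operation if
 * any memory domain reports zero CPUs.
 */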
static void
epoch_init(void *arg __unused)
{
	int domain, count;

	if (poll_intvl == 0)
		poll_intvl = hz;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	if (usedomains == false) {
		inited = 1;
		return;
	}
	count = domain = 0;
	domoffsets[0] = 0;
	for (domain = 0; domain < vm_ndomains; domain++) {
		domcount[domain] = CPU_COUNT(&cpuset_domain[domain]);
		if (bootverbose)
			printf("domcount[%d] %d\n", domain, domcount[domain]);
	}
	for (domain = 1; domain < vm_ndomains; domain++)
		domoffsets[domain] = domoffsets[domain-1] + domcount[domain-1];

	for (domain = 0; domain < vm_ndomains; domain++) {
		if (domcount[domain] == 0) {
			usedomains = false;
			break;
		}
	}
	inited = 1;
}
SYSINIT(epoch, SI_SUB_CPU + 1, SI_ORDER_FIRST, epoch_init, NULL);

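/*
 * Allocate and initialize the per-CPU epoch state for each memory domain,
 * backing each CPU's record with memory from that CPU's own domain.
 */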
static void
epoch_init_numa(epoch_t epoch)
{
	int domain, cpu_offset;
	struct epoch_pcpu_state *eps;
	epoch_record_t er;

	for (domain = 0; domain < vm_ndomains; domain++) {
		eps = malloc_domain(sizeof(*eps)*domcount[domain], M_EPOCH,
							domain, M_ZERO|M_WAITOK);
		epoch->e_pcpu_dom[domain] = eps;
		cpu_offset = domoffsets[domain];
		for (int i = 0; i < domcount[domain]; i++, eps++) {
			epoch->e_pcpu[cpu_offset + i] = eps;
			er = &eps->eps_record;
			STAILQ_INIT(&eps->eps_cblist);
			ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
			TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
			er->er_cpuid = cpu_offset + i;
		}
	}
}

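/*
 * Non-NUMA fallback: allocate the per-CPU epoch state as a single array
 * and initialize one record per CPU.
 */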
static void
epoch_init_legacy(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	epoch_record_t er;

	eps = malloc(sizeof(*eps)*mp_ncpus, M_EPOCH, M_ZERO|M_WAITOK);
	epoch->e_pcpu_dom[0] = eps;
	for (int i = 0; i < mp_ncpus; i++, eps++) {
		epoch->e_pcpu[i] = eps;
		er = &eps->eps_record;
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		STAILQ_INIT(&eps->eps_cblist);
		er->er_cpuid = i;
	}
}

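/*
 * Periodic callout: if enough frees have been deferred since the last pass,
 * schedule the callback task, then rearm unless the epoch is being torn down.
 */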
static void
epoch_callout(void *arg)
{
	epoch_t epoch;
	uint64_t frees;

	epoch = arg;
	frees = counter_u64_fetch(epoch->e_frees);
	/* pick some better value */
	if (frees - epoch->e_free_last > 10) {
		GROUPTASK_ENQUEUE(&epoch->e_gtask);
		epoch->e_free_last = frees;
	}
	if ((epoch->e_flags & EPOCH_EXITING) == 0)
		callout_reset(&epoch->e_timer, poll_intvl, epoch_callout, epoch);
}

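/*
 * Allocate and initialize a new epoch: set up the underlying ck_epoch state,
 * the deferred-free counter, the callback task, and the per-CPU records
 * (NUMA-aware when domain information is usable), then arm the callout.
 * Panics if called before the subsystem has been initialized.
 */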
epoch_t
epoch_alloc(void)
{
	epoch_t epoch;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);
	epoch = malloc(sizeof(struct epoch) + mp_ncpus*sizeof(void*),
				   M_EPOCH, M_ZERO|M_WAITOK);
	ck_epoch_init(&epoch->e_epoch);
	epoch->e_frees = counter_u64_alloc(M_WAITOK);
	mtx_init(&epoch->e_lock, "epoch callout", NULL, MTX_DEF);
	callout_init_mtx(&epoch->e_timer, &epoch->e_lock, 0);
	taskqgroup_config_gtask_init(epoch, &epoch->e_gtask, epoch_call_task, "epoch call task");
	if (usedomains)
		epoch_init_numa(epoch);
	else
		epoch_init_legacy(epoch);
	callout_reset(&epoch->e_timer, poll_intvl, epoch_callout, epoch);
	return (epoch);
}

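/*
 * Tear down an epoch: mark it as exiting, run and drain any pending
 * callbacks, stop the callout, and release all per-CPU state.  The caller
 * must guarantee that no thread is still inside a section on this epoch.
 */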
void
epoch_free(epoch_t epoch)
{
	int domain;
#ifdef INVARIANTS
	struct epoch_pcpu_state *eps;
	int cpu;

	CPU_FOREACH(cpu) {
		eps = epoch->e_pcpu[cpu];
		MPASS(TAILQ_EMPTY(&eps->eps_record.er_tdlist));
	}
#endif
	mtx_lock(&epoch->e_lock);
	epoch->e_flags |= EPOCH_EXITING;
	mtx_unlock(&epoch->e_lock);
	/*
	 * Execute any lingering callbacks.
	 */
	GROUPTASK_ENQUEUE(&epoch->e_gtask);
	gtaskqueue_drain(epoch->e_gtask.gt_taskqueue, &epoch->e_gtask.gt_task);
	callout_drain(&epoch->e_timer);
	mtx_destroy(&epoch->e_lock);
	counter_u64_free(epoch->e_frees);
	taskqgroup_config_gtask_deinit(&epoch->e_gtask);
	if (usedomains)
		for (domain = 0; domain < vm_ndomains; domain++)
			free_domain(epoch->e_pcpu_dom[domain], M_EPOCH);
	else
		free(epoch->e_pcpu_dom[0], M_EPOCH);
	free(epoch, M_EPOCH);
}

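/* Silently tolerate a NULL epoch pointer, e.g. one that was never allocated. */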
#define INIT_CHECK(epoch)								\
	do {											\
		if (__predict_false((epoch) == NULL))		\
			return;									\
	} while (0)

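/*
 * Enter a preemptible epoch section: pin the thread to its current CPU,
 * bump its nesting count, link it onto this CPU's record on the outermost
 * entry so that epoch_wait() can find it, and begin a ck_epoch section.
 */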
void
epoch_enter(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	struct thread *td;

	INIT_CHECK(epoch);

	td = curthread;
	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	td->td_epochnest++;
	MPASS(td->td_epochnest < UCHAR_MAX - 2);
	if (td->td_epochnest == 1)
		TAILQ_INSERT_TAIL(&eps->eps_record.er_tdlist, td, td_epochq);
#ifdef INVARIANTS
	if (td->td_epochnest > 1) {
		struct thread *curtd;
		int found = 0;

		TAILQ_FOREACH(curtd, &eps->eps_record.er_tdlist, td_epochq)
			if (curtd == td)
				found = 1;
		KASSERT(found, ("recursing on a second epoch"));
	}
#endif
	sched_pin();
	ck_epoch_begin(&eps->eps_record.er_record, NULL);
	critical_exit();
}

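/*
 * Enter a non-preemptible epoch section: the critical section taken here is
 * held until the matching epoch_exit_nopreempt(), so the thread never needs
 * to be tracked on the per-CPU thread list.
 */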
void
epoch_enter_nopreempt(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;

	INIT_CHECK(epoch);
	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	curthread->td_epochnest++;
	MPASS(curthread->td_epochnest < UCHAR_MAX - 2);
	ck_epoch_begin(&eps->eps_record.er_record, NULL);
}

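/*
 * Leave a preemptible epoch section: end the ck_epoch section, unpin the
 * thread, unlink it from the per-CPU thread list on the outermost exit, and
 * bump the record generation so that spinning waiters notice the change.
 */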
void
epoch_exit(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	struct thread *td;

	td = curthread;
	INIT_CHECK(epoch);
	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	sched_unpin();
	ck_epoch_end(&eps->eps_record.er_record, NULL);
	td->td_epochnest--;
	if (td->td_epochnest == 0)
		TAILQ_REMOVE(&eps->eps_record.er_tdlist, td, td_epochq);
	eps->eps_record.er_gen++;
	critical_exit();
}

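/*
 * Leave a non-preemptible epoch section and drop the critical section taken
 * in epoch_enter_nopreempt().
 */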
void
epoch_exit_nopreempt(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;

	INIT_CHECK(epoch);
	MPASS(curthread->td_critnest);
	eps = epoch->e_pcpu[curcpu];
	ck_epoch_end(&eps->eps_record.er_record, NULL);
	curthread->td_epochnest--;
	critical_exit();
}

/*
 * epoch_block_handler() is the callback invoked by the ck code when
 * epoch_wait() finds another thread currently inside an epoch section.
 */
static void
epoch_block_handler(struct ck_epoch *global __unused, ck_epoch_record_t *cr,
					void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *tdwait, *owner;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	spincount = 0;
	counter_u64_add(block_count, 1);
	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		if ((tdwait = TAILQ_FIRST(&record->er_tdlist)) != NULL &&
			TD_IS_RUNNING(tdwait)) {
			gen = record->er_gen;
			thread_unlock(td);
			do {
				cpu_spinwait();
			} while (tdwait == TAILQ_FIRST(&record->er_tdlist) &&
					 gen == record->er_gen && TD_IS_RUNNING(tdwait) &&
					 spincount++ < MAX_ADAPTIVE_SPIN);
			thread_lock(td);
			return;
		}

		/*
		 * Being on the same CPU as the record we need to wait on
		 * gives us access to the thread list associated with that
		 * CPU.  We can then examine the oldest thread in the queue
		 * and wait on its turnstile until it resumes, and so on,
		 * until a grace period elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan whether a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, td_epochq) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them. They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		if (!TD_IS_INHIBITED(tdwait) && tdwait->td_priority > td->td_priority) {
			thread_lock(tdwait);
			sched_prio(tdwait, td->td_priority);
			thread_unlock(tdwait);
		}
		if (TD_IS_INHIBITED(tdwait) && TD_ON_LOCK(tdwait) &&
			((ts = tdwait->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait() to reacquire
			 * the thread lock.  Before unlocking it we enter a
			 * critical section to prevent preemption once the
			 * thread lock is dropped (re-enabling interrupts),
			 * which would otherwise let tdwait get to run first.
			 */
			critical_enter();
			thread_unlock(td);
			owner = turnstile_lock(ts, &lock);
			/*
			 * A non-NULL owner indicates that the turnstile lock
			 * succeeded.  Only if we hold that lock and the
			 * turnstile we locked is still the one that tdwait is
			 * blocked on can we continue and wait on it.  Otherwise
			 * the turnstile pointer has been changed out from
			 * underneath us, as in the case where the lock holder
			 * has signalled tdwait, and we need to move on.
			 */
			if (owner != NULL && ts == tdwait->td_blocked) {
				MPASS(TD_IS_INHIBITED(tdwait) && TD_ON_LOCK(tdwait));
				critical_exit();
				turnstile_wait(ts, owner, tdwait->td_tsqueue);
				counter_u64_add(turnstile_count, 1);
				thread_lock(td);
				return;
			} else if (owner != NULL)
				turnstile_unlock(ts, lock);
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == 0,
					("%d locks held", td->td_locks));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock,
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td). Else a
	 * deadlock-like situation might happen. (HPS)
	 */
	thread_unlock(td);
	thread_lock(td);
}

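/*
 * Wait until every thread that was inside an epoch section when epoch_wait()
 * was called has left it, i.e. until a full grace period has elapsed.  The
 * caller may sleep; epoch_block_handler() above decides how to wait out each
 * lagging thread.
 */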
void
epoch_wait(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;

	INIT_CHECK(epoch);

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "epoch_wait() can sleep");

	td = curthread;
	KASSERT(td->td_epochnest == 0, ("epoch_wait() in the middle of an epoch section"));
	thread_lock(td);

	DROP_GIANT();

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	KASSERT(td->td_locks == 0,
			("%d locks held", td->td_locks));
	PICKUP_GIANT();
}

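/*
 * Defer a callback until a grace period has elapsed: the context is queued
 * on the current CPU's callback list, and epoch_call_task() later waits for
 * a grace period and then runs it.
 */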
void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback)(epoch_context_t))
{
	struct epoch_pcpu_state *eps;
	epoch_cb_t cb;

	cb = (void *)ctx;

	MPASS(cb->ec_callback == NULL);
	MPASS(cb->ec_link.stqe_next == NULL);
	MPASS(epoch);
	MPASS(callback);
	cb->ec_callback = callback;
	counter_u64_add(epoch->e_frees, 1);
	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	STAILQ_INSERT_HEAD(&eps->eps_cblist, cb, ec_link);
	critical_exit();
}

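/*
 * Grouptask handler: gather the deferred callbacks from every CPU, wait for
 * a grace period to elapse, and then run them.
 */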
static void
epoch_call_task(void *context)
{
	struct epoch_pcpu_state *eps;
	epoch_t epoch;
	epoch_cb_t cb;
	struct thread *td;
	int cpu;
	STAILQ_HEAD(, epoch_cb) tmp_head;

	epoch = context;
	STAILQ_INIT(&tmp_head);
	td = curthread;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
		eps = epoch->e_pcpu[cpu];
		if (!STAILQ_EMPTY(&eps->eps_cblist))
			STAILQ_CONCAT(&tmp_head, &eps->eps_cblist);
	}
	sched_unbind(td);
	thread_unlock(td);
	epoch_wait(epoch);

	while ((cb = STAILQ_FIRST(&tmp_head)) != NULL) {
		STAILQ_REMOVE_HEAD(&tmp_head, ec_link);
		cb->ec_callback((void*)cb);
	}
}

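/*
 * Report whether the current thread is inside any epoch section.
 */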
int
in_epoch(void)
{
	return (curthread->td_epochnest != 0);
}