xref: /freebsd/sys/kern/subr_epoch.c (revision edca4938f74db18d091868237592abbf7e718669)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/counter.h>
35 #include <sys/epoch.h>
36 #include <sys/gtaskqueue.h>
37 #include <sys/kernel.h>
38 #include <sys/limits.h>
39 #include <sys/lock.h>
40 #include <sys/malloc.h>
41 #include <sys/mutex.h>
42 #include <sys/pcpu.h>
43 #include <sys/proc.h>
44 #include <sys/sched.h>
45 #include <sys/sx.h>
46 #include <sys/smp.h>
47 #include <sys/sysctl.h>
48 #include <sys/turnstile.h>
49 #ifdef EPOCH_TRACE
50 #include <machine/stdarg.h>
51 #include <sys/stack.h>
52 #include <sys/tree.h>
53 #endif
54 #include <vm/vm.h>
55 #include <vm/vm_extern.h>
56 #include <vm/vm_kern.h>
57 #include <vm/uma.h>
58 
59 #include <ck_epoch.h>
60 
61 static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");
62 
63 #ifdef __amd64__
64 #define EPOCH_ALIGN (CACHE_LINE_SIZE * 2)
65 #else
66 #define EPOCH_ALIGN CACHE_LINE_SIZE
67 #endif
68 
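/*
 * Per-CPU epoch record.  Each epoch keeps one record per CPU, aligned to
 * EPOCH_ALIGN to avoid false sharing; er_tdlist tracks the threads currently
 * inside a preemptible section of this epoch on that CPU and er_gen is
 * incremented each time such a section is exited.
 */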
69 TAILQ_HEAD (epoch_tdlist, epoch_tracker);
70 typedef struct epoch_record {
71 	ck_epoch_record_t er_record;
72 	struct epoch_context er_drain_ctx;
73 	struct epoch *er_parent;
74 	volatile struct epoch_tdlist er_tdlist;
75 	volatile uint32_t er_gen;
76 	uint32_t er_cpuid;
77 } __aligned(EPOCH_ALIGN)     *epoch_record_t;
78 
79 struct epoch {
80 	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
81 	epoch_record_t e_pcpu_record;
82 	int	e_idx;
83 	int	e_flags;
84 	struct sx e_drain_sx;
85 	struct mtx e_drain_mtx;
86 	volatile int e_drain_count;
87 	const char *e_name;
88 };
89 
90 /* arbitrary --- needs benchmarking */
91 #define MAX_ADAPTIVE_SPIN 100
92 #define MAX_EPOCHS 64
93 
94 CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
95 SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
96 SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");
97 
98 /* Stats. */
99 static counter_u64_t block_count;
100 
101 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
102     &block_count, "# of times a thread was in an epoch when epoch_wait was called");
103 static counter_u64_t migrate_count;
104 
105 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
106     &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
107 static counter_u64_t turnstile_count;
108 
109 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
110     &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
111 static counter_u64_t switch_count;
112 
113 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
114     &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
115 static counter_u64_t epoch_call_count;
116 
117 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
118     &epoch_call_count, "# of times a callback was deferred");
119 static counter_u64_t epoch_call_task_count;
120 
121 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
122     &epoch_call_task_count, "# of times a callback task was run");
123 
124 TAILQ_HEAD (threadlist, thread);
125 
126 CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
127     ck_epoch_entry_container)
128 
129 epoch_t	allepochs[MAX_EPOCHS];
130 
131 DPCPU_DEFINE(struct grouptask, epoch_cb_task);
132 DPCPU_DEFINE(int, epoch_cb_count);
133 
134 static __read_mostly int inited;
135 static __read_mostly int epoch_count;
136 __read_mostly epoch_t global_epoch;
137 __read_mostly epoch_t global_epoch_preempt;
138 
139 static void epoch_call_task(void *context __unused);
140 static uma_zone_t pcpu_zone_record;
141 
142 #ifdef EPOCH_TRACE
143 struct stackentry {
144 	RB_ENTRY(stackentry) se_node;
145 	struct stack se_stack;
146 };
147 
148 static int
149 stackentry_compare(struct stackentry *a, struct stackentry *b)
150 {
151 
152 	if (a->se_stack.depth > b->se_stack.depth)
153 		return (1);
154 	if (a->se_stack.depth < b->se_stack.depth)
155 		return (-1);
156 	for (int i = 0; i < a->se_stack.depth; i++) {
157 		if (a->se_stack.pcs[i] > b->se_stack.pcs[i])
158 			return (1);
159 		if (a->se_stack.pcs[i] < b->se_stack.pcs[i])
160 			return (-1);
161 	}
162 
163 	return (0);
164 }
165 
166 RB_HEAD(stacktree, stackentry) epoch_stacks = RB_INITIALIZER(&epoch_stacks);
167 RB_GENERATE_STATIC(stacktree, stackentry, se_node, stackentry_compare);
168 
169 static struct mtx epoch_stacks_lock;
170 MTX_SYSINIT(epochstacks, &epoch_stacks_lock, "epoch_stacks", MTX_DEF);
171 
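/*
 * Report a tracing violation, but only once per unique stack trace: traces
 * already seen are remembered in the epoch_stacks tree and suppressed.
 */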
172 static void epoch_trace_report(const char *fmt, ...) __printflike(1, 2);
173 static inline void
174 epoch_trace_report(const char *fmt, ...)
175 {
176 	va_list ap;
177 	struct stackentry se, *new;
178 
179 	stack_zero(&se.se_stack);	/* XXX: is it really needed? */
180 	stack_save(&se.se_stack);
181 
182 	/* Tree is never reduced - go lockless. */
183 	if (RB_FIND(stacktree, &epoch_stacks, &se) != NULL)
184 		return;
185 
186 	new = malloc(sizeof(*new), M_STACK, M_NOWAIT);
187 	if (new != NULL) {
188 		bcopy(&se.se_stack, &new->se_stack, sizeof(struct stack));
189 
190 		mtx_lock(&epoch_stacks_lock);
191 		new = RB_INSERT(stacktree, &epoch_stacks, new);
192 		mtx_unlock(&epoch_stacks_lock);
193 		if (new != NULL)
194 			free(new, M_STACK);
195 	}
196 
197 	va_start(ap, fmt);
198 	(void)vprintf(fmt, ap);
199 	va_end(ap);
200 	stack_print_ddb(&se.se_stack);
201 }
202 
203 static inline void
204 epoch_trace_enter(struct thread *td, epoch_t epoch, epoch_tracker_t et,
205     const char *file, int line)
206 {
207 	epoch_tracker_t iet;
208 
209 	SLIST_FOREACH(iet, &td->td_epochs, et_tlink)
210 		if (iet->et_epoch == epoch)
211 			epoch_trace_report("Recursively entering epoch %s "
212 			    "previously entered at %s:%d\n",
213 			    epoch->e_name, iet->et_file, iet->et_line);
214 	et->et_epoch = epoch;
215 	et->et_file = file;
216 	et->et_line = line;
217 	SLIST_INSERT_HEAD(&td->td_epochs, et, et_tlink);
218 }
219 
220 static inline void
221 epoch_trace_exit(struct thread *td, epoch_t epoch, epoch_tracker_t et,
222     const char *file, int line)
223 {
224 
225 	if (SLIST_FIRST(&td->td_epochs) != et) {
226 		epoch_trace_report("Exiting epoch %s in a non-nested order. "
227 		    "Most recently entered %s at %s:%d\n",
228 		    epoch->e_name,
229 		    SLIST_FIRST(&td->td_epochs)->et_epoch->e_name,
230 		    SLIST_FIRST(&td->td_epochs)->et_file,
231 		    SLIST_FIRST(&td->td_epochs)->et_line);
232 		/* This will panic if et is not anywhere on td_epochs. */
233 		SLIST_REMOVE(&td->td_epochs, et, epoch_tracker, et_tlink);
234 	} else
235 		SLIST_REMOVE_HEAD(&td->td_epochs, et_tlink);
236 }
237 
238 /* Used by assertions that check thread state before going to sleep. */
239 void
240 epoch_trace_list(struct thread *td)
241 {
242 	epoch_tracker_t iet;
243 
244 	SLIST_FOREACH(iet, &td->td_epochs, et_tlink)
245 		printf("Epoch %s entered at %s:%d\n", iet->et_epoch->e_name,
246 		    iet->et_file, iet->et_line);
247 }
248 #endif /* EPOCH_TRACE */
249 
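/*
 * Boot-time initialization: allocate the statistics counters, create the
 * per-CPU record UMA zone, attach a per-CPU callback grouptask to the
 * softirq taskqueue group, and create the two global epochs.
 */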
250 static void
251 epoch_init(void *arg __unused)
252 {
253 	int cpu;
254 
255 	block_count = counter_u64_alloc(M_WAITOK);
256 	migrate_count = counter_u64_alloc(M_WAITOK);
257 	turnstile_count = counter_u64_alloc(M_WAITOK);
258 	switch_count = counter_u64_alloc(M_WAITOK);
259 	epoch_call_count = counter_u64_alloc(M_WAITOK);
260 	epoch_call_task_count = counter_u64_alloc(M_WAITOK);
261 
262 	pcpu_zone_record = uma_zcreate("epoch_record pcpu",
263 	    sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
264 	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
265 	CPU_FOREACH(cpu) {
266 		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
267 		    epoch_call_task, NULL);
268 		taskqgroup_attach_cpu(qgroup_softirq,
269 		    DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
270 		    "epoch call task");
271 	}
272 #ifdef EPOCH_TRACE
273 	SLIST_INIT(&thread0.td_epochs);
274 #endif
275 	inited = 1;
276 	global_epoch = epoch_alloc("Global", 0);
277 	global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT);
278 }
279 SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);
280 
281 #if !defined(EARLY_AP_STARTUP)
282 static void
283 epoch_init_smp(void *dummy __unused)
284 {
285 	inited = 2;
286 }
287 SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
288 #endif
289 
290 static void
291 epoch_ctor(epoch_t epoch)
292 {
293 	epoch_record_t er;
294 	int cpu;
295 
296 	epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
297 	CPU_FOREACH(cpu) {
298 		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
299 		bzero(er, sizeof(*er));
300 		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
301 		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
302 		er->er_cpuid = cpu;
303 		er->er_parent = epoch;
304 	}
305 }
306 
307 static void
308 epoch_adjust_prio(struct thread *td, u_char prio)
309 {
310 
311 	thread_lock(td);
312 	sched_prio(td, prio);
313 	thread_unlock(td);
314 }
315 
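/*
 * Allocate and register a new epoch.  The name is kept for diagnostics and
 * tracing; flags may include EPOCH_PREEMPT for epochs whose sections are
 * entered with epoch_enter_preempt() and may therefore be preempted.
 */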
316 epoch_t
317 epoch_alloc(const char *name, int flags)
318 {
319 	epoch_t epoch;
320 
321 	if (__predict_false(!inited))
322 		panic("%s called too early in boot", __func__);
323 	epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK);
324 	ck_epoch_init(&epoch->e_epoch);
325 	epoch_ctor(epoch);
326 	MPASS(epoch_count < MAX_EPOCHS - 2);
327 	epoch->e_flags = flags;
328 	epoch->e_idx = epoch_count;
329 	epoch->e_name = name;
330 	sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
331 	mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);
332 	allepochs[epoch_count++] = epoch;
333 	return (epoch);
334 }
335 
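/*
 * Tear down an epoch: drain all deferred callbacks, unpublish the epoch from
 * allepochs, and wait out a global_epoch grace period so that concurrent
 * epoch_call_task() scans can no longer observe it before the per-CPU
 * records are freed.
 */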
336 void
337 epoch_free(epoch_t epoch)
338 {
339 
340 	epoch_drain_callbacks(epoch);
341 	allepochs[epoch->e_idx] = NULL;
342 	epoch_wait(global_epoch);
343 	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
344 	mtx_destroy(&epoch->e_drain_mtx);
345 	sx_destroy(&epoch->e_drain_sx);
346 	free(epoch, M_EPOCH);
347 }
348 
349 static epoch_record_t
350 epoch_currecord(epoch_t epoch)
351 {
352 
353 	return (zpcpu_get_cpu(epoch->e_pcpu_record, curcpu));
354 }
355 
356 #define INIT_CHECK(epoch)					\
357 	do {							\
358 		if (__predict_false((epoch) == NULL))		\
359 			return;					\
360 	} while (0)
361 
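/*
 * Enter a preemptible epoch section.  A minimal usage sketch (assuming the
 * caller supplies the tracker; the epoch_enter_preempt()/epoch_exit_preempt()
 * macros from sys/epoch.h are expected to add file/line details when
 * EPOCH_TRACE is enabled):
 *
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(global_epoch_preempt, &et);
 *	... lockless read-side access to epoch-protected data ...
 *	epoch_exit_preempt(global_epoch_preempt, &et);
 */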
362 void
363 _epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
364 {
365 	struct epoch_record *er;
366 	struct thread *td;
367 
368 	MPASS(cold || epoch != NULL);
369 	INIT_CHECK(epoch);
370 	MPASS(epoch->e_flags & EPOCH_PREEMPT);
371 	td = curthread;
372 #ifdef EPOCH_TRACE
373 	epoch_trace_enter(td, epoch, et, file, line);
374 #endif
375 	et->et_td = td;
376 	td->td_epochnest++;
377 	critical_enter();
378 	sched_pin();
379 	td->td_pre_epoch_prio = td->td_priority;
380 	er = epoch_currecord(epoch);
381 	TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
382 	ck_epoch_begin(&er->er_record, &et->et_section);
383 	critical_exit();
384 }
385 
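/*
 * Non-preemptible variant: the critical section entered here is only left in
 * epoch_exit(), so code running inside the section must neither sleep nor be
 * preempted.
 */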
386 void
387 epoch_enter(epoch_t epoch)
388 {
389 	struct thread *td;
390 	epoch_record_t er;
391 
392 	MPASS(cold || epoch != NULL);
393 	INIT_CHECK(epoch);
394 	td = curthread;
395 	td->td_epochnest++;
396 	critical_enter();
397 	er = epoch_currecord(epoch);
398 	ck_epoch_begin(&er->er_record, NULL);
399 }
400 
401 void
402 _epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
403 {
404 	struct epoch_record *er;
405 	struct thread *td;
406 
407 	INIT_CHECK(epoch);
408 	td = curthread;
409 	critical_enter();
410 	sched_unpin();
411 	MPASS(td->td_epochnest);
412 	td->td_epochnest--;
413 	er = epoch_currecord(epoch);
414 	MPASS(epoch->e_flags & EPOCH_PREEMPT);
415 	MPASS(et != NULL);
416 	MPASS(et->et_td == td);
417 #ifdef INVARIANTS
418 	et->et_td = (void*)0xDEADBEEF;
419 #endif
420 	ck_epoch_end(&er->er_record, &et->et_section);
421 	TAILQ_REMOVE(&er->er_tdlist, et, et_link);
422 	er->er_gen++;
423 	if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
424 		epoch_adjust_prio(td, td->td_pre_epoch_prio);
425 	critical_exit();
426 #ifdef EPOCH_TRACE
427 	epoch_trace_exit(td, epoch, et, file, line);
428 #endif
429 }
430 
431 void
432 epoch_exit(epoch_t epoch)
433 {
434 	struct thread *td;
435 	epoch_record_t er;
436 
437 	INIT_CHECK(epoch);
438 	td = curthread;
439 	MPASS(td->td_epochnest);
440 	td->td_epochnest--;
441 	er = epoch_currecord(epoch);
442 	ck_epoch_end(&er->er_record, NULL);
443 	critical_exit();
444 }
445 
446 /*
447  * epoch_block_handler_preempt() is a callback from the CK code, invoked
448  * while another thread is still in an epoch section, blocking a grace period.
449  */
450 static void
451 epoch_block_handler_preempt(struct ck_epoch *global __unused,
452     ck_epoch_record_t *cr, void *arg __unused)
453 {
454 	epoch_record_t record;
455 	struct thread *td, *owner, *curwaittd;
456 	struct epoch_tracker *tdwait;
457 	struct turnstile *ts;
458 	struct lock_object *lock;
459 	int spincount, gen;
460 	int locksheld __unused;
461 
462 	record = __containerof(cr, struct epoch_record, er_record);
463 	td = curthread;
464 	locksheld = td->td_locks;
465 	spincount = 0;
466 	counter_u64_add(block_count, 1);
467 	/*
468 	 * We lost a race and there are no longer any threads
469 	 * on the CPU in an epoch section.
470 	 */
471 	if (TAILQ_EMPTY(&record->er_tdlist))
472 		return;
473 
474 	if (record->er_cpuid != curcpu) {
475 		/*
476 		 * If the head of the list is running, we can wait for it
477 		 * to remove itself from the list and thus save us the
478 		 * overhead of a migration.
479 		 */
480 		gen = record->er_gen;
481 		thread_unlock(td);
482 		/*
483 		 * We can't actually check if the waiting thread is running,
484 		 * so we simply poll for it to exit before giving up and
485 		 * migrating.
486 		 */
487 		do {
488 			cpu_spinwait();
489 		} while (!TAILQ_EMPTY(&record->er_tdlist) &&
490 				 gen == record->er_gen &&
491 				 spincount++ < MAX_ADAPTIVE_SPIN);
492 		thread_lock(td);
493 		/*
494 		 * If the generation has changed, we can poll again;
495 		 * otherwise we need to migrate.
496 		 */
497 		if (gen != record->er_gen)
498 			return;
499 		/*
500 		 * Being on the same CPU as the record on which we
501 		 * need to wait gives us access to the thread list
502 		 * associated with that CPU.  We can then examine
503 		 * the oldest thread in the queue and wait on its
504 		 * turnstile until it resumes, and so on, until a
505 		 * grace period elapses.
506 		 *
507 		 */
508 		counter_u64_add(migrate_count, 1);
509 		sched_bind(td, record->er_cpuid);
510 		/*
511 		 * At this point we need to return to the ck code
512 		 * to scan to see if a grace period has elapsed.
513 		 * We can't move on to check the thread list, because
514 		 * in the meantime new threads may have arrived that
515 		 * in fact belong to a different epoch.
516 		 */
517 		return;
518 	}
519 	/*
520 	 * Try to find a thread in an epoch section on this CPU
521 	 * waiting on a turnstile. Otherwise find the lowest
522 	 * priority thread (highest prio value) and drop our priority
523 	 * to match to allow it to run.
524 	 */
525 	TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
526 		/*
527 		 * Propagate our priority to any other waiters to prevent us
528 		 * from starving them.  They will have their original priority
529 		 * restored when they exit their epoch section.
530 		 */
531 		curwaittd = tdwait->et_td;
532 		if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) {
533 			critical_enter();
534 			thread_unlock(td);
535 			thread_lock(curwaittd);
536 			sched_prio(curwaittd, td->td_priority);
537 			thread_unlock(curwaittd);
538 			thread_lock(td);
539 			critical_exit();
540 		}
541 		if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
542 		    ((ts = curwaittd->td_blocked) != NULL)) {
543 			/*
544 			 * We unlock td to allow turnstile_wait() to reacquire
545 			 * the thread lock.  Before unlocking it we enter a
546 			 * critical section so that, once interrupts are
547 			 * re-enabled by dropping the thread lock, curwaittd
548 			 * does not get a chance to run on this CPU.
549 			 */
550 			critical_enter();
551 			thread_unlock(td);
552 
553 			if (turnstile_lock(ts, &lock, &owner)) {
554 				if (ts == curwaittd->td_blocked) {
555 					MPASS(TD_IS_INHIBITED(curwaittd) &&
556 					    TD_ON_LOCK(curwaittd));
557 					critical_exit();
558 					turnstile_wait(ts, owner,
559 					    curwaittd->td_tsqueue);
560 					counter_u64_add(turnstile_count, 1);
561 					thread_lock(td);
562 					return;
563 				}
564 				turnstile_unlock(ts, lock);
565 			}
566 			thread_lock(td);
567 			critical_exit();
568 			KASSERT(td->td_locks == locksheld,
569 			    ("%d extra locks held", td->td_locks - locksheld));
570 		}
571 	}
572 	/*
573 	 * We didn't find any threads actually blocked on a lock,
574 	 * so we have nothing to do except context switch away.
575 	 */
576 	counter_u64_add(switch_count, 1);
577 	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
578 
579 	/*
580 	 * Release the thread lock while yielding to
581 	 * allow other threads to acquire the lock
582 	 * pointed to by TDQ_LOCKPTR(td).  Otherwise a
583 	 * deadlock-like situation might happen. (HPS)
584 	 */
585 	thread_unlock(td);
586 	thread_lock(td);
587 }
588 
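/*
 * Wait for a grace period on a preemptible epoch: block until every thread
 * that was inside a section of this epoch when the call was made has left it.
 * The calling thread may be rebound to other CPUs and may context switch
 * while waiting, and Giant, if held, is dropped for the duration, so callers
 * must be able to sleep.
 */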
589 void
590 epoch_wait_preempt(epoch_t epoch)
591 {
592 	struct thread *td;
593 	int was_bound;
594 	int old_cpu;
595 	int old_pinned;
596 	u_char old_prio;
597 	int locks __unused;
598 
599 	MPASS(cold || epoch != NULL);
600 	INIT_CHECK(epoch);
601 	td = curthread;
602 #ifdef INVARIANTS
603 	locks = curthread->td_locks;
604 	MPASS(epoch->e_flags & EPOCH_PREEMPT);
605 	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
606 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
607 		    "epoch_wait() can be long running");
608 	KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
609 	    "of an epoch section of the same epoch"));
610 #endif
611 	thread_lock(td);
612 	DROP_GIANT();
613 
614 	old_cpu = PCPU_GET(cpuid);
615 	old_pinned = td->td_pinned;
616 	old_prio = td->td_priority;
617 	was_bound = sched_is_bound(td);
618 	sched_unbind(td);
619 	td->td_pinned = 0;
620 	sched_bind(td, old_cpu);
621 
622 	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
623 	    NULL);
624 
625 	/* restore CPU binding, if any */
626 	if (was_bound != 0) {
627 		sched_bind(td, old_cpu);
628 	} else {
629 		/* get thread back to initial CPU, if any */
630 		if (old_pinned != 0)
631 			sched_bind(td, old_cpu);
632 		sched_unbind(td);
633 	}
634 	/* restore pinned after bind */
635 	td->td_pinned = old_pinned;
636 
637 	/* restore thread priority */
638 	sched_prio(td, old_prio);
639 	thread_unlock(td);
640 	PICKUP_GIANT();
641 	KASSERT(td->td_locks == locks,
642 	    ("%d residual locks held", td->td_locks - locks));
643 }
644 
645 static void
646 epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
647     void *arg __unused)
648 {
649 	cpu_spinwait();
650 }
651 
652 void
653 epoch_wait(epoch_t epoch)
654 {
655 
656 	MPASS(cold || epoch != NULL);
657 	INIT_CHECK(epoch);
658 	MPASS(epoch->e_flags == 0);
659 	critical_enter();
660 	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
661 	critical_exit();
662 }
663 
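/*
 * Defer a callback until a grace period has elapsed.  The epoch_context is
 * meant to be embedded in the object being reclaimed; a deferred-free sketch
 * (obj, obj_ctx and obj_free_cb are hypothetical names):
 *
 *	epoch_call(epoch, &obj->obj_ctx, obj_free_cb);
 *
 * where obj_free_cb() uses __containerof() on its epoch_context argument to
 * recover and free the object.
 */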
664 void
665 epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t))
666 {
667 	epoch_record_t er;
668 	ck_epoch_entry_t *cb;
669 
670 	cb = (void *)ctx;
671 
672 	MPASS(callback);
673 	/* too early in boot to have epoch set up */
674 	if (__predict_false(epoch == NULL))
675 		goto boottime;
676 #if !defined(EARLY_AP_STARTUP)
677 	if (__predict_false(inited < 2))
678 		goto boottime;
679 #endif
680 
681 	critical_enter();
682 	*DPCPU_PTR(epoch_cb_count) += 1;
683 	er = epoch_currecord(epoch);
684 	ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
685 	critical_exit();
686 	return;
687 boottime:
688 	callback(ctx);
689 }
690 
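/*
 * Per-CPU grouptask handler: under global_epoch, poll this CPU's record for
 * every registered epoch, collect the callbacks whose grace period has
 * expired, and run them after leaving the epoch section.
 */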
691 static void
692 epoch_call_task(void *arg __unused)
693 {
694 	ck_stack_entry_t *cursor, *head, *next;
695 	ck_epoch_record_t *record;
696 	epoch_record_t er;
697 	epoch_t epoch;
698 	ck_stack_t cb_stack;
699 	int i, npending, total;
700 
701 	ck_stack_init(&cb_stack);
702 	critical_enter();
703 	epoch_enter(global_epoch);
704 	for (total = i = 0; i < epoch_count; i++) {
705 		if (__predict_false((epoch = allepochs[i]) == NULL))
706 			continue;
707 		er = epoch_currecord(epoch);
708 		record = &er->er_record;
709 		if ((npending = record->n_pending) == 0)
710 			continue;
711 		ck_epoch_poll_deferred(record, &cb_stack);
712 		total += npending - record->n_pending;
713 	}
714 	epoch_exit(global_epoch);
715 	*DPCPU_PTR(epoch_cb_count) -= total;
716 	critical_exit();
717 
718 	counter_u64_add(epoch_call_count, total);
719 	counter_u64_add(epoch_call_task_count, 1);
720 
721 	head = ck_stack_batch_pop_npsc(&cb_stack);
722 	for (cursor = head; cursor != NULL; cursor = next) {
723 		struct ck_epoch_entry *entry =
724 		    ck_epoch_entry_container(cursor);
725 
726 		next = CK_STACK_NEXT(cursor);
727 		entry->function(entry);
728 	}
729 }
730 
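/*
 * Return 1 if the current thread is inside a preemptible section of the
 * given epoch and 0 otherwise; with dump_onfail set (under INVARIANTS),
 * print this CPU's tracker list when the answer is no.
 */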
731 int
732 in_epoch_verbose(epoch_t epoch, int dump_onfail)
733 {
734 	struct epoch_tracker *tdwait;
735 	struct thread *td;
736 	epoch_record_t er;
737 
738 	td = curthread;
739 	if (td->td_epochnest == 0)
740 		return (0);
741 	if (__predict_false((epoch) == NULL))
742 		return (0);
743 	critical_enter();
744 	er = epoch_currecord(epoch);
745 	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
746 		if (tdwait->et_td == td) {
747 			critical_exit();
748 			return (1);
749 		}
750 #ifdef INVARIANTS
751 	if (dump_onfail) {
752 		MPASS(td->td_pinned);
753 		printf("cpu: %d id: %d\n", curcpu, td->td_tid);
754 		TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
755 			printf("td_tid: %d ", tdwait->et_td->td_tid);
756 		printf("\n");
757 	}
758 #endif
759 	critical_exit();
760 	return (0);
761 }
762 
763 int
764 in_epoch(epoch_t epoch)
765 {
766 	return (in_epoch_verbose(epoch, 0));
767 }
768 
769 static void
770 epoch_drain_cb(struct epoch_context *ctx)
771 {
772 	struct epoch *epoch =
773 	    __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;
774 
775 	if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
776 		mtx_lock(&epoch->e_drain_mtx);
777 		wakeup(epoch);
778 		mtx_unlock(&epoch->e_drain_mtx);
779 	}
780 }
781 
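/*
 * Wait until every callback deferred with epoch_call() on this epoch has
 * run.  The thread binds to each CPU in turn and schedules epoch_drain_cb()
 * as a sentinel on that CPU's record, then sleeps until all sentinels have
 * fired.
 */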
782 void
783 epoch_drain_callbacks(epoch_t epoch)
784 {
785 	epoch_record_t er;
786 	struct thread *td;
787 	int was_bound;
788 	int old_pinned;
789 	int old_cpu;
790 	int cpu;
791 
792 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
793 	    "epoch_drain_callbacks() may sleep!");
794 
795 	/* too early in boot to have epoch set up */
796 	if (__predict_false(epoch == NULL))
797 		return;
798 #if !defined(EARLY_AP_STARTUP)
799 	if (__predict_false(inited < 2))
800 		return;
801 #endif
802 	DROP_GIANT();
803 
804 	sx_xlock(&epoch->e_drain_sx);
805 	mtx_lock(&epoch->e_drain_mtx);
806 
807 	td = curthread;
808 	thread_lock(td);
809 	old_cpu = PCPU_GET(cpuid);
810 	old_pinned = td->td_pinned;
811 	was_bound = sched_is_bound(td);
812 	sched_unbind(td);
813 	td->td_pinned = 0;
814 
815 	CPU_FOREACH(cpu)
816 		epoch->e_drain_count++;
817 	CPU_FOREACH(cpu) {
818 		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
819 		sched_bind(td, cpu);
820 		epoch_call(epoch, &er->er_drain_ctx, &epoch_drain_cb);
821 	}
822 
823 	/* restore CPU binding, if any */
824 	if (was_bound != 0) {
825 		sched_bind(td, old_cpu);
826 	} else {
827 		/* get thread back to initial CPU, if any */
828 		if (old_pinned != 0)
829 			sched_bind(td, old_cpu);
830 		sched_unbind(td);
831 	}
832 	/* restore pinned after bind */
833 	td->td_pinned = old_pinned;
834 
835 	thread_unlock(td);
836 
837 	while (epoch->e_drain_count != 0)
838 		msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);
839 
840 	mtx_unlock(&epoch->e_drain_mtx);
841 	sx_xunlock(&epoch->e_drain_sx);
842 
843 	PICKUP_GIANT();
844 }
845 
846 void
847 epoch_thread_init(struct thread *td)
848 {
849 
850 	td->td_et = malloc(sizeof(struct epoch_tracker), M_EPOCH, M_WAITOK);
851 }
852 
853 void
854 epoch_thread_fini(struct thread *td)
855 {
856 
857 	free(td->td_et, M_EPOCH);
858 }
859