xref: /freebsd/sys/kern/subr_epoch.c (revision ccfd87fe2ac0e2e6aeb1911a7d7cce6712a8564f)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/counter.h>
35 #include <sys/epoch.h>
36 #include <sys/gtaskqueue.h>
37 #include <sys/kernel.h>
38 #include <sys/limits.h>
39 #include <sys/lock.h>
40 #include <sys/malloc.h>
41 #include <sys/mutex.h>
42 #include <sys/pcpu.h>
43 #include <sys/proc.h>
44 #include <sys/sched.h>
45 #include <sys/sx.h>
46 #include <sys/smp.h>
47 #include <sys/sysctl.h>
48 #include <sys/turnstile.h>
49 #ifdef EPOCH_TRACE
50 #include <machine/stdarg.h>
51 #include <sys/stack.h>
52 #include <sys/tree.h>
53 #endif
54 #include <vm/vm.h>
55 #include <vm/vm_extern.h>
56 #include <vm/vm_kern.h>
57 #include <vm/uma.h>
58 
59 #include <machine/stack.h>
60 
61 #include <ck_epoch.h>
62 
63 #ifdef __amd64__
64 #define EPOCH_ALIGN (CACHE_LINE_SIZE * 2)
65 #else
66 #define EPOCH_ALIGN CACHE_LINE_SIZE
67 #endif
68 
69 TAILQ_HEAD (epoch_tdlist, epoch_tracker);
70 typedef struct epoch_record {
71 	ck_epoch_record_t er_record;
72 	struct epoch_context er_drain_ctx;
73 	struct epoch *er_parent;
74 	volatile struct epoch_tdlist er_tdlist;
75 	volatile uint32_t er_gen;
76 	uint32_t er_cpuid;
77 #ifdef INVARIANTS
78 	/* Used to verify record ownership for non-preemptible epochs. */
79 	struct thread *er_td;
80 #endif
81 } __aligned(EPOCH_ALIGN)     *epoch_record_t;
82 
83 struct epoch {
84 	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
85 	epoch_record_t e_pcpu_record;
86 	int	e_in_use;
87 	int	e_flags;
88 	struct sx e_drain_sx;
89 	struct mtx e_drain_mtx;
90 	volatile int e_drain_count;
91 	const char *e_name;
92 };
93 
94 /* arbitrary --- needs benchmarking */
95 #define MAX_ADAPTIVE_SPIN 100
96 #define MAX_EPOCHS 64
97 
98 CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
99 SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
100     "epoch information");
101 SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
102     "epoch stats");
103 
104 /* Stats. */
105 static counter_u64_t block_count;
106 
107 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
108     &block_count, "# of times a thread was in an epoch when epoch_wait was called");
109 static counter_u64_t migrate_count;
110 
111 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
112     &migrate_count, "# of times a thread was migrated to another CPU in epoch_wait");
113 static counter_u64_t turnstile_count;
114 
115 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
116     &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
117 static counter_u64_t switch_count;
118 
119 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
120     &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
121 static counter_u64_t epoch_call_count;
122 
123 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
124     &epoch_call_count, "# of times a callback was deferred");
125 static counter_u64_t epoch_call_task_count;
126 
127 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
128     &epoch_call_task_count, "# of times a callback task was run");
129 
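/*
 * The counters above are exported under the kern.epoch.stats sysctl tree
 * declared earlier in this file, so they can be inspected from userland,
 * e.g. (output values elided):
 *
 *	$ sysctl kern.epoch.stats
 *	kern.epoch.stats.nblocked: ...
 *	kern.epoch.stats.migrations: ...
 *	kern.epoch.stats.ncontended: ...
 *	kern.epoch.stats.switches: ...
 *	kern.epoch.stats.epoch_calls: ...
 *	kern.epoch.stats.epoch_call_tasks: ...
 */
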
130 TAILQ_HEAD (threadlist, thread);
131 
132 CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
133     ck_epoch_entry_container)
134 
135 static struct epoch epoch_array[MAX_EPOCHS];
136 
137 DPCPU_DEFINE(struct grouptask, epoch_cb_task);
138 DPCPU_DEFINE(int, epoch_cb_count);
139 
140 static __read_mostly int inited;
141 __read_mostly epoch_t global_epoch;
142 __read_mostly epoch_t global_epoch_preempt;
143 
144 static void epoch_call_task(void *context __unused);
145 static uma_zone_t pcpu_zone_record;
146 
147 static struct sx epoch_sx;
148 
149 #define	EPOCH_LOCK() sx_xlock(&epoch_sx)
150 #define	EPOCH_UNLOCK() sx_xunlock(&epoch_sx)
151 
152 static epoch_record_t
153 epoch_currecord(epoch_t epoch)
154 {
155 
156 	return (zpcpu_get(epoch->e_pcpu_record));
157 }
158 
159 #ifdef EPOCH_TRACE
160 struct stackentry {
161 	RB_ENTRY(stackentry) se_node;
162 	struct stack se_stack;
163 };
164 
165 static int
166 stackentry_compare(struct stackentry *a, struct stackentry *b)
167 {
168 
169 	if (a->se_stack.depth > b->se_stack.depth)
170 		return (1);
171 	if (a->se_stack.depth < b->se_stack.depth)
172 		return (-1);
173 	for (int i = 0; i < a->se_stack.depth; i++) {
174 		if (a->se_stack.pcs[i] > b->se_stack.pcs[i])
175 			return (1);
176 		if (a->se_stack.pcs[i] < b->se_stack.pcs[i])
177 			return (-1);
178 	}
179 
180 	return (0);
181 }
182 
183 RB_HEAD(stacktree, stackentry) epoch_stacks = RB_INITIALIZER(&epoch_stacks);
184 RB_GENERATE_STATIC(stacktree, stackentry, se_node, stackentry_compare);
185 
186 static struct mtx epoch_stacks_lock;
187 MTX_SYSINIT(epochstacks, &epoch_stacks_lock, "epoch_stacks", MTX_DEF);
188 
189 static bool epoch_trace_stack_print = true;
190 SYSCTL_BOOL(_kern_epoch, OID_AUTO, trace_stack_print, CTLFLAG_RWTUN,
191     &epoch_trace_stack_print, 0, "Print stack traces on epoch reports");
192 
193 static void epoch_trace_report(const char *fmt, ...) __printflike(1, 2);
194 static inline void
195 epoch_trace_report(const char *fmt, ...)
196 {
197 	va_list ap;
198 	struct stackentry se, *new;
199 
200 	stack_save(&se.se_stack);
201 
202 	/* Tree is never reduced - go lockless. */
203 	if (RB_FIND(stacktree, &epoch_stacks, &se) != NULL)
204 		return;
205 
206 	new = malloc(sizeof(*new), M_STACK, M_NOWAIT);
207 	if (new != NULL) {
208 		bcopy(&se.se_stack, &new->se_stack, sizeof(struct stack));
209 
210 		mtx_lock(&epoch_stacks_lock);
211 		new = RB_INSERT(stacktree, &epoch_stacks, new);
212 		mtx_unlock(&epoch_stacks_lock);
213 		if (new != NULL)
214 			free(new, M_STACK);
215 	}
216 
217 	va_start(ap, fmt);
218 	(void)vprintf(fmt, ap);
219 	va_end(ap);
220 	if (epoch_trace_stack_print)
221 		stack_print_ddb(&se.se_stack);
222 }
223 
224 static inline void
225 epoch_trace_enter(struct thread *td, epoch_t epoch, epoch_tracker_t et,
226     const char *file, int line)
227 {
228 	epoch_tracker_t iet;
229 
230 	SLIST_FOREACH(iet, &td->td_epochs, et_tlink) {
231 		if (iet->et_epoch != epoch)
232 			continue;
233 		epoch_trace_report("Recursively entering epoch %s "
234 		    "at %s:%d, previously entered at %s:%d\n",
235 		    epoch->e_name, file, line,
236 		    iet->et_file, iet->et_line);
237 	}
238 	et->et_epoch = epoch;
239 	et->et_file = file;
240 	et->et_line = line;
241 	et->et_flags = 0;
242 	SLIST_INSERT_HEAD(&td->td_epochs, et, et_tlink);
243 }
244 
245 static inline void
246 epoch_trace_exit(struct thread *td, epoch_t epoch, epoch_tracker_t et,
247     const char *file, int line)
248 {
249 
250 	if (SLIST_FIRST(&td->td_epochs) != et) {
251 		epoch_trace_report("Exiting epoch %s out of nesting order "
252 		    "at %s:%d. Most recently entered %s at %s:%d\n",
253 		    epoch->e_name,
254 		    file, line,
255 		    SLIST_FIRST(&td->td_epochs)->et_epoch->e_name,
256 		    SLIST_FIRST(&td->td_epochs)->et_file,
257 		    SLIST_FIRST(&td->td_epochs)->et_line);
258 		/* This will panic if et is not anywhere on td_epochs. */
259 		SLIST_REMOVE(&td->td_epochs, et, epoch_tracker, et_tlink);
260 	} else
261 		SLIST_REMOVE_HEAD(&td->td_epochs, et_tlink);
262 	if (et->et_flags & ET_REPORT_EXIT)
263 		printf("Td %p exiting epoch %s at %s:%d\n", td, epoch->e_name,
264 		    file, line);
265 }
266 
267 /* Used by assertions that check thread state before going to sleep. */
268 void
269 epoch_trace_list(struct thread *td)
270 {
271 	epoch_tracker_t iet;
272 
273 	SLIST_FOREACH(iet, &td->td_epochs, et_tlink)
274 		printf("Epoch %s entered at %s:%d\n", iet->et_epoch->e_name,
275 		    iet->et_file, iet->et_line);
276 }
277 
278 void
279 epoch_where_report(epoch_t epoch)
280 {
281 	epoch_record_t er;
282 	struct epoch_tracker *tdwait;
283 
284 	MPASS(epoch != NULL);
285 	MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0);
286 	MPASS(!THREAD_CAN_SLEEP());
287 	critical_enter();
288 	er = epoch_currecord(epoch);
289 	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
290 		if (tdwait->et_td == curthread)
291 			break;
292 	critical_exit();
293 	if (tdwait != NULL) {
294 		tdwait->et_flags |= ET_REPORT_EXIT;
295 		printf("Td %p entered epoch %s at %s:%d\n", curthread,
296 		    epoch->e_name, tdwait->et_file, tdwait->et_line);
297 	}
298 }
299 #endif /* EPOCH_TRACE */
300 
301 static void
302 epoch_init(void *arg __unused)
303 {
304 	int cpu;
305 
306 	block_count = counter_u64_alloc(M_WAITOK);
307 	migrate_count = counter_u64_alloc(M_WAITOK);
308 	turnstile_count = counter_u64_alloc(M_WAITOK);
309 	switch_count = counter_u64_alloc(M_WAITOK);
310 	epoch_call_count = counter_u64_alloc(M_WAITOK);
311 	epoch_call_task_count = counter_u64_alloc(M_WAITOK);
312 
313 	pcpu_zone_record = uma_zcreate("epoch_record pcpu",
314 	    sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
315 	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
316 	CPU_FOREACH(cpu) {
317 		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
318 		    epoch_call_task, NULL);
319 		taskqgroup_attach_cpu(qgroup_softirq,
320 		    DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
321 		    "epoch call task");
322 	}
323 #ifdef EPOCH_TRACE
324 	SLIST_INIT(&thread0.td_epochs);
325 #endif
326 	sx_init(&epoch_sx, "epoch-sx");
327 	inited = 1;
328 	global_epoch = epoch_alloc("Global", 0);
329 	global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT);
330 }
331 SYSINIT(epoch, SI_SUB_EPOCH, SI_ORDER_FIRST, epoch_init, NULL);
332 
333 #if !defined(EARLY_AP_STARTUP)
334 static void
335 epoch_init_smp(void *dummy __unused)
336 {
337 	inited = 2;
338 }
339 SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
340 #endif
341 
342 static void
343 epoch_ctor(epoch_t epoch)
344 {
345 	epoch_record_t er;
346 	int cpu;
347 
348 	epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
349 	CPU_FOREACH(cpu) {
350 		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
351 		bzero(er, sizeof(*er));
352 		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
353 		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
354 		er->er_cpuid = cpu;
355 		er->er_parent = epoch;
356 	}
357 }
358 
359 static void
360 epoch_adjust_prio(struct thread *td, u_char prio)
361 {
362 
363 	thread_lock(td);
364 	sched_prio(td, prio);
365 	thread_unlock(td);
366 }
367 
368 epoch_t
369 epoch_alloc(const char *name, int flags)
370 {
371 	epoch_t epoch;
372 	int i;
373 
374 	MPASS(name != NULL);
375 
376 	if (__predict_false(!inited))
377 		panic("%s called too early in boot", __func__);
378 
379 	EPOCH_LOCK();
380 
381 	/*
382 	 * Find a free index in the epoch array.  If all entries are
383 	 * already in use, fail and return NULL.
384 	 */
385 	for (i = 0;; i++) {
386 		/*
387 		 * If too many epochs are currently allocated,
388 		 * return NULL.
389 		 */
390 		if (i == MAX_EPOCHS) {
391 			epoch = NULL;
392 			goto done;
393 		}
394 		if (epoch_array[i].e_in_use == 0)
395 			break;
396 	}
397 
398 	epoch = epoch_array + i;
399 	ck_epoch_init(&epoch->e_epoch);
400 	epoch_ctor(epoch);
401 	epoch->e_flags = flags;
402 	epoch->e_name = name;
403 	sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
404 	mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);
405 
406 	/*
407 	 * Set e_in_use last, because when this field is set the
408 	 * epoch_call_task() function will start scanning this epoch
409 	 * structure.
410 	 */
411 	atomic_store_rel_int(&epoch->e_in_use, 1);
412 done:
413 	EPOCH_UNLOCK();
414 	return (epoch);
415 }
416 
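/*
 * Illustrative usage sketch (not part of this file; "foo_epoch" is a
 * hypothetical consumer): an epoch is typically allocated once during
 * subsystem initialization and released with epoch_free() on teardown.
 *
 *	static epoch_t foo_epoch;
 *
 *	foo_epoch = epoch_alloc("foo", EPOCH_PREEMPT);
 *	...
 *	epoch_free(foo_epoch);
 *
 * Passing 0 instead of EPOCH_PREEMPT yields a non-preemptible epoch whose
 * sections run inside critical sections (see epoch_enter()/epoch_exit()).
 */
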
417 void
418 epoch_free(epoch_t epoch)
419 {
420 #ifdef INVARIANTS
421 	int cpu;
422 #endif
423 
424 	EPOCH_LOCK();
425 
426 	MPASS(epoch->e_in_use != 0);
427 
428 	epoch_drain_callbacks(epoch);
429 
430 	atomic_store_rel_int(&epoch->e_in_use, 0);
431 	/*
432 	 * Make sure the epoch_call_task() function sees e_in_use equal
433 	 * to zero, by calling epoch_wait() on the global_epoch:
434 	 */
435 	epoch_wait(global_epoch);
436 #ifdef INVARIANTS
437 	CPU_FOREACH(cpu) {
438 		epoch_record_t er;
439 
440 		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
441 
442 		/*
443 		 * Sanity check: none of the records should be in use anymore.
444 		 * We drained callbacks above and freeing the pcpu records is
445 		 * imminent.
446 		 */
447 		MPASS(er->er_td == NULL);
448 		MPASS(TAILQ_EMPTY(&er->er_tdlist));
449 	}
450 #endif
451 	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
452 	mtx_destroy(&epoch->e_drain_mtx);
453 	sx_destroy(&epoch->e_drain_sx);
454 	memset(epoch, 0, sizeof(*epoch));
455 
456 	EPOCH_UNLOCK();
457 }
458 
459 #define INIT_CHECK(epoch)					\
460 	do {							\
461 		if (__predict_false((epoch) == NULL))		\
462 			return;					\
463 	} while (0)
464 
465 void
466 _epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
467 {
468 	struct epoch_record *er;
469 	struct thread *td;
470 
471 	MPASS(cold || epoch != NULL);
472 	td = curthread;
473 	MPASS(kstack_contains(td, (vm_offset_t)et, sizeof(*et)));
474 
475 	INIT_CHECK(epoch);
476 	MPASS(epoch->e_flags & EPOCH_PREEMPT);
477 
478 #ifdef EPOCH_TRACE
479 	epoch_trace_enter(td, epoch, et, file, line);
480 #endif
481 	et->et_td = td;
482 	THREAD_NO_SLEEPING();
483 	critical_enter();
484 	sched_pin();
485 	et->et_old_priority = td->td_priority;
486 	er = epoch_currecord(epoch);
487 	/* Record-level tracking is reserved for non-preemptible epochs. */
488 	MPASS(er->er_td == NULL);
489 	TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
490 	ck_epoch_begin(&er->er_record, &et->et_section);
491 	critical_exit();
492 }
493 
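/*
 * Illustrative read-side sketch for a preemptible epoch (hypothetical
 * names: foo_epoch, foo_list, foo_inspect): the tracker must live on the
 * caller's stack, as the kstack_contains() assertion above enforces.
 *
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(foo_epoch, &et);
 *	CK_LIST_FOREACH(obj, &foo_list, foo_link)
 *		foo_inspect(obj);
 *	epoch_exit_preempt(foo_epoch, &et);
 *
 * A reader in such a section may be preempted and may take non-sleepable
 * locks, but must not sleep (THREAD_NO_SLEEPING() above asserts this).
 */
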
494 void
495 epoch_enter(epoch_t epoch)
496 {
497 	epoch_record_t er;
498 
499 	MPASS(cold || epoch != NULL);
500 	INIT_CHECK(epoch);
501 	critical_enter();
502 	er = epoch_currecord(epoch);
503 #ifdef INVARIANTS
504 	if (er->er_record.active == 0) {
505 		MPASS(er->er_td == NULL);
506 		er->er_td = curthread;
507 	} else {
508 		/* We've recursed, just make sure our accounting isn't wrong. */
509 		MPASS(er->er_td == curthread);
510 	}
511 #endif
512 	ck_epoch_begin(&er->er_record, NULL);
513 }
514 
515 void
516 _epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
517 {
518 	struct epoch_record *er;
519 	struct thread *td;
520 
521 	INIT_CHECK(epoch);
522 	td = curthread;
523 	critical_enter();
524 	sched_unpin();
525 	THREAD_SLEEPING_OK();
526 	er = epoch_currecord(epoch);
527 	MPASS(epoch->e_flags & EPOCH_PREEMPT);
528 	MPASS(et != NULL);
529 	MPASS(et->et_td == td);
530 #ifdef INVARIANTS
531 	et->et_td = (void*)0xDEADBEEF;
532 	/* Record-level tracking is reserved for non-preemptible epochs. */
533 	MPASS(er->er_td == NULL);
534 #endif
535 	ck_epoch_end(&er->er_record, &et->et_section);
536 	TAILQ_REMOVE(&er->er_tdlist, et, et_link);
537 	er->er_gen++;
538 	if (__predict_false(et->et_old_priority != td->td_priority))
539 		epoch_adjust_prio(td, et->et_old_priority);
540 	critical_exit();
541 #ifdef EPOCH_TRACE
542 	epoch_trace_exit(td, epoch, et, file, line);
543 #endif
544 }
545 
546 void
547 epoch_exit(epoch_t epoch)
548 {
549 	epoch_record_t er;
550 
551 	INIT_CHECK(epoch);
552 	er = epoch_currecord(epoch);
553 	ck_epoch_end(&er->er_record, NULL);
554 #ifdef INVARIANTS
555 	MPASS(er->er_td == curthread);
556 	if (er->er_record.active == 0)
557 		er->er_td = NULL;
558 #endif
559 	critical_exit();
560 }
561 
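/*
 * Illustrative sketch of a section under a non-preemptible epoch, i.e. one
 * allocated with flags 0 (hypothetical names again): no tracker is needed,
 * but the whole section runs inside a critical section, so it must be short
 * and must not block at all.
 *
 *	epoch_enter(foo_np_epoch);
 *	CK_SLIST_FOREACH(obj, &foo_list, foo_link)
 *		foo_inspect(obj);
 *	epoch_exit(foo_np_epoch);
 */
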
562 /*
563  * epoch_block_handler_preempt() is a callback invoked by the CK code when
564  * another thread is currently in an epoch section, blocking the grace period.
565  */
566 static void
567 epoch_block_handler_preempt(struct ck_epoch *global __unused,
568     ck_epoch_record_t *cr, void *arg __unused)
569 {
570 	epoch_record_t record;
571 	struct thread *td, *owner, *curwaittd;
572 	struct epoch_tracker *tdwait;
573 	struct turnstile *ts;
574 	struct lock_object *lock;
575 	int spincount, gen;
576 	int locksheld __unused;
577 
578 	record = __containerof(cr, struct epoch_record, er_record);
579 	td = curthread;
580 	locksheld = td->td_locks;
581 	spincount = 0;
582 	counter_u64_add(block_count, 1);
583 	/*
584 	 * We lost a race and there are no longer any threads
585 	 * on this CPU in an epoch section.
586 	 */
587 	if (TAILQ_EMPTY(&record->er_tdlist))
588 		return;
589 
590 	if (record->er_cpuid != curcpu) {
591 		/*
592 		 * If the head of the list is running, we can wait for it
593 		 * to remove itself from the list and thus save us the
594 		 * overhead of a migration.
595 		 */
596 		gen = record->er_gen;
597 		thread_unlock(td);
598 		/*
599 		 * We can't actually check if the waiting thread is running
600 		 * so we simply poll for it to exit before giving up and
601 		 * migrating.
602 		 */
603 		do {
604 			cpu_spinwait();
605 		} while (!TAILQ_EMPTY(&record->er_tdlist) &&
606 				 gen == record->er_gen &&
607 				 spincount++ < MAX_ADAPTIVE_SPIN);
608 		thread_lock(td);
609 		/*
610 		 * If the generation has changed we can poll again;
611 		 * otherwise we need to migrate.
612 		 */
613 		if (gen != record->er_gen)
614 			return;
615 		/*
616 		 * Being on the same CPU as the record we need to wait on
617 		 * gives us access to the thread list associated with that
618 		 * CPU.  We can then examine the oldest thread in the queue
619 		 * and wait on its turnstile until it resumes, and so on,
620 		 * until a grace period elapses.
621 		 */
624 		counter_u64_add(migrate_count, 1);
625 		sched_bind(td, record->er_cpuid);
626 		/*
627 		 * At this point we need to return to the ck code
628 		 * to scan to see if a grace period has elapsed.
629 		 * We can't move on to check the thread list, because
630 		 * in the meantime new threads may have arrived that
631 		 * in fact belong to a different epoch.
632 		 */
633 		return;
634 	}
635 	/*
636 	 * Try to find a thread in an epoch section on this CPU
637 	 * waiting on a turnstile. Otherwise find the lowest
638 	 * priority thread (highest prio value) and drop our priority
639 	 * to match to allow it to run.
640 	 */
641 	TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
642 		/*
643 		 * Propagate our priority to any other waiters to prevent us
644 		 * from starving them. They will have their original priority
645 		 * restored on exit from epoch_wait().
646 		 */
647 		curwaittd = tdwait->et_td;
648 		if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) {
649 			critical_enter();
650 			thread_unlock(td);
651 			thread_lock(curwaittd);
652 			sched_prio(curwaittd, td->td_priority);
653 			thread_unlock(curwaittd);
654 			thread_lock(td);
655 			critical_exit();
656 		}
657 		if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
658 		    ((ts = curwaittd->td_blocked) != NULL)) {
659 			/*
660 			 * We unlock td to allow turnstile_wait() to reacquire
661 			 * the thread lock.  Before unlocking it we enter a
662 			 * critical section so that we are not preempted once
663 			 * dropping the thread lock re-enables interrupts, which
664 			 * would otherwise allow curwaittd to run.
665 			 */
666 			critical_enter();
667 			thread_unlock(td);
668 
669 			if (turnstile_lock(ts, &lock, &owner)) {
670 				if (ts == curwaittd->td_blocked) {
671 					MPASS(TD_IS_INHIBITED(curwaittd) &&
672 					    TD_ON_LOCK(curwaittd));
673 					critical_exit();
674 					turnstile_wait(ts, owner,
675 					    curwaittd->td_tsqueue);
676 					counter_u64_add(turnstile_count, 1);
677 					thread_lock(td);
678 					return;
679 				}
680 				turnstile_unlock(ts, lock);
681 			}
682 			thread_lock(td);
683 			critical_exit();
684 			KASSERT(td->td_locks == locksheld,
685 			    ("%d extra locks held", td->td_locks - locksheld));
686 		}
687 	}
688 	/*
689 	 * We didn't find any threads actually blocked on a lock,
690 	 * so we have nothing to do except context switch away.
691 	 */
692 	counter_u64_add(switch_count, 1);
693 	mi_switch(SW_VOL | SWT_RELINQUISH);
694 	/*
695 	 * It is important that the thread lock is dropped while yielding
696 	 * so that other threads can acquire the lock pointed to by
697 	 * TDQ_LOCKPTR(td).  Currently mi_switch() will unlock the
698 	 * thread lock before returning; otherwise a deadlock-like
699 	 * situation could arise.
700 	 */
701 	thread_lock(td);
702 }
703 
704 void
705 epoch_wait_preempt(epoch_t epoch)
706 {
707 	struct thread *td;
708 	int was_bound;
709 	int old_cpu;
710 	int old_pinned;
711 	u_char old_prio;
712 	int locks __unused;
713 
714 	MPASS(cold || epoch != NULL);
715 	INIT_CHECK(epoch);
716 	td = curthread;
717 #ifdef INVARIANTS
718 	locks = curthread->td_locks;
719 	MPASS(epoch->e_flags & EPOCH_PREEMPT);
720 	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
721 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
722 		    "epoch_wait() can be long running");
723 	KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
724 	    "of an epoch section of the same epoch"));
725 #endif
726 	DROP_GIANT();
727 	thread_lock(td);
728 
729 	old_cpu = PCPU_GET(cpuid);
730 	old_pinned = td->td_pinned;
731 	old_prio = td->td_priority;
732 	was_bound = sched_is_bound(td);
733 	sched_unbind(td);
734 	td->td_pinned = 0;
735 	sched_bind(td, old_cpu);
736 
737 	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
738 	    NULL);
739 
740 	/* restore CPU binding, if any */
741 	if (was_bound != 0) {
742 		sched_bind(td, old_cpu);
743 	} else {
744 		/* get thread back to initial CPU, if any */
745 		if (old_pinned != 0)
746 			sched_bind(td, old_cpu);
747 		sched_unbind(td);
748 	}
749 	/* restore pinned after bind */
750 	td->td_pinned = old_pinned;
751 
752 	/* restore thread priority */
753 	sched_prio(td, old_prio);
754 	thread_unlock(td);
755 	PICKUP_GIANT();
756 	KASSERT(td->td_locks == locks,
757 	    ("%d residual locks held", td->td_locks - locks));
758 }
759 
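/*
 * Illustrative writer-side sketch pairing with the preemptible reader above
 * (hypothetical names): unlink the object under the writer lock so that no
 * new readers can find it, wait for all current readers to leave their
 * sections, then reclaim.
 *
 *	mtx_lock(&foo_lock);
 *	CK_LIST_REMOVE(obj, foo_link);
 *	mtx_unlock(&foo_lock);
 *	epoch_wait_preempt(foo_epoch);
 *	free(obj, M_FOO);
 *
 * When waiting synchronously is too expensive, the reclamation can instead
 * be deferred with epoch_call(), shown further below.
 */
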
760 static void
761 epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
762     void *arg __unused)
763 {
764 	cpu_spinwait();
765 }
766 
767 void
768 epoch_wait(epoch_t epoch)
769 {
770 
771 	MPASS(cold || epoch != NULL);
772 	INIT_CHECK(epoch);
773 	MPASS(epoch->e_flags == 0);
774 	critical_enter();
775 	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
776 	critical_exit();
777 }
778 
779 void
780 epoch_call(epoch_t epoch, epoch_callback_t callback, epoch_context_t ctx)
781 {
782 	epoch_record_t er;
783 	ck_epoch_entry_t *cb;
784 
785 	cb = (void *)ctx;
786 
787 	MPASS(callback);
788 	/* too early in boot to have epoch set up */
789 	if (__predict_false(epoch == NULL))
790 		goto boottime;
791 #if !defined(EARLY_AP_STARTUP)
792 	if (__predict_false(inited < 2))
793 		goto boottime;
794 #endif
795 
796 	critical_enter();
797 	*DPCPU_PTR(epoch_cb_count) += 1;
798 	er = epoch_currecord(epoch);
799 	ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
800 	critical_exit();
801 	return;
802 boottime:
803 	callback(ctx);
804 }
805 
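/*
 * Illustrative deferred-free sketch (hypothetical struct foo and names):
 * the object embeds a struct epoch_context and the callback recovers the
 * enclosing object with __containerof(), just as epoch_drain_cb() below
 * recovers its epoch_record.
 *
 *	struct foo {
 *		CK_LIST_ENTRY(foo) foo_link;
 *		struct epoch_context foo_epoch_ctx;
 *	};
 *
 *	static void
 *	foo_free_cb(epoch_context_t ctx)
 *	{
 *		free(__containerof(ctx, struct foo, foo_epoch_ctx), M_FOO);
 *	}
 *
 *	mtx_lock(&foo_lock);
 *	CK_LIST_REMOVE(obj, foo_link);
 *	mtx_unlock(&foo_lock);
 *	epoch_call(foo_epoch, foo_free_cb, &obj->foo_epoch_ctx);
 */
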
806 static void
807 epoch_call_task(void *arg __unused)
808 {
809 	ck_stack_entry_t *cursor, *head, *next;
810 	ck_epoch_record_t *record;
811 	epoch_record_t er;
812 	epoch_t epoch;
813 	ck_stack_t cb_stack;
814 	int i, npending, total;
815 
816 	ck_stack_init(&cb_stack);
817 	critical_enter();
818 	epoch_enter(global_epoch);
819 	for (total = i = 0; i != MAX_EPOCHS; i++) {
820 		epoch = epoch_array + i;
821 		if (__predict_false(
822 		    atomic_load_acq_int(&epoch->e_in_use) == 0))
823 			continue;
824 		er = epoch_currecord(epoch);
825 		record = &er->er_record;
826 		if ((npending = record->n_pending) == 0)
827 			continue;
828 		ck_epoch_poll_deferred(record, &cb_stack);
829 		total += npending - record->n_pending;
830 	}
831 	epoch_exit(global_epoch);
832 	*DPCPU_PTR(epoch_cb_count) -= total;
833 	critical_exit();
834 
835 	counter_u64_add(epoch_call_count, total);
836 	counter_u64_add(epoch_call_task_count, 1);
837 
838 	head = ck_stack_batch_pop_npsc(&cb_stack);
839 	for (cursor = head; cursor != NULL; cursor = next) {
840 		struct ck_epoch_entry *entry =
841 		    ck_epoch_entry_container(cursor);
842 
843 		next = CK_STACK_NEXT(cursor);
844 		entry->function(entry);
845 	}
846 }
847 
848 static int
849 in_epoch_verbose_preempt(epoch_t epoch, int dump_onfail)
850 {
851 	epoch_record_t er;
852 	struct epoch_tracker *tdwait;
853 	struct thread *td;
854 
855 	MPASS(epoch != NULL);
856 	MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0);
857 	td = curthread;
858 	if (THREAD_CAN_SLEEP())
859 		return (0);
860 	critical_enter();
861 	er = epoch_currecord(epoch);
862 	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
863 		if (tdwait->et_td == td) {
864 			critical_exit();
865 			return (1);
866 		}
867 #ifdef INVARIANTS
868 	if (dump_onfail) {
869 		MPASS(td->td_pinned);
870 		printf("cpu: %d id: %d\n", curcpu, td->td_tid);
871 		TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
872 			printf("td_tid: %d ", tdwait->et_td->td_tid);
873 		printf("\n");
874 	}
875 #endif
876 	critical_exit();
877 	return (0);
878 }
879 
880 #ifdef INVARIANTS
881 static void
882 epoch_assert_nocpu(epoch_t epoch, struct thread *td)
883 {
884 	epoch_record_t er;
885 	int cpu;
886 	bool crit;
887 
888 	crit = td->td_critnest > 0;
889 
890 	/* Check for a critical section mishap. */
891 	CPU_FOREACH(cpu) {
892 		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
893 		KASSERT(er->er_td != td,
894 		    ("%s critical section in epoch '%s', from cpu %d",
895 		    (crit ? "exited" : "re-entered"), epoch->e_name, cpu));
896 	}
897 }
898 #else
899 #define	epoch_assert_nocpu(e, td) do {} while (0)
900 #endif
901 
902 int
903 in_epoch_verbose(epoch_t epoch, int dump_onfail)
904 {
905 	epoch_record_t er;
906 	struct thread *td;
907 
908 	if (__predict_false((epoch) == NULL))
909 		return (0);
910 	if ((epoch->e_flags & EPOCH_PREEMPT) != 0)
911 		return (in_epoch_verbose_preempt(epoch, dump_onfail));
912 
913 	/*
914 	 * Being in a critical section is a necessary condition for
915 	 * correctly being inside a non-preemptible epoch, so if the
916 	 * thread is not in one, it is definitely not in this epoch.
917 	 */
918 	td = curthread;
919 	if (td->td_critnest == 0) {
920 		epoch_assert_nocpu(epoch, td);
921 		return (0);
922 	}
923 
924 	/*
925 	 * The current thread is in a critical section, so the epoch record will be
926 	 * stable for the rest of this function.  Knowing that the record is not
927 	 * active is sufficient for knowing whether we're in this epoch or not,
928 	 * since it's a pcpu record.
929 	 */
930 	er = epoch_currecord(epoch);
931 	if (er->er_record.active == 0) {
932 		epoch_assert_nocpu(epoch, td);
933 		return (0);
934 	}
935 
936 	MPASS(er->er_td == td);
937 	return (1);
938 }
939 
940 int
941 in_epoch(epoch_t epoch)
942 {
943 	return (in_epoch_verbose(epoch, 0));
944 }
945 
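/*
 * in_epoch() is mostly useful for assertions in code that requires its
 * callers to hold an epoch section, e.g. (hypothetical):
 *
 *	KASSERT(in_epoch(foo_epoch), ("%s: caller not in foo_epoch", __func__));
 */
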
946 static void
947 epoch_drain_cb(struct epoch_context *ctx)
948 {
949 	struct epoch *epoch =
950 	    __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;
951 
952 	if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
953 		mtx_lock(&epoch->e_drain_mtx);
954 		wakeup(epoch);
955 		mtx_unlock(&epoch->e_drain_mtx);
956 	}
957 }
958 
959 void
960 epoch_drain_callbacks(epoch_t epoch)
961 {
962 	epoch_record_t er;
963 	struct thread *td;
964 	int was_bound;
965 	int old_pinned;
966 	int old_cpu;
967 	int cpu;
968 
969 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
970 	    "epoch_drain_callbacks() may sleep!");
971 
972 	/* too early in boot to have epoch set up */
973 	if (__predict_false(epoch == NULL))
974 		return;
975 #if !defined(EARLY_AP_STARTUP)
976 	if (__predict_false(inited < 2))
977 		return;
978 #endif
979 	DROP_GIANT();
980 
981 	sx_xlock(&epoch->e_drain_sx);
982 	mtx_lock(&epoch->e_drain_mtx);
983 
984 	td = curthread;
985 	thread_lock(td);
986 	old_cpu = PCPU_GET(cpuid);
987 	old_pinned = td->td_pinned;
988 	was_bound = sched_is_bound(td);
989 	sched_unbind(td);
990 	td->td_pinned = 0;
991 
992 	CPU_FOREACH(cpu)
993 		epoch->e_drain_count++;
994 	CPU_FOREACH(cpu) {
995 		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
996 		sched_bind(td, cpu);
997 		epoch_call(epoch, &epoch_drain_cb, &er->er_drain_ctx);
998 	}
999 
1000 	/* restore CPU binding, if any */
1001 	if (was_bound != 0) {
1002 		sched_bind(td, old_cpu);
1003 	} else {
1004 		/* get thread back to initial CPU, if any */
1005 		if (old_pinned != 0)
1006 			sched_bind(td, old_cpu);
1007 		sched_unbind(td);
1008 	}
1009 	/* restore pinned after bind */
1010 	td->td_pinned = old_pinned;
1011 
1012 	thread_unlock(td);
1013 
1014 	while (epoch->e_drain_count != 0)
1015 		msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);
1016 
1017 	mtx_unlock(&epoch->e_drain_mtx);
1018 	sx_xunlock(&epoch->e_drain_sx);
1019 
1020 	PICKUP_GIANT();
1021 }
1022
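/*
 * Illustrative teardown sketch (hypothetical module): before code or data
 * that queued callbacks can go away, every pending callback must have run.
 * epoch_free() already drains as shown above, but a consumer of a shared
 * epoch (such as the global ones defined in this file) must drain
 * explicitly, e.g.:
 *
 *	case MOD_UNLOAD:
 *		foo_detach_all();
 *		epoch_drain_callbacks(global_epoch_preempt);
 *		break;
 */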