/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <ck_epoch.h>

static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 1000
#define MAX_EPOCHS 64

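/*
 * On amd64 the per-CPU state is padded to two cache lines; the assumption
 * (not stated in the original code) is that this keeps the adjacent-line
 * hardware prefetcher from inducing false sharing between neighbouring
 * records.
 */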
#ifdef __amd64__
#define EPOCH_ALIGN (CACHE_LINE_SIZE * 2)
#else
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif

CTASSERT(sizeof(epoch_section_t) == sizeof(ck_epoch_section_t));
CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");

/* Stats. */
static counter_u64_t block_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
				   &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
				   &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
				   &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
				   &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
				   &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
				   &epoch_call_task_count, "# of times a callback task was run");

TAILQ_HEAD(threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

typedef struct epoch_record {
	ck_epoch_record_t er_record;
	volatile struct threadlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
} *epoch_record_t;

struct epoch_pcpu_state {
	struct epoch_record eps_record;
} __aligned(EPOCH_ALIGN);

struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	struct epoch_pcpu_state *e_pcpu_dom[MAXMEMDOM] __aligned(EPOCH_ALIGN);
	int e_idx;
	int e_flags;
	struct epoch_pcpu_state *e_pcpu[0];
};

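/*
 * Registry of all allocated epochs; epoch_call_task() scans it under
 * global_epoch, and epoch_free() NULLs the corresponding slot before
 * waiting out a grace period.
 */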
epoch_t allepochs[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int domcount[MAXMEMDOM];
static __read_mostly int domoffsets[MAXMEMDOM];
static __read_mostly int inited;
static __read_mostly int epoch_count;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);

#if defined(__powerpc64__) || defined(__powerpc__) || !defined(NUMA)
static bool usedomains = false;
#else
static bool usedomains = true;
#endif
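/*
 * Boot-time initialization, run once taskqueues are available: allocate the
 * statistics counters, compute per-domain CPU counts and offsets for the
 * NUMA-aware layout (falling back to the legacy layout if any domain turns
 * out to be empty), attach a per-CPU callback task, and allocate the two
 * global epochs.
 */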
static void
epoch_init(void *arg __unused)
{
	int domain, cpu;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	epoch_call_count = counter_u64_alloc(M_WAITOK);
	epoch_call_task_count = counter_u64_alloc(M_WAITOK);
	if (usedomains == false)
		goto done;
	domain = 0;
	domoffsets[0] = 0;
	for (domain = 0; domain < vm_ndomains; domain++) {
		domcount[domain] = CPU_COUNT(&cpuset_domain[domain]);
		if (bootverbose)
			printf("domcount[%d] %d\n", domain, domcount[domain]);
	}
	for (domain = 1; domain < vm_ndomains; domain++)
		domoffsets[domain] = domoffsets[domain-1] + domcount[domain-1];

	for (domain = 0; domain < vm_ndomains; domain++) {
		if (domcount[domain] == 0) {
			usedomains = false;
			break;
		}
	}
 done:
	CPU_FOREACH(cpu) {
		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0, epoch_call_task, NULL);
		taskqgroup_attach_cpu(qgroup_softirq, DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1, "epoch call task");
	}
	inited = 1;
	global_epoch = epoch_alloc(0);
	global_epoch_preempt = epoch_alloc(EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);

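/*
 * Lay out per-CPU state NUMA-aware: for each memory domain, allocate the
 * epoch_pcpu_state array from domain-local memory and register one ck
 * record per CPU in that domain.
 */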
static void
epoch_init_numa(epoch_t epoch)
{
	int domain, cpu_offset;
	struct epoch_pcpu_state *eps;
	epoch_record_t er;

	for (domain = 0; domain < vm_ndomains; domain++) {
		eps = malloc_domain(sizeof(*eps)*domcount[domain], M_EPOCH,
							domain, M_ZERO|M_WAITOK);
		epoch->e_pcpu_dom[domain] = eps;
		cpu_offset = domoffsets[domain];
		for (int i = 0; i < domcount[domain]; i++, eps++) {
			epoch->e_pcpu[cpu_offset + i] = eps;
			er = &eps->eps_record;
			ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
			TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
			er->er_cpuid = cpu_offset + i;
		}
	}
}

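/*
 * Non-NUMA fallback: allocate all per-CPU records from a single array.
 */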
static void
epoch_init_legacy(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	epoch_record_t er;

	eps = malloc(sizeof(*eps)*mp_ncpus, M_EPOCH, M_ZERO|M_WAITOK);
	epoch->e_pcpu_dom[0] = eps;
	for (int i = 0; i < mp_ncpus; i++, eps++) {
		epoch->e_pcpu[i] = eps;
		er = &eps->eps_record;
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		er->er_cpuid = i;
	}
}

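/*
 * Allocate a new epoch.  A minimal consumer-side sketch, where the "foo"
 * object, its lockless lookup, and its teardown are hypothetical (see
 * sys/sys/epoch.h for the exact wrapper names in this revision):
 *
 *	epoch_t foo_epoch = epoch_alloc(EPOCH_PREEMPT);
 *
 *	Reader:
 *		epoch_enter_preempt(foo_epoch);
 *		foo = foo_lookup(key);
 *		... use foo ...
 *		epoch_exit_preempt(foo_epoch);
 *
 *	Writer, after unlinking foo from its list:
 *		epoch_wait_preempt(foo_epoch);
 *		free(foo, M_FOO);
 *	or defer the free with epoch_call() instead of blocking.
 */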
epoch_t
epoch_alloc(int flags)
{
	epoch_t epoch;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);
	epoch = malloc(sizeof(struct epoch) + mp_ncpus*sizeof(void*),
				   M_EPOCH, M_ZERO|M_WAITOK);
	ck_epoch_init(&epoch->e_epoch);
	if (usedomains)
		epoch_init_numa(epoch);
	else
		epoch_init_legacy(epoch);
	MPASS(epoch_count < MAX_EPOCHS-2);
	epoch->e_flags = flags;
	epoch->e_idx = epoch_count;
	allepochs[epoch_count++] = epoch;
	return (epoch);
}

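/*
 * Free an epoch.  The caller must guarantee that no threads are still in a
 * section and that no further callbacks will be scheduled; the epoch is
 * removed from the registry and a grace period on global_epoch ensures that
 * epoch_call_task() can no longer observe it before the memory is freed.
 */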
void
epoch_free(epoch_t epoch)
{
	int domain;
#ifdef INVARIANTS
	struct epoch_pcpu_state *eps;
	int cpu;

	CPU_FOREACH(cpu) {
		eps = epoch->e_pcpu[cpu];
		MPASS(TAILQ_EMPTY(&eps->eps_record.er_tdlist));
	}
#endif
	allepochs[epoch->e_idx] = NULL;
	epoch_wait(global_epoch);
	if (usedomains)
		for (domain = 0; domain < vm_ndomains; domain++)
			free_domain(epoch->e_pcpu_dom[domain], M_EPOCH);
	else
		free(epoch->e_pcpu_dom[0], M_EPOCH);
	free(epoch, M_EPOCH);
}

#define INIT_CHECK(epoch)								\
	do {											\
		if (__predict_false((epoch) == NULL))		\
			return;									\
	} while (0)

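/*
 * Preemptible epoch entry (out-of-line path): record the thread on this
 * CPU's list of section holders, pin it so it cannot migrate, and open a ck
 * epoch section using the per-thread section cookie.
 */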
void
epoch_enter_preempt_internal(epoch_t epoch, struct thread *td)
{
	struct epoch_pcpu_state *eps;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	critical_enter();
	td->td_pre_epoch_prio = td->td_priority;
	eps = epoch->e_pcpu[curcpu];
#ifdef INVARIANTS
	MPASS(td->td_epochnest < UCHAR_MAX - 2);
	if (td->td_epochnest > 1) {
		struct thread *curtd;
		int found = 0;

		TAILQ_FOREACH(curtd, &eps->eps_record.er_tdlist, td_epochq)
			if (curtd == td)
				found = 1;
		KASSERT(found, ("recursing on a second epoch"));
		critical_exit();
		return;
	}
#endif
	TAILQ_INSERT_TAIL(&eps->eps_record.er_tdlist, td, td_epochq);
	sched_pin();
	ck_epoch_begin(&eps->eps_record.er_record, (ck_epoch_section_t*)&td->td_epoch_section);
	critical_exit();
}

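/*
 * Non-preemptible epoch entry: enter a critical section (held until the
 * matching epoch_exit()) and begin a ck section.  The per-thread section
 * cookie is only passed for the outermost entry; nested entries pass NULL.
 */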
void
epoch_enter(epoch_t epoch)
{
	ck_epoch_record_t *record;
	ck_epoch_section_t *section;
	struct thread *td;

	MPASS(cold || epoch != NULL);
	section = NULL;
	td = curthread;
	critical_enter();
	if (__predict_true(td->td_epochnest++ == 0))
		section = (ck_epoch_section_t*)&td->td_epoch_section;

	record = &epoch->e_pcpu[curcpu]->eps_record.er_record;
	ck_epoch_begin(record, section);
}

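/*
 * Preemptible epoch exit (out-of-line path): close the ck section, take the
 * thread off this CPU's list, unpin it, and restore the priority it had
 * before entering the section if it was boosted in the meantime.
 */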
void
epoch_exit_preempt_internal(epoch_t epoch, struct thread *td)
{
	struct epoch_pcpu_state *eps;

	MPASS(td->td_epochnest == 0);
	INIT_CHECK(epoch);
	critical_enter();
	eps = epoch->e_pcpu[curcpu];

	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	ck_epoch_end(&eps->eps_record.er_record, (ck_epoch_section_t*)&td->td_epoch_section);
	TAILQ_REMOVE(&eps->eps_record.er_tdlist, td, td_epochq);
	eps->eps_record.er_gen++;
	sched_unpin();
	if (__predict_false(td->td_pre_epoch_prio != td->td_priority)) {
		thread_lock(td);
		sched_prio(td, td->td_pre_epoch_prio);
		thread_unlock(td);
	}
	critical_exit();
}

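/*
 * Non-preemptible epoch exit: end the ck section and leave the critical
 * section entered by epoch_enter().
 */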
void
epoch_exit(epoch_t epoch)
{
	ck_epoch_record_t *record;
	ck_epoch_section_t *section;
	struct thread *td;

	section = NULL;
	td = curthread;
	MPASS(td->td_critnest);
	if (__predict_true(td->td_epochnest-- == 1))
		section = (ck_epoch_section_t*)&td->td_epoch_section;
	record = &epoch->e_pcpu[curcpu]->eps_record.er_record;
	ck_epoch_end(record, section);
	critical_exit();
}

/*
 * epoch_block_handler_preempt() is the callback invoked by the ck code when
 * another thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused, ck_epoch_record_t *cr,
					void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *tdwait, *owner;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	spincount = 0;
	counter_u64_add(block_count, 1);
	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		if ((tdwait = TAILQ_FIRST(&record->er_tdlist)) != NULL &&
			TD_IS_RUNNING(tdwait)) {
			gen = record->er_gen;
			thread_unlock(td);
			do {
				cpu_spinwait();
			} while (tdwait == TAILQ_FIRST(&record->er_tdlist) &&
					 gen == record->er_gen && TD_IS_RUNNING(tdwait) &&
					 spincount++ < MAX_ADAPTIVE_SPIN);
			thread_lock(td);
			return;
		}

		/*
		 * Being on the same CPU as the record we need to wait on gives
		 * us access to the thread list associated with that CPU.  We
		 * can then examine the oldest thread in the queue and wait on
		 * its turnstile until it resumes, and so on, until a grace
		 * period elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code to check
		 * whether a grace period has elapsed.  We can't move on to
		 * examine the thread list, because in the meantime new
		 * threads may have arrived that in fact belong to a
		 * different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU that is
	 * blocked on a turnstile so we can sleep on it.  Along the way, lend
	 * our priority to any lower-priority thread in the section so that
	 * it can run and exit the section.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, td_epochq) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them.  They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		if (!TD_IS_INHIBITED(tdwait) && tdwait->td_priority > td->td_priority) {
			critical_enter();
			thread_unlock(td);
			thread_lock(tdwait);
			sched_prio(tdwait, td->td_priority);
			thread_unlock(tdwait);
			thread_lock(td);
			critical_exit();
		}
		if (TD_IS_INHIBITED(tdwait) && TD_ON_LOCK(tdwait) &&
			((ts = tdwait->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait() to reacquire
			 * the thread lock.  Before unlocking it we enter a
			 * critical section so that we are not preempted (and
			 * tdwait does not get to run) once dropping the
			 * thread lock re-enables interrupts.
			 */
			critical_enter();
			thread_unlock(td);
			owner = turnstile_lock(ts, &lock);
			/*
			 * A non-NULL owner indicates that the turnstile lock
			 * succeeded.  We can only proceed to turnstile_wait()
			 * if we hold that lock and the turnstile we locked is
			 * still the one tdwait is blocked on; otherwise the
			 * turnstile has been changed out from underneath us
			 * (e.g. the lock holder has already signalled tdwait)
			 * and we must move on.
			 */
			if (owner != NULL && ts == tdwait->td_blocked) {
				MPASS(TD_IS_INHIBITED(tdwait) && TD_ON_LOCK(tdwait));
				critical_exit();
				turnstile_wait(ts, owner, tdwait->td_tsqueue);
				counter_u64_add(turnstile_count, 1);
				thread_lock(td);
				return;
			} else if (owner != NULL)
				turnstile_unlock(ts, lock);
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == 0,
					("%d locks held", td->td_locks));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock, so we
	 * have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td). Else a
	 * deadlock-like situation might happen. (HPS)
	 */
	thread_unlock(td);
	thread_lock(td);
}

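/*
 * Wait until a grace period has elapsed in a preemptible epoch, i.e. every
 * thread that was in a section when we started has left it.  May sleep, and
 * may temporarily bind the calling thread to other CPUs; Giant is dropped
 * and reacquired around the wait.
 */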
void
epoch_wait_preempt(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;
#ifdef INVARIANTS
	int locks;

	locks = curthread->td_locks;
#endif

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);

	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "epoch_wait_preempt() can sleep");

	td = curthread;
	KASSERT(td->td_epochnest == 0,
	    ("epoch_wait_preempt() in the middle of an epoch section"));
	thread_lock(td);

	DROP_GIANT();

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
			("%d residual locks held", td->td_locks - locks));
}

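/*
 * Block handler for non-preemptible epochs: readers are in critical
 * sections and cannot sleep, so all we can usefully do is spin until they
 * leave.
 */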
static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
					void *arg __unused)
{
	cpu_spinwait();
}

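/*
 * Wait until a grace period has elapsed in a non-preemptible epoch.  Spins
 * rather than sleeps, so it is only valid for epochs allocated without
 * EPOCH_PREEMPT.
 */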
void
epoch_wait(epoch_t epoch)
{

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags == 0);
	critical_enter();
	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
	critical_exit();
}

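/*
 * Defer a callback until a grace period has elapsed; it will be run from
 * the per-CPU epoch_call_task.  The epoch_context is expected to be
 * embedded in the object being reclaimed, e.g. (a hypothetical consumer
 * sketch):
 *
 *	struct foo {
 *		...
 *		struct epoch_context foo_epoch_ctx;
 *	};
 *
 *	static void
 *	foo_destroy(epoch_context_t ctx)
 *	{
 *		struct foo *f = __containerof(ctx, struct foo, foo_epoch_ctx);
 *
 *		free(f, M_FOO);
 *	}
 *
 *	epoch_call(foo_epoch, &f->foo_epoch_ctx, foo_destroy);
 */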
void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t))
{
	struct epoch_pcpu_state *eps;
	ck_epoch_entry_t *cb;

	cb = (void *)ctx;

	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		goto boottime;

	critical_enter();
	*DPCPU_PTR(epoch_cb_count) += 1;
	eps = epoch->e_pcpu[curcpu];
	ck_epoch_call(&eps->eps_record.er_record, cb, (ck_epoch_cb_t*)callback);
	critical_exit();
	return;
 boottime:
	callback(ctx);
}

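/*
 * Per-CPU GROUPTASK handler: poll every registered epoch's record for this
 * CPU, collect the callbacks whose grace period has expired, update the
 * pending count and statistics, and finally run the callbacks outside of
 * the critical section.
 */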
static void
epoch_call_task(void *arg __unused)
{
	ck_stack_entry_t *cursor, *head, *next;
	ck_epoch_record_t *record;
	epoch_t epoch;
	ck_stack_t cb_stack;
	int i, npending, total;

	ck_stack_init(&cb_stack);
	critical_enter();
	epoch_enter(global_epoch);
	for (total = i = 0; i < epoch_count; i++) {
		if (__predict_false((epoch = allepochs[i]) == NULL))
			continue;
		record = &epoch->e_pcpu[curcpu]->eps_record.er_record;
		if ((npending = record->n_pending) == 0)
			continue;
		ck_epoch_poll_deferred(record, &cb_stack);
		total += npending - record->n_pending;
	}
	epoch_exit(global_epoch);
	*DPCPU_PTR(epoch_cb_count) -= total;
	critical_exit();

	counter_u64_add(epoch_call_count, total);
	counter_u64_add(epoch_call_task_count, 1);

	head = ck_stack_batch_pop_npsc(&cb_stack);
	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);
		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}
}

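/*
 * Return non-zero if the current thread is in an epoch section of any epoch.
 */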
int
in_epoch(void)
{
	return (curthread->td_epochnest != 0);
}