/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <ck_epoch.h>

static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 1000
#define MAX_EPOCHS 64

#ifdef __amd64__
#define EPOCH_ALIGN (CACHE_LINE_SIZE * 2)
#else
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif

CTASSERT(sizeof(epoch_section_t) == sizeof(ck_epoch_section_t));
CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");
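/*
 * A minimal usage sketch (illustrative only; struct foo, foo_find(),
 * foo_unlink(), and M_FOO are hypothetical). Non-preemptible sections run
 * inside a critical section, so readers must not sleep:
 *
 *	struct foo *f;
 *
 *	// reader
 *	epoch_enter(global_epoch);
 *	f = foo_find(key);
 *	... use f ...
 *	epoch_exit(global_epoch);
 *
 *	// writer: unlink, then wait for all readers before freeing
 *	foo_unlink(f);
 *	epoch_wait(global_epoch);
 *	free(f, M_FOO);
 */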
/* Stats. */
static counter_u64_t block_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");

TAILQ_HEAD(threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

typedef struct epoch_record {
	ck_epoch_record_t er_record;
	volatile struct threadlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
} *epoch_record_t;

struct epoch_pcpu_state {
	struct epoch_record eps_record;
} __aligned(EPOCH_ALIGN);

struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	struct epoch_pcpu_state *e_pcpu_dom[MAXMEMDOM] __aligned(EPOCH_ALIGN);
	int e_idx;
	int e_flags;
	struct epoch_pcpu_state *e_pcpu[0];
};

epoch_t allepochs[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int domcount[MAXMEMDOM];
static __read_mostly int domoffsets[MAXMEMDOM];
static __read_mostly int inited;
static __read_mostly int epoch_count;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);

#if defined(__powerpc64__) || defined(__powerpc__) || !defined(NUMA)
static bool usedomains = false;
#else
static bool usedomains = true;
#endif
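/*
 * Layout note (derived from the initialization code below): with NUMA
 * domains in use, each epoch's per-CPU records are carved out of one
 * contiguous per-domain allocation tracked in e_pcpu_dom[], while e_pcpu[]
 * is a flat array, indexed by curcpu, of pointers into those allocations;
 * domoffsets[] maps a domain to the first CPU id it covers. If any domain
 * reports zero CPUs, epoch_init() falls back to the legacy
 * single-allocation scheme.
 */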
static void
epoch_init(void *arg __unused)
{
	int domain, cpu;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	epoch_call_count = counter_u64_alloc(M_WAITOK);
	epoch_call_task_count = counter_u64_alloc(M_WAITOK);
	if (usedomains == false)
		goto done;
	domoffsets[0] = 0;
	for (domain = 0; domain < vm_ndomains; domain++) {
		domcount[domain] = CPU_COUNT(&cpuset_domain[domain]);
		if (bootverbose)
			printf("domcount[%d] %d\n", domain, domcount[domain]);
	}
	for (domain = 1; domain < vm_ndomains; domain++)
		domoffsets[domain] = domoffsets[domain - 1] + domcount[domain - 1];

	for (domain = 0; domain < vm_ndomains; domain++) {
		if (domcount[domain] == 0) {
			usedomains = false;
			break;
		}
	}
done:
	CPU_FOREACH(cpu) {
		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0, epoch_call_task, NULL);
		taskqgroup_attach_cpu(qgroup_softirq, DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1, "epoch call task");
	}
	inited = 1;
	global_epoch = epoch_alloc(0);
	global_epoch_preempt = epoch_alloc(EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
	inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif

static void
epoch_init_numa(epoch_t epoch)
{
	int domain, cpu_offset;
	struct epoch_pcpu_state *eps;
	epoch_record_t er;

	for (domain = 0; domain < vm_ndomains; domain++) {
		eps = malloc_domain(sizeof(*eps) * domcount[domain], M_EPOCH,
		    domain, M_ZERO | M_WAITOK);
		epoch->e_pcpu_dom[domain] = eps;
		cpu_offset = domoffsets[domain];
		for (int i = 0; i < domcount[domain]; i++, eps++) {
			epoch->e_pcpu[cpu_offset + i] = eps;
			er = &eps->eps_record;
			ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
			TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
			er->er_cpuid = cpu_offset + i;
		}
	}
}

static void
epoch_init_legacy(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	epoch_record_t er;

	eps = malloc(sizeof(*eps) * mp_ncpus, M_EPOCH, M_ZERO | M_WAITOK);
	epoch->e_pcpu_dom[0] = eps;
	for (int i = 0; i < mp_ncpus; i++, eps++) {
		epoch->e_pcpu[i] = eps;
		er = &eps->eps_record;
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		er->er_cpuid = i;
	}
}

epoch_t
epoch_alloc(int flags)
{
	epoch_t epoch;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);
	epoch = malloc(sizeof(struct epoch) + mp_ncpus * sizeof(void *),
	    M_EPOCH, M_ZERO | M_WAITOK);
	ck_epoch_init(&epoch->e_epoch);
	if (usedomains)
		epoch_init_numa(epoch);
	else
		epoch_init_legacy(epoch);
	MPASS(epoch_count < MAX_EPOCHS - 2);
	epoch->e_flags = flags;
	epoch->e_idx = epoch_count;
	allepochs[epoch_count++] = epoch;
	return (epoch);
}

void
epoch_free(epoch_t epoch)
{
	int domain;
#ifdef INVARIANTS
	struct epoch_pcpu_state *eps;
	int cpu;

	CPU_FOREACH(cpu) {
		eps = epoch->e_pcpu[cpu];
		MPASS(TAILQ_EMPTY(&eps->eps_record.er_tdlist));
	}
#endif
	allepochs[epoch->e_idx] = NULL;
	epoch_wait(global_epoch);
	if (usedomains)
		for (domain = 0; domain < vm_ndomains; domain++)
			free_domain(epoch->e_pcpu_dom[domain], M_EPOCH);
	else
		free(epoch->e_pcpu_dom[0], M_EPOCH);
	free(epoch, M_EPOCH);
}

#define INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)
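/*
 * Illustrative sketch of the allocate/free lifecycle (foo_epoch, foo_init(),
 * and foo_uninit() are hypothetical): a subsystem typically allocates one
 * epoch at initialization time and reclaims it at teardown.
 *
 *	static epoch_t foo_epoch;
 *
 *	static void
 *	foo_init(void *arg __unused)
 *	{
 *		foo_epoch = epoch_alloc(EPOCH_PREEMPT);
 *	}
 *
 *	static void
 *	foo_uninit(void *arg __unused)
 *	{
 *		epoch_free(foo_epoch);
 *	}
 */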
void
epoch_enter_preempt_internal(epoch_t epoch, struct thread *td)
{
	struct epoch_pcpu_state *eps;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	critical_enter();
	td->td_pre_epoch_prio = td->td_priority;
	eps = epoch->e_pcpu[curcpu];
#ifdef INVARIANTS
	MPASS(td->td_epochnest < UCHAR_MAX - 2);
	if (td->td_epochnest > 1) {
		struct thread *curtd;
		int found = 0;

		TAILQ_FOREACH(curtd, &eps->eps_record.er_tdlist, td_epochq)
			if (curtd == td)
				found = 1;
		KASSERT(found, ("recursing on a second epoch"));
		critical_exit();
		return;
	}
#endif
	TAILQ_INSERT_TAIL(&eps->eps_record.er_tdlist, td, td_epochq);
	sched_pin();
	ck_epoch_begin(&eps->eps_record.er_record, (ck_epoch_section_t *)&td->td_epoch_section);
	critical_exit();
}

void
epoch_enter(epoch_t epoch)
{
	ck_epoch_record_t *record;
	struct thread *td;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;

	critical_enter();
	td->td_epochnest++;
	record = &epoch->e_pcpu[curcpu]->eps_record.er_record;
	ck_epoch_begin(record, NULL);
}

void
epoch_exit_preempt_internal(epoch_t epoch, struct thread *td)
{
	struct epoch_pcpu_state *eps;

	MPASS(td->td_epochnest == 0);
	INIT_CHECK(epoch);
	critical_enter();
	eps = epoch->e_pcpu[curcpu];

	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	ck_epoch_end(&eps->eps_record.er_record, (ck_epoch_section_t *)&td->td_epoch_section);
	TAILQ_REMOVE(&eps->eps_record.er_tdlist, td, td_epochq);
	eps->eps_record.er_gen++;
	sched_unpin();
	if (__predict_false(td->td_pre_epoch_prio != td->td_priority)) {
		thread_lock(td);
		sched_prio(td, td->td_pre_epoch_prio);
		thread_unlock(td);
	}
	critical_exit();
}

void
epoch_exit(epoch_t epoch)
{
	ck_epoch_record_t *record;
	struct thread *td;

	INIT_CHECK(epoch);
	td = curthread;
	td->td_epochnest--;
	record = &epoch->e_pcpu[curcpu]->eps_record.er_record;
	ck_epoch_end(record, NULL);
	critical_exit();
}
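/*
 * Note on the pairing above (derived from the code): epoch_enter() leaves
 * its critical section open and epoch_exit() closes it, so a non-preemptible
 * section must neither sleep nor migrate. A preemptible section only pins
 * the thread. A read-side sketch, assuming the epoch_enter_preempt()/
 * epoch_exit_preempt() wrappers from sys/epoch.h and a hypothetical
 * foo_epoch-protected list:
 *
 *	struct foo *f;
 *
 *	epoch_enter_preempt(foo_epoch);
 *	TAILQ_FOREACH(f, &foo_list, f_link)
 *		if (f->f_key == key)
 *			break;
 *	epoch_exit_preempt(foo_epoch);
 */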
/*
 * epoch_block_handler_preempt is a callback from the ck code when another
 * thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused, ck_epoch_record_t *cr,
    void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *tdwait, *owner;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;
	int locksheld __unused;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	locksheld = td->td_locks;
	spincount = 0;
	counter_u64_add(block_count, 1);
	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		if ((tdwait = TAILQ_FIRST(&record->er_tdlist)) != NULL &&
		    TD_IS_RUNNING(tdwait)) {
			gen = record->er_gen;
			thread_unlock(td);
			do {
				cpu_spinwait();
			} while (tdwait == TAILQ_FIRST(&record->er_tdlist) &&
			    gen == record->er_gen && TD_IS_RUNNING(tdwait) &&
			    spincount++ < MAX_ADAPTIVE_SPIN);
			thread_lock(td);
			return;
		}
		/*
		 * Being on the same CPU as that of the record on which
		 * we need to wait allows us access to the thread
		 * list associated with that CPU. We can then examine the
		 * oldest thread in the queue and wait on its turnstile
		 * until it resumes, and so on until a grace period
		 * elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match it, to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, td_epochq) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them. They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		if (!TD_IS_INHIBITED(tdwait) && tdwait->td_priority > td->td_priority) {
			critical_enter();
			thread_unlock(td);
			thread_lock(tdwait);
			sched_prio(tdwait, td->td_priority);
			thread_unlock(tdwait);
			thread_lock(td);
			critical_exit();
		}
		if (TD_IS_INHIBITED(tdwait) && TD_ON_LOCK(tdwait) &&
		    ((ts = tdwait->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock. Before unlocking it we enter a
			 * critical section to prevent preemption after we
			 * reenable interrupts by dropping the thread lock, in
			 * order to prevent tdwait from getting to run.
			 */
			critical_enter();
			thread_unlock(td);
			owner = turnstile_lock(ts, &lock);
			/*
			 * A non-NULL owner pointer indicates that the lock
			 * succeeded. Only if we hold the lock and the
			 * turnstile we locked is still the one that tdwait is
			 * blocked on can we proceed. Otherwise the turnstile
			 * pointer has been changed out from underneath us, as
			 * in the case where the lock holder has signalled
			 * tdwait, and we need to move on.
			 */
			if (owner != NULL && ts == tdwait->td_blocked) {
				MPASS(TD_IS_INHIBITED(tdwait) && TD_ON_LOCK(tdwait));
				critical_exit();
				turnstile_wait(ts, owner, tdwait->td_tsqueue);
				counter_u64_add(turnstile_count, 1);
				thread_lock(td);
				return;
			} else if (owner != NULL)
				turnstile_unlock(ts, lock);
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == locksheld,
			    ("%d extra locks held", td->td_locks - locksheld));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock,
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td). Else a
	 * deadlock-like situation might happen. (HPS)
	 */
	thread_unlock(td);
	thread_lock(td);
}
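/*
 * Locking note (inferred from the thread_unlock()/thread_lock() pairing
 * above): the block handler is invoked from ck_epoch_synchronize_wait()
 * with the waiting thread's lock held, and every path through it (spin,
 * migrate, turnstile, switch) must return with that lock reacquired.
 */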
void
epoch_wait_preempt(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;
	int locks __unused;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;
#ifdef INVARIANTS
	locks = curthread->td_locks;
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "epoch_wait() can be long running");
	KASSERT(td->td_epochnest == 0, ("epoch_wait() in the middle of an epoch section"));
#endif
	thread_lock(td);
	DROP_GIANT();

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}

static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
	cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags == 0);
	critical_enter();
	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
	critical_exit();
}

void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback)(epoch_context_t))
{
	struct epoch_pcpu_state *eps;
	ck_epoch_entry_t *cb;

	cb = (void *)ctx;

	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		goto boottime;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		goto boottime;
#endif

	critical_enter();
	*DPCPU_PTR(epoch_cb_count) += 1;
	eps = epoch->e_pcpu[curcpu];
	ck_epoch_call(&eps->eps_record.er_record, cb, (ck_epoch_cb_t *)callback);
	critical_exit();
	return;
boottime:
	callback(ctx);
}
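/*
 * Deferred-free sketch (illustrative; struct foo, foo_epoch, and M_FOO are
 * hypothetical). The epoch_context must be embedded in the object so the
 * callback can recover the object with __containerof:
 *
 *	struct foo {
 *		...
 *		struct epoch_context f_epoch_ctx;
 *	};
 *
 *	static void
 *	foo_free_deferred(epoch_context_t ctx)
 *	{
 *		struct foo *f;
 *
 *		f = __containerof(ctx, struct foo, f_epoch_ctx);
 *		free(f, M_FOO);
 *	}
 *
 *	// writer, after unlinking f from all epoch-visible structures:
 *	epoch_call(foo_epoch, &f->f_epoch_ctx, foo_free_deferred);
 */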
static void
epoch_call_task(void *arg __unused)
{
	ck_stack_entry_t *cursor, *head, *next;
	ck_epoch_record_t *record;
	epoch_t epoch;
	ck_stack_t cb_stack;
	int i, npending, total;

	ck_stack_init(&cb_stack);
	critical_enter();
	epoch_enter(global_epoch);
	for (total = i = 0; i < epoch_count; i++) {
		if (__predict_false((epoch = allepochs[i]) == NULL))
			continue;
		record = &epoch->e_pcpu[curcpu]->eps_record.er_record;
		if ((npending = record->n_pending) == 0)
			continue;
		ck_epoch_poll_deferred(record, &cb_stack);
		total += npending - record->n_pending;
	}
	epoch_exit(global_epoch);
	*DPCPU_PTR(epoch_cb_count) -= total;
	critical_exit();

	counter_u64_add(epoch_call_count, total);
	counter_u64_add(epoch_call_task_count, 1);

	head = ck_stack_batch_pop_npsc(&cb_stack);
	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}
}

int
in_epoch(void)
{
	return (curthread->td_epochnest != 0);
}
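/*
 * in_epoch() is primarily useful in assertions that a caller is inside an
 * epoch section; an illustrative sketch (foo_list and its fields are
 * hypothetical):
 *
 *	static struct foo *
 *	foo_find(int key)
 *	{
 *		struct foo *f;
 *
 *		MPASS(in_epoch());
 *		TAILQ_FOREACH(f, &foo_list, f_link)
 *			if (f->f_key == key)
 *				return (f);
 *		return (NULL);
 *	}
 */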