1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/counter.h> 35 #include <sys/epoch.h> 36 #include <sys/gtaskqueue.h> 37 #include <sys/kernel.h> 38 #include <sys/limits.h> 39 #include <sys/lock.h> 40 #include <sys/malloc.h> 41 #include <sys/mutex.h> 42 #include <sys/pcpu.h> 43 #include <sys/proc.h> 44 #include <sys/sched.h> 45 #include <sys/sx.h> 46 #include <sys/smp.h> 47 #include <sys/sysctl.h> 48 #include <sys/turnstile.h> 49 #ifdef EPOCH_TRACE 50 #include <machine/stdarg.h> 51 #include <sys/stack.h> 52 #include <sys/tree.h> 53 #endif 54 #include <vm/vm.h> 55 #include <vm/vm_extern.h> 56 #include <vm/vm_kern.h> 57 #include <vm/uma.h> 58 59 #include <ck_epoch.h> 60 61 static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation"); 62 63 #ifdef __amd64__ 64 #define EPOCH_ALIGN CACHE_LINE_SIZE*2 65 #else 66 #define EPOCH_ALIGN CACHE_LINE_SIZE 67 #endif 68 69 TAILQ_HEAD (epoch_tdlist, epoch_tracker); 70 typedef struct epoch_record { 71 ck_epoch_record_t er_record; 72 struct epoch_context er_drain_ctx; 73 struct epoch *er_parent; 74 volatile struct epoch_tdlist er_tdlist; 75 volatile uint32_t er_gen; 76 uint32_t er_cpuid; 77 } __aligned(EPOCH_ALIGN) *epoch_record_t; 78 79 struct epoch { 80 struct ck_epoch e_epoch __aligned(EPOCH_ALIGN); 81 epoch_record_t e_pcpu_record; 82 int e_idx; 83 int e_flags; 84 struct sx e_drain_sx; 85 struct mtx e_drain_mtx; 86 volatile int e_drain_count; 87 const char *e_name; 88 }; 89 90 /* arbitrary --- needs benchmarking */ 91 #define MAX_ADAPTIVE_SPIN 100 92 #define MAX_EPOCHS 64 93 94 CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context)); 95 SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 96 "epoch information"); 97 SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 98 "epoch stats"); 99 100 /* Stats. */ 101 static counter_u64_t block_count; 102 103 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW, 104 &block_count, "# of times a thread was in an epoch when epoch_wait was called"); 105 static counter_u64_t migrate_count; 106 107 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW, 108 &migrate_count, "# of times thread was migrated to another CPU in epoch_wait"); 109 static counter_u64_t turnstile_count; 110 111 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW, 112 &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait"); 113 static counter_u64_t switch_count; 114 115 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW, 116 &switch_count, "# of times a thread voluntarily context switched in epoch_wait"); 117 static counter_u64_t epoch_call_count; 118 119 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW, 120 &epoch_call_count, "# of times a callback was deferred"); 121 static counter_u64_t epoch_call_task_count; 122 123 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW, 124 &epoch_call_task_count, "# of times a callback task was run"); 125 126 TAILQ_HEAD (threadlist, thread); 127 128 CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry, 129 ck_epoch_entry_container) 130 131 epoch_t allepochs[MAX_EPOCHS]; 132 133 DPCPU_DEFINE(struct grouptask, epoch_cb_task); 134 DPCPU_DEFINE(int, epoch_cb_count); 135 136 static __read_mostly int inited; 137 static __read_mostly int epoch_count; 138 __read_mostly epoch_t global_epoch; 139 __read_mostly epoch_t global_epoch_preempt; 140 141 static void epoch_call_task(void *context __unused); 142 static uma_zone_t pcpu_zone_record; 143 144 #ifdef EPOCH_TRACE 145 struct stackentry { 146 RB_ENTRY(stackentry) se_node; 147 struct stack se_stack; 148 }; 149 150 static int 151 stackentry_compare(struct stackentry *a, struct stackentry *b) 152 { 153 154 if (a->se_stack.depth > b->se_stack.depth) 155 return (1); 156 if (a->se_stack.depth < b->se_stack.depth) 157 return (-1); 158 for (int i = 0; i < a->se_stack.depth; i++) { 159 if (a->se_stack.pcs[i] > b->se_stack.pcs[i]) 160 return (1); 161 if (a->se_stack.pcs[i] < b->se_stack.pcs[i]) 162 return (-1); 163 } 164 165 return (0); 166 } 167 168 RB_HEAD(stacktree, stackentry) epoch_stacks = RB_INITIALIZER(&epoch_stacks); 169 RB_GENERATE_STATIC(stacktree, stackentry, se_node, stackentry_compare); 170 171 static struct mtx epoch_stacks_lock; 172 MTX_SYSINIT(epochstacks, &epoch_stacks_lock, "epoch_stacks", MTX_DEF); 173 174 static bool epoch_trace_stack_print = true; 175 SYSCTL_BOOL(_kern_epoch, OID_AUTO, trace_stack_print, CTLFLAG_RWTUN, 176 &epoch_trace_stack_print, 0, "Print stack traces on epoch reports"); 177 178 static void epoch_trace_report(const char *fmt, ...) __printflike(1, 2); 179 static inline void 180 epoch_trace_report(const char *fmt, ...) 181 { 182 va_list ap; 183 struct stackentry se, *new; 184 185 stack_zero(&se.se_stack); /* XXX: is it really needed? */ 186 stack_save(&se.se_stack); 187 188 /* Tree is never reduced - go lockless. */ 189 if (RB_FIND(stacktree, &epoch_stacks, &se) != NULL) 190 return; 191 192 new = malloc(sizeof(*new), M_STACK, M_NOWAIT); 193 if (new != NULL) { 194 bcopy(&se.se_stack, &new->se_stack, sizeof(struct stack)); 195 196 mtx_lock(&epoch_stacks_lock); 197 new = RB_INSERT(stacktree, &epoch_stacks, new); 198 mtx_unlock(&epoch_stacks_lock); 199 if (new != NULL) 200 free(new, M_STACK); 201 } 202 203 va_start(ap, fmt); 204 (void)vprintf(fmt, ap); 205 va_end(ap); 206 if (epoch_trace_stack_print) 207 stack_print_ddb(&se.se_stack); 208 } 209 210 static inline void 211 epoch_trace_enter(struct thread *td, epoch_t epoch, epoch_tracker_t et, 212 const char *file, int line) 213 { 214 epoch_tracker_t iet; 215 216 SLIST_FOREACH(iet, &td->td_epochs, et_tlink) 217 if (iet->et_epoch == epoch) 218 epoch_trace_report("Recursively entering epoch %s " 219 "at %s:%d, previously entered at %s:%d\n", 220 epoch->e_name, file, line, 221 iet->et_file, iet->et_line); 222 et->et_epoch = epoch; 223 et->et_file = file; 224 et->et_line = line; 225 SLIST_INSERT_HEAD(&td->td_epochs, et, et_tlink); 226 } 227 228 static inline void 229 epoch_trace_exit(struct thread *td, epoch_t epoch, epoch_tracker_t et, 230 const char *file, int line) 231 { 232 233 if (SLIST_FIRST(&td->td_epochs) != et) { 234 epoch_trace_report("Exiting epoch %s in a not nested order " 235 "at %s:%d. Most recently entered %s at %s:%d\n", 236 epoch->e_name, 237 file, line, 238 SLIST_FIRST(&td->td_epochs)->et_epoch->e_name, 239 SLIST_FIRST(&td->td_epochs)->et_file, 240 SLIST_FIRST(&td->td_epochs)->et_line); 241 /* This will panic if et is not anywhere on td_epochs. */ 242 SLIST_REMOVE(&td->td_epochs, et, epoch_tracker, et_tlink); 243 } else 244 SLIST_REMOVE_HEAD(&td->td_epochs, et_tlink); 245 } 246 247 /* Used by assertions that check thread state before going to sleep. */ 248 void 249 epoch_trace_list(struct thread *td) 250 { 251 epoch_tracker_t iet; 252 253 SLIST_FOREACH(iet, &td->td_epochs, et_tlink) 254 printf("Epoch %s entered at %s:%d\n", iet->et_epoch->e_name, 255 iet->et_file, iet->et_line); 256 } 257 #endif /* EPOCH_TRACE */ 258 259 static void 260 epoch_init(void *arg __unused) 261 { 262 int cpu; 263 264 block_count = counter_u64_alloc(M_WAITOK); 265 migrate_count = counter_u64_alloc(M_WAITOK); 266 turnstile_count = counter_u64_alloc(M_WAITOK); 267 switch_count = counter_u64_alloc(M_WAITOK); 268 epoch_call_count = counter_u64_alloc(M_WAITOK); 269 epoch_call_task_count = counter_u64_alloc(M_WAITOK); 270 271 pcpu_zone_record = uma_zcreate("epoch_record pcpu", 272 sizeof(struct epoch_record), NULL, NULL, NULL, NULL, 273 UMA_ALIGN_PTR, UMA_ZONE_PCPU); 274 CPU_FOREACH(cpu) { 275 GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0, 276 epoch_call_task, NULL); 277 taskqgroup_attach_cpu(qgroup_softirq, 278 DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL, 279 "epoch call task"); 280 } 281 #ifdef EPOCH_TRACE 282 SLIST_INIT(&thread0.td_epochs); 283 #endif 284 inited = 1; 285 global_epoch = epoch_alloc("Global", 0); 286 global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT); 287 } 288 SYSINIT(epoch, SI_SUB_EPOCH, SI_ORDER_FIRST, epoch_init, NULL); 289 290 #if !defined(EARLY_AP_STARTUP) 291 static void 292 epoch_init_smp(void *dummy __unused) 293 { 294 inited = 2; 295 } 296 SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL); 297 #endif 298 299 static void 300 epoch_ctor(epoch_t epoch) 301 { 302 epoch_record_t er; 303 int cpu; 304 305 epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK); 306 CPU_FOREACH(cpu) { 307 er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu); 308 bzero(er, sizeof(*er)); 309 ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL); 310 TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist); 311 er->er_cpuid = cpu; 312 er->er_parent = epoch; 313 } 314 } 315 316 static void 317 epoch_adjust_prio(struct thread *td, u_char prio) 318 { 319 320 thread_lock(td); 321 sched_prio(td, prio); 322 thread_unlock(td); 323 } 324 325 epoch_t 326 epoch_alloc(const char *name, int flags) 327 { 328 epoch_t epoch; 329 330 if (__predict_false(!inited)) 331 panic("%s called too early in boot", __func__); 332 epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK); 333 ck_epoch_init(&epoch->e_epoch); 334 epoch_ctor(epoch); 335 MPASS(epoch_count < MAX_EPOCHS - 2); 336 epoch->e_flags = flags; 337 epoch->e_idx = epoch_count; 338 epoch->e_name = name; 339 sx_init(&epoch->e_drain_sx, "epoch-drain-sx"); 340 mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF); 341 allepochs[epoch_count++] = epoch; 342 return (epoch); 343 } 344 345 void 346 epoch_free(epoch_t epoch) 347 { 348 349 epoch_drain_callbacks(epoch); 350 allepochs[epoch->e_idx] = NULL; 351 epoch_wait(global_epoch); 352 uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record); 353 mtx_destroy(&epoch->e_drain_mtx); 354 sx_destroy(&epoch->e_drain_sx); 355 free(epoch, M_EPOCH); 356 } 357 358 static epoch_record_t 359 epoch_currecord(epoch_t epoch) 360 { 361 362 return (zpcpu_get(epoch->e_pcpu_record)); 363 } 364 365 #define INIT_CHECK(epoch) \ 366 do { \ 367 if (__predict_false((epoch) == NULL)) \ 368 return; \ 369 } while (0) 370 371 void 372 _epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE) 373 { 374 struct epoch_record *er; 375 struct thread *td; 376 377 MPASS(cold || epoch != NULL); 378 MPASS(epoch->e_flags & EPOCH_PREEMPT); 379 td = curthread; 380 MPASS((vm_offset_t)et >= td->td_kstack && 381 (vm_offset_t)et + sizeof(struct epoch_tracker) <= 382 td->td_kstack + td->td_kstack_pages * PAGE_SIZE); 383 384 INIT_CHECK(epoch); 385 #ifdef EPOCH_TRACE 386 epoch_trace_enter(td, epoch, et, file, line); 387 #endif 388 et->et_td = td; 389 THREAD_NO_SLEEPING(); 390 critical_enter(); 391 sched_pin(); 392 td->td_pre_epoch_prio = td->td_priority; 393 er = epoch_currecord(epoch); 394 TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link); 395 ck_epoch_begin(&er->er_record, &et->et_section); 396 critical_exit(); 397 } 398 399 void 400 epoch_enter(epoch_t epoch) 401 { 402 epoch_record_t er; 403 404 MPASS(cold || epoch != NULL); 405 INIT_CHECK(epoch); 406 critical_enter(); 407 er = epoch_currecord(epoch); 408 ck_epoch_begin(&er->er_record, NULL); 409 } 410 411 void 412 _epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE) 413 { 414 struct epoch_record *er; 415 struct thread *td; 416 417 INIT_CHECK(epoch); 418 td = curthread; 419 critical_enter(); 420 sched_unpin(); 421 THREAD_SLEEPING_OK(); 422 er = epoch_currecord(epoch); 423 MPASS(epoch->e_flags & EPOCH_PREEMPT); 424 MPASS(et != NULL); 425 MPASS(et->et_td == td); 426 #ifdef INVARIANTS 427 et->et_td = (void*)0xDEADBEEF; 428 #endif 429 ck_epoch_end(&er->er_record, &et->et_section); 430 TAILQ_REMOVE(&er->er_tdlist, et, et_link); 431 er->er_gen++; 432 if (__predict_false(td->td_pre_epoch_prio != td->td_priority)) 433 epoch_adjust_prio(td, td->td_pre_epoch_prio); 434 critical_exit(); 435 #ifdef EPOCH_TRACE 436 epoch_trace_exit(td, epoch, et, file, line); 437 #endif 438 } 439 440 void 441 epoch_exit(epoch_t epoch) 442 { 443 epoch_record_t er; 444 445 INIT_CHECK(epoch); 446 er = epoch_currecord(epoch); 447 ck_epoch_end(&er->er_record, NULL); 448 critical_exit(); 449 } 450 451 /* 452 * epoch_block_handler_preempt() is a callback from the CK code when another 453 * thread is currently in an epoch section. 454 */ 455 static void 456 epoch_block_handler_preempt(struct ck_epoch *global __unused, 457 ck_epoch_record_t *cr, void *arg __unused) 458 { 459 epoch_record_t record; 460 struct thread *td, *owner, *curwaittd; 461 struct epoch_tracker *tdwait; 462 struct turnstile *ts; 463 struct lock_object *lock; 464 int spincount, gen; 465 int locksheld __unused; 466 467 record = __containerof(cr, struct epoch_record, er_record); 468 td = curthread; 469 locksheld = td->td_locks; 470 spincount = 0; 471 counter_u64_add(block_count, 1); 472 /* 473 * We lost a race and there's no longer any threads 474 * on the CPU in an epoch section. 475 */ 476 if (TAILQ_EMPTY(&record->er_tdlist)) 477 return; 478 479 if (record->er_cpuid != curcpu) { 480 /* 481 * If the head of the list is running, we can wait for it 482 * to remove itself from the list and thus save us the 483 * overhead of a migration 484 */ 485 gen = record->er_gen; 486 thread_unlock(td); 487 /* 488 * We can't actually check if the waiting thread is running 489 * so we simply poll for it to exit before giving up and 490 * migrating. 491 */ 492 do { 493 cpu_spinwait(); 494 } while (!TAILQ_EMPTY(&record->er_tdlist) && 495 gen == record->er_gen && 496 spincount++ < MAX_ADAPTIVE_SPIN); 497 thread_lock(td); 498 /* 499 * If the generation has changed we can poll again 500 * otherwise we need to migrate. 501 */ 502 if (gen != record->er_gen) 503 return; 504 /* 505 * Being on the same CPU as that of the record on which 506 * we need to wait allows us access to the thread 507 * list associated with that CPU. We can then examine the 508 * oldest thread in the queue and wait on its turnstile 509 * until it resumes and so on until a grace period 510 * elapses. 511 * 512 */ 513 counter_u64_add(migrate_count, 1); 514 sched_bind(td, record->er_cpuid); 515 /* 516 * At this point we need to return to the ck code 517 * to scan to see if a grace period has elapsed. 518 * We can't move on to check the thread list, because 519 * in the meantime new threads may have arrived that 520 * in fact belong to a different epoch. 521 */ 522 return; 523 } 524 /* 525 * Try to find a thread in an epoch section on this CPU 526 * waiting on a turnstile. Otherwise find the lowest 527 * priority thread (highest prio value) and drop our priority 528 * to match to allow it to run. 529 */ 530 TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) { 531 /* 532 * Propagate our priority to any other waiters to prevent us 533 * from starving them. They will have their original priority 534 * restore on exit from epoch_wait(). 535 */ 536 curwaittd = tdwait->et_td; 537 if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) { 538 critical_enter(); 539 thread_unlock(td); 540 thread_lock(curwaittd); 541 sched_prio(curwaittd, td->td_priority); 542 thread_unlock(curwaittd); 543 thread_lock(td); 544 critical_exit(); 545 } 546 if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) && 547 ((ts = curwaittd->td_blocked) != NULL)) { 548 /* 549 * We unlock td to allow turnstile_wait to reacquire 550 * the thread lock. Before unlocking it we enter a 551 * critical section to prevent preemption after we 552 * reenable interrupts by dropping the thread lock in 553 * order to prevent curwaittd from getting to run. 554 */ 555 critical_enter(); 556 thread_unlock(td); 557 558 if (turnstile_lock(ts, &lock, &owner)) { 559 if (ts == curwaittd->td_blocked) { 560 MPASS(TD_IS_INHIBITED(curwaittd) && 561 TD_ON_LOCK(curwaittd)); 562 critical_exit(); 563 turnstile_wait(ts, owner, 564 curwaittd->td_tsqueue); 565 counter_u64_add(turnstile_count, 1); 566 thread_lock(td); 567 return; 568 } 569 turnstile_unlock(ts, lock); 570 } 571 thread_lock(td); 572 critical_exit(); 573 KASSERT(td->td_locks == locksheld, 574 ("%d extra locks held", td->td_locks - locksheld)); 575 } 576 } 577 /* 578 * We didn't find any threads actually blocked on a lock 579 * so we have nothing to do except context switch away. 580 */ 581 counter_u64_add(switch_count, 1); 582 mi_switch(SW_VOL | SWT_RELINQUISH); 583 /* 584 * It is important the thread lock is dropped while yielding 585 * to allow other threads to acquire the lock pointed to by 586 * TDQ_LOCKPTR(td). Currently mi_switch() will unlock the 587 * thread lock before returning. Else a deadlock like 588 * situation might happen. 589 */ 590 thread_lock(td); 591 } 592 593 void 594 epoch_wait_preempt(epoch_t epoch) 595 { 596 struct thread *td; 597 int was_bound; 598 int old_cpu; 599 int old_pinned; 600 u_char old_prio; 601 int locks __unused; 602 603 MPASS(cold || epoch != NULL); 604 INIT_CHECK(epoch); 605 td = curthread; 606 #ifdef INVARIANTS 607 locks = curthread->td_locks; 608 MPASS(epoch->e_flags & EPOCH_PREEMPT); 609 if ((epoch->e_flags & EPOCH_LOCKED) == 0) 610 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 611 "epoch_wait() can be long running"); 612 KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle " 613 "of an epoch section of the same epoch")); 614 #endif 615 DROP_GIANT(); 616 thread_lock(td); 617 618 old_cpu = PCPU_GET(cpuid); 619 old_pinned = td->td_pinned; 620 old_prio = td->td_priority; 621 was_bound = sched_is_bound(td); 622 sched_unbind(td); 623 td->td_pinned = 0; 624 sched_bind(td, old_cpu); 625 626 ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt, 627 NULL); 628 629 /* restore CPU binding, if any */ 630 if (was_bound != 0) { 631 sched_bind(td, old_cpu); 632 } else { 633 /* get thread back to initial CPU, if any */ 634 if (old_pinned != 0) 635 sched_bind(td, old_cpu); 636 sched_unbind(td); 637 } 638 /* restore pinned after bind */ 639 td->td_pinned = old_pinned; 640 641 /* restore thread priority */ 642 sched_prio(td, old_prio); 643 thread_unlock(td); 644 PICKUP_GIANT(); 645 KASSERT(td->td_locks == locks, 646 ("%d residual locks held", td->td_locks - locks)); 647 } 648 649 static void 650 epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused, 651 void *arg __unused) 652 { 653 cpu_spinwait(); 654 } 655 656 void 657 epoch_wait(epoch_t epoch) 658 { 659 660 MPASS(cold || epoch != NULL); 661 INIT_CHECK(epoch); 662 MPASS(epoch->e_flags == 0); 663 critical_enter(); 664 ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL); 665 critical_exit(); 666 } 667 668 void 669 epoch_call(epoch_t epoch, epoch_callback_t callback, epoch_context_t ctx) 670 { 671 epoch_record_t er; 672 ck_epoch_entry_t *cb; 673 674 cb = (void *)ctx; 675 676 MPASS(callback); 677 /* too early in boot to have epoch set up */ 678 if (__predict_false(epoch == NULL)) 679 goto boottime; 680 #if !defined(EARLY_AP_STARTUP) 681 if (__predict_false(inited < 2)) 682 goto boottime; 683 #endif 684 685 critical_enter(); 686 *DPCPU_PTR(epoch_cb_count) += 1; 687 er = epoch_currecord(epoch); 688 ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback); 689 critical_exit(); 690 return; 691 boottime: 692 callback(ctx); 693 } 694 695 static void 696 epoch_call_task(void *arg __unused) 697 { 698 ck_stack_entry_t *cursor, *head, *next; 699 ck_epoch_record_t *record; 700 epoch_record_t er; 701 epoch_t epoch; 702 ck_stack_t cb_stack; 703 int i, npending, total; 704 705 ck_stack_init(&cb_stack); 706 critical_enter(); 707 epoch_enter(global_epoch); 708 for (total = i = 0; i < epoch_count; i++) { 709 if (__predict_false((epoch = allepochs[i]) == NULL)) 710 continue; 711 er = epoch_currecord(epoch); 712 record = &er->er_record; 713 if ((npending = record->n_pending) == 0) 714 continue; 715 ck_epoch_poll_deferred(record, &cb_stack); 716 total += npending - record->n_pending; 717 } 718 epoch_exit(global_epoch); 719 *DPCPU_PTR(epoch_cb_count) -= total; 720 critical_exit(); 721 722 counter_u64_add(epoch_call_count, total); 723 counter_u64_add(epoch_call_task_count, 1); 724 725 head = ck_stack_batch_pop_npsc(&cb_stack); 726 for (cursor = head; cursor != NULL; cursor = next) { 727 struct ck_epoch_entry *entry = 728 ck_epoch_entry_container(cursor); 729 730 next = CK_STACK_NEXT(cursor); 731 entry->function(entry); 732 } 733 } 734 735 int 736 in_epoch_verbose(epoch_t epoch, int dump_onfail) 737 { 738 struct epoch_tracker *tdwait; 739 struct thread *td; 740 epoch_record_t er; 741 742 td = curthread; 743 if (THREAD_CAN_SLEEP()) 744 return (0); 745 if (__predict_false((epoch) == NULL)) 746 return (0); 747 critical_enter(); 748 er = epoch_currecord(epoch); 749 TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link) 750 if (tdwait->et_td == td) { 751 critical_exit(); 752 return (1); 753 } 754 #ifdef INVARIANTS 755 if (dump_onfail) { 756 MPASS(td->td_pinned); 757 printf("cpu: %d id: %d\n", curcpu, td->td_tid); 758 TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link) 759 printf("td_tid: %d ", tdwait->et_td->td_tid); 760 printf("\n"); 761 } 762 #endif 763 critical_exit(); 764 return (0); 765 } 766 767 int 768 in_epoch(epoch_t epoch) 769 { 770 return (in_epoch_verbose(epoch, 0)); 771 } 772 773 static void 774 epoch_drain_cb(struct epoch_context *ctx) 775 { 776 struct epoch *epoch = 777 __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent; 778 779 if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) { 780 mtx_lock(&epoch->e_drain_mtx); 781 wakeup(epoch); 782 mtx_unlock(&epoch->e_drain_mtx); 783 } 784 } 785 786 void 787 epoch_drain_callbacks(epoch_t epoch) 788 { 789 epoch_record_t er; 790 struct thread *td; 791 int was_bound; 792 int old_pinned; 793 int old_cpu; 794 int cpu; 795 796 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 797 "epoch_drain_callbacks() may sleep!"); 798 799 /* too early in boot to have epoch set up */ 800 if (__predict_false(epoch == NULL)) 801 return; 802 #if !defined(EARLY_AP_STARTUP) 803 if (__predict_false(inited < 2)) 804 return; 805 #endif 806 DROP_GIANT(); 807 808 sx_xlock(&epoch->e_drain_sx); 809 mtx_lock(&epoch->e_drain_mtx); 810 811 td = curthread; 812 thread_lock(td); 813 old_cpu = PCPU_GET(cpuid); 814 old_pinned = td->td_pinned; 815 was_bound = sched_is_bound(td); 816 sched_unbind(td); 817 td->td_pinned = 0; 818 819 CPU_FOREACH(cpu) 820 epoch->e_drain_count++; 821 CPU_FOREACH(cpu) { 822 er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu); 823 sched_bind(td, cpu); 824 epoch_call(epoch, &epoch_drain_cb, &er->er_drain_ctx); 825 } 826 827 /* restore CPU binding, if any */ 828 if (was_bound != 0) { 829 sched_bind(td, old_cpu); 830 } else { 831 /* get thread back to initial CPU, if any */ 832 if (old_pinned != 0) 833 sched_bind(td, old_cpu); 834 sched_unbind(td); 835 } 836 /* restore pinned after bind */ 837 td->td_pinned = old_pinned; 838 839 thread_unlock(td); 840 841 while (epoch->e_drain_count != 0) 842 msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0); 843 844 mtx_unlock(&epoch->e_drain_mtx); 845 sx_xunlock(&epoch->e_drain_sx); 846 847 PICKUP_GIANT(); 848 } 849