/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#ifdef EPOCH_TRACE
#include <machine/stdarg.h>
#include <sys/stack.h>
#include <sys/tree.h>
#endif
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <ck_epoch.h>

#ifdef __amd64__
#define	EPOCH_ALIGN	CACHE_LINE_SIZE*2
#else
#define	EPOCH_ALIGN	CACHE_LINE_SIZE
#endif

TAILQ_HEAD (epoch_tdlist, epoch_tracker);
typedef struct epoch_record {
	ck_epoch_record_t er_record;
	struct epoch_context er_drain_ctx;
	struct epoch *er_parent;
	volatile struct epoch_tdlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
} __aligned(EPOCH_ALIGN) *epoch_record_t;

struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	epoch_record_t e_pcpu_record;
	int	e_in_use;
	int	e_flags;
	struct sx e_drain_sx;
	struct mtx e_drain_mtx;
	volatile int e_drain_count;
	const char *e_name;
};

/* arbitrary --- needs benchmarking */
#define	MAX_ADAPTIVE_SPIN 100
#define	MAX_EPOCHS 64

CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "epoch stats");
/* Stats. */
static counter_u64_t block_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");

TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

static struct epoch epoch_array[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static uma_zone_t pcpu_zone_record;

static struct sx epoch_sx;

#define	EPOCH_LOCK() sx_xlock(&epoch_sx)
#define	EPOCH_UNLOCK() sx_xunlock(&epoch_sx)

#ifdef EPOCH_TRACE
struct stackentry {
	RB_ENTRY(stackentry) se_node;
	struct stack se_stack;
};

static int
stackentry_compare(struct stackentry *a, struct stackentry *b)
{

	if (a->se_stack.depth > b->se_stack.depth)
		return (1);
	if (a->se_stack.depth < b->se_stack.depth)
		return (-1);
	for (int i = 0; i < a->se_stack.depth; i++) {
		if (a->se_stack.pcs[i] > b->se_stack.pcs[i])
			return (1);
		if (a->se_stack.pcs[i] < b->se_stack.pcs[i])
			return (-1);
	}

	return (0);
}

RB_HEAD(stacktree, stackentry) epoch_stacks = RB_INITIALIZER(&epoch_stacks);
RB_GENERATE_STATIC(stacktree, stackentry, se_node, stackentry_compare);

static struct mtx epoch_stacks_lock;
MTX_SYSINIT(epochstacks, &epoch_stacks_lock, "epoch_stacks", MTX_DEF);

static bool epoch_trace_stack_print = true;
SYSCTL_BOOL(_kern_epoch, OID_AUTO, trace_stack_print, CTLFLAG_RWTUN,
    &epoch_trace_stack_print, 0, "Print stack traces on epoch reports");

static void epoch_trace_report(const char *fmt, ...) __printflike(1, 2);
static inline void
epoch_trace_report(const char *fmt, ...)
{
	va_list ap;
	struct stackentry se, *new;

	stack_zero(&se.se_stack);	/* XXX: is it really needed? */
	stack_save(&se.se_stack);

	/* Tree is never reduced - go lockless. */
	if (RB_FIND(stacktree, &epoch_stacks, &se) != NULL)
		return;

	new = malloc(sizeof(*new), M_STACK, M_NOWAIT);
	if (new != NULL) {
		bcopy(&se.se_stack, &new->se_stack, sizeof(struct stack));

		mtx_lock(&epoch_stacks_lock);
		new = RB_INSERT(stacktree, &epoch_stacks, new);
		mtx_unlock(&epoch_stacks_lock);
		if (new != NULL)
			free(new, M_STACK);
	}

	va_start(ap, fmt);
	(void)vprintf(fmt, ap);
	va_end(ap);
	if (epoch_trace_stack_print)
		stack_print_ddb(&se.se_stack);
}

static inline void
epoch_trace_enter(struct thread *td, epoch_t epoch, epoch_tracker_t et,
    const char *file, int line)
{
	epoch_tracker_t iet;

	SLIST_FOREACH(iet, &td->td_epochs, et_tlink)
		if (iet->et_epoch == epoch)
			epoch_trace_report("Recursively entering epoch %s "
			    "at %s:%d, previously entered at %s:%d\n",
			    epoch->e_name, file, line,
			    iet->et_file, iet->et_line);
	et->et_epoch = epoch;
	et->et_file = file;
	et->et_line = line;
	SLIST_INSERT_HEAD(&td->td_epochs, et, et_tlink);
}

static inline void
epoch_trace_exit(struct thread *td, epoch_t epoch, epoch_tracker_t et,
    const char *file, int line)
{

	if (SLIST_FIRST(&td->td_epochs) != et) {
		epoch_trace_report("Exiting epoch %s in a not nested order "
		    "at %s:%d. Most recently entered %s at %s:%d\n",
		    epoch->e_name,
		    file, line,
		    SLIST_FIRST(&td->td_epochs)->et_epoch->e_name,
		    SLIST_FIRST(&td->td_epochs)->et_file,
		    SLIST_FIRST(&td->td_epochs)->et_line);
		/* This will panic if et is not anywhere on td_epochs. */
		SLIST_REMOVE(&td->td_epochs, et, epoch_tracker, et_tlink);
	} else
		SLIST_REMOVE_HEAD(&td->td_epochs, et_tlink);
}
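
/*
 * Illustrative sketch (not part of this file): trackers are expected to be
 * exited in LIFO order relative to how they were entered, which is what
 * epoch_trace_exit() above checks.  The epoch and tracker names below are
 * hypothetical.
 *
 *	struct epoch_tracker et_a, et_b;
 *
 *	epoch_enter_preempt(epoch_a, &et_a);
 *	epoch_enter_preempt(epoch_b, &et_b);
 *	...
 *	epoch_exit_preempt(epoch_b, &et_b);	(innermost first)
 *	epoch_exit_preempt(epoch_a, &et_a);
 *
 * Exiting et_a before et_b would trigger an epoch_trace_report() under
 * EPOCH_TRACE.
 */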
/* Used by assertions that check thread state before going to sleep. */
void
epoch_trace_list(struct thread *td)
{
	epoch_tracker_t iet;

	SLIST_FOREACH(iet, &td->td_epochs, et_tlink)
		printf("Epoch %s entered at %s:%d\n", iet->et_epoch->e_name,
		    iet->et_file, iet->et_line);
}
#endif /* EPOCH_TRACE */

static void
epoch_init(void *arg __unused)
{
	int cpu;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	epoch_call_count = counter_u64_alloc(M_WAITOK);
	epoch_call_task_count = counter_u64_alloc(M_WAITOK);

	pcpu_zone_record = uma_zcreate("epoch_record pcpu",
	    sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	CPU_FOREACH(cpu) {
		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
		    epoch_call_task, NULL);
		taskqgroup_attach_cpu(qgroup_softirq,
		    DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
		    "epoch call task");
	}
#ifdef EPOCH_TRACE
	SLIST_INIT(&thread0.td_epochs);
#endif
	sx_init(&epoch_sx, "epoch-sx");
	inited = 1;
	global_epoch = epoch_alloc("Global", 0);
	global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_EPOCH, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
	inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif

static void
epoch_ctor(epoch_t epoch)
{
	epoch_record_t er;
	int cpu;

	epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		bzero(er, sizeof(*er));
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		er->er_cpuid = cpu;
		er->er_parent = epoch;
	}
}

static void
epoch_adjust_prio(struct thread *td, u_char prio)
{

	thread_lock(td);
	sched_prio(td, prio);
	thread_unlock(td);
}

epoch_t
epoch_alloc(const char *name, int flags)
{
	epoch_t epoch;
	int i;

	MPASS(name != NULL);

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);

	EPOCH_LOCK();

	/*
	 * Find a free index in the epoch array. If no free index is
	 * found, try to use the index after the last one.
	 */
	for (i = 0;; i++) {
		/*
		 * If too many epochs are currently allocated,
		 * return NULL.
		 */
		if (i == MAX_EPOCHS) {
			epoch = NULL;
			goto done;
		}
		if (epoch_array[i].e_in_use == 0)
			break;
	}

	epoch = epoch_array + i;
	ck_epoch_init(&epoch->e_epoch);
	epoch_ctor(epoch);
	epoch->e_flags = flags;
	epoch->e_name = name;
	sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
	mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);

	/*
	 * Set e_in_use last, because when this field is set the
	 * epoch_call_task() function will start scanning this epoch
	 * structure.
	 */
	atomic_store_rel_int(&epoch->e_in_use, 1);
done:
	EPOCH_UNLOCK();
	return (epoch);
}
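
/*
 * Illustrative sketch (not part of this file): a typical consumer allocates
 * a preemptible epoch once and then brackets lockless read sections with an
 * on-stack tracker.  The "foo" names below are hypothetical.
 *
 *	static epoch_t foo_epoch;
 *
 *	foo_epoch = epoch_alloc("foo", EPOCH_PREEMPT);
 *
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(foo_epoch, &et);
 *	CK_LIST_FOREACH(f, &foo_list, f_link)
 *		foo_inspect(f);		(may be preempted, must not sleep)
 *	epoch_exit_preempt(foo_epoch, &et);
 */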
void
epoch_free(epoch_t epoch)
{

	EPOCH_LOCK();

	MPASS(epoch->e_in_use != 0);

	epoch_drain_callbacks(epoch);

	atomic_store_rel_int(&epoch->e_in_use, 0);
	/*
	 * Make sure the epoch_call_task() function sees e_in_use equal
	 * to zero, by calling epoch_wait() on the global_epoch:
	 */
	epoch_wait(global_epoch);
	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
	mtx_destroy(&epoch->e_drain_mtx);
	sx_destroy(&epoch->e_drain_sx);
	memset(epoch, 0, sizeof(*epoch));

	EPOCH_UNLOCK();
}

static epoch_record_t
epoch_currecord(epoch_t epoch)
{

	return (zpcpu_get(epoch->e_pcpu_record));
}

#define	INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)

void
_epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
	struct epoch_record *er;
	struct thread *td;

	MPASS(cold || epoch != NULL);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	td = curthread;
	MPASS((vm_offset_t)et >= td->td_kstack &&
	    (vm_offset_t)et + sizeof(struct epoch_tracker) <=
	    td->td_kstack + td->td_kstack_pages * PAGE_SIZE);

	INIT_CHECK(epoch);
#ifdef EPOCH_TRACE
	epoch_trace_enter(td, epoch, et, file, line);
#endif
	et->et_td = td;
	THREAD_NO_SLEEPING();
	critical_enter();
	sched_pin();
	td->td_pre_epoch_prio = td->td_priority;
	er = epoch_currecord(epoch);
	TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
	ck_epoch_begin(&er->er_record, &et->et_section);
	critical_exit();
}

void
epoch_enter(epoch_t epoch)
{
	epoch_record_t er;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	critical_enter();
	er = epoch_currecord(epoch);
	ck_epoch_begin(&er->er_record, NULL);
}

void
_epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
	struct epoch_record *er;
	struct thread *td;

	INIT_CHECK(epoch);
	td = curthread;
	critical_enter();
	sched_unpin();
	THREAD_SLEEPING_OK();
	er = epoch_currecord(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	MPASS(et != NULL);
	MPASS(et->et_td == td);
#ifdef INVARIANTS
	et->et_td = (void*)0xDEADBEEF;
#endif
	ck_epoch_end(&er->er_record, &et->et_section);
	TAILQ_REMOVE(&er->er_tdlist, et, et_link);
	er->er_gen++;
	if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
		epoch_adjust_prio(td, td->td_pre_epoch_prio);
	critical_exit();
#ifdef EPOCH_TRACE
	epoch_trace_exit(td, epoch, et, file, line);
#endif
}

void
epoch_exit(epoch_t epoch)
{
	epoch_record_t er;

	INIT_CHECK(epoch);
	er = epoch_currecord(epoch);
	ck_epoch_end(&er->er_record, NULL);
	critical_exit();
}
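
/*
 * Illustrative sketch (not part of this file): the non-preemptible variant
 * runs the entire read side inside a critical section, so the section must
 * be short and must not sleep or block.  No tracker is needed.  The "bar"
 * names below are hypothetical.
 *
 *	epoch_enter(global_epoch);
 *	bar = CK_SLIST_FIRST(&bar_list);	(lockless read)
 *	if (bar != NULL)
 *		counter_u64_add(bar->b_hits, 1);
 *	epoch_exit(global_epoch);
 */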
/*
 * epoch_block_handler_preempt() is a callback from the CK code when another
 * thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused,
    ck_epoch_record_t *cr, void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *owner, *curwaittd;
	struct epoch_tracker *tdwait;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;
	int locksheld __unused;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	locksheld = td->td_locks;
	spincount = 0;
	counter_u64_add(block_count, 1);
	/*
	 * We lost a race and there are no longer any threads
	 * on the CPU in an epoch section.
	 */
	if (TAILQ_EMPTY(&record->er_tdlist))
		return;

	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		gen = record->er_gen;
		thread_unlock(td);
		/*
		 * We can't actually check if the waiting thread is running
		 * so we simply poll for it to exit before giving up and
		 * migrating.
		 */
		do {
			cpu_spinwait();
		} while (!TAILQ_EMPTY(&record->er_tdlist) &&
		    gen == record->er_gen &&
		    spincount++ < MAX_ADAPTIVE_SPIN);
		thread_lock(td);
		/*
		 * If the generation has changed we can poll again,
		 * otherwise we need to migrate.
		 */
		if (gen != record->er_gen)
			return;
		/*
		 * Being on the same CPU as that of the record on which
		 * we need to wait allows us access to the thread
		 * list associated with that CPU. We can then examine the
		 * oldest thread in the queue and wait on its turnstile
		 * until it resumes and so on until a grace period
		 * elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them. They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		curwaittd = tdwait->et_td;
		if (!TD_IS_INHIBITED(curwaittd) &&
		    curwaittd->td_priority > td->td_priority) {
			critical_enter();
			thread_unlock(td);
			thread_lock(curwaittd);
			sched_prio(curwaittd, td->td_priority);
			thread_unlock(curwaittd);
			thread_lock(td);
			critical_exit();
		}
		if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
		    ((ts = curwaittd->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock. Before unlocking it we enter a
			 * critical section to prevent preemption after we
			 * reenable interrupts by dropping the thread lock in
			 * order to prevent curwaittd from getting to run.
			 */
			critical_enter();
			thread_unlock(td);

			if (turnstile_lock(ts, &lock, &owner)) {
				if (ts == curwaittd->td_blocked) {
					MPASS(TD_IS_INHIBITED(curwaittd) &&
					    TD_ON_LOCK(curwaittd));
					critical_exit();
					turnstile_wait(ts, owner,
					    curwaittd->td_tsqueue);
					counter_u64_add(turnstile_count, 1);
					thread_lock(td);
					return;
				}
				turnstile_unlock(ts, lock);
			}
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == locksheld,
			    ("%d extra locks held", td->td_locks - locksheld));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH);
	/*
	 * It is important the thread lock is dropped while yielding
	 * to allow other threads to acquire the lock pointed to by
	 * TDQ_LOCKPTR(td). Currently mi_switch() will unlock the
	 * thread lock before returning. Otherwise a deadlock-like
	 * situation might arise.
	 */
	thread_lock(td);
}

void
epoch_wait_preempt(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;
	int locks __unused;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;
#ifdef INVARIANTS
	locks = curthread->td_locks;
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "epoch_wait() can be long running");
	KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
	    "of an epoch section of the same epoch"));
#endif
	DROP_GIANT();
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
	    NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}

static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
	cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags == 0);
	critical_enter();
	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
	critical_exit();
}
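
/*
 * Illustrative sketch (not part of this file): a writer unlinks an object
 * under its own lock and then waits for a grace period before freeing, so
 * readers inside epoch sections never dereference freed memory.  The "foo"
 * names below are hypothetical.
 *
 *	mtx_lock(&foo_lock);
 *	CK_LIST_REMOVE(f, f_link);
 *	mtx_unlock(&foo_lock);
 *	epoch_wait_preempt(foo_epoch);	(all in-flight readers drain)
 *	free(f, M_FOO);
 */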
void
epoch_call(epoch_t epoch, epoch_callback_t callback, epoch_context_t ctx)
{
	epoch_record_t er;
	ck_epoch_entry_t *cb;

	cb = (void *)ctx;

	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		goto boottime;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		goto boottime;
#endif

	critical_enter();
	*DPCPU_PTR(epoch_cb_count) += 1;
	er = epoch_currecord(epoch);
	ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
	critical_exit();
	return;
boottime:
	callback(ctx);
}

static void
epoch_call_task(void *arg __unused)
{
	ck_stack_entry_t *cursor, *head, *next;
	ck_epoch_record_t *record;
	epoch_record_t er;
	epoch_t epoch;
	ck_stack_t cb_stack;
	int i, npending, total;

	ck_stack_init(&cb_stack);
	critical_enter();
	epoch_enter(global_epoch);
	for (total = i = 0; i != MAX_EPOCHS; i++) {
		epoch = epoch_array + i;
		if (__predict_false(
		    atomic_load_acq_int(&epoch->e_in_use) == 0))
			continue;
		er = epoch_currecord(epoch);
		record = &er->er_record;
		if ((npending = record->n_pending) == 0)
			continue;
		ck_epoch_poll_deferred(record, &cb_stack);
		total += npending - record->n_pending;
	}
	epoch_exit(global_epoch);
	*DPCPU_PTR(epoch_cb_count) -= total;
	critical_exit();

	counter_u64_add(epoch_call_count, total);
	counter_u64_add(epoch_call_task_count, 1);

	head = ck_stack_batch_pop_npsc(&cb_stack);
	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}
}

int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
	struct epoch_tracker *tdwait;
	struct thread *td;
	epoch_record_t er;

	td = curthread;
	if (THREAD_CAN_SLEEP())
		return (0);
	if (__predict_false((epoch) == NULL))
		return (0);
	critical_enter();
	er = epoch_currecord(epoch);
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == td) {
			critical_exit();
			return (1);
		}
#ifdef INVARIANTS
	if (dump_onfail) {
		MPASS(td->td_pinned);
		printf("cpu: %d id: %d\n", curcpu, td->td_tid);
		TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
			printf("td_tid: %d ", tdwait->et_td->td_tid);
		printf("\n");
	}
#endif
	critical_exit();
	return (0);
}

int
in_epoch(epoch_t epoch)
{
	return (in_epoch_verbose(epoch, 0));
}
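
/*
 * Illustrative sketch (not part of this file): deferred reclamation embeds a
 * struct epoch_context in the object and frees it from the callback once a
 * grace period has elapsed, the same pattern epoch_drain_cb() below uses.
 * The "foo" names are hypothetical.
 *
 *	struct foo {
 *		CK_LIST_ENTRY(foo) f_link;
 *		struct epoch_context f_epoch_ctx;
 *	};
 *
 *	static void
 *	foo_free_cb(epoch_context_t ctx)
 *	{
 *		struct foo *f;
 *
 *		f = __containerof(ctx, struct foo, f_epoch_ctx);
 *		free(f, M_FOO);
 *	}
 *
 *	CK_LIST_REMOVE(f, f_link);
 *	epoch_call(foo_epoch, foo_free_cb, &f->f_epoch_ctx);
 */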
static void
epoch_drain_cb(struct epoch_context *ctx)
{
	struct epoch *epoch =
	    __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;

	if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
		mtx_lock(&epoch->e_drain_mtx);
		wakeup(epoch);
		mtx_unlock(&epoch->e_drain_mtx);
	}
}

void
epoch_drain_callbacks(epoch_t epoch)
{
	epoch_record_t er;
	struct thread *td;
	int was_bound;
	int old_pinned;
	int old_cpu;
	int cpu;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "epoch_drain_callbacks() may sleep!");

	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		return;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		return;
#endif
	DROP_GIANT();

	sx_xlock(&epoch->e_drain_sx);
	mtx_lock(&epoch->e_drain_mtx);

	td = curthread;
	thread_lock(td);
	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;

	CPU_FOREACH(cpu)
		epoch->e_drain_count++;
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		sched_bind(td, cpu);
		epoch_call(epoch, &epoch_drain_cb, &er->er_drain_ctx);
	}

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	thread_unlock(td);

	while (epoch->e_drain_count != 0)
		msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);

	mtx_unlock(&epoch->e_drain_mtx);
	sx_xunlock(&epoch->e_drain_sx);

	PICKUP_GIANT();
}
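
/*
 * Illustrative sketch (not part of this file): a typical teardown order for
 * a consumer, assuming hypothetical "foo" names.  epoch_free() drains
 * callbacks internally, but a consumer whose callbacks free objects from
 * its own UMA zone should drain before destroying that zone.
 *
 *	epoch_wait_preempt(foo_epoch);		(readers are gone)
 *	epoch_drain_callbacks(foo_epoch);	(pending epoch_call()s ran)
 *	uma_zdestroy(foo_zone);
 *	epoch_free(foo_epoch);
 */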