/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <ck_epoch.h>

static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

#ifdef __amd64__
#define EPOCH_ALIGN CACHE_LINE_SIZE*2
#else
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif

TAILQ_HEAD (epoch_tdlist, epoch_tracker);
typedef struct epoch_record {
	ck_epoch_record_t er_record;
	struct epoch_context er_drain_ctx;
	struct epoch *er_parent;
	volatile struct epoch_tdlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
} __aligned(EPOCH_ALIGN) *epoch_record_t;

struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	epoch_record_t e_pcpu_record;
	int e_idx;
	int e_flags;
	struct sx e_drain_sx;
	struct mtx e_drain_mtx;
	volatile int e_drain_count;
};

/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 100
#define MAX_EPOCHS 64

CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");

/* Stats. */
static counter_u64_t block_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");

TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

epoch_t allepochs[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
static __read_mostly int epoch_count;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static uma_zone_t pcpu_zone_record;

static void
epoch_init(void *arg __unused)
{
	int cpu;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	epoch_call_count = counter_u64_alloc(M_WAITOK);
	epoch_call_task_count = counter_u64_alloc(M_WAITOK);

	pcpu_zone_record = uma_zcreate("epoch_record pcpu",
	    sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	CPU_FOREACH(cpu) {
		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
		    epoch_call_task, NULL);
		taskqgroup_attach_cpu(qgroup_softirq,
		    DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
		    "epoch call task");
	}
	inited = 1;
	global_epoch = epoch_alloc(0);
	global_epoch_preempt = epoch_alloc(EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
	inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif

static void
epoch_ctor(epoch_t epoch)
{
	epoch_record_t er;
	int cpu;

	epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		bzero(er, sizeof(*er));
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		er->er_cpuid = cpu;
		er->er_parent = epoch;
	}
}

static void
epoch_adjust_prio(struct thread *td, u_char prio)
{

	thread_lock(td);
	sched_prio(td, prio);
	thread_unlock(td);
}

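/*
 * Illustrative sketch of how a consumer of the preemptible-epoch KPI
 * below is expected to call it; the foo_* names and M_FOO are
 * hypothetical and only show the read-side pattern:
 *
 *	static epoch_t foo_epoch;
 *
 *	foo_epoch = epoch_alloc(EPOCH_PREEMPT);
 *
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(foo_epoch, &et);
 *	... lock-free reads of epoch-protected data ...
 *	epoch_exit_preempt(foo_epoch, &et);
 */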
epoch_t
epoch_alloc(int flags)
{
	epoch_t epoch;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);
	epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK);
	ck_epoch_init(&epoch->e_epoch);
	epoch_ctor(epoch);
	MPASS(epoch_count < MAX_EPOCHS - 2);
	epoch->e_flags = flags;
	epoch->e_idx = epoch_count;
	sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
	mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);
	allepochs[epoch_count++] = epoch;
	return (epoch);
}

void
epoch_free(epoch_t epoch)
{

	epoch_drain_callbacks(epoch);
	allepochs[epoch->e_idx] = NULL;
	epoch_wait(global_epoch);
	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
	mtx_destroy(&epoch->e_drain_mtx);
	sx_destroy(&epoch->e_drain_sx);
	free(epoch, M_EPOCH);
}

static epoch_record_t
epoch_currecord(epoch_t epoch)
{

	return (zpcpu_get_cpu(epoch->e_pcpu_record, curcpu));
}

#define INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)

void
epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et)
{
	struct epoch_record *er;
	struct thread *td;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
#ifdef EPOCH_TRACKER_DEBUG
	et->et_magic_pre = EPOCH_MAGIC0;
	et->et_magic_post = EPOCH_MAGIC1;
#endif
	td = curthread;
	et->et_td = td;
	td->td_epochnest++;
	critical_enter();
	sched_pin();

	td->td_pre_epoch_prio = td->td_priority;
	er = epoch_currecord(epoch);
	TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
	ck_epoch_begin(&er->er_record, &et->et_section);
	critical_exit();
}

void
epoch_enter(epoch_t epoch)
{
	struct thread *td;
	epoch_record_t er;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;

	td->td_epochnest++;
	critical_enter();
	er = epoch_currecord(epoch);
	ck_epoch_begin(&er->er_record, NULL);
}

void
epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et)
{
	struct epoch_record *er;
	struct thread *td;

	INIT_CHECK(epoch);
	td = curthread;
	critical_enter();
	sched_unpin();
	MPASS(td->td_epochnest);
	td->td_epochnest--;
	er = epoch_currecord(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	MPASS(et != NULL);
	MPASS(et->et_td == td);
#ifdef EPOCH_TRACKER_DEBUG
	MPASS(et->et_magic_pre == EPOCH_MAGIC0);
	MPASS(et->et_magic_post == EPOCH_MAGIC1);
	et->et_magic_pre = 0;
	et->et_magic_post = 0;
#endif
#ifdef INVARIANTS
	et->et_td = (void*)0xDEADBEEF;
#endif
	ck_epoch_end(&er->er_record, &et->et_section);
	TAILQ_REMOVE(&er->er_tdlist, et, et_link);
	er->er_gen++;
	if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
		epoch_adjust_prio(td, td->td_pre_epoch_prio);
	critical_exit();
}

void
epoch_exit(epoch_t epoch)
{
	struct thread *td;
	epoch_record_t er;

	INIT_CHECK(epoch);
	td = curthread;
	MPASS(td->td_epochnest);
	td->td_epochnest--;
	er = epoch_currecord(epoch);
	ck_epoch_end(&er->er_record, NULL);
	critical_exit();
}

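/*
 * Illustrative sketch of the non-preemptible variant defined above
 * (foo_epoch is hypothetical).  epoch_enter() leaves the critical
 * section it enters open until epoch_exit(), so the section body must
 * not sleep or acquire sleepable locks:
 *
 *	epoch_enter(foo_epoch);
 *	... lock-free reads; no sleeping ...
 *	epoch_exit(foo_epoch);
 */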
/*
 * epoch_block_handler_preempt() is a callback from the CK code when another
 * thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused,
    ck_epoch_record_t *cr, void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *owner, *curwaittd;
	struct epoch_tracker *tdwait;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;
	int locksheld __unused;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	locksheld = td->td_locks;
	spincount = 0;
	counter_u64_add(block_count, 1);
	/*
	 * We lost a race and there are no longer any threads
	 * on the CPU in an epoch section.
	 */
	if (TAILQ_EMPTY(&record->er_tdlist))
		return;

	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		gen = record->er_gen;
		thread_unlock(td);
		/*
		 * We can't actually check if the waiting thread is running,
		 * so we simply poll for it to exit before giving up and
		 * migrating.
		 */
		do {
			cpu_spinwait();
		} while (!TAILQ_EMPTY(&record->er_tdlist) &&
		    gen == record->er_gen &&
		    spincount++ < MAX_ADAPTIVE_SPIN);
		thread_lock(td);
		/*
		 * If the generation has changed we can poll again;
		 * otherwise we need to migrate.
		 */
		if (gen != record->er_gen)
			return;
		/*
		 * Being on the same CPU as the record on which we need to
		 * wait gives us access to the thread list associated with
		 * that CPU.  We can then examine the oldest thread in the
		 * queue and wait on its turnstile until it resumes, and so
		 * on, until a grace period elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile.  Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them.  They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		curwaittd = tdwait->et_td;
		if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) {
			critical_enter();
			thread_unlock(td);
			thread_lock(curwaittd);
			sched_prio(curwaittd, td->td_priority);
			thread_unlock(curwaittd);
			thread_lock(td);
			critical_exit();
		}
		if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
		    ((ts = curwaittd->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock.  Before unlocking it we enter a
			 * critical section so that, when dropping the thread
			 * lock re-enables interrupts, we cannot be preempted
			 * and curwaittd cannot get to run here.
			 */
			critical_enter();
			thread_unlock(td);

			if (turnstile_lock(ts, &lock, &owner)) {
				if (ts == curwaittd->td_blocked) {
					MPASS(TD_IS_INHIBITED(curwaittd) &&
					    TD_ON_LOCK(curwaittd));
					critical_exit();
					turnstile_wait(ts, owner,
					    curwaittd->td_tsqueue);
					counter_u64_add(turnstile_count, 1);
					thread_lock(td);
					return;
				}
				turnstile_unlock(ts, lock);
			}
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == locksheld,
			    ("%d extra locks held", td->td_locks - locksheld));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock,
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td).  Otherwise a
	 * deadlock-like situation might happen. (HPS)
	 */
	thread_unlock(td);
	thread_lock(td);
}

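/*
 * Wait for a grace period: every thread that was inside a preemptible
 * section of this epoch when the call was made has left that section by
 * the time epoch_wait_preempt() returns.  Sketch of the writer-side
 * pattern this supports (names hypothetical):
 *
 *	TAILQ_REMOVE(&foo_list, obj, link);
 *	epoch_wait_preempt(foo_epoch);
 *	free(obj, M_FOO);
 */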
void
epoch_wait_preempt(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;
	int locks __unused;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;
#ifdef INVARIANTS
	locks = curthread->td_locks;
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "epoch_wait() can be long running");
	KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
	    "of an epoch section of the same epoch"));
#endif
	thread_lock(td);
	DROP_GIANT();

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
	    NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}

static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
	cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags == 0);
	critical_enter();
	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
	critical_exit();
}

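/*
 * Deferred reclamation sketch for epoch_call() below (struct foo,
 * foo_destroy() and M_FOO are hypothetical): the caller embeds a
 * struct epoch_context in the object and frees the object from the
 * callback once a grace period has passed:
 *
 *	struct foo {
 *		...
 *		struct epoch_context ctx;
 *	};
 *
 *	static void
 *	foo_destroy(epoch_context_t ctx)
 *	{
 *		free(__containerof(ctx, struct foo, ctx), M_FOO);
 *	}
 *
 *	epoch_call(foo_epoch, &fp->ctx, foo_destroy);
 */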
void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t))
{
	epoch_record_t er;
	ck_epoch_entry_t *cb;

	cb = (void *)ctx;

	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		goto boottime;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		goto boottime;
#endif

	critical_enter();
	*DPCPU_PTR(epoch_cb_count) += 1;
	er = epoch_currecord(epoch);
	ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
	critical_exit();
	return;
boottime:
	callback(ctx);
}

static void
epoch_call_task(void *arg __unused)
{
	ck_stack_entry_t *cursor, *head, *next;
	ck_epoch_record_t *record;
	epoch_record_t er;
	epoch_t epoch;
	ck_stack_t cb_stack;
	int i, npending, total;

	ck_stack_init(&cb_stack);
	critical_enter();
	epoch_enter(global_epoch);
	for (total = i = 0; i < epoch_count; i++) {
		if (__predict_false((epoch = allepochs[i]) == NULL))
			continue;
		er = epoch_currecord(epoch);
		record = &er->er_record;
		if ((npending = record->n_pending) == 0)
			continue;
		ck_epoch_poll_deferred(record, &cb_stack);
		total += npending - record->n_pending;
	}
	epoch_exit(global_epoch);
	*DPCPU_PTR(epoch_cb_count) -= total;
	critical_exit();

	counter_u64_add(epoch_call_count, total);
	counter_u64_add(epoch_call_task_count, 1);

	head = ck_stack_batch_pop_npsc(&cb_stack);
	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}
}

int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
	struct epoch_tracker *tdwait;
	struct thread *td;
	epoch_record_t er;

	td = curthread;
	if (td->td_epochnest == 0)
		return (0);
	if (__predict_false((epoch) == NULL))
		return (0);
	critical_enter();
	er = epoch_currecord(epoch);
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == td) {
			critical_exit();
			return (1);
		}
#ifdef INVARIANTS
	if (dump_onfail) {
		MPASS(td->td_pinned);
		printf("cpu: %d id: %d\n", curcpu, td->td_tid);
		TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
			printf("td_tid: %d ", tdwait->et_td->td_tid);
		printf("\n");
	}
#endif
	critical_exit();
	return (0);
}

int
in_epoch(epoch_t epoch)
{
	return (in_epoch_verbose(epoch, 0));
}

static void
epoch_drain_cb(struct epoch_context *ctx)
{
	struct epoch *epoch =
	    __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;

	if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
		mtx_lock(&epoch->e_drain_mtx);
		wakeup(epoch);
		mtx_unlock(&epoch->e_drain_mtx);
	}
}

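/*
 * Wait until the callbacks deferred with epoch_call() on 'epoch' have run:
 * temporarily bind to each CPU in turn, queue a drain callback on that
 * CPU's record, and sleep until every drain callback has fired.  This may
 * sleep, so it must not be called from an epoch section or any other
 * non-sleepable context.
 */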
void
epoch_drain_callbacks(epoch_t epoch)
{
	epoch_record_t er;
	struct thread *td;
	int was_bound;
	int old_pinned;
	int old_cpu;
	int cpu;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "epoch_drain_callbacks() may sleep!");

	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		return;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		return;
#endif
	DROP_GIANT();

	sx_xlock(&epoch->e_drain_sx);
	mtx_lock(&epoch->e_drain_mtx);

	td = curthread;
	thread_lock(td);
	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;

	CPU_FOREACH(cpu)
		epoch->e_drain_count++;
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		sched_bind(td, cpu);
		epoch_call(epoch, &er->er_drain_ctx, &epoch_drain_cb);
	}

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	thread_unlock(td);

	while (epoch->e_drain_count != 0)
		msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);

	mtx_unlock(&epoch->e_drain_mtx);
	sx_xunlock(&epoch->e_drain_sx);

	PICKUP_GIANT();
}

void
epoch_thread_init(struct thread *td)
{

	td->td_et = malloc(sizeof(struct epoch_tracker), M_EPOCH, M_WAITOK);
}

void
epoch_thread_fini(struct thread *td)
{

	free(td->td_et, M_EPOCH);
}