/*-
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Neither the name of Matthew Macy nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <ck_epoch.h>

MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 5000

#define EPOCH_EXITING 0x1
#ifdef __amd64__
#define EPOCH_ALIGN (CACHE_LINE_SIZE * 2)
#else
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif

SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");

static int poll_intvl;
SYSCTL_INT(_kern_epoch, OID_AUTO, poll_intvl, CTLFLAG_RWTUN,
    &poll_intvl, 0, "# of ticks to wait between garbage collecting deferred frees");
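
/*
 * Illustrative usage note (the value below is hypothetical): poll_intvl is
 * expressed in ticks and defaults to hz when left at 0 (see epoch_init()
 * below). Because the OID is CTLFLAG_RWTUN it can be changed at runtime,
 * e.g.
 *
 *	sysctl kern.epoch.poll_intvl=200
 *
 * or preset as the loader tunable kern.epoch.poll_intvl.
 */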

/* Stats. */
static counter_u64_t block_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");

typedef struct epoch_cb {
	void (*ec_callback)(epoch_context_t);
	STAILQ_ENTRY(epoch_cb) ec_link;
} *epoch_cb_t;

TAILQ_HEAD(threadlist, thread);

typedef struct epoch_record {
	ck_epoch_record_t er_record;
	volatile struct threadlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
} *epoch_record_t;

struct epoch_pcpu_state {
	struct epoch_record eps_record;
	STAILQ_HEAD(, epoch_cb) eps_cblist;
} __aligned(EPOCH_ALIGN);

struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	struct grouptask e_gtask;
	struct callout e_timer;
	struct mtx e_lock;
	int e_flags;
	/* make sure that immutable data doesn't overlap with the gtask, callout, and mutex */
	struct epoch_pcpu_state *e_pcpu_dom[MAXMEMDOM] __aligned(EPOCH_ALIGN);
	counter_u64_t e_frees;
	uint64_t e_free_last;
	struct epoch_pcpu_state *e_pcpu[0];
};

static __read_mostly int domcount[MAXMEMDOM];
static __read_mostly int domoffsets[MAXMEMDOM];
static __read_mostly int inited;

static void epoch_call_task(void *context);

#if defined(__powerpc64__) || defined(__powerpc__)
static bool usedomains = false;
#else
static bool usedomains = true;
#endif

static void
epoch_init(void *arg __unused)
{
	int domain, count;

	if (poll_intvl == 0)
		poll_intvl = hz;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	if (usedomains == false) {
		/* epoch_alloc() needs inited set even on the legacy (non-NUMA) path. */
		inited = 1;
		return;
	}
	count = domain = 0;
	domoffsets[0] = 0;
	for (domain = 0; domain < vm_ndomains; domain++) {
		domcount[domain] = CPU_COUNT(&cpuset_domain[domain]);
		if (bootverbose)
			printf("domcount[%d] %d\n", domain, domcount[domain]);
	}
	for (domain = 1; domain < vm_ndomains; domain++)
		domoffsets[domain] = domoffsets[domain-1] + domcount[domain-1];

	for (domain = 0; domain < vm_ndomains; domain++) {
		if (domcount[domain] == 0) {
			usedomains = false;
			break;
		}
	}

	inited = 1;
}
SYSINIT(epoch, SI_SUB_CPU + 1, SI_ORDER_FIRST, epoch_init, NULL);
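
/*
 * Worked example (hypothetical topology): with two memory domains of eight
 * CPUs each, epoch_init() above leaves
 *
 *	domcount[]   = { 8, 8 }
 *	domoffsets[] = { 0, 8 }
 *
 * so epoch_init_numa() below places the per-CPU state for CPU 11 at
 * e_pcpu_dom[1][11 - 8] and points e_pcpu[11] at that same slot.
 */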

static void
epoch_init_numa(epoch_t epoch)
{
	int domain, cpu_offset;
	struct epoch_pcpu_state *eps;
	epoch_record_t er;

	for (domain = 0; domain < vm_ndomains; domain++) {
		eps = malloc_domain(sizeof(*eps)*domcount[domain], M_EPOCH,
		    domain, M_ZERO|M_WAITOK);
		epoch->e_pcpu_dom[domain] = eps;
		cpu_offset = domoffsets[domain];
		for (int i = 0; i < domcount[domain]; i++, eps++) {
			epoch->e_pcpu[cpu_offset + i] = eps;
			er = &eps->eps_record;
			STAILQ_INIT(&eps->eps_cblist);
			ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
			TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
			er->er_cpuid = cpu_offset + i;
		}
	}
}

static void
epoch_init_legacy(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	epoch_record_t er;

	eps = malloc(sizeof(*eps)*mp_ncpus, M_EPOCH, M_ZERO|M_WAITOK);
	epoch->e_pcpu_dom[0] = eps;
	for (int i = 0; i < mp_ncpus; i++, eps++) {
		epoch->e_pcpu[i] = eps;
		er = &eps->eps_record;
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		STAILQ_INIT(&eps->eps_cblist);
		er->er_cpuid = i;
	}
}

static void
epoch_callout(void *arg)
{
	epoch_t epoch;
	uint64_t frees;

	epoch = arg;
	frees = counter_u64_fetch(epoch->e_frees);
	/* pick some better value */
	if (frees - epoch->e_free_last > 10) {
		GROUPTASK_ENQUEUE(&epoch->e_gtask);
		epoch->e_free_last = frees;
	}
	if ((epoch->e_flags & EPOCH_EXITING) == 0)
		callout_reset(&epoch->e_timer, poll_intvl, epoch_callout, epoch);
}

epoch_t
epoch_alloc(void)
{
	epoch_t epoch;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);
	epoch = malloc(sizeof(struct epoch) + mp_ncpus*sizeof(void*),
	    M_EPOCH, M_ZERO|M_WAITOK);
	ck_epoch_init(&epoch->e_epoch);
	epoch->e_frees = counter_u64_alloc(M_WAITOK);
	mtx_init(&epoch->e_lock, "epoch callout", NULL, MTX_DEF);
	callout_init_mtx(&epoch->e_timer, &epoch->e_lock, 0);
	taskqgroup_config_gtask_init(epoch, &epoch->e_gtask, epoch_call_task, "epoch call task");
	if (usedomains)
		epoch_init_numa(epoch);
	else
		epoch_init_legacy(epoch);
	callout_reset(&epoch->e_timer, poll_intvl, epoch_callout, epoch);
	return (epoch);
}

void
epoch_free(epoch_t epoch)
{
	int domain;
#ifdef INVARIANTS
	struct epoch_pcpu_state *eps;
	int cpu;

	CPU_FOREACH(cpu) {
		eps = epoch->e_pcpu[cpu];
		MPASS(TAILQ_EMPTY(&eps->eps_record.er_tdlist));
	}
#endif
	mtx_lock(&epoch->e_lock);
	epoch->e_flags |= EPOCH_EXITING;
	mtx_unlock(&epoch->e_lock);
	/*
	 * Execute any lingering callbacks.
	 */
	GROUPTASK_ENQUEUE(&epoch->e_gtask);
	gtaskqueue_drain(epoch->e_gtask.gt_taskqueue, &epoch->e_gtask.gt_task);
	callout_drain(&epoch->e_timer);
	mtx_destroy(&epoch->e_lock);
	counter_u64_free(epoch->e_frees);
	taskqgroup_config_gtask_deinit(&epoch->e_gtask);
	if (usedomains)
		for (domain = 0; domain < vm_ndomains; domain++)
			free_domain(epoch->e_pcpu_dom[domain], M_EPOCH);
	else
		free(epoch->e_pcpu_dom[0], M_EPOCH);
	free(epoch, M_EPOCH);
}
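
/*
 * Usage sketch (illustrative only; "net_epoch", "foo_list", "foo_lock",
 * "f_link", inspect() and M_FOO are hypothetical names, not part of this
 * file): a reader brackets its lookups with epoch_enter()/epoch_exit(),
 * while an updater unlinks the object under its own lock and calls
 * epoch_wait() before freeing it, so no reader can still hold a reference.
 *
 *	// reader
 *	epoch_enter(net_epoch);
 *	TAILQ_FOREACH(f, &foo_list, f_link)
 *		inspect(f);
 *	epoch_exit(net_epoch);
 *
 *	// updater
 *	mtx_lock(&foo_lock);
 *	TAILQ_REMOVE(&foo_list, f, f_link);
 *	mtx_unlock(&foo_lock);
 *	epoch_wait(net_epoch);
 *	free(f, M_FOO);
 */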

#define INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)

void
epoch_enter(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	struct thread *td;

	INIT_CHECK(epoch);

	td = curthread;
	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	td->td_epochnest++;
	MPASS(td->td_epochnest < UCHAR_MAX - 2);
	if (td->td_epochnest == 1)
		TAILQ_INSERT_TAIL(&eps->eps_record.er_tdlist, td, td_epochq);
#ifdef INVARIANTS
	if (td->td_epochnest > 1) {
		struct thread *curtd;
		int found = 0;

		TAILQ_FOREACH(curtd, &eps->eps_record.er_tdlist, td_epochq)
			if (curtd == td)
				found = 1;
		KASSERT(found, ("recursing on a second epoch"));
	}
#endif
	sched_pin();
	ck_epoch_begin(&eps->eps_record.er_record, NULL);
	critical_exit();
}

void
epoch_enter_nopreempt(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;

	INIT_CHECK(epoch);
	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	curthread->td_epochnest++;
	MPASS(curthread->td_epochnest < UCHAR_MAX - 2);
	ck_epoch_begin(&eps->eps_record.er_record, NULL);
}

void
epoch_exit(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;
	struct thread *td;

	td = curthread;
	INIT_CHECK(epoch);
	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	sched_unpin();
	ck_epoch_end(&eps->eps_record.er_record, NULL);
	td->td_epochnest--;
	if (td->td_epochnest == 0)
		TAILQ_REMOVE(&eps->eps_record.er_tdlist, td, td_epochq);
	eps->eps_record.er_gen++;
	critical_exit();
}

void
epoch_exit_nopreempt(epoch_t epoch)
{
	struct epoch_pcpu_state *eps;

	INIT_CHECK(epoch);
	MPASS(curthread->td_critnest);
	eps = epoch->e_pcpu[curcpu];
	ck_epoch_end(&eps->eps_record.er_record, NULL);
	curthread->td_epochnest--;
	critical_exit();
}

/*
 * epoch_block_handler is a callback from the ck code when another thread is
 * currently in an epoch section.
 */
static void
epoch_block_handler(struct ck_epoch *global __unused, ck_epoch_record_t *cr,
    void *arg __unused)
{
	epoch_record_t record;
	struct epoch_pcpu_state *eps;
	struct thread *td, *tdwait, *owner;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;

	eps = arg;
	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	spincount = 0;
	counter_u64_add(block_count, 1);
	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		if ((tdwait = TAILQ_FIRST(&record->er_tdlist)) != NULL &&
		    TD_IS_RUNNING(tdwait)) {
			gen = record->er_gen;
			thread_unlock(td);
			do {
				cpu_spinwait();
			} while (tdwait == TAILQ_FIRST(&record->er_tdlist) &&
			    gen == record->er_gen && TD_IS_RUNNING(tdwait) &&
			    spincount++ < MAX_ADAPTIVE_SPIN);
			thread_lock(td);
			return;
		}

		/*
		 * Being on the same CPU as that of the record on which
		 * we need to wait allows us access to the thread
		 * list associated with that CPU. We can then examine the
		 * oldest thread in the queue and wait on its turnstile
		 * until it resumes, and so on, until a grace period
		 * elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, td_epochq) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them. They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		if (!TD_IS_INHIBITED(tdwait) && tdwait->td_priority > td->td_priority) {
			thread_lock(tdwait);
			sched_prio(tdwait, td->td_priority);
			thread_unlock(tdwait);
		}
		if (TD_IS_INHIBITED(tdwait) && TD_ON_LOCK(tdwait) &&
		    ((ts = tdwait->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock. Before unlocking it we enter a
			 * critical section so that, once we reenable
			 * interrupts by dropping the thread lock, we are not
			 * preempted and tdwait does not get a chance to run.
			 */
			critical_enter();
			thread_unlock(td);
			owner = turnstile_lock(ts, &lock);
			/*
			 * A non-NULL owner indicates that the turnstile lock
			 * was acquired. We may only proceed to turnstile_wait
			 * if we hold that lock and the turnstile we locked is
			 * still the one tdwait is blocked on. Otherwise the
			 * turnstile pointer has been changed out from
			 * underneath us (for example because the lock holder
			 * has already signalled tdwait), and we move on to
			 * the next waiter.
			 */
			if (owner != NULL && ts == tdwait->td_blocked) {
				MPASS(TD_IS_INHIBITED(tdwait) && TD_ON_LOCK(tdwait));
				critical_exit();
				turnstile_wait(ts, owner, tdwait->td_tsqueue);
				counter_u64_add(turnstile_count, 1);
				thread_lock(td);
				return;
			} else if (owner != NULL)
				turnstile_unlock(ts, lock);
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == 0,
			    ("%d locks held", td->td_locks));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock,
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td). Else a
	 * deadlock-like situation might happen. (HPS)
	 */
	thread_unlock(td);
	thread_lock(td);
}

void
epoch_wait(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;

	INIT_CHECK(epoch);

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "epoch_wait() can sleep");

	td = curthread;
	KASSERT(td->td_epochnest == 0, ("epoch_wait() in the middle of an epoch section"));
	thread_lock(td);

	DROP_GIANT();

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	KASSERT(td->td_locks == 0,
	    ("%d locks held", td->td_locks));
	PICKUP_GIANT();
}

void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback)(epoch_context_t))
{
	struct epoch_pcpu_state *eps;
	epoch_cb_t cb;

	cb = (void *)ctx;

	MPASS(cb->ec_callback == NULL);
	MPASS(cb->ec_link.stqe_next == NULL);
	MPASS(epoch);
	MPASS(callback);
	cb->ec_callback = callback;
	counter_u64_add(epoch->e_frees, 1);
	critical_enter();
	eps = epoch->e_pcpu[curcpu];
	STAILQ_INSERT_HEAD(&eps->eps_cblist, cb, ec_link);
	critical_exit();
}

static void
epoch_call_task(void *context)
{
	struct epoch_pcpu_state *eps;
	epoch_t epoch;
	epoch_cb_t cb;
	struct thread *td;
	int cpu;
	STAILQ_HEAD(, epoch_cb) tmp_head;

	epoch = context;
	STAILQ_INIT(&tmp_head);
	td = curthread;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
		eps = epoch->e_pcpu[cpu];
		if (!STAILQ_EMPTY(&eps->eps_cblist))
			STAILQ_CONCAT(&tmp_head, &eps->eps_cblist);
	}
	sched_unbind(td);
	thread_unlock(td);
	epoch_wait(epoch);

	while ((cb = STAILQ_FIRST(&tmp_head)) != NULL) {
		STAILQ_REMOVE_HEAD(&tmp_head, ec_link);
		cb->ec_callback((void *)cb);
	}
}

int
in_epoch(void)
{
	return (curthread->td_epochnest != 0);
}
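
/*
 * Deferred-free sketch (illustrative only; "struct foo", "foo_free_cb",
 * "net_epoch" and M_FOO are hypothetical): a consumer embeds a zeroed
 * struct epoch_context in the object being reclaimed and passes epoch_call()
 * a callback that recovers the object with __containerof() once
 * epoch_call_task() has observed a grace period via epoch_wait().
 *
 *	struct foo {
 *		...
 *		struct epoch_context f_ctx;	// must be zeroed before use
 *	};
 *
 *	static void
 *	foo_free_cb(epoch_context_t ctx)
 *	{
 *		struct foo *f = __containerof(ctx, struct foo, f_ctx);
 *
 *		free(f, M_FOO);
 *	}
 *
 *	// after unlinking f from all globally visible structures:
 *	epoch_call(net_epoch, &f->f_ctx, foo_free_cb);
 */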