/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <ck_epoch.h>

static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

/* arbitrary --- needs benchmarking */
#define	MAX_ADAPTIVE_SPIN 1000
#define	MAX_EPOCHS 64

CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");
/* Stats. */
static counter_u64_t block_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");

TAILQ_HEAD(threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

epoch_t	allepochs[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
static __read_mostly int epoch_count;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static uma_zone_t pcpu_zone_record;

static void
epoch_init(void *arg __unused)
{
	int cpu;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	epoch_call_count = counter_u64_alloc(M_WAITOK);
	epoch_call_task_count = counter_u64_alloc(M_WAITOK);

	pcpu_zone_record = uma_zcreate("epoch_record pcpu",
	    sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	CPU_FOREACH(cpu) {
		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
		    epoch_call_task, NULL);
		taskqgroup_attach_cpu(qgroup_softirq,
		    DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1,
		    "epoch call task");
	}
	inited = 1;
	global_epoch = epoch_alloc(0);
	global_epoch_preempt = epoch_alloc(EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
	inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif

static void
epoch_ctor(epoch_t epoch)
{
	epoch_record_t er;
	int cpu;

	epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		bzero(er, sizeof(*er));
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		er->er_cpuid = cpu;
	}
}

epoch_t
epoch_alloc(int flags)
{
	epoch_t epoch;

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);
	epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK);
	ck_epoch_init(&epoch->e_epoch);
	epoch_ctor(epoch);
	MPASS(epoch_count < MAX_EPOCHS - 2);
	epoch->e_flags = flags;
	epoch->e_idx = epoch_count;
	allepochs[epoch_count++] = epoch;
	return (epoch);
}
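/*
 * Illustrative read-side sketch, not part of this file's implementation:
 * the "foo" names and foo_epoch below are hypothetical.  A preemptible
 * epoch section brackets a lock-free lookup; the object cannot be
 * reclaimed until every section that might still see it has exited:
 *
 *	struct epoch_tracker et;
 *	struct foo *f;
 *
 *	epoch_enter_preempt(foo_epoch, &et);
 *	CK_STAILQ_FOREACH(f, &foo_head, f_link)
 *		if (f->f_key == key)
 *			break;
 *	if (f != NULL)
 *		foo_ref(f);	// take a reference before leaving the section
 *	epoch_exit_preempt(foo_epoch, &et);
 */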
void
epoch_free(epoch_t epoch)
{
#ifdef INVARIANTS
	struct epoch_record *er;
	int cpu;

	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		MPASS(TAILQ_EMPTY(&er->er_tdlist));
	}
#endif
	allepochs[epoch->e_idx] = NULL;
	epoch_wait(global_epoch);
	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
	free(epoch, M_EPOCH);
}

void
epoch_enter_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
{

	epoch_enter_preempt(epoch, et);
}

void
epoch_exit_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
{

	epoch_exit_preempt(epoch, et);
}

void
epoch_enter_KBI(epoch_t epoch)
{

	epoch_enter(epoch);
}

void
epoch_exit_KBI(epoch_t epoch)
{

	epoch_exit(epoch);
}
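/*
 * Illustrative sketch of the non-preemptible variant (the "bar" names
 * are hypothetical).  epoch_enter()/epoch_exit() sections run inside a
 * critical section, so they must be short and must not sleep;
 * global_epoch is used the same way by this file itself in
 * epoch_call_task() below:
 *
 *	struct bar *b;
 *
 *	epoch_enter(bar_epoch);
 *	CK_STAILQ_FOREACH(b, &bar_head, b_link)
 *		bar_count(b);
 *	epoch_exit(bar_epoch);
 */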
/*
 * epoch_block_handler_preempt() is a callback invoked by the ck code when
 * another thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused,
    ck_epoch_record_t *cr, void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *owner, *curwaittd;
	struct epoch_thread *tdwait;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;
	int locksheld __unused;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	locksheld = td->td_locks;
	spincount = 0;
	counter_u64_add(block_count, 1);
	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		if ((tdwait = TAILQ_FIRST(&record->er_tdlist)) != NULL &&
		    TD_IS_RUNNING(tdwait->et_td)) {
			gen = record->er_gen;
			thread_unlock(td);
			do {
				cpu_spinwait();
			} while (tdwait == TAILQ_FIRST(&record->er_tdlist) &&
			    gen == record->er_gen &&
			    TD_IS_RUNNING(tdwait->et_td) &&
			    spincount++ < MAX_ADAPTIVE_SPIN);
			thread_lock(td);
			return;
		}
		/*
		 * Being on the same CPU as the record we need to wait on
		 * gives us access to the thread list associated with that
		 * CPU.  We can then examine the oldest thread in the queue
		 * and wait on its turnstile until it resumes, and so on,
		 * until a grace period elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile.  Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them.  They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		curwaittd = tdwait->et_td;
		if (!TD_IS_INHIBITED(curwaittd) &&
		    curwaittd->td_priority > td->td_priority) {
			critical_enter();
			thread_unlock(td);
			thread_lock(curwaittd);
			sched_prio(curwaittd, td->td_priority);
			thread_unlock(curwaittd);
			thread_lock(td);
			critical_exit();
		}
		if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
		    ((ts = curwaittd->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait() to reacquire
			 * the thread lock.  Before unlocking it we enter a
			 * critical section to prevent preemption after we
			 * reenable interrupts by dropping the thread lock, in
			 * order to prevent curwaittd from getting to run.
			 */
			critical_enter();
			thread_unlock(td);
			owner = turnstile_lock(ts, &lock);
			/*
			 * A non-NULL owner pointer indicates that the lock
			 * succeeded.  Only if we hold the lock and the
			 * turnstile we locked is still the one that curwaittd
			 * is blocked on can we block on it.  Otherwise the
			 * turnstile pointer has been changed out from
			 * underneath us, as in the case where the lock holder
			 * has signalled curwaittd, and we need to move on to
			 * the next waiter.
			 */
			if (owner != NULL && ts == curwaittd->td_blocked) {
				MPASS(TD_IS_INHIBITED(curwaittd) &&
				    TD_ON_LOCK(curwaittd));
				critical_exit();
				turnstile_wait(ts, owner,
				    curwaittd->td_tsqueue);
				counter_u64_add(turnstile_count, 1);
				thread_lock(td);
				return;
			} else if (owner != NULL)
				turnstile_unlock(ts, lock);
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == locksheld,
			    ("%d extra locks held", td->td_locks - locksheld));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

	/*
	 * Release the thread lock while yielding to
	 * allow other threads to acquire the lock
	 * pointed to by TDQ_LOCKPTR(td).  Otherwise a
	 * deadlock-like situation might happen. (HPS)
	 */
	thread_unlock(td);
	thread_lock(td);
}

void
epoch_wait_preempt(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;
	int locks __unused;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;
#ifdef INVARIANTS
	locks = curthread->td_locks;
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "epoch_wait() can be long running");
	KASSERT(!in_epoch(epoch),
	    ("epoch_wait_preempt() called in the middle "
	    "of an epoch section of the same epoch"));
#endif
	thread_lock(td);
	DROP_GIANT();

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch,
	    epoch_block_handler_preempt, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}

static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
	cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags == 0);
	critical_enter();
	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
	critical_exit();
}
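/*
 * Illustrative writer-side sketch (hypothetical "foo" names): unlink
 * the object under the writer lock, let a grace period elapse so that
 * no epoch section can still hold a pointer to it, then free it
 * synchronously.  epoch_wait_preempt() may sleep, so it must not be
 * called from within an epoch section (see the KASSERT above):
 *
 *	mtx_lock(&foo_lock);
 *	CK_STAILQ_REMOVE(&foo_head, f, foo, f_link);
 *	mtx_unlock(&foo_lock);
 *	epoch_wait_preempt(foo_epoch);
 *	free(f, M_FOO);
 */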
void
epoch_call(epoch_t epoch, epoch_context_t ctx,
    void (*callback) (epoch_context_t))
{
	epoch_record_t er;
	ck_epoch_entry_t *cb;

	cb = (void *)ctx;

	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		goto boottime;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		goto boottime;
#endif

	critical_enter();
	*DPCPU_PTR(epoch_cb_count) += 1;
	er = epoch_currecord(epoch);
	ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
	critical_exit();
	return;
boottime:
	callback(ctx);
}

static void
epoch_call_task(void *arg __unused)
{
	ck_stack_entry_t *cursor, *head, *next;
	ck_epoch_record_t *record;
	epoch_record_t er;
	epoch_t epoch;
	ck_stack_t cb_stack;
	int i, npending, total;

	ck_stack_init(&cb_stack);
	critical_enter();
	epoch_enter(global_epoch);
	for (total = i = 0; i < epoch_count; i++) {
		if (__predict_false((epoch = allepochs[i]) == NULL))
			continue;
		er = epoch_currecord(epoch);
		record = &er->er_record;
		if ((npending = record->n_pending) == 0)
			continue;
		ck_epoch_poll_deferred(record, &cb_stack);
		total += npending - record->n_pending;
	}
	epoch_exit(global_epoch);
	*DPCPU_PTR(epoch_cb_count) -= total;
	critical_exit();

	counter_u64_add(epoch_call_count, total);
	counter_u64_add(epoch_call_task_count, 1);

	head = ck_stack_batch_pop_npsc(&cb_stack);
	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}
}
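/*
 * Illustrative epoch_call() sketch (hypothetical "foo" names).  The
 * caller embeds a struct epoch_context in the object being freed; the
 * callback recovers the object with __containerof() and runs from the
 * per-CPU epoch_cb_task once a grace period has elapsed:
 *
 *	struct foo {
 *		...
 *		struct epoch_context f_ctx;
 *	};
 *
 *	static void
 *	foo_free_cb(epoch_context_t ctx)
 *	{
 *		struct foo *f = __containerof(ctx, struct foo, f_ctx);
 *
 *		free(f, M_FOO);
 *	}
 *
 *	// after unlinking f from all lists:
 *	epoch_call(foo_epoch, &f->f_ctx, foo_free_cb);
 */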
int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
	struct epoch_thread *tdwait;
	struct thread *td;
	epoch_record_t er;

	td = curthread;
	if (td->td_epochnest == 0)
		return (0);
	if (__predict_false(epoch == NULL))
		return (0);
	critical_enter();
	er = epoch_currecord(epoch);
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == td) {
			critical_exit();
			return (1);
		}
#ifdef INVARIANTS
	if (dump_onfail) {
		MPASS(td->td_pinned);
		printf("cpu: %d id: %d\n", curcpu, td->td_tid);
		TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
			printf("td_tid: %d ", tdwait->et_td->td_tid);
		printf("\n");
	}
#endif
	critical_exit();
	return (0);
}

int
in_epoch(epoch_t epoch)
{
	return (in_epoch_verbose(epoch, 0));
}

void
epoch_adjust_prio(struct thread *td, u_char prio)
{
	thread_lock(td);
	sched_prio(td, prio);
	thread_unlock(td);
}
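/*
 * Illustrative in_epoch() sketch (hypothetical foo_lookup()): assert
 * that a helper which relies on a preemptible epoch section for
 * liveness was in fact called from inside one.  Note that the thread
 * list walked above is only populated by preemptible sections:
 *
 *	static struct foo *
 *	foo_lookup(uint32_t key)
 *	{
 *		MPASS(in_epoch(foo_epoch));
 *		...
 *	}
 */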