/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#ifdef EPOCH_TRACE
#include <machine/stdarg.h>
#include <sys/stack.h>
#include <sys/tree.h>
#endif
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <machine/stack.h>

#include <ck_epoch.h>

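/*
 * Pad epoch data out to EPOCH_ALIGN to avoid false sharing.  The two
 * cache line value on amd64 is presumably meant to defeat the
 * adjacent-cache-line prefetcher; treat it as a tuning heuristic.
 */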
#ifdef __amd64__
#define	EPOCH_ALIGN	(CACHE_LINE_SIZE * 2)
#else
#define	EPOCH_ALIGN	CACHE_LINE_SIZE
#endif

TAILQ_HEAD (epoch_tdlist, epoch_tracker);
typedef struct epoch_record {
	ck_epoch_record_t er_record;
	struct epoch_context er_drain_ctx;
	struct epoch *er_parent;
	volatile struct epoch_tdlist er_tdlist;
	volatile uint32_t er_gen;
	uint32_t er_cpuid;
#ifdef INVARIANTS
	/* Used to verify record ownership for non-preemptible epochs. */
	struct thread *er_td;
#endif
} __aligned(EPOCH_ALIGN) *epoch_record_t;

struct epoch {
	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
	epoch_record_t e_pcpu_record;
	int	e_in_use;
	int	e_flags;
	struct sx e_drain_sx;
	struct mtx e_drain_mtx;
	volatile int e_drain_count;
	const char *e_name;
};

/* arbitrary --- needs benchmarking */
#define	MAX_ADAPTIVE_SPIN 100
#define	MAX_EPOCHS 64

CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "epoch stats");

/* Stats. */
static counter_u64_t block_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");

TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

static struct epoch epoch_array[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static uma_zone_t pcpu_zone_record;

static struct sx epoch_sx;

#define	EPOCH_LOCK() sx_xlock(&epoch_sx)
#define	EPOCH_UNLOCK() sx_xunlock(&epoch_sx)

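/*
 * Return the calling CPU's record for the given epoch.  The caller is
 * expected to prevent migration, e.g. via a critical section, for the
 * returned pointer to remain meaningful.
 */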
static epoch_record_t
epoch_currecord(epoch_t epoch)
{

	return (zpcpu_get(epoch->e_pcpu_record));
}

#ifdef EPOCH_TRACE
struct stackentry {
	RB_ENTRY(stackentry) se_node;
	struct stack se_stack;
};

static int
stackentry_compare(struct stackentry *a, struct stackentry *b)
{

	if (a->se_stack.depth > b->se_stack.depth)
		return (1);
	if (a->se_stack.depth < b->se_stack.depth)
		return (-1);
	for (int i = 0; i < a->se_stack.depth; i++) {
		if (a->se_stack.pcs[i] > b->se_stack.pcs[i])
			return (1);
		if (a->se_stack.pcs[i] < b->se_stack.pcs[i])
			return (-1);
	}

	return (0);
}

RB_HEAD(stacktree, stackentry) epoch_stacks = RB_INITIALIZER(&epoch_stacks);
RB_GENERATE_STATIC(stacktree, stackentry, se_node, stackentry_compare);

static struct mtx epoch_stacks_lock;
MTX_SYSINIT(epochstacks, &epoch_stacks_lock, "epoch_stacks", MTX_DEF);

static bool epoch_trace_stack_print = true;
SYSCTL_BOOL(_kern_epoch, OID_AUTO, trace_stack_print, CTLFLAG_RWTUN,
    &epoch_trace_stack_print, 0, "Print stack traces on epoch reports");

static void epoch_trace_report(const char *fmt, ...) __printflike(1, 2);
static inline void
epoch_trace_report(const char *fmt, ...)
{
	va_list ap;
	struct stackentry se, *new;

	stack_save(&se.se_stack);

	/* Tree is never reduced - go lockless. */
	if (RB_FIND(stacktree, &epoch_stacks, &se) != NULL)
		return;

	new = malloc(sizeof(*new), M_STACK, M_NOWAIT);
	if (new != NULL) {
		bcopy(&se.se_stack, &new->se_stack, sizeof(struct stack));

		mtx_lock(&epoch_stacks_lock);
		new = RB_INSERT(stacktree, &epoch_stacks, new);
		mtx_unlock(&epoch_stacks_lock);
		if (new != NULL)
			free(new, M_STACK);
	}

	va_start(ap, fmt);
	(void)vprintf(fmt, ap);
	va_end(ap);
	if (epoch_trace_stack_print)
		stack_print_ddb(&se.se_stack);
}

static inline void
epoch_trace_enter(struct thread *td, epoch_t epoch, epoch_tracker_t et,
    const char *file, int line)
{
	epoch_tracker_t iet;

	SLIST_FOREACH(iet, &td->td_epochs, et_tlink) {
		if (iet->et_epoch != epoch)
			continue;
		epoch_trace_report("Recursively entering epoch %s "
		    "at %s:%d, previously entered at %s:%d\n",
		    epoch->e_name, file, line,
		    iet->et_file, iet->et_line);
	}
	et->et_epoch = epoch;
	et->et_file = file;
	et->et_line = line;
	et->et_flags = 0;
	SLIST_INSERT_HEAD(&td->td_epochs, et, et_tlink);
}

static inline void
epoch_trace_exit(struct thread *td, epoch_t epoch, epoch_tracker_t et,
    const char *file, int line)
{

	if (SLIST_FIRST(&td->td_epochs) != et) {
		epoch_trace_report("Exiting epoch %s out of nesting order "
		    "at %s:%d. Most recently entered %s at %s:%d\n",
		    epoch->e_name,
		    file, line,
		    SLIST_FIRST(&td->td_epochs)->et_epoch->e_name,
		    SLIST_FIRST(&td->td_epochs)->et_file,
		    SLIST_FIRST(&td->td_epochs)->et_line);
		/* This will panic if et is not anywhere on td_epochs. */
		SLIST_REMOVE(&td->td_epochs, et, epoch_tracker, et_tlink);
	} else
		SLIST_REMOVE_HEAD(&td->td_epochs, et_tlink);
	if (et->et_flags & ET_REPORT_EXIT)
		printf("Td %p exiting epoch %s at %s:%d\n", td, epoch->e_name,
		    file, line);
}

/* Used by assertions that check thread state before going to sleep. */
void
epoch_trace_list(struct thread *td)
{
	epoch_tracker_t iet;

	SLIST_FOREACH(iet, &td->td_epochs, et_tlink)
		printf("Epoch %s entered at %s:%d\n", iet->et_epoch->e_name,
		    iet->et_file, iet->et_line);
}

void
epoch_where_report(epoch_t epoch)
{
	epoch_record_t er;
	struct epoch_tracker *tdwait;

	MPASS(epoch != NULL);
	MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0);
	MPASS(!THREAD_CAN_SLEEP());
	critical_enter();
	er = epoch_currecord(epoch);
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == curthread)
			break;
	critical_exit();
	if (tdwait != NULL) {
		tdwait->et_flags |= ET_REPORT_EXIT;
		printf("Td %p entered epoch %s at %s:%d\n", curthread,
		    epoch->e_name, tdwait->et_file, tdwait->et_line);
	}
}
#endif /* EPOCH_TRACE */

static void
epoch_init(void *arg __unused)
{
	int cpu;

	block_count = counter_u64_alloc(M_WAITOK);
	migrate_count = counter_u64_alloc(M_WAITOK);
	turnstile_count = counter_u64_alloc(M_WAITOK);
	switch_count = counter_u64_alloc(M_WAITOK);
	epoch_call_count = counter_u64_alloc(M_WAITOK);
	epoch_call_task_count = counter_u64_alloc(M_WAITOK);

	pcpu_zone_record = uma_zcreate("epoch_record pcpu",
	    sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	CPU_FOREACH(cpu) {
		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
		    epoch_call_task, NULL);
		taskqgroup_attach_cpu(qgroup_softirq,
		    DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
		    "epoch call task");
	}
#ifdef EPOCH_TRACE
	SLIST_INIT(&thread0.td_epochs);
#endif
	sx_init(&epoch_sx, "epoch-sx");
	inited = 1;
	global_epoch = epoch_alloc("Global", 0);
	global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_EPOCH, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
	inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif

static void
epoch_ctor(epoch_t epoch)
{
	epoch_record_t er;
	int cpu;

	epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		bzero(er, sizeof(*er));
		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
		er->er_cpuid = cpu;
		er->er_parent = epoch;
	}
}

static void
epoch_adjust_prio(struct thread *td, u_char prio)
{

	thread_lock(td);
	sched_prio(td, prio);
	thread_unlock(td);
}

epoch_t
epoch_alloc(const char *name, int flags)
{
	epoch_t epoch;
	int i;

	MPASS(name != NULL);

	if (__predict_false(!inited))
		panic("%s called too early in boot", __func__);

	EPOCH_LOCK();

	/*
	 * Find a free index in the epoch array. If no free index is
	 * found, try to use the index after the last one.
	 */
	for (i = 0;; i++) {
		/*
		 * If too many epochs are currently allocated,
		 * return NULL.
		 */
		if (i == MAX_EPOCHS) {
			epoch = NULL;
			goto done;
		}
		if (epoch_array[i].e_in_use == 0)
			break;
	}

	epoch = epoch_array + i;
	ck_epoch_init(&epoch->e_epoch);
	epoch_ctor(epoch);
	epoch->e_flags = flags;
	epoch->e_name = name;
	sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
	mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);

	/*
	 * Set e_in_use last, because when this field is set the
	 * epoch_call_task() function will start scanning this epoch
	 * structure.
	 */
	atomic_store_rel_int(&epoch->e_in_use, 1);
done:
	EPOCH_UNLOCK();
	return (epoch);
}

void
epoch_free(epoch_t epoch)
{
#ifdef INVARIANTS
	int cpu;
#endif

	EPOCH_LOCK();

	MPASS(epoch->e_in_use != 0);

	epoch_drain_callbacks(epoch);

	atomic_store_rel_int(&epoch->e_in_use, 0);
	/*
	 * Make sure the epoch_call_task() function sees e_in_use equal
	 * to zero, by calling epoch_wait() on the global_epoch.
	 */
	epoch_wait(global_epoch);
#ifdef INVARIANTS
	CPU_FOREACH(cpu) {
		epoch_record_t er;

		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);

		/*
		 * Sanity check: none of the records should be in use anymore.
		 * We drained callbacks above and freeing the pcpu records is
		 * imminent.
		 */
		MPASS(er->er_td == NULL);
		MPASS(TAILQ_EMPTY(&er->er_tdlist));
	}
#endif
	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
	mtx_destroy(&epoch->e_drain_mtx);
	sx_destroy(&epoch->e_drain_sx);
	memset(epoch, 0, sizeof(*epoch));

	EPOCH_UNLOCK();
}

#define	INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)

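/*
 * A minimal usage sketch for preemptible epochs (epoch_enter_preempt()
 * and epoch_exit_preempt() are the wrappers from sys/epoch.h that,
 * with EPOCH_TRACE, supply the file/line arguments; "some_epoch" is a
 * placeholder):
 *
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(some_epoch, &et);
 *	// lock-free read-side work; sleeping is not permitted
 *	epoch_exit_preempt(some_epoch, &et);
 *
 * The tracker must live on the entering thread's stack; see the
 * kstack_contains() assertion below.
 */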
void
_epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
	struct epoch_record *er;
	struct thread *td;

	MPASS(cold || epoch != NULL);
	td = curthread;
	MPASS(kstack_contains(td, (vm_offset_t)et, sizeof(*et)));

	INIT_CHECK(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);

#ifdef EPOCH_TRACE
	epoch_trace_enter(td, epoch, et, file, line);
#endif
	et->et_td = td;
	THREAD_NO_SLEEPING();
	critical_enter();
	sched_pin();
	et->et_old_priority = td->td_priority;
	er = epoch_currecord(epoch);
	/* Record-level tracking is reserved for non-preemptible epochs. */
	MPASS(er->er_td == NULL);
	TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
	ck_epoch_begin(&er->er_record, &et->et_section);
	critical_exit();
}

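/*
 * Enter a non-preemptible epoch section.  The thread remains in a
 * critical section (and thus pinned and unable to sleep) until the
 * matching epoch_exit().
 */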
void
epoch_enter(epoch_t epoch)
{
	epoch_record_t er;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	critical_enter();
	er = epoch_currecord(epoch);
#ifdef INVARIANTS
	if (er->er_record.active == 0) {
		MPASS(er->er_td == NULL);
		er->er_td = curthread;
	} else {
		/* We've recursed, just make sure our accounting isn't wrong. */
		MPASS(er->er_td == curthread);
	}
#endif
	ck_epoch_begin(&er->er_record, NULL);
}

void
_epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
	struct epoch_record *er;
	struct thread *td;

	INIT_CHECK(epoch);
	td = curthread;
	critical_enter();
	sched_unpin();
	THREAD_SLEEPING_OK();
	er = epoch_currecord(epoch);
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	MPASS(et != NULL);
	MPASS(et->et_td == td);
#ifdef INVARIANTS
	et->et_td = (void *)0xDEADBEEF;
	/* Record-level tracking is reserved for non-preemptible epochs. */
	MPASS(er->er_td == NULL);
#endif
	ck_epoch_end(&er->er_record, &et->et_section);
	TAILQ_REMOVE(&er->er_tdlist, et, et_link);
	er->er_gen++;
	if (__predict_false(et->et_old_priority != td->td_priority))
		epoch_adjust_prio(td, et->et_old_priority);
	critical_exit();
#ifdef EPOCH_TRACE
	epoch_trace_exit(td, epoch, et, file, line);
#endif
}

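/*
 * Exit the non-preemptible epoch section begun by epoch_enter(),
 * dropping the critical section entered there.
 */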
void
epoch_exit(epoch_t epoch)
{
	epoch_record_t er;

	INIT_CHECK(epoch);
	er = epoch_currecord(epoch);
	ck_epoch_end(&er->er_record, NULL);
#ifdef INVARIANTS
	MPASS(er->er_td == curthread);
	if (er->er_record.active == 0)
		er->er_td = NULL;
#endif
	critical_exit();
}

/*
 * epoch_block_handler_preempt() is a callback from the CK code when another
 * thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused,
    ck_epoch_record_t *cr, void *arg __unused)
{
	epoch_record_t record;
	struct thread *td, *owner, *curwaittd;
	struct epoch_tracker *tdwait;
	struct turnstile *ts;
	struct lock_object *lock;
	int spincount, gen;
	int locksheld __unused;

	record = __containerof(cr, struct epoch_record, er_record);
	td = curthread;
	locksheld = td->td_locks;
	spincount = 0;
	counter_u64_add(block_count, 1);
	/*
	 * We lost a race and there are no longer any threads
	 * on the CPU in an epoch section.
	 */
	if (TAILQ_EMPTY(&record->er_tdlist))
		return;

	if (record->er_cpuid != curcpu) {
		/*
		 * If the head of the list is running, we can wait for it
		 * to remove itself from the list and thus save us the
		 * overhead of a migration.
		 */
		gen = record->er_gen;
		thread_unlock(td);
		/*
		 * We can't actually check if the waiting thread is running,
		 * so we simply poll for it to exit before giving up and
		 * migrating.
		 */
		do {
			cpu_spinwait();
		} while (!TAILQ_EMPTY(&record->er_tdlist) &&
		    gen == record->er_gen &&
		    spincount++ < MAX_ADAPTIVE_SPIN);
		thread_lock(td);
		/*
		 * If the generation has changed, we can poll again;
		 * otherwise we need to migrate.
		 */
		if (gen != record->er_gen)
			return;
		/*
		 * Being on the same CPU as that of the record on which
		 * we need to wait allows us access to the thread
		 * list associated with that CPU. We can then examine the
		 * oldest thread in the queue and wait on its turnstile
		 * until it resumes and so on until a grace period
		 * elapses.
		 */
		counter_u64_add(migrate_count, 1);
		sched_bind(td, record->er_cpuid);
		/*
		 * At this point we need to return to the ck code
		 * to scan to see if a grace period has elapsed.
		 * We can't move on to check the thread list, because
		 * in the meantime new threads may have arrived that
		 * in fact belong to a different epoch.
		 */
		return;
	}
	/*
	 * Try to find a thread in an epoch section on this CPU
	 * waiting on a turnstile. Otherwise find the lowest
	 * priority thread (highest prio value) and drop our priority
	 * to match to allow it to run.
	 */
	TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
		/*
		 * Propagate our priority to any other waiters to prevent us
		 * from starving them. They will have their original priority
		 * restored on exit from epoch_wait().
		 */
		curwaittd = tdwait->et_td;
		if (!TD_IS_INHIBITED(curwaittd) &&
		    curwaittd->td_priority > td->td_priority) {
			critical_enter();
			thread_unlock(td);
			thread_lock(curwaittd);
			sched_prio(curwaittd, td->td_priority);
			thread_unlock(curwaittd);
			thread_lock(td);
			critical_exit();
		}
		if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
		    ((ts = curwaittd->td_blocked) != NULL)) {
			/*
			 * We unlock td to allow turnstile_wait to reacquire
			 * the thread lock. Before unlocking it we enter a
			 * critical section to prevent preemption after we
			 * reenable interrupts by dropping the thread lock in
			 * order to prevent curwaittd from getting to run.
			 */
			critical_enter();
			thread_unlock(td);

			if (turnstile_lock(ts, &lock, &owner)) {
				if (ts == curwaittd->td_blocked) {
					MPASS(TD_IS_INHIBITED(curwaittd) &&
					    TD_ON_LOCK(curwaittd));
					critical_exit();
					turnstile_wait(ts, owner,
					    curwaittd->td_tsqueue);
					counter_u64_add(turnstile_count, 1);
					thread_lock(td);
					return;
				}
				turnstile_unlock(ts, lock);
			}
			thread_lock(td);
			critical_exit();
			KASSERT(td->td_locks == locksheld,
			    ("%d extra locks held", td->td_locks - locksheld));
		}
	}
	/*
	 * We didn't find any threads actually blocked on a lock
	 * so we have nothing to do except context switch away.
	 */
	counter_u64_add(switch_count, 1);
	mi_switch(SW_VOL | SWT_RELINQUISH);
	/*
	 * It is important that the thread lock is dropped while yielding
	 * to allow other threads to acquire the lock pointed to by
	 * TDQ_LOCKPTR(td). Currently mi_switch() will unlock the
	 * thread lock before returning; otherwise a deadlock-like
	 * situation might arise.
	 */
	thread_lock(td);
}

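/*
 * Wait until a grace period has elapsed for a preemptible epoch: every
 * thread that was in a section of this epoch at the time of the call
 * has left it.  May sleep; the caller's CPU binding, pinning and
 * priority are saved and restored around the wait.
 */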
void
epoch_wait_preempt(epoch_t epoch)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;
	int locks __unused;

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	td = curthread;
#ifdef INVARIANTS
	locks = curthread->td_locks;
	MPASS(epoch->e_flags & EPOCH_PREEMPT);
	if ((epoch->e_flags & EPOCH_LOCKED) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "epoch_wait() can be long running");
	KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
	    "of an epoch section of the same epoch"));
#endif
	DROP_GIANT();
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
	    NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);
	PICKUP_GIANT();
	KASSERT(td->td_locks == locks,
	    ("%d residual locks held", td->td_locks - locks));
}

static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
	cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{

	MPASS(cold || epoch != NULL);
	INIT_CHECK(epoch);
	MPASS(epoch->e_flags == 0);
	critical_enter();
	ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
	critical_exit();
}

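/*
 * Defer a callback until a grace period has elapsed.  The context is
 * expected to be embedded in the object it protects; the callback
 * itself runs later from this CPU's epoch_cb_task.  Before the epoch
 * machinery is up, the callback is simply invoked synchronously.
 */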
void
epoch_call(epoch_t epoch, epoch_callback_t callback, epoch_context_t ctx)
{
	epoch_record_t er;
	ck_epoch_entry_t *cb;

	cb = (void *)ctx;

	MPASS(callback);
	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		goto boottime;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		goto boottime;
#endif

	critical_enter();
	*DPCPU_PTR(epoch_cb_count) += 1;
	er = epoch_currecord(epoch);
	ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
	critical_exit();
	return;
boottime:
	callback(ctx);
}

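/*
 * Per-CPU grouptask handler: poll every in-use epoch's record on this
 * CPU for expired deferred callbacks, collect them on a local stack,
 * and run them outside of the critical section.
 */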
static void
epoch_call_task(void *arg __unused)
{
	ck_stack_entry_t *cursor, *head, *next;
	ck_epoch_record_t *record;
	epoch_record_t er;
	epoch_t epoch;
	ck_stack_t cb_stack;
	int i, npending, total;

	ck_stack_init(&cb_stack);
	critical_enter();
	epoch_enter(global_epoch);
	for (total = i = 0; i != MAX_EPOCHS; i++) {
		epoch = epoch_array + i;
		if (__predict_false(
		    atomic_load_acq_int(&epoch->e_in_use) == 0))
			continue;
		er = epoch_currecord(epoch);
		record = &er->er_record;
		if ((npending = record->n_pending) == 0)
			continue;
		ck_epoch_poll_deferred(record, &cb_stack);
		total += npending - record->n_pending;
	}
	epoch_exit(global_epoch);
	*DPCPU_PTR(epoch_cb_count) -= total;
	critical_exit();

	counter_u64_add(epoch_call_count, total);
	counter_u64_add(epoch_call_task_count, 1);

	head = ck_stack_batch_pop_npsc(&cb_stack);
	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
	}
}

static int
in_epoch_verbose_preempt(epoch_t epoch, int dump_onfail)
{
	epoch_record_t er;
	struct epoch_tracker *tdwait;
	struct thread *td;

	MPASS(epoch != NULL);
	MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0);
	td = curthread;
	if (THREAD_CAN_SLEEP())
		return (0);
	critical_enter();
	er = epoch_currecord(epoch);
	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
		if (tdwait->et_td == td) {
			critical_exit();
			return (1);
		}
#ifdef INVARIANTS
	if (dump_onfail) {
		MPASS(td->td_pinned);
		printf("cpu: %d id: %d\n", curcpu, td->td_tid);
		TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
			printf("td_tid: %d ", tdwait->et_td->td_tid);
		printf("\n");
	}
#endif
	critical_exit();
	return (0);
}

#ifdef INVARIANTS
static void
epoch_assert_nocpu(epoch_t epoch, struct thread *td)
{
	epoch_record_t er;
	int cpu;
	bool crit;

	crit = td->td_critnest > 0;

	/* Check for a critical section mishap. */
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		KASSERT(er->er_td != td,
		    ("%s critical section in epoch '%s', from cpu %d",
		    (crit ? "exited" : "re-entered"), epoch->e_name, cpu));
	}
}
#else
#define	epoch_assert_nocpu(e, td) do {} while (0)
#endif

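/*
 * Return 1 if the current thread is inside a section of the given
 * epoch, 0 otherwise.  With dump_onfail set (and INVARIANTS), the
 * per-CPU tracker list is printed on a negative preemptible-epoch
 * answer.
 */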
int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
	epoch_record_t er;
	struct thread *td;

	if (__predict_false((epoch) == NULL))
		return (0);
	if ((epoch->e_flags & EPOCH_PREEMPT) != 0)
		return (in_epoch_verbose_preempt(epoch, dump_onfail));

	/*
	 * Being in a critical section is a necessary condition for
	 * correctly being inside a non-preemptible epoch, so a thread
	 * that is not in one is definitely not in this epoch.
	 */
	td = curthread;
	if (td->td_critnest == 0) {
		epoch_assert_nocpu(epoch, td);
		return (0);
	}

	/*
	 * The current thread is in a critical section, so the epoch record
	 * will be stable for the rest of this function.  Knowing that the
	 * record is not active is sufficient for knowing whether we're in
	 * this epoch or not, since it's a pcpu record.
	 */
	er = epoch_currecord(epoch);
	if (er->er_record.active == 0) {
		epoch_assert_nocpu(epoch, td);
		return (0);
	}

	MPASS(er->er_td == td);
	return (1);
}

int
in_epoch(epoch_t epoch)
{
	return (in_epoch_verbose(epoch, 0));
}

static void
epoch_drain_cb(struct epoch_context *ctx)
{
	struct epoch *epoch =
	    __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;

	if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
		mtx_lock(&epoch->e_drain_mtx);
		wakeup(epoch);
		mtx_unlock(&epoch->e_drain_mtx);
	}
}

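/*
 * Wait for all outstanding deferred callbacks of the epoch to run.
 * One sentinel callback is queued per CPU (binding to each in turn),
 * and we sleep until the last sentinel has decremented e_drain_count
 * to zero.
 */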
void
epoch_drain_callbacks(epoch_t epoch)
{
	epoch_record_t er;
	struct thread *td;
	int was_bound;
	int old_pinned;
	int old_cpu;
	int cpu;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "epoch_drain_callbacks() may sleep!");

	/* too early in boot to have epoch set up */
	if (__predict_false(epoch == NULL))
		return;
#if !defined(EARLY_AP_STARTUP)
	if (__predict_false(inited < 2))
		return;
#endif
	DROP_GIANT();

	sx_xlock(&epoch->e_drain_sx);
	mtx_lock(&epoch->e_drain_mtx);

	td = curthread;
	thread_lock(td);
	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;

	CPU_FOREACH(cpu)
		epoch->e_drain_count++;
	CPU_FOREACH(cpu) {
		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
		sched_bind(td, cpu);
		epoch_call(epoch, &epoch_drain_cb, &er->er_drain_ctx);
	}

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	thread_unlock(td);

	while (epoch->e_drain_count != 0)
		msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);

	mtx_unlock(&epoch->e_drain_mtx);
	sx_xunlock(&epoch->e_drain_sx);

	PICKUP_GIANT();
}
