subr_epoch.c: unified diff, c981cbbd13775bb259623977c23853f3db93c68a -> 131b2b7658d7e961a245697cb5af55306388fc54
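Summary: the new revision adds epoch_drain_callbacks(), which blocks until every callback previously queued with epoch_call() on any CPU has executed, and wires it into epoch_free() so an epoch's callbacks are drained before its per-CPU records are released, replacing the old INVARIANTS-only check that the per-CPU tracker lists were empty. Lines prefixed '+' exist only in the new revision; lines prefixed '-' only in the old one.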
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:

--- 29 unchanged lines hidden ---

 #include <sys/kernel.h>
 #include <sys/limits.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
 #include <sys/mutex.h>
 #include <sys/pcpu.h>
 #include <sys/proc.h>
 #include <sys/sched.h>
+#include <sys/sx.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/turnstile.h>
 #include <vm/vm.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_kern.h>
 #include <vm/uma.h>

--- 5 unchanged lines hidden ---

 #define EPOCH_ALIGN CACHE_LINE_SIZE*2
 #else
 #define EPOCH_ALIGN CACHE_LINE_SIZE
 #endif

 TAILQ_HEAD (epoch_tdlist, epoch_tracker);
 typedef struct epoch_record {
 	ck_epoch_record_t er_record;
+	struct epoch_context er_drain_ctx;
+	struct epoch *er_parent;
 	volatile struct epoch_tdlist er_tdlist;
 	volatile uint32_t er_gen;
 	uint32_t er_cpuid;
 } __aligned(EPOCH_ALIGN) *epoch_record_t;

 struct epoch {
 	struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
 	epoch_record_t e_pcpu_record;
 	int e_idx;
 	int e_flags;
+	struct sx e_drain_sx;
+	struct mtx e_drain_mtx;
+	volatile int e_drain_count;
 };

 /* arbitrary --- needs benchmarking */
 #define MAX_ADAPTIVE_SPIN 100
 #define MAX_EPOCHS 64

 CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
 SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
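The added fields carry the drain machinery: e_drain_sx serializes concurrent drainers, e_drain_mtx protects the sleep on e_drain_count (the number of per-CPU drain callbacks still outstanding), and er_drain_ctx/er_parent exist because epoch_call() hands a callback nothing but its struct epoch_context pointer. A minimal illustrative sketch of how a callback gets back to its epoch (the helper name is hypothetical, not part of the diff):

static inline struct epoch *
epoch_from_drain_ctx(struct epoch_context *ctx)
{
	/* The context is embedded in the per-CPU record... */
	struct epoch_record *er;

	er = __containerof(ctx, struct epoch_record, er_drain_ctx);
	/* ...and the record carries a backpointer to its epoch. */
	return (er->er_parent);
}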

--- 88 unchanged lines hidden (view full) ---

173
174 epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
175 CPU_FOREACH(cpu) {
176 er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
177 bzero(er, sizeof(*er));
178 ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
179 TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
180 er->er_cpuid = cpu;
+		er->er_parent = epoch;
 	}
 }

 static void
 epoch_adjust_prio(struct thread *td, u_char prio)
 {

 	thread_lock(td);

--- 9 unchanged lines hidden ---

 	if (__predict_false(!inited))
 		panic("%s called too early in boot", __func__);
 	epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK);
 	ck_epoch_init(&epoch->e_epoch);
 	epoch_ctor(epoch);
 	MPASS(epoch_count < MAX_EPOCHS - 2);
 	epoch->e_flags = flags;
 	epoch->e_idx = epoch_count;
+	sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
+	mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);
 	allepochs[epoch_count++] = epoch;
 	return (epoch);
 }

 void
 epoch_free(epoch_t epoch)
 {
-#ifdef INVARIANTS
-	struct epoch_record *er;
-	int cpu;
-
-	CPU_FOREACH(cpu) {
-		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
-		MPASS(TAILQ_EMPTY(&er->er_tdlist));
-	}
-#endif
+
+	epoch_drain_callbacks(epoch);
 	allepochs[epoch->e_idx] = NULL;
 	epoch_wait(global_epoch);
 	uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
+	mtx_destroy(&epoch->e_drain_mtx);
+	sx_destroy(&epoch->e_drain_sx);
 	free(epoch, M_EPOCH);
 }

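epoch_free() now guarantees that every callback queued with epoch_call() has run before the per-CPU records are released, which is why the INVARIANTS-only empty-list assertion above could be dropped. The teardown order matters: drain first, then unpublish the epoch from allepochs, wait out global-epoch readers, and only then free the records and destroy the drain locks. A hypothetical module lifecycle under these semantics (all names here are illustrative, not from the diff):

static epoch_t mod_epoch;		/* illustrative module-private epoch */

static void
mod_load(void)
{
	mod_epoch = epoch_alloc(EPOCH_PREEMPT);
}

static void
mod_unload(void)
{
	/*
	 * epoch_free() drains pending callbacks internally, so any
	 * destructor queued via epoch_call() has finished running
	 * before the module's text and data go away.
	 */
	epoch_free(mod_epoch);
}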
 static epoch_record_t
 epoch_currecord(epoch_t epoch)
 {

 	return (zpcpu_get_cpu(epoch->e_pcpu_record, curcpu));

--- 419 unchanged lines hidden ---

 }

 int
 in_epoch(epoch_t epoch)
 {
 	return (in_epoch_verbose(epoch, 0));
 }

+static void
+epoch_drain_cb(struct epoch_context *ctx)
+{
+	struct epoch *epoch =
+	    __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;
+
+	if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
+		mtx_lock(&epoch->e_drain_mtx);
+		wakeup(epoch);
+		mtx_unlock(&epoch->e_drain_mtx);
+	}
+}
+
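One drain callback is queued per CPU and each decrements e_drain_count; atomic_fetchadd_int() returns the old value, so only the callback that takes the counter to zero issues the wakeup. Taking e_drain_mtx around wakeup() pairs with the msleep() loop below: the waiter tests the counter while holding the mutex, so the final decrementer cannot signal in the window between the test and the sleep. A userspace analogue of this completion latch, sketched with pthreads and C11 atomics (illustrative, not FreeBSD API):

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t drain_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drain_cv = PTHREAD_COND_INITIALIZER;
static atomic_int drain_count;

/* Arm the latch for n outstanding callbacks. */
static void
drain_arm(int n)
{
	atomic_store(&drain_count, n);
}

/* Called once per completed callback; the last one wakes the waiter. */
static void
drain_done(void)
{
	if (atomic_fetch_add(&drain_count, -1) == 1) {
		pthread_mutex_lock(&drain_mtx);
		pthread_cond_signal(&drain_cv);
		pthread_mutex_unlock(&drain_mtx);
	}
}

/* Sleep until every callback has run; mirrors the msleep() loop. */
static void
drain_wait(void)
{
	pthread_mutex_lock(&drain_mtx);
	while (atomic_load(&drain_count) != 0)
		pthread_cond_wait(&drain_cv, &drain_mtx);
	pthread_mutex_unlock(&drain_mtx);
}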
+void
+epoch_drain_callbacks(epoch_t epoch)
+{
+	epoch_record_t er;
+	struct thread *td;
+	int was_bound;
+	int old_pinned;
+	int old_cpu;
+	int cpu;
+
+	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+	    "epoch_drain_callbacks() may sleep!");
+
+	/* too early in boot to have epoch set up */
+	if (__predict_false(epoch == NULL))
+		return;
+#if !defined(EARLY_AP_STARTUP)
+	if (__predict_false(inited < 2))
+		return;
+#endif
+	DROP_GIANT();
+
+	sx_xlock(&epoch->e_drain_sx);
+	mtx_lock(&epoch->e_drain_mtx);
+
+	td = curthread;
+	thread_lock(td);
+	old_cpu = PCPU_GET(cpuid);
+	old_pinned = td->td_pinned;
+	was_bound = sched_is_bound(td);
+	sched_unbind(td);
+	td->td_pinned = 0;
+
+	CPU_FOREACH(cpu)
+		epoch->e_drain_count++;
+	CPU_FOREACH(cpu) {
+		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
+		sched_bind(td, cpu);
+		epoch_call(epoch, &er->er_drain_ctx, &epoch_drain_cb);
+	}
+
+	/* restore CPU binding, if any */
+	if (was_bound != 0) {
+		sched_bind(td, old_cpu);
+	} else {
+		/* get thread back to initial CPU, if any */
+		if (old_pinned != 0)
+			sched_bind(td, old_cpu);
+		sched_unbind(td);
+	}
+	/* restore pinned after bind */
+	td->td_pinned = old_pinned;
+
+	thread_unlock(td);
+
+	while (epoch->e_drain_count != 0)
+		msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);
+
+	mtx_unlock(&epoch->e_drain_mtx);
+	sx_xunlock(&epoch->e_drain_sx);
+
+	PICKUP_GIANT();
+}
+
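The drain arms the counter once per CPU, binds the calling thread to each CPU in turn so that epoch_call() lands on that CPU's record, and then sleeps until the counter reaches zero; by the time every drain callback has run, the callbacks queued ahead of them on each record have run too. The WITNESS_WARN makes the sleepiness explicit, so this must not be called from inside an epoch section or with non-sleepable locks held. A hypothetical caller, with illustrative names, that retires objects through the epoch and drains before unload:

struct foo {
	struct epoch_context f_ctx;
	/* ... payload ... */
};

static void
foo_free_cb(struct epoch_context *ctx)
{
	free(__containerof(ctx, struct foo, f_ctx), M_TEMP);
}

static void
foo_retire(epoch_t ep, struct foo *f)
{
	/* Deferred free: runs once all current readers have left. */
	epoch_call(ep, &f->f_ctx, foo_free_cb);
}

static void
foo_subsystem_fini(epoch_t ep)
{
	/*
	 * Ensure every foo_free_cb queued above has executed before
	 * the code containing it can be unloaded.
	 */
	epoch_drain_callbacks(ep);
}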
 void
 epoch_thread_init(struct thread *td)
 {

 	td->td_et = malloc(sizeof(struct epoch_tracker), M_EPOCH, M_WAITOK);
 }

 void
 epoch_thread_fini(struct thread *td)
 {

 	free(td->td_et, M_EPOCH);
 }