/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019,2020 Jeffrey Roberson <jeff@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/smr.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

/*
 * Global Unbounded Sequences (GUS)
 *
 * This is a novel safe memory reclamation technique inspired by
 * epoch based reclamation from Samy Al Bahra's concurrency kit which
 * in turn was based on work described in:
 *   Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *   of Cambridge Computing Laboratory.
 * And shares some similarities with:
 *   Wang, Stamler, Parmer. 2016 Parallel Sections: Scaling System-Level
 *   Data-Structures
 *
 * This is not an implementation of hazard pointers or related
 * techniques.  The term safe memory reclamation is used as a
 * generic descriptor for algorithms that defer frees to avoid
 * use-after-free errors with lockless data structures or as
 * a mechanism to detect quiescence for writer synchronization.
 *
 * The basic approach is to maintain a monotonic write sequence
 * number that is updated on some application-defined granularity.
 * Readers record the most recent write sequence number they have
 * observed.  A shared read sequence number records the lowest
 * sequence number observed by any reader as of the last poll.  Any
 * write older than this value has been observed by all readers
 * and memory can be reclaimed.  Like Epoch we also detect idle
 * readers by storing an invalid sequence number in the per-cpu
 * state when the read section exits.  Like Parsec we establish
 * a global write clock that is used to mark memory on free.
 *
 * The write and read sequence numbers can be thought of as a two
 * handed clock with readers always advancing towards writers.  GUS
 * maintains the invariant that all readers can safely access memory
 * that was visible at the time they loaded their copy of the sequence
 * number.  Periodically the read sequence or hand is polled and
 * advanced as far towards the write sequence as active readers allow.
 * Memory which was freed between the old and new global read sequence
 * number can now be reclaimed.  When the system is idle the two hands
 * meet and no deferred memory is outstanding.  Readers never advance
 * any sequence number, they only observe them.  The shared read
 * sequence number is consequently never higher than the write sequence.
 * A stored sequence number that falls outside of this range has expired
 * and needs no scan to reclaim.
 *
 * A notable distinction between GUS and Epoch, qsbr, rcu, etc. is
 * that advancing the sequence number is decoupled from detecting its
 * observation.  That is to say, the delta between read and write
 * sequence numbers is not bounded.  This can be thought of as a more
 * generalized form of epoch, which requires that they be at most one
 * step apart.  This results in a more granular assignment of sequence
 * numbers even as read latencies prohibit all or some expiration.
 * It also allows writers to advance the sequence number and save the
 * poll for expiration until a later time when it is likely to
 * complete without waiting.  The batch granularity and free-to-use
 * latency is dynamic and can be significantly smaller than in more
 * strict systems.
 *
 * This mechanism is primarily intended to be used in coordination with
 * UMA.  By integrating with the allocator we avoid all of the callout
 * queue machinery and are provided with an efficient way to batch
 * sequence advancement and waiting.  The allocator accumulates a full
 * per-cpu cache of memory before advancing the sequence.  It then
 * delays waiting for this sequence to expire until the memory is
 * selected for reuse.  In this way we only increment the sequence
 * value once for n=cache-size frees and the waits are done long
 * after the sequence has been expired so they need only be verified
 * to account for pathological conditions and to advance the read
 * sequence.  Tying the sequence number to the bucket size has the
 * nice property that as the zone gets busier the buckets get larger
 * and the sequence writes become fewer.  If the coherency of advancing
 * the write sequence number becomes too costly we can advance
 * it for every N buckets in exchange for higher free-to-use
 * latency and consequently higher memory consumption.
 *
 * If the read overhead of accessing the shared cacheline becomes
 * especially burdensome an invariant TSC could be used in place of the
 * sequence.  The algorithm would then only need to maintain the minimum
 * observed tsc.  This would trade potential cache synchronization
 * overhead for local serialization and cpu timestamp overhead.
 */

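/*
 * A minimal usage sketch.  The entry points referenced here are
 * declared in sys/smr.h; the 'foo' list, zone, and 'goal' variable
 * are hypothetical.
 *
 *	// Reader: accesses are safe between enter and exit.
 *	smr_enter(smr);
 *	p = atomic_load_ptr(&foo_head);
 *	// ... read-only access to *p ...
 *	smr_exit(smr);
 *
 *	// Writer: unlink, then defer reuse until every reader that
 *	// could have observed 'p' has exited its section.
 *	atomic_store_ptr(&foo_head, next);
 *	goal = smr_advance(smr);
 *	// ... later, at reclaim time ...
 *	if (smr_poll(smr, goal, false))
 *		uma_zfree(foo_zone, p);
 */
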
/*
 * A simplified diagram:
 *
 * 0                                                          UINT_MAX
 * | -------------------- sequence number space -------------------- |
 *              ^ rd seq                            ^ wr seq
 *              | ----- valid sequence numbers ---- |
 *                ^cpuA  ^cpuC
 * | -- free -- | --------- deferred frees -------- | ---- free ---- |
 *
 *
 * In this example cpuA has the lowest sequence number and poll can
 * advance rd seq.  cpuB is not running and is considered to observe
 * wr seq.
 *
 * Freed memory that is tagged with a sequence number between rd seq and
 * wr seq can not be safely reclaimed because cpuA may hold a reference to
 * it.  Any other memory is guaranteed to be unreferenced.
 *
 * Any writer is free to advance wr seq at any time, but it may busy
 * poll in pathological cases.
 */

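/*
 * Because the sequence space wraps, ordering is decided with signed
 * modular arithmetic rather than direct comparison.  Roughly (the
 * authoritative definitions live in sys/smr.h):
 *
 *	SMR_SEQ_DELTA(a, b)	((smr_delta_t)((a) - (b)))
 *	SMR_SEQ_LT(a, b)	(SMR_SEQ_DELTA(a, b) < 0)
 *
 * This is only meaningful while compared values stay within half the
 * sequence space of each other, which SMR_SEQ_MAX_DELTA below enforces.
 */
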
static uma_zone_t smr_shared_zone;
static uma_zone_t smr_zone;

#ifndef INVARIANTS
#define	SMR_SEQ_INIT	1		/* All valid sequence numbers are odd. */
#define	SMR_SEQ_INCR	2

/*
 * SMR_SEQ_MAX_DELTA is the maximum distance allowed between rd_seq and
 * wr_seq.  For the modular arithmetic to work a value of UINT_MAX / 2
 * would be possible, but it is checked after we increment the wr_seq so
 * a safety margin is left to prevent overflow.
 *
 * We will block until SMR_SEQ_MAX_ADVANCE sequence numbers have progressed
 * to prevent integer wrapping.  See smr_advance() for more details.
 */
#define	SMR_SEQ_MAX_DELTA	(UINT_MAX / 4)
#define	SMR_SEQ_MAX_ADVANCE	(SMR_SEQ_MAX_DELTA - 1024)
#else
/* We want to test the wrapping feature in invariants kernels. */
#define	SMR_SEQ_INCR	(UINT_MAX / 10000)
#define	SMR_SEQ_INIT	(UINT_MAX - 100000)
/* Force extra polls to test the integer overflow detection. */
#define	SMR_SEQ_MAX_DELTA	(SMR_SEQ_INCR * 32)
#define	SMR_SEQ_MAX_ADVANCE	(SMR_SEQ_MAX_DELTA / 2)
#endif

/*
 * The grace period for lazy (tick based) SMR.
 *
 * Hardclock is responsible for advancing ticks on a single CPU while every
 * CPU receives a regular clock interrupt.  The clock interrupts flush
 * the store buffers and any speculative loads that may violate our invariants.
 * Because these interrupts are not synchronized we must wait one additional
 * tick in the future to be certain that all processors have had their state
 * synchronized by an interrupt.
 *
 * This assumes that the clock interrupt will only be delayed by other causes
 * that will flush the store buffer or prevent access to the section-protected
 * data.  For example, an idle processor, a system management interrupt,
 * or a vm exit.
 */
#define	SMR_LAZY_GRACE		2
#define	SMR_LAZY_INCR		(SMR_LAZY_GRACE * SMR_SEQ_INCR)

/*
 * The maximum sequence number ahead of wr_seq that may still be valid.  The
 * sequence may not be advanced on write for lazy or deferred SMRs.  In this
 * case poll needs to attempt to forward the sequence number if the goal is
 * within wr_seq + SMR_SEQ_ADVANCE.
 */
#define	SMR_SEQ_ADVANCE		SMR_LAZY_INCR

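/*
 * Lazy and deferred behavior is selected per-SMR at creation time.  A
 * hypothetical sketch (smr_create() and the flags are in sys/smr.h):
 *
 *	smr_t lazy = smr_create("foo lazy", 0, SMR_LAZY);
 *	smr_t defer = smr_create("foo deferred", 10, SMR_DEFERRED);
 *
 * A lazy SMR advances with the ticks clock rather than on each call to
 * advance; a deferred SMR only pays for a real advance every 'limit'
 * calls.  Both may hand out goals ahead of the stored wr_seq, which is
 * why poll honors goals up to wr_seq + SMR_SEQ_ADVANCE.
 */
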
static SYSCTL_NODE(_debug, OID_AUTO, smr, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    "SMR Stats");
static COUNTER_U64_DEFINE_EARLY(advance);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance, CTLFLAG_RW, &advance, "");
static COUNTER_U64_DEFINE_EARLY(advance_wait);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance_wait, CTLFLAG_RW, &advance_wait, "");
static COUNTER_U64_DEFINE_EARLY(poll);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll, CTLFLAG_RW, &poll, "");
static COUNTER_U64_DEFINE_EARLY(poll_scan);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_scan, CTLFLAG_RW, &poll_scan, "");
static COUNTER_U64_DEFINE_EARLY(poll_fail);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_fail, CTLFLAG_RW, &poll_fail, "");

/*
 * Advance a lazy write sequence number.  These move forward at the rate of
 * ticks.  Grace is SMR_LAZY_INCR (2 ticks) in the future.
 *
 * This returns the goal write sequence number.
 */
static smr_seq_t
smr_lazy_advance(smr_t smr, smr_shared_t s)
{
	union s_wr s_wr, old;
	int t, d;

	CRITICAL_ASSERT(curthread);

	/*
	 * Load the stored ticks value before the current one.  This way the
	 * current value can only be the same or larger.
	 */
	old._pair = s_wr._pair = atomic_load_acq_64(&s->s_wr._pair);
	t = ticks;

	/*
	 * The most probable condition is that the update already took place.
	 */
	d = t - s_wr.ticks;
	if (__predict_true(d == 0))
		goto out;
	/* Cap the rate of advancement and handle long idle periods. */
	if (d > SMR_LAZY_GRACE || d < 0)
		d = SMR_LAZY_GRACE;
	s_wr.ticks = t;
	s_wr.seq += d * SMR_SEQ_INCR;

	/*
	 * This can only fail if another thread races to call advance().
	 * Strong cmpset semantics mean we are guaranteed that the update
	 * happened.
	 */
	atomic_cmpset_64(&s->s_wr._pair, old._pair, s_wr._pair);
out:
	return (s_wr.seq + SMR_LAZY_INCR);
}

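/*
 * A worked example of the clamping above: if the stored tick value is 100
 * and the clock now reads 103, d = 3 exceeds SMR_LAZY_GRACE and is capped
 * at 2, so the stored pair becomes (ticks = 103, seq += 2 * SMR_SEQ_INCR)
 * and the returned goal is the new seq + SMR_LAZY_INCR.
 */
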
/*
 * Increment the shared write sequence by 2.  Since it is initialized
 * to 1 this means the only valid values are odd and an observed value
 * of 0 in a particular CPU means it is not currently in a read section.
 */
static smr_seq_t
smr_shared_advance(smr_shared_t s)
{

	return (atomic_fetchadd_int(&s->s_wr.seq, SMR_SEQ_INCR) + SMR_SEQ_INCR);
}

/*
 * Advance the write sequence number for a normal smr section.  If the
 * write sequence is too far ahead of the read sequence we have to poll
 * to advance rd_seq and prevent undetectable wraps.
 */
static smr_seq_t
smr_default_advance(smr_t smr, smr_shared_t s)
{
	smr_seq_t goal, s_rd_seq;

	CRITICAL_ASSERT(curthread);
	KASSERT((zpcpu_get(smr)->c_flags & SMR_LAZY) == 0,
	    ("smr_default_advance: called with lazy smr."));

	/*
	 * Load the current read seq before incrementing the goal so
	 * we are guaranteed it is always < goal.
	 */
	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);
	goal = smr_shared_advance(s);

	/*
	 * Force a synchronization here if the goal is getting too
	 * far ahead of the read sequence number.  This keeps the
	 * wrap detecting arithmetic working in pathological cases.
	 */
	if (SMR_SEQ_DELTA(goal, s_rd_seq) >= SMR_SEQ_MAX_DELTA) {
		counter_u64_add(advance_wait, 1);
		smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE);
	}
	counter_u64_add(advance, 1);

	return (goal);
}

/*
 * Deferred SMRs conditionally update s_wr_seq based on a cpu-local
 * interval count.
 */
static smr_seq_t
smr_deferred_advance(smr_t smr, smr_shared_t s, smr_t self)
{

	if (++self->c_deferred < self->c_limit)
		return (smr_shared_current(s) + SMR_SEQ_INCR);
	self->c_deferred = 0;
	return (smr_default_advance(smr, s));
}

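/*
 * For example, with limit = 10 only every tenth call above pays for a
 * shared advance; the other nine return the current sequence plus
 * SMR_SEQ_INCR, a goal that may run ahead of the stored wr_seq and is
 * accepted by poll via the SMR_SEQ_ADVANCE window.
 */
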
/*
 * Advance the write sequence and return the value for use as the
 * wait goal.  This guarantees that any changes made by the calling
 * thread prior to this call will be visible to all threads after
 * rd_seq meets or exceeds the return value.
 *
 * This function may busy loop if the readers are roughly 1 billion
 * sequence numbers behind the writers.
 *
 * Lazy SMRs will not busy loop and the wrap happens every 25 days
 * at 1khz and 60 hours at 10khz.  Readers can block for no longer
 * than half of this for SMR_SEQ_ macros to continue working.
 */
smr_seq_t
smr_advance(smr_t smr)
{
	smr_t self;
	smr_shared_t s;
	smr_seq_t goal;
	int flags;

	/*
	 * It is illegal to enter while in an smr section.
	 */
	SMR_ASSERT_NOT_ENTERED(smr);

	/*
	 * Modifications not done in an smr section need to be visible
	 * before advancing the seq.
	 */
	atomic_thread_fence_rel();

	critical_enter();
	/* Try to touch the line once. */
	self = zpcpu_get(smr);
	s = self->c_shared;
	flags = self->c_flags;
	goal = SMR_SEQ_INVALID;
	if ((flags & (SMR_LAZY | SMR_DEFERRED)) == 0)
		goal = smr_default_advance(smr, s);
	else if ((flags & SMR_LAZY) != 0)
		goal = smr_lazy_advance(smr, s);
	else if ((flags & SMR_DEFERRED) != 0)
		goal = smr_deferred_advance(smr, s, self);
	critical_exit();

	return (goal);
}

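/*
 * A typical writer pattern built on the above, sketched with a
 * hypothetical per-item sequence tag:
 *
 *	// After making 'item' unreachable to new readers:
 *	item->seq = smr_advance(smr);
 *	...
 *	// At reuse time, a non-blocking check; keep deferring
 *	// (or smr_wait()) if readers are still outstanding.
 *	if (smr_poll(smr, item->seq, false))
 *		recycle(item);
 */
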
/*
 * Poll to determine the currently observed sequence number on a cpu
 * and spinwait if the 'wait' argument is true.
 */
static smr_seq_t
smr_poll_cpu(smr_t c, smr_seq_t s_rd_seq, smr_seq_t goal, bool wait)
{
	smr_seq_t c_seq;

	c_seq = SMR_SEQ_INVALID;
	for (;;) {
		c_seq = atomic_load_int(&c->c_seq);
		if (c_seq == SMR_SEQ_INVALID)
			break;

		/*
		 * There is a race described in smr.h:smr_enter that
		 * can lead to a stale seq value but not stale data
		 * access.  If we find a value out of range here we
		 * pin it to the current min to prevent it from
		 * advancing until that stale section has expired.
		 *
		 * The race is created when a cpu loads the s_wr_seq
		 * value in a local register and then another thread
		 * advances s_wr_seq and calls smr_poll() which will
		 * observe no value yet in c_seq and advance s_rd_seq
		 * up to s_wr_seq which is beyond the register
		 * cached value.  This is only likely to happen on
		 * a hypervisor or with a system management interrupt.
		 */
		if (SMR_SEQ_LT(c_seq, s_rd_seq))
			c_seq = s_rd_seq;

		/*
		 * If the sequence number meets the goal we are done
		 * with this cpu.
		 */
		if (SMR_SEQ_LEQ(goal, c_seq))
			break;

		if (!wait)
			break;
		cpu_spinwait();
	}

	return (c_seq);
}

/*
 * Loop until all cores have observed the goal sequence or have
 * gone inactive.  Returns the oldest sequence currently active.
 *
 * This function assumes a snapshot of sequence values has
 * been obtained and validated by smr_poll().
 */
static smr_seq_t
smr_poll_scan(smr_t smr, smr_shared_t s, smr_seq_t s_rd_seq,
    smr_seq_t s_wr_seq, smr_seq_t goal, bool wait)
{
	smr_seq_t rd_seq, c_seq;
	int i;

	CRITICAL_ASSERT(curthread);
	counter_u64_add_protected(poll_scan, 1);

	/*
	 * The read sequence can be no larger than the write sequence at
	 * the start of the poll.
	 */
	rd_seq = s_wr_seq;
	CPU_FOREACH(i) {
		/*
		 * Query the active sequence on this cpu.  If we're not
		 * waiting and we don't meet the goal we will still scan
		 * the rest of the cpus to update s_rd_seq before returning
		 * failure.
		 */
		c_seq = smr_poll_cpu(zpcpu_get_cpu(smr, i), s_rd_seq, goal,
		    wait);

		/*
		 * Limit the minimum observed rd_seq whether we met the goal
		 * or not.
		 */
		if (c_seq != SMR_SEQ_INVALID)
			rd_seq = SMR_SEQ_MIN(rd_seq, c_seq);
	}

	/*
	 * Advance the rd_seq as long as we observed a more recent value.
	 */
	s_rd_seq = atomic_load_int(&s->s_rd_seq);
	if (SMR_SEQ_GT(rd_seq, s_rd_seq)) {
		atomic_cmpset_int(&s->s_rd_seq, s_rd_seq, rd_seq);
		s_rd_seq = rd_seq;
	}

	return (s_rd_seq);
}

/*
 * Poll to determine whether all readers have observed the 'goal' write
 * sequence number.
 *
 * If wait is true this will spin until the goal is met.
 *
 * This routine will update the minimum observed read sequence number in
 * s_rd_seq if it does a scan.  It may not do a scan if another call has
 * advanced s_rd_seq beyond the caller's goal already.
 *
 * Returns true if the goal is met and false if not.
 */
bool
smr_poll(smr_t smr, smr_seq_t goal, bool wait)
{
	smr_shared_t s;
	smr_t self;
	smr_seq_t s_wr_seq, s_rd_seq;
	smr_delta_t delta;
	int flags;
	bool success;

	/*
	 * It is illegal to enter while in an smr section.
	 */
	KASSERT(!wait || !SMR_ENTERED(smr),
	    ("smr_poll: Blocking not allowed in a SMR section."));
	KASSERT(!wait || (zpcpu_get(smr)->c_flags & SMR_LAZY) == 0,
	    ("smr_poll: Blocking not allowed on lazy smrs."));

	/*
	 * Use a critical section so that we can avoid ABA races
	 * caused by long preemption sleeps.
	 */
	success = true;
	critical_enter();
	/* Attempt to load from self only once. */
	self = zpcpu_get(smr);
	s = self->c_shared;
	flags = self->c_flags;
	counter_u64_add_protected(poll, 1);

	/*
	 * Conditionally advance the lazy write clock on any writer
	 * activity.
	 */
	if ((flags & SMR_LAZY) != 0)
		smr_lazy_advance(smr, s);

	/*
	 * Acquire barrier loads s_wr_seq after s_rd_seq so that we can not
	 * observe an updated read sequence that is larger than write.
	 */
	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);

	/*
	 * If we have already observed the sequence number we can immediately
	 * return success.  Most polls should meet this criterion.
	 */
	if (SMR_SEQ_LEQ(goal, s_rd_seq))
		goto out;

	/*
	 * wr_seq must be loaded prior to any c_seq value so that a
	 * stale c_seq can only reference time after this wr_seq.
	 */
	s_wr_seq = atomic_load_acq_int(&s->s_wr.seq);

	/*
	 * This is the distance from s_wr_seq to goal.  Positive values
	 * are in the future.
	 */
	delta = SMR_SEQ_DELTA(goal, s_wr_seq);

	/*
	 * Detect a stale wr_seq.
	 *
	 * This goal may have come from a deferred advance or a lazy
	 * smr.  If we are not blocking we can not succeed but the
	 * sequence number is valid.
	 */
	if (delta > 0 && delta <= SMR_SEQ_ADVANCE &&
	    (flags & (SMR_LAZY | SMR_DEFERRED)) != 0) {
		if (!wait) {
			success = false;
			goto out;
		}
		/* LAZY is always !wait. */
		s_wr_seq = smr_shared_advance(s);
		delta = 0;
	}

	/*
	 * Detect an invalid goal.
	 *
	 * The goal must be in the range of s_wr_seq >= goal >= s_rd_seq for
	 * it to be valid.  If it is not then the caller held on to it and
	 * the integer wrapped.  If we wrapped back within range the caller
	 * will harmlessly scan.
	 */
	if (delta > 0)
		goto out;

	/* Determine the lowest visible sequence number. */
	s_rd_seq = smr_poll_scan(smr, s, s_rd_seq, s_wr_seq, goal, wait);
	success = SMR_SEQ_LEQ(goal, s_rd_seq);
out:
	if (!success)
		counter_u64_add_protected(poll_fail, 1);
	critical_exit();

	/*
	 * Serialize with smr_advance()/smr_exit().  The caller is now free
	 * to modify memory as expected.
	 */
	atomic_thread_fence_acq();

	KASSERT(success || !wait, ("%s: blocking poll failed", __func__));
	return (success);
}

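/*
 * The blocking helpers in sys/smr.h are thin wrappers around this
 * poll, approximately:
 *
 *	smr_wait(smr, goal)	=> (void)smr_poll(smr, goal, true);
 *	smr_synchronize(smr)	=> smr_wait(smr, smr_advance(smr));
 */
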
smr_t
smr_create(const char *name, int limit, int flags)
{
	smr_t smr, c;
	smr_shared_t s;
	int i;

	s = uma_zalloc(smr_shared_zone, M_WAITOK);
	smr = uma_zalloc_pcpu(smr_zone, M_WAITOK);

	s->s_name = name;
	s->s_rd_seq = s->s_wr.seq = SMR_SEQ_INIT;
	s->s_wr.ticks = ticks;

	/* Initialize all CPUS, not just those running. */
	for (i = 0; i <= mp_maxid; i++) {
		c = zpcpu_get_cpu(smr, i);
		c->c_seq = SMR_SEQ_INVALID;
		c->c_shared = s;
		c->c_deferred = 0;
		c->c_limit = limit;
		c->c_flags = flags;
	}
	atomic_thread_fence_seq_cst();

	return (smr);
}

void
smr_destroy(smr_t smr)
{

	smr_synchronize(smr);
	uma_zfree(smr_shared_zone, smr->c_shared);
	uma_zfree_pcpu(smr_zone, smr);
}

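/*
 * Lifetime sketch (hypothetical caller): smr_destroy() synchronizes
 * before freeing, so it must not be called from within a read section
 * and no new readers may enter once it starts.
 *
 *	smr_t smr = smr_create("foo", 0, 0);
 *	...
 *	smr_destroy(smr);
 */
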
/*
 * Initialize the UMA slab zone.
 */
void
smr_init(void)
{

	smr_shared_zone = uma_zcreate("SMR SHARED", sizeof(struct smr_shared),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0);
	smr_zone = uma_zcreate("SMR CPU", sizeof(struct smr),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);
}