/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019,2020 Jeffrey Roberson <jeff@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/smr.h>

#include <vm/uma.h>

/*
 * This is a novel safe memory reclamation technique inspired by
 * epoch-based reclamation from Samy Al Bahra's Concurrency Kit, which
 * in turn was based on work described in:
 *   Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *   of Cambridge Computing Laboratory.
 * And shares some similarities with:
 *   Wang, Stamler, Parmer. 2016. Parallel Sections: Scaling System-Level
 *   Data-Structures.
 *
 * This is not an implementation of hazard pointers or related
 * techniques.  The term safe memory reclamation is used as a
 * generic descriptor for algorithms that defer frees to avoid
 * use-after-free errors with lockless data structures.
 *
 * The basic approach is to maintain a monotonic write sequence
 * number that is updated on some application-defined granularity.
 * Readers record the most recent write sequence number they have
 * observed.  A shared read sequence number records the lowest
 * sequence number observed by any reader as of the last poll.  Any
 * write older than this value has been observed by all readers
 * and memory can be reclaimed.  Like Epoch, we also detect idle
 * readers by storing an invalid sequence number in the per-cpu
 * state when the read section exits.  Like Parsec, we establish
 * a global write clock that is used to mark memory on free.
 *
 * The write and read sequence numbers can be thought of as a
 * two-handed clock with readers always advancing towards writers.  SMR
 * maintains the invariant that all readers can safely access memory
 * that was visible at the time they loaded their copy of the sequence
 * number.  Periodically the read sequence or hand is polled and
 * advanced as far towards the write sequence as active readers allow.
 * Memory which was freed between the old and new global read sequence
 * number can now be reclaimed.  When the system is idle the two hands
 * meet and no deferred memory is outstanding.  Readers never advance
 * any sequence number; they only observe them.  The shared read
 * sequence number is consequently never higher than the write sequence.
 * A stored sequence number that falls outside of this range has expired
 * and needs no scan to reclaim.
 *
 * A notable distinction between this SMR and Epoch, QSBR, RCU, etc. is
 * that advancing the sequence number is decoupled from detecting its
 * observation.  This results in a more granular assignment of sequence
 * numbers even as read latencies prohibit all or some expiration.
 * It also allows writers to advance the sequence number and save the
 * poll for expiration until a later time when it is likely to
 * complete without waiting.  The batch granularity and free-to-use
 * latency are dynamic and can be significantly smaller than in more
 * strict systems.
 *
 * This mechanism is primarily intended to be used in coordination with
 * UMA.  By integrating with the allocator we avoid all of the callout
 * queue machinery and are provided with an efficient way to batch
 * sequence advancement and waiting.  The allocator accumulates a full
 * per-cpu cache of memory before advancing the sequence.  It then
 * delays waiting for this sequence to expire until the memory is
 * selected for reuse.  In this way we only increment the sequence
 * value once for n=cache-size frees and the waits are done long
 * after the sequence has expired so they need only be verified
 * to account for pathological conditions and to advance the read
 * sequence.  Tying the sequence number to the bucket size has the
 * nice property that as the zone gets busier the buckets get larger
 * and the sequence writes become fewer.  If the coherency of advancing
 * the write sequence number becomes too costly we can advance
 * it for every N buckets in exchange for higher free-to-use
 * latency and consequently higher memory consumption.
 *
 * If the read overhead of accessing the shared cacheline becomes
 * especially burdensome an invariant TSC could be used in place of the
 * sequence.  The algorithm would then only need to maintain the minimum
 * observed TSC.  This would trade potential cache synchronization
 * overhead for local serialization and cpu timestamp overhead.
 */
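
/*
 * A rough usage sketch (illustrative only; not lifted from any real
 * consumer).  smr_enter(), smr_exit(), smr_advance(), smr_poll() and
 * smr_wait() are the public interface from sys/smr.h; 'head', 'obj',
 * consume() and free_object() are placeholders.
 *
 *	// Reader: enter a read section, then dereference.
 *	smr_enter(smr);
 *	obj = atomic_load_ptr(&head);
 *	if (obj != NULL)
 *		consume(obj);
 *	smr_exit(smr);
 *
 *	// Writer: unlink, record a wait goal, reclaim once it expires.
 *	head = NULL;			// unlink from the data structure
 *	goal = smr_advance(smr);
 *	...				// some time later
 *	if (smr_poll(smr, goal, false))
 *		free_object(obj);	// no reader can still hold obj
 *	// else poll again later, or smr_wait(smr, goal) to spin until done
 */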

/*
 * A simplified diagram:
 *
 * 0                                                          UINT_MAX
 * | -------------------- sequence number space -------------------- |
 *              ^ rd seq                            ^ wr seq
 *              | ----- valid sequence numbers ---- |
 *                ^cpuA  ^cpuC
 * | -- free -- | --------- deferred frees -------- | ---- free ---- |
 *
 *
 * In this example cpuA has the lowest sequence number and poll can
 * advance rd seq.  cpuB is not running and is considered to observe
 * wr seq.
 *
 * Freed memory that is tagged with a sequence number between rd seq and
 * wr seq cannot be safely reclaimed because cpuA may hold a reference to
 * it.  Any other memory is guaranteed to be unreferenced.
 *
 * Any writer is free to advance wr seq at any time; however, it may
 * busy-poll in pathological cases.
 */
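
/*
 * Since the sequence space wraps, ordering is decided with the wrap-aware
 * SMR_SEQ_LT()/SMR_SEQ_GEQ() style macros from sys/smr.h, which
 * effectively test the sign of the 32-bit difference.  A hypothetical
 * example: with rd_seq == UINT_MAX - 3 and wr_seq == 5 after a wrap,
 * (UINT_MAX - 3) - 5 is -9 when interpreted as a signed 32-bit value, so
 * SMR_SEQ_LT(rd_seq, wr_seq) still holds.  This only works while the two
 * hands stay well under half the sequence space apart, which is why
 * SMR_SEQ_MAX_DELTA below caps how far wr_seq may run ahead of rd_seq.
 */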

static uma_zone_t smr_shared_zone;
static uma_zone_t smr_zone;

#ifndef INVARIANTS
#define	SMR_SEQ_INIT	1		/* All valid sequence numbers are odd. */
#define	SMR_SEQ_INCR	2

/*
 * SMR_SEQ_MAX_DELTA is the maximum distance allowed between rd_seq and
 * wr_seq.  For the modular arithmetic to work a value of UINT_MAX / 2
 * would be possible but it is checked after we increment the wr_seq so
 * a safety margin is left to prevent overflow.
 *
 * We will block until SMR_SEQ_MAX_ADVANCE sequence numbers have progressed
 * to prevent integer wrapping.  See smr_advance() for more details.
 */
#define	SMR_SEQ_MAX_DELTA	(UINT_MAX / 4)
#define	SMR_SEQ_MAX_ADVANCE	(SMR_SEQ_MAX_DELTA - 1024)
#else
/* We want to test the wrapping feature in invariants kernels. */
#define	SMR_SEQ_INCR	(UINT_MAX / 10000)
#define	SMR_SEQ_INIT	(UINT_MAX - 100000)
/* Force extra polls to test the integer overflow detection. */
#define	SMR_SEQ_MAX_DELTA	(1000)
#define	SMR_SEQ_MAX_ADVANCE	(SMR_SEQ_MAX_DELTA / 2)
#endif

/*
 * Advance the write sequence and return the new value for use as the
 * wait goal.  This guarantees that any changes made by the calling
 * thread prior to this call will be visible to all threads after
 * rd_seq meets or exceeds the return value.
 *
 * This function may busy-loop if the readers are roughly 1 billion
 * sequence numbers behind the writers.
 */
smr_seq_t
smr_advance(smr_t smr)
{
	smr_shared_t s;
	smr_seq_t goal;

	/*
	 * It is illegal to enter while in an smr section.
	 */
	KASSERT(curthread->td_critnest == 0,
	    ("smr_advance: Not allowed in a critical section."));

	/*
	 * Modifications not done in an smr section need to be visible
	 * before advancing the seq.
	 */
	atomic_thread_fence_rel();

	/*
	 * Increment the shared write sequence by 2.  Since it is
	 * initialized to 1 this means the only valid values are
	 * odd and an observed value of 0 on a particular CPU means
	 * it is not currently in a read section.
	 */
	s = zpcpu_get(smr)->c_shared;
	goal = atomic_fetchadd_int(&s->s_wr_seq, SMR_SEQ_INCR) + SMR_SEQ_INCR;

	/*
	 * Force a synchronization here if the goal is getting too
	 * far ahead of the read sequence number.  This keeps the
	 * wrap detecting arithmetic working in pathological cases.
	 */
	if (goal - atomic_load_int(&s->s_rd_seq) >= SMR_SEQ_MAX_DELTA)
		smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE);

	return (goal);
}

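/*
 * Deferred version of smr_advance().  Only every 'limit'th deferred call
 * on a given CPU actually advances the write sequence; the other calls
 * simply return one increment past the current write sequence as the
 * goal, a value smr_poll() recognizes and will advance to on the
 * caller's behalf.
 */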
smr_seq_t
smr_advance_deferred(smr_t smr, int limit)
{
	smr_seq_t goal;
	smr_t csmr;

	critical_enter();
	csmr = zpcpu_get(smr);
	if (++csmr->c_deferred >= limit) {
		goal = SMR_SEQ_INVALID;
		csmr->c_deferred = 0;
	} else
		goal = smr_shared_current(csmr->c_shared) + SMR_SEQ_INCR;
	critical_exit();
	if (goal != SMR_SEQ_INVALID)
		return (goal);

	return (smr_advance(smr));
}

/*
 * Poll to determine whether all readers have observed the 'goal' write
 * sequence number.
 *
 * If wait is true this will spin until the goal is met.
 *
 * This routine will update the minimum observed read sequence number in
 * s_rd_seq if it does a scan.  It may not do a scan if another call has
 * advanced s_rd_seq beyond the caller's goal already.
 *
 * Returns true if the goal is met and false if not.
 */
bool
smr_poll(smr_t smr, smr_seq_t goal, bool wait)
{
	smr_shared_t s;
	smr_t c;
	smr_seq_t s_wr_seq, s_rd_seq, rd_seq, c_seq;
	int i;
	bool success;

	/*
	 * It is illegal to enter while in an smr section.
	 */
	KASSERT(!wait || curthread->td_critnest == 0,
	    ("smr_poll: Blocking not allowed in a critical section."));

	/*
	 * Use a critical section so that we can avoid ABA races
	 * caused by long preemption sleeps.
	 */
	success = true;
	critical_enter();
	s = zpcpu_get(smr)->c_shared;

	/*
	 * The acquire barrier loads s_wr_seq after s_rd_seq so that we
	 * cannot observe an updated read sequence that is larger than
	 * the write sequence.
	 */
	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);

	/*
	 * wr_seq must be loaded prior to any c_seq value so that a stale
	 * c_seq can only reference time after this wr_seq.
	 */
	s_wr_seq = atomic_load_acq_int(&s->s_wr_seq);

	/*
	 * This may have come from a deferred advance.  Consider one
	 * increment past the current wr_seq valid and make sure we
	 * have advanced far enough to succeed.  We simply add to avoid
	 * an additional fence.
	 */
	if (goal == s_wr_seq + SMR_SEQ_INCR) {
		atomic_add_int(&s->s_wr_seq, SMR_SEQ_INCR);
		s_wr_seq = goal;
	}

	/*
	 * Detect whether the goal is valid and has already been observed.
	 *
	 * The goal must be in the range of s_wr_seq >= goal >= s_rd_seq for
	 * it to be valid.  If it is not then the caller held on to it and
	 * the integer wrapped.  If we wrapped back within range the caller
	 * will harmlessly scan.
	 *
	 * A valid goal must be greater than s_rd_seq or we have not verified
	 * that it has been observed and must fall through to polling.
	 */
	if (SMR_SEQ_GEQ(s_rd_seq, goal) || SMR_SEQ_LT(s_wr_seq, goal))
		goto out;

	/*
	 * Loop until all cores have observed the goal sequence or have
	 * gone inactive.  Keep track of the oldest sequence currently
	 * active as rd_seq.
	 */
	rd_seq = s_wr_seq;
	CPU_FOREACH(i) {
		c = zpcpu_get_cpu(smr, i);
		c_seq = SMR_SEQ_INVALID;
		for (;;) {
			c_seq = atomic_load_int(&c->c_seq);
			if (c_seq == SMR_SEQ_INVALID)
				break;

			/*
			 * There is a race described in smr.h:smr_enter that
			 * can lead to a stale seq value but not stale data
			 * access.  If we find a value out of range here we
			 * pin it to the current min to prevent it from
			 * advancing until that stale section has expired.
			 *
			 * The race is created when a cpu loads the s_wr_seq
			 * value in a local register and then another thread
			 * advances s_wr_seq and calls smr_poll(), which will
			 * observe no value yet in c_seq and advance s_rd_seq
			 * up to s_wr_seq which is beyond the register
			 * cached value.  This is only likely to happen on a
			 * hypervisor or with a system management interrupt.
			 */
			if (SMR_SEQ_LT(c_seq, s_rd_seq))
				c_seq = s_rd_seq;

			/*
			 * If the sequence number meets the goal we are
			 * done with this cpu.
			 */
			if (SMR_SEQ_GEQ(c_seq, goal))
				break;

			/*
			 * If we're not waiting we will still scan the rest
			 * of the cpus and update s_rd_seq before returning
			 * an error.
			 */
			if (!wait) {
				success = false;
				break;
			}
			cpu_spinwait();
		}

		/*
		 * Limit the minimum observed rd_seq whether we met the goal
		 * or not.
		 */
		if (c_seq != SMR_SEQ_INVALID && SMR_SEQ_GT(rd_seq, c_seq))
			rd_seq = c_seq;
	}

	/*
	 * Advance the rd_seq as long as we observed the most recent one.
	 */
	s_rd_seq = atomic_load_int(&s->s_rd_seq);
	do {
		if (SMR_SEQ_LEQ(rd_seq, s_rd_seq))
			break;
	} while (atomic_fcmpset_int(&s->s_rd_seq, &s_rd_seq, rd_seq) == 0);

out:
	critical_exit();

	/*
	 * Serialize with smr_advance()/smr_exit().  The caller is now free
	 * to modify memory as expected.
	 */
	atomic_thread_fence_acq();

	return (success);
}

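/*
 * Allocate a new SMR: the shared sequence state plus a per-CPU record for
 * every possible CPU, each initialized to the idle (invalid) sequence.
 */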
smr_t
smr_create(const char *name)
{
	smr_t smr, c;
	smr_shared_t s;
	int i;

	s = uma_zalloc(smr_shared_zone, M_WAITOK);
	smr = uma_zalloc(smr_zone, M_WAITOK);

	s->s_name = name;
	s->s_rd_seq = s->s_wr_seq = SMR_SEQ_INIT;

	/* Initialize all CPUs, not just those running. */
	for (i = 0; i <= mp_maxid; i++) {
		c = zpcpu_get_cpu(smr, i);
		c->c_seq = SMR_SEQ_INVALID;
		c->c_shared = s;
	}
	atomic_thread_fence_seq_cst();

	return (smr);
}

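/*
 * Destroy an SMR created by smr_create().  Waits for all current readers
 * to drain via smr_synchronize() before freeing the shared and per-CPU
 * state.
 */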
void
smr_destroy(smr_t smr)
{

	smr_synchronize(smr);
	uma_zfree(smr_shared_zone, smr->c_shared);
	uma_zfree(smr_zone, smr);
}

/*
 * Initialize the UMA zones used by SMR.
 */
void
smr_init(void)
{

	smr_shared_zone = uma_zcreate("SMR SHARED", sizeof(struct smr_shared),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0);
	smr_zone = uma_zcreate("SMR CPU", sizeof(struct smr),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);
}
429