Lines Matching +full:write +full:- +full:only
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
46 * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
49 * Wang, Stamler, Parmer. 2016 Parallel Sections: Scaling System-Level
50 * Data-Structures
55 * use-after-free errors with lockless data structures or as
58 * The basic approach is to maintain a monotonic write sequence
60 * Readers record the most recent write sequence number they have
63 * write older than this value has been observed by all readers
65 * readers by storing an invalid sequence number in the per-cpu
67 * a global write clock that is used to mark memory on free.
69 * The write and read sequence numbers can be thought of as a two
74 * advanced as far towards the write sequence as active readers allow.
78 * any sequence number, they only observe them. The shared read
79 * sequence number is consequently never higher than the write sequence.
85 * observation. That is to say, the delta between read and write
92 * complete without waiting. The batch granularity and free-to-use
100 * per-cpu cache of memory before advancing the sequence. It then
102 * selected for reuse. In this way we only increment the sequence
103 * value once for n=cache-size frees and the waits are done long
104 * after the sequence has been expired so they need only be verified
109 * the write sequence number becomes too costly we can advance
110 * it for every N buckets in exchange for higher free-to-use
115 * sequence. The algorithm would then only need to maintain the minimum
124 * | -------------------- sequence number space -------------------- |
126 * | ----- valid sequence numbers ---- |
128 * | -- free -- | --------- deferred frees -------- | ---- free ---- |
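The fragments above describe the core protocol: writers advance a monotonic write sequence, each reader publishes the value it observed in a per-CPU slot (or the invalid marker when outside a read section), and memory freed at sequence S can be reused once every reader has published either the invalid marker or a sequence at or beyond S. A minimal user-space sketch of that protocol, using C11 atomics and a fixed array of reader slots in place of per-CPU data; all names here (wr_seq, rd_slot, reader_enter, writer_poll) are illustrative, not the kernel's:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SEQ_INVALID 0u                 /* reader not in a section */
    #define NREADERS    4

    static _Atomic uint32_t wr_seq = 1;          /* global write clock */
    static _Atomic uint32_t rd_slot[NREADERS];   /* published per-reader sequences */

    /* Reader: publish the write sequence we entered under, then fence so the
     * publication is visible before any loads from the protected structure. */
    static void reader_enter(int self)
    {
        uint32_t s = atomic_load_explicit(&wr_seq, memory_order_acquire);
        atomic_store_explicit(&rd_slot[self], s, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
    }

    static void reader_exit(int self)
    {
        atomic_store_explicit(&rd_slot[self], SEQ_INVALID, memory_order_release);
    }

    /* Writer: memory freed at sequence 'goal' may be reused once every reader
     * slot is either invalid or has observed a sequence at or beyond 'goal'. */
    static bool writer_poll(uint32_t goal)
    {
        for (int i = 0; i < NREADERS; i++) {
            uint32_t s = atomic_load_explicit(&rd_slot[i], memory_order_acquire);
            if (s != SEQ_INVALID && (int32_t)(s - goal) < 0)
                return false;              /* reader may still see old memory */
        }
        return true;
    }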
160 #define SMR_SEQ_MAX_ADVANCE (SMR_SEQ_MAX_DELTA - 1024)
164 #define SMR_SEQ_INIT (UINT_MAX - 100000)
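Because SMR_SEQ_INIT starts the clock within 100000 of UINT_MAX, the 32-bit sequence space wraps soon after creation, so relationships between sequence numbers cannot be tested with plain < and >. A sketch of the wrap-safe ("serial number") comparison such a scheme relies on; the macro names below only mirror the style of the code and are not taken from it:

    #include <stdint.h>

    /* Interpret the unsigned difference as signed: correct as long as the two
     * values are within 2^31 of each other, which is why the code bounds how
     * far the write sequence may run ahead of the read sequence
     * (SMR_SEQ_MAX_DELTA / SMR_SEQ_MAX_ADVANCE above). */
    #define SEQ_DELTA(a, b) ((int32_t)((a) - (b)))
    #define SEQ_GT(a, b)    (SEQ_DELTA(a, b) > 0)
    #define SEQ_GEQ(a, b)   (SEQ_DELTA(a, b) >= 0)

    /* Example: a value just past the wrap still compares as newer:
     * SEQ_GT(3u, UINT32_MAX - 5u) is true since
     * (int32_t)(3u - (UINT32_MAX - 5u)) == 9. */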
180 * This assumes that the clock interrupt will only be delayed by other causes
190 * sequence may not be advanced on write for lazy or deferred SMRs. In this
210 * Advance a lazy write sequence number. These move forward at the rate of
213 * This returns the goal write sequence number.
225 * current value can only be the same or larger. in smr_lazy_advance()
227 old._pair = s_wr._pair = atomic_load_acq_64(&s->s_wr._pair); in smr_lazy_advance()
233 d = t - s_wr.ticks; in smr_lazy_advance()
243 * This can only fail if another thread races to call advance(). in smr_lazy_advance()
247 atomic_cmpset_64(&s->s_wr._pair, old._pair, s_wr._pair); in smr_lazy_advance()
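The lazy variant above advances the clock at the rate of the ticks counter rather than on every call: the {ticks, seq} pair is loaded as a single 64-bit value, advanced by the number of elapsed ticks, and stored back with one compare-and-set, so a racing caller that loses the CAS simply keeps an equally valid goal. A simplified user-space rendering of that idea; the grace cap and all names are illustrative, not the kernel's:

    #include <stdatomic.h>
    #include <stdint.h>

    #define SEQ_INCR   2u
    #define GRACE_CAP  10      /* bound the advance after long idle periods */

    /* Tick stamp and sequence packed into one word so both halves update
     * atomically, as the s_wr._pair load and cmpset in the fragments above do. */
    union lazy_wr {
        uint64_t pair;
        struct { uint32_t ticks; uint32_t seq; };
    };

    static _Atomic uint64_t lazy_clock;    /* starts as {ticks = 0, seq = 0} here */

    static uint32_t lazy_advance(uint32_t now)
    {
        union lazy_wr old, upd;

        old.pair = atomic_load_explicit(&lazy_clock, memory_order_acquire);
        int32_t d = (int32_t)(now - old.ticks);
        if (d <= 0)
            return old.seq;                /* already advanced this tick */
        if (d > GRACE_CAP)
            d = GRACE_CAP;

        upd.ticks = now;
        upd.seq = old.seq + (uint32_t)d * SEQ_INCR;

        /* Losing this race only means another caller advanced the clock;
         * either value is a valid goal for the caller to wait on. */
        atomic_compare_exchange_strong(&lazy_clock, &old.pair, upd.pair);
        return upd.seq;
    }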
253 * Increment the shared write sequence by 2. Since it is initialized
254 * to 1 this means the only valid values are odd and an observed value
261 return (atomic_fetchadd_int(&s->s_wr.seq, SMR_SEQ_INCR) + SMR_SEQ_INCR); in smr_shared_advance()
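The shared-advance fragments explain the choice of SMR_SEQ_INCR: the clock starts at 1 and always moves by 2, so every value it ever produces is odd, and the even value 0 (SMR_SEQ_INVALID) can be stored in a per-CPU slot to mean "not in a read section" without ever colliding with a real sequence. A self-contained sketch of that invariant, with illustrative names:

    #include <stdatomic.h>
    #include <stdint.h>

    #define SEQ_INVALID 0u     /* even: never produced by the clock below */
    #define SEQ_INCR    2u     /* step of 2 keeps the clock odd forever */

    static _Atomic uint32_t shared_wr_seq = 1;     /* initialized odd */

    /* Return the post-increment value, mirroring the fetchadd + SMR_SEQ_INCR
     * pattern in the fragment above. */
    static uint32_t shared_advance(void)
    {
        return atomic_fetch_add_explicit(&shared_wr_seq, SEQ_INCR,
            memory_order_release) + SEQ_INCR;
    }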
265 * Advance the write sequence number for a normal smr section. If the
266 * write sequence is too far behind the read sequence we have to poll
275 KASSERT((zpcpu_get(smr)->c_flags & SMR_LAZY) == 0, in smr_default_advance()
282 s_rd_seq = atomic_load_acq_int(&s->s_rd_seq); in smr_default_advance()
292 smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE); in smr_default_advance()
307 if (++self->c_deferred < self->c_limit) in smr_deferred_advance()
309 self->c_deferred = 0; in smr_deferred_advance()
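The deferred variant amortizes that increment: each call hands back a goal one increment ahead of the current clock, but only every c_limit-th call actually advances the shared value. A simplified, self-contained sketch of the batching; the structure and names are illustrative:

    #include <stdatomic.h>
    #include <stdint.h>

    #define SEQ_INCR 2u

    static _Atomic uint32_t wr_clock = 1;       /* shared write sequence */

    /* Per-CPU bookkeeping; in the kernel this lives in the per-CPU smr
     * structure and is protected by a critical section. */
    struct deferred {
        uint32_t count;     /* calls since the clock last moved */
        uint32_t limit;     /* move the clock once per 'limit' calls */
    };

    /* Return a goal the caller can wait on later; only every 'limit'-th call
     * pays for the atomic increment of the shared clock. */
    static uint32_t deferred_advance(struct deferred *self)
    {
        uint32_t goal = atomic_load_explicit(&wr_clock, memory_order_acquire)
            + SEQ_INCR;

        if (++self->count < self->limit)
            return goal;            /* defer: reuse the pending goal */

        self->count = 0;            /* batch boundary: really advance */
        return atomic_fetch_add_explicit(&wr_clock, SEQ_INCR,
            memory_order_release) + SEQ_INCR;
    }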
314 * Advance the write sequence and return the value for use as the
348 s = self->c_shared; in smr_advance()
349 flags = self->c_flags; in smr_advance()
373 c_seq = atomic_load_int(&c->c_seq); in smr_poll_cpu()
389 * cached value. This is only likely to happen on in smr_poll_cpu()
428 * The read sequence can be no larger than the write sequence at in smr_poll_scan()
453 s_rd_seq = atomic_load_int(&s->s_rd_seq); in smr_poll_scan()
455 atomic_cmpset_int(&s->s_rd_seq, s_rd_seq, rd_seq); in smr_poll_scan()
463 * Poll to determine whether all readers have observed the 'goal' write
489 KASSERT(!wait || (zpcpu_get(smr)->c_flags & SMR_LAZY) == 0, in smr_poll()
498 /* Attempt to load from self only once. */ in smr_poll()
500 s = self->c_shared; in smr_poll()
501 flags = self->c_flags; in smr_poll()
505 * Conditionally advance the lazy write clock on any writer in smr_poll()
513 * observe an updated read sequence that is larger than write. in smr_poll()
515 s_rd_seq = atomic_load_acq_int(&s->s_rd_seq); in smr_poll()
526 * stale c_seq can only reference time after this wr_seq. in smr_poll()
528 s_wr_seq = atomic_load_acq_int(&s->s_wr.seq); in smr_poll()
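Polling works in two steps, as the fragments above indicate: first check the cached shared read sequence, and only if the goal is still ahead of it scan every per-CPU slot, take the minimum published value (bounded above by the write sequence sampled for the scan), and ratchet the cached read sequence forward with a compare-and-set that is allowed to fail. A condensed user-space sketch of that scan and fast path; all names are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define NCPU          4
    #define SEQ_INVALID   0u
    #define SEQ_LT(a, b)  ((int32_t)((a) - (b)) < 0)
    #define SEQ_GEQ(a, b) ((int32_t)((a) - (b)) >= 0)

    static _Atomic uint32_t wr_seq = 1;        /* shared write clock */
    static _Atomic uint32_t rd_seq;            /* cached global read minimum */
    static _Atomic uint32_t cpu_seq[NCPU];     /* per-reader published values */

    /* Recompute the read sequence as the minimum published value, bounded
     * above by the write sequence sampled before the scan. */
    static uint32_t poll_scan(uint32_t wr)
    {
        uint32_t min = wr;                 /* idle readers don't hold it back */

        for (int i = 0; i < NCPU; i++) {
            uint32_t c = atomic_load_explicit(&cpu_seq[i], memory_order_acquire);
            if (c != SEQ_INVALID && SEQ_LT(c, min))
                min = c;
        }

        /* Ratchet the cached value forward; losing this race is harmless
         * because the winner published an equally current minimum. */
        uint32_t old = atomic_load_explicit(&rd_seq, memory_order_acquire);
        if (SEQ_LT(old, min))
            atomic_compare_exchange_strong(&rd_seq, &old, min);
        return min;
    }

    /* True once every reader has observed 'goal': try the cached read
     * sequence first, fall back to a full per-CPU scan. */
    static bool poll_once(uint32_t goal)
    {
        uint32_t rd = atomic_load_explicit(&rd_seq, memory_order_acquire);
        if (SEQ_GEQ(rd, goal))
            return true;
        return SEQ_GEQ(poll_scan(atomic_load_explicit(&wr_seq,
            memory_order_acquire)), goal);
    }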
593 s->s_name = name; in smr_create()
594 s->s_rd_seq = s->s_wr.seq = SMR_SEQ_INIT; in smr_create()
595 s->s_wr.ticks = ticks; in smr_create()
600 c->c_seq = SMR_SEQ_INVALID; in smr_create()
601 c->c_shared = s; in smr_create()
602 c->c_deferred = 0; in smr_create()
603 c->c_limit = limit; in smr_create()
604 c->c_flags = flags; in smr_create()
616 uma_zfree(smr_shared_zone, smr->c_shared); in smr_destroy()
628 NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0); in smr_init()
630 NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU); in smr_init()
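Finally, a hypothetical writer-side usage of the KPI visible in these fragments (smr_create(name, limit, flags), smr_advance(), smr_poll(smr, goal, wait), smr_destroy()), sketched for kernel context. The widget structure and functions are invented for illustration, and the zero limit/flags are assumed to select the plain (non-lazy, non-deferred) behaviour:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/smr.h>

    /* Hypothetical object protected by SMR; w_free_seq records the write
     * sequence current when it was unlinked. */
    struct widget {
        int       w_value;
        smr_seq_t w_free_seq;
    };

    static smr_t widget_smr;

    static void
    widget_init(void)
    {
        widget_smr = smr_create("widget", 0, 0);
    }

    static void
    widget_retire(struct widget *w)
    {
        /* Unlink 'w' from the lockless structure before stamping it. */
        w->w_free_seq = smr_advance(widget_smr);
    }

    static bool
    widget_can_reuse(struct widget *w)
    {
        /* Non-blocking check; passing wait = true would block until safe. */
        return (smr_poll(widget_smr, w->w_free_seq, false));
    }

    static void
    widget_fini(void)
    {
        smr_destroy(widget_smr);
    }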