Lines Matching +full:self +full:- +full:working
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
46 * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
49 * Wang, Stamler, Parmer. 2016 Parallel Sections: Scaling System-Level
50 * Data-Structures
55 * use-after-free errors with lockless data structures or as
65 * readers by storing an invalid sequence number in the per-cpu
92 * complete without waiting. The batch granularity and free-to-use
100 * per-cpu cache of memory before advancing the sequence. It then
103 * value once for n=cache-size frees and the waits are done long
110 * it for every N buckets in exchange for higher free-to-use
124 * | -------------------- sequence number space -------------------- |
126 * | ----- valid sequence numbers ---- |
128 * | -- free -- | --------- deferred frees -------- | ---- free ---- |
160 #define SMR_SEQ_MAX_ADVANCE (SMR_SEQ_MAX_DELTA - 1024)
164 #define SMR_SEQ_INIT (UINT_MAX - 100000)
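
The diagram and constants above depend on sequence numbers being compared modulo the width of the counter: SMR_SEQ_INIT sits close to UINT_MAX precisely so wraparound is exercised early, and ordering tests must use signed-difference arithmetic rather than a plain '<'. A minimal user-space sketch of that comparison style (names invented here; the kernel's own SMR_SEQ_ macros may differ in detail), valid as long as the two values stay within half the sequence space, as the later comment fragments note:

#include <stdint.h>

typedef uint32_t seq_t;

/* Signed distance between two sequence numbers; correct across wrap as
 * long as the values are within half the sequence space of each other. */
static inline int32_t
seq_delta(seq_t a, seq_t b)
{
	return ((int32_t)(a - b));
}

#define SEQ_LT(a, b)	(seq_delta((a), (b)) < 0)
#define SEQ_LEQ(a, b)	(seq_delta((a), (b)) <= 0)
#define SEQ_GT(a, b)	(seq_delta((a), (b)) > 0)
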
227 old._pair = s_wr._pair = atomic_load_acq_64(&s->s_wr._pair); in smr_lazy_advance()
233 d = t - s_wr.ticks; in smr_lazy_advance()
247 atomic_cmpset_64(&s->s_wr._pair, old._pair, s_wr._pair); in smr_lazy_advance()
261 return (atomic_fetchadd_int(&s->s_wr.seq, SMR_SEQ_INCR) + SMR_SEQ_INCR); in smr_shared_advance()
275 KASSERT((zpcpu_get(smr)->c_flags & SMR_LAZY) == 0, in smr_default_advance()
282 s_rd_seq = atomic_load_acq_int(&s->s_rd_seq); in smr_default_advance()
288 * wrap detecting arithmetic working in pathological cases. in smr_default_advance()
292 smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE); in smr_default_advance()
304 smr_deferred_advance(smr_t smr, smr_shared_t s, smr_t self) in smr_deferred_advance() argument
307 if (++self->c_deferred < self->c_limit) in smr_deferred_advance()
309 self->c_deferred = 0; in smr_deferred_advance()
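
The smr_deferred_advance() fragments show the batching at work: each call bumps the per-CPU c_deferred counter, and only every c_limit-th call forces a real advance of the shared write sequence. A self-contained user-space sketch of that idea, with invented names and a stand-in SEQ_INCR:

#include <stdatomic.h>
#include <stdint.h>

#define SEQ_INCR	2	/* invented; stands in for the real increment */

struct pcpu_smr {
	unsigned deferred;	/* calls since the last real advance */
	unsigned limit;		/* batch size before forcing an advance */
};

/*
 * Most callers only bump a per-CPU counter and return the goal the next
 * real advance will reach; one call in every 'limit' pays for the shared
 * fetch-add.
 */
static uint32_t
deferred_advance(_Atomic uint32_t *wr_seq, struct pcpu_smr *self)
{
	if (++self->deferred < self->limit)
		return (atomic_load_explicit(wr_seq, memory_order_relaxed) +
		    SEQ_INCR);
	self->deferred = 0;
	return (atomic_fetch_add(wr_seq, SEQ_INCR) + SEQ_INCR);
}
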
324 * than half of this for SMR_SEQ_ macros to continue working.
329 smr_t self; in smr_advance() local
347 self = zpcpu_get(smr); in smr_advance()
348 s = self->c_shared; in smr_advance()
349 flags = self->c_flags; in smr_advance()
356 goal = smr_deferred_advance(smr, s, self); in smr_advance()
373 c_seq = atomic_load_int(&c->c_seq); in smr_poll_cpu()
453 s_rd_seq = atomic_load_int(&s->s_rd_seq); in smr_poll_scan()
455 atomic_cmpset_int(&s->s_rd_seq, s_rd_seq, rd_seq); in smr_poll_scan()
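
The smr_poll_cpu()/smr_poll_scan() fragments load each CPU's c_seq and then publish a newer s_rd_seq with a compare-and-set, so the shared read sequence can only move forward. A rough user-space analogue of that scan (the names and fixed CPU count are invented here):

#include <stdatomic.h>
#include <stdint.h>

#define NCPU	8	/* illustration only */

/*
 * Find the oldest sequence any active reader still holds and try to
 * publish it as the new shared read sequence; the compare-and-set keeps
 * the read sequence from ever moving backwards under concurrent polls.
 */
static void
poll_scan(_Atomic uint32_t *shared_rd, _Atomic uint32_t percpu_seq[NCPU])
{
	uint32_t rd_seq, min_seq, c;

	min_seq = 0;		/* 0 stands in for an invalid sequence */
	for (int i = 0; i < NCPU; i++) {
		c = atomic_load_explicit(&percpu_seq[i], memory_order_acquire);
		if (c == 0)
			continue;	/* CPU has no reader in a section */
		if (min_seq == 0 || (int32_t)(c - min_seq) < 0)
			min_seq = c;
	}
	if (min_seq == 0)
		return;		/* no readers; real code advances to s_wr */
	rd_seq = atomic_load_explicit(shared_rd, memory_order_acquire);
	if ((int32_t)(min_seq - rd_seq) > 0)
		(void)atomic_compare_exchange_strong(shared_rd, &rd_seq,
		    min_seq);
}
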
478 smr_t self; in smr_poll() local
489 KASSERT(!wait || (zpcpu_get(smr)->c_flags & SMR_LAZY) == 0, in smr_poll()
498 /* Attempt to load from self only once. */ in smr_poll()
499 self = zpcpu_get(smr); in smr_poll()
500 s = self->c_shared; in smr_poll()
501 flags = self->c_flags; in smr_poll()
515 s_rd_seq = atomic_load_acq_int(&s->s_rd_seq); in smr_poll()
528 s_wr_seq = atomic_load_acq_int(&s->s_wr.seq); in smr_poll()
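
Taken together, smr_advance() and smr_poll() support the usual deferred-free pattern: record a goal when an object is retired, then free it only once the poll reports that every reader has passed that goal. A hedged sketch of a consumer, assuming the smr_advance(smr) and smr_poll(smr, goal, wait) interfaces visible in these fragments; 'struct item' and its queueing are invented for illustration:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smr.h>

struct item {
	struct item	*next;
	smr_seq_t	 retire_seq;
};

static void
item_retire(smr_t smr, struct item *it)
{
	/* Record the goal readers must pass before 'it' may be freed. */
	it->retire_seq = smr_advance(smr);
	/* ... append 'it' to a deferred-free list here ... */
}

static bool
item_try_reclaim(smr_t smr, struct item *it)
{
	/* Non-blocking poll: true once all readers have advanced past the
	 * recorded goal, after which freeing 'it' is safe. */
	return (smr_poll(smr, it->retire_seq, false));
}
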
593 s->s_name = name; in smr_create()
594 s->s_rd_seq = s->s_wr.seq = SMR_SEQ_INIT; in smr_create()
595 s->s_wr.ticks = ticks; in smr_create()
600 c->c_seq = SMR_SEQ_INVALID; in smr_create()
601 c->c_shared = s; in smr_create()
602 c->c_deferred = 0; in smr_create()
603 c->c_limit = limit; in smr_create()
604 c->c_flags = flags; in smr_create()
616 uma_zfree(smr_shared_zone, smr->c_shared); in smr_destroy()
628 NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0); in smr_init()
630 NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU); in smr_init()
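
Finally, smr_create() fills in the shared name and sequence state and each per-CPU record (c_limit, c_flags), and smr_destroy() returns the shared state to its UMA zone. A sketch of how a consumer would typically create and tear down an SMR context, assuming the (name, limit, flags) argument order implied by the initialization above, with 0 for limit and flags requesting default, non-lazy behaviour:

#include <sys/param.h>
#include <sys/smr.h>

static smr_t example_smr;

static void
example_setup(void)
{
	example_smr = smr_create("example", 0, 0);
}

static void
example_teardown(void)
{
	smr_destroy(example_smr);
}
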