Lines Matching +full:idle +full:- +full:touch

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
46 * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
49  * Wang, Stamler, Parmer. 2016. Parallel Sections: Scaling System-Level
50 * Data-Structures
55  * use-after-free errors with lockless data structures or as
64 * and memory can be reclaimed. Like Epoch we also detect idle
65 * readers by storing an invalid sequence number in the per-cpu
76 * number can now be reclaimed. When the system is idle the two hands
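The comment fragments above describe how idle readers are detected: a reader publishes the current write sequence while it is inside a section and stores an invalid sequence number when it leaves, so pollers never wait on idle CPUs. A minimal userland sketch of that idea using C11 atomics; the names SEQ_INVALID, struct reader and wr_seq are mine, and the memory ordering is simplified relative to the kernel.

#include <stdatomic.h>
#include <stdint.h>

#define	SEQ_INVALID	0u		/* marker: this reader is idle */

struct reader {
	_Atomic uint32_t seq;		/* write seq seen at entry, or SEQ_INVALID */
};

static _Atomic uint32_t wr_seq = 2;	/* global write sequence, never 0 */

static void
reader_enter(struct reader *r)
{
	/* Publish the sequence we entered under; pollers must wait for us. */
	atomic_store_explicit(&r->seq,
	    atomic_load_explicit(&wr_seq, memory_order_acquire),
	    memory_order_seq_cst);
}

static void
reader_exit(struct reader *r)
{
	/* Store the invalid marker again: pollers now treat us as idle. */
	atomic_store_explicit(&r->seq, SEQ_INVALID, memory_order_release);
}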
92 * complete without waiting. The batch granularity and free-to-use
100 * per-cpu cache of memory before advancing the sequence. It then
103 * value once for n=cache-size frees and the waits are done long
110 * it for every N buckets in exchange for higher free-to-use
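The fragments above are about amortization: rather than paying an atomic increment and a possible wait on every free, the sequence is advanced once per batch (per per-cpu cache of memory, or once every c_limit calls), trading free-to-use latency for cheaper frees. A hedged model of that counting scheme, with illustrative names and constants:

#include <stdatomic.h>
#include <stdint.h>

#define	SEQ_INCR	2u	/* step size; even, so 0 stays reserved as "invalid" */

struct pcpu_state {
	unsigned deferred;	/* frees since the last real advance */
	unsigned limit;		/* batch size before paying for an advance */
};

static uint32_t
deferred_advance(struct pcpu_state *self, _Atomic uint32_t *wr_seq)
{
	/* Most calls only report the goal the next real advance will set. */
	if (++self->deferred < self->limit)
		return (atomic_load_explicit(wr_seq, memory_order_relaxed) +
		    SEQ_INCR);

	/* Every 'limit' calls pay for one atomic increment of the sequence. */
	self->deferred = 0;
	return (atomic_fetch_add_explicit(wr_seq, SEQ_INCR,
	    memory_order_acq_rel) + SEQ_INCR);
}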
124 * | -------------------- sequence number space -------------------- |
126 * | ----- valid sequence numbers ---- |
128 * | -- free -- | --------- deferred frees -------- | ---- free ---- |
160 #define SMR_SEQ_MAX_ADVANCE (SMR_SEQ_MAX_DELTA - 1024)
164 #define SMR_SEQ_INIT (UINT_MAX - 100000)
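SMR_SEQ_INIT starting at UINT_MAX - 100000 means the 32-bit counter wraps relatively early in the system's life, presumably so the wraparound paths get exercised, and every comparison therefore has to be modular rather than a plain less-than. A small sketch of wrap-safe comparison helpers; seq_lt/seq_geq are my names, the kernel has its own macros for this.

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe comparisons: order two sequence numbers via a signed delta. */
static inline bool
seq_lt(uint32_t a, uint32_t b)
{
	return ((int32_t)(a - b) < 0);
}

static inline bool
seq_geq(uint32_t a, uint32_t b)
{
	return ((int32_t)(a - b) >= 0);
}

/*
 * Starting near UINT32_MAX, the counter wraps to small values early on,
 * yet seq_lt() still orders any two sequences correctly as long as they
 * are within half the sequence space of each other.
 */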
182  * data. For example, an idle processor, or a system management interrupt,
227 old._pair = s_wr._pair = atomic_load_acq_64(&s->s_wr._pair); in smr_lazy_advance()
233 d = t - s_wr.ticks; in smr_lazy_advance()
236 /* Cap the rate of advancement and handle long idle periods. */ in smr_lazy_advance()
247 atomic_cmpset_64(&s->s_wr._pair, old._pair, s_wr._pair); in smr_lazy_advance()
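The smr_lazy_advance() fragments load a packed 64-bit (sequence, ticks) pair with a single acquire load, use the tick delta to cap how fast the sequence can advance, and publish the result with one cmpset, shrugging off lost races. A simplified userland model of that pattern; the union layout and names are assumptions, and the long-idle handling is omitted.

#include <stdatomic.h>
#include <stdint.h>

union wr_pair {
	uint64_t	pair;
	struct {
		uint32_t seq;	/* write sequence */
		uint32_t ticks;	/* time of the last advance */
	};
};

static uint32_t
lazy_advance(_Atomic uint64_t *wr, uint32_t now, uint32_t incr)
{
	union wr_pair old, new;

	old.pair = atomic_load_explicit(wr, memory_order_acquire);
	new = old;

	/* Cap the rate of advancement: at most one step per elapsed tick. */
	if ((int32_t)(now - old.ticks) > 0) {
		new.seq = old.seq + incr;
		new.ticks = now;
		/* A failed CAS means someone else advanced; that is fine. */
		atomic_compare_exchange_strong(wr, &old.pair, new.pair);
	}
	return (new.seq);
}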
261 return (atomic_fetchadd_int(&s->s_wr.seq, SMR_SEQ_INCR) + SMR_SEQ_INCR); in smr_shared_advance()
275 KASSERT((zpcpu_get(smr)->c_flags & SMR_LAZY) == 0, in smr_default_advance()
282 s_rd_seq = atomic_load_acq_int(&s->s_rd_seq); in smr_default_advance()
292 smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE); in smr_default_advance()
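smr_default_advance() bumps the write sequence with an atomic fetchadd and then, per the SMR_SEQ_MAX_ADVANCE check visible above, waits on an older goal if the new one would run too far ahead of the read horizon, keeping the span of deferred frees bounded. A hedged model with the wait stubbed out:

#include <stdatomic.h>
#include <stdint.h>

#define	SEQ_INCR	2u

static void
wait_for_goal(uint32_t goal)
{
	/* Stub: the real code scans readers until they all pass 'goal'. */
	(void)goal;
}

static uint32_t
default_advance(_Atomic uint32_t *wr_seq, _Atomic uint32_t *rd_seq,
    uint32_t max_advance)
{
	uint32_t goal, rd;

	/* Claim a new goal; readers must drain past it before frees run. */
	goal = atomic_fetch_add_explicit(wr_seq, SEQ_INCR,
	    memory_order_acq_rel) + SEQ_INCR;

	/* Bound how far the write hand may run ahead of the read hand. */
	rd = atomic_load_explicit(rd_seq, memory_order_acquire);
	if ((int32_t)(goal - rd) >= (int32_t)max_advance)
		wait_for_goal(goal - max_advance);

	return (goal);
}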
307 if (++self->c_deferred < self->c_limit) in smr_deferred_advance()
309 self->c_deferred = 0; in smr_deferred_advance()
346 /* Try to touch the line once. */ in smr_advance()
348 s = self->c_shared; in smr_advance()
349 flags = self->c_flags; in smr_advance()
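smr_advance() itself reads the per-cpu record once ("touch the line once") and then dispatches to the default, lazy, or deferred policy from the cached flags. A sketch of that dispatch shape; the flag values and helper functions are placeholders standing in for the earlier sketches.

#include <stdint.h>

#define	F_LAZY		0x1	/* illustrative flag values */
#define	F_DEFERRED	0x2

struct shared;			/* opaque shared SMR state */

struct pcpu {
	struct shared	*c_shared;
	int		 c_flags;
};

/* Stubs standing in for the per-policy routines sketched earlier. */
static uint32_t advance_default(struct shared *s) { (void)s; return (2); }
static uint32_t advance_lazy(struct shared *s) { (void)s; return (2); }
static uint32_t advance_deferred(struct shared *s) { (void)s; return (2); }

static uint32_t
advance(struct pcpu *self)
{
	struct shared *s;
	int flags;

	/* Touch the per-cpu cache line once; branch on the cached flags. */
	s = self->c_shared;
	flags = self->c_flags;
	if ((flags & (F_LAZY | F_DEFERRED)) == 0)
		return (advance_default(s));
	if ((flags & F_LAZY) != 0)
		return (advance_lazy(s));
	return (advance_deferred(s));
}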
373 c_seq = atomic_load_int(&c->c_seq); in smr_poll_cpu()
453 s_rd_seq = atomic_load_int(&s->s_rd_seq); in smr_poll_scan()
455 atomic_cmpset_int(&s->s_rd_seq, s_rd_seq, rd_seq); in smr_poll_scan()
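smr_poll_cpu() and smr_poll_scan() together compute the oldest sequence any active reader still holds, skipping idle (invalid) slots, and fold the result into the shared s_rd_seq with a cmpset that only moves it forward. A hedged userland model of the scan:

#include <stdatomic.h>
#include <stdint.h>

#define	SEQ_INVALID	0u		/* idle marker, as in the earlier sketches */

static uint32_t
poll_scan(_Atomic uint32_t *c_seq, int nslots, _Atomic uint32_t *s_rd_seq,
    uint32_t wr_seq)
{
	uint32_t rd_seq, seq, old;
	int i;

	rd_seq = wr_seq;		/* nothing older observed yet */
	for (i = 0; i < nslots; i++) {
		seq = atomic_load_explicit(&c_seq[i], memory_order_acquire);
		if (seq == SEQ_INVALID)
			continue;	/* idle reader: nothing to wait for */
		if ((int32_t)(seq - rd_seq) < 0)
			rd_seq = seq;
	}

	/* Fold the scan result into the shared read sequence, forward only. */
	old = atomic_load_explicit(s_rd_seq, memory_order_acquire);
	if ((int32_t)(rd_seq - old) > 0)
		atomic_compare_exchange_strong(s_rd_seq, &old, rd_seq);
	return (rd_seq);
}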
489 KASSERT(!wait || (zpcpu_get(smr)->c_flags & SMR_LAZY) == 0, in smr_poll()
500 s = self->c_shared; in smr_poll()
501 flags = self->c_flags; in smr_poll()
515 s_rd_seq = atomic_load_acq_int(&s->s_rd_seq); in smr_poll()
528 s_wr_seq = atomic_load_acq_int(&s->s_wr.seq); in smr_poll()
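smr_poll() starts with acquire loads of the cached read and write sequences; when the cached read horizon already covers the goal it can succeed without touching any remote per-cpu line, and only otherwise does it fall back to a scan. A simplified model of that fast path, with the scan stubbed out:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static uint32_t
scan_readers(uint32_t wr_seq)
{
	/* Stub: the real scan computes the minimum active reader sequence. */
	return (wr_seq);
}

static bool
poll_goal(_Atomic uint32_t *s_rd_seq, _Atomic uint32_t *s_wr_seq,
    uint32_t goal)
{
	uint32_t rd_seq, wr_seq;

	/* Fast path: the cached read horizon already covers the goal. */
	rd_seq = atomic_load_explicit(s_rd_seq, memory_order_acquire);
	if ((int32_t)(rd_seq - goal) >= 0)
		return (true);

	/* Slow path: refresh the write sequence and scan the readers. */
	wr_seq = atomic_load_explicit(s_wr_seq, memory_order_acquire);
	rd_seq = scan_readers(wr_seq);
	return ((int32_t)(rd_seq - goal) >= 0);
}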
593 s->s_name = name; in smr_create()
594 s->s_rd_seq = s->s_wr.seq = SMR_SEQ_INIT; in smr_create()
595 s->s_wr.ticks = ticks; in smr_create()
600 c->c_seq = SMR_SEQ_INVALID; in smr_create()
601 c->c_shared = s; in smr_create()
602 c->c_deferred = 0; in smr_create()
603 c->c_limit = limit; in smr_create()
604 c->c_flags = flags; in smr_create()
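smr_create() seeds the shared state with SMR_SEQ_INIT, records the current ticks, and starts each per-cpu record idle (SMR_SEQ_INVALID) with the caller's limit and flags. For context, a hedged usage sketch against the sys/smr.h API as I understand it; struct obj, its o_goal field, M_OBJ, and the limit of 32 are illustrative, not part of the kernel.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/smr.h>

MALLOC_DEFINE(M_OBJ, "smrobj", "SMR usage example");

struct obj {
	int		o_value;
	smr_seq_t	o_goal;		/* sequence to drain before freeing */
};

static smr_t obj_smr;

static void
obj_setup(void)
{
	/* The deferred limit of 32 and flags of 0 are illustrative. */
	obj_smr = smr_create("obj example", 32, 0);
}

/* Reader: the object may only be dereferenced between enter and exit. */
static int
obj_read(struct obj *o)
{
	int v;

	smr_enter(obj_smr);
	v = o->o_value;
	smr_exit(obj_smr);
	return (v);
}

/* Writer: unlink the object from its data structure, then record a goal. */
static void
obj_retire(struct obj *o)
{
	o->o_goal = smr_advance(obj_smr);
}

/* Reclaimer: free only once every reader has passed the recorded goal. */
static bool
obj_try_free(struct obj *o)
{
	if (!smr_poll(obj_smr, o->o_goal, false))
		return (false);
	free(o, M_OBJ);
	return (true);
}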
616 uma_zfree(smr_shared_zone, smr->c_shared); in smr_destroy()
628 NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0); in smr_init()
630 NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU); in smr_init()
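smr_init() creates the two UMA zones with an alignment mask of (CACHE_LINE_SIZE * 2) - 1, i.e. records padded to two cache lines so the shared and per-cpu state stay off each other's lines, with UMA_ZONE_PCPU giving the per-cpu zone one slot per CPU (uma_zcreate() takes alignment as a mask, hence the minus one). A hedged reconstruction of that setup; the zone names here are guesses.

#include <sys/param.h>
#include <sys/smr.h>
#include <vm/uma.h>

static uma_zone_t smr_shared_zone;
static uma_zone_t smr_zone;

static void
smr_zones_init(void)
{
	/* Shared record: one per smr_create(), padded to two cache lines. */
	smr_shared_zone = uma_zcreate("smr shared", sizeof(struct smr_shared),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0);

	/* Per-cpu records: UMA_ZONE_PCPU allocates one slot per CPU. */
	smr_zone = uma_zcreate("smr cpu", sizeof(struct smr),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);
}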