Lines Matching +full:cpu +full:- +full:read
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
46 * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
49 * Wang, Stamler, Parmer. 2016. Parallel Sections: Scaling System-Level
50 * Data-Structures
55 * use-after-free errors with lockless data structures or as
61 * observed. A shared read sequence number records the lowest
65 * readers by storing an invalid sequence number in the per-cpu
66 * state when the read section exits. Like Parsec we establish
69 * The write and read sequence numbers can be thought of as a two
73 * number. Periodically the read sequence or hand is polled and
75 * Memory which was freed between the old and new global read sequence
78 * any sequence number, they only observe them. The shared read
85 * observation. That is to say, the delta between read and write
89 * numbers even as read latencies prohibit all or some expiration.
92 * complete without waiting. The batch granularity and free-to-use
100 * per-cpu cache of memory before advancing the sequence. It then
103 * value once for n=cache-size frees and the waits are done long
105 * to account for pathological conditions and to advance the read
110 * it for every N buckets in exchange for higher free-to-use
113 * If the read overhead of accessing the shared cacheline becomes
117 * overhead for local serialization and cpu timestamp overhead.
124 * | -------------------- sequence number space -------------------- |
126 * | ----- valid sequence numbers ---- |
128 * | -- free -- | --------- deferred frees -------- | ---- free ---- |
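The comment fragments above describe the write and read sequence numbers as the two hands of a clock, with deferred frees parked between the read hand and the write hand. A minimal userspace sketch of that model follows; the names (clock_hands, defer_free, can_reclaim) and the increment value are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

#define	SEQ_INCR	2	/* illustrative; the kernel uses SMR_SEQ_INCR */

typedef unsigned int seq_t;

struct clock_hands {
	seq_t	wr;	/* write hand: advanced when memory is retired */
	seq_t	rd;	/* read hand: lowest sequence a reader still holds */
};

/* Signed difference so comparisons survive wraparound. */
static bool
seq_leq(seq_t a, seq_t b)
{
	return ((int)(a - b) <= 0);
}

/* Retire memory: advance the write hand and record it as the goal. */
static seq_t
defer_free(struct clock_hands *c)
{
	c->wr += SEQ_INCR;
	return (c->wr);
}

/* The deferred free may run once the read hand reaches its goal. */
static bool
can_reclaim(const struct clock_hands *c, seq_t goal)
{
	return (seq_leq(goal, c->rd));
}

int
main(void)
{
	struct clock_hands c = { .wr = 100, .rd = 100 };
	seq_t goal = defer_free(&c);

	printf("before readers drain: %d\n", can_reclaim(&c, goal));
	c.rd = c.wr;		/* every reader observed the new write hand */
	printf("after readers drain:  %d\n", can_reclaim(&c, goal));
	return (0);
}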
160 #define SMR_SEQ_MAX_ADVANCE (SMR_SEQ_MAX_DELTA - 1024)
164 #define SMR_SEQ_INIT (UINT_MAX - 100000)
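SMR_SEQ_INIT starts the counters near UINT_MAX so that wraparound is exercised almost immediately, which is why every comparison in this file is done on a signed difference rather than with a plain relational operator. A small self-contained check of that idiom; the macro names here only mirror the kernel's SMR_SEQ_* comparison macros.

#include <assert.h>
#include <limits.h>

typedef unsigned int seq_t;

#define	SEQ_LT(a, b)	((int)((a) - (b)) < 0)
#define	SEQ_GEQ(a, b)	((int)((a) - (b)) >= 0)

int
main(void)
{
	seq_t before = UINT_MAX - 1;	/* just before the wrap */
	seq_t after = before + 4;	/* wraps to a small value */

	assert(after < before);		/* naive compare gets it backwards */
	assert(SEQ_LT(before, after));	/* signed delta gets it right */
	assert(SEQ_GEQ(after, before));
	return (0);
}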
173 * Hardclock is responsible for advancing ticks on a single CPU while every
174 * CPU receives a regular clock interrupt. The clock interrupts are flushing
227 old._pair = s_wr._pair = atomic_load_acq_64(&s->s_wr._pair); in smr_lazy_advance()
233 d = t - s_wr.ticks; in smr_lazy_advance()
247 atomic_cmpset_64(&s->s_wr._pair, old._pair, s_wr._pair); in smr_lazy_advance()
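The smr_lazy_advance() fragments above load a packed (ticks, sequence) pair with acquire semantics, advance only if clock ticks have elapsed, and publish the result with a single 64-bit compare-and-swap so a racing updater either wins outright or loses harmlessly. A rough userspace approximation with C11 atomics; the union layout, names, and increment are illustrative.

#include <stdatomic.h>
#include <stdint.h>

#define	SEQ_INCR	2u	/* illustrative increment */

union lazy_pair {
	uint64_t	pair;
	struct {
		uint32_t	ticks;	/* last tick at which we advanced */
		uint32_t	seq;	/* current write sequence */
	};
};

static _Atomic uint64_t wr_pair;

static uint32_t
lazy_advance(uint32_t now)
{
	union lazy_pair old, new;

	old.pair = atomic_load_explicit(&wr_pair, memory_order_acquire);
	new = old;
	/* Only advance if at least one tick has passed since the last one. */
	if ((int32_t)(now - old.ticks) > 0) {
		new.ticks = now;
		new.seq = old.seq + SEQ_INCR;
		/* Losing the race means someone else advanced for us. */
		atomic_compare_exchange_strong(&wr_pair, &old.pair, new.pair);
	}
	return (new.seq);
}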
255 * of 0 in a particular CPU means it is not currently in a read section.
261 return (atomic_fetchadd_int(&s->s_wr.seq, SMR_SEQ_INCR) + SMR_SEQ_INCR); in smr_shared_advance()
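The line above notes that a stored sequence of 0 (SMR_SEQ_INVALID) marks a cpu that is not inside a read section, and smr_shared_advance() produces new sequences with a fetch-add. A simplified per-thread model of that reader and writer bookkeeping; the memory ordering shown is weaker than what the real enter path requires, and all names are illustrative.

#include <stdatomic.h>

#define	SEQ_INVALID	0u
#define	SEQ_INCR	2u		/* illustrative increment */

static _Atomic unsigned int wr_seq = 2;			/* shared write hand */
static _Thread_local _Atomic unsigned int my_seq;	/* this reader's view */

static void
reader_enter(void)
{
	/* Publish the sequence this read section runs under. */
	atomic_store_explicit(&my_seq,
	    atomic_load_explicit(&wr_seq, memory_order_acquire),
	    memory_order_release);
}

static void
reader_exit(void)
{
	/* Invalid tells the poller this reader holds nothing back. */
	atomic_store_explicit(&my_seq, SEQ_INVALID, memory_order_release);
}

static unsigned int
writer_advance(void)
{
	/* fetch-add returns the old value, so add the increment back. */
	return (atomic_fetch_add(&wr_seq, SEQ_INCR) + SEQ_INCR);
}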
266 * write sequence is too far ahead of the read sequence we have to poll
275 KASSERT((zpcpu_get(smr)->c_flags & SMR_LAZY) == 0, in smr_default_advance()
279 * Load the current read seq before incrementing the goal so in smr_default_advance()
282 s_rd_seq = atomic_load_acq_int(&s->s_rd_seq); in smr_default_advance()
287 * far ahead of the read sequence number. This keeps the in smr_default_advance()
292 smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE); in smr_default_advance()
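smr_default_advance() reads the shared read sequence before producing the new goal and, if the goal has run more than SMR_SEQ_MAX_ADVANCE ahead of it, waits on an older goal so the deferred window cannot grow without bound. A compressed sketch of that clamp; the delta constants are placeholders rather than the kernel's values.

#include <stdatomic.h>

#define	SEQ_INCR	2u
#define	SEQ_MAX_DELTA	(1u << 30)			/* placeholder */
#define	SEQ_MAX_ADVANCE	(SEQ_MAX_DELTA - 1024)

static _Atomic unsigned int wr_seq;
static _Atomic unsigned int rd_seq;

/* Spin until the read hand catches up to 'goal'. */
static void
wait_for(unsigned int goal)
{
	while ((int)(atomic_load_explicit(&rd_seq,
	    memory_order_acquire) - goal) < 0)
		;
}

static unsigned int
default_advance(void)
{
	unsigned int s_rd, goal;

	/* Load the read hand before publishing the new goal. */
	s_rd = atomic_load_explicit(&rd_seq, memory_order_acquire);
	goal = atomic_fetch_add(&wr_seq, SEQ_INCR) + SEQ_INCR;

	/* Clamp how far the write hand may lead the read hand. */
	if ((int)(goal - s_rd) >= (int)SEQ_MAX_ADVANCE)
		wait_for(goal - SEQ_MAX_ADVANCE);
	return (goal);
}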
301 * cpu local interval count.
307 if (++self->c_deferred < self->c_limit) in smr_deferred_advance()
309 self->c_deferred = 0; in smr_deferred_advance()
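smr_deferred_advance() only touches the shared sequence once every c_limit calls, handing back the goal a later advance will reach in the meantime. The same idea in a few lines; the structure and field names are illustrative, not the kernel's per-cpu layout.

#include <stdatomic.h>

#define	SEQ_INCR	2u

static _Atomic unsigned int wr_seq = 2;

struct pcpu_state {
	unsigned int	deferred;	/* calls since the last real advance */
	unsigned int	limit;		/* advance once per this many calls */
};

static unsigned int
deferred_advance(struct pcpu_state *self)
{
	/* Report the sequence a real advance would produce... */
	unsigned int goal = atomic_load_explicit(&wr_seq,
	    memory_order_acquire) + SEQ_INCR;

	/* ...but only perform the shared update once per 'limit' calls. */
	if (++self->deferred < self->limit)
		return (goal);
	self->deferred = 0;
	return (atomic_fetch_add(&wr_seq, SEQ_INCR) + SEQ_INCR);
}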
348 s = self->c_shared; in smr_advance()
349 flags = self->c_flags; in smr_advance()
363 * Poll to determine the currently observed sequence number on a cpu
373 c_seq = atomic_load_int(&c->c_seq); in smr_poll_cpu()
384 * The race is created when a cpu loads the s_wr_seq in smr_poll_cpu()
397 * with this cpu. in smr_poll_cpu()
428 * The read sequence can be no larger than the write sequence at in smr_poll_scan()
434 * Query the active sequence on this cpu. If we're not in smr_poll_scan()
453 s_rd_seq = atomic_load_int(&s->s_rd_seq); in smr_poll_scan()
455 atomic_cmpset_int(&s->s_rd_seq, s_rd_seq, rd_seq); in smr_poll_scan()
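smr_poll_cpu() and smr_poll_scan() together fold the per-cpu sequences down to the lowest value still in use and then try to push the shared read hand forward with a compare-and-set, where losing the race is harmless. A condensed userspace version; it glosses over the store-ordering race discussed around the s_wr_seq load, and the array of readers stands in for the per-cpu state.

#include <stdatomic.h>

#define	SEQ_INVALID	0u
#define	NREADERS	4

static _Atomic unsigned int reader_seq[NREADERS];
static _Atomic unsigned int rd_seq;
static _Atomic unsigned int wr_seq;

static unsigned int
poll_scan(void)
{
	unsigned int s_rd, lowest, c_seq;
	int i;

	/* The read hand can be no larger than the write hand. */
	lowest = atomic_load_explicit(&wr_seq, memory_order_acquire);
	for (i = 0; i < NREADERS; i++) {
		c_seq = atomic_load_explicit(&reader_seq[i],
		    memory_order_acquire);
		/* Idle readers do not hold the hand back. */
		if (c_seq == SEQ_INVALID)
			continue;
		if ((int)(c_seq - lowest) < 0)
			lowest = c_seq;
	}

	/* Advance the shared read hand if it is still behind us. */
	s_rd = atomic_load_explicit(&rd_seq, memory_order_relaxed);
	if ((int)(s_rd - lowest) < 0)
		atomic_compare_exchange_strong(&rd_seq, &s_rd, lowest);
	return (lowest);
}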
468 * This routine will update the minimum observed read sequence number in
489 KASSERT(!wait || (zpcpu_get(smr)->c_flags & SMR_LAZY) == 0, in smr_poll()
500 s = self->c_shared; in smr_poll()
501 flags = self->c_flags; in smr_poll()
513 * observe an updated read sequence that is larger than write. in smr_poll()
515 s_rd_seq = atomic_load_acq_int(&s->s_rd_seq); in smr_poll()
528 s_wr_seq = atomic_load_acq_int(&s->s_wr.seq); in smr_poll()
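smr_poll() itself checks the cached s_rd_seq first: if the goal is already at or behind it there is nothing to scan, and only a stale cache forces the full per-cpu scan. A sketch of that fast path; the read-hand pointer and scan callback are passed as parameters so the fragment stands alone, which differs from the real function's layout.

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Returns true once every reader has moved past 'goal'.  'rd' is the
 * cached shared read hand and 'scan' recomputes it from per-reader
 * state (as in the scan sketch above).
 */
static bool
poll_goal(_Atomic unsigned int *rd, unsigned int (*scan)(void),
    unsigned int goal, bool wait)
{
	unsigned int s_rd;

	do {
		/* Fast path: the cached read hand already passed the goal. */
		s_rd = atomic_load_explicit(rd, memory_order_acquire);
		if ((int)(goal - s_rd) <= 0)
			return (true);
		/* Slow path: recompute the read hand and check again. */
		s_rd = scan();
		if ((int)(goal - s_rd) <= 0)
			return (true);
	} while (wait);
	return (false);
}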
593 s->s_name = name; in smr_create()
594 s->s_rd_seq = s->s_wr.seq = SMR_SEQ_INIT; in smr_create()
595 s->s_wr.ticks = ticks; in smr_create()
600 c->c_seq = SMR_SEQ_INVALID; in smr_create()
601 c->c_shared = s; in smr_create()
602 c->c_deferred = 0; in smr_create()
603 c->c_limit = limit; in smr_create()
604 c->c_flags = flags; in smr_create()
616 uma_zfree(smr_shared_zone, smr->c_shared); in smr_destroy()
628 NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0); in smr_init()
629 smr_zone = uma_zcreate("SMR CPU", sizeof(struct smr), in smr_init()
630 NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU); in smr_init()
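Taken together, smr_create()/smr_destroy() and the advance/poll routines give consumers the pattern below: readers bracket lockless lookups with smr_enter()/smr_exit(), and a writer records a goal with smr_advance() and frees only after smr_poll() reports that all readers have passed it. This is a hedged consumer-side sketch against the interfaces in sys/sys/smr.h; the list, the malloc type, and the plain pointer loads (real code would use the SMR pointer accessors) are illustrative simplifications.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/smr.h>

MALLOC_DEFINE(M_OBJNODE, "objnode", "illustrative smr-protected nodes");

struct node {
	struct node	*next;
	int		key;
	int		value;
};

static smr_t		 obj_smr;
static struct node	*obj_head;

static void
obj_init(void)
{
	/* Default (eagerly advancing) SMR state. */
	obj_smr = smr_create("objnode", 0, 0);
}

static bool
obj_lookup(int key, int *valuep)
{
	struct node *n;
	bool found;

	found = false;
	smr_enter(obj_smr);
	/* Simplified traversal; nothing from 'n' may be used after exit. */
	for (n = obj_head; n != NULL; n = n->next) {
		if (n->key == key) {
			*valuep = n->value;
			found = true;
			break;
		}
	}
	smr_exit(obj_smr);
	return (found);
}

static void
obj_retire(struct node *n)
{
	smr_seq_t goal;

	/* 'n' has already been unlinked from obj_head by the caller. */
	goal = smr_advance(obj_smr);
	smr_poll(obj_smr, goal, true);	/* block until readers drain */
	free(n, M_OBJNODE);
}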