/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019,2020 Jeffrey Roberson <jeff@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/smr.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

/*
 * Global Unbounded Sequences (GUS)
 *
 * This is a novel safe memory reclamation technique inspired by
 * epoch based reclamation from Samy Al Bahra's concurrency kit which
 * in turn was based on work described in:
 *	Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *	of Cambridge Computing Laboratory.
 * And shares some similarities with:
 *	Wang, Stamler, Parmer. 2016 Parallel Sections: Scaling System-Level
 *	Data-Structures
 *
 * This is not an implementation of hazard pointers or related
 * techniques.
 * The term safe memory reclamation is used as a
 * generic descriptor for algorithms that defer frees to avoid
 * use-after-free errors with lockless data structures or as
 * a mechanism to detect quiescence for writer synchronization.
 *
 * The basic approach is to maintain a monotonic write sequence
 * number that is updated on some application defined granularity.
 * Readers record the most recent write sequence number they have
 * observed.  A shared read sequence number records the lowest
 * sequence number observed by any reader as of the last poll.  Any
 * write older than this value has been observed by all readers
 * and memory can be reclaimed.  Like Epoch we also detect idle
 * readers by storing an invalid sequence number in the per-cpu
 * state when the read section exits.  Like Parsec we establish
 * a global write clock that is used to mark memory on free.
 *
 * The write and read sequence numbers can be thought of as a two
 * handed clock with readers always advancing towards writers.  GUS
 * maintains the invariant that all readers can safely access memory
 * that was visible at the time they loaded their copy of the sequence
 * number.  Periodically the read sequence or hand is polled and
 * advanced as far towards the write sequence as active readers allow.
 * Memory which was freed between the old and new global read sequence
 * number can now be reclaimed.  When the system is idle the two hands
 * meet and no deferred memory is outstanding.  Readers never advance
 * any sequence number, they only observe them.  The shared read
 * sequence number is consequently never higher than the write sequence.
 * A stored sequence number that falls outside of this range has expired
 * and needs no scan to reclaim.
 *
 * A notable distinction between GUS and Epoch, qsbr, rcu, etc. is
 * that advancing the sequence number is decoupled from detecting its
 * observation.  That is to say, the delta between read and write
 * sequence numbers is not bounded.  This can be thought of as a more
 * generalized form of epoch, which requires them to be at most one
 * step apart.  This results in a more granular assignment of sequence
 * numbers even as read latencies prohibit all or some expiration.
 * It also allows writers to advance the sequence number and save the
 * poll for expiration until a later time when it is likely to
 * complete without waiting.  The batch granularity and free-to-use
 * latency are dynamic and can be significantly smaller than in more
 * strict systems.
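 *
 * As a rough sketch of the resulting protocol (the authoritative inline
 * implementations live in sys/smr.h; this is illustrative only):
 *
 *	// reader
 *	smr_enter(smr);			// c_seq = current wr_seq, with fence
 *	... lockless reads ...
 *	smr_exit(smr);			// fence, then c_seq = SMR_SEQ_INVALID
 *
 *	// writer retiring memory
 *	goal = smr_advance(smr);	// bump wr_seq, remember the goal
 *	... stash the memory, tagged with goal ...
 *	smr_poll(smr, goal, true);	// all readers have observed goal
 *	... the memory may now be reused ...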
 *
 * This mechanism is primarily intended to be used in coordination with
 * UMA.  By integrating with the allocator we avoid all of the callout
 * queue machinery and are provided with an efficient way to batch
 * sequence advancement and waiting.  The allocator accumulates a full
 * per-cpu cache of memory before advancing the sequence.  It then
 * delays waiting for this sequence to expire until the memory is
 * selected for reuse.  In this way we only increment the sequence
 * value once for n=cache-size frees and the waits are done long
 * after the sequence has been expired so they need only be verified
 * to account for pathological conditions and to advance the read
 * sequence.  Tying the sequence number to the bucket size has the
 * nice property that as the zone gets busier the buckets get larger
 * and the sequence writes become fewer.  If the coherency of advancing
 * the write sequence number becomes too costly we can advance
 * it for every N buckets in exchange for higher free-to-use
 * latency and consequently higher memory consumption.
 *
 * If the read overhead of accessing the shared cacheline becomes
 * especially burdensome an invariant TSC could be used in place of the
 * sequence.  The algorithm would then only need to maintain the minimum
 * observed tsc.  This would trade potential cache synchronization
 * overhead for local serialization and cpu timestamp overhead.
 */

/*
 * A simplified diagram:
 *
 * 0                                                          UINT_MAX
 * | -------------------- sequence number space -------------------- |
 *              ^ rd seq                            ^ wr seq
 *              | ----- valid sequence numbers ---- |
 *                ^cpuA  ^cpuC
 * | -- free -- | --------- deferred frees -------- | ---- free ---- |
 *
 *
 * In this example cpuA has the lowest sequence number and poll can
 * advance rd seq.  cpuB is not running (and so is not shown) and is
 * considered to observe wr seq.
 *
 * Freed memory that is tagged with a sequence number between rd seq and
 * wr seq can not be safely reclaimed because cpuA may hold a reference to
 * it.  Any other memory is guaranteed to be unreferenced.
 *
 * Any writer is free to advance wr seq at any time, however it may busy
 * poll in pathological cases.
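 *
 * Because the sequence space wraps at UINT_MAX, all comparisons are done
 * with signed modular arithmetic.  As a sketch (the real macros live in
 * sys/smr.h):
 *
 *	SMR_SEQ_LT(a, b)  ~  (smr_delta_t)((a) - (b)) < 0
 *	SMR_SEQ_GT(a, b)  ~  (smr_delta_t)((a) - (b)) > 0
 *
 * This is only meaningful while the two values are within half of the
 * sequence space of each other, which is why SMR_SEQ_MAX_DELTA below is
 * kept well under that limit.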
 */

static uma_zone_t smr_shared_zone;
static uma_zone_t smr_zone;

#ifndef INVARIANTS
#define	SMR_SEQ_INIT		1	/* All valid sequence numbers are odd. */
#define	SMR_SEQ_INCR		2

/*
 * SMR_SEQ_MAX_DELTA is the maximum distance allowed between rd_seq and
 * wr_seq.  For the modular arithmetic to work a value of UINT_MAX / 2
 * would be possible but it is checked after we increment the wr_seq so
 * a safety margin is left to prevent overflow.
 *
 * We will block until SMR_SEQ_MAX_ADVANCE sequence numbers have progressed
 * to prevent integer wrapping.  See smr_advance() for more details.
 */
#define	SMR_SEQ_MAX_DELTA	(UINT_MAX / 4)
#define	SMR_SEQ_MAX_ADVANCE	(SMR_SEQ_MAX_DELTA - 1024)
#else
/* We want to test the wrapping feature in invariants kernels. */
#define	SMR_SEQ_INCR		(UINT_MAX / 10000)
#define	SMR_SEQ_INIT		(UINT_MAX - 100000)
/* Force extra polls to test the integer overflow detection. */
#define	SMR_SEQ_MAX_DELTA	(SMR_SEQ_INCR * 32)
#define	SMR_SEQ_MAX_ADVANCE	(SMR_SEQ_MAX_DELTA / 2)
#endif

/*
 * The grace period for lazy (tick based) SMR.
 *
 * Hardclock is responsible for advancing ticks on a single CPU while every
 * CPU receives a regular clock interrupt.  The clock interrupts flush the
 * store buffers and any speculative loads that may violate our invariants.
 * Because these interrupts are not synchronized we must wait one additional
 * tick in the future to be certain that all processors have had their state
 * synchronized by an interrupt.
 *
 * This assumes that the clock interrupt will only be delayed by other causes
 * that will flush the store buffer or prevent access to the section protected
 * data.  For example, an idle processor, a system management interrupt, or
 * a vm exit.
 */
#define	SMR_LAZY_GRACE		2
#define	SMR_LAZY_INCR		(SMR_LAZY_GRACE * SMR_SEQ_INCR)

/*
 * The maximum sequence number ahead of wr_seq that may still be valid.  The
 * sequence may not be advanced on write for lazy or deferred SMRs.  In this
 * case poll needs to attempt to forward the sequence number if the goal is
 * within wr_seq + SMR_SEQ_ADVANCE.
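 *
 * For example, with the non-INVARIANTS constants above, SMR_SEQ_INCR is 2
 * and SMR_LAZY_GRACE is 2, so SMR_LAZY_INCR and SMR_SEQ_ADVANCE are both 4:
 * a goal handed out by a lazy advance is at most two increments (two ticks)
 * ahead of the stored write sequence, and poll will forward wr_seq for any
 * goal within that window.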
 */
#define	SMR_SEQ_ADVANCE		SMR_LAZY_INCR

static SYSCTL_NODE(_debug, OID_AUTO, smr, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    "SMR Stats");
static COUNTER_U64_DEFINE_EARLY(advance);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance, CTLFLAG_RW, &advance, "");
static COUNTER_U64_DEFINE_EARLY(advance_wait);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance_wait, CTLFLAG_RW, &advance_wait, "");
static COUNTER_U64_DEFINE_EARLY(poll);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll, CTLFLAG_RW, &poll, "");
static COUNTER_U64_DEFINE_EARLY(poll_scan);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_scan, CTLFLAG_RW, &poll_scan, "");
static COUNTER_U64_DEFINE_EARLY(poll_fail);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_fail, CTLFLAG_RW, &poll_fail, "");

/*
 * Advance a lazy write sequence number.  These move forward at the rate of
 * ticks.  Grace is SMR_LAZY_INCR (2 ticks) in the future.
 *
 * This returns the goal write sequence number.
 */
static smr_seq_t
smr_lazy_advance(smr_t smr, smr_shared_t s)
{
	union s_wr s_wr, old;
	int t, d;

	CRITICAL_ASSERT(curthread);

	/*
	 * Load the stored ticks value before the current one.  This way the
	 * current value can only be the same or larger.
	 */
	old._pair = s_wr._pair = atomic_load_acq_64(&s->s_wr._pair);
	t = ticks;

	/*
	 * The most probable case is that the update already took place.
	 */
	d = t - s_wr.ticks;
	if (__predict_true(d == 0))
		goto out;
	/* Cap the rate of advancement and handle long idle periods. */
	if (d > SMR_LAZY_GRACE || d < 0)
		d = SMR_LAZY_GRACE;
	s_wr.ticks = t;
	s_wr.seq += d * SMR_SEQ_INCR;

	/*
	 * This can only fail if another thread races to call advance().
	 * Strong cmpset semantics mean we are guaranteed that the update
	 * happened.
	 */
	atomic_cmpset_64(&s->s_wr._pair, old._pair, s_wr._pair);
out:
	return (s_wr.seq + SMR_LAZY_INCR);
}

/*
 * Increment the shared write sequence by 2.
 * Since it is initialized to 1, this means the only valid values are odd
 * and an observed value of 0 in a particular CPU means it is not currently
 * in a read section.
 */
static smr_seq_t
smr_shared_advance(smr_shared_t s)
{

	return (atomic_fetchadd_int(&s->s_wr.seq, SMR_SEQ_INCR) + SMR_SEQ_INCR);
}

/*
 * Advance the write sequence number for a normal smr section.  If the
 * write sequence is too far behind the read sequence we have to poll
 * to advance rd_seq and prevent undetectable wraps.
 */
static smr_seq_t
smr_default_advance(smr_t smr, smr_shared_t s)
{
	smr_seq_t goal, s_rd_seq;

	CRITICAL_ASSERT(curthread);
	KASSERT((zpcpu_get(smr)->c_flags & SMR_LAZY) == 0,
	    ("smr_default_advance: called with lazy smr."));

	/*
	 * Load the current read seq before incrementing the goal so
	 * we are guaranteed it is always < goal.
	 */
	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);
	goal = smr_shared_advance(s);

	/*
	 * Force a synchronization here if the goal is getting too
	 * far ahead of the read sequence number.  This keeps the
	 * wrap detecting arithmetic working in pathological cases.
	 */
	if (SMR_SEQ_DELTA(goal, s_rd_seq) >= SMR_SEQ_MAX_DELTA) {
		counter_u64_add(advance_wait, 1);
		smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE);
	}
	counter_u64_add(advance, 1);

	return (goal);
}

/*
 * Deferred SMRs conditionally update s_wr_seq based on a
 * cpu-local interval count.
 */
static smr_seq_t
smr_deferred_advance(smr_t smr, smr_shared_t s, smr_t self)
{

	if (++self->c_deferred < self->c_limit)
		return (smr_shared_current(s) + SMR_SEQ_INCR);
	self->c_deferred = 0;
	return (smr_default_advance(smr, s));
}

/*
 * Advance the write sequence and return the value for use as the
 * wait goal.  This guarantees that any changes made by the calling
 * thread prior to this call will be visible to all threads after
 * rd_seq meets or exceeds the return value.
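 *
 * As a sketch of how a deferred-free consumer typically uses the return
 * value (illustrative names only, not the actual UMA code):
 *
 *	batch->seq = smr_advance(smr);		// tag the retired batch
 *	... batch sits in a per-cpu cache ...
 *	// much later, just before the memory is reused:
 *	if (!smr_poll(smr, batch->seq, false))
 *		smr_poll(smr, batch->seq, true);	// or defer again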
 *
 * This function may busy loop if the readers are roughly 1 billion
 * sequence numbers behind the writers.
 *
 * Lazy SMRs will not busy loop and the wrap happens every 25 days
 * at 1khz and 60 hours at 10khz.  Readers can block for no longer
 * than half of this for SMR_SEQ_ macros to continue working.
 */
smr_seq_t
smr_advance(smr_t smr)
{
	smr_t self;
	smr_shared_t s;
	smr_seq_t goal;
	int flags;

	/*
	 * It is illegal to enter while in an smr section.
	 */
	SMR_ASSERT_NOT_ENTERED(smr);

	/*
	 * Modifications not done in a smr section need to be visible
	 * before advancing the seq.
	 */
	atomic_thread_fence_rel();

	critical_enter();
	/* Try to touch the line once. */
	self = zpcpu_get(smr);
	s = self->c_shared;
	flags = self->c_flags;
	goal = SMR_SEQ_INVALID;
	if ((flags & (SMR_LAZY | SMR_DEFERRED)) == 0)
		goal = smr_default_advance(smr, s);
	else if ((flags & SMR_LAZY) != 0)
		goal = smr_lazy_advance(smr, s);
	else if ((flags & SMR_DEFERRED) != 0)
		goal = smr_deferred_advance(smr, s, self);
	critical_exit();

	return (goal);
}

/*
 * Poll to determine the currently observed sequence number on a cpu
 * and spinwait if the 'wait' argument is true.
 */
static smr_seq_t
smr_poll_cpu(smr_t c, smr_seq_t s_rd_seq, smr_seq_t goal, bool wait)
{
	smr_seq_t c_seq;

	c_seq = SMR_SEQ_INVALID;
	for (;;) {
		c_seq = atomic_load_int(&c->c_seq);
		if (c_seq == SMR_SEQ_INVALID)
			break;

		/*
		 * There is a race described in smr.h:smr_enter that
		 * can lead to a stale seq value but not stale data
		 * access.  If we find a value out of range here we
		 * pin it to the current min to prevent it from
		 * advancing until that stale section has expired.
		 *
		 * The race is created when a cpu loads the s_wr_seq
		 * value in a local register and then another thread
		 * advances s_wr_seq and calls smr_poll() which will
		 * observe no value yet in c_seq and advance s_rd_seq
		 * up to s_wr_seq which is beyond the register
		 * cached value.  This is only likely to happen on
		 * a hypervisor or with a system management interrupt.
		 */
		if (SMR_SEQ_LT(c_seq, s_rd_seq))
			c_seq = s_rd_seq;

		/*
		 * If the sequence number meets the goal we are done
		 * with this cpu.
		 */
		if (SMR_SEQ_LEQ(goal, c_seq))
			break;

		if (!wait)
			break;
		cpu_spinwait();
	}

	return (c_seq);
}

/*
 * Loop until all cores have observed the goal sequence or have
 * gone inactive.  Returns the oldest sequence currently active.
 *
 * This function assumes a snapshot of sequence values has
 * been obtained and validated by smr_poll().
 */
static smr_seq_t
smr_poll_scan(smr_t smr, smr_shared_t s, smr_seq_t s_rd_seq,
    smr_seq_t s_wr_seq, smr_seq_t goal, bool wait)
{
	smr_seq_t rd_seq, c_seq;
	int i;

	CRITICAL_ASSERT(curthread);
	counter_u64_add_protected(poll_scan, 1);

	/*
	 * The read sequence can be no larger than the write sequence at
	 * the start of the poll.
	 */
	rd_seq = s_wr_seq;
	CPU_FOREACH(i) {
		/*
		 * Query the active sequence on this cpu.  If we're not
		 * waiting and we don't meet the goal we will still scan
		 * the rest of the cpus to update s_rd_seq before returning
		 * failure.
		 */
		c_seq = smr_poll_cpu(zpcpu_get_cpu(smr, i), s_rd_seq, goal,
		    wait);

		/*
		 * Limit the minimum observed rd_seq whether we met the goal
		 * or not.
		 */
		if (c_seq != SMR_SEQ_INVALID)
			rd_seq = SMR_SEQ_MIN(rd_seq, c_seq);
	}

	/*
	 * Advance the rd_seq as long as we observed a more recent value.
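	 *
	 * A failed cmpset here means only that another poller updated
	 * s_rd_seq concurrently.  The locally computed rd_seq is still a
	 * valid lower bound on what every cpu has observed, so it is safe
	 * to return it either way.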
	 */
	s_rd_seq = atomic_load_int(&s->s_rd_seq);
	if (SMR_SEQ_GT(rd_seq, s_rd_seq)) {
		atomic_cmpset_int(&s->s_rd_seq, s_rd_seq, rd_seq);
		s_rd_seq = rd_seq;
	}

	return (s_rd_seq);
}

/*
 * Poll to determine whether all readers have observed the 'goal' write
 * sequence number.
 *
 * If wait is true this will spin until the goal is met.
 *
 * This routine will update the minimum observed read sequence number in
 * s_rd_seq if it does a scan.  It may not do a scan if another call has
 * advanced s_rd_seq beyond the caller's goal already.
 *
 * Returns true if the goal is met and false if not.
 */
bool
smr_poll(smr_t smr, smr_seq_t goal, bool wait)
{
	smr_shared_t s;
	smr_t self;
	smr_seq_t s_wr_seq, s_rd_seq;
	smr_delta_t delta;
	int flags;
	bool success;

	/*
	 * It is illegal to enter while in an smr section.
	 */
	KASSERT(!wait || !SMR_ENTERED(smr),
	    ("smr_poll: Blocking not allowed in a SMR section."));
	KASSERT(!wait || (zpcpu_get(smr)->c_flags & SMR_LAZY) == 0,
	    ("smr_poll: Blocking not allowed on lazy smrs."));

	/*
	 * Use a critical section so that we can avoid ABA races
	 * caused by long preemption sleeps.
	 */
	success = true;
	critical_enter();
	/* Attempt to load from self only once. */
	self = zpcpu_get(smr);
	s = self->c_shared;
	flags = self->c_flags;
	counter_u64_add_protected(poll, 1);

	/*
	 * Conditionally advance the lazy write clock on any writer
	 * activity.
	 */
	if ((flags & SMR_LAZY) != 0)
		smr_lazy_advance(smr, s);

	/*
	 * Acquire barrier loads s_wr_seq after s_rd_seq so that we can not
	 * observe an updated read sequence that is larger than write.
	 */
	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);

	/*
	 * If we have already observed the sequence number we can immediately
	 * return success.  Most polls should meet this criterion.
	 */
	if (SMR_SEQ_LEQ(goal, s_rd_seq))
		goto out;

	/*
	 * wr_seq must be loaded prior to any c_seq value so that a
	 * stale c_seq can only reference time after this wr_seq.
	 */
	s_wr_seq = atomic_load_acq_int(&s->s_wr.seq);

	/*
	 * This is the distance from s_wr_seq to goal.  Positive values
	 * are in the future.
	 */
	delta = SMR_SEQ_DELTA(goal, s_wr_seq);

	/*
	 * Detect a stale wr_seq.
	 *
	 * This goal may have come from a deferred advance or a lazy
	 * smr.  If we are not blocking we can not succeed but the
	 * sequence number is valid.
	 */
	if (delta > 0 && delta <= SMR_SEQ_ADVANCE &&
	    (flags & (SMR_LAZY | SMR_DEFERRED)) != 0) {
		if (!wait) {
			success = false;
			goto out;
		}
		/* LAZY is always !wait. */
		s_wr_seq = smr_shared_advance(s);
		delta = 0;
	}

	/*
	 * Detect an invalid goal.
	 *
	 * The goal must be in the range of s_wr_seq >= goal >= s_rd_seq for
	 * it to be valid.  If it is not then the caller held on to it and
	 * the integer wrapped.  If we wrapped back within range the caller
	 * will harmlessly scan.
	 */
	if (delta > 0)
		goto out;

	/* Determine the lowest visible sequence number. */
	s_rd_seq = smr_poll_scan(smr, s, s_rd_seq, s_wr_seq, goal, wait);
	success = SMR_SEQ_LEQ(goal, s_rd_seq);
out:
	if (!success)
		counter_u64_add_protected(poll_fail, 1);
	critical_exit();

	/*
	 * Serialize with smr_advance()/smr_exit().  The caller is now free
	 * to modify memory as expected.
	 */
	atomic_thread_fence_acq();

	KASSERT(success || !wait, ("%s: blocking poll failed", __func__));
	return (success);
}

smr_t
smr_create(const char *name, int limit, int flags)
{
	smr_t smr, c;
	smr_shared_t s;
	int i;

	s = uma_zalloc(smr_shared_zone, M_WAITOK);
	smr = uma_zalloc_pcpu(smr_zone, M_WAITOK);

	s->s_name = name;
	s->s_rd_seq = s->s_wr.seq = SMR_SEQ_INIT;
	s->s_wr.ticks = ticks;

	/* Initialize all CPUS, not just those running. */
	for (i = 0; i <= mp_maxid; i++) {
		c = zpcpu_get_cpu(smr, i);
		c->c_seq = SMR_SEQ_INVALID;
		c->c_shared = s;
		c->c_deferred = 0;
		c->c_limit = limit;
		c->c_flags = flags;
	}
	atomic_thread_fence_seq_cst();

	return (smr);
}

void
smr_destroy(smr_t smr)
{

	smr_synchronize(smr);
	uma_zfree(smr_shared_zone, smr->c_shared);
	uma_zfree_pcpu(smr_zone, smr);
}

/*
 * Initialize the UMA slab zone.
 */
void
smr_init(void)
{

	smr_shared_zone = uma_zcreate("SMR SHARED", sizeof(struct smr_shared),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0);
	smr_zone = uma_zcreate("SMR CPU", sizeof(struct smr),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);
}
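
/*
 * Example usage (an illustrative sketch only; 'obj', its 'seq' member and
 * M_EXAMPLE are made-up names, and most consumers use UMA's SMR-enabled
 * zones rather than this interface directly):
 *
 *	smr_t smr = smr_create("example", 0, 0);
 *
 *	// reader
 *	smr_enter(smr);
 *	... follow lockless pointers to obj ...
 *	smr_exit(smr);
 *
 *	// writer unlinking and retiring obj
 *	obj->seq = smr_advance(smr);
 *	// reclaim only once all readers have observed obj->seq:
 *	if (smr_poll(smr, obj->seq, true))
 *		free(obj, M_EXAMPLE);
 *
 *	smr_destroy(smr);
 */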