1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2019,2020 Jeffrey Roberson <jeff@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice unmodified, this list of conditions, and the following 11 * disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/limits.h> 34 #include <sys/kernel.h> 35 #include <sys/proc.h> 36 #include <sys/smp.h> 37 #include <sys/smr.h> 38 39 #include <vm/uma.h> 40 41 /* 42 * This is a novel safe memory reclamation technique inspired by 43 * epoch based reclamation from Samy Al Bahra's concurrency kit which 44 * in turn was based on work described in: 45 * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University 46 * of Cambridge Computing Laboratory. 47 * And shares some similarities with: 48 * Wang, Stamler, Parmer. 2016 Parallel Sections: Scaling System-Level 49 * Data-Structures 50 * 51 * This is not an implementation of hazard pointers or related 52 * techniques. The term safe memory reclamation is used as a 53 * generic descriptor for algorithms that defer frees to avoid 54 * use-after-free errors with lockless datastructures. 55 * 56 * The basic approach is to maintain a monotonic write sequence 57 * number that is updated on some application defined granularity. 58 * Readers record the most recent write sequence number they have 59 * observed. A shared read sequence number records the lowest 60 * sequence number observed by any reader as of the last poll. Any 61 * write older than this value has been observed by all readers 62 * and memory can be reclaimed. Like Epoch we also detect idle 63 * readers by storing an invalid sequence number in the per-cpu 64 * state when the read section exits. Like Parsec we establish 65 * a global write clock that is used to mark memory on free. 66 * 67 * The write and read sequence numbers can be thought of as a two 68 * handed clock with readers always advancing towards writers. SMR 69 * maintains the invariant that all readers can safely access memory 70 * that was visible at the time they loaded their copy of the sequence 71 * number. Periodically the read sequence or hand is polled and 72 * advanced as far towards the write sequence as active readers allow. 73 * Memory which was freed between the old and new global read sequence 74 * number can now be reclaimed. When the system is idle the two hands 75 * meet and no deferred memory is outstanding. Readers never advance 76 * any sequence number, they only observe them. The shared read 77 * sequence number is consequently never higher than the write sequence. 78 * A stored sequence number that falls outside of this range has expired 79 * and needs no scan to reclaim. 80 * 81 * A notable distinction between this SMR and Epoch, qsbr, rcu, etc. is 82 * that advancing the sequence number is decoupled from detecting its 83 * observation. This results in a more granular assignment of sequence 84 * numbers even as read latencies prohibit all or some expiration. 85 * It also allows writers to advance the sequence number and save the 86 * poll for expiration until a later time when it is likely to 87 * complete without waiting. The batch granularity and free-to-use 88 * latency is dynamic and can be significantly smaller than in more 89 * strict systems. 90 * 91 * This mechanism is primarily intended to be used in coordination with 92 * UMA. By integrating with the allocator we avoid all of the callout 93 * queue machinery and are provided with an efficient way to batch 94 * sequence advancement and waiting. The allocator accumulates a full 95 * per-cpu cache of memory before advancing the sequence. It then 96 * delays waiting for this sequence to expire until the memory is 97 * selected for reuse. In this way we only increment the sequence 98 * value once for n=cache-size frees and the waits are done long 99 * after the sequence has been expired so they need only be verified 100 * to account for pathological conditions and to advance the read 101 * sequence. Tying the sequence number to the bucket size has the 102 * nice property that as the zone gets busier the buckets get larger 103 * and the sequence writes become fewer. If the coherency of advancing 104 * the write sequence number becomes too costly we can advance 105 * it for every N buckets in exchange for higher free-to-use 106 * latency and consequently higher memory consumption. 107 * 108 * If the read overhead of accessing the shared cacheline becomes 109 * especially burdensome an invariant TSC could be used in place of the 110 * sequence. The algorithm would then only need to maintain the minimum 111 * observed tsc. This would trade potential cache synchronization 112 * overhead for local serialization and cpu timestamp overhead. 113 */ 114 115 /* 116 * A simplified diagram: 117 * 118 * 0 UINT_MAX 119 * | -------------------- sequence number space -------------------- | 120 * ^ rd seq ^ wr seq 121 * | ----- valid sequence numbers ---- | 122 * ^cpuA ^cpuC 123 * | -- free -- | --------- deferred frees -------- | ---- free ---- | 124 * 125 * 126 * In this example cpuA has the lowest sequence number and poll can 127 * advance rd seq. cpuB is not running and is considered to observe 128 * wr seq. 129 * 130 * Freed memory that is tagged with a sequence number between rd seq and 131 * wr seq can not be safely reclaimed because cpuA may hold a reference to 132 * it. Any other memory is guaranteed to be unreferenced. 133 * 134 * Any writer is free to advance wr seq at any time however it may busy 135 * poll in pathological cases. 136 */ 137 138 static uma_zone_t smr_shared_zone; 139 static uma_zone_t smr_zone; 140 141 #ifndef INVARIANTS 142 #define SMR_SEQ_INIT 1 /* All valid sequence numbers are odd. */ 143 #define SMR_SEQ_INCR 2 144 145 /* 146 * SMR_SEQ_MAX_DELTA is the maximum distance allowed between rd_seq and 147 * wr_seq. For the modular arithmetic to work a value of UNIT_MAX / 2 148 * would be possible but it is checked after we increment the wr_seq so 149 * a safety margin is left to prevent overflow. 150 * 151 * We will block until SMR_SEQ_MAX_ADVANCE sequence numbers have progressed 152 * to prevent integer wrapping. See smr_advance() for more details. 153 */ 154 #define SMR_SEQ_MAX_DELTA (UINT_MAX / 4) 155 #define SMR_SEQ_MAX_ADVANCE (SMR_SEQ_MAX_DELTA - 1024) 156 #else 157 /* We want to test the wrapping feature in invariants kernels. */ 158 #define SMR_SEQ_INCR (UINT_MAX / 10000) 159 #define SMR_SEQ_INIT (UINT_MAX - 100000) 160 /* Force extra polls to test the integer overflow detection. */ 161 #define SMR_SEQ_MAX_DELTA (1000) 162 #define SMR_SEQ_MAX_ADVANCE SMR_SEQ_MAX_DELTA / 2 163 #endif 164 165 /* 166 * Advance the write sequence and return the new value for use as the 167 * wait goal. This guarantees that any changes made by the calling 168 * thread prior to this call will be visible to all threads after 169 * rd_seq meets or exceeds the return value. 170 * 171 * This function may busy loop if the readers are roughly 1 billion 172 * sequence numbers behind the writers. 173 */ 174 smr_seq_t 175 smr_advance(smr_t smr) 176 { 177 smr_shared_t s; 178 smr_seq_t goal; 179 180 /* 181 * It is illegal to enter while in an smr section. 182 */ 183 KASSERT(curthread->td_critnest == 0, 184 ("smr_advance: Not allowed in a critical section.")); 185 186 /* 187 * Modifications not done in a smr section need to be visible 188 * before advancing the seq. 189 */ 190 atomic_thread_fence_rel(); 191 192 /* 193 * Increment the shared write sequence by 2. Since it is 194 * initialized to 1 this means the only valid values are 195 * odd and an observed value of 0 in a particular CPU means 196 * it is not currently in a read section. 197 */ 198 s = smr->c_shared; 199 goal = atomic_fetchadd_int(&s->s_wr_seq, SMR_SEQ_INCR) + SMR_SEQ_INCR; 200 201 /* 202 * Force a synchronization here if the goal is getting too 203 * far ahead of the read sequence number. This keeps the 204 * wrap detecting arithmetic working in pathological cases. 205 */ 206 if (goal - atomic_load_int(&s->s_rd_seq) >= SMR_SEQ_MAX_DELTA) 207 smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE); 208 209 return (goal); 210 } 211 212 /* 213 * Poll to determine whether all readers have observed the 'goal' write 214 * sequence number. 215 * 216 * If wait is true this will spin until the goal is met. 217 * 218 * This routine will updated the minimum observed read sequence number in 219 * s_rd_seq if it does a scan. It may not do a scan if another call has 220 * advanced s_rd_seq beyond the callers goal already. 221 * 222 * Returns true if the goal is met and false if not. 223 */ 224 bool 225 smr_poll(smr_t smr, smr_seq_t goal, bool wait) 226 { 227 smr_shared_t s; 228 smr_t c; 229 smr_seq_t s_wr_seq, s_rd_seq, rd_seq, c_seq; 230 int i; 231 bool success; 232 233 /* 234 * It is illegal to enter while in an smr section. 235 */ 236 KASSERT(!wait || curthread->td_critnest == 0, 237 ("smr_poll: Blocking not allowed in a critical section.")); 238 239 /* 240 * Use a critical section so that we can avoid ABA races 241 * caused by long preemption sleeps. 242 */ 243 success = true; 244 critical_enter(); 245 s = smr->c_shared; 246 247 /* 248 * Acquire barrier loads s_wr_seq after s_rd_seq so that we can not 249 * observe an updated read sequence that is larger than write. 250 */ 251 s_rd_seq = atomic_load_acq_int(&s->s_rd_seq); 252 s_wr_seq = smr_current(smr); 253 254 /* 255 * Detect whether the goal is valid and has already been observed. 256 * 257 * The goal must be in the range of s_wr_seq >= goal >= s_rd_seq for 258 * it to be valid. If it is not then the caller held on to it and 259 * the integer wrapped. If we wrapped back within range the caller 260 * will harmlessly scan. 261 * 262 * A valid goal must be greater than s_rd_seq or we have not verified 263 * that it has been observed and must fall through to polling. 264 */ 265 if (SMR_SEQ_GEQ(s_rd_seq, goal) || SMR_SEQ_LT(s_wr_seq, goal)) 266 goto out; 267 268 /* 269 * Loop until all cores have observed the goal sequence or have 270 * gone inactive. Keep track of the oldest sequence currently 271 * active as rd_seq. 272 */ 273 rd_seq = s_wr_seq; 274 CPU_FOREACH(i) { 275 c = zpcpu_get_cpu(smr, i); 276 c_seq = SMR_SEQ_INVALID; 277 for (;;) { 278 c_seq = atomic_load_int(&c->c_seq); 279 if (c_seq == SMR_SEQ_INVALID) 280 break; 281 282 /* 283 * There is a race described in smr.h:smr_enter that 284 * can lead to a stale seq value but not stale data 285 * access. If we find a value out of range here we 286 * pin it to the current min to prevent it from 287 * advancing until that stale section has expired. 288 * 289 * The race is created when a cpu loads the s_wr_seq 290 * value in a local register and then another thread 291 * advances s_wr_seq and calls smr_poll() which will 292 * oberve no value yet in c_seq and advance s_rd_seq 293 * up to s_wr_seq which is beyond the register 294 * cached value. This is only likely to happen on 295 * hypervisor or with a system management interrupt. 296 */ 297 if (SMR_SEQ_LT(c_seq, s_rd_seq)) 298 c_seq = s_rd_seq; 299 300 /* 301 * If the sequence number meets the goal we are 302 * done with this cpu. 303 */ 304 if (SMR_SEQ_GEQ(c_seq, goal)) 305 break; 306 307 /* 308 * If we're not waiting we will still scan the rest 309 * of the cpus and update s_rd_seq before returning 310 * an error. 311 */ 312 if (!wait) { 313 success = false; 314 break; 315 } 316 cpu_spinwait(); 317 } 318 319 /* 320 * Limit the minimum observed rd_seq whether we met the goal 321 * or not. 322 */ 323 if (c_seq != SMR_SEQ_INVALID && SMR_SEQ_GT(rd_seq, c_seq)) 324 rd_seq = c_seq; 325 } 326 327 /* 328 * Advance the rd_seq as long as we observed the most recent one. 329 */ 330 s_rd_seq = atomic_load_int(&s->s_rd_seq); 331 do { 332 if (SMR_SEQ_LEQ(rd_seq, s_rd_seq)) 333 break; 334 } while (atomic_fcmpset_int(&s->s_rd_seq, &s_rd_seq, rd_seq) == 0); 335 336 out: 337 critical_exit(); 338 339 return (success); 340 } 341 342 smr_t 343 smr_create(const char *name) 344 { 345 smr_t smr, c; 346 smr_shared_t s; 347 int i; 348 349 s = uma_zalloc(smr_shared_zone, M_WAITOK); 350 smr = uma_zalloc(smr_zone, M_WAITOK); 351 352 s->s_name = name; 353 s->s_rd_seq = s->s_wr_seq = SMR_SEQ_INIT; 354 355 /* Initialize all CPUS, not just those running. */ 356 for (i = 0; i <= mp_maxid; i++) { 357 c = zpcpu_get_cpu(smr, i); 358 c->c_seq = SMR_SEQ_INVALID; 359 c->c_shared = s; 360 } 361 atomic_thread_fence_seq_cst(); 362 363 return (smr); 364 } 365 366 void 367 smr_destroy(smr_t smr) 368 { 369 370 smr_synchronize(smr); 371 uma_zfree(smr_shared_zone, smr->c_shared); 372 uma_zfree(smr_zone, smr); 373 } 374 375 /* 376 * Initialize the UMA slab zone. 377 */ 378 void 379 smr_init(void) 380 { 381 382 smr_shared_zone = uma_zcreate("SMR SHARED", sizeof(struct smr_shared), 383 NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0); 384 smr_zone = uma_zcreate("SMR CPU", sizeof(struct smr), 385 NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU); 386 } 387