/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019,2020 Jeffrey Roberson <jeff@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/smr.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

/*
 * This is a novel safe memory reclamation technique inspired by
 * epoch based reclamation from Samy Al Bahra's concurrency kit which
 * in turn was based on work described in:
 *   Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *   of Cambridge Computing Laboratory.
 * And shares some similarities with:
 *   Wang, Stamler, Parmer. 2016. Parallel Sections: Scaling System-Level
 *   Data-Structures.
 *
 * This is not an implementation of hazard pointers or related
 * techniques. The term safe memory reclamation is used as a
 * generic descriptor for algorithms that defer frees to avoid
 * use-after-free errors with lockless data structures.
 *
 * The basic approach is to maintain a monotonic write sequence
 * number that is updated on some application defined granularity.
 * Readers record the most recent write sequence number they have
 * observed. A shared read sequence number records the lowest
 * sequence number observed by any reader as of the last poll. Any
 * write older than this value has been observed by all readers
 * and memory can be reclaimed. Like Epoch we also detect idle
 * readers by storing an invalid sequence number in the per-cpu
 * state when the read section exits. Like Parsec we establish
 * a global write clock that is used to mark memory on free.
 *
 * The write and read sequence numbers can be thought of as a two
 * handed clock with readers always advancing towards writers. SMR
 * maintains the invariant that all readers can safely access memory
 * that was visible at the time they loaded their copy of the sequence
 * number. Periodically the read sequence or hand is polled and
 * advanced as far towards the write sequence as active readers allow.
 * Memory which was freed between the old and new global read sequence
 * number can now be reclaimed. When the system is idle the two hands
 * meet and no deferred memory is outstanding. Readers never advance
 * any sequence number, they only observe them. The shared read
 * sequence number is consequently never higher than the write sequence.
 * A stored sequence number that falls outside of this range has expired
 * and needs no scan to reclaim.
 *
 * A notable distinction between this SMR and Epoch, qsbr, rcu, etc. is
 * that advancing the sequence number is decoupled from detecting its
 * observation. This results in a more granular assignment of sequence
 * numbers even as read latencies prohibit all or some expiration.
 * It also allows writers to advance the sequence number and save the
 * poll for expiration until a later time when it is likely to
 * complete without waiting. The batch granularity and free-to-use
 * latency is dynamic and can be significantly smaller than in more
 * strict systems.
 *
 * This mechanism is primarily intended to be used in coordination with
 * UMA. By integrating with the allocator we avoid all of the callout
 * queue machinery and are provided with an efficient way to batch
 * sequence advancement and waiting. The allocator accumulates a full
 * per-cpu cache of memory before advancing the sequence. It then
 * delays waiting for this sequence to expire until the memory is
 * selected for reuse. In this way we only increment the sequence
 * value once for n=cache-size frees and the waits are done long
 * after the sequence has been expired so they need only be verified
 * to account for pathological conditions and to advance the read
 * sequence. Tying the sequence number to the bucket size has the
 * nice property that as the zone gets busier the buckets get larger
 * and the sequence writes become fewer. If the coherency of advancing
 * the write sequence number becomes too costly we can advance
 * it for every N buckets in exchange for higher free-to-use
 * latency and consequently higher memory consumption.
 *
 * If the read overhead of accessing the shared cacheline becomes
 * especially burdensome an invariant TSC could be used in place of the
 * sequence. The algorithm would then only need to maintain the minimum
 * observed tsc. This would trade potential cache synchronization
 * overhead for local serialization and cpu timestamp overhead.
 */
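
/*
 * An illustrative sketch of the usage model described above. The helper
 * names item_defer() and item_reclaim(), and the 'item' structure with its
 * 'goal' field, are hypothetical; smr_enter(), smr_exit(), smr_advance()
 * and smr_wait() are the real interfaces declared in sys/smr.h.
 *
 *      Reader:
 *              smr_enter(smr);
 *              ... lockless reads of SMR protected memory ...
 *              smr_exit(smr);
 *
 *      Writer freeing memory:
 *              item->goal = smr_advance(smr);  (tag with the write clock)
 *              item_defer(item);               (do not reuse it yet)
 *
 *      Reclaimer, before reusing the memory:
 *              smr_wait(smr, item->goal);      (all readers catch up)
 *              item_reclaim(item);
 */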

/*
 * A simplified diagram:
 *
 * 0                                                          UINT_MAX
 * | -------------------- sequence number space -------------------- |
 *              ^ rd seq                            ^ wr seq
 *              | ----- valid sequence numbers ---- |
 *                ^cpuA  ^cpuC
 * | -- free -- | --------- deferred frees -------- | ---- free ---- |
 *
 *
 * In this example cpuA has the lowest recorded sequence number and poll
 * can advance rd seq. cpuB is not running (it has stored the invalid
 * sequence number) and is therefore considered to have observed wr seq.
 *
 * Freed memory that is tagged with a sequence number between rd seq and
 * wr seq can not be safely reclaimed because cpuA may hold a reference to
 * it. Any other memory is guaranteed to be unreferenced.
 *
 * Any writer is free to advance wr seq at any time however it may busy
 * poll in pathological cases.
 */

static uma_zone_t smr_shared_zone;
static uma_zone_t smr_zone;

#ifndef INVARIANTS
#define SMR_SEQ_INIT    1       /* All valid sequence numbers are odd. */
#define SMR_SEQ_INCR    2

/*
 * SMR_SEQ_MAX_DELTA is the maximum distance allowed between rd_seq and
 * wr_seq. For the modular arithmetic to work a value of UINT_MAX / 2
 * would be possible but it is checked after we increment the wr_seq so
 * a safety margin is left to prevent overflow.
 *
 * We will block until SMR_SEQ_MAX_ADVANCE sequence numbers have progressed
 * to prevent integer wrapping. See smr_advance() for more details.
 */
#define SMR_SEQ_MAX_DELTA       (UINT_MAX / 4)
#define SMR_SEQ_MAX_ADVANCE     (SMR_SEQ_MAX_DELTA - 1024)
#else
/* We want to test the wrapping feature in invariants kernels. */
#define SMR_SEQ_INCR    (UINT_MAX / 10000)
#define SMR_SEQ_INIT    (UINT_MAX - 100000)
/* Force extra polls to test the integer overflow detection. */
#define SMR_SEQ_MAX_DELTA       (SMR_SEQ_INCR * 32)
#define SMR_SEQ_MAX_ADVANCE     (SMR_SEQ_MAX_DELTA / 2)
#endif
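
/*
 * A worked example of the wrap-safe arithmetic relied on above, assuming
 * the SMR_SEQ_* comparisons in sys/smr.h are implemented as signed 32-bit
 * subtraction: shortly after the write clock wraps we may have wr_seq = 3
 * while a stale rd_seq = UINT_MAX - 6 is still recorded. The unsigned
 * difference 3 - (UINT_MAX - 6) is 10, so SMR_SEQ_DELTA(wr_seq, rd_seq) is
 * the small positive value 10 and SMR_SEQ_GT(wr_seq, rd_seq) remains true
 * even though wr_seq is numerically smaller. The comparisons only stay
 * meaningful while the two hands are within INT_MAX of each other, which
 * is why wr_seq is never allowed to run more than SMR_SEQ_MAX_DELTA ahead
 * of rd_seq.
 */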

static SYSCTL_NODE(_debug, OID_AUTO, smr, CTLFLAG_RW, NULL, "SMR Stats");
static counter_u64_t advance = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance, CTLFLAG_RD, &advance, "");
static counter_u64_t advance_wait = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance_wait, CTLFLAG_RD,
    &advance_wait, "");
static counter_u64_t poll = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll, CTLFLAG_RD, &poll, "");
static counter_u64_t poll_scan = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_scan, CTLFLAG_RD,
    &poll_scan, "");

/*
 * Advance the write sequence and return the new value for use as the
 * wait goal. This guarantees that any changes made by the calling
 * thread prior to this call will be visible to all threads after
 * rd_seq meets or exceeds the return value.
 *
 * This function may busy loop if the readers are roughly 1 billion
 * sequence numbers behind the writers.
 */
smr_seq_t
smr_advance(smr_t smr)
{
        smr_shared_t s;
        smr_seq_t goal, s_rd_seq;

        /*
         * It is illegal to call this while in an smr section.
         */
        SMR_ASSERT_NOT_ENTERED(smr);

        /*
         * Modifications not done in a smr section need to be visible
         * before advancing the seq.
         */
        atomic_thread_fence_rel();

        /*
         * Load the current read seq before incrementing the goal so
         * we are guaranteed it is always < goal.
         */
        s = zpcpu_get(smr)->c_shared;
        s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);

        /*
         * Increment the shared write sequence by 2. Since it is
         * initialized to 1 this means the only valid values are
         * odd and an observed value of 0 in a particular CPU means
         * it is not currently in a read section.
         */
        goal = atomic_fetchadd_int(&s->s_wr_seq, SMR_SEQ_INCR) + SMR_SEQ_INCR;
        counter_u64_add(advance, 1);

        /*
         * Force a synchronization here if the goal is getting too
         * far ahead of the read sequence number. This keeps the
         * wrap detecting arithmetic working in pathological cases.
         */
        if (SMR_SEQ_DELTA(goal, s_rd_seq) >= SMR_SEQ_MAX_DELTA) {
                counter_u64_add(advance_wait, 1);
                smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE);
        }

        return (goal);
}

/*
 * Advance the write sequence at most once per 'limit' calls on this cpu.
 * Other calls return a goal one increment past the current write sequence,
 * which smr_poll() knows how to complete, amortizing the cost of writing
 * the shared clock across a batch of frees.
 */
smr_seq_t
smr_advance_deferred(smr_t smr, int limit)
{
        smr_seq_t goal;
        smr_t csmr;

        SMR_ASSERT_NOT_ENTERED(smr);

        critical_enter();
        csmr = zpcpu_get(smr);
        if (++csmr->c_deferred >= limit) {
                goal = SMR_SEQ_INVALID;
                csmr->c_deferred = 0;
        } else
                goal = smr_shared_current(csmr->c_shared) + SMR_SEQ_INCR;
        critical_exit();
        if (goal != SMR_SEQ_INVALID)
                return (goal);

        return (smr_advance(smr));
}
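
/*
 * An illustrative batching sketch using the deferred advance above. The
 * helper item_defer() and the limit of 32 are hypothetical: every free is
 * tagged with a goal, but the shared write clock is only written once per
 * 32 frees on a given cpu, matching the amortization described in the
 * header comment.
 *
 *      item->goal = smr_advance_deferred(smr, 32);
 *      item_defer(item);       (reclaim later via smr_poll())
 */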

/*
 * Poll to determine whether all readers have observed the 'goal' write
 * sequence number.
 *
 * If wait is true this will spin until the goal is met.
 *
 * This routine will update the minimum observed read sequence number in
 * s_rd_seq if it does a scan. It may not do a scan if another call has
 * advanced s_rd_seq beyond the caller's goal already.
 *
 * Returns true if the goal is met and false if not.
 */
bool
smr_poll(smr_t smr, smr_seq_t goal, bool wait)
{
        smr_shared_t s;
        smr_t c;
        smr_seq_t s_wr_seq, s_rd_seq, rd_seq, c_seq;
        int i;
        bool success;

        /*
         * It is illegal to call this with wait set while in an smr section.
         */
        KASSERT(!wait || !SMR_ENTERED(smr),
            ("smr_poll: Blocking not allowed in a SMR section."));

        /*
         * Use a critical section so that we can avoid ABA races
         * caused by long preemption sleeps.
         */
        success = true;
        critical_enter();
        s = zpcpu_get(smr)->c_shared;
        counter_u64_add_protected(poll, 1);

        /*
         * Acquire barrier loads s_wr_seq after s_rd_seq so that we can not
         * observe an updated read sequence that is larger than the write
         * sequence.
         */
        s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);

        /*
         * wr_seq must be loaded prior to any c_seq value so that a stale
         * c_seq can only reference time after this wr_seq.
         */
        s_wr_seq = atomic_load_acq_int(&s->s_wr_seq);

        /*
         * This may have come from a deferred advance. Consider one
         * increment past the current wr_seq valid and make sure we
         * have advanced far enough to succeed. We simply add to avoid
         * an additional fence.
         */
        if (goal == s_wr_seq + SMR_SEQ_INCR) {
                atomic_add_int(&s->s_wr_seq, SMR_SEQ_INCR);
                s_wr_seq = goal;
        }

        /*
         * Detect whether the goal is valid and has already been observed.
         *
         * The goal must be in the range of s_wr_seq >= goal >= s_rd_seq for
         * it to be valid. If it is not then the caller held on to it and
         * the integer wrapped. If we wrapped back within range the caller
         * will harmlessly scan.
         *
         * A valid goal must be greater than s_rd_seq or we have not verified
         * that it has been observed and must fall through to polling.
         */
        if (SMR_SEQ_GEQ(s_rd_seq, goal) || SMR_SEQ_LT(s_wr_seq, goal))
                goto out;

        /*
         * Loop until all cores have observed the goal sequence or have
         * gone inactive. Keep track of the oldest sequence currently
         * active as rd_seq.
         */
        counter_u64_add_protected(poll_scan, 1);
        rd_seq = s_wr_seq;
        CPU_FOREACH(i) {
                c = zpcpu_get_cpu(smr, i);
                c_seq = SMR_SEQ_INVALID;
                for (;;) {
                        c_seq = atomic_load_int(&c->c_seq);
                        if (c_seq == SMR_SEQ_INVALID)
                                break;

                        /*
                         * There is a race described in smr.h:smr_enter that
                         * can lead to a stale seq value but not stale data
                         * access. If we find a value out of range here we
                         * pin it to the current min to prevent it from
                         * advancing until that stale section has expired.
                         *
                         * The race is created when a cpu loads the s_wr_seq
                         * value in a local register and then another thread
                         * advances s_wr_seq and calls smr_poll() which will
                         * observe no value yet in c_seq and advance s_rd_seq
                         * up to s_wr_seq which is beyond the register
                         * cached value. This is only likely to happen on
                         * a hypervisor or with a system management interrupt.
                         */
                        if (SMR_SEQ_LT(c_seq, s_rd_seq))
                                c_seq = s_rd_seq;

                        /*
                         * If the sequence number meets the goal we are
                         * done with this cpu.
                         */
                        if (SMR_SEQ_GEQ(c_seq, goal))
                                break;

                        /*
                         * If we're not waiting we will still scan the rest
                         * of the cpus and update s_rd_seq before returning
                         * an error.
                         */
                        if (!wait) {
                                success = false;
                                break;
                        }
                        cpu_spinwait();
                }

                /*
                 * Limit the minimum observed rd_seq whether we met the goal
                 * or not.
                 */
                if (c_seq != SMR_SEQ_INVALID && SMR_SEQ_GT(rd_seq, c_seq))
                        rd_seq = c_seq;
        }

        /*
         * Advance the rd_seq as long as we observed the most recent one.
         */
        s_rd_seq = atomic_load_int(&s->s_rd_seq);
        do {
                if (SMR_SEQ_LEQ(rd_seq, s_rd_seq))
                        goto out;
        } while (atomic_fcmpset_int(&s->s_rd_seq, &s_rd_seq, rd_seq) == 0);

out:
        critical_exit();

        /*
         * Serialize with smr_advance()/smr_exit(). The caller is now free
         * to modify memory as expected.
         */
        atomic_thread_fence_acq();

        return (success);
}
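
/*
 * An illustrative non-blocking reclamation sketch (item_oldest_deferred()
 * and item_reclaim() are hypothetical helpers): opportunistically free
 * expired items in the order they were deferred and stop at the first goal
 * that has not yet been observed, rather than spinning. This is the
 * intended use of wait == false.
 *
 *      while ((item = item_oldest_deferred()) != NULL) {
 *              if (!smr_poll(smr, item->goal, false))
 *                      break;          (not yet observed by all readers)
 *              item_reclaim(item);
 *      }
 */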

smr_t
smr_create(const char *name)
{
        smr_t smr, c;
        smr_shared_t s;
        int i;

        s = uma_zalloc(smr_shared_zone, M_WAITOK);
        smr = uma_zalloc_pcpu(smr_zone, M_WAITOK);

        s->s_name = name;
        s->s_rd_seq = s->s_wr_seq = SMR_SEQ_INIT;

        /* Initialize all CPUs, not just those running. */
        for (i = 0; i <= mp_maxid; i++) {
                c = zpcpu_get_cpu(smr, i);
                c->c_seq = SMR_SEQ_INVALID;
                c->c_shared = s;
        }
        atomic_thread_fence_seq_cst();

        return (smr);
}

void
smr_destroy(smr_t smr)
{

        smr_synchronize(smr);
        uma_zfree(smr_shared_zone, smr->c_shared);
        uma_zfree_pcpu(smr_zone, smr);
}

/*
 * Initialize the UMA slab zones.
 */
void
smr_init(void)
{

        smr_shared_zone = uma_zcreate("SMR SHARED", sizeof(struct smr_shared),
            NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0);
        smr_zone = uma_zcreate("SMR CPU", sizeof(struct smr),
            NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);
}

static void
smr_init_counters(void *unused)
{

        advance = counter_u64_alloc(M_WAITOK);
        advance_wait = counter_u64_alloc(M_WAITOK);
        poll = counter_u64_alloc(M_WAITOK);
        poll_scan = counter_u64_alloc(M_WAITOK);
}
SYSINIT(smr_counters, SI_SUB_CPU, SI_ORDER_ANY, smr_init_counters, NULL);
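
/*
 * Lifecycle sketch for a hypothetical consumer ("foo"):
 *
 *      foo_smr = smr_create("foo");    (at subsystem initialization)
 *      ...
 *      smr_destroy(foo_smr);           (once no readers or writers remain)
 *
 * smr_destroy() calls smr_synchronize() before releasing the per-cpu and
 * shared state.
 */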