/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019,2020 Jeffrey Roberson <jeff@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/smr.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

/*
 * Global Unbounded Sequences (GUS)
 *
 * This is a novel safe memory reclamation technique inspired by
 * epoch based reclamation from Samy Al Bahra's concurrency kit which
 * in turn was based on work described in:
 *	Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *	of Cambridge Computing Laboratory.
 * And shares some similarities with:
 *	Wang, Stamler, Parmer. 2016 Parallel Sections: Scaling System-Level
 *	Data-Structures
 *
 * This is not an implementation of hazard pointers or related
 * techniques.  The term safe memory reclamation is used as a
 * generic descriptor for algorithms that defer frees to avoid
 * use-after-free errors with lockless datastructures or as
 * a mechanism to detect quiescence for writer synchronization.
 *
 * The basic approach is to maintain a monotonic write sequence
 * number that is updated on some application defined granularity.
 * Readers record the most recent write sequence number they have
 * observed.  A shared read sequence number records the lowest
 * sequence number observed by any reader as of the last poll.  Any
 * write older than this value has been observed by all readers
 * and memory can be reclaimed.  Like Epoch we also detect idle
 * readers by storing an invalid sequence number in the per-cpu
 * state when the read section exits.  Like Parsec we establish
 * a global write clock that is used to mark memory on free.
 *
 * The write and read sequence numbers can be thought of as a two
 * handed clock with readers always advancing towards writers.  GUS
 * maintains the invariant that all readers can safely access memory
 * that was visible at the time they loaded their copy of the sequence
 * number.  Periodically the read sequence or hand is polled and
 * advanced as far towards the write sequence as active readers allow.
 * Memory which was freed between the old and new global read sequence
 * number can now be reclaimed.  When the system is idle the two hands
 * meet and no deferred memory is outstanding.  Readers never advance
 * any sequence number, they only observe them.  The shared read
 * sequence number is consequently never higher than the write sequence.
 * A stored sequence number that falls outside of this range has expired
 * and needs no scan to reclaim.
 *
 * A notable distinction between GUS and Epoch, qsbr, rcu, etc. is
 * that advancing the sequence number is decoupled from detecting its
 * observation.  That is to say, the delta between read and write
 * sequence numbers is not bound.  This can be thought of as a more
 * generalized form of epoch, which requires that they be at most one
 * step apart.  This results in a more granular assignment of sequence
 * numbers even as read latencies prohibit all or some expiration.
 * It also allows writers to advance the sequence number and save the
 * poll for expiration until a later time when it is likely to
 * complete without waiting.  The batch granularity and free-to-use
 * latency are dynamic and can be significantly smaller than in more
 * strict systems.
 *
 * This mechanism is primarily intended to be used in coordination with
 * UMA.  By integrating with the allocator we avoid all of the callout
 * queue machinery and are provided with an efficient way to batch
 * sequence advancement and waiting.  The allocator accumulates a full
 * per-cpu cache of memory before advancing the sequence.  It then
 * delays waiting for this sequence to expire until the memory is
 * selected for reuse.  In this way we only increment the sequence
 * value once for n=cache-size frees and the waits are done long
 * after the sequence has been expired so they need only be verified
 * to account for pathological conditions and to advance the read
 * sequence.  Tying the sequence number to the bucket size has the
 * nice property that as the zone gets busier the buckets get larger
 * and the sequence writes become fewer.  If the coherency of advancing
 * the write sequence number becomes too costly we can advance
 * it for every N buckets in exchange for higher free-to-use
 * latency and consequently higher memory consumption.
 *
 * If the read overhead of accessing the shared cacheline becomes
 * especially burdensome an invariant TSC could be used in place of the
 * sequence.  The algorithm would then only need to maintain the minimum
 * observed tsc.  This would trade potential cache synchronization
 * overhead for local serialization and cpu timestamp overhead.
 */
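
/*
 * An illustrative (and purely hypothetical) consumer, sketched here only to
 * show how the pieces described above are meant to fit together.  The "foo"
 * names below do not exist anywhere in the tree:
 *
 *	static smr_t foo_smr;		(created once with smr_create())
 *	static struct foo *foo_ptr;	(pointer read locklessly)
 *
 *	Reader:
 *		smr_enter(foo_smr);
 *		p = (struct foo *)atomic_load_ptr(&foo_ptr);
 *		... read-only access to *p, valid until smr_exit() ...
 *		smr_exit(foo_smr);
 *
 *	Writer retiring the old object:
 *		old = foo_ptr;
 *		atomic_store_rel_ptr((uintptr_t *)&foo_ptr, (uintptr_t)new);
 *		goal = smr_advance(foo_smr);
 *		... later, before reusing 'old' ...
 *		smr_wait(foo_smr, goal);	(or smr_poll(foo_smr, goal, false))
 *		free(old, M_FOO);
 */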

/*
 * A simplified diagram:
 *
 * 0                                                          UINT_MAX
 * | -------------------- sequence number space -------------------- |
 *              ^ rd seq                            ^ wr seq
 *              | ----- valid sequence numbers ---- |
 *                ^cpuA  ^cpuC
 * | -- free -- | --------- deferred frees -------- | ---- free ---- |
 *
 *
 * In this example cpuA has the lowest sequence number and poll can
 * advance rd seq.  cpuB is not running and is considered to observe
 * wr seq.
 *
 * Freed memory that is tagged with a sequence number between rd seq and
 * wr seq can not be safely reclaimed because cpuA may hold a reference to
 * it.  Any other memory is guaranteed to be unreferenced.
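 *
 * The SMR_SEQ_ comparison macros (sys/smr.h) operate on the signed delta of
 * two unsigned sequence values, so they remain correct across integer wrap
 * as long as the two values are within half of the sequence space of each
 * other.  As a worked example, SMR_SEQ_GT(2, UINT_MAX - 2) is true because
 * the signed delta is +5; bounding rd seq and wr seq by SMR_SEQ_MAX_DELTA
 * preserves this property.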
 *
 * Any writer is free to advance wr seq at any time however it may busy
 * poll in pathological cases.
 */

static uma_zone_t smr_shared_zone;
static uma_zone_t smr_zone;

#ifndef INVARIANTS
#define	SMR_SEQ_INIT	1		/* All valid sequence numbers are odd. */
#define	SMR_SEQ_INCR	2

/*
 * SMR_SEQ_MAX_DELTA is the maximum distance allowed between rd_seq and
 * wr_seq.  For the modular arithmetic to work a value of UINT_MAX / 2
 * would be possible but it is checked after we increment the wr_seq so
 * a safety margin is left to prevent overflow.
 *
 * We will block until SMR_SEQ_MAX_ADVANCE sequence numbers have progressed
 * to prevent integer wrapping.  See smr_advance() for more details.
 */
#define	SMR_SEQ_MAX_DELTA	(UINT_MAX / 4)
#define	SMR_SEQ_MAX_ADVANCE	(SMR_SEQ_MAX_DELTA - 1024)
#else
/* We want to test the wrapping feature in invariants kernels. */
#define	SMR_SEQ_INCR	(UINT_MAX / 10000)
#define	SMR_SEQ_INIT	(UINT_MAX - 100000)
/* Force extra polls to test the integer overflow detection. */
#define	SMR_SEQ_MAX_DELTA	(SMR_SEQ_INCR * 32)
#define	SMR_SEQ_MAX_ADVANCE	(SMR_SEQ_MAX_DELTA / 2)
#endif

/*
 * The grace period for lazy (tick based) SMR.
 *
 * Hardclock is responsible for advancing ticks on a single CPU while every
 * CPU receives a regular clock interrupt.  The clock interrupts are flushing
 * the store buffers and any speculative loads that may violate our invariants.
 * Because these interrupts are not synchronized we must wait one additional
 * tick in the future to be certain that all processors have had their state
 * synchronized by an interrupt.
 *
 * This assumes that the clock interrupt will only be delayed by other causes
 * that will flush the store buffer or prevent access to the section protected
 * data.  For example, an idle processor, or a system management interrupt,
 * or a vm exit.
 *
 * We must wait one additional tick if we are around the wrap condition
 * because the write seq will move forward by two with one interrupt.
 */
#define	SMR_LAZY_GRACE		2
#define	SMR_LAZY_GRACE_MAX	(SMR_LAZY_GRACE + 1)

/*
 * The maximum sequence number ahead of wr_seq that may still be valid.  The
 * sequence may not be advanced on write for lazy or deferred SMRs.  In this
 * case poll needs to attempt to forward the sequence number if the goal is
 * within wr_seq + SMR_SEQ_ADVANCE.
 */
#define	SMR_SEQ_ADVANCE		MAX(SMR_SEQ_INCR, SMR_LAZY_GRACE_MAX)

static SYSCTL_NODE(_debug, OID_AUTO, smr, CTLFLAG_RW, NULL, "SMR Stats");
static counter_u64_t advance = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance, CTLFLAG_RW, &advance, "");
static counter_u64_t advance_wait = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance_wait, CTLFLAG_RW, &advance_wait, "");
static counter_u64_t poll = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll, CTLFLAG_RW, &poll, "");
static counter_u64_t poll_scan = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_scan, CTLFLAG_RW, &poll_scan, "");
static counter_u64_t poll_fail = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_fail, CTLFLAG_RW, &poll_fail, "");

/*
 * Advance a lazy write sequence number.  These move forward at the rate of
 * ticks.  Grace is two ticks in the future.
 * Lazy write sequence numbers can be even but not SMR_SEQ_INVALID so we
 * pause time for a tick when we wrap.
 *
 * This returns the _current_ write sequence number.  The lazy goal sequence
 * number is SMR_LAZY_GRACE ticks ahead.
 */
static smr_seq_t
smr_lazy_advance(smr_t smr, smr_shared_t s)
{
	smr_seq_t s_rd_seq, s_wr_seq, goal;
	int t;

	CRITICAL_ASSERT(curthread);

	/*
	 * Load s_wr_seq prior to ticks to ensure that the thread that
	 * observes the largest value wins.
	 */
	s_wr_seq = atomic_load_acq_int(&s->s_wr_seq);

	/*
	 * We must not allow a zero tick value.  We go back in time one tick
	 * and advance the grace period forward one tick around zero.
	 */
	t = ticks;
	if (t == SMR_SEQ_INVALID)
		t--;

	/*
	 * The most probable condition is that the update already took place.
	 */
	if (__predict_true(t == s_wr_seq))
		goto out;

	/*
	 * After long idle periods the read sequence may fall too far
	 * behind write.  Prevent poll from ever seeing this condition
	 * by updating the stale rd_seq.  This assumes that there can
	 * be no valid section 2bn ticks old.  The rd_seq update must
	 * be visible before wr_seq to avoid races with other advance
	 * callers.
	 */
	s_rd_seq = atomic_load_int(&s->s_rd_seq);
	if (SMR_SEQ_GT(s_rd_seq, t))
		atomic_cmpset_rel_int(&s->s_rd_seq, s_rd_seq, t);

	/*
	 * Release to synchronize with the wr_seq load above.  Ignore
	 * cmpset failures from simultaneous updates.
	 */
	atomic_cmpset_rel_int(&s->s_wr_seq, s_wr_seq, t);
	counter_u64_add(advance, 1);
	/* If we lost either update race another thread did it. */
	s_wr_seq = t;
out:
	goal = s_wr_seq + SMR_LAZY_GRACE;
	/* Skip over the SMR_SEQ_INVALID tick. */
	if (goal < SMR_LAZY_GRACE)
		goal++;
	return (goal);
}

/*
 * Increment the shared write sequence by 2.  Since it is initialized
 * to 1 this means the only valid values are odd and an observed value
 * of 0 in a particular CPU means it is not currently in a read section.
 */
static smr_seq_t
smr_shared_advance(smr_shared_t s)
{

	return (atomic_fetchadd_int(&s->s_wr_seq, SMR_SEQ_INCR) +
	    SMR_SEQ_INCR);
}

/*
 * Advance the write sequence number for a normal smr section.  If the
 * write sequence gets too far ahead of the read sequence we have to poll
 * to advance rd_seq and prevent undetectable wraps.
 */
static smr_seq_t
smr_default_advance(smr_t smr, smr_shared_t s)
{
	smr_seq_t goal, s_rd_seq;

	CRITICAL_ASSERT(curthread);
	KASSERT((zpcpu_get(smr)->c_flags & SMR_LAZY) == 0,
	    ("smr_default_advance: called with lazy smr."));

	/*
	 * Load the current read seq before incrementing the goal so
	 * we are guaranteed it is always < goal.
	 */
	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);
	goal = smr_shared_advance(s);

	/*
	 * Force a synchronization here if the goal is getting too
	 * far ahead of the read sequence number.  This keeps the
	 * wrap detecting arithmetic working in pathological cases.
	 */
	if (SMR_SEQ_DELTA(goal, s_rd_seq) >= SMR_SEQ_MAX_DELTA) {
		counter_u64_add(advance_wait, 1);
		smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE);
	}
	counter_u64_add(advance, 1);

	return (goal);
}

/*
 * Deferred SMRs conditionally update s_wr_seq based on a CPU-local
 * interval count.
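 *
 * For example, with c_limit set to N only one in every N calls performs a
 * real shared advance; the remaining calls return the currently published
 * write sequence plus one increment as a conservative goal, which a later
 * smr_poll() may need to forward itself (see the stale wr_seq handling
 * there).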
 */
static smr_seq_t
smr_deferred_advance(smr_t smr, smr_shared_t s, smr_t self)
{

	if (++self->c_deferred < self->c_limit)
		return (smr_shared_current(s) + SMR_SEQ_INCR);
	self->c_deferred = 0;
	return (smr_default_advance(smr, s));
}

/*
 * Advance the write sequence and return the value for use as the
 * wait goal.  This guarantees that any changes made by the calling
 * thread prior to this call will be visible to all threads after
 * rd_seq meets or exceeds the return value.
 *
 * This function may busy loop if the readers are roughly 1 billion
 * sequence numbers behind the writers.
 *
 * Lazy SMRs will not busy loop and the wrap happens every 49.6 days
 * at 1khz and 119 hours at 10khz.  Readers can block for no longer
 * than half of this for SMR_SEQ_ macros to continue working.
 */
smr_seq_t
smr_advance(smr_t smr)
{
	smr_t self;
	smr_shared_t s;
	smr_seq_t goal;
	int flags;

	/*
	 * It is illegal to enter while in an smr section.
	 */
	SMR_ASSERT_NOT_ENTERED(smr);

	/*
	 * Modifications not done in a smr section need to be visible
	 * before advancing the seq.
	 */
	atomic_thread_fence_rel();

	critical_enter();
	/* Try to touch the line once. */
	self = zpcpu_get(smr);
	s = self->c_shared;
	flags = self->c_flags;
	goal = SMR_SEQ_INVALID;
	if ((flags & (SMR_LAZY | SMR_DEFERRED)) == 0)
		goal = smr_default_advance(smr, s);
	else if ((flags & SMR_LAZY) != 0)
		goal = smr_lazy_advance(smr, s);
	else if ((flags & SMR_DEFERRED) != 0)
		goal = smr_deferred_advance(smr, s, self);
	critical_exit();

	return (goal);
}

/*
 * Poll to determine the currently observed sequence number on a cpu
 * and spinwait if the 'wait' argument is true.
 */
static smr_seq_t
smr_poll_cpu(smr_t c, smr_seq_t s_rd_seq, smr_seq_t goal, bool wait)
{
	smr_seq_t c_seq;

	c_seq = SMR_SEQ_INVALID;
	for (;;) {
		c_seq = atomic_load_int(&c->c_seq);
		if (c_seq == SMR_SEQ_INVALID)
			break;

		/*
		 * There is a race described in smr.h:smr_enter that
		 * can lead to a stale seq value but not stale data
		 * access.  If we find a value out of range here we
		 * pin it to the current min to prevent it from
		 * advancing until that stale section has expired.
		 *
		 * The race is created when a cpu loads the s_wr_seq
		 * value in a local register and then another thread
		 * advances s_wr_seq and calls smr_poll() which will
		 * observe no value yet in c_seq and advance s_rd_seq
		 * up to s_wr_seq which is beyond the register
		 * cached value.  This is only likely to happen on
		 * a hypervisor or with a system management interrupt.
		 */
		if (SMR_SEQ_LT(c_seq, s_rd_seq))
			c_seq = s_rd_seq;

		/*
		 * If the sequence number meets the goal we are done
		 * with this cpu.
		 */
		if (SMR_SEQ_LEQ(goal, c_seq))
			break;

		if (!wait)
			break;
		cpu_spinwait();
	}

	return (c_seq);
}

/*
 * Loop until all cores have observed the goal sequence or have
 * gone inactive.  Returns the oldest sequence currently active.
 *
 * This function assumes a snapshot of sequence values has
 * been obtained and validated by smr_poll().
 */
static smr_seq_t
smr_poll_scan(smr_t smr, smr_shared_t s, smr_seq_t s_rd_seq,
    smr_seq_t s_wr_seq, smr_seq_t goal, bool wait)
{
	smr_seq_t rd_seq, c_seq;
	int i;

	CRITICAL_ASSERT(curthread);
	counter_u64_add_protected(poll_scan, 1);

	/*
	 * The read sequence can be no larger than the write sequence at
	 * the start of the poll.
	 */
	rd_seq = s_wr_seq;
	CPU_FOREACH(i) {
		/*
		 * Query the active sequence on this cpu.  If we're not
		 * waiting and we don't meet the goal we will still scan
		 * the rest of the cpus to update s_rd_seq before returning
		 * failure.
		 */
		c_seq = smr_poll_cpu(zpcpu_get_cpu(smr, i), s_rd_seq, goal,
		    wait);

		/*
		 * Limit the minimum observed rd_seq whether we met the goal
		 * or not.
		 */
		if (c_seq != SMR_SEQ_INVALID)
			rd_seq = SMR_SEQ_MIN(rd_seq, c_seq);
	}

	/*
	 * Advance the rd_seq as long as we observed a more recent value.
	 */
	s_rd_seq = atomic_load_int(&s->s_rd_seq);
	if (SMR_SEQ_GEQ(rd_seq, s_rd_seq)) {
		atomic_cmpset_int(&s->s_rd_seq, s_rd_seq, rd_seq);
		s_rd_seq = rd_seq;
	}

	return (s_rd_seq);
}

/*
 * Poll to determine whether all readers have observed the 'goal' write
 * sequence number.
 *
 * If wait is true this will spin until the goal is met.
 *
 * This routine will update the minimum observed read sequence number in
 * s_rd_seq if it does a scan.  It may not do a scan if another call has
 * advanced s_rd_seq beyond the caller's goal already.
 *
 * Returns true if the goal is met and false if not.
 */
bool
smr_poll(smr_t smr, smr_seq_t goal, bool wait)
{
	smr_shared_t s;
	smr_t self;
	smr_seq_t s_wr_seq, s_rd_seq;
	smr_delta_t delta;
	int flags;
	bool success;

	/*
	 * It is illegal to enter while in an smr section.
	 */
	KASSERT(!wait || !SMR_ENTERED(smr),
	    ("smr_poll: Blocking not allowed in a SMR section."));
	KASSERT(!wait || (zpcpu_get(smr)->c_flags & SMR_LAZY) == 0,
	    ("smr_poll: Blocking not allowed on lazy smrs."));

	/*
	 * Use a critical section so that we can avoid ABA races
	 * caused by long preemption sleeps.
	 */
	success = true;
	critical_enter();
	/* Attempt to load from self only once. */
	self = zpcpu_get(smr);
	s = self->c_shared;
	flags = self->c_flags;
	counter_u64_add_protected(poll, 1);

	/*
	 * Conditionally advance the lazy write clock on any writer
	 * activity.  This may reset s_rd_seq.
	 */
	if ((flags & SMR_LAZY) != 0)
		smr_lazy_advance(smr, s);

	/*
	 * Acquire barrier loads s_wr_seq after s_rd_seq so that we can not
	 * observe an updated read sequence that is larger than write.
	 */
	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);

	/*
	 * If we have already observed the sequence number we can immediately
	 * return success.  Most polls should meet this criterion.
	 */
	if (SMR_SEQ_LEQ(goal, s_rd_seq))
		goto out;

	/*
	 * wr_seq must be loaded prior to any c_seq value so that a
	 * stale c_seq can only reference time after this wr_seq.
	 */
	s_wr_seq = atomic_load_acq_int(&s->s_wr_seq);

	/*
	 * This is the distance from s_wr_seq to goal.  Positive values
	 * are in the future.
	 */
	delta = SMR_SEQ_DELTA(goal, s_wr_seq);

	/*
	 * Detect a stale wr_seq.
	 *
	 * This goal may have come from a deferred advance or a lazy smr.
	 * If we are not blocking we can not succeed but the
	 * sequence number is valid.
	 */
	if (delta > 0 && delta <= SMR_SEQ_MAX_ADVANCE &&
	    (flags & (SMR_LAZY | SMR_DEFERRED)) != 0) {
		if (!wait) {
			success = false;
			goto out;
		}
		/* LAZY is always !wait. */
		s_wr_seq = smr_shared_advance(s);
		delta = 0;
	}

	/*
	 * Detect an invalid goal.
	 *
	 * The goal must be in the range of s_wr_seq >= goal >= s_rd_seq for
	 * it to be valid.  If it is not then the caller held on to it and
	 * the integer wrapped.  If we wrapped back within range the caller
	 * will harmlessly scan.
	 */
	if (delta > 0)
		goto out;

	/* Determine the lowest visible sequence number. */
	s_rd_seq = smr_poll_scan(smr, s, s_rd_seq, s_wr_seq, goal, wait);
	success = SMR_SEQ_LEQ(goal, s_rd_seq);
out:
	if (!success)
		counter_u64_add_protected(poll_fail, 1);
	critical_exit();

	/*
	 * Serialize with smr_advance()/smr_exit().  The caller is now free
	 * to modify memory as expected.
	 */
	atomic_thread_fence_acq();

	return (success);
}

smr_t
smr_create(const char *name, int limit, int flags)
{
	smr_t smr, c;
	smr_shared_t s;
	int i;

	s = uma_zalloc(smr_shared_zone, M_WAITOK);
	smr = uma_zalloc_pcpu(smr_zone, M_WAITOK);

	s->s_name = name;
	if ((flags & SMR_LAZY) == 0)
		s->s_rd_seq = s->s_wr_seq = SMR_SEQ_INIT;
	else
		s->s_rd_seq = s->s_wr_seq = ticks;

	/* Initialize all CPUs, not just those running. */
	for (i = 0; i <= mp_maxid; i++) {
		c = zpcpu_get_cpu(smr, i);
		c->c_seq = SMR_SEQ_INVALID;
		c->c_shared = s;
		c->c_deferred = 0;
		c->c_limit = limit;
		c->c_flags = flags;
	}
	atomic_thread_fence_seq_cst();

	return (smr);
}

void
smr_destroy(smr_t smr)
{

	smr_synchronize(smr);
	uma_zfree(smr_shared_zone, smr->c_shared);
	uma_zfree_pcpu(smr_zone, smr);
}

/*
 * Initialize the UMA slab zones.
 */
void
smr_init(void)
{

	smr_shared_zone = uma_zcreate("SMR SHARED", sizeof(struct smr_shared),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0);
	smr_zone = uma_zcreate("SMR CPU", sizeof(struct smr),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);
}

static void
smr_init_counters(void *unused)
{

	advance = counter_u64_alloc(M_WAITOK);
	advance_wait = counter_u64_alloc(M_WAITOK);
	poll = counter_u64_alloc(M_WAITOK);
	poll_scan = counter_u64_alloc(M_WAITOK);
	poll_fail = counter_u64_alloc(M_WAITOK);
}
SYSINIT(smr_counters, SI_SUB_CPU, SI_ORDER_ANY, smr_init_counters, NULL);
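
/*
 * A minimal sketch of the UMA coordination described at the top of this
 * file (illustrative only; the "foo" zone, type and variables below are
 * hypothetical and not part of this file):
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL, NULL,
 *	    NULL, UMA_ALIGN_PTR, UMA_ZONE_SMR);
 *	foo_smr = uma_zone_get_smr(foo_zone);
 *
 * Readers bracket lockless lookups with smr_enter(foo_smr)/smr_exit(foo_smr)
 * and items are allocated and freed with uma_zalloc_smr() and
 * uma_zfree_smr().  The zone stamps each full per-cpu bucket of freed items
 * with an advance goal and only polls for its expiration when the bucket is
 * about to be reused, batching both the sequence writes and the waits.
 */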