/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019,2020 Jeffrey Roberson <jeff@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/smr.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

/*
 * Global Unbounded Sequences (GUS)
 *
 * This is a novel safe memory reclamation technique inspired by
 * epoch based reclamation from Samy Al Bahra's concurrency kit which
 * in turn was based on work described in:
 *	Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *	of Cambridge Computing Laboratory.
 * And shares some similarities with:
 *	Wang, Stamler, Parmer. 2016 Parallel Sections: Scaling System-Level
 *	Data-Structures
 *
 * This is not an implementation of hazard pointers or related
 * techniques.  The term safe memory reclamation is used as a
 * generic descriptor for algorithms that defer frees to avoid
 * use-after-free errors with lockless data structures or as
 * a mechanism to detect quiescence for writer synchronization.
 *
 * The basic approach is to maintain a monotonic write sequence
 * number that is updated on some application defined granularity.
 * Readers record the most recent write sequence number they have
 * observed.  A shared read sequence number records the lowest
 * sequence number observed by any reader as of the last poll.  Any
 * write older than this value has been observed by all readers
 * and memory can be reclaimed.  Like Epoch we also detect idle
 * readers by storing an invalid sequence number in the per-cpu
 * state when the read section exits.  Like Parsec we establish
 * a global write clock that is used to mark memory on free.
 *
 * The write and read sequence numbers can be thought of as a two
 * handed clock with readers always advancing towards writers.  GUS
 * maintains the invariant that all readers can safely access memory
 * that was visible at the time they loaded their copy of the sequence
 * number.  Periodically the read sequence or hand is polled and
 * advanced as far towards the write sequence as active readers allow.
 * Memory which was freed between the old and new global read sequence
 * number can now be reclaimed.  When the system is idle the two hands
 * meet and no deferred memory is outstanding.  Readers never advance
 * any sequence number, they only observe them.  The shared read
 * sequence number is consequently never higher than the write sequence.
 * A stored sequence number that falls outside of this range has expired
 * and needs no scan to reclaim.
 *
 * A notable distinction between GUS and Epoch, qsbr, rcu, etc. is
 * that advancing the sequence number is decoupled from detecting its
 * observation.  That is to say, the delta between read and write
 * sequence numbers is not bound.  This can be thought of as a more
 * generalized form of epoch, which requires that they remain at most
 * one step apart.  This results in a more granular assignment of
 * sequence numbers even as read latencies prohibit all or some
 * expiration.  It also allows writers to advance the sequence number
 * and save the poll for expiration until a later time when it is
 * likely to complete without waiting.  The batch granularity and
 * free-to-use latency is dynamic and can be significantly smaller
 * than in more strict systems.
 *
 * This mechanism is primarily intended to be used in coordination with
 * UMA.  By integrating with the allocator we avoid all of the callout
 * queue machinery and are provided with an efficient way to batch
 * sequence advancement and waiting.  The allocator accumulates a full
 * per-cpu cache of memory before advancing the sequence.  It then
 * delays waiting for this sequence to expire until the memory is
 * selected for reuse.  In this way we only increment the sequence
 * value once for n=cache-size frees and the waits are done long
 * after the sequence has been expired so they need only be verified
 * to account for pathological conditions and to advance the read
 * sequence.  Tying the sequence number to the bucket size has the
 * nice property that as the zone gets busier the buckets get larger
 * and the sequence writes become fewer.  If the coherency of advancing
 * the write sequence number becomes too costly we can advance
 * it for every N buckets in exchange for higher free-to-use
 * latency and consequently higher memory consumption.
 *
 * If the read overhead of accessing the shared cacheline becomes
 * especially burdensome an invariant TSC could be used in place of the
 * sequence.  The algorithm would then only need to maintain the minimum
 * observed tsc.  This would trade potential cache synchronization
 * overhead for local serialization and cpu timestamp overhead.
 */
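/*
 * A minimal usage sketch (the 'foo' item, its list, and its zone are
 * hypothetical; the smr(9) calls are the ones implemented in this
 * file).  A writer tags memory with the write hand on free and
 * reclaims it once the read hand has passed that value, while readers
 * publish the sequence number they observed on entry:
 *
 *	Writer:
 *		LIST_REMOVE(foo, link);
 *		foo->seq = smr_advance(smr);
 *		...
 *		smr_wait(smr, foo->seq);	(readers are past foo->seq)
 *		uma_zfree(foo_zone, foo);
 *
 *	Reader:
 *		smr_enter(smr);			(record observed wr_seq)
 *		foo = lookup(key);
 *		... dereference foo ...
 *		smr_exit(smr);			(store SMR_SEQ_INVALID)
 */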
/*
 * A simplified diagram:
 *
 * 0                                                          UINT_MAX
 * | -------------------- sequence number space -------------------- |
 *              ^ rd seq                            ^ wr seq
 *              | ----- valid sequence numbers ---- |
 *                ^cpuA  ^cpuC
 * | -- free -- | --------- deferred frees -------- | ---- free ---- |
 *
 *
 * In this example cpuA has the lowest sequence number and poll can
 * advance rd seq.  cpuB is not running (and hence not shown) and is
 * considered to observe wr seq.
 *
 * Freed memory that is tagged with a sequence number between rd seq and
 * wr seq can not be safely reclaimed because cpuA may hold a reference to
 * it.  Any other memory is guaranteed to be unreferenced.
 *
 * Any writer is free to advance wr seq at any time however it may busy
 * poll in pathological cases.
 */
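/*
 * These range checks rely on wrap-safe comparisons.  As a sketch
 * (paraphrased from the SMR_SEQ_ macros in sys/smr.h), comparisons
 * are made on the signed difference so that values separated by less
 * than half the sequence space compare correctly even across an
 * integer wrap:
 *
 *	SMR_SEQ_DELTA(a, b)	((smr_delta_t)((a) - (b)))
 *	SMR_SEQ_LT(a, b)	(SMR_SEQ_DELTA(a, b) < 0)
 *
 * e.g. with a == 2 and b == UINT_MAX - 1 the delta is 4, so a is
 * correctly seen as the more recent sequence number.
 */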
static uma_zone_t smr_shared_zone;
static uma_zone_t smr_zone;

#ifndef INVARIANTS
#define	SMR_SEQ_INIT	1		/* All valid sequence numbers are odd. */
#define	SMR_SEQ_INCR	2

/*
 * SMR_SEQ_MAX_DELTA is the maximum distance allowed between rd_seq and
 * wr_seq.  For the modular arithmetic to work a value of UINT_MAX / 2
 * would be possible but it is checked after we increment the wr_seq so
 * a safety margin is left to prevent overflow.
 *
 * We will block until SMR_SEQ_MAX_ADVANCE sequence numbers have progressed
 * to prevent integer wrapping.  See smr_advance() for more details.
 */
#define	SMR_SEQ_MAX_DELTA	(UINT_MAX / 4)
#define	SMR_SEQ_MAX_ADVANCE	(SMR_SEQ_MAX_DELTA - 1024)
#else
/* We want to test the wrapping feature in invariants kernels. */
#define	SMR_SEQ_INCR	(UINT_MAX / 10000)
#define	SMR_SEQ_INIT	(UINT_MAX - 100000)
/* Force extra polls to test the integer overflow detection. */
#define	SMR_SEQ_MAX_DELTA	(SMR_SEQ_INCR * 32)
#define	SMR_SEQ_MAX_ADVANCE	(SMR_SEQ_MAX_DELTA / 2)
#endif

/*
 * The grace period for lazy (tick based) SMR.
 *
 * Hardclock is responsible for advancing ticks on a single CPU while every
 * CPU receives a regular clock interrupt.  The clock interrupts flush the
 * store buffers and any speculative loads that may violate our invariants.
 * Because these interrupts are not synchronized we must wait one additional
 * tick in the future to be certain that all processors have had their state
 * synchronized by an interrupt.
 *
 * This assumes that the clock interrupt will only be delayed by other causes
 * that will flush the store buffer or prevent access to the section protected
 * data.  For example, an idle processor, or a system management interrupt,
 * or a vm exit.
 */
#define	SMR_LAZY_GRACE		2
#define	SMR_LAZY_INCR		(SMR_LAZY_GRACE * SMR_SEQ_INCR)

/*
 * The maximum sequence number ahead of wr_seq that may still be valid.  The
 * sequence may not be advanced on write for lazy or deferred SMRs.  In this
 * case poll needs to attempt to forward the sequence number if the goal is
 * within wr_seq + SMR_SEQ_ADVANCE.
 */
#define	SMR_SEQ_ADVANCE		SMR_LAZY_INCR

static SYSCTL_NODE(_debug, OID_AUTO, smr, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    "SMR Stats");
static COUNTER_U64_DEFINE_EARLY(advance);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance, CTLFLAG_RW, &advance, "");
static COUNTER_U64_DEFINE_EARLY(advance_wait);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance_wait, CTLFLAG_RW,
    &advance_wait, "");
static COUNTER_U64_DEFINE_EARLY(poll);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll, CTLFLAG_RW, &poll, "");
static COUNTER_U64_DEFINE_EARLY(poll_scan);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_scan, CTLFLAG_RW, &poll_scan, "");
static COUNTER_U64_DEFINE_EARLY(poll_fail);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_fail, CTLFLAG_RW, &poll_fail, "");

/*
 * Advance a lazy write sequence number.  These move forward at the rate of
 * ticks.  Grace is SMR_LAZY_INCR (2 ticks) in the future.
 *
 * This returns the goal write sequence number.
 */
static smr_seq_t
smr_lazy_advance(smr_t smr, smr_shared_t s)
{
	union s_wr s_wr, old;
	int t, d;

	CRITICAL_ASSERT(curthread);

	/*
	 * Load the stored ticks value before the current one.  This way the
	 * current value can only be the same or larger.
	 */
	old._pair = s_wr._pair = atomic_load_acq_64(&s->s_wr._pair);
	t = ticks;

	/*
	 * The most probable condition is that the update already took place.
	 */
	d = t - s_wr.ticks;
	if (__predict_true(d == 0))
		goto out;
	/* Cap the rate of advancement and handle long idle periods. */
	if (d > SMR_LAZY_GRACE || d < 0)
		d = SMR_LAZY_GRACE;
	s_wr.ticks = t;
	s_wr.seq += d * SMR_SEQ_INCR;

	/*
	 * This can only fail if another thread races to call advance().
	 * Strong cmpset semantics mean we are guaranteed that the update
	 * happened.
	 */
	atomic_cmpset_64(&s->s_wr._pair, old._pair, s_wr._pair);
out:
	return (s_wr.seq + SMR_LAZY_INCR);
}
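/*
 * Worked example: with SMR_LAZY_GRACE == 2 and SMR_SEQ_INCR == 2, a
 * caller that finds ticks 5 ahead of s_wr.ticks caps the delta at 2
 * and moves s_wr.seq forward by 4.  The returned goal is then
 * s_wr.seq + SMR_LAZY_INCR, i.e. a further 2 ticks (4 sequence
 * values) in the future, which is what provides the grace period.
 */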
/*
 * Increment the shared write sequence by SMR_SEQ_INCR (2 in production
 * kernels).  Since it is initialized to 1 this means the only valid
 * values are odd and an observed value of 0 in a particular CPU means
 * it is not currently in a read section.
 */
static smr_seq_t
smr_shared_advance(smr_shared_t s)
{

	return (atomic_fetchadd_int(&s->s_wr.seq, SMR_SEQ_INCR) + SMR_SEQ_INCR);
}

/*
 * Advance the write sequence number for a normal smr section.  If the
 * write sequence is too far behind the read sequence we have to poll
 * to advance rd_seq and prevent undetectable wraps.
 */
static smr_seq_t
smr_default_advance(smr_t smr, smr_shared_t s)
{
	smr_seq_t goal, s_rd_seq;

	CRITICAL_ASSERT(curthread);
	KASSERT((zpcpu_get(smr)->c_flags & SMR_LAZY) == 0,
	    ("smr_default_advance: called with lazy smr."));

	/*
	 * Load the current read seq before incrementing the goal so
	 * we are guaranteed it is always < goal.
	 */
	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);
	goal = smr_shared_advance(s);

	/*
	 * Force a synchronization here if the goal is getting too
	 * far ahead of the read sequence number.  This keeps the
	 * wrap detecting arithmetic working in pathological cases.
	 */
	if (SMR_SEQ_DELTA(goal, s_rd_seq) >= SMR_SEQ_MAX_DELTA) {
		counter_u64_add(advance_wait, 1);
		smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE);
	}
	counter_u64_add(advance, 1);

	return (goal);
}

/*
 * Deferred SMRs conditionally update s_wr_seq based on a
 * cpu-local interval count.
 */
static smr_seq_t
smr_deferred_advance(smr_t smr, smr_shared_t s, smr_t self)
{

	if (++self->c_deferred < self->c_limit)
		return (smr_shared_current(s) + SMR_SEQ_INCR);
	self->c_deferred = 0;
	return (smr_default_advance(smr, s));
}

/*
 * Advance the write sequence and return the value for use as the
 * wait goal.  This guarantees that any changes made by the calling
 * thread prior to this call will be visible to all threads after
 * rd_seq meets or exceeds the return value.
 *
 * This function may busy loop if the readers are roughly 1 billion
 * sequence numbers behind the writers.
 *
 * Lazy SMRs will not busy loop and the wrap happens every 25 days
 * at 1khz and 60 hours at 10khz.  Readers can block for no longer
 * than half of this for SMR_SEQ_ macros to continue working.
 */
smr_seq_t
smr_advance(smr_t smr)
{
	smr_t self;
	smr_shared_t s;
	smr_seq_t goal;
	int flags;

	/*
	 * It is illegal to enter while in an smr section.
	 */
	SMR_ASSERT_NOT_ENTERED(smr);

	/*
	 * Modifications not done in a smr section need to be visible
	 * before advancing the seq.
	 */
	atomic_thread_fence_rel();

	critical_enter();
	/* Try to touch the line once. */
	self = zpcpu_get(smr);
	s = self->c_shared;
	flags = self->c_flags;
	goal = SMR_SEQ_INVALID;
	if ((flags & (SMR_LAZY | SMR_DEFERRED)) == 0)
		goal = smr_default_advance(smr, s);
	else if ((flags & SMR_LAZY) != 0)
		goal = smr_lazy_advance(smr, s);
	else if ((flags & SMR_DEFERRED) != 0)
		goal = smr_deferred_advance(smr, s, self);
	critical_exit();

	return (goal);
}
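/*
 * Sketch of the deferred mode (the limit value is hypothetical): an
 * SMR_DEFERRED smr created with a limit of 10 only pays for the
 * atomic fetchadd on every 10th call; the other nine return the
 * current sequence plus SMR_SEQ_INCR as a conservative goal that
 * poll may later have to forward itself:
 *
 *	smr = smr_create("example", 10, SMR_DEFERRED);
 *	for (i = 0; i < 10; i++)
 *		goal = smr_advance(smr);
 */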
/*
 * Poll to determine the currently observed sequence number on a cpu
 * and spinwait if the 'wait' argument is true.
 */
static smr_seq_t
smr_poll_cpu(smr_t c, smr_seq_t s_rd_seq, smr_seq_t goal, bool wait)
{
	smr_seq_t c_seq;

	c_seq = SMR_SEQ_INVALID;
	for (;;) {
		c_seq = atomic_load_int(&c->c_seq);
		if (c_seq == SMR_SEQ_INVALID)
			break;

		/*
		 * There is a race described in smr.h:smr_enter that
		 * can lead to a stale seq value but not stale data
		 * access.  If we find a value out of range here we
		 * pin it to the current min to prevent it from
		 * advancing until that stale section has expired.
		 *
		 * The race is created when a cpu loads the s_wr_seq
		 * value in a local register and then another thread
		 * advances s_wr_seq and calls smr_poll() which will
		 * observe no value yet in c_seq and advance s_rd_seq
		 * up to s_wr_seq which is beyond the register
		 * cached value.  This is only likely to happen on
		 * a hypervisor or with a system management interrupt.
		 */
		if (SMR_SEQ_LT(c_seq, s_rd_seq))
			c_seq = s_rd_seq;

		/*
		 * If the sequence number meets the goal we are done
		 * with this cpu.
		 */
		if (SMR_SEQ_LEQ(goal, c_seq))
			break;

		if (!wait)
			break;
		cpu_spinwait();
	}

	return (c_seq);
}

/*
 * Loop until all cores have observed the goal sequence or have
 * gone inactive.  Returns the oldest sequence currently active.
 *
 * This function assumes a snapshot of sequence values has
 * been obtained and validated by smr_poll().
 */
static smr_seq_t
smr_poll_scan(smr_t smr, smr_shared_t s, smr_seq_t s_rd_seq,
    smr_seq_t s_wr_seq, smr_seq_t goal, bool wait)
{
	smr_seq_t rd_seq, c_seq;
	int i;

	CRITICAL_ASSERT(curthread);
	counter_u64_add_protected(poll_scan, 1);

	/*
	 * The read sequence can be no larger than the write sequence at
	 * the start of the poll.
	 */
	rd_seq = s_wr_seq;
	CPU_FOREACH(i) {
		/*
		 * Query the active sequence on this cpu.  If we're not
		 * waiting and we don't meet the goal we will still scan
		 * the rest of the cpus to update s_rd_seq before returning
		 * failure.
		 */
		c_seq = smr_poll_cpu(zpcpu_get_cpu(smr, i), s_rd_seq, goal,
		    wait);

		/*
		 * Limit the minimum observed rd_seq whether we met the goal
		 * or not.
		 */
		if (c_seq != SMR_SEQ_INVALID)
			rd_seq = SMR_SEQ_MIN(rd_seq, c_seq);
	}

	/*
	 * Advance the rd_seq as long as we observed a more recent value.
	 */
	s_rd_seq = atomic_load_int(&s->s_rd_seq);
	if (SMR_SEQ_GT(rd_seq, s_rd_seq)) {
		atomic_cmpset_int(&s->s_rd_seq, s_rd_seq, rd_seq);
		s_rd_seq = rd_seq;
	}

	return (s_rd_seq);
}
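/*
 * Worked example for a non-blocking scan: with goal == 7,
 * s_wr_seq == 9, and per-cpu sequences { 5, SMR_SEQ_INVALID, 9 },
 * the idle cpu is skipped, the third cpu already meets the goal, and
 * the first pins the minimum at 5.  s_rd_seq advances to 5 and the
 * poll fails; a blocking scan would instead spinwait on the first
 * cpu until it reached 7 or exited its section.
 */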
/*
 * Poll to determine whether all readers have observed the 'goal' write
 * sequence number.
 *
 * If wait is true this will spin until the goal is met.
 *
 * This routine will update the minimum observed read sequence number in
 * s_rd_seq if it does a scan.  It may not do a scan if another call has
 * advanced s_rd_seq beyond the caller's goal already.
 *
 * Returns true if the goal is met and false if not.
 */
bool
smr_poll(smr_t smr, smr_seq_t goal, bool wait)
{
	smr_shared_t s;
	smr_t self;
	smr_seq_t s_wr_seq, s_rd_seq;
	smr_delta_t delta;
	int flags;
	bool success;

	/*
	 * It is illegal to enter while in an smr section.
	 */
	KASSERT(!wait || !SMR_ENTERED(smr),
	    ("smr_poll: Blocking not allowed in a SMR section."));
	KASSERT(!wait || (zpcpu_get(smr)->c_flags & SMR_LAZY) == 0,
	    ("smr_poll: Blocking not allowed on lazy smrs."));

	/*
	 * Use a critical section so that we can avoid ABA races
	 * caused by long preemption sleeps.
	 */
	success = true;
	critical_enter();
	/* Attempt to load from self only once. */
	self = zpcpu_get(smr);
	s = self->c_shared;
	flags = self->c_flags;
	counter_u64_add_protected(poll, 1);

	/*
	 * Conditionally advance the lazy write clock on any writer
	 * activity.
	 */
	if ((flags & SMR_LAZY) != 0)
		smr_lazy_advance(smr, s);

	/*
	 * Acquire barrier loads s_wr_seq after s_rd_seq so that we can not
	 * observe an updated read sequence that is larger than write.
	 */
	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);

	/*
	 * If we have already observed the sequence number we can immediately
	 * return success.  Most polls should meet this criterion.
	 */
	if (SMR_SEQ_LEQ(goal, s_rd_seq))
		goto out;

	/*
	 * wr_seq must be loaded prior to any c_seq value so that a
	 * stale c_seq can only reference time after this wr_seq.
	 */
	s_wr_seq = atomic_load_acq_int(&s->s_wr.seq);

	/*
	 * This is the distance from s_wr_seq to goal.  Positive values
	 * are in the future.
	 */
	delta = SMR_SEQ_DELTA(goal, s_wr_seq);

	/*
	 * Detect a stale wr_seq.
	 *
	 * This goal may have come from a deferred advance or a lazy
	 * smr.  If we are not blocking we can not succeed but the
	 * sequence number is valid.
	 */
	if (delta > 0 && delta <= SMR_SEQ_ADVANCE &&
	    (flags & (SMR_LAZY | SMR_DEFERRED)) != 0) {
		if (!wait) {
			success = false;
			goto out;
		}
		/* LAZY is always !wait. */
		s_wr_seq = smr_shared_advance(s);
		delta = 0;
	}

	/*
	 * Detect an invalid goal.
	 *
	 * The goal must be in the range of s_wr_seq >= goal >= s_rd_seq for
	 * it to be valid.  If it is not then the caller held on to it and
	 * the integer wrapped.  If we wrapped back within range the caller
	 * will harmlessly scan.
	 */
	if (delta > 0)
		goto out;

	/* Determine the lowest visible sequence number. */
	s_rd_seq = smr_poll_scan(smr, s, s_rd_seq, s_wr_seq, goal, wait);
	success = SMR_SEQ_LEQ(goal, s_rd_seq);
out:
	if (!success)
		counter_u64_add_protected(poll_fail, 1);
	critical_exit();

	/*
	 * Serialize with smr_advance()/smr_exit().  The caller is now free
	 * to modify memory as expected.
	 */
	atomic_thread_fence_acq();

	KASSERT(success || !wait, ("%s: blocking poll failed", __func__));
	return (success);
}
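/*
 * The intended two-phase pattern, as a sketch (UMA's SMR buckets work
 * along these lines; the field name here is hypothetical): tag a
 * batch with a goal at free time, then check that goal cheaply at
 * reuse time, falling back to a blocking wait only when readers are
 * pathologically slow:
 *
 *	bucket->seq = smr_advance(smr);		(at free)
 *	...
 *	if (!smr_poll(smr, bucket->seq, false))	(at reuse)
 *		smr_wait(smr, bucket->seq);
 */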
smr_t
smr_create(const char *name, int limit, int flags)
{
	smr_t smr, c;
	smr_shared_t s;
	int i;

	s = uma_zalloc(smr_shared_zone, M_WAITOK);
	smr = uma_zalloc_pcpu(smr_zone, M_WAITOK);

	s->s_name = name;
	s->s_rd_seq = s->s_wr.seq = SMR_SEQ_INIT;
	s->s_wr.ticks = ticks;

	/* Initialize all CPUs, not just those running. */
	for (i = 0; i <= mp_maxid; i++) {
		c = zpcpu_get_cpu(smr, i);
		c->c_seq = SMR_SEQ_INVALID;
		c->c_shared = s;
		c->c_deferred = 0;
		c->c_limit = limit;
		c->c_flags = flags;
	}
	atomic_thread_fence_seq_cst();

	return (smr);
}

void
smr_destroy(smr_t smr)
{

	smr_synchronize(smr);
	uma_zfree(smr_shared_zone, smr->c_shared);
	uma_zfree_pcpu(smr_zone, smr);
}

/*
 * Initialize the UMA slab zone.
 */
void
smr_init(void)
{

	smr_shared_zone = uma_zcreate("SMR SHARED", sizeof(struct smr_shared),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0);
	smr_zone = uma_zcreate("SMR CPU", sizeof(struct smr),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);
}
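/*
 * Lifecycle sketch (names hypothetical): a consumer creates an smr,
 * shares it among readers and writers, and destroys it only once no
 * new read sections can start; smr_destroy()'s internal
 * smr_synchronize() then flushes any outstanding read sections before
 * the backing storage is freed:
 *
 *	smr_t foo_smr = smr_create("foo", 0, 0);
 *	... readers use smr_enter(foo_smr)/smr_exit(foo_smr) ...
 *	smr_destroy(foo_smr);		(synchronizes, then frees)
 */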