/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * based on usr/src/uts/common/os/kmem.c r1.64 from 2001/12/18
 *
 * The slab allocator, as described in the following two papers:
 *
 *    Jeff Bonwick,
 *    The Slab Allocator: An Object-Caching Kernel Memory Allocator.
 *    Proceedings of the Summer 1994 Usenix Conference.
 *    Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
 *
 *    Jeff Bonwick and Jonathan Adams,
 *    Magazines and vmem: Extending the Slab Allocator to Many CPUs and
 *    Arbitrary Resources.
 *    Proceedings of the 2001 Usenix Conference.
 *    Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
 *
 * 1. Overview
 * -----------
 * umem is very close to kmem in implementation.  There are four major
 * areas of divergence:
 *
 *    * Initialization
 *
 *    * CPU handling
 *
 *    * umem_update()
 *
 *    * KM_SLEEP vs. UMEM_NOFAIL
 *
 *
 * 2. Initialization
 * -----------------
 * kmem is initialized early in boot, and knows that no one will call
 * into it before it is ready.  umem does not have these luxuries.  Instead,
 * initialization is divided into two phases:
 *
 *    * library initialization, and
 *
 *    * first use
 *
 * umem's full initialization happens at the time of the first allocation
 * request (via malloc() and friends, umem_alloc(), or umem_zalloc()),
 * or the first call to umem_cache_create().
 *
 * umem_free() and umem_cache_alloc() do not require special handling,
 * since the only way to get valid arguments for them is to successfully
 * call a function from the first group.
 *
 * 2.1. Library Initialization: umem_startup()
 * -------------------------------------------
 * umem_startup() is libumem.so's .init section.  It calls pthread_atfork()
 * to install the handlers necessary for umem's Fork1-Safety.  Because of
 * race condition issues, all other pre-umem_init() initialization is done
 * statically (i.e. by the dynamic linker).
 *
 * For standalone use, umem_startup() returns everything to its initial
 * state.
 *
 * 2.2. First use: umem_init()
 * ---------------------------
 * The first time any memory allocation function is used, we have to
 * create the backing caches and vmem arenas which are needed for it.
 * umem_init() is the central point for that task.  When it completes,
 * umem_ready is either UMEM_READY (all set) or UMEM_READY_INIT_FAILED (unable
 * to initialize, probably due to lack of memory).
 *
 * There are four different paths from which umem_init() is called:
 *
 *    * from umem_alloc() or umem_zalloc(), with 0 < size <= UMEM_MAXBUF,
 *
 *    * from umem_alloc() or umem_zalloc(), with size > UMEM_MAXBUF,
 *
 *    * from umem_cache_create(), and
 *
 *    * from memalign(), with align > UMEM_ALIGN.
 *
 * The last three just check if umem is initialized, and call umem_init()
 * if it is not.  For performance reasons, the first case is more
 * complicated.
 *
 * 2.2.1. umem_alloc()/umem_zalloc(), with 0 < size <= UMEM_MAXBUF
 * ---------------------------------------------------------------
 * In this case, umem_cache_alloc(&umem_null_cache, ...) is called.
 * There is special case code which causes any allocation on
 * &umem_null_cache to fail by returning (NULL), regardless of the
 * flags argument.
 *
 * So umem_cache_alloc() returns NULL, and umem_alloc()/umem_zalloc() call
 * umem_alloc_retry().  umem_alloc_retry() sees that the allocation
 * was against &umem_null_cache, and calls umem_init().
 *
 * If initialization is successful, umem_alloc_retry() returns 1, which
 * causes umem_alloc()/umem_zalloc() to start over and load the (now
 * valid) cache pointer from umem_alloc_table.  (This fast path is
 * sketched below, after section 2.2.2.)
 *
 * 2.2.2. Dealing with race conditions
 * -----------------------------------
 * There are a couple of race conditions resulting from the initialization
 * code that we have to guard against:
 *
 *    * In umem_cache_create(), there is a special UMC_INTERNAL cflag
 *      that is passed for caches created during initialization.  It
 *      is illegal for a user to try to create a UMC_INTERNAL cache.
 *      This allows initialization to proceed, but any other
 *      umem_cache_create()s will block by calling umem_init().
 *
 *    * Since umem_null_cache has a 1-element cache_cpu, its cache_cpu_mask
 *      is always zero.  umem_cache_alloc() uses cp->cache_cpu_mask to
 *      mask the cpu number.  This prevents a race between grabbing a
 *      cache pointer out of umem_alloc_table and growing the cpu array.
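 *
 * To make the fast path of section 2.2.1 concrete, here is a simplified
 * sketch of it (error handling and the oversize path are omitted; see
 * _umem_alloc() and umem_alloc_retry() below for the real code):
 *
 *        size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
 *    retry:
 *        cp = umem_alloc_table[index];    (initially &umem_null_cache)
 *        buf = _umem_cache_alloc(cp, umflag);
 *        if (buf == NULL && umem_alloc_retry(cp, umflag))
 *            goto retry;    (umem_init() ran; the table is now valid)
 *        return (buf);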
 *
 *
 * 3. CPU handling
 * ---------------
 * kmem uses the CPU's sequence number to determine which "cpu cache" to
 * use for an allocation.  Currently, there is no way to get the sequence
 * number in userspace.
 *
 * umem keeps track of cpu information in umem_cpus, an array of umem_max_ncpus
 * umem_cpu_t structures.  CPUHINT() is a "hint" function, which we then mask
 * with either umem_cpu_mask or cp->cache_cpu_mask to find the actual "cpu" id.
 * The mechanics of this are all in the CPU(mask) macro.
 *
 * Currently, umem uses _lwp_self() as its hint.
 *
 *
 * 4. The update thread
 * --------------------
 * kmem uses a task queue, kmem_taskq, to do periodic maintenance on
 * every kmem cache.  vmem has a periodic timeout for hash table resizing.
 * The kmem_taskq also provides a separate context in which kmem_cache_reap()s
 * run, insulating them from the context of kmem_reap() callers.
 *
 * Instead, umem has the concept of "updates", which are asynchronous requests
 * for work attached to single caches.  All caches with pending work are
 * on a doubly linked list rooted at the umem_null_cache.  All update state
 * is protected by the umem_update_lock mutex, and the umem_update_cv is used
 * for notification between threads.
 *
 * 4.1. Cache states with regard to updates
 * ----------------------------------------
 * A given cache is in one of three states:
 *
 * Inactive         cache_uflags is zero, cache_u{next,prev} are NULL
 *
 * Work Requested   cache_uflags is non-zero (but UMU_ACTIVE is not set),
 *                  cache_u{next,prev} link the cache onto the global
 *                  update list
 *
 * Active           cache_uflags has UMU_ACTIVE set, cache_u{next,prev}
 *                  are NULL, and the thread identified by either
 *                  umem_update_thr or umem_st_update_thr is actively
 *                  doing work on the cache.
 *
 * An update can be added to any cache in any state -- if the cache is
 * Inactive, it transitions to being Work Requested.  If the cache is
 * Active, the worker will notice the new update and act on it before
 * transitioning the cache to the Inactive state.
 *
 * If a cache is in the Active state, UMU_NOTIFY can be set, which asks
 * the worker to broadcast the umem_update_cv when it has finished.
 *
 * 4.2. Update interface
 * ---------------------
 * umem_add_update() adds an update to a particular cache.
 * umem_updateall() adds an update to all caches.
 * umem_remove_updates() returns a cache to the Inactive state.
 *
 * umem_process_updates() processes all caches in the Work Requested state.
 *
 * 4.3. Reaping
 * ------------
 * When umem_reap() is called (at the time of heap growth), it schedules
 * UMU_REAP updates on every cache.  It then checks to see if the update
 * thread exists (umem_update_thr != 0).  If it does, it broadcasts
 * the umem_update_cv to wake the update thread up, and returns.
 *
 * If the update thread does not exist (umem_update_thr == 0), and the
 * program currently has multiple threads, umem_reap() attempts to create
 * a new update thread.
 *
 * If the process is not multithreaded, or the creation fails, umem_reap()
 * calls umem_st_update() to do an inline update.
 *
 * 4.4. The update thread
 * ----------------------
 * The update thread spends most of its time in cond_timedwait() on the
 * umem_update_cv.  It wakes up under two conditions:
 *
 *    * The timedwait times out, in which case it needs to run a global
 *      update, or
 *
 *    * someone cond_broadcast(3THR)s the umem_update_cv, in which case
 *      it needs to check if there are any caches in the Work Requested
 *      state.
 *
 * When it is time for another global update, umem calls umem_cache_update()
 * on every cache, then calls vmem_update(), which tunes the vmem structures.
 * umem_cache_update() can request further work using umem_add_update().
 *
 * After any work from the global update completes, the update timer is
 * reset to umem_reap_interval seconds in the future.  This makes the
 * updates self-throttling.
 *
 * Reaps are similarly self-throttling.  After a UMU_REAP update has
 * been scheduled on all caches, umem_reap() sets a flag and wakes up the
 * update thread.  The update thread notices the flag, and resets the
 * reap state.
 *
 * 4.5. Inline updates
 * -------------------
 * If the update thread is not running, umem_st_update() is used instead.  It
 * immediately does a global update (as above), then calls
 * umem_process_updates() to process both the reaps that umem_reap() added and
 * any work generated by the global update.  Afterwards, it resets the reap
 * state.
 *
 * While umem_st_update() is running, umem_st_update_thr holds the thread
 * id of the thread performing the update.
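 *
 * Tying sections 4.3 through 4.5 together, umem_reap()'s dispatch logic
 * is roughly the following (a simplified sketch of the behavior described
 * above, not the literal code):
 *
 *        umem_updateall(UMU_REAP);
 *        if (umem_update_thr != 0)
 *            cond_broadcast(&umem_update_cv);    (wake the update thread)
 *        else if (multithreaded and an update thread can be created)
 *            (the new update thread performs the work)
 *        else
 *            umem_st_update();                   (inline update)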
 *
 * 4.6. Updates and fork1()
 * ------------------------
 * umem has fork1() pre- and post-handlers which lock up (and release) every
 * mutex in every cache.  They also lock up the umem_update_lock.  Since
 * fork1() only copies over a single lwp, other threads (including the update
 * thread) could have been actively using a cache in the parent.  This
 * can lead to inconsistencies in the child process.
 *
 * Because we locked all of the mutexes, the only possible inconsistencies are:
 *
 *    * a umem_cache_alloc() could leak its buffer.
 *
 *    * a caller of umem_depot_alloc() could leak a magazine, and all the
 *      buffers contained in it.
 *
 *    * a cache could be in the Active update state.  In the child, there
 *      would be no thread actually working on it.
 *
 *    * a umem_hash_rescale() could leak the new hash table.
 *
 *    * a umem_magazine_resize() could be in progress.
 *
 *    * a umem_reap() could be in progress.
 *
 * The memory leaks we can't do anything about.  umem_release_child() resets
 * the update state and moves any caches in the Active state to the Work
 * Requested state.  This might cause some updates to be re-run, but UMU_REAP
 * and UMU_HASH_RESCALE are effectively idempotent, and the worst that can
 * happen from umem_magazine_resize() is resizing the magazine twice in close
 * succession.
 *
 * Much of the cleanup in umem_release_child() is skipped if
 * umem_st_update_thr == thr_self().  This is so that applications which call
 * fork1() from a cache callback do not break.  Needless to say, any such
 * application is tremendously broken.
 *
 *
 * 5. KM_SLEEP vs. UMEM_NOFAIL
 * ---------------------------
 * Allocations against kmem and vmem have two basic modes:  SLEEP and
 * NOSLEEP.  A sleeping allocation will go to sleep (waiting for
 * more memory) instead of failing (returning NULL).
 *
 * SLEEP allocations presume an extremely multithreaded model, with
 * a lot of allocation and deallocation activity.  umem cannot presume
 * that its clients have any particular type of behavior.  Instead,
 * it provides two types of allocations:
 *
 *    * UMEM_DEFAULT, equivalent to KM_NOSLEEP (i.e. return NULL on
 *      failure)
 *
 *    * UMEM_NOFAIL, which, on failure, calls an optional callback
 *      (registered with umem_nofail_callback()).
 *
 * The callback is invoked with no locks held, and can do an arbitrary
 * amount of work.  It then has a choice between:
 *
 *    * Returning UMEM_CALLBACK_RETRY, which will cause the allocation
 *      to be restarted.
 *
 *    * Returning UMEM_CALLBACK_EXIT(status), which will cause exit(2)
 *      to be invoked with status.  If multiple threads attempt to do
 *      this simultaneously, only one will call exit(2).
 *
 *    * Doing some kind of non-local exit (thr_exit(3thr), longjmp(3C),
 *      etc.)
 *
 * The default callback returns UMEM_CALLBACK_EXIT(255).
 *
 * To have these callbacks without risk of state corruption (in the case of
 * a non-local exit), we have to ensure that the callbacks get invoked
 * close to the original allocation, with no inconsistent state or held
 * locks.  The following steps are taken:
 *
 *    * All invocations of vmem are VM_NOSLEEP.
 *
 *    * All constructor callbacks (which can themselves do allocations)
 *      are passed UMEM_DEFAULT as their required allocation argument.  This
 *      way, the constructor will fail, allowing the highest-level allocation
 *      to invoke the nofail callback.
 *
 *      If a constructor callback _does_ do a UMEM_NOFAIL allocation, and
 *      the nofail callback does a non-local exit, we will leak the
 *      partially-constructed buffer.
 */
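
/*
 * For example, from a client's point of view the two allocation types
 * differ only in how failure is handled (a hedged usage sketch, not part
 * of this file's code):
 *
 *        void *a = umem_alloc(sz, UMEM_DEFAULT);
 *        if (a == NULL)
 *            (recover locally; the allocation simply failed)
 *
 *        void *b = umem_alloc(sz, UMEM_NOFAIL);
 *        (if this returns, b is valid; on failure the nofail callback
 *         either retried, called exit(2), or did a non-local exit)
 */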

#include "mtlib.h"
#include <umem_impl.h>
#include <sys/vmem_impl_user.h>
#include "umem_base.h"
#include "vmem_base.h"

#include <sys/processor.h>
#include <sys/sysmacros.h>

#include <alloca.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <signal.h>
#include <unistd.h>
#include <atomic.h>

#include "misc.h"

#define UMEM_VMFLAGS(umflag)    (VM_NOSLEEP)

size_t pagesize;

/*
 * The default set of caches to back umem_alloc().
 * These sizes should be reevaluated periodically.
 *
 * We want allocations that are multiples of the coherency granularity
 * (64 bytes) to be satisfied from a cache which is a multiple of 64
 * bytes, so that it will be 64-byte aligned.  For all multiples of 64,
 * the next kmem_cache_size greater than or equal to it must be a
 * multiple of 64.
 */
static const int umem_alloc_sizes[] = {
#ifdef _LP64
    1 * 8,
    1 * 16,
    2 * 16,
    3 * 16,
#else
    1 * 8,
    2 * 8,
    3 * 8,
    4 * 8,      5 * 8,      6 * 8,      7 * 8,
#endif
    4 * 16,     5 * 16,     6 * 16,     7 * 16,
    4 * 32,     5 * 32,     6 * 32,     7 * 32,
    4 * 64,     5 * 64,     6 * 64,     7 * 64,
    4 * 128,    5 * 128,    6 * 128,    7 * 128,
    P2ALIGN(8192 / 7, 64),
    P2ALIGN(8192 / 6, 64),
    P2ALIGN(8192 / 5, 64),
    P2ALIGN(8192 / 4, 64),
    P2ALIGN(8192 / 3, 64),
    P2ALIGN(8192 / 2, 64),
    P2ALIGN(8192 / 1, 64),
    4096 * 3,
    8192 * 2,
};
#define NUM_ALLOC_SIZES (sizeof (umem_alloc_sizes) / sizeof (*umem_alloc_sizes))

#define UMEM_MAXBUF     16384
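
/*
 * A worked example of the mapping (assuming UMEM_ALIGN is 8, so
 * UMEM_ALIGN_SHIFT is 3): umem_alloc(100, ...) computes index
 * (100 - 1) >> 3 = 12, and umem_alloc_table[12] -- set up during cache
 * initialization -- points at the smallest cache that fits, the 112-byte
 * (7 * 16) cache above.  Likewise, P2ALIGN(8192 / 7, 64) is 1152, the
 * largest multiple of 64 not exceeding 8192 / 7 = 1170.
 */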

static umem_magtype_t umem_magtype[] = {
    { 1,    8,      3200,   65536   },
    { 3,    16,     256,    32768   },
    { 7,    32,     64,     16384   },
    { 15,   64,     0,      8192    },
    { 31,   64,     0,      4096    },
    { 47,   64,     0,      2048    },
    { 63,   64,     0,      1024    },
    { 95,   64,     0,      512     },
    { 143,  64,     0,      0       },
};

/*
 * umem tunables
 */
uint32_t umem_max_ncpus;        /* # of CPU caches. */

uint32_t umem_stack_depth = 15; /* # stack frames in a bufctl_audit */
uint32_t umem_reap_interval = 10; /* max reaping rate (seconds) */
uint_t umem_depot_contention = 2; /* max failed trylocks per real interval */
uint_t umem_abort = 1;          /* whether to abort on error */
uint_t umem_output = 0;         /* whether to write to standard error */
uint_t umem_logging = 0;        /* umem_log_enter() override */
uint32_t umem_mtbf = 0;         /* mean time between failures [default: off] */
size_t umem_transaction_log_size; /* size of transaction log */
size_t umem_content_log_size;   /* size of content log */
size_t umem_failure_log_size;   /* failure log [4 pages per CPU] */
size_t umem_slab_log_size;      /* slab create log [4 pages per CPU] */
size_t umem_content_maxsave = 256; /* UMF_CONTENTS max bytes to log */
size_t umem_lite_minsize = 0;   /* minimum buffer size for UMF_LITE */
size_t umem_lite_maxalign = 1024; /* maximum buffer alignment for UMF_LITE */
size_t umem_maxverify;          /* maximum bytes to inspect in debug routines */
size_t umem_minfirewall;        /* hardware-enforced redzone threshold */

uint_t umem_flags = 0;

mutex_t umem_init_lock;         /* locks initialization */
cond_t umem_init_cv;            /* initialization CV */
thread_t umem_init_thr;         /* thread initializing */
int umem_init_env_ready;        /* environ pre-initted */
int umem_ready = UMEM_READY_STARTUP;

static umem_nofail_callback_t *nofail_callback;
static mutex_t umem_nofail_exit_lock;
static thread_t umem_nofail_exit_thr;

static umem_cache_t *umem_slab_cache;
static umem_cache_t *umem_bufctl_cache;
static umem_cache_t *umem_bufctl_audit_cache;

mutex_t umem_flags_lock;

static vmem_t *heap_arena;
static vmem_alloc_t *heap_alloc;
static vmem_free_t *heap_free;

static vmem_t *umem_internal_arena;
static vmem_t *umem_cache_arena;
static vmem_t *umem_hash_arena;
static vmem_t *umem_log_arena;
static vmem_t *umem_oversize_arena;
static vmem_t *umem_va_arena;
static vmem_t *umem_default_arena;
static vmem_t *umem_firewall_va_arena;
static vmem_t *umem_firewall_arena;

vmem_t *umem_memalign_arena;

umem_log_header_t *umem_transaction_log;
umem_log_header_t *umem_content_log;
umem_log_header_t *umem_failure_log;
umem_log_header_t *umem_slab_log;

extern thread_t _thr_self(void);
#define CPUHINT()       (_thr_self())
#define CPUHINT_MAX()   INT_MAX

#define CPU(mask)       (umem_cpus + (CPUHINT() & (mask)))
static umem_cpu_t umem_startup_cpu = {  /* initial, single, cpu */
    UMEM_CACHE_SIZE(0),
    0
};

static uint32_t umem_cpu_mask = 0;                  /* global cpu mask */
static umem_cpu_t *umem_cpus = &umem_startup_cpu;   /* cpu list */

volatile uint32_t umem_reaping;

thread_t umem_update_thr;
struct timeval umem_update_next;        /* timeofday of next update */
volatile thread_t umem_st_update_thr;   /* only used when single-threaded */

#define IN_UPDATE()     (thr_self() == umem_update_thr || \
                            thr_self() == umem_st_update_thr)
#define IN_REAP()       IN_UPDATE()

mutex_t umem_update_lock;       /* cache_u{next,prev,flags} */
cond_t umem_update_cv;

volatile hrtime_t umem_reap_next;       /* min hrtime of next reap */

mutex_t umem_cache_lock;        /* inter-cache linkage only */

#ifdef UMEM_STANDALONE
umem_cache_t umem_null_cache;
static const umem_cache_t umem_null_cache_template = {
#else
umem_cache_t umem_null_cache = {
#endif
    0, 0, 0, 0, 0,
    0, 0,
    0, 0,
    0, 0,
    "invalid_cache",
    0, 0,
    NULL, NULL, NULL, NULL,
    NULL,
    0, 0, 0, 0,
    &umem_null_cache, &umem_null_cache,
    &umem_null_cache, &umem_null_cache,
    0,
    DEFAULTMUTEX,                       /* start of slab layer */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    &umem_null_cache.cache_nullslab,
    {
        &umem_null_cache,
        NULL,
        &umem_null_cache.cache_nullslab,
        &umem_null_cache.cache_nullslab,
        NULL,
        -1,
        0
    },
    NULL,
    NULL,
    DEFAULTMUTEX,                       /* start of depot layer */
    NULL, {
        NULL, 0, 0, 0, 0
    }, {
        NULL, 0, 0, 0, 0
    }, {
        {
            DEFAULTMUTEX,               /* start of CPU cache */
            0, 0, NULL, NULL, -1, -1, 0
        }
    }
};

#define ALLOC_TABLE_4 \
    &umem_null_cache, &umem_null_cache, &umem_null_cache, &umem_null_cache

#define ALLOC_TABLE_64 \
    ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
    ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
    ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
    ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4

#define ALLOC_TABLE_1024 \
    ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
    ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
    ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
    ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64

static umem_cache_t *umem_alloc_table[UMEM_MAXBUF >> UMEM_ALIGN_SHIFT] = {
    ALLOC_TABLE_1024,
    ALLOC_TABLE_1024
};
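
/*
 * A quick check on the table above: it needs UMEM_MAXBUF >>
 * UMEM_ALIGN_SHIFT = 16384 >> 3 = 2048 entries, which the two
 * ALLOC_TABLE_1024 expansions (each 16 * 16 * 4 caches) supply exactly.
 */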

/* Used to constrain audit-log stack traces */
caddr_t umem_min_stack;
caddr_t umem_max_stack;

/*
 * We use the _ versions, since we don't want to be cancelled.
 * Actually, this is automatically taken care of by including "mtlib.h".
 */
extern int _cond_wait(cond_t *cv, mutex_t *mutex);

#define UMERR_MODIFIED  0       /* buffer modified while on freelist */
#define UMERR_REDZONE   1       /* redzone violation (write past end of buf) */
#define UMERR_DUPFREE   2       /* freed a buffer twice */
#define UMERR_BADADDR   3       /* freed a bad (unallocated) address */
#define UMERR_BADBUFTAG 4       /* buftag corrupted */
#define UMERR_BADBUFCTL 5       /* bufctl corrupted */
#define UMERR_BADCACHE  6       /* freed a buffer to the wrong cache */
#define UMERR_BADSIZE   7       /* alloc size != free size */
#define UMERR_BADBASE   8       /* buffer base address wrong */

struct {
    hrtime_t        ump_timestamp;  /* timestamp of error */
    int             ump_error;      /* type of umem error (UMERR_*) */
    void            *ump_buffer;    /* buffer that induced abort */
    void            *ump_realbuf;   /* real start address for buffer */
    umem_cache_t    *ump_cache;     /* buffer's cache according to client */
    umem_cache_t    *ump_realcache; /* actual cache containing buffer */
    umem_slab_t     *ump_slab;      /* slab according to umem_findslab() */
    umem_bufctl_t   *ump_bufctl;    /* bufctl */
} umem_abort_info;

static void
copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
    uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
    uint64_t *buf = buf_arg;

    while (buf < bufend)
        *buf++ = pattern;
}

static void *
verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
    uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
    uint64_t *buf;

    for (buf = buf_arg; buf < bufend; buf++)
        if (*buf != pattern)
            return (buf);
    return (NULL);
}

static void *
verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
{
    uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
    uint64_t *buf;

    for (buf = buf_arg; buf < bufend; buf++) {
        if (*buf != old) {
            copy_pattern(old, buf_arg,
                (char *)buf - (char *)buf_arg);
            return (buf);
        }
        *buf = new;
    }

    return (NULL);
}

void
umem_cache_applyall(void (*func)(umem_cache_t *))
{
    umem_cache_t *cp;

    (void) mutex_lock(&umem_cache_lock);
    for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
        cp = cp->cache_next)
        func(cp);
    (void) mutex_unlock(&umem_cache_lock);
}

static void
umem_add_update_unlocked(umem_cache_t *cp, int flags)
{
    umem_cache_t *cnext, *cprev;

    flags &= ~UMU_ACTIVE;

    if (!flags)
        return;

    if (cp->cache_uflags & UMU_ACTIVE) {
        cp->cache_uflags |= flags;
    } else {
        if (cp->cache_unext != NULL) {
            ASSERT(cp->cache_uflags != 0);
            cp->cache_uflags |= flags;
        } else {
            ASSERT(cp->cache_uflags == 0);
            cp->cache_uflags = flags;
            cp->cache_unext = cnext = &umem_null_cache;
            cp->cache_uprev = cprev = umem_null_cache.cache_uprev;
            cnext->cache_uprev = cp;
            cprev->cache_unext = cp;
        }
    }
}

static void
umem_add_update(umem_cache_t *cp, int flags)
{
    (void) mutex_lock(&umem_update_lock);

    umem_add_update_unlocked(cp, flags);

    if (!IN_UPDATE())
        (void) cond_broadcast(&umem_update_cv);

    (void) mutex_unlock(&umem_update_lock);
}

/*
 * Remove a cache from the update list, waiting for any in-progress work to
 * complete first.
 */
static void
umem_remove_updates(umem_cache_t *cp)
{
    (void) mutex_lock(&umem_update_lock);

    /*
     * Get it out of the active state
     */
    while (cp->cache_uflags & UMU_ACTIVE) {
        ASSERT(cp->cache_unext == NULL);

        cp->cache_uflags |= UMU_NOTIFY;

        /*
         * Make sure the update state is sane before we wait
         */
        ASSERT(umem_update_thr != 0 || umem_st_update_thr != 0);
        ASSERT(umem_update_thr != thr_self() &&
            umem_st_update_thr != thr_self());

        (void) _cond_wait(&umem_update_cv, &umem_update_lock);
    }
    /*
     * Get it out of the Work Requested state
     */
    if (cp->cache_unext != NULL) {
        cp->cache_uprev->cache_unext = cp->cache_unext;
        cp->cache_unext->cache_uprev = cp->cache_uprev;
        cp->cache_uprev = cp->cache_unext = NULL;
        cp->cache_uflags = 0;
    }
    /*
     * Make sure it is in the Inactive state
     */
    ASSERT(cp->cache_unext == NULL && cp->cache_uflags == 0);
    (void) mutex_unlock(&umem_update_lock);
}

static void
umem_updateall(int flags)
{
    umem_cache_t *cp;

    /*
     * NOTE: To prevent deadlock, umem_cache_lock is always acquired first.
     *
     * (umem_add_update is called from things run via umem_cache_applyall)
     */
    (void) mutex_lock(&umem_cache_lock);
    (void) mutex_lock(&umem_update_lock);

    for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
        cp = cp->cache_next)
        umem_add_update_unlocked(cp, flags);

    if (!IN_UPDATE())
        (void) cond_broadcast(&umem_update_cv);

    (void) mutex_unlock(&umem_update_lock);
    (void) mutex_unlock(&umem_cache_lock);
}

/*
 * Debugging support.  Given a buffer address, find its slab.
 */
static umem_slab_t *
umem_findslab(umem_cache_t *cp, void *buf)
{
    umem_slab_t *sp;

    (void) mutex_lock(&cp->cache_lock);
    for (sp = cp->cache_nullslab.slab_next;
        sp != &cp->cache_nullslab; sp = sp->slab_next) {
        if (UMEM_SLAB_MEMBER(sp, buf)) {
            (void) mutex_unlock(&cp->cache_lock);
            return (sp);
        }
    }
    (void) mutex_unlock(&cp->cache_lock);

    return (NULL);
}

static void
umem_error(int error, umem_cache_t *cparg, void *bufarg)
{
    umem_buftag_t *btp = NULL;
    umem_bufctl_t *bcp = NULL;
    umem_cache_t *cp = cparg;
    umem_slab_t *sp;
    uint64_t *off;
    void *buf = bufarg;

    int old_logging = umem_logging;

    umem_logging = 0;   /* stop logging when a bad thing happens */

    umem_abort_info.ump_timestamp = gethrtime();

    sp = umem_findslab(cp, buf);
    if (sp == NULL) {
        for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
            cp = cp->cache_prev) {
            if ((sp = umem_findslab(cp, buf)) != NULL)
                break;
        }
    }

    if (sp == NULL) {
        cp = NULL;
        error = UMERR_BADADDR;
    } else {
        if (cp != cparg)
            error = UMERR_BADCACHE;
        else
            buf = (char *)bufarg - ((uintptr_t)bufarg -
                (uintptr_t)sp->slab_base) % cp->cache_chunksize;
        if (buf != bufarg)
            error = UMERR_BADBASE;
        if (cp->cache_flags & UMF_BUFTAG)
            btp = UMEM_BUFTAG(cp, buf);
        if (cp->cache_flags & UMF_HASH) {
            (void) mutex_lock(&cp->cache_lock);
            for (bcp = *UMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
                if (bcp->bc_addr == buf)
                    break;
            (void) mutex_unlock(&cp->cache_lock);
            if (bcp == NULL && btp != NULL)
                bcp = btp->bt_bufctl;
            if (umem_findslab(cp->cache_bufctl_cache, bcp) ==
                NULL || P2PHASE((uintptr_t)bcp, UMEM_ALIGN) ||
                bcp->bc_addr != buf) {
                error = UMERR_BADBUFCTL;
                bcp = NULL;
            }
        }
    }

    umem_abort_info.ump_error = error;
    umem_abort_info.ump_buffer = bufarg;
    umem_abort_info.ump_realbuf = buf;
    umem_abort_info.ump_cache = cparg;
    umem_abort_info.ump_realcache = cp;
    umem_abort_info.ump_slab = sp;
    umem_abort_info.ump_bufctl = bcp;

    umem_printf("umem allocator: ");

    switch (error) {

    case UMERR_MODIFIED:
        umem_printf("buffer modified after being freed\n");
        off = verify_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
        if (off == NULL)        /* shouldn't happen */
            off = buf;
        umem_printf("modification occurred at offset 0x%lx "
            "(0x%llx replaced by 0x%llx)\n",
            (uintptr_t)off - (uintptr_t)buf,
            (longlong_t)UMEM_FREE_PATTERN, (longlong_t)*off);
        break;

    case UMERR_REDZONE:
        umem_printf("redzone violation: write past end of buffer\n");
        break;

    case UMERR_BADADDR:
        umem_printf("invalid free: buffer not in cache\n");
        break;

    case UMERR_DUPFREE:
        umem_printf("duplicate free: buffer freed twice\n");
        break;

    case UMERR_BADBUFTAG:
        umem_printf("boundary tag corrupted\n");
        umem_printf("bcp ^ bxstat = %lx, should be %lx\n",
            (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
            UMEM_BUFTAG_FREE);
        break;

    case UMERR_BADBUFCTL:
        umem_printf("bufctl corrupted\n");
        break;

    case UMERR_BADCACHE:
        umem_printf("buffer freed to wrong cache\n");
        umem_printf("buffer was allocated from %s,\n", cp->cache_name);
        umem_printf("caller attempting free to %s.\n",
            cparg->cache_name);
        break;

    case UMERR_BADSIZE:
        umem_printf("bad free: free size (%u) != alloc size (%u)\n",
            UMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
            UMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
        break;

    case UMERR_BADBASE:
        umem_printf("bad free: free address (%p) != alloc address "
            "(%p)\n", bufarg, buf);
        break;
    }

    umem_printf("buffer=%p  bufctl=%p  cache: %s\n",
        bufarg, (void *)bcp, cparg->cache_name);

    if (bcp != NULL && (cp->cache_flags & UMF_AUDIT) &&
        error != UMERR_BADBUFCTL) {
        int d;
        timespec_t ts;
        hrtime_t diff;
        umem_bufctl_audit_t *bcap = (umem_bufctl_audit_t *)bcp;

        diff = umem_abort_info.ump_timestamp - bcap->bc_timestamp;
        ts.tv_sec = diff / NANOSEC;
        ts.tv_nsec = diff % NANOSEC;

        umem_printf("previous transaction on buffer %p:\n", buf);
        umem_printf("thread=%p  time=T-%ld.%09ld  slab=%p  cache: %s\n",
            (void *)(intptr_t)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
            (void *)sp, cp->cache_name);
        for (d = 0; d < MIN(bcap->bc_depth, umem_stack_depth); d++) {
            (void) print_sym((void *)bcap->bc_stack[d]);
            umem_printf("\n");
        }
    }

    umem_err_recoverable("umem: heap corruption detected");

    umem_logging = old_logging; /* resume logging */
}

void
umem_nofail_callback(umem_nofail_callback_t *cb)
{
    nofail_callback = cb;
}
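
/*
 * Example registration (a hedged usage sketch, not part of libumem
 * itself; the names my_retries and my_nofail are hypothetical):
 *
 *        static int my_retries;
 *
 *        static int
 *        my_nofail(void)
 *        {
 *            if (my_retries++ < 1)
 *                return (UMEM_CALLBACK_RETRY);
 *            return (UMEM_CALLBACK_EXIT(1));
 *        }
 *
 *        ...
 *        umem_nofail_callback(my_nofail);
 *        buf = umem_alloc(sz, UMEM_NOFAIL);
 *
 * umem_alloc_retry(), below, invokes the callback and enforces the
 * "only one thread calls exit(2)" rule.
 */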

static int
umem_alloc_retry(umem_cache_t *cp, int umflag)
{
    if (cp == &umem_null_cache) {
        if (umem_init())
            return (1);         /* retry */
        /*
         * Initialization failed.  Do normal failure processing.
         */
    }
    if (umflag & UMEM_NOFAIL) {
        int def_result = UMEM_CALLBACK_EXIT(255);
        int result = def_result;
        umem_nofail_callback_t *callback = nofail_callback;

        if (callback != NULL)
            result = callback();

        if (result == UMEM_CALLBACK_RETRY)
            return (1);

        if ((result & ~0xFF) != UMEM_CALLBACK_EXIT(0)) {
            log_message("nofail callback returned %x\n", result);
            result = def_result;
        }

        /*
         * only one thread will call exit
         */
        if (umem_nofail_exit_thr == thr_self())
            umem_panic("recursive UMEM_CALLBACK_EXIT()\n");

        (void) mutex_lock(&umem_nofail_exit_lock);
        umem_nofail_exit_thr = thr_self();
        exit(result & 0xFF);
        /*NOTREACHED*/
    }
    return (0);
}

static umem_log_header_t *
umem_log_init(size_t logsize)
{
    umem_log_header_t *lhp;
    int nchunks = 4 * umem_max_ncpus;
    size_t lhsize = offsetof(umem_log_header_t, lh_cpu[umem_max_ncpus]);
    int i;

    if (logsize == 0)
        return (NULL);

    /*
     * Make sure that lhp->lh_cpu[] is nicely aligned
     * to prevent false sharing of cache lines.
     */
    lhsize = P2ROUNDUP(lhsize, UMEM_ALIGN);
    lhp = vmem_xalloc(umem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
        NULL, NULL, VM_NOSLEEP);
    if (lhp == NULL)
        goto fail;

    bzero(lhp, lhsize);

    (void) mutex_init(&lhp->lh_lock, USYNC_THREAD, NULL);
    lhp->lh_nchunks = nchunks;
    lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks, PAGESIZE);
    if (lhp->lh_chunksize == 0)
        lhp->lh_chunksize = PAGESIZE;

    lhp->lh_base = vmem_alloc(umem_log_arena,
        lhp->lh_chunksize * nchunks, VM_NOSLEEP);
    if (lhp->lh_base == NULL)
        goto fail;

    lhp->lh_free = vmem_alloc(umem_log_arena,
        nchunks * sizeof (int), VM_NOSLEEP);
    if (lhp->lh_free == NULL)
        goto fail;

    bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);

    for (i = 0; i < umem_max_ncpus; i++) {
        umem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
        (void) mutex_init(&clhp->clh_lock, USYNC_THREAD, NULL);
        clhp->clh_chunk = i;
    }

    for (i = umem_max_ncpus; i < nchunks; i++)
        lhp->lh_free[i] = i;

    lhp->lh_head = umem_max_ncpus;
    lhp->lh_tail = 0;

    return (lhp);

fail:
    if (lhp != NULL) {
        if (lhp->lh_base != NULL)
            vmem_free(umem_log_arena, lhp->lh_base,
                lhp->lh_chunksize * nchunks);

        vmem_xfree(umem_log_arena, lhp, lhsize);
    }
    return (NULL);
}
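
/*
 * For example (assuming umem_max_ncpus is 16 and PAGESIZE is 8192), a
 * 1M log is carved into 4 * 16 = 64 chunks of P2ROUNDUP(1M / 64, 8192)
 * = 16K each; chunks 0 through 15 start out owned by the per-CPU
 * headers, and chunks 16 through 63 populate the free list.
 */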

static void *
umem_log_enter(umem_log_header_t *lhp, void *data, size_t size)
{
    void *logspace;
    umem_cpu_log_header_t *clhp;

    if (lhp == NULL || umem_logging == 0)
        return (NULL);

    clhp = &lhp->lh_cpu[CPU(umem_cpu_mask)->cpu_number];

    (void) mutex_lock(&clhp->clh_lock);
    clhp->clh_hits++;
    if (size > clhp->clh_avail) {
        (void) mutex_lock(&lhp->lh_lock);
        lhp->lh_hits++;
        lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
        lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
        clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
        lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
        clhp->clh_current = lhp->lh_base +
            clhp->clh_chunk * lhp->lh_chunksize;
        clhp->clh_avail = lhp->lh_chunksize;
        if (size > lhp->lh_chunksize)
            size = lhp->lh_chunksize;
        (void) mutex_unlock(&lhp->lh_lock);
    }
    logspace = clhp->clh_current;
    clhp->clh_current += size;
    clhp->clh_avail -= size;
    bcopy(data, logspace, size);
    (void) mutex_unlock(&clhp->clh_lock);
    return (logspace);
}

#define UMEM_AUDIT(lp, cp, bcp)                                         \
{                                                                       \
    umem_bufctl_audit_t *_bcp = (umem_bufctl_audit_t *)(bcp);           \
    _bcp->bc_timestamp = gethrtime();                                   \
    _bcp->bc_thread = thr_self();                                       \
    _bcp->bc_depth = getpcstack(_bcp->bc_stack, umem_stack_depth,       \
        (cp != NULL) && (cp->cache_flags & UMF_CHECKSIGNAL));           \
    _bcp->bc_lastlog = umem_log_enter((lp), _bcp,                       \
        UMEM_BUFCTL_AUDIT_SIZE);                                        \
}

static void
umem_log_event(umem_log_header_t *lp, umem_cache_t *cp,
    umem_slab_t *sp, void *addr)
{
    umem_bufctl_audit_t *bcp;
    UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

    bzero(bcp, UMEM_BUFCTL_AUDIT_SIZE);
    bcp->bc_addr = addr;
    bcp->bc_slab = sp;
    bcp->bc_cache = cp;
    UMEM_AUDIT(lp, cp, bcp);
}

/*
 * Create a new slab for cache cp.
 */
static umem_slab_t *
umem_slab_create(umem_cache_t *cp, int umflag)
{
    size_t slabsize = cp->cache_slabsize;
    size_t chunksize = cp->cache_chunksize;
    int cache_flags = cp->cache_flags;
    size_t color, chunks;
    char *buf, *slab;
    umem_slab_t *sp;
    umem_bufctl_t *bcp;
    vmem_t *vmp = cp->cache_arena;

    color = cp->cache_color + cp->cache_align;
    if (color > cp->cache_maxcolor)
        color = cp->cache_mincolor;
    cp->cache_color = color;

    slab = vmem_alloc(vmp, slabsize, UMEM_VMFLAGS(umflag));

    if (slab == NULL)
        goto vmem_alloc_failure;

    ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);

    if (!(cp->cache_cflags & UMC_NOTOUCH) &&
        (cp->cache_flags & UMF_DEADBEEF))
        copy_pattern(UMEM_UNINITIALIZED_PATTERN, slab, slabsize);

    if (cache_flags & UMF_HASH) {
        if ((sp = _umem_cache_alloc(umem_slab_cache, umflag)) == NULL)
            goto slab_alloc_failure;
        chunks = (slabsize - color) / chunksize;
    } else {
        sp = UMEM_SLAB(cp, slab);
        chunks = (slabsize - sizeof (umem_slab_t) - color) / chunksize;
    }

    sp->slab_cache = cp;
    sp->slab_head = NULL;
    sp->slab_refcnt = 0;
    sp->slab_base = buf = slab + color;
    sp->slab_chunks = chunks;

    ASSERT(chunks > 0);
    while (chunks-- != 0) {
        if (cache_flags & UMF_HASH) {
            bcp = _umem_cache_alloc(cp->cache_bufctl_cache, umflag);
            if (bcp == NULL)
                goto bufctl_alloc_failure;
            if (cache_flags & UMF_AUDIT) {
                umem_bufctl_audit_t *bcap =
                    (umem_bufctl_audit_t *)bcp;
                bzero(bcap, UMEM_BUFCTL_AUDIT_SIZE);
                bcap->bc_cache = cp;
            }
            bcp->bc_addr = buf;
            bcp->bc_slab = sp;
        } else {
            bcp = UMEM_BUFCTL(cp, buf);
        }
        if (cache_flags & UMF_BUFTAG) {
            umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
            btp->bt_redzone = UMEM_REDZONE_PATTERN;
            btp->bt_bufctl = bcp;
            btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
            if (cache_flags & UMF_DEADBEEF) {
                copy_pattern(UMEM_FREE_PATTERN, buf,
                    cp->cache_verify);
            }
        }
        bcp->bc_next = sp->slab_head;
        sp->slab_head = bcp;
        buf += chunksize;
    }

    umem_log_event(umem_slab_log, cp, sp, slab);

    return (sp);

bufctl_alloc_failure:

    while ((bcp = sp->slab_head) != NULL) {
        sp->slab_head = bcp->bc_next;
        _umem_cache_free(cp->cache_bufctl_cache, bcp);
    }
    _umem_cache_free(umem_slab_cache, sp);

slab_alloc_failure:

    vmem_free(vmp, slab, slabsize);

vmem_alloc_failure:

    umem_log_event(umem_failure_log, cp, NULL, NULL);
    atomic_add_64(&cp->cache_alloc_fail, 1);

    return (NULL);
}
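
/*
 * For a cache without UMF_HASH, the slab just created is laid out
 * roughly as follows (a sketch; the buftags within each chunk exist
 * only under UMF_BUFTAG):
 *
 *        +-------+---------+---------+-- ... --+-------------+
 *        | color | chunk 0 | chunk 1 |   ...   | umem_slab_t |
 *        +-------+---------+---------+-- ... --+-------------+
 *        ^ slab                                ^ UMEM_SLAB(cp, slab)
 *
 * which is why the non-hash case computes chunks as
 * (slabsize - sizeof (umem_slab_t) - color) / chunksize.  UMF_HASH
 * caches keep their umem_slab_t and bufctls out-of-line, so the whole
 * slab is available for chunks.
 */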

/*
 * Destroy a slab.
 */
static void
umem_slab_destroy(umem_cache_t *cp, umem_slab_t *sp)
{
    vmem_t *vmp = cp->cache_arena;
    void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);

    if (cp->cache_flags & UMF_HASH) {
        umem_bufctl_t *bcp;
        while ((bcp = sp->slab_head) != NULL) {
            sp->slab_head = bcp->bc_next;
            _umem_cache_free(cp->cache_bufctl_cache, bcp);
        }
        _umem_cache_free(umem_slab_cache, sp);
    }
    vmem_free(vmp, slab, cp->cache_slabsize);
}

/*
 * Allocate a raw (unconstructed) buffer from cp's slab layer.
 */
static void *
umem_slab_alloc(umem_cache_t *cp, int umflag)
{
    umem_bufctl_t *bcp, **hash_bucket;
    umem_slab_t *sp;
    void *buf;

    (void) mutex_lock(&cp->cache_lock);
    cp->cache_slab_alloc++;
    sp = cp->cache_freelist;
    ASSERT(sp->slab_cache == cp);
    if (sp->slab_head == NULL) {
        /*
         * The freelist is empty.  Create a new slab.
         */
        (void) mutex_unlock(&cp->cache_lock);
        if (cp == &umem_null_cache)
            return (NULL);
        if ((sp = umem_slab_create(cp, umflag)) == NULL)
            return (NULL);
        (void) mutex_lock(&cp->cache_lock);
        cp->cache_slab_create++;
        if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
            cp->cache_bufmax = cp->cache_buftotal;
        sp->slab_next = cp->cache_freelist;
        sp->slab_prev = cp->cache_freelist->slab_prev;
        sp->slab_next->slab_prev = sp;
        sp->slab_prev->slab_next = sp;
        cp->cache_freelist = sp;
    }

    sp->slab_refcnt++;
    ASSERT(sp->slab_refcnt <= sp->slab_chunks);

    /*
     * If we're taking the last buffer in the slab,
     * remove the slab from the cache's freelist.
     */
    bcp = sp->slab_head;
    if ((sp->slab_head = bcp->bc_next) == NULL) {
        cp->cache_freelist = sp->slab_next;
        ASSERT(sp->slab_refcnt == sp->slab_chunks);
    }

    if (cp->cache_flags & UMF_HASH) {
        /*
         * Add buffer to allocated-address hash table.
         */
        buf = bcp->bc_addr;
        hash_bucket = UMEM_HASH(cp, buf);
        bcp->bc_next = *hash_bucket;
        *hash_bucket = bcp;
        if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
            UMEM_AUDIT(umem_transaction_log, cp, bcp);
        }
    } else {
        buf = UMEM_BUF(cp, bcp);
    }

    ASSERT(UMEM_SLAB_MEMBER(sp, buf));

    (void) mutex_unlock(&cp->cache_lock);

    return (buf);
}

/*
 * Free a raw (unconstructed) buffer to cp's slab layer.
 */
static void
umem_slab_free(umem_cache_t *cp, void *buf)
{
    umem_slab_t *sp;
    umem_bufctl_t *bcp, **prev_bcpp;

    ASSERT(buf != NULL);

    (void) mutex_lock(&cp->cache_lock);
    cp->cache_slab_free++;

    if (cp->cache_flags & UMF_HASH) {
        /*
         * Look up buffer in allocated-address hash table.
         */
        prev_bcpp = UMEM_HASH(cp, buf);
        while ((bcp = *prev_bcpp) != NULL) {
            if (bcp->bc_addr == buf) {
                *prev_bcpp = bcp->bc_next;
                sp = bcp->bc_slab;
                break;
            }
            cp->cache_lookup_depth++;
            prev_bcpp = &bcp->bc_next;
        }
    } else {
        bcp = UMEM_BUFCTL(cp, buf);
        sp = UMEM_SLAB(cp, buf);
    }

    if (bcp == NULL || sp->slab_cache != cp || !UMEM_SLAB_MEMBER(sp, buf)) {
        (void) mutex_unlock(&cp->cache_lock);
        umem_error(UMERR_BADADDR, cp, buf);
        return;
    }

    if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
        if (cp->cache_flags & UMF_CONTENTS)
            ((umem_bufctl_audit_t *)bcp)->bc_contents =
                umem_log_enter(umem_content_log, buf,
                cp->cache_contents);
        UMEM_AUDIT(umem_transaction_log, cp, bcp);
    }

    /*
     * If this slab isn't currently on the freelist, put it there.
     */
    if (sp->slab_head == NULL) {
        ASSERT(sp->slab_refcnt == sp->slab_chunks);
        ASSERT(cp->cache_freelist != sp);
        sp->slab_next->slab_prev = sp->slab_prev;
        sp->slab_prev->slab_next = sp->slab_next;
        sp->slab_next = cp->cache_freelist;
        sp->slab_prev = cp->cache_freelist->slab_prev;
        sp->slab_next->slab_prev = sp;
        sp->slab_prev->slab_next = sp;
        cp->cache_freelist = sp;
    }

    bcp->bc_next = sp->slab_head;
    sp->slab_head = bcp;

    ASSERT(sp->slab_refcnt >= 1);
    if (--sp->slab_refcnt == 0) {
        /*
         * There are no outstanding allocations from this slab,
         * so we can reclaim the memory.
         */
        sp->slab_next->slab_prev = sp->slab_prev;
        sp->slab_prev->slab_next = sp->slab_next;
        if (sp == cp->cache_freelist)
            cp->cache_freelist = sp->slab_next;
        cp->cache_slab_destroy++;
        cp->cache_buftotal -= sp->slab_chunks;
        (void) mutex_unlock(&cp->cache_lock);
        umem_slab_destroy(cp, sp);
        return;
    }
    (void) mutex_unlock(&cp->cache_lock);
}

static int
umem_cache_alloc_debug(umem_cache_t *cp, void *buf, int umflag)
{
    umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
    umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl;
    uint32_t mtbf;
    int flags_nfatal;

    if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) {
        umem_error(UMERR_BADBUFTAG, cp, buf);
        return (-1);
    }

    btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_ALLOC;

    if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
        umem_error(UMERR_BADBUFCTL, cp, buf);
        return (-1);
    }

    btp->bt_redzone = UMEM_REDZONE_PATTERN;

    if (cp->cache_flags & UMF_DEADBEEF) {
        if (verify_and_copy_pattern(UMEM_FREE_PATTERN,
            UMEM_UNINITIALIZED_PATTERN, buf, cp->cache_verify)) {
            umem_error(UMERR_MODIFIED, cp, buf);
            return (-1);
        }
    }

    if ((mtbf = umem_mtbf | cp->cache_mtbf) != 0 &&
        gethrtime() % mtbf == 0 &&
        (umflag & (UMEM_FATAL_FLAGS)) == 0) {
        umem_log_event(umem_failure_log, cp, NULL, NULL);
    } else {
        mtbf = 0;
    }

    /*
     * We do not pass fatal flags on to the constructor.  This prevents
     * leaking buffers in the event of a subordinate constructor failing.
     */
    flags_nfatal = UMEM_DEFAULT;
    if (mtbf || (cp->cache_constructor != NULL &&
        cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0)) {
        atomic_add_64(&cp->cache_alloc_fail, 1);
        btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
        copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
        umem_slab_free(cp, buf);
        return (-1);
    }

    if (cp->cache_flags & UMF_AUDIT) {
        UMEM_AUDIT(umem_transaction_log, cp, bcp);
    }

    return (0);
}
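
/*
 * A note on the bxstat checks above and below: a buftag stores
 * bt_bufctl ^ state, so for an intact buffer
 *
 *        btp->bt_bufctl ^ btp->bt_bxstat == UMEM_BUFTAG_FREE (or _ALLOC),
 *
 * and corruption of either field shows up as a mismatch -- this is
 * exactly what the UMERR_BADBUFTAG message in umem_error() prints.
 */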

static int
umem_cache_free_debug(umem_cache_t *cp, void *buf)
{
    umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
    umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl;
    umem_slab_t *sp;

    if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_ALLOC)) {
        if (btp->bt_bxstat == ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) {
            umem_error(UMERR_DUPFREE, cp, buf);
            return (-1);
        }
        sp = umem_findslab(cp, buf);
        if (sp == NULL || sp->slab_cache != cp)
            umem_error(UMERR_BADADDR, cp, buf);
        else
            umem_error(UMERR_REDZONE, cp, buf);
        return (-1);
    }

    btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;

    if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
        umem_error(UMERR_BADBUFCTL, cp, buf);
        return (-1);
    }

    if (btp->bt_redzone != UMEM_REDZONE_PATTERN) {
        umem_error(UMERR_REDZONE, cp, buf);
        return (-1);
    }

    if (cp->cache_flags & UMF_AUDIT) {
        if (cp->cache_flags & UMF_CONTENTS)
            bcp->bc_contents = umem_log_enter(umem_content_log,
                buf, cp->cache_contents);
        UMEM_AUDIT(umem_transaction_log, cp, bcp);
    }

    if (cp->cache_destructor != NULL)
        cp->cache_destructor(buf, cp->cache_private);

    if (cp->cache_flags & UMF_DEADBEEF)
        copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);

    return (0);
}

/*
 * Free each object in magazine mp to cp's slab layer, and free mp itself.
 */
static void
umem_magazine_destroy(umem_cache_t *cp, umem_magazine_t *mp, int nrounds)
{
    int round;

    ASSERT(cp->cache_next == NULL || IN_UPDATE());

    for (round = 0; round < nrounds; round++) {
        void *buf = mp->mag_round[round];

        if ((cp->cache_flags & UMF_DEADBEEF) &&
            verify_pattern(UMEM_FREE_PATTERN, buf,
            cp->cache_verify) != NULL) {
            umem_error(UMERR_MODIFIED, cp, buf);
            continue;
        }

        if (!(cp->cache_flags & UMF_BUFTAG) &&
            cp->cache_destructor != NULL)
            cp->cache_destructor(buf, cp->cache_private);

        umem_slab_free(cp, buf);
    }
    ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
    _umem_cache_free(cp->cache_magtype->mt_cache, mp);
}

/*
 * Allocate a magazine from the depot.
 */
static umem_magazine_t *
umem_depot_alloc(umem_cache_t *cp, umem_maglist_t *mlp)
{
    umem_magazine_t *mp;

    /*
     * If we can't get the depot lock without contention,
     * update our contention count.  We use the depot
     * contention rate to determine whether we need to
     * increase the magazine size for better scalability.
     */
    if (mutex_trylock(&cp->cache_depot_lock) != 0) {
        (void) mutex_lock(&cp->cache_depot_lock);
        cp->cache_depot_contention++;
    }

    if ((mp = mlp->ml_list) != NULL) {
        ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
        mlp->ml_list = mp->mag_next;
        if (--mlp->ml_total < mlp->ml_min)
            mlp->ml_min = mlp->ml_total;
        mlp->ml_alloc++;
    }

    (void) mutex_unlock(&cp->cache_depot_lock);

    return (mp);
}

/*
 * Free a magazine to the depot.
 */
static void
umem_depot_free(umem_cache_t *cp, umem_maglist_t *mlp, umem_magazine_t *mp)
{
    (void) mutex_lock(&cp->cache_depot_lock);
    ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
    mp->mag_next = mlp->ml_list;
    mlp->ml_list = mp;
    mlp->ml_total++;
    (void) mutex_unlock(&cp->cache_depot_lock);
}

/*
 * Update the working set statistics for cp's depot.
 */
static void
umem_depot_ws_update(umem_cache_t *cp)
{
    (void) mutex_lock(&cp->cache_depot_lock);
    cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
    cp->cache_full.ml_min = cp->cache_full.ml_total;
    cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
    cp->cache_empty.ml_min = cp->cache_empty.ml_total;
    (void) mutex_unlock(&cp->cache_depot_lock);
}

/*
 * Reap all magazines that have fallen out of the depot's working set.
 */
static void
umem_depot_ws_reap(umem_cache_t *cp)
{
    long reap;
    umem_magazine_t *mp;

    ASSERT(cp->cache_next == NULL || IN_REAP());

    reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
    while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_full)) != NULL)
        umem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);

    reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
    while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_empty)) != NULL)
        umem_magazine_destroy(cp, mp, 0);
}

static void
umem_cpu_reload(umem_cpu_cache_t *ccp, umem_magazine_t *mp, int rounds)
{
    ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
        (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
    ASSERT(ccp->cc_magsize > 0);

    ccp->cc_ploaded = ccp->cc_loaded;
    ccp->cc_prounds = ccp->cc_rounds;
    ccp->cc_loaded = mp;
    ccp->cc_rounds = rounds;
}
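
/*
 * The allocation and free fast paths below each walk the same hierarchy
 * in order (a summary of the loops that follow):
 *
 *        loaded magazine -> previously loaded magazine -> depot -> slab
 *
 * with the depot supplying full magazines on the allocation path and
 * empty magazines on the free path.
 */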

/*
 * Allocate a constructed object from cache cp.
 */
#pragma weak umem_cache_alloc = _umem_cache_alloc
void *
_umem_cache_alloc(umem_cache_t *cp, int umflag)
{
    umem_cpu_cache_t *ccp;
    umem_magazine_t *fmp;
    void *buf;
    int flags_nfatal;

retry:
    ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
    (void) mutex_lock(&ccp->cc_lock);
    for (;;) {
        /*
         * If there's an object available in the current CPU's
         * loaded magazine, just take it and return.
         */
        if (ccp->cc_rounds > 0) {
            buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
            ccp->cc_alloc++;
            (void) mutex_unlock(&ccp->cc_lock);
            if ((ccp->cc_flags & UMF_BUFTAG) &&
                umem_cache_alloc_debug(cp, buf, umflag) == -1) {
                if (umem_alloc_retry(cp, umflag)) {
                    goto retry;
                }

                return (NULL);
            }
            return (buf);
        }

        /*
         * The loaded magazine is empty.  If the previously loaded
         * magazine was full, exchange them and try again.
         */
        if (ccp->cc_prounds > 0) {
            umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
            continue;
        }

        /*
         * If the magazine layer is disabled, break out now.
         */
        if (ccp->cc_magsize == 0)
            break;

        /*
         * Try to get a full magazine from the depot.
         */
        fmp = umem_depot_alloc(cp, &cp->cache_full);
        if (fmp != NULL) {
            if (ccp->cc_ploaded != NULL)
                umem_depot_free(cp, &cp->cache_empty,
                    ccp->cc_ploaded);
            umem_cpu_reload(ccp, fmp, ccp->cc_magsize);
            continue;
        }

        /*
         * There are no full magazines in the depot,
         * so fall through to the slab layer.
         */
        break;
    }
    (void) mutex_unlock(&ccp->cc_lock);

    /*
     * We couldn't allocate a constructed object from the magazine layer,
     * so get a raw buffer from the slab layer and apply its constructor.
     */
    buf = umem_slab_alloc(cp, umflag);

    if (buf == NULL) {
        if (cp == &umem_null_cache)
            return (NULL);
        if (umem_alloc_retry(cp, umflag)) {
            goto retry;
        }

        return (NULL);
    }

    if (cp->cache_flags & UMF_BUFTAG) {
        /*
         * Let umem_cache_alloc_debug() apply the constructor for us.
         */
        if (umem_cache_alloc_debug(cp, buf, umflag) == -1) {
            if (umem_alloc_retry(cp, umflag)) {
                goto retry;
            }
            return (NULL);
        }
        return (buf);
    }

    /*
     * We do not pass fatal flags on to the constructor.  This prevents
     * leaking buffers in the event of a subordinate constructor failing.
     */
    flags_nfatal = UMEM_DEFAULT;
    if (cp->cache_constructor != NULL &&
        cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0) {
        atomic_add_64(&cp->cache_alloc_fail, 1);
        umem_slab_free(cp, buf);

        if (umem_alloc_retry(cp, umflag)) {
            goto retry;
        }
        return (NULL);
    }

    return (buf);
}

/*
 * Free a constructed object to cache cp.
 */
#pragma weak umem_cache_free = _umem_cache_free
void
_umem_cache_free(umem_cache_t *cp, void *buf)
{
    umem_cpu_cache_t *ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
    umem_magazine_t *emp;
    umem_magtype_t *mtp;

    if (ccp->cc_flags & UMF_BUFTAG)
        if (umem_cache_free_debug(cp, buf) == -1)
            return;

    (void) mutex_lock(&ccp->cc_lock);
    for (;;) {
        /*
         * If there's a slot available in the current CPU's
         * loaded magazine, just put the object there and return.
         */
        if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
            ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
            ccp->cc_free++;
            (void) mutex_unlock(&ccp->cc_lock);
            return;
        }

        /*
         * The loaded magazine is full.  If the previously loaded
         * magazine was empty, exchange them and try again.
         */
        if (ccp->cc_prounds == 0) {
            umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
            continue;
        }

        /*
         * If the magazine layer is disabled, break out now.
         */
        if (ccp->cc_magsize == 0)
            break;

        /*
         * Try to get an empty magazine from the depot.
         */
        emp = umem_depot_alloc(cp, &cp->cache_empty);
        if (emp != NULL) {
            if (ccp->cc_ploaded != NULL)
                umem_depot_free(cp, &cp->cache_full,
                    ccp->cc_ploaded);
            umem_cpu_reload(ccp, emp, 0);
            continue;
        }

        /*
         * There are no empty magazines in the depot,
         * so try to allocate a new one.  We must drop all locks
         * across umem_cache_alloc() because lower layers may
         * attempt to allocate from this cache.
         */
        mtp = cp->cache_magtype;
        (void) mutex_unlock(&ccp->cc_lock);
        emp = _umem_cache_alloc(mtp->mt_cache, UMEM_DEFAULT);
        (void) mutex_lock(&ccp->cc_lock);

        if (emp != NULL) {
            /*
             * We successfully allocated an empty magazine.
             * However, we had to drop ccp->cc_lock to do it,
             * so the cache's magazine size may have changed.
             * If so, free the magazine and try again.
             */
            if (ccp->cc_magsize != mtp->mt_magsize) {
                (void) mutex_unlock(&ccp->cc_lock);
                _umem_cache_free(mtp->mt_cache, emp);
                (void) mutex_lock(&ccp->cc_lock);
                continue;
            }

            /*
             * We got a magazine of the right size.  Add it to
             * the depot and try the whole dance again.
             */
            umem_depot_free(cp, &cp->cache_empty, emp);
            continue;
        }

        /*
         * We couldn't allocate an empty magazine,
         * so fall through to the slab layer.
         */
        break;
    }
    (void) mutex_unlock(&ccp->cc_lock);

    /*
     * We couldn't free our constructed object to the magazine layer,
     * so apply its destructor and free it to the slab layer.
     * Note that if UMF_BUFTAG is in effect, umem_cache_free_debug()
     * will have already applied the destructor.
     */
    if (!(cp->cache_flags & UMF_BUFTAG) && cp->cache_destructor != NULL)
        cp->cache_destructor(buf, cp->cache_private);

    umem_slab_free(cp, buf);
}

#pragma weak umem_zalloc = _umem_zalloc
void *
_umem_zalloc(size_t size, int umflag)
{
    size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
    void *buf;

retry:
    if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
        umem_cache_t *cp = umem_alloc_table[index];
        buf = _umem_cache_alloc(cp, umflag);
        if (buf != NULL) {
            if (cp->cache_flags & UMF_BUFTAG) {
                umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
                ((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE;
                ((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size);
            }
            bzero(buf, size);
        } else if (umem_alloc_retry(cp, umflag))
            goto retry;
    } else {
        buf = _umem_alloc(size, umflag);        /* handles failure */
        if (buf != NULL)
            bzero(buf, size);
    }
    return (buf);
}

#pragma weak umem_alloc = _umem_alloc
void *
_umem_alloc(size_t size, int umflag)
{
    size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
    void *buf;
umem_alloc_retry:
    if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
        umem_cache_t *cp = umem_alloc_table[index];
        buf = _umem_cache_alloc(cp, umflag);
        if ((cp->cache_flags & UMF_BUFTAG) && buf != NULL) {
            umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
            ((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE;
            ((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size);
        }
        if (buf == NULL && umem_alloc_retry(cp, umflag))
            goto umem_alloc_retry;
        return (buf);
    }
    if (size == 0)
        return (NULL);
    if (umem_oversize_arena == NULL) {
        if (umem_init())
            ASSERT(umem_oversize_arena != NULL);
        else
            return (NULL);
    }
    buf = vmem_alloc(umem_oversize_arena, size, UMEM_VMFLAGS(umflag));
    if (buf == NULL) {
        umem_log_event(umem_failure_log, NULL, NULL, (void *)size);
        if (umem_alloc_retry(NULL, umflag))
            goto umem_alloc_retry;
    }
    return (buf);
}
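
/*
 * A worked example of the buftag size encoding above (UMF_BUFTAG caches
 * only): umem_alloc(100, ...) is satisfied from a larger cache (112
 * bytes with the default table), stores UMEM_SIZE_ENCODE(100) in the
 * buftag, and writes UMEM_REDZONE_BYTE at buf[100].  A later
 * umem_free(buf, 96) fails the ip[1] comparison in _umem_free() below
 * and reports UMERR_BADSIZE, while umem_free(buf, 100) verifies the
 * redzone byte and proceeds.
 */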
#pragma weak umem_alloc_align = _umem_alloc_align
void *
_umem_alloc_align(size_t size, size_t align, int umflag)
{
	void *buf;

	if (size == 0)
		return (NULL);
	if ((align & (align - 1)) != 0)
		return (NULL);
	if (align < UMEM_ALIGN)
		align = UMEM_ALIGN;

umem_alloc_align_retry:
	if (umem_memalign_arena == NULL) {
		if (umem_init())
			ASSERT(umem_memalign_arena != NULL);
		else
			return (NULL);
	}
	buf = vmem_xalloc(umem_memalign_arena, size, align, 0, 0, NULL, NULL,
	    UMEM_VMFLAGS(umflag));
	if (buf == NULL) {
		umem_log_event(umem_failure_log, NULL, NULL, (void *)size);
		if (umem_alloc_retry(NULL, umflag))
			goto umem_alloc_align_retry;
	}
	return (buf);
}

#pragma weak umem_free = _umem_free
void
_umem_free(void *buf, size_t size)
{
	size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;

	if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
		umem_cache_t *cp = umem_alloc_table[index];
		if (cp->cache_flags & UMF_BUFTAG) {
			umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
			uint32_t *ip = (uint32_t *)btp;
			if (ip[1] != UMEM_SIZE_ENCODE(size)) {
				if (*(uint64_t *)buf == UMEM_FREE_PATTERN) {
					umem_error(UMERR_DUPFREE, cp, buf);
					return;
				}
				if (UMEM_SIZE_VALID(ip[1])) {
					ip[0] = UMEM_SIZE_ENCODE(size);
					umem_error(UMERR_BADSIZE, cp, buf);
				} else {
					umem_error(UMERR_REDZONE, cp, buf);
				}
				return;
			}
			if (((uint8_t *)buf)[size] != UMEM_REDZONE_BYTE) {
				umem_error(UMERR_REDZONE, cp, buf);
				return;
			}
			btp->bt_redzone = UMEM_REDZONE_PATTERN;
		}
		_umem_cache_free(cp, buf);
	} else {
		if (buf == NULL && size == 0)
			return;
		vmem_free(umem_oversize_arena, buf, size);
	}
}

#pragma weak umem_free_align = _umem_free_align
void
_umem_free_align(void *buf, size_t size)
{
	if (buf == NULL && size == 0)
		return;
	vmem_xfree(umem_memalign_arena, buf, size);
}

static void *
umem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	size_t realsize = size + vmp->vm_quantum;

	/*
	 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
	 * vm_quantum will cause integer wraparound.  Check for this, and
	 * blow off the firewall page in this case.  Note that such a
	 * giant allocation (the entire address space) can never be
	 * satisfied, so it will either fail immediately (VM_NOSLEEP)
	 * or sleep forever (VM_SLEEP).  Thus, there is no need for a
	 * corresponding check in umem_firewall_va_free().
	 */
	if (realsize < size)
		realsize = size;

	return (vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT));
}

static void
umem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
{
	vmem_free(vmp, addr, size + vmp->vm_quantum);
}

/*
 * Reclaim all unused memory from a cache.
 */
static void
umem_cache_reap(umem_cache_t *cp)
{
	/*
	 * Ask the cache's owner to free some memory if possible.
	 * The idea is to handle things like the inode cache, which
	 * typically sits on a bunch of memory that it doesn't truly
	 * *need*.  Reclaim policy is entirely up to the owner; this
	 * callback is just an advisory plea for help.
	 */
	if (cp->cache_reclaim != NULL)
		cp->cache_reclaim(cp->cache_private);

	umem_depot_ws_reap(cp);
}
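/*
 * Sketch of the kind of reclaim callback umem_cache_reap() invokes.  The
 * cache owner registers it via the reclaim argument to umem_cache_create();
 * the names below (my_freelist_trim, my_state_t, my_freelist_pop) are
 * hypothetical.  The callback receives the cache's private argument and
 * is free to release whatever it can spare:
 *
 *	static void
 *	my_freelist_trim(void *private)
 *	{
 *		my_state_t *sp = private;
 *
 *		(return cached-but-unused objects to their cache, so the
 *		 depot reap above can actually recover the memory)
 *		while (sp->cached_count > sp->cached_min)
 *			umem_cache_free(sp->cache, my_freelist_pop(sp));
 *	}
 */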
/*
 * Purge all magazines from a cache and set its magazine limit to zero.
 * All calls are serialized by being done by the update thread, except for
 * the final call from umem_cache_destroy().
 */
static void
umem_cache_magazine_purge(umem_cache_t *cp)
{
	umem_cpu_cache_t *ccp;
	umem_magazine_t *mp, *pmp;
	int rounds, prounds, cpu_seqid;

	ASSERT(cp->cache_next == NULL || IN_UPDATE());

	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
		ccp = &cp->cache_cpu[cpu_seqid];

		(void) mutex_lock(&ccp->cc_lock);
		mp = ccp->cc_loaded;
		pmp = ccp->cc_ploaded;
		rounds = ccp->cc_rounds;
		prounds = ccp->cc_prounds;
		ccp->cc_loaded = NULL;
		ccp->cc_ploaded = NULL;
		ccp->cc_rounds = -1;
		ccp->cc_prounds = -1;
		ccp->cc_magsize = 0;
		(void) mutex_unlock(&ccp->cc_lock);

		if (mp)
			umem_magazine_destroy(cp, mp, rounds);
		if (pmp)
			umem_magazine_destroy(cp, pmp, prounds);
	}

	/*
	 * Updating the working set statistics twice in a row has the
	 * effect of setting the working set size to zero, so everything
	 * is eligible for reaping.
	 */
	umem_depot_ws_update(cp);
	umem_depot_ws_update(cp);

	umem_depot_ws_reap(cp);
}

/*
 * Enable per-cpu magazines on a cache.
 */
static void
umem_cache_magazine_enable(umem_cache_t *cp)
{
	int cpu_seqid;

	if (cp->cache_flags & UMF_NOMAGAZINE)
		return;

	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
		umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
		(void) mutex_lock(&ccp->cc_lock);
		ccp->cc_magsize = cp->cache_magtype->mt_magsize;
		(void) mutex_unlock(&ccp->cc_lock);
	}
}

/*
 * Recompute a cache's magazine size.  The trade-off is that larger magazines
 * provide a higher transfer rate with the depot, while smaller magazines
 * reduce memory consumption.  Magazine resizing is an expensive operation;
 * it should not be done frequently.
 *
 * Changes to the magazine size are serialized by having only one thread,
 * the update thread, perform resizes.
 *
 * Note: at present this only grows the magazine size.  It might be useful
 * to allow shrinkage too.
 */
static void
umem_cache_magazine_resize(umem_cache_t *cp)
{
	umem_magtype_t *mtp = cp->cache_magtype;

	ASSERT(IN_UPDATE());

	if (cp->cache_chunksize < mtp->mt_maxbuf) {
		umem_cache_magazine_purge(cp);
		(void) mutex_lock(&cp->cache_depot_lock);
		cp->cache_magtype = ++mtp;
		cp->cache_depot_contention_prev =
		    cp->cache_depot_contention + INT_MAX;
		(void) mutex_unlock(&cp->cache_depot_lock);
		umem_cache_magazine_enable(cp);
	}
}

/*
 * Rescale a cache's hash table, so that the table size is roughly the
 * cache size.  We want the average lookup time to be extremely small.
 */
static void
umem_hash_rescale(umem_cache_t *cp)
{
	umem_bufctl_t **old_table, **new_table, *bcp;
	size_t old_size, new_size, h;

	ASSERT(IN_UPDATE());

	new_size = MAX(UMEM_HASH_INITIAL,
	    1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
	old_size = cp->cache_hash_mask + 1;

	if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
		return;

	new_table = vmem_alloc(umem_hash_arena, new_size * sizeof (void *),
	    VM_NOSLEEP);
	if (new_table == NULL)
		return;
	bzero(new_table, new_size * sizeof (void *));

	(void) mutex_lock(&cp->cache_lock);

	old_size = cp->cache_hash_mask + 1;
	old_table = cp->cache_hash_table;

	cp->cache_hash_mask = new_size - 1;
	cp->cache_hash_table = new_table;
	cp->cache_rescale++;

	for (h = 0; h < old_size; h++) {
		bcp = old_table[h];
		while (bcp != NULL) {
			void *addr = bcp->bc_addr;
			umem_bufctl_t *next_bcp = bcp->bc_next;
			umem_bufctl_t **hash_bucket = UMEM_HASH(cp, addr);
			bcp->bc_next = *hash_bucket;
			*hash_bucket = bcp;
			bcp = next_bcp;
		}
	}

	(void) mutex_unlock(&cp->cache_lock);

	vmem_free(umem_hash_arena, old_table, old_size * sizeof (void *));
}
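/*
 * Worked example of the sizing above (illustrative numbers): with
 * cache_buftotal = 1000, highbit(3 * 1000 + 4) = highbit(3004) = 12,
 * so new_size = 1 << (12 - 2) = 1024 -- about one hash chain per
 * buffer.  If the current table size were already 512 or 2048, the
 * "within a factor of two" test above would leave it alone; a rescale
 * only happens once the cache has drifted further than that from its
 * table size.
 */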
/*
 * Perform periodic maintenance on a cache: hash rescaling,
 * depot working-set update, and magazine resizing.
 */
void
umem_cache_update(umem_cache_t *cp)
{
	int update_flags = 0;

	ASSERT(MUTEX_HELD(&umem_cache_lock));

	/*
	 * If the cache has become much larger or smaller than its hash table,
	 * fire off a request to rescale the hash table.
	 */
	(void) mutex_lock(&cp->cache_lock);

	if ((cp->cache_flags & UMF_HASH) &&
	    (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
	    (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
	    cp->cache_hash_mask > UMEM_HASH_INITIAL)))
		update_flags |= UMU_HASH_RESCALE;

	(void) mutex_unlock(&cp->cache_lock);

	/*
	 * Update the depot working set statistics.
	 */
	umem_depot_ws_update(cp);

	/*
	 * If there's a lot of contention in the depot,
	 * increase the magazine size.
	 */
	(void) mutex_lock(&cp->cache_depot_lock);

	if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
	    (int)(cp->cache_depot_contention -
	    cp->cache_depot_contention_prev) > umem_depot_contention)
		update_flags |= UMU_MAGAZINE_RESIZE;

	cp->cache_depot_contention_prev = cp->cache_depot_contention;

	(void) mutex_unlock(&cp->cache_depot_lock);

	if (update_flags)
		umem_add_update(cp, update_flags);
}
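/*
 * Worked example of the triggers above (illustrative numbers): with
 * cache_hash_mask = 63 (a 64-bucket table), UMU_HASH_RESCALE fires once
 * cache_buftotal exceeds 126 (more than twice the table size) or, when
 * the table is above UMEM_HASH_INITIAL, drops below 31 (less than half
 * of it).  Similarly, UMU_MAGAZINE_RESIZE fires only if the contention
 * count grew by more than umem_depot_contention since the previous
 * update pass.  The actual work happens later, in umem_process_updates().
 */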
/*
 * Runs all pending updates.
 *
 * The update lock must be held on entrance, and will be held on exit.
 */
void
umem_process_updates(void)
{
	ASSERT(MUTEX_HELD(&umem_update_lock));

	while (umem_null_cache.cache_unext != &umem_null_cache) {
		int notify = 0;
		umem_cache_t *cp = umem_null_cache.cache_unext;

		cp->cache_uprev->cache_unext = cp->cache_unext;
		cp->cache_unext->cache_uprev = cp->cache_uprev;
		cp->cache_uprev = cp->cache_unext = NULL;

		ASSERT(!(cp->cache_uflags & UMU_ACTIVE));

		while (cp->cache_uflags) {
			int uflags = (cp->cache_uflags |= UMU_ACTIVE);
			(void) mutex_unlock(&umem_update_lock);

			/*
			 * The order here is important.  Each step can speed up
			 * later steps.
			 */

			if (uflags & UMU_HASH_RESCALE)
				umem_hash_rescale(cp);

			if (uflags & UMU_MAGAZINE_RESIZE)
				umem_cache_magazine_resize(cp);

			if (uflags & UMU_REAP)
				umem_cache_reap(cp);

			(void) mutex_lock(&umem_update_lock);

			/*
			 * check if anyone has requested notification
			 */
			if (cp->cache_uflags & UMU_NOTIFY) {
				uflags |= UMU_NOTIFY;
				notify = 1;
			}
			cp->cache_uflags &= ~uflags;
		}
		if (notify)
			(void) cond_broadcast(&umem_update_cv);
	}
}

#ifndef UMEM_STANDALONE
static void
umem_st_update(void)
{
	ASSERT(MUTEX_HELD(&umem_update_lock));
	ASSERT(umem_update_thr == 0 && umem_st_update_thr == 0);

	umem_st_update_thr = thr_self();

	(void) mutex_unlock(&umem_update_lock);

	vmem_update(NULL);
	umem_cache_applyall(umem_cache_update);

	(void) mutex_lock(&umem_update_lock);

	umem_process_updates();	/* does all of the requested work */

	umem_reap_next = gethrtime() +
	    (hrtime_t)umem_reap_interval * NANOSEC;

	umem_reaping = UMEM_REAP_DONE;

	umem_st_update_thr = 0;
}
#endif

/*
 * Reclaim all unused memory from all caches.  Called from vmem when memory
 * gets tight.  Must be called with no locks held.
 *
 * This just requests a reap on all caches, and notifies the update thread.
 */
void
umem_reap(void)
{
#ifndef UMEM_STANDALONE
	extern int __nthreads(void);
#endif

	if (umem_ready != UMEM_READY || umem_reaping != UMEM_REAP_DONE ||
	    gethrtime() < umem_reap_next)
		return;

	(void) mutex_lock(&umem_update_lock);

	if (umem_reaping != UMEM_REAP_DONE || gethrtime() < umem_reap_next) {
		(void) mutex_unlock(&umem_update_lock);
		return;
	}
	umem_reaping = UMEM_REAP_ADDING;	/* lock out other reaps */

	(void) mutex_unlock(&umem_update_lock);

	umem_updateall(UMU_REAP);

	(void) mutex_lock(&umem_update_lock);

	umem_reaping = UMEM_REAP_ACTIVE;

	/* Standalone is single-threaded */
#ifndef UMEM_STANDALONE
	if (umem_update_thr == 0) {
		/*
		 * The update thread does not exist.  If the process is
		 * multi-threaded, create it.  If not, or the creation fails,
		 * do the update processing inline.
		 */
		ASSERT(umem_st_update_thr == 0);

		if (__nthreads() <= 1 || umem_create_update_thread() == 0)
			umem_st_update();
	}

	(void) cond_broadcast(&umem_update_cv);	/* wake up the update thread */
#endif

	(void) mutex_unlock(&umem_update_lock);
}
umem_cache_t *
umem_cache_create(
	char *name,		/* descriptive name for this cache */
	size_t bufsize,		/* size of the objects it manages */
	size_t align,		/* required object alignment */
	umem_constructor_t *constructor, /* object constructor */
	umem_destructor_t *destructor, /* object destructor */
	umem_reclaim_t *reclaim, /* memory reclaim callback */
	void *private,		/* pass-thru arg for constr/destr/reclaim */
	vmem_t *vmp,		/* vmem source for slab allocation */
	int cflags)		/* cache creation flags */
{
	int cpu_seqid;
	size_t chunksize;
	umem_cache_t *cp, *cnext, *cprev;
	umem_magtype_t *mtp;
	size_t csize;
	size_t phase;

	/*
	 * The init thread is allowed to create internal and quantum caches.
	 *
	 * Other threads must wait until initialization is complete.
	 */
	if (umem_init_thr == thr_self())
		ASSERT((cflags & (UMC_INTERNAL | UMC_QCACHE)) != 0);
	else {
		ASSERT(!(cflags & UMC_INTERNAL));
		if (umem_ready != UMEM_READY && umem_init() == 0) {
			errno = EAGAIN;
			return (NULL);
		}
	}

	csize = UMEM_CACHE_SIZE(umem_max_ncpus);
	phase = P2NPHASE(csize, UMEM_CPU_CACHE_SIZE);

	if (vmp == NULL)
		vmp = umem_default_arena;

	ASSERT(P2PHASE(phase, UMEM_ALIGN) == 0);

	/*
	 * Check that the arguments are reasonable
	 */
	if ((align & (align - 1)) != 0 || align > vmp->vm_quantum ||
	    ((cflags & UMC_NOHASH) && (cflags & UMC_NOTOUCH)) ||
	    name == NULL || bufsize == 0) {
		errno = EINVAL;
		return (NULL);
	}

	/*
	 * If align == 0, we set it to the minimum required alignment.
	 *
	 * If align < UMEM_ALIGN, we round it up to UMEM_ALIGN, unless
	 * UMC_NOTOUCH was passed.
	 */
	if (align == 0) {
		if (P2ROUNDUP(bufsize, UMEM_ALIGN) >= UMEM_SECOND_ALIGN)
			align = UMEM_SECOND_ALIGN;
		else
			align = UMEM_ALIGN;
	} else if (align < UMEM_ALIGN && (cflags & UMC_NOTOUCH) == 0)
		align = UMEM_ALIGN;

	/*
	 * Get a umem_cache structure.  We arrange that cp->cache_cpu[]
	 * is aligned on a UMEM_CPU_CACHE_SIZE boundary to prevent
	 * false sharing of per-CPU data.
	 */
	cp = vmem_xalloc(umem_cache_arena, csize, UMEM_CPU_CACHE_SIZE, phase,
	    0, NULL, NULL, VM_NOSLEEP);

	if (cp == NULL) {
		errno = EAGAIN;
		return (NULL);
	}

	bzero(cp, csize);

	(void) mutex_lock(&umem_flags_lock);
	if (umem_flags & UMF_RANDOMIZE)
		umem_flags = (((umem_flags | ~UMF_RANDOM) + 1) & UMF_RANDOM) |
		    UMF_RANDOMIZE;
	cp->cache_flags = umem_flags | (cflags & UMF_DEBUG);
	(void) mutex_unlock(&umem_flags_lock);

	/*
	 * Make sure all the various flags are reasonable.
	 */
	if (cp->cache_flags & UMF_LITE) {
		if (bufsize >= umem_lite_minsize &&
		    align <= umem_lite_maxalign &&
		    P2PHASE(bufsize, umem_lite_maxalign) != 0) {
			cp->cache_flags |= UMF_BUFTAG;
			cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);
		} else {
			cp->cache_flags &= ~UMF_DEBUG;
		}
	}

	if ((cflags & UMC_QCACHE) && (cp->cache_flags & UMF_AUDIT))
		cp->cache_flags |= UMF_NOMAGAZINE;

	if (cflags & UMC_NODEBUG)
		cp->cache_flags &= ~UMF_DEBUG;

	if (cflags & UMC_NOTOUCH)
		cp->cache_flags &= ~UMF_TOUCH;

	if (cflags & UMC_NOHASH)
		cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);

	if (cflags & UMC_NOMAGAZINE)
		cp->cache_flags |= UMF_NOMAGAZINE;

	if ((cp->cache_flags & UMF_AUDIT) && !(cflags & UMC_NOTOUCH))
		cp->cache_flags |= UMF_REDZONE;

	if ((cp->cache_flags & UMF_BUFTAG) && bufsize >= umem_minfirewall &&
	    !(cp->cache_flags & UMF_LITE) && !(cflags & UMC_NOHASH))
		cp->cache_flags |= UMF_FIREWALL;

	if (vmp != umem_default_arena || umem_firewall_arena == NULL)
		cp->cache_flags &= ~UMF_FIREWALL;

	if (cp->cache_flags & UMF_FIREWALL) {
		cp->cache_flags &= ~UMF_BUFTAG;
		cp->cache_flags |= UMF_NOMAGAZINE;
		ASSERT(vmp == umem_default_arena);
		vmp = umem_firewall_arena;
	}
	/*
	 * Set cache properties.
	 */
	(void) strncpy(cp->cache_name, name, sizeof (cp->cache_name) - 1);
	cp->cache_bufsize = bufsize;
	cp->cache_align = align;
	cp->cache_constructor = constructor;
	cp->cache_destructor = destructor;
	cp->cache_reclaim = reclaim;
	cp->cache_private = private;
	cp->cache_arena = vmp;
	cp->cache_cflags = cflags;
	cp->cache_cpu_mask = umem_cpu_mask;

	/*
	 * Determine the chunk size.
	 */
	chunksize = bufsize;

	if (align >= UMEM_ALIGN) {
		chunksize = P2ROUNDUP(chunksize, UMEM_ALIGN);
		cp->cache_bufctl = chunksize - UMEM_ALIGN;
	}

	if (cp->cache_flags & UMF_BUFTAG) {
		cp->cache_bufctl = chunksize;
		cp->cache_buftag = chunksize;
		chunksize += sizeof (umem_buftag_t);
	}

	if (cp->cache_flags & UMF_DEADBEEF) {
		cp->cache_verify = MIN(cp->cache_buftag, umem_maxverify);
		if (cp->cache_flags & UMF_LITE)
			cp->cache_verify = MIN(cp->cache_verify, UMEM_ALIGN);
	}

	cp->cache_contents = MIN(cp->cache_bufctl, umem_content_maxsave);

	cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);

	if (chunksize < bufsize) {
		errno = ENOMEM;
		goto fail;
	}

	/*
	 * Now that we know the chunk size, determine the optimal slab size.
	 */
	if (vmp == umem_firewall_arena) {
		cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
		cp->cache_mincolor = cp->cache_slabsize - chunksize;
		cp->cache_maxcolor = cp->cache_mincolor;
		cp->cache_flags |= UMF_HASH;
		ASSERT(!(cp->cache_flags & UMF_BUFTAG));
	} else if ((cflags & UMC_NOHASH) || (!(cflags & UMC_NOTOUCH) &&
	    !(cp->cache_flags & UMF_AUDIT) &&
	    chunksize < vmp->vm_quantum / UMEM_VOID_FRACTION)) {
		cp->cache_slabsize = vmp->vm_quantum;
		cp->cache_mincolor = 0;
		cp->cache_maxcolor =
		    (cp->cache_slabsize - sizeof (umem_slab_t)) % chunksize;

		if (chunksize + sizeof (umem_slab_t) > cp->cache_slabsize) {
			errno = EINVAL;
			goto fail;
		}
		ASSERT(!(cp->cache_flags & UMF_AUDIT));
	} else {
		size_t chunks, bestfit, waste, slabsize;
		size_t minwaste = LONG_MAX;

		for (chunks = 1; chunks <= UMEM_VOID_FRACTION; chunks++) {
			slabsize = P2ROUNDUP(chunksize * chunks,
			    vmp->vm_quantum);
			/*
			 * check for overflow
			 */
			if ((slabsize / chunks) < chunksize) {
				errno = ENOMEM;
				goto fail;
			}
			chunks = slabsize / chunksize;
			waste = (slabsize % chunksize) / chunks;
			if (waste < minwaste) {
				minwaste = waste;
				bestfit = slabsize;
			}
		}
		if (cflags & UMC_QCACHE)
			bestfit = MAX(1 << highbit(3 * vmp->vm_qcache_max), 64);
		cp->cache_slabsize = bestfit;
		cp->cache_mincolor = 0;
		cp->cache_maxcolor = bestfit % chunksize;
		cp->cache_flags |= UMF_HASH;
	}

	if (cp->cache_flags & UMF_HASH) {
		ASSERT(!(cflags & UMC_NOHASH));
		cp->cache_bufctl_cache = (cp->cache_flags & UMF_AUDIT) ?
		    umem_bufctl_audit_cache : umem_bufctl_cache;
	}

	if (cp->cache_maxcolor >= vmp->vm_quantum)
		cp->cache_maxcolor = vmp->vm_quantum - 1;

	cp->cache_color = cp->cache_mincolor;
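	/*
	 * Worked example of the best-fit loop above (illustrative numbers):
	 * with chunksize = 3000 and a 4096-byte quantum, chunks = 1 gives
	 * slabsize 4096 with waste (4096 % 3000) / 1 = 1096; chunks = 3
	 * rounds 9000 up to 12288, which actually holds 4 chunks with
	 * waste (12288 % 3000) / 4 = 72, so 12288 becomes the best fit.
	 * (Note that the loop reassigns 'chunks' to slabsize / chunksize,
	 * so it can skip candidate counts that a rounded-up slab already
	 * covers.)
	 */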
	/*
	 * Initialize the rest of the slab layer.
	 */
	(void) mutex_init(&cp->cache_lock, USYNC_THREAD, NULL);

	cp->cache_freelist = &cp->cache_nullslab;
	cp->cache_nullslab.slab_cache = cp;
	cp->cache_nullslab.slab_refcnt = -1;
	cp->cache_nullslab.slab_next = &cp->cache_nullslab;
	cp->cache_nullslab.slab_prev = &cp->cache_nullslab;

	if (cp->cache_flags & UMF_HASH) {
		cp->cache_hash_table = vmem_alloc(umem_hash_arena,
		    UMEM_HASH_INITIAL * sizeof (void *), VM_NOSLEEP);
		if (cp->cache_hash_table == NULL) {
			errno = EAGAIN;
			goto fail_lock;
		}
		bzero(cp->cache_hash_table,
		    UMEM_HASH_INITIAL * sizeof (void *));
		cp->cache_hash_mask = UMEM_HASH_INITIAL - 1;
		cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
	}

	/*
	 * Initialize the depot.
	 */
	(void) mutex_init(&cp->cache_depot_lock, USYNC_THREAD, NULL);

	for (mtp = umem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
		continue;

	cp->cache_magtype = mtp;

	/*
	 * Initialize the CPU layer.
	 */
	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
		umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
		(void) mutex_init(&ccp->cc_lock, USYNC_THREAD, NULL);
		ccp->cc_flags = cp->cache_flags;
		ccp->cc_rounds = -1;
		ccp->cc_prounds = -1;
	}

	/*
	 * Add the cache to the global list.  This makes it visible
	 * to umem_update(), so the cache must be ready for business.
	 */
	(void) mutex_lock(&umem_cache_lock);
	cp->cache_next = cnext = &umem_null_cache;
	cp->cache_prev = cprev = umem_null_cache.cache_prev;
	cnext->cache_prev = cp;
	cprev->cache_next = cp;
	(void) mutex_unlock(&umem_cache_lock);

	if (umem_ready == UMEM_READY)
		umem_cache_magazine_enable(cp);

	return (cp);

fail_lock:
	(void) mutex_destroy(&cp->cache_lock);
fail:
	vmem_xfree(umem_cache_arena, cp, csize);
	return (NULL);
}
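/*
 * Example of a typical umem_cache_create() call, hedged: the object type,
 * callback names, and cache variable below are hypothetical, but the
 * constructor/destructor signatures match the ones this file invokes
 * (see _umem_cache_alloc() and _umem_cache_free() above).
 *
 *	typedef struct my_obj {
 *		mutex_t	mo_lock;
 *		int	mo_refcnt;
 *	} my_obj_t;
 *
 *	static int
 *	my_obj_construct(void *buf, void *private, int flags)
 *	{
 *		my_obj_t *op = buf;
 *
 *		(void) mutex_init(&op->mo_lock, USYNC_THREAD, NULL);
 *		op->mo_refcnt = 0;
 *		return (0);	(nonzero would fail the allocation)
 *	}
 *
 *	static void
 *	my_obj_destruct(void *buf, void *private)
 *	{
 *		(void) mutex_destroy(&((my_obj_t *)buf)->mo_lock);
 *	}
 *
 *	my_cache = umem_cache_create("my_obj_cache", sizeof (my_obj_t),
 *	    0, my_obj_construct, my_obj_destruct, NULL, NULL, NULL, 0);
 *
 * Passing NULL for vmp selects umem_default_arena, and align == 0 selects
 * the minimum required alignment, as described above.  The cache must be
 * emptied of all allocated objects before umem_cache_destroy() (below)
 * is called on it.
 */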
void
umem_cache_destroy(umem_cache_t *cp)
{
	int cpu_seqid;

	/*
	 * Remove the cache from the global cache list so that no new updates
	 * will be scheduled on its behalf, wait for any pending tasks to
	 * complete, purge the cache, and then destroy it.
	 */
	(void) mutex_lock(&umem_cache_lock);
	cp->cache_prev->cache_next = cp->cache_next;
	cp->cache_next->cache_prev = cp->cache_prev;
	cp->cache_prev = cp->cache_next = NULL;
	(void) mutex_unlock(&umem_cache_lock);

	umem_remove_updates(cp);

	umem_cache_magazine_purge(cp);

	(void) mutex_lock(&cp->cache_lock);
	if (cp->cache_buftotal != 0)
		log_message("umem_cache_destroy: '%s' (%p) not empty\n",
		    cp->cache_name, (void *)cp);
	cp->cache_reclaim = NULL;
	/*
	 * The cache is now dead.  There should be no further activity.
	 * We enforce this by setting land mines in the constructor and
	 * destructor routines that induce a segmentation fault if invoked.
	 */
	cp->cache_constructor = (umem_constructor_t *)1;
	cp->cache_destructor = (umem_destructor_t *)2;
	(void) mutex_unlock(&cp->cache_lock);

	if (cp->cache_hash_table != NULL)
		vmem_free(umem_hash_arena, cp->cache_hash_table,
		    (cp->cache_hash_mask + 1) * sizeof (void *));

	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++)
		(void) mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);

	(void) mutex_destroy(&cp->cache_depot_lock);
	(void) mutex_destroy(&cp->cache_lock);

	vmem_free(umem_cache_arena, cp, UMEM_CACHE_SIZE(umem_max_ncpus));
}

static int
umem_cache_init(void)
{
	int i;
	size_t size, max_size;
	umem_cache_t *cp;
	umem_magtype_t *mtp;
	char name[UMEM_CACHE_NAMELEN + 1];
	umem_cache_t *umem_alloc_caches[NUM_ALLOC_SIZES];

	for (i = 0; i < sizeof (umem_magtype) / sizeof (*mtp); i++) {
		mtp = &umem_magtype[i];
		(void) snprintf(name, sizeof (name), "umem_magazine_%d",
		    mtp->mt_magsize);
		mtp->mt_cache = umem_cache_create(name,
		    (mtp->mt_magsize + 1) * sizeof (void *),
		    mtp->mt_align, NULL, NULL, NULL, NULL,
		    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
		if (mtp->mt_cache == NULL)
			return (0);
	}

	umem_slab_cache = umem_cache_create("umem_slab_cache",
	    sizeof (umem_slab_t), 0, NULL, NULL, NULL, NULL,
	    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);

	if (umem_slab_cache == NULL)
		return (0);

	umem_bufctl_cache = umem_cache_create("umem_bufctl_cache",
	    sizeof (umem_bufctl_t), 0, NULL, NULL, NULL, NULL,
	    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);

	if (umem_bufctl_cache == NULL)
		return (0);

	/*
	 * The size of the umem_bufctl_audit structure depends upon
	 * umem_stack_depth.  See umem_impl.h for details on the size
	 * restrictions.
	 */
	size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth);
	max_size = UMEM_BUFCTL_AUDIT_MAX_SIZE;

	if (size > max_size) {			/* too large -- truncate */
		int max_frames = UMEM_MAX_STACK_DEPTH;

		ASSERT(UMEM_BUFCTL_AUDIT_SIZE_DEPTH(max_frames) <= max_size);

		umem_stack_depth = max_frames;
		size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth);
	}

	umem_bufctl_audit_cache = umem_cache_create("umem_bufctl_audit_cache",
	    size, 0, NULL, NULL, NULL, NULL, umem_internal_arena,
	    UMC_NOHASH | UMC_INTERNAL);

	if (umem_bufctl_audit_cache == NULL)
		return (0);

	if (vmem_backend & VMEM_BACKEND_MMAP)
		umem_va_arena = vmem_create("umem_va",
		    NULL, 0, pagesize,
		    vmem_alloc, vmem_free, heap_arena,
		    8 * pagesize, VM_NOSLEEP);
	else
		umem_va_arena = heap_arena;

	if (umem_va_arena == NULL)
		return (0);

	umem_default_arena = vmem_create("umem_default",
	    NULL, 0, pagesize,
	    heap_alloc, heap_free, umem_va_arena,
	    0, VM_NOSLEEP);

	if (umem_default_arena == NULL)
		return (0);

	/*
	 * make sure the umem_alloc table initializer is correct
	 */
	i = sizeof (umem_alloc_table) / sizeof (*umem_alloc_table);
	ASSERT(umem_alloc_table[i - 1] == &umem_null_cache);

	/*
	 * Create the default caches to back umem_alloc()
	 */
	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
		size_t cache_size = umem_alloc_sizes[i];
		size_t align = 0;
		/*
		 * If they allocate a multiple of the coherency granularity,
		 * they get a coherency-granularity-aligned address.
		 */
		if (IS_P2ALIGNED(cache_size, 64))
			align = 64;
		if (IS_P2ALIGNED(cache_size, pagesize))
			align = pagesize;
		(void) snprintf(name, sizeof (name), "umem_alloc_%lu",
		    (long)cache_size);

		cp = umem_cache_create(name, cache_size, align,
		    NULL, NULL, NULL, NULL, NULL, UMC_INTERNAL);
		if (cp == NULL)
			return (0);

		umem_alloc_caches[i] = cp;
	}

	/*
	 * Initialization cannot fail at this point.  Make the caches
	 * visible to umem_alloc() and friends.
	 */
	size = UMEM_ALIGN;
	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
		size_t cache_size = umem_alloc_sizes[i];

		cp = umem_alloc_caches[i];

		while (size <= cache_size) {
			umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT] = cp;
			size += UMEM_ALIGN;
		}
	}
	return (1);
}
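/*
 * Worked example of the table fill above, with hypothetical numbers:
 * assuming UMEM_ALIGN is 8 and consecutive entries 96 and 112 in
 * umem_alloc_sizes, every multiple-of-8 size from 104 through 112 maps
 * to the umem_alloc_112 cache, so a umem_alloc(100) request computes
 * index (100 - 1) >> UMEM_ALIGN_SHIFT = 12 and lands on that cache.
 * Sizes above UMEM_MAXBUF have no backing cache at all; they bypass the
 * table and go to the oversize arena instead.
 */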
/*
 * umem_startup() is called early on, and must be called explicitly if we're
 * the standalone version.
 */
#ifdef UMEM_STANDALONE
void
#else
#pragma init(umem_startup)
static void
#endif
umem_startup(caddr_t start, size_t len, size_t pagesize, caddr_t minstack,
    caddr_t maxstack)
{
#ifdef UMEM_STANDALONE
	int idx;
	/* Standalone doesn't fork */
#else
	umem_forkhandler_init();	/* register the fork handler */
#endif

#ifdef __lint
	/* make lint happy */
	minstack = maxstack;
#endif

#ifdef UMEM_STANDALONE
	umem_ready = UMEM_READY_STARTUP;
	umem_init_env_ready = 0;

	umem_min_stack = minstack;
	umem_max_stack = maxstack;

	nofail_callback = NULL;
	umem_slab_cache = NULL;
	umem_bufctl_cache = NULL;
	umem_bufctl_audit_cache = NULL;
	heap_arena = NULL;
	heap_alloc = NULL;
	heap_free = NULL;
	umem_internal_arena = NULL;
	umem_cache_arena = NULL;
	umem_hash_arena = NULL;
	umem_log_arena = NULL;
	umem_oversize_arena = NULL;
	umem_va_arena = NULL;
	umem_default_arena = NULL;
	umem_firewall_va_arena = NULL;
	umem_firewall_arena = NULL;
	umem_memalign_arena = NULL;
	umem_transaction_log = NULL;
	umem_content_log = NULL;
	umem_failure_log = NULL;
	umem_slab_log = NULL;
	umem_cpu_mask = 0;

	umem_cpus = &umem_startup_cpu;
	umem_startup_cpu.cpu_cache_offset = UMEM_CACHE_SIZE(0);
	umem_startup_cpu.cpu_number = 0;

	bcopy(&umem_null_cache_template, &umem_null_cache,
	    sizeof (umem_cache_t));

	for (idx = 0; idx < (UMEM_MAXBUF >> UMEM_ALIGN_SHIFT); idx++)
		umem_alloc_table[idx] = &umem_null_cache;
#endif

	/*
	 * Perform initialization specific to the way we've been compiled
	 * (library or standalone)
	 */
	umem_type_init(start, len, pagesize);

	vmem_startup();
}
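/*
 * In the library build, the #pragma init above makes the dynamic linker
 * invoke umem_startup() automatically, so ordinary consumers never call
 * it.  A standalone embedding, by contrast, must call it by hand before
 * any allocation, along the lines of (arguments hypothetical):
 *
 *	umem_startup(base, heap_len, pagesize, minstack, maxstack);
 *	...
 *	buf = umem_alloc(sz, UMEM_DEFAULT);	(triggers umem_init())
 */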
int
umem_init(void)
{
	size_t maxverify, minfirewall;
	size_t size;
	int idx;
	umem_cpu_t *new_cpus;

	vmem_t *memalign_arena, *oversize_arena;

	if (thr_self() != umem_init_thr) {
		/*
		 * The usual case -- non-recursive invocation of umem_init().
		 */
		(void) mutex_lock(&umem_init_lock);
		if (umem_ready != UMEM_READY_STARTUP) {
			/*
			 * someone else beat us to initializing umem.  Wait
			 * for them to complete, then return.
			 */
			while (umem_ready == UMEM_READY_INITING)
				(void) _cond_wait(&umem_init_cv,
				    &umem_init_lock);
			ASSERT(umem_ready == UMEM_READY ||
			    umem_ready == UMEM_READY_INIT_FAILED);
			(void) mutex_unlock(&umem_init_lock);
			return (umem_ready == UMEM_READY);
		}

		ASSERT(umem_ready == UMEM_READY_STARTUP);
		ASSERT(umem_init_env_ready == 0);

		umem_ready = UMEM_READY_INITING;
		umem_init_thr = thr_self();

		(void) mutex_unlock(&umem_init_lock);
		umem_setup_envvars(0);		/* can recurse -- see below */
		if (umem_init_env_ready) {
			/*
			 * initialization was completed already
			 */
			ASSERT(umem_ready == UMEM_READY ||
			    umem_ready == UMEM_READY_INIT_FAILED);
			ASSERT(umem_init_thr == 0);
			return (umem_ready == UMEM_READY);
		}
	} else if (!umem_init_env_ready) {
		/*
		 * The umem_setup_envvars() call (above) makes calls into
		 * the dynamic linker and directly into user-supplied code.
		 * Since we cannot know what that code will do, we could be
		 * recursively invoked (by, say, a malloc() call in the code
		 * itself, or in a (C++) _init section it causes to be fired).
		 *
		 * This code is where we end up if such recursion occurs.  We
		 * first clean up any partial results in the envvar code, then
		 * proceed to finish initialization processing in the recursive
		 * call.  The original call will notice this, and return
		 * immediately.
		 */
		umem_setup_envvars(1);		/* clean up any partial state */
	} else {
		umem_panic(
		    "recursive allocation while initializing umem\n");
	}
	umem_init_env_ready = 1;

	/*
	 * From this point until we finish, recursion into umem_init() will
	 * cause a umem_panic().
	 */
	maxverify = minfirewall = ULONG_MAX;

	/* LINTED constant condition */
	if (sizeof (umem_cpu_cache_t) != UMEM_CPU_CACHE_SIZE) {
		umem_panic("sizeof (umem_cpu_cache_t) = %d, should be %d\n",
		    sizeof (umem_cpu_cache_t), UMEM_CPU_CACHE_SIZE);
	}

	umem_max_ncpus = umem_get_max_ncpus();

	/*
	 * load tunables from environment
	 */
	umem_process_envvars();

	if (issetugid())
		umem_mtbf = 0;

	/*
	 * set up vmem
	 */
	if (!(umem_flags & UMF_AUDIT))
		vmem_no_debug();

	heap_arena = vmem_heap_arena(&heap_alloc, &heap_free);

	pagesize = heap_arena->vm_quantum;

	umem_internal_arena = vmem_create("umem_internal", NULL, 0, pagesize,
	    heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP);

	umem_default_arena = umem_internal_arena;

	if (umem_internal_arena == NULL)
		goto fail;

	umem_cache_arena = vmem_create("umem_cache", NULL, 0, UMEM_ALIGN,
	    vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP);

	umem_hash_arena = vmem_create("umem_hash", NULL, 0, UMEM_ALIGN,
	    vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP);

	umem_log_arena = vmem_create("umem_log", NULL, 0, UMEM_ALIGN,
	    heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP);

	umem_firewall_va_arena = vmem_create("umem_firewall_va",
	    NULL, 0, pagesize,
	    umem_firewall_va_alloc, umem_firewall_va_free, heap_arena,
	    0, VM_NOSLEEP);

	if (umem_cache_arena == NULL || umem_hash_arena == NULL ||
	    umem_log_arena == NULL || umem_firewall_va_arena == NULL)
		goto fail;

	umem_firewall_arena = vmem_create("umem_firewall", NULL, 0, pagesize,
	    heap_alloc, heap_free, umem_firewall_va_arena, 0,
	    VM_NOSLEEP);

	if (umem_firewall_arena == NULL)
		goto fail;

	oversize_arena = vmem_create("umem_oversize", NULL, 0, pagesize,
	    heap_alloc, heap_free, minfirewall < ULONG_MAX ?
	    umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP);
	memalign_arena = vmem_create("umem_memalign", NULL, 0, UMEM_ALIGN,
	    heap_alloc, heap_free, minfirewall < ULONG_MAX ?
	    umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP);

	if (oversize_arena == NULL || memalign_arena == NULL)
		goto fail;

	if (umem_max_ncpus > CPUHINT_MAX())
		umem_max_ncpus = CPUHINT_MAX();

	while ((umem_max_ncpus & (umem_max_ncpus - 1)) != 0)
		umem_max_ncpus++;

	if (umem_max_ncpus == 0)
		umem_max_ncpus = 1;

	size = umem_max_ncpus * sizeof (umem_cpu_t);
	new_cpus = vmem_alloc(umem_internal_arena, size, VM_NOSLEEP);
	if (new_cpus == NULL)
		goto fail;

	bzero(new_cpus, size);
	for (idx = 0; idx < umem_max_ncpus; idx++) {
		new_cpus[idx].cpu_number = idx;
		new_cpus[idx].cpu_cache_offset = UMEM_CACHE_SIZE(idx);
	}
	umem_cpus = new_cpus;
	umem_cpu_mask = (umem_max_ncpus - 1);

	if (umem_maxverify == 0)
		umem_maxverify = maxverify;

	if (umem_minfirewall == 0)
		umem_minfirewall = minfirewall;

	/*
	 * Set up updating and reaping
	 */
	umem_reap_next = gethrtime() + NANOSEC;

#ifndef UMEM_STANDALONE
	(void) gettimeofday(&umem_update_next, NULL);
#endif

	/*
	 * Set up logging -- failure here is okay, since it will just disable
	 * the logs
	 */
	if (umem_logging) {
		umem_transaction_log = umem_log_init(umem_transaction_log_size);
		umem_content_log = umem_log_init(umem_content_log_size);
		umem_failure_log = umem_log_init(umem_failure_log_size);
		umem_slab_log = umem_log_init(umem_slab_log_size);
	}

	/*
	 * Set up caches -- if successful, initialization cannot fail, since
	 * allocations from other threads can now succeed.
	 */
	if (umem_cache_init() == 0) {
		log_message("unable to create initial caches\n");
		goto fail;
	}
	umem_oversize_arena = oversize_arena;
	umem_memalign_arena = memalign_arena;

	umem_cache_applyall(umem_cache_magazine_enable);

	/*
	 * initialization done, ready to go
	 */
	(void) mutex_lock(&umem_init_lock);
	umem_ready = UMEM_READY;
	umem_init_thr = 0;
	(void) cond_broadcast(&umem_init_cv);
	(void) mutex_unlock(&umem_init_lock);
	return (1);

fail:
	log_message("umem initialization failed\n");

	(void) mutex_lock(&umem_init_lock);
	umem_ready = UMEM_READY_INIT_FAILED;
	umem_init_thr = 0;
	(void) cond_broadcast(&umem_init_cv);
	(void) mutex_unlock(&umem_init_lock);
	return (0);
}