1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * based on usr/src/uts/common/os/kmem.c r1.64 from 2001/12/18
31  *
32  * The slab allocator, as described in the following two papers:
33  *
34  *	Jeff Bonwick,
35  *	The Slab Allocator: An Object-Caching Kernel Memory Allocator.
36  *	Proceedings of the Summer 1994 Usenix Conference.
37  *	Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
38  *
39  *	Jeff Bonwick and Jonathan Adams,
40  *	Magazines and vmem: Extending the Slab Allocator to Many CPUs and
41  *	Arbitrary Resources.
42  *	Proceedings of the 2001 Usenix Conference.
43  *	Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
44  *
45  * 1. Overview
46  * -----------
47  * umem is very close to kmem in implementation.  There are five major
48  * areas of divergence:
49  *
50  *	* Initialization
51  *
52  *	* CPU handling
53  *
54  *	* umem_update()
55  *
56  *	* KM_SLEEP vs. UMEM_NOFAIL
57  *
58  *	* lock ordering
59  *
60  * 2. Initialization
61  * -----------------
62  * kmem is initialized early on in boot, and knows that no one will call
63  * into it before it is ready.  umem does not have these luxuries.  Instead,
64  * initialization is divided into two phases:
65  *
66  *	* library initialization, and
67  *
68  *	* first use
69  *
70  * umem's full initialization happens at the time of the first allocation
71  * request (via malloc() and friends, umem_alloc(), or umem_zalloc()),
72  * or the first call to umem_cache_create().
73  *
74  * umem_free() and umem_cache_alloc() do not require special handling,
75  * since the only way to get valid arguments for them is to successfully
76  * call a function from the first group.
77  *
78  * 2.1. Library Initialization: umem_startup()
79  * -------------------------------------------
80  * umem_startup() is libumem.so's .init section.  It calls pthread_atfork()
81  * to install the handlers necessary for umem's Fork1-Safety.  Because of
82  * race condition issues, all other pre-umem_init() initialization is done
83  * statically (i.e. by the dynamic linker).
84  *
85  * For standalone use, umem_startup() returns everything to its initial
86  * state.
87  *
88  * 2.2. First use: umem_init()
89  * ------------------------------
90  * The first time any memory allocation function is used, we have to
91  * create the backing caches and vmem arenas which are needed for it.
92  * umem_init() is the central point for that task.  When it completes,
93  * umem_ready is either UMEM_READY (all set) or UMEM_READY_INIT_FAILED (unable
94  * to initialize, probably due to lack of memory).
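 *
 * To illustrate the first-use contract (a hedged sketch, not part of
 * this file, using only the public <umem.h> interfaces): a client does
 * no explicit setup, and the first allocation absorbs the one-time
 * cost of umem_init().
 *
 *	void *buf = umem_alloc(128, UMEM_DEFAULT);
 *	if (buf != NULL)
 *		umem_free(buf, 128);
 *
 * UMEM_DEFAULT allocations may return NULL on failure (see section 5),
 * and umem_free() must be given the same size the buffer was
 * allocated with.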
95  *
96  * There are four different paths from which umem_init() is called:
97  *
98  *	* from umem_alloc() or umem_zalloc(), with 0 < size <= UMEM_MAXBUF,
99  *
100  *	* from umem_alloc() or umem_zalloc(), with size > UMEM_MAXBUF,
101  *
102  *	* from umem_cache_create(), and
103  *
104  *	* from memalign(), with align > UMEM_ALIGN.
105  *
106  * The last three just check if umem is initialized, and call umem_init()
107  * if it is not.  For performance reasons, the first case is more complicated.
108  *
109  * 2.2.1. umem_alloc()/umem_zalloc(), with 0 < size <= UMEM_MAXBUF
110  * -----------------------------------------------------------------
111  * In this case, umem_cache_alloc(&umem_null_cache, ...) is called.
112  * There is special case code which causes any allocation on
113  * &umem_null_cache to fail by returning (NULL), regardless of the
114  * flags argument.
115  *
116  * So umem_cache_alloc() returns NULL, and umem_alloc()/umem_zalloc() call
117  * umem_alloc_retry().  umem_alloc_retry() sees that the allocation
118  * was against &umem_null_cache, and calls umem_init().
119  *
120  * If initialization is successful, umem_alloc_retry() returns 1, which
121  * causes umem_alloc()/umem_zalloc() to start over, which causes it to load
122  * the (now valid) cache pointer from umem_alloc_table.
123  *
124  * 2.2.2. Dealing with race conditions
125  * -----------------------------------
126  * There are a couple race conditions resulting from the initialization
127  * code that we have to guard against:
128  *
129  *	* In umem_cache_create(), there is a special UMC_INTERNAL cflag
130  *	  that is passed for caches created during initialization.  It
131  *	  is illegal for a user to try to create a UMC_INTERNAL cache.
132  *	  This allows initialization to proceed, but any other
133  *	  umem_cache_create()s will block by calling umem_init().
134  *
135  *	* Since umem_null_cache has a 1-element cache_cpu, its cache_cpu_mask
136  *	  is always zero.  umem_cache_alloc uses cp->cache_cpu_mask to
137  *	  mask the cpu number.  This prevents a race between grabbing a
138  *	  cache pointer out of umem_alloc_table and growing the cpu array.
139  *
140  *
141  * 3. CPU handling
142  * ---------------
143  * kmem uses the CPU's sequence number to determine which "cpu cache" to
144  * use for an allocation.  Currently, there is no way to get the sequence
145  * number in userspace.
146  *
147  * umem keeps track of cpu information in umem_cpus, an array of umem_max_ncpus
148  * umem_cpu_t structures.  CURCPU() is a "hint" function, which we then mask
149  * with either umem_cpu_mask or cp->cache_cpu_mask to find the actual "cpu" id.
150  * The mechanics of this are all in the CPU(mask) macro.
151  *
152  * Currently, umem uses _lwp_self() as its hint.
153  *
154  *
155  * 4. The update thread
156  * --------------------
157  * kmem uses a task queue, kmem_taskq, to do periodic maintenance on
158  * every kmem cache.  vmem has a periodic timeout for hash table resizing.
159  * The kmem_taskq also provides a separate context for kmem_cache_reap()'s
160  * to be done in, avoiding issues of the context of kmem_reap() callers.
161  *
162  * Instead, umem has the concept of "updates", which are asynchronous requests
163  * for work attached to single caches.  All caches with pending work are
164  * on a doubly linked list rooted at the umem_null_cache.  All update state
165  * is protected by the umem_update_lock mutex, and the umem_update_cv is used
166  * for notification between threads.
167  *
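 * A hedged sketch of the request side of this machinery (the names are
 * the real ones from the code below, but the list splicing and the
 * IN_UPDATE() special case of umem_add_update() are elided):
 *
 *	(void) mutex_lock(&umem_update_lock);
 *	cp->cache_uflags |= UMU_REAP;		(record the request)
 *	(splice cp onto umem_null_cache's update list)
 *	(void) cond_broadcast(&umem_update_cv);	(wake the worker)
 *	(void) mutex_unlock(&umem_update_lock);
 *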
168  * 4.1. Cache states with regard to updates
169  * -----------------------------------------
170  * A given cache is in one of three states:
171  *
172  * Inactive		cache_uflags is zero, cache_u{next,prev} are NULL
173  *
174  * Work Requested	cache_uflags is non-zero (but UMU_ACTIVE is not set),
175  *			cache_u{next,prev} link the cache onto the global
176  *			update list
177  *
178  * Active		cache_uflags has UMU_ACTIVE set, cache_u{next,prev}
179  *			are NULL, and either umem_update_thr or
180  *			umem_st_update_thr is actively doing work on the
181  *			cache.
182  *
183  * An update can be added to any cache in any state -- if the cache is
184  * Inactive, it transitions to being Work Requested.  If the cache is
185  * Active, the worker will notice the new update and act on it before
186  * transitioning the cache to the Inactive state.
187  *
188  * If a cache is in the Active state, UMU_NOTIFY can be set, which asks
189  * the worker to broadcast the umem_update_cv when it has finished.
190  *
191  * 4.2. Update interface
192  * ---------------------
193  * umem_add_update() adds an update to a particular cache.
194  * umem_updateall() adds an update to all caches.
195  * umem_remove_updates() returns a cache to the Inactive state.
196  *
197  * umem_process_updates() processes all caches in the Work Requested state.
198  *
199  * 4.3. Reaping
200  * ------------
201  * When umem_reap() is called (at the time of heap growth), it schedules
202  * UMU_REAP updates on every cache.  It then checks to see if the update
203  * thread exists (umem_update_thr != 0).  If it does, it broadcasts
204  * the umem_update_cv to wake the update thread up, and returns.
205  *
206  * If the update thread does not exist (umem_update_thr == 0), and the
207  * program currently has multiple threads, umem_reap() attempts to create
208  * a new update thread.
209  *
210  * If the process is not multithreaded, or the creation fails, umem_reap()
211  * calls umem_st_update() to do an inline update.
212  *
213  * 4.4. The update thread
214  * ----------------------
215  * The update thread spends most of its time in cond_timedwait() on the
216  * umem_update_cv.  It wakes up under two conditions:
217  *
218  *	* The timedwait times out, in which case it needs to run a global
219  *	  update, or
220  *
221  *	* someone cond_broadcast(3THR)s the umem_update_cv, in which case
222  *	  it needs to check if there are any caches in the Work Requested
223  *	  state.
224  *
225  * When it is time for another global update, umem calls umem_cache_update()
226  * on every cache, then calls vmem_update(), which tunes the vmem structures.
227  * umem_cache_update() can request further work using umem_add_update().
228  *
229  * After any work from the global update completes, the update timer is
230  * reset to umem_reap_interval seconds in the future.  This makes the
231  * updates self-throttling.
232  *
233  * Reaps are similarly self-throttling.  After a UMU_REAP update has
234  * been scheduled on all caches, umem_reap() sets a flag and wakes up the
235  * update thread.  The update thread notices the flag, and resets the
236  * reap state.
237  *
238  * 4.5. Inline updates
239  * -------------------
240  * If the update thread is not running, umem_st_update() is used instead.  It
241  * immediately does a global update (as above), then calls
242  * umem_process_updates() to process both the reaps that umem_reap() added and
243  * any work generated by the global update.  Afterwards, it resets the reap
244  * state.
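 *
 * Putting 4.3 and 4.5 together, the dispatch in umem_reap() amounts to
 * the following (a simplified paraphrase; the real code also rate-limits
 * itself via umem_reap_next):
 *
 *	umem_updateall(UMU_REAP);
 *	if (umem_update_thr != 0)
 *		(void) cond_broadcast(&umem_update_cv);
 *	else if (multithreaded and thr_create() succeeds)
 *		(the new update thread picks the work up)
 *	else
 *		umem_st_update();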
245  *
246  * While the umem_st_update() is running, umem_st_update_thr holds the thread
247  * id of the thread performing the update.
248  *
249  * 4.6. Updates and fork1()
250  * ------------------------
251  * umem has fork1() pre- and post-handlers which lock up (and release) every
252  * mutex in every cache.  They also lock up the umem_update_lock.  Since
253  * fork1() only copies over a single lwp, other threads (including the update
254  * thread) could have been actively using a cache in the parent.  This
255  * can lead to inconsistencies in the child process.
256  *
257  * Because we locked all of the mutexes, the only possible inconsistencies are:
258  *
259  *	* a umem_cache_alloc() could leak its buffer.
260  *
261  *	* a caller of umem_depot_alloc() could leak a magazine, and all the
262  *	  buffers contained in it.
263  *
264  *	* a cache could be in the Active update state.  In the child, there
265  *	  would be no thread actually working on it.
266  *
267  *	* a umem_hash_rescale() could leak the new hash table.
268  *
269  *	* a umem_magazine_resize() could be in progress.
270  *
271  *	* a umem_reap() could be in progress.
272  *
273  * The memory leaks we can't do anything about.  umem_release_child() resets
274  * the update state and moves any caches in the Active state to the Work
275  * Requested state.  This might cause some updates to be re-run, but UMU_REAP
276  * and UMU_HASH_RESCALE are effectively idempotent, and the worst that can
277  * happen from umem_magazine_resize() is resizing the magazine twice in close
278  * succession.
279  *
280  * Much of the cleanup in umem_release_child() is skipped if
281  * umem_st_update_thr == thr_self().  This is so that applications which call
282  * fork1() from a cache callback do not break.  Needless to say, any such
283  * application is tremendously broken.
284  *
285  *
286  * 5. KM_SLEEP vs. UMEM_NOFAIL
287  * ----------------------------
288  * Allocations against kmem and vmem have two basic modes: SLEEP and
289  * NOSLEEP.  A sleeping allocation will go to sleep (waiting for
290  * more memory) instead of failing (returning NULL).
291  *
292  * SLEEP allocations presume an extremely multithreaded model, with
293  * a lot of allocation and deallocation activity.  umem cannot presume
294  * that its clients have any particular type of behavior.  Instead,
295  * it provides two types of allocations:
296  *
297  *	* UMEM_DEFAULT, equivalent to KM_NOSLEEP (i.e. return NULL on
298  *	  failure)
299  *
300  *	* UMEM_NOFAIL, which, on failure, calls an optional callback
301  *	  (registered with umem_nofail_callback()).
302  *
303  * The callback is invoked with no locks held, and can do an arbitrary
304  * amount of work.  It then has a choice between:
305  *
306  *	* Returning UMEM_CALLBACK_RETRY, which will cause the allocation
307  *	  to be restarted.
308  *
309  *	* Returning UMEM_CALLBACK_EXIT(status), which will cause exit(2)
310  *	  to be invoked with status.  If multiple threads attempt to do
311  *	  this simultaneously, only one will call exit(2).
312  *
313  *	* Doing some kind of non-local exit (thr_exit(3thr), longjmp(3C),
314  *	  etc.)
315  *
316  * The default callback returns UMEM_CALLBACK_EXIT(255).
317  *
318  * To have these callbacks without risk of state corruption (in the case of
319  * a non-local exit), we have to ensure that the callbacks get invoked
320  * close to the original allocation, with no inconsistent state or held
321  * locks.  The following steps are taken:
322  *
323  *	* All invocations of vmem are VM_NOSLEEP.
324  *
325  *	* All constructor callbacks (which can themselves do allocations)
326  *	  are passed UMEM_DEFAULT as their required allocation argument.  This
327  *	  way, the constructor will fail, allowing the highest-level allocation
328  *	  to invoke the nofail callback.
329  *
330  *	  If a constructor callback _does_ do a UMEM_NOFAIL allocation, and
331  *	  the nofail callback does a non-local exit, we will leak the
332  *	  partially-constructed buffer.
333  *
334  *
335  * 6. Lock Ordering
336  * ----------------
337  * umem has a few more locks than kmem does, mostly in the update path.  The
338  * overall lock ordering (earlier locks must be acquired first) is:
339  *
340  *	umem_init_lock
341  *
342  *	vmem_list_lock
343  *	vmem_nosleep_lock.vmpl_mutex
344  *	vmem_t's:
345  *		vm_lock
346  *	sbrk_lock
347  *
348  *	umem_cache_lock
349  *	umem_update_lock
350  *	umem_flags_lock
351  *	umem_cache_t's:
352  *		cache_cpu[*].cc_lock
353  *		cache_depot_lock
354  *		cache_lock
355  *	umem_log_header_t's:
356  *		lh_cpu[*].clh_lock
357  *		lh_lock
358  */
359 
360 #include "c_synonyms.h"
361 #include <umem_impl.h>
362 #include <sys/vmem_impl_user.h>
363 #include "umem_base.h"
364 #include "vmem_base.h"
365 
366 #include <sys/processor.h>
367 #include <sys/sysmacros.h>
368 
369 #include <alloca.h>
370 #include <errno.h>
371 #include <limits.h>
372 #include <stdio.h>
373 #include <stdlib.h>
374 #include <string.h>
375 #include <strings.h>
376 #include <signal.h>
377 #include <unistd.h>
378 #include <atomic.h>
379 
380 #include "misc.h"
381 
382 #define	UMEM_VMFLAGS(umflag)	(VM_NOSLEEP)
383 
384 size_t pagesize;
385 
386 /*
387  * The default set of caches to back umem_alloc().
388  * These sizes should be reevaluated periodically.
389  *
390  * We want allocations that are multiples of the coherency granularity
391  * (64 bytes) to be satisfied from a cache which is a multiple of 64
392  * bytes, so that it will be 64-byte aligned.  For all multiples of 64,
393  * the next kmem_cache_size greater than or equal to it must be a
394  * multiple of 64.
395  *
396  * This table must be in sorted order, from smallest to highest.  The
397  * highest slot must be UMEM_MAXBUF, and every slot afterwards must be
398  * zero.
399  */
400 static int umem_alloc_sizes[] = {
401 #ifdef _LP64
402 	1 * 8,
403 	1 * 16,
404 	2 * 16,
405 	3 * 16,
406 #else
407 	1 * 8,
408 	2 * 8,
409 	3 * 8,
410 	4 * 8,	5 * 8,	6 * 8,	7 * 8,
411 #endif
412 	4 * 16,	5 * 16,	6 * 16,	7 * 16,
413 	4 * 32,	5 * 32,	6 * 32,	7 * 32,
414 	4 * 64,	5 * 64,	6 * 64,	7 * 64,
415 	4 * 128,	5 * 128,	6 * 128,	7 * 128,
416 	P2ALIGN(8192 / 7, 64),
417 	P2ALIGN(8192 / 6, 64),
418 	P2ALIGN(8192 / 5, 64),
419 	P2ALIGN(8192 / 4, 64), 2304,
420 	P2ALIGN(8192 / 3, 64),
421 	P2ALIGN(8192 / 2, 64), 4544,
422 	P2ALIGN(8192 / 1, 64), 9216,
423 	4096 * 3,
424 	UMEM_MAXBUF,	/* = 8192 * 2 */
425 	/* 24 slots for user expansion */
426 	0, 0, 0, 0, 0, 0, 0, 0,
427 	0, 0, 0, 0, 0, 0, 0, 0,
428 	0, 0, 0, 0, 0, 0, 0, 0,
429 };
430 #define	NUM_ALLOC_SIZES (sizeof (umem_alloc_sizes) / sizeof (*umem_alloc_sizes))
431 
432 static umem_magtype_t umem_magtype[] = {
433 	{ 1,	8,	3200,	65536 },
434 	{ 3,	16,	256,	32768 },
435 	{ 7,	32,	64,	16384 },
436 	{ 15,	64,	0,	8192 },
437 	{ 31,	64,	0,	4096 },
438 	{ 47,	64,	0,	2048 },
439 	{ 63,	64,	0,	1024 },
440 	{ 95,	64,	0,	512 },
441 	{ 143,	64,	0,	0 },
442 };
443 
444 /*
445  * umem tunables
446  */
447 uint32_t umem_max_ncpus;	/* # of CPU caches.
*/ 448 449 uint32_t umem_stack_depth = 15; /* # stack frames in a bufctl_audit */ 450 uint32_t umem_reap_interval = 10; /* max reaping rate (seconds) */ 451 uint_t umem_depot_contention = 2; /* max failed trylocks per real interval */ 452 uint_t umem_abort = 1; /* whether to abort on error */ 453 uint_t umem_output = 0; /* whether to write to standard error */ 454 uint_t umem_logging = 0; /* umem_log_enter() override */ 455 uint32_t umem_mtbf = 0; /* mean time between failures [default: off] */ 456 size_t umem_transaction_log_size; /* size of transaction log */ 457 size_t umem_content_log_size; /* size of content log */ 458 size_t umem_failure_log_size; /* failure log [4 pages per CPU] */ 459 size_t umem_slab_log_size; /* slab create log [4 pages per CPU] */ 460 size_t umem_content_maxsave = 256; /* UMF_CONTENTS max bytes to log */ 461 size_t umem_lite_minsize = 0; /* minimum buffer size for UMF_LITE */ 462 size_t umem_lite_maxalign = 1024; /* maximum buffer alignment for UMF_LITE */ 463 size_t umem_maxverify; /* maximum bytes to inspect in debug routines */ 464 size_t umem_minfirewall; /* hardware-enforced redzone threshold */ 465 466 uint_t umem_flags = 0; 467 468 mutex_t umem_init_lock; /* locks initialization */ 469 cond_t umem_init_cv; /* initialization CV */ 470 thread_t umem_init_thr; /* thread initializing */ 471 int umem_init_env_ready; /* environ pre-initted */ 472 int umem_ready = UMEM_READY_STARTUP; 473 474 static umem_nofail_callback_t *nofail_callback; 475 static mutex_t umem_nofail_exit_lock; 476 static thread_t umem_nofail_exit_thr; 477 478 static umem_cache_t *umem_slab_cache; 479 static umem_cache_t *umem_bufctl_cache; 480 static umem_cache_t *umem_bufctl_audit_cache; 481 482 mutex_t umem_flags_lock; 483 484 static vmem_t *heap_arena; 485 static vmem_alloc_t *heap_alloc; 486 static vmem_free_t *heap_free; 487 488 static vmem_t *umem_internal_arena; 489 static vmem_t *umem_cache_arena; 490 static vmem_t *umem_hash_arena; 491 static vmem_t *umem_log_arena; 492 static vmem_t *umem_oversize_arena; 493 static vmem_t *umem_va_arena; 494 static vmem_t *umem_default_arena; 495 static vmem_t *umem_firewall_va_arena; 496 static vmem_t *umem_firewall_arena; 497 498 vmem_t *umem_memalign_arena; 499 500 umem_log_header_t *umem_transaction_log; 501 umem_log_header_t *umem_content_log; 502 umem_log_header_t *umem_failure_log; 503 umem_log_header_t *umem_slab_log; 504 505 extern thread_t _thr_self(void); 506 #define CPUHINT() (_thr_self()) 507 #define CPUHINT_MAX() INT_MAX 508 509 #define CPU(mask) (umem_cpus + (CPUHINT() & (mask))) 510 static umem_cpu_t umem_startup_cpu = { /* initial, single, cpu */ 511 UMEM_CACHE_SIZE(0), 512 0 513 }; 514 515 static uint32_t umem_cpu_mask = 0; /* global cpu mask */ 516 static umem_cpu_t *umem_cpus = &umem_startup_cpu; /* cpu list */ 517 518 volatile uint32_t umem_reaping; 519 520 thread_t umem_update_thr; 521 struct timeval umem_update_next; /* timeofday of next update */ 522 volatile thread_t umem_st_update_thr; /* only used when single-thd */ 523 524 #define IN_UPDATE() (thr_self() == umem_update_thr || \ 525 thr_self() == umem_st_update_thr) 526 #define IN_REAP() IN_UPDATE() 527 528 mutex_t umem_update_lock; /* cache_u{next,prev,flags} */ 529 cond_t umem_update_cv; 530 531 volatile hrtime_t umem_reap_next; /* min hrtime of next reap */ 532 533 mutex_t umem_cache_lock; /* inter-cache linkage only */ 534 535 #ifdef UMEM_STANDALONE 536 umem_cache_t umem_null_cache; 537 static const umem_cache_t umem_null_cache_template = { 538 #else 539 umem_cache_t 
umem_null_cache = { 540 #endif 541 0, 0, 0, 0, 0, 542 0, 0, 543 0, 0, 544 0, 0, 545 "invalid_cache", 546 0, 0, 547 NULL, NULL, NULL, NULL, 548 NULL, 549 0, 0, 0, 0, 550 &umem_null_cache, &umem_null_cache, 551 &umem_null_cache, &umem_null_cache, 552 0, 553 DEFAULTMUTEX, /* start of slab layer */ 554 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 555 &umem_null_cache.cache_nullslab, 556 { 557 &umem_null_cache, 558 NULL, 559 &umem_null_cache.cache_nullslab, 560 &umem_null_cache.cache_nullslab, 561 NULL, 562 -1, 563 0 564 }, 565 NULL, 566 NULL, 567 DEFAULTMUTEX, /* start of depot layer */ 568 NULL, { 569 NULL, 0, 0, 0, 0 570 }, { 571 NULL, 0, 0, 0, 0 572 }, { 573 { 574 DEFAULTMUTEX, /* start of CPU cache */ 575 0, 0, NULL, NULL, -1, -1, 0 576 } 577 } 578 }; 579 580 #define ALLOC_TABLE_4 \ 581 &umem_null_cache, &umem_null_cache, &umem_null_cache, &umem_null_cache 582 583 #define ALLOC_TABLE_64 \ 584 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \ 585 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \ 586 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \ 587 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4 588 589 #define ALLOC_TABLE_1024 \ 590 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \ 591 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \ 592 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \ 593 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64 594 595 static umem_cache_t *umem_alloc_table[UMEM_MAXBUF >> UMEM_ALIGN_SHIFT] = { 596 ALLOC_TABLE_1024, 597 ALLOC_TABLE_1024 598 }; 599 600 601 /* Used to constrain audit-log stack traces */ 602 caddr_t umem_min_stack; 603 caddr_t umem_max_stack; 604 605 606 /* 607 * we use the _ versions, since we don't want to be cancelled. 608 * Actually, this is automatically taken care of by including "mtlib.h". 
609  */
610 extern int _cond_wait(cond_t *cv, mutex_t *mutex);
611 
612 #define	UMERR_MODIFIED	0	/* buffer modified while on freelist */
613 #define	UMERR_REDZONE	1	/* redzone violation (write past end of buf) */
614 #define	UMERR_DUPFREE	2	/* freed a buffer twice */
615 #define	UMERR_BADADDR	3	/* freed a bad (unallocated) address */
616 #define	UMERR_BADBUFTAG	4	/* buftag corrupted */
617 #define	UMERR_BADBUFCTL	5	/* bufctl corrupted */
618 #define	UMERR_BADCACHE	6	/* freed a buffer to the wrong cache */
619 #define	UMERR_BADSIZE	7	/* alloc size != free size */
620 #define	UMERR_BADBASE	8	/* buffer base address wrong */
621 
622 struct {
623 	hrtime_t	ump_timestamp;	/* timestamp of error */
624 	int		ump_error;	/* type of umem error (UMERR_*) */
625 	void		*ump_buffer;	/* buffer that induced abort */
626 	void		*ump_realbuf;	/* real start address for buffer */
627 	umem_cache_t	*ump_cache;	/* buffer's cache according to client */
628 	umem_cache_t	*ump_realcache;	/* actual cache containing buffer */
629 	umem_slab_t	*ump_slab;	/* slab according to umem_findslab() */
630 	umem_bufctl_t	*ump_bufctl;	/* bufctl */
631 } umem_abort_info;
632 
633 static void
634 copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
635 {
636 	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
637 	uint64_t *buf = buf_arg;
638 
639 	while (buf < bufend)
640 		*buf++ = pattern;
641 }
642 
643 static void *
644 verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
645 {
646 	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
647 	uint64_t *buf;
648 
649 	for (buf = buf_arg; buf < bufend; buf++)
650 		if (*buf != pattern)
651 			return (buf);
652 	return (NULL);
653 }
654 
655 static void *
656 verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
657 {
658 	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
659 	uint64_t *buf;
660 
661 	for (buf = buf_arg; buf < bufend; buf++) {
662 		if (*buf != old) {
663 			copy_pattern(old, buf_arg,
664 			    (char *)buf - (char *)buf_arg);
665 			return (buf);
666 		}
667 		*buf = new;
668 	}
669 
670 	return (NULL);
671 }
672 
673 void
674 umem_cache_applyall(void (*func)(umem_cache_t *))
675 {
676 	umem_cache_t *cp;
677 
678 	(void) mutex_lock(&umem_cache_lock);
679 	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
680 	    cp = cp->cache_next)
681 		func(cp);
682 	(void) mutex_unlock(&umem_cache_lock);
683 }
684 
685 static void
686 umem_add_update_unlocked(umem_cache_t *cp, int flags)
687 {
688 	umem_cache_t *cnext, *cprev;
689 
690 	flags &= ~UMU_ACTIVE;
691 
692 	if (!flags)
693 		return;
694 
695 	if (cp->cache_uflags & UMU_ACTIVE) {
696 		cp->cache_uflags |= flags;
697 	} else {
698 		if (cp->cache_unext != NULL) {
699 			ASSERT(cp->cache_uflags != 0);
700 			cp->cache_uflags |= flags;
701 		} else {
702 			ASSERT(cp->cache_uflags == 0);
703 			cp->cache_uflags = flags;
704 			cp->cache_unext = cnext = &umem_null_cache;
705 			cp->cache_uprev = cprev = umem_null_cache.cache_uprev;
706 			cnext->cache_uprev = cp;
707 			cprev->cache_unext = cp;
708 		}
709 	}
710 }
711 
712 static void
713 umem_add_update(umem_cache_t *cp, int flags)
714 {
715 	(void) mutex_lock(&umem_update_lock);
716 
717 	umem_add_update_unlocked(cp, flags);
718 
719 	if (!IN_UPDATE())
720 		(void) cond_broadcast(&umem_update_cv);
721 
722 	(void) mutex_unlock(&umem_update_lock);
723 }
724 
725 /*
726  * Remove a cache from the update list, waiting for any in-progress work to
727  * complete first.
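 *
 * (A note on the wait loop below: it re-checks UMU_ACTIVE after every
 * wakeup, both because the UMU_NOTIFY broadcast wakes all waiters and
 * because cond_wait() can return spuriously.)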
728 */ 729 static void 730 umem_remove_updates(umem_cache_t *cp) 731 { 732 (void) mutex_lock(&umem_update_lock); 733 734 /* 735 * Get it out of the active state 736 */ 737 while (cp->cache_uflags & UMU_ACTIVE) { 738 ASSERT(cp->cache_unext == NULL); 739 740 cp->cache_uflags |= UMU_NOTIFY; 741 742 /* 743 * Make sure the update state is sane, before we wait 744 */ 745 ASSERT(umem_update_thr != 0 || umem_st_update_thr != 0); 746 ASSERT(umem_update_thr != thr_self() && 747 umem_st_update_thr != thr_self()); 748 749 (void) _cond_wait(&umem_update_cv, &umem_update_lock); 750 } 751 /* 752 * Get it out of the Work Requested state 753 */ 754 if (cp->cache_unext != NULL) { 755 cp->cache_uprev->cache_unext = cp->cache_unext; 756 cp->cache_unext->cache_uprev = cp->cache_uprev; 757 cp->cache_uprev = cp->cache_unext = NULL; 758 cp->cache_uflags = 0; 759 } 760 /* 761 * Make sure it is in the Inactive state 762 */ 763 ASSERT(cp->cache_unext == NULL && cp->cache_uflags == 0); 764 (void) mutex_unlock(&umem_update_lock); 765 } 766 767 static void 768 umem_updateall(int flags) 769 { 770 umem_cache_t *cp; 771 772 /* 773 * NOTE: To prevent deadlock, umem_cache_lock is always acquired first. 774 * 775 * (umem_add_update is called from things run via umem_cache_applyall) 776 */ 777 (void) mutex_lock(&umem_cache_lock); 778 (void) mutex_lock(&umem_update_lock); 779 780 for (cp = umem_null_cache.cache_next; cp != &umem_null_cache; 781 cp = cp->cache_next) 782 umem_add_update_unlocked(cp, flags); 783 784 if (!IN_UPDATE()) 785 (void) cond_broadcast(&umem_update_cv); 786 787 (void) mutex_unlock(&umem_update_lock); 788 (void) mutex_unlock(&umem_cache_lock); 789 } 790 791 /* 792 * Debugging support. Given a buffer address, find its slab. 793 */ 794 static umem_slab_t * 795 umem_findslab(umem_cache_t *cp, void *buf) 796 { 797 umem_slab_t *sp; 798 799 (void) mutex_lock(&cp->cache_lock); 800 for (sp = cp->cache_nullslab.slab_next; 801 sp != &cp->cache_nullslab; sp = sp->slab_next) { 802 if (UMEM_SLAB_MEMBER(sp, buf)) { 803 (void) mutex_unlock(&cp->cache_lock); 804 return (sp); 805 } 806 } 807 (void) mutex_unlock(&cp->cache_lock); 808 809 return (NULL); 810 } 811 812 static void 813 umem_error(int error, umem_cache_t *cparg, void *bufarg) 814 { 815 umem_buftag_t *btp = NULL; 816 umem_bufctl_t *bcp = NULL; 817 umem_cache_t *cp = cparg; 818 umem_slab_t *sp; 819 uint64_t *off; 820 void *buf = bufarg; 821 822 int old_logging = umem_logging; 823 824 umem_logging = 0; /* stop logging when a bad thing happens */ 825 826 umem_abort_info.ump_timestamp = gethrtime(); 827 828 sp = umem_findslab(cp, buf); 829 if (sp == NULL) { 830 for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache; 831 cp = cp->cache_prev) { 832 if ((sp = umem_findslab(cp, buf)) != NULL) 833 break; 834 } 835 } 836 837 if (sp == NULL) { 838 cp = NULL; 839 error = UMERR_BADADDR; 840 } else { 841 if (cp != cparg) 842 error = UMERR_BADCACHE; 843 else 844 buf = (char *)bufarg - ((uintptr_t)bufarg - 845 (uintptr_t)sp->slab_base) % cp->cache_chunksize; 846 if (buf != bufarg) 847 error = UMERR_BADBASE; 848 if (cp->cache_flags & UMF_BUFTAG) 849 btp = UMEM_BUFTAG(cp, buf); 850 if (cp->cache_flags & UMF_HASH) { 851 (void) mutex_lock(&cp->cache_lock); 852 for (bcp = *UMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next) 853 if (bcp->bc_addr == buf) 854 break; 855 (void) mutex_unlock(&cp->cache_lock); 856 if (bcp == NULL && btp != NULL) 857 bcp = btp->bt_bufctl; 858 if (umem_findslab(cp->cache_bufctl_cache, bcp) == 859 NULL || P2PHASE((uintptr_t)bcp, UMEM_ALIGN) || 860 bcp->bc_addr 
!= buf) { 861 error = UMERR_BADBUFCTL; 862 bcp = NULL; 863 } 864 } 865 } 866 867 umem_abort_info.ump_error = error; 868 umem_abort_info.ump_buffer = bufarg; 869 umem_abort_info.ump_realbuf = buf; 870 umem_abort_info.ump_cache = cparg; 871 umem_abort_info.ump_realcache = cp; 872 umem_abort_info.ump_slab = sp; 873 umem_abort_info.ump_bufctl = bcp; 874 875 umem_printf("umem allocator: "); 876 877 switch (error) { 878 879 case UMERR_MODIFIED: 880 umem_printf("buffer modified after being freed\n"); 881 off = verify_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify); 882 if (off == NULL) /* shouldn't happen */ 883 off = buf; 884 umem_printf("modification occurred at offset 0x%lx " 885 "(0x%llx replaced by 0x%llx)\n", 886 (uintptr_t)off - (uintptr_t)buf, 887 (longlong_t)UMEM_FREE_PATTERN, (longlong_t)*off); 888 break; 889 890 case UMERR_REDZONE: 891 umem_printf("redzone violation: write past end of buffer\n"); 892 break; 893 894 case UMERR_BADADDR: 895 umem_printf("invalid free: buffer not in cache\n"); 896 break; 897 898 case UMERR_DUPFREE: 899 umem_printf("duplicate free: buffer freed twice\n"); 900 break; 901 902 case UMERR_BADBUFTAG: 903 umem_printf("boundary tag corrupted\n"); 904 umem_printf("bcp ^ bxstat = %lx, should be %lx\n", 905 (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat, 906 UMEM_BUFTAG_FREE); 907 break; 908 909 case UMERR_BADBUFCTL: 910 umem_printf("bufctl corrupted\n"); 911 break; 912 913 case UMERR_BADCACHE: 914 umem_printf("buffer freed to wrong cache\n"); 915 umem_printf("buffer was allocated from %s,\n", cp->cache_name); 916 umem_printf("caller attempting free to %s.\n", 917 cparg->cache_name); 918 break; 919 920 case UMERR_BADSIZE: 921 umem_printf("bad free: free size (%u) != alloc size (%u)\n", 922 UMEM_SIZE_DECODE(((uint32_t *)btp)[0]), 923 UMEM_SIZE_DECODE(((uint32_t *)btp)[1])); 924 break; 925 926 case UMERR_BADBASE: 927 umem_printf("bad free: free address (%p) != alloc address " 928 "(%p)\n", bufarg, buf); 929 break; 930 } 931 932 umem_printf("buffer=%p bufctl=%p cache: %s\n", 933 bufarg, (void *)bcp, cparg->cache_name); 934 935 if (bcp != NULL && (cp->cache_flags & UMF_AUDIT) && 936 error != UMERR_BADBUFCTL) { 937 int d; 938 timespec_t ts; 939 hrtime_t diff; 940 umem_bufctl_audit_t *bcap = (umem_bufctl_audit_t *)bcp; 941 942 diff = umem_abort_info.ump_timestamp - bcap->bc_timestamp; 943 ts.tv_sec = diff / NANOSEC; 944 ts.tv_nsec = diff % NANOSEC; 945 946 umem_printf("previous transaction on buffer %p:\n", buf); 947 umem_printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n", 948 (void *)(intptr_t)bcap->bc_thread, ts.tv_sec, ts.tv_nsec, 949 (void *)sp, cp->cache_name); 950 for (d = 0; d < MIN(bcap->bc_depth, umem_stack_depth); d++) { 951 (void) print_sym((void *)bcap->bc_stack[d]); 952 umem_printf("\n"); 953 } 954 } 955 956 umem_err_recoverable("umem: heap corruption detected"); 957 958 umem_logging = old_logging; /* resume logging */ 959 } 960 961 void 962 umem_nofail_callback(umem_nofail_callback_t *cb) 963 { 964 nofail_callback = cb; 965 } 966 967 static int 968 umem_alloc_retry(umem_cache_t *cp, int umflag) 969 { 970 if (cp == &umem_null_cache) { 971 if (umem_init()) 972 return (1); /* retry */ 973 /* 974 * Initialization failed. Do normal failure processing. 
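		 * (That is, fall through to the UMEM_NOFAIL handling
		 * below, exactly as if the allocation itself had failed.)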
975 */ 976 } 977 if (umflag & UMEM_NOFAIL) { 978 int def_result = UMEM_CALLBACK_EXIT(255); 979 int result = def_result; 980 umem_nofail_callback_t *callback = nofail_callback; 981 982 if (callback != NULL) 983 result = callback(); 984 985 if (result == UMEM_CALLBACK_RETRY) 986 return (1); 987 988 if ((result & ~0xFF) != UMEM_CALLBACK_EXIT(0)) { 989 log_message("nofail callback returned %x\n", result); 990 result = def_result; 991 } 992 993 /* 994 * only one thread will call exit 995 */ 996 if (umem_nofail_exit_thr == thr_self()) 997 umem_panic("recursive UMEM_CALLBACK_EXIT()\n"); 998 999 (void) mutex_lock(&umem_nofail_exit_lock); 1000 umem_nofail_exit_thr = thr_self(); 1001 exit(result & 0xFF); 1002 /*NOTREACHED*/ 1003 } 1004 return (0); 1005 } 1006 1007 static umem_log_header_t * 1008 umem_log_init(size_t logsize) 1009 { 1010 umem_log_header_t *lhp; 1011 int nchunks = 4 * umem_max_ncpus; 1012 size_t lhsize = offsetof(umem_log_header_t, lh_cpu[umem_max_ncpus]); 1013 int i; 1014 1015 if (logsize == 0) 1016 return (NULL); 1017 1018 /* 1019 * Make sure that lhp->lh_cpu[] is nicely aligned 1020 * to prevent false sharing of cache lines. 1021 */ 1022 lhsize = P2ROUNDUP(lhsize, UMEM_ALIGN); 1023 lhp = vmem_xalloc(umem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0, 1024 NULL, NULL, VM_NOSLEEP); 1025 if (lhp == NULL) 1026 goto fail; 1027 1028 bzero(lhp, lhsize); 1029 1030 (void) mutex_init(&lhp->lh_lock, USYNC_THREAD, NULL); 1031 lhp->lh_nchunks = nchunks; 1032 lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks, PAGESIZE); 1033 if (lhp->lh_chunksize == 0) 1034 lhp->lh_chunksize = PAGESIZE; 1035 1036 lhp->lh_base = vmem_alloc(umem_log_arena, 1037 lhp->lh_chunksize * nchunks, VM_NOSLEEP); 1038 if (lhp->lh_base == NULL) 1039 goto fail; 1040 1041 lhp->lh_free = vmem_alloc(umem_log_arena, 1042 nchunks * sizeof (int), VM_NOSLEEP); 1043 if (lhp->lh_free == NULL) 1044 goto fail; 1045 1046 bzero(lhp->lh_base, lhp->lh_chunksize * nchunks); 1047 1048 for (i = 0; i < umem_max_ncpus; i++) { 1049 umem_cpu_log_header_t *clhp = &lhp->lh_cpu[i]; 1050 (void) mutex_init(&clhp->clh_lock, USYNC_THREAD, NULL); 1051 clhp->clh_chunk = i; 1052 } 1053 1054 for (i = umem_max_ncpus; i < nchunks; i++) 1055 lhp->lh_free[i] = i; 1056 1057 lhp->lh_head = umem_max_ncpus; 1058 lhp->lh_tail = 0; 1059 1060 return (lhp); 1061 1062 fail: 1063 if (lhp != NULL) { 1064 if (lhp->lh_base != NULL) 1065 vmem_free(umem_log_arena, lhp->lh_base, 1066 lhp->lh_chunksize * nchunks); 1067 1068 vmem_xfree(umem_log_arena, lhp, lhsize); 1069 } 1070 return (NULL); 1071 } 1072 1073 static void * 1074 umem_log_enter(umem_log_header_t *lhp, void *data, size_t size) 1075 { 1076 void *logspace; 1077 umem_cpu_log_header_t *clhp = 1078 &lhp->lh_cpu[CPU(umem_cpu_mask)->cpu_number]; 1079 1080 if (lhp == NULL || umem_logging == 0) 1081 return (NULL); 1082 1083 (void) mutex_lock(&clhp->clh_lock); 1084 clhp->clh_hits++; 1085 if (size > clhp->clh_avail) { 1086 (void) mutex_lock(&lhp->lh_lock); 1087 lhp->lh_hits++; 1088 lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk; 1089 lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks; 1090 clhp->clh_chunk = lhp->lh_free[lhp->lh_head]; 1091 lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks; 1092 clhp->clh_current = lhp->lh_base + 1093 clhp->clh_chunk * lhp->lh_chunksize; 1094 clhp->clh_avail = lhp->lh_chunksize; 1095 if (size > lhp->lh_chunksize) 1096 size = lhp->lh_chunksize; 1097 (void) mutex_unlock(&lhp->lh_lock); 1098 } 1099 logspace = clhp->clh_current; 1100 clhp->clh_current += size; 1101 clhp->clh_avail -= size; 1102 
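	/*
	 * Copy the record into the space just reserved; clh_lock is
	 * still held, so nothing else can claim this space first.
	 */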
bcopy(data, logspace, size); 1103 (void) mutex_unlock(&clhp->clh_lock); 1104 return (logspace); 1105 } 1106 1107 #define UMEM_AUDIT(lp, cp, bcp) \ 1108 { \ 1109 umem_bufctl_audit_t *_bcp = (umem_bufctl_audit_t *)(bcp); \ 1110 _bcp->bc_timestamp = gethrtime(); \ 1111 _bcp->bc_thread = thr_self(); \ 1112 _bcp->bc_depth = getpcstack(_bcp->bc_stack, umem_stack_depth, \ 1113 (cp != NULL) && (cp->cache_flags & UMF_CHECKSIGNAL)); \ 1114 _bcp->bc_lastlog = umem_log_enter((lp), _bcp, \ 1115 UMEM_BUFCTL_AUDIT_SIZE); \ 1116 } 1117 1118 static void 1119 umem_log_event(umem_log_header_t *lp, umem_cache_t *cp, 1120 umem_slab_t *sp, void *addr) 1121 { 1122 umem_bufctl_audit_t *bcp; 1123 UMEM_LOCAL_BUFCTL_AUDIT(&bcp); 1124 1125 bzero(bcp, UMEM_BUFCTL_AUDIT_SIZE); 1126 bcp->bc_addr = addr; 1127 bcp->bc_slab = sp; 1128 bcp->bc_cache = cp; 1129 UMEM_AUDIT(lp, cp, bcp); 1130 } 1131 1132 /* 1133 * Create a new slab for cache cp. 1134 */ 1135 static umem_slab_t * 1136 umem_slab_create(umem_cache_t *cp, int umflag) 1137 { 1138 size_t slabsize = cp->cache_slabsize; 1139 size_t chunksize = cp->cache_chunksize; 1140 int cache_flags = cp->cache_flags; 1141 size_t color, chunks; 1142 char *buf, *slab; 1143 umem_slab_t *sp; 1144 umem_bufctl_t *bcp; 1145 vmem_t *vmp = cp->cache_arena; 1146 1147 color = cp->cache_color + cp->cache_align; 1148 if (color > cp->cache_maxcolor) 1149 color = cp->cache_mincolor; 1150 cp->cache_color = color; 1151 1152 slab = vmem_alloc(vmp, slabsize, UMEM_VMFLAGS(umflag)); 1153 1154 if (slab == NULL) 1155 goto vmem_alloc_failure; 1156 1157 ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0); 1158 1159 if (!(cp->cache_cflags & UMC_NOTOUCH) && 1160 (cp->cache_flags & UMF_DEADBEEF)) 1161 copy_pattern(UMEM_UNINITIALIZED_PATTERN, slab, slabsize); 1162 1163 if (cache_flags & UMF_HASH) { 1164 if ((sp = _umem_cache_alloc(umem_slab_cache, umflag)) == NULL) 1165 goto slab_alloc_failure; 1166 chunks = (slabsize - color) / chunksize; 1167 } else { 1168 sp = UMEM_SLAB(cp, slab); 1169 chunks = (slabsize - sizeof (umem_slab_t) - color) / chunksize; 1170 } 1171 1172 sp->slab_cache = cp; 1173 sp->slab_head = NULL; 1174 sp->slab_refcnt = 0; 1175 sp->slab_base = buf = slab + color; 1176 sp->slab_chunks = chunks; 1177 1178 ASSERT(chunks > 0); 1179 while (chunks-- != 0) { 1180 if (cache_flags & UMF_HASH) { 1181 bcp = _umem_cache_alloc(cp->cache_bufctl_cache, umflag); 1182 if (bcp == NULL) 1183 goto bufctl_alloc_failure; 1184 if (cache_flags & UMF_AUDIT) { 1185 umem_bufctl_audit_t *bcap = 1186 (umem_bufctl_audit_t *)bcp; 1187 bzero(bcap, UMEM_BUFCTL_AUDIT_SIZE); 1188 bcap->bc_cache = cp; 1189 } 1190 bcp->bc_addr = buf; 1191 bcp->bc_slab = sp; 1192 } else { 1193 bcp = UMEM_BUFCTL(cp, buf); 1194 } 1195 if (cache_flags & UMF_BUFTAG) { 1196 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 1197 btp->bt_redzone = UMEM_REDZONE_PATTERN; 1198 btp->bt_bufctl = bcp; 1199 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE; 1200 if (cache_flags & UMF_DEADBEEF) { 1201 copy_pattern(UMEM_FREE_PATTERN, buf, 1202 cp->cache_verify); 1203 } 1204 } 1205 bcp->bc_next = sp->slab_head; 1206 sp->slab_head = bcp; 1207 buf += chunksize; 1208 } 1209 1210 umem_log_event(umem_slab_log, cp, sp, slab); 1211 1212 return (sp); 1213 1214 bufctl_alloc_failure: 1215 1216 while ((bcp = sp->slab_head) != NULL) { 1217 sp->slab_head = bcp->bc_next; 1218 _umem_cache_free(cp->cache_bufctl_cache, bcp); 1219 } 1220 _umem_cache_free(umem_slab_cache, sp); 1221 1222 slab_alloc_failure: 1223 1224 vmem_free(vmp, slab, slabsize); 1225 1226 vmem_alloc_failure: 1227 
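	/*
	 * All three failure paths converge here: log the failure and
	 * charge it to the cache before returning NULL.
	 */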
1228 umem_log_event(umem_failure_log, cp, NULL, NULL); 1229 atomic_add_64(&cp->cache_alloc_fail, 1); 1230 1231 return (NULL); 1232 } 1233 1234 /* 1235 * Destroy a slab. 1236 */ 1237 static void 1238 umem_slab_destroy(umem_cache_t *cp, umem_slab_t *sp) 1239 { 1240 vmem_t *vmp = cp->cache_arena; 1241 void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum); 1242 1243 if (cp->cache_flags & UMF_HASH) { 1244 umem_bufctl_t *bcp; 1245 while ((bcp = sp->slab_head) != NULL) { 1246 sp->slab_head = bcp->bc_next; 1247 _umem_cache_free(cp->cache_bufctl_cache, bcp); 1248 } 1249 _umem_cache_free(umem_slab_cache, sp); 1250 } 1251 vmem_free(vmp, slab, cp->cache_slabsize); 1252 } 1253 1254 /* 1255 * Allocate a raw (unconstructed) buffer from cp's slab layer. 1256 */ 1257 static void * 1258 umem_slab_alloc(umem_cache_t *cp, int umflag) 1259 { 1260 umem_bufctl_t *bcp, **hash_bucket; 1261 umem_slab_t *sp; 1262 void *buf; 1263 1264 (void) mutex_lock(&cp->cache_lock); 1265 cp->cache_slab_alloc++; 1266 sp = cp->cache_freelist; 1267 ASSERT(sp->slab_cache == cp); 1268 if (sp->slab_head == NULL) { 1269 /* 1270 * The freelist is empty. Create a new slab. 1271 */ 1272 (void) mutex_unlock(&cp->cache_lock); 1273 if (cp == &umem_null_cache) 1274 return (NULL); 1275 if ((sp = umem_slab_create(cp, umflag)) == NULL) 1276 return (NULL); 1277 (void) mutex_lock(&cp->cache_lock); 1278 cp->cache_slab_create++; 1279 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax) 1280 cp->cache_bufmax = cp->cache_buftotal; 1281 sp->slab_next = cp->cache_freelist; 1282 sp->slab_prev = cp->cache_freelist->slab_prev; 1283 sp->slab_next->slab_prev = sp; 1284 sp->slab_prev->slab_next = sp; 1285 cp->cache_freelist = sp; 1286 } 1287 1288 sp->slab_refcnt++; 1289 ASSERT(sp->slab_refcnt <= sp->slab_chunks); 1290 1291 /* 1292 * If we're taking the last buffer in the slab, 1293 * remove the slab from the cache's freelist. 1294 */ 1295 bcp = sp->slab_head; 1296 if ((sp->slab_head = bcp->bc_next) == NULL) { 1297 cp->cache_freelist = sp->slab_next; 1298 ASSERT(sp->slab_refcnt == sp->slab_chunks); 1299 } 1300 1301 if (cp->cache_flags & UMF_HASH) { 1302 /* 1303 * Add buffer to allocated-address hash table. 1304 */ 1305 buf = bcp->bc_addr; 1306 hash_bucket = UMEM_HASH(cp, buf); 1307 bcp->bc_next = *hash_bucket; 1308 *hash_bucket = bcp; 1309 if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) { 1310 UMEM_AUDIT(umem_transaction_log, cp, bcp); 1311 } 1312 } else { 1313 buf = UMEM_BUF(cp, bcp); 1314 } 1315 1316 ASSERT(UMEM_SLAB_MEMBER(sp, buf)); 1317 1318 (void) mutex_unlock(&cp->cache_lock); 1319 1320 return (buf); 1321 } 1322 1323 /* 1324 * Free a raw (unconstructed) buffer to cp's slab layer. 1325 */ 1326 static void 1327 umem_slab_free(umem_cache_t *cp, void *buf) 1328 { 1329 umem_slab_t *sp; 1330 umem_bufctl_t *bcp, **prev_bcpp; 1331 1332 ASSERT(buf != NULL); 1333 1334 (void) mutex_lock(&cp->cache_lock); 1335 cp->cache_slab_free++; 1336 1337 if (cp->cache_flags & UMF_HASH) { 1338 /* 1339 * Look up buffer in allocated-address hash table. 
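		 * If the lookup fails, bcp is left NULL and the
		 * UMERR_BADADDR check below reports the bad free.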
1340 */ 1341 prev_bcpp = UMEM_HASH(cp, buf); 1342 while ((bcp = *prev_bcpp) != NULL) { 1343 if (bcp->bc_addr == buf) { 1344 *prev_bcpp = bcp->bc_next; 1345 sp = bcp->bc_slab; 1346 break; 1347 } 1348 cp->cache_lookup_depth++; 1349 prev_bcpp = &bcp->bc_next; 1350 } 1351 } else { 1352 bcp = UMEM_BUFCTL(cp, buf); 1353 sp = UMEM_SLAB(cp, buf); 1354 } 1355 1356 if (bcp == NULL || sp->slab_cache != cp || !UMEM_SLAB_MEMBER(sp, buf)) { 1357 (void) mutex_unlock(&cp->cache_lock); 1358 umem_error(UMERR_BADADDR, cp, buf); 1359 return; 1360 } 1361 1362 if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) { 1363 if (cp->cache_flags & UMF_CONTENTS) 1364 ((umem_bufctl_audit_t *)bcp)->bc_contents = 1365 umem_log_enter(umem_content_log, buf, 1366 cp->cache_contents); 1367 UMEM_AUDIT(umem_transaction_log, cp, bcp); 1368 } 1369 1370 /* 1371 * If this slab isn't currently on the freelist, put it there. 1372 */ 1373 if (sp->slab_head == NULL) { 1374 ASSERT(sp->slab_refcnt == sp->slab_chunks); 1375 ASSERT(cp->cache_freelist != sp); 1376 sp->slab_next->slab_prev = sp->slab_prev; 1377 sp->slab_prev->slab_next = sp->slab_next; 1378 sp->slab_next = cp->cache_freelist; 1379 sp->slab_prev = cp->cache_freelist->slab_prev; 1380 sp->slab_next->slab_prev = sp; 1381 sp->slab_prev->slab_next = sp; 1382 cp->cache_freelist = sp; 1383 } 1384 1385 bcp->bc_next = sp->slab_head; 1386 sp->slab_head = bcp; 1387 1388 ASSERT(sp->slab_refcnt >= 1); 1389 if (--sp->slab_refcnt == 0) { 1390 /* 1391 * There are no outstanding allocations from this slab, 1392 * so we can reclaim the memory. 1393 */ 1394 sp->slab_next->slab_prev = sp->slab_prev; 1395 sp->slab_prev->slab_next = sp->slab_next; 1396 if (sp == cp->cache_freelist) 1397 cp->cache_freelist = sp->slab_next; 1398 cp->cache_slab_destroy++; 1399 cp->cache_buftotal -= sp->slab_chunks; 1400 (void) mutex_unlock(&cp->cache_lock); 1401 umem_slab_destroy(cp, sp); 1402 return; 1403 } 1404 (void) mutex_unlock(&cp->cache_lock); 1405 } 1406 1407 static int 1408 umem_cache_alloc_debug(umem_cache_t *cp, void *buf, int umflag) 1409 { 1410 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 1411 umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl; 1412 uint32_t mtbf; 1413 int flags_nfatal; 1414 1415 if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) { 1416 umem_error(UMERR_BADBUFTAG, cp, buf); 1417 return (-1); 1418 } 1419 1420 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_ALLOC; 1421 1422 if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) { 1423 umem_error(UMERR_BADBUFCTL, cp, buf); 1424 return (-1); 1425 } 1426 1427 btp->bt_redzone = UMEM_REDZONE_PATTERN; 1428 1429 if (cp->cache_flags & UMF_DEADBEEF) { 1430 if (verify_and_copy_pattern(UMEM_FREE_PATTERN, 1431 UMEM_UNINITIALIZED_PATTERN, buf, cp->cache_verify)) { 1432 umem_error(UMERR_MODIFIED, cp, buf); 1433 return (-1); 1434 } 1435 } 1436 1437 if ((mtbf = umem_mtbf | cp->cache_mtbf) != 0 && 1438 gethrtime() % mtbf == 0 && 1439 (umflag & (UMEM_FATAL_FLAGS)) == 0) { 1440 umem_log_event(umem_failure_log, cp, NULL, NULL); 1441 } else { 1442 mtbf = 0; 1443 } 1444 1445 /* 1446 * We do not pass fatal flags on to the constructor. This prevents 1447 * leaking buffers in the event of a subordinate constructor failing. 
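	 * A constructor failure is charged to this allocation; the
	 * caller's umem_alloc_retry() then decides whether to retry.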
1448 */ 1449 flags_nfatal = UMEM_DEFAULT; 1450 if (mtbf || (cp->cache_constructor != NULL && 1451 cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0)) { 1452 atomic_add_64(&cp->cache_alloc_fail, 1); 1453 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE; 1454 copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify); 1455 umem_slab_free(cp, buf); 1456 return (-1); 1457 } 1458 1459 if (cp->cache_flags & UMF_AUDIT) { 1460 UMEM_AUDIT(umem_transaction_log, cp, bcp); 1461 } 1462 1463 return (0); 1464 } 1465 1466 static int 1467 umem_cache_free_debug(umem_cache_t *cp, void *buf) 1468 { 1469 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 1470 umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl; 1471 umem_slab_t *sp; 1472 1473 if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_ALLOC)) { 1474 if (btp->bt_bxstat == ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) { 1475 umem_error(UMERR_DUPFREE, cp, buf); 1476 return (-1); 1477 } 1478 sp = umem_findslab(cp, buf); 1479 if (sp == NULL || sp->slab_cache != cp) 1480 umem_error(UMERR_BADADDR, cp, buf); 1481 else 1482 umem_error(UMERR_REDZONE, cp, buf); 1483 return (-1); 1484 } 1485 1486 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE; 1487 1488 if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) { 1489 umem_error(UMERR_BADBUFCTL, cp, buf); 1490 return (-1); 1491 } 1492 1493 if (btp->bt_redzone != UMEM_REDZONE_PATTERN) { 1494 umem_error(UMERR_REDZONE, cp, buf); 1495 return (-1); 1496 } 1497 1498 if (cp->cache_flags & UMF_AUDIT) { 1499 if (cp->cache_flags & UMF_CONTENTS) 1500 bcp->bc_contents = umem_log_enter(umem_content_log, 1501 buf, cp->cache_contents); 1502 UMEM_AUDIT(umem_transaction_log, cp, bcp); 1503 } 1504 1505 if (cp->cache_destructor != NULL) 1506 cp->cache_destructor(buf, cp->cache_private); 1507 1508 if (cp->cache_flags & UMF_DEADBEEF) 1509 copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify); 1510 1511 return (0); 1512 } 1513 1514 /* 1515 * Free each object in magazine mp to cp's slab layer, and free mp itself. 1516 */ 1517 static void 1518 umem_magazine_destroy(umem_cache_t *cp, umem_magazine_t *mp, int nrounds) 1519 { 1520 int round; 1521 1522 ASSERT(cp->cache_next == NULL || IN_UPDATE()); 1523 1524 for (round = 0; round < nrounds; round++) { 1525 void *buf = mp->mag_round[round]; 1526 1527 if ((cp->cache_flags & UMF_DEADBEEF) && 1528 verify_pattern(UMEM_FREE_PATTERN, buf, 1529 cp->cache_verify) != NULL) { 1530 umem_error(UMERR_MODIFIED, cp, buf); 1531 continue; 1532 } 1533 1534 if (!(cp->cache_flags & UMF_BUFTAG) && 1535 cp->cache_destructor != NULL) 1536 cp->cache_destructor(buf, cp->cache_private); 1537 1538 umem_slab_free(cp, buf); 1539 } 1540 ASSERT(UMEM_MAGAZINE_VALID(cp, mp)); 1541 _umem_cache_free(cp->cache_magtype->mt_cache, mp); 1542 } 1543 1544 /* 1545 * Allocate a magazine from the depot. 1546 */ 1547 static umem_magazine_t * 1548 umem_depot_alloc(umem_cache_t *cp, umem_maglist_t *mlp) 1549 { 1550 umem_magazine_t *mp; 1551 1552 /* 1553 * If we can't get the depot lock without contention, 1554 * update our contention count. We use the depot 1555 * contention rate to determine whether we need to 1556 * increase the magazine size for better scalability. 
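	 * (The acceptable rate is bounded by the umem_depot_contention
	 * tunable declared earlier.)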
1557 */ 1558 if (mutex_trylock(&cp->cache_depot_lock) != 0) { 1559 (void) mutex_lock(&cp->cache_depot_lock); 1560 cp->cache_depot_contention++; 1561 } 1562 1563 if ((mp = mlp->ml_list) != NULL) { 1564 ASSERT(UMEM_MAGAZINE_VALID(cp, mp)); 1565 mlp->ml_list = mp->mag_next; 1566 if (--mlp->ml_total < mlp->ml_min) 1567 mlp->ml_min = mlp->ml_total; 1568 mlp->ml_alloc++; 1569 } 1570 1571 (void) mutex_unlock(&cp->cache_depot_lock); 1572 1573 return (mp); 1574 } 1575 1576 /* 1577 * Free a magazine to the depot. 1578 */ 1579 static void 1580 umem_depot_free(umem_cache_t *cp, umem_maglist_t *mlp, umem_magazine_t *mp) 1581 { 1582 (void) mutex_lock(&cp->cache_depot_lock); 1583 ASSERT(UMEM_MAGAZINE_VALID(cp, mp)); 1584 mp->mag_next = mlp->ml_list; 1585 mlp->ml_list = mp; 1586 mlp->ml_total++; 1587 (void) mutex_unlock(&cp->cache_depot_lock); 1588 } 1589 1590 /* 1591 * Update the working set statistics for cp's depot. 1592 */ 1593 static void 1594 umem_depot_ws_update(umem_cache_t *cp) 1595 { 1596 (void) mutex_lock(&cp->cache_depot_lock); 1597 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min; 1598 cp->cache_full.ml_min = cp->cache_full.ml_total; 1599 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min; 1600 cp->cache_empty.ml_min = cp->cache_empty.ml_total; 1601 (void) mutex_unlock(&cp->cache_depot_lock); 1602 } 1603 1604 /* 1605 * Reap all magazines that have fallen out of the depot's working set. 1606 */ 1607 static void 1608 umem_depot_ws_reap(umem_cache_t *cp) 1609 { 1610 long reap; 1611 umem_magazine_t *mp; 1612 1613 ASSERT(cp->cache_next == NULL || IN_REAP()); 1614 1615 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); 1616 while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_full)) != NULL) 1617 umem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize); 1618 1619 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min); 1620 while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_empty)) != NULL) 1621 umem_magazine_destroy(cp, mp, 0); 1622 } 1623 1624 static void 1625 umem_cpu_reload(umem_cpu_cache_t *ccp, umem_magazine_t *mp, int rounds) 1626 { 1627 ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) || 1628 (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize)); 1629 ASSERT(ccp->cc_magsize > 0); 1630 1631 ccp->cc_ploaded = ccp->cc_loaded; 1632 ccp->cc_prounds = ccp->cc_rounds; 1633 ccp->cc_loaded = mp; 1634 ccp->cc_rounds = rounds; 1635 } 1636 1637 /* 1638 * Allocate a constructed object from cache cp. 1639 */ 1640 #pragma weak umem_cache_alloc = _umem_cache_alloc 1641 void * 1642 _umem_cache_alloc(umem_cache_t *cp, int umflag) 1643 { 1644 umem_cpu_cache_t *ccp; 1645 umem_magazine_t *fmp; 1646 void *buf; 1647 int flags_nfatal; 1648 1649 retry: 1650 ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask)); 1651 (void) mutex_lock(&ccp->cc_lock); 1652 for (;;) { 1653 /* 1654 * If there's an object available in the current CPU's 1655 * loaded magazine, just take it and return. 1656 */ 1657 if (ccp->cc_rounds > 0) { 1658 buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds]; 1659 ccp->cc_alloc++; 1660 (void) mutex_unlock(&ccp->cc_lock); 1661 if ((ccp->cc_flags & UMF_BUFTAG) && 1662 umem_cache_alloc_debug(cp, buf, umflag) == -1) { 1663 if (umem_alloc_retry(cp, umflag)) { 1664 goto retry; 1665 } 1666 1667 return (NULL); 1668 } 1669 return (buf); 1670 } 1671 1672 /* 1673 * The loaded magazine is empty. If the previously loaded 1674 * magazine was full, exchange them and try again. 
1675 */ 1676 if (ccp->cc_prounds > 0) { 1677 umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds); 1678 continue; 1679 } 1680 1681 /* 1682 * If the magazine layer is disabled, break out now. 1683 */ 1684 if (ccp->cc_magsize == 0) 1685 break; 1686 1687 /* 1688 * Try to get a full magazine from the depot. 1689 */ 1690 fmp = umem_depot_alloc(cp, &cp->cache_full); 1691 if (fmp != NULL) { 1692 if (ccp->cc_ploaded != NULL) 1693 umem_depot_free(cp, &cp->cache_empty, 1694 ccp->cc_ploaded); 1695 umem_cpu_reload(ccp, fmp, ccp->cc_magsize); 1696 continue; 1697 } 1698 1699 /* 1700 * There are no full magazines in the depot, 1701 * so fall through to the slab layer. 1702 */ 1703 break; 1704 } 1705 (void) mutex_unlock(&ccp->cc_lock); 1706 1707 /* 1708 * We couldn't allocate a constructed object from the magazine layer, 1709 * so get a raw buffer from the slab layer and apply its constructor. 1710 */ 1711 buf = umem_slab_alloc(cp, umflag); 1712 1713 if (buf == NULL) { 1714 if (cp == &umem_null_cache) 1715 return (NULL); 1716 if (umem_alloc_retry(cp, umflag)) { 1717 goto retry; 1718 } 1719 1720 return (NULL); 1721 } 1722 1723 if (cp->cache_flags & UMF_BUFTAG) { 1724 /* 1725 * Let umem_cache_alloc_debug() apply the constructor for us. 1726 */ 1727 if (umem_cache_alloc_debug(cp, buf, umflag) == -1) { 1728 if (umem_alloc_retry(cp, umflag)) { 1729 goto retry; 1730 } 1731 return (NULL); 1732 } 1733 return (buf); 1734 } 1735 1736 /* 1737 * We do not pass fatal flags on to the constructor. This prevents 1738 * leaking buffers in the event of a subordinate constructor failing. 1739 */ 1740 flags_nfatal = UMEM_DEFAULT; 1741 if (cp->cache_constructor != NULL && 1742 cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0) { 1743 atomic_add_64(&cp->cache_alloc_fail, 1); 1744 umem_slab_free(cp, buf); 1745 1746 if (umem_alloc_retry(cp, umflag)) { 1747 goto retry; 1748 } 1749 return (NULL); 1750 } 1751 1752 return (buf); 1753 } 1754 1755 /* 1756 * Free a constructed object to cache cp. 1757 */ 1758 #pragma weak umem_cache_free = _umem_cache_free 1759 void 1760 _umem_cache_free(umem_cache_t *cp, void *buf) 1761 { 1762 umem_cpu_cache_t *ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask)); 1763 umem_magazine_t *emp; 1764 umem_magtype_t *mtp; 1765 1766 if (ccp->cc_flags & UMF_BUFTAG) 1767 if (umem_cache_free_debug(cp, buf) == -1) 1768 return; 1769 1770 (void) mutex_lock(&ccp->cc_lock); 1771 for (;;) { 1772 /* 1773 * If there's a slot available in the current CPU's 1774 * loaded magazine, just put the object there and return. 1775 */ 1776 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) { 1777 ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf; 1778 ccp->cc_free++; 1779 (void) mutex_unlock(&ccp->cc_lock); 1780 return; 1781 } 1782 1783 /* 1784 * The loaded magazine is full. If the previously loaded 1785 * magazine was empty, exchange them and try again. 1786 */ 1787 if (ccp->cc_prounds == 0) { 1788 umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds); 1789 continue; 1790 } 1791 1792 /* 1793 * If the magazine layer is disabled, break out now. 1794 */ 1795 if (ccp->cc_magsize == 0) 1796 break; 1797 1798 /* 1799 * Try to get an empty magazine from the depot. 1800 */ 1801 emp = umem_depot_alloc(cp, &cp->cache_empty); 1802 if (emp != NULL) { 1803 if (ccp->cc_ploaded != NULL) 1804 umem_depot_free(cp, &cp->cache_full, 1805 ccp->cc_ploaded); 1806 umem_cpu_reload(ccp, emp, 0); 1807 continue; 1808 } 1809 1810 /* 1811 * There are no empty magazines in the depot, 1812 * so try to allocate a new one. 
We must drop all locks 1813 * across umem_cache_alloc() because lower layers may 1814 * attempt to allocate from this cache. 1815 */ 1816 mtp = cp->cache_magtype; 1817 (void) mutex_unlock(&ccp->cc_lock); 1818 emp = _umem_cache_alloc(mtp->mt_cache, UMEM_DEFAULT); 1819 (void) mutex_lock(&ccp->cc_lock); 1820 1821 if (emp != NULL) { 1822 /* 1823 * We successfully allocated an empty magazine. 1824 * However, we had to drop ccp->cc_lock to do it, 1825 * so the cache's magazine size may have changed. 1826 * If so, free the magazine and try again. 1827 */ 1828 if (ccp->cc_magsize != mtp->mt_magsize) { 1829 (void) mutex_unlock(&ccp->cc_lock); 1830 _umem_cache_free(mtp->mt_cache, emp); 1831 (void) mutex_lock(&ccp->cc_lock); 1832 continue; 1833 } 1834 1835 /* 1836 * We got a magazine of the right size. Add it to 1837 * the depot and try the whole dance again. 1838 */ 1839 umem_depot_free(cp, &cp->cache_empty, emp); 1840 continue; 1841 } 1842 1843 /* 1844 * We couldn't allocate an empty magazine, 1845 * so fall through to the slab layer. 1846 */ 1847 break; 1848 } 1849 (void) mutex_unlock(&ccp->cc_lock); 1850 1851 /* 1852 * We couldn't free our constructed object to the magazine layer, 1853 * so apply its destructor and free it to the slab layer. 1854 * Note that if UMF_BUFTAG is in effect, umem_cache_free_debug() 1855 * will have already applied the destructor. 1856 */ 1857 if (!(cp->cache_flags & UMF_BUFTAG) && cp->cache_destructor != NULL) 1858 cp->cache_destructor(buf, cp->cache_private); 1859 1860 umem_slab_free(cp, buf); 1861 } 1862 1863 #pragma weak umem_zalloc = _umem_zalloc 1864 void * 1865 _umem_zalloc(size_t size, int umflag) 1866 { 1867 size_t index = (size - 1) >> UMEM_ALIGN_SHIFT; 1868 void *buf; 1869 1870 retry: 1871 if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) { 1872 umem_cache_t *cp = umem_alloc_table[index]; 1873 buf = _umem_cache_alloc(cp, umflag); 1874 if (buf != NULL) { 1875 if (cp->cache_flags & UMF_BUFTAG) { 1876 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 1877 ((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE; 1878 ((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size); 1879 } 1880 bzero(buf, size); 1881 } else if (umem_alloc_retry(cp, umflag)) 1882 goto retry; 1883 } else { 1884 buf = _umem_alloc(size, umflag); /* handles failure */ 1885 if (buf != NULL) 1886 bzero(buf, size); 1887 } 1888 return (buf); 1889 } 1890 1891 #pragma weak umem_alloc = _umem_alloc 1892 void * 1893 _umem_alloc(size_t size, int umflag) 1894 { 1895 size_t index = (size - 1) >> UMEM_ALIGN_SHIFT; 1896 void *buf; 1897 umem_alloc_retry: 1898 if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) { 1899 umem_cache_t *cp = umem_alloc_table[index]; 1900 buf = _umem_cache_alloc(cp, umflag); 1901 if ((cp->cache_flags & UMF_BUFTAG) && buf != NULL) { 1902 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 1903 ((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE; 1904 ((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size); 1905 } 1906 if (buf == NULL && umem_alloc_retry(cp, umflag)) 1907 goto umem_alloc_retry; 1908 return (buf); 1909 } 1910 if (size == 0) 1911 return (NULL); 1912 if (umem_oversize_arena == NULL) { 1913 if (umem_init()) 1914 ASSERT(umem_oversize_arena != NULL); 1915 else 1916 return (NULL); 1917 } 1918 buf = vmem_alloc(umem_oversize_arena, size, UMEM_VMFLAGS(umflag)); 1919 if (buf == NULL) { 1920 umem_log_event(umem_failure_log, NULL, NULL, (void *)size); 1921 if (umem_alloc_retry(NULL, umflag)) 1922 goto umem_alloc_retry; 1923 } 1924 return (buf); 1925 } 1926 1927 #pragma weak umem_alloc_align = _umem_alloc_align 1928 void * 1929 
#pragma weak umem_alloc_align = _umem_alloc_align
void *
_umem_alloc_align(size_t size, size_t align, int umflag)
{
	void *buf;

	if (size == 0)
		return (NULL);
	if ((align & (align - 1)) != 0)
		return (NULL);
	if (align < UMEM_ALIGN)
		align = UMEM_ALIGN;

umem_alloc_align_retry:
	if (umem_memalign_arena == NULL) {
		if (umem_init())
			ASSERT(umem_memalign_arena != NULL);
		else
			return (NULL);
	}
	buf = vmem_xalloc(umem_memalign_arena, size, align, 0, 0, NULL, NULL,
	    UMEM_VMFLAGS(umflag));
	if (buf == NULL) {
		umem_log_event(umem_failure_log, NULL, NULL, (void *)size);
		if (umem_alloc_retry(NULL, umflag))
			goto umem_alloc_align_retry;
	}
	return (buf);
}

#pragma weak umem_free = _umem_free
void
_umem_free(void *buf, size_t size)
{
	size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;

	if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
		umem_cache_t *cp = umem_alloc_table[index];
		if (cp->cache_flags & UMF_BUFTAG) {
			umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
			uint32_t *ip = (uint32_t *)btp;
			if (ip[1] != UMEM_SIZE_ENCODE(size)) {
				if (*(uint64_t *)buf == UMEM_FREE_PATTERN) {
					umem_error(UMERR_DUPFREE, cp, buf);
					return;
				}
				if (UMEM_SIZE_VALID(ip[1])) {
					ip[0] = UMEM_SIZE_ENCODE(size);
					umem_error(UMERR_BADSIZE, cp, buf);
				} else {
					umem_error(UMERR_REDZONE, cp, buf);
				}
				return;
			}
			if (((uint8_t *)buf)[size] != UMEM_REDZONE_BYTE) {
				umem_error(UMERR_REDZONE, cp, buf);
				return;
			}
			btp->bt_redzone = UMEM_REDZONE_PATTERN;
		}
		_umem_cache_free(cp, buf);
	} else {
		if (buf == NULL && size == 0)
			return;
		vmem_free(umem_oversize_arena, buf, size);
	}
}

#pragma weak umem_free_align = _umem_free_align
void
_umem_free_align(void *buf, size_t size)
{
	if (buf == NULL && size == 0)
		return;
	vmem_xfree(umem_memalign_arena, buf, size);
}

static void *
umem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	size_t realsize = size + vmp->vm_quantum;

	/*
	 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
	 * vm_quantum will cause integer wraparound.  Check for this, and
	 * blow off the firewall page in this case.  Note that such a
	 * giant allocation (the entire address space) can never be
	 * satisfied, so it will either fail immediately (VM_NOSLEEP)
	 * or sleep forever (VM_SLEEP).  Thus, there is no need for a
	 * corresponding check in umem_firewall_va_free().
	 */
	if (realsize < size)
		realsize = size;

	return (vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT));
}

static void
umem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
{
	vmem_free(vmp, addr, size + vmp->vm_quantum);
}

/*
 * Reclaim all unused memory from a cache.
 */
static void
umem_cache_reap(umem_cache_t *cp)
{
	/*
	 * Ask the cache's owner to free some memory if possible.
	 * The idea is to handle things like the inode cache, which
	 * typically sits on a bunch of memory that it doesn't truly
	 * *need*.  Reclaim policy is entirely up to the owner; this
	 * callback is just an advisory plea for help.
	 */
	if (cp->cache_reclaim != NULL)
		cp->cache_reclaim(cp->cache_private);

	umem_depot_ws_reap(cp);
}

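/*
 * Note that umem_cache_reap() is not called directly by clients: umem_reap()
 * (below) posts a UMU_REAP update for every cache, and the update thread
 * arrives here via umem_process_updates().
 */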
/*
 * Purge all magazines from a cache and set its magazine limit to zero.
 * All calls are serialized by being done by the update thread, except for
 * the final call from umem_cache_destroy().
 */
static void
umem_cache_magazine_purge(umem_cache_t *cp)
{
	umem_cpu_cache_t *ccp;
	umem_magazine_t *mp, *pmp;
	int rounds, prounds, cpu_seqid;

	ASSERT(cp->cache_next == NULL || IN_UPDATE());

	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
		ccp = &cp->cache_cpu[cpu_seqid];

		(void) mutex_lock(&ccp->cc_lock);
		mp = ccp->cc_loaded;
		pmp = ccp->cc_ploaded;
		rounds = ccp->cc_rounds;
		prounds = ccp->cc_prounds;
		ccp->cc_loaded = NULL;
		ccp->cc_ploaded = NULL;
		ccp->cc_rounds = -1;
		ccp->cc_prounds = -1;
		ccp->cc_magsize = 0;
		(void) mutex_unlock(&ccp->cc_lock);

		if (mp)
			umem_magazine_destroy(cp, mp, rounds);
		if (pmp)
			umem_magazine_destroy(cp, pmp, prounds);
	}

	/*
	 * Updating the working set statistics twice in a row has the
	 * effect of setting the working set size to zero, so everything
	 * is eligible for reaping.
	 */
	umem_depot_ws_update(cp);
	umem_depot_ws_update(cp);

	umem_depot_ws_reap(cp);
}

/*
 * Enable per-cpu magazines on a cache.
 */
static void
umem_cache_magazine_enable(umem_cache_t *cp)
{
	int cpu_seqid;

	if (cp->cache_flags & UMF_NOMAGAZINE)
		return;

	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
		umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
		(void) mutex_lock(&ccp->cc_lock);
		ccp->cc_magsize = cp->cache_magtype->mt_magsize;
		(void) mutex_unlock(&ccp->cc_lock);
	}
}

/*
 * Recompute a cache's magazine size.  The trade-off is that larger magazines
 * provide a higher transfer rate with the depot, while smaller magazines
 * reduce memory consumption.  Magazine resizing is an expensive operation;
 * it should not be done frequently.
 *
 * Changes to the magazine size are serialized by only having one thread
 * (the update thread) perform updates.
 *
 * Note: at present this only grows the magazine size.  It might be useful
 * to allow shrinkage too.
 */
static void
umem_cache_magazine_resize(umem_cache_t *cp)
{
	umem_magtype_t *mtp = cp->cache_magtype;

	ASSERT(IN_UPDATE());

	if (cp->cache_chunksize < mtp->mt_maxbuf) {
		umem_cache_magazine_purge(cp);
		(void) mutex_lock(&cp->cache_depot_lock);
		cp->cache_magtype = ++mtp;
		/*
		 * Pushing _prev INT_MAX ahead of the current contention
		 * count keeps the signed comparison in umem_cache_update()
		 * negative, so a freshly resized cache is not immediately
		 * scheduled for another resize.
		 */
		cp->cache_depot_contention_prev =
		    cp->cache_depot_contention + INT_MAX;
		(void) mutex_unlock(&cp->cache_depot_lock);
		umem_cache_magazine_enable(cp);
	}
}

/*
 * Rescale a cache's hash table, so that the table size is roughly the
 * cache size.  We want the average lookup time to be extremely small.
 */
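/*
 * (A note on the sizing arithmetic below, not from the original comments:
 * highbit(x) returns the position of the most significant set bit, so
 * 1 << (highbit(3 * cache_buftotal + 4) - 2) is the largest power of two
 * no greater than roughly 1.5 * cache_buftotal.  For example, a cache with
 * 1000 buffers gets 1 << (highbit(3004) - 2) == 1024 buckets -- about one
 * bucket per buffer.)
 */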
static void
umem_hash_rescale(umem_cache_t *cp)
{
	umem_bufctl_t **old_table, **new_table, *bcp;
	size_t old_size, new_size, h;

	ASSERT(IN_UPDATE());

	new_size = MAX(UMEM_HASH_INITIAL,
	    1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
	old_size = cp->cache_hash_mask + 1;

	if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
		return;

	new_table = vmem_alloc(umem_hash_arena, new_size * sizeof (void *),
	    VM_NOSLEEP);
	if (new_table == NULL)
		return;
	bzero(new_table, new_size * sizeof (void *));

	(void) mutex_lock(&cp->cache_lock);

	old_size = cp->cache_hash_mask + 1;
	old_table = cp->cache_hash_table;

	cp->cache_hash_mask = new_size - 1;
	cp->cache_hash_table = new_table;
	cp->cache_rescale++;

	for (h = 0; h < old_size; h++) {
		bcp = old_table[h];
		while (bcp != NULL) {
			void *addr = bcp->bc_addr;
			umem_bufctl_t *next_bcp = bcp->bc_next;
			umem_bufctl_t **hash_bucket = UMEM_HASH(cp, addr);
			bcp->bc_next = *hash_bucket;
			*hash_bucket = bcp;
			bcp = next_bcp;
		}
	}

	(void) mutex_unlock(&cp->cache_lock);

	vmem_free(umem_hash_arena, old_table, old_size * sizeof (void *));
}

/*
 * Perform periodic maintenance on a cache: hash rescaling,
 * depot working-set update, and magazine resizing.
 */
void
umem_cache_update(umem_cache_t *cp)
{
	int update_flags = 0;

	ASSERT(MUTEX_HELD(&umem_cache_lock));

	/*
	 * If the cache has become much larger or smaller than its hash table,
	 * fire off a request to rescale the hash table.
	 */
	(void) mutex_lock(&cp->cache_lock);

	if ((cp->cache_flags & UMF_HASH) &&
	    (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
	    (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
	    cp->cache_hash_mask > UMEM_HASH_INITIAL)))
		update_flags |= UMU_HASH_RESCALE;

	(void) mutex_unlock(&cp->cache_lock);

	/*
	 * Update the depot working set statistics.
	 */
	umem_depot_ws_update(cp);

	/*
	 * If there's a lot of contention in the depot,
	 * increase the magazine size.
	 */
	(void) mutex_lock(&cp->cache_depot_lock);

	if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
	    (int)(cp->cache_depot_contention -
	    cp->cache_depot_contention_prev) > umem_depot_contention)
		update_flags |= UMU_MAGAZINE_RESIZE;

	cp->cache_depot_contention_prev = cp->cache_depot_contention;

	(void) mutex_unlock(&cp->cache_depot_lock);

	if (update_flags)
		umem_add_update(cp, update_flags);
}

/*
 * Runs all pending updates.
 *
 * The update lock must be held on entrance, and will be held on exit.
 */
void
umem_process_updates(void)
{
	ASSERT(MUTEX_HELD(&umem_update_lock));

	while (umem_null_cache.cache_unext != &umem_null_cache) {
		int notify = 0;
		umem_cache_t *cp = umem_null_cache.cache_unext;

		cp->cache_uprev->cache_unext = cp->cache_unext;
		cp->cache_unext->cache_uprev = cp->cache_uprev;
		cp->cache_uprev = cp->cache_unext = NULL;

		ASSERT(!(cp->cache_uflags & UMU_ACTIVE));

		while (cp->cache_uflags) {
			int uflags = (cp->cache_uflags |= UMU_ACTIVE);
			(void) mutex_unlock(&umem_update_lock);

			/*
			 * The order here is important.  Each step can speed up
			 * later steps.
			 */
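			/*
			 * (Presumably: rescaling the hash table first speeds
			 * up the per-buffer lookups done when a magazine
			 * purge or a reap frees buffers back to the slab
			 * layer, and a magazine resize's purge returns rounds
			 * that the subsequent reap can then reclaim.)
			 */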

			if (uflags & UMU_HASH_RESCALE)
				umem_hash_rescale(cp);

			if (uflags & UMU_MAGAZINE_RESIZE)
				umem_cache_magazine_resize(cp);

			if (uflags & UMU_REAP)
				umem_cache_reap(cp);

			(void) mutex_lock(&umem_update_lock);

			/*
			 * Check whether anyone has requested notification.
			 */
			if (cp->cache_uflags & UMU_NOTIFY) {
				uflags |= UMU_NOTIFY;
				notify = 1;
			}
			cp->cache_uflags &= ~uflags;
		}
		if (notify)
			(void) cond_broadcast(&umem_update_cv);
	}
}

#ifndef UMEM_STANDALONE
static void
umem_st_update(void)
{
	ASSERT(MUTEX_HELD(&umem_update_lock));
	ASSERT(umem_update_thr == 0 && umem_st_update_thr == 0);

	umem_st_update_thr = thr_self();

	(void) mutex_unlock(&umem_update_lock);

	vmem_update(NULL);
	umem_cache_applyall(umem_cache_update);

	(void) mutex_lock(&umem_update_lock);

	umem_process_updates();	/* does all of the requested work */

	umem_reap_next = gethrtime() +
	    (hrtime_t)umem_reap_interval * NANOSEC;

	umem_reaping = UMEM_REAP_DONE;

	umem_st_update_thr = 0;
}
#endif

/*
 * Reclaim all unused memory from all caches.  Called from vmem when memory
 * gets tight.  Must be called with no locks held.
 *
 * This just requests a reap on all caches, and notifies the update thread.
 */
void
umem_reap(void)
{
#ifndef UMEM_STANDALONE
	extern int __nthreads(void);
#endif

	if (umem_ready != UMEM_READY || umem_reaping != UMEM_REAP_DONE ||
	    gethrtime() < umem_reap_next)
		return;

	(void) mutex_lock(&umem_update_lock);

	if (umem_reaping != UMEM_REAP_DONE || gethrtime() < umem_reap_next) {
		(void) mutex_unlock(&umem_update_lock);
		return;
	}
	umem_reaping = UMEM_REAP_ADDING;	/* lock out other reaps */

	(void) mutex_unlock(&umem_update_lock);

	umem_updateall(UMU_REAP);

	(void) mutex_lock(&umem_update_lock);

	umem_reaping = UMEM_REAP_ACTIVE;

	/* Standalone is single-threaded */
#ifndef UMEM_STANDALONE
	if (umem_update_thr == 0) {
		/*
		 * The update thread does not exist.  If the process is
		 * multi-threaded, create it.  If not, or the creation fails,
		 * do the update processing inline.
		 */
		ASSERT(umem_st_update_thr == 0);

		if (__nthreads() <= 1 || umem_create_update_thread() == 0)
			umem_st_update();
	}

	(void) cond_broadcast(&umem_update_cv);	/* wake up the update thread */
#endif

	(void) mutex_unlock(&umem_update_lock);
}

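/*
 * A sketch of typical client usage (illustrative only; foo_t, foo_init(),
 * foo_fini() and foo_cache are hypothetical):
 *
 *	foo_cache = umem_cache_create("foo_cache", sizeof (foo_t),
 *	    0, foo_init, foo_fini, NULL, NULL, NULL, 0);
 *
 *	foo_t *fp = umem_cache_alloc(foo_cache, UMEM_DEFAULT);
 *	...
 *	umem_cache_free(foo_cache, fp);
 *
 * Passing 0 for align requests the minimum required alignment, and passing
 * NULL for vmp selects umem_default_arena.
 */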
2400 * 2401 * Other threads must wait until until initialization is complete. 2402 */ 2403 if (umem_init_thr == thr_self()) 2404 ASSERT((cflags & (UMC_INTERNAL | UMC_QCACHE)) != 0); 2405 else { 2406 ASSERT(!(cflags & UMC_INTERNAL)); 2407 if (umem_ready != UMEM_READY && umem_init() == 0) { 2408 errno = EAGAIN; 2409 return (NULL); 2410 } 2411 } 2412 2413 csize = UMEM_CACHE_SIZE(umem_max_ncpus); 2414 phase = P2NPHASE(csize, UMEM_CPU_CACHE_SIZE); 2415 2416 if (vmp == NULL) 2417 vmp = umem_default_arena; 2418 2419 ASSERT(P2PHASE(phase, UMEM_ALIGN) == 0); 2420 2421 /* 2422 * Check that the arguments are reasonable 2423 */ 2424 if ((align & (align - 1)) != 0 || align > vmp->vm_quantum || 2425 ((cflags & UMC_NOHASH) && (cflags & UMC_NOTOUCH)) || 2426 name == NULL || bufsize == 0) { 2427 errno = EINVAL; 2428 return (NULL); 2429 } 2430 2431 /* 2432 * If align == 0, we set it to the minimum required alignment. 2433 * 2434 * If align < UMEM_ALIGN, we round it up to UMEM_ALIGN, unless 2435 * UMC_NOTOUCH was passed. 2436 */ 2437 if (align == 0) { 2438 if (P2ROUNDUP(bufsize, UMEM_ALIGN) >= UMEM_SECOND_ALIGN) 2439 align = UMEM_SECOND_ALIGN; 2440 else 2441 align = UMEM_ALIGN; 2442 } else if (align < UMEM_ALIGN && (cflags & UMC_NOTOUCH) == 0) 2443 align = UMEM_ALIGN; 2444 2445 2446 /* 2447 * Get a umem_cache structure. We arrange that cp->cache_cpu[] 2448 * is aligned on a UMEM_CPU_CACHE_SIZE boundary to prevent 2449 * false sharing of per-CPU data. 2450 */ 2451 cp = vmem_xalloc(umem_cache_arena, csize, UMEM_CPU_CACHE_SIZE, phase, 2452 0, NULL, NULL, VM_NOSLEEP); 2453 2454 if (cp == NULL) { 2455 errno = EAGAIN; 2456 return (NULL); 2457 } 2458 2459 bzero(cp, csize); 2460 2461 (void) mutex_lock(&umem_flags_lock); 2462 if (umem_flags & UMF_RANDOMIZE) 2463 umem_flags = (((umem_flags | ~UMF_RANDOM) + 1) & UMF_RANDOM) | 2464 UMF_RANDOMIZE; 2465 cp->cache_flags = umem_flags | (cflags & UMF_DEBUG); 2466 (void) mutex_unlock(&umem_flags_lock); 2467 2468 /* 2469 * Make sure all the various flags are reasonable. 2470 */ 2471 if (cp->cache_flags & UMF_LITE) { 2472 if (bufsize >= umem_lite_minsize && 2473 align <= umem_lite_maxalign && 2474 P2PHASE(bufsize, umem_lite_maxalign) != 0) { 2475 cp->cache_flags |= UMF_BUFTAG; 2476 cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL); 2477 } else { 2478 cp->cache_flags &= ~UMF_DEBUG; 2479 } 2480 } 2481 2482 if ((cflags & UMC_QCACHE) && (cp->cache_flags & UMF_AUDIT)) 2483 cp->cache_flags |= UMF_NOMAGAZINE; 2484 2485 if (cflags & UMC_NODEBUG) 2486 cp->cache_flags &= ~UMF_DEBUG; 2487 2488 if (cflags & UMC_NOTOUCH) 2489 cp->cache_flags &= ~UMF_TOUCH; 2490 2491 if (cflags & UMC_NOHASH) 2492 cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL); 2493 2494 if (cflags & UMC_NOMAGAZINE) 2495 cp->cache_flags |= UMF_NOMAGAZINE; 2496 2497 if ((cp->cache_flags & UMF_AUDIT) && !(cflags & UMC_NOTOUCH)) 2498 cp->cache_flags |= UMF_REDZONE; 2499 2500 if ((cp->cache_flags & UMF_BUFTAG) && bufsize >= umem_minfirewall && 2501 !(cp->cache_flags & UMF_LITE) && !(cflags & UMC_NOHASH)) 2502 cp->cache_flags |= UMF_FIREWALL; 2503 2504 if (vmp != umem_default_arena || umem_firewall_arena == NULL) 2505 cp->cache_flags &= ~UMF_FIREWALL; 2506 2507 if (cp->cache_flags & UMF_FIREWALL) { 2508 cp->cache_flags &= ~UMF_BUFTAG; 2509 cp->cache_flags |= UMF_NOMAGAZINE; 2510 ASSERT(vmp == umem_default_arena); 2511 vmp = umem_firewall_arena; 2512 } 2513 2514 /* 2515 * Set cache properties. 
	(void) strncpy(cp->cache_name, name, sizeof (cp->cache_name) - 1);
	cp->cache_bufsize = bufsize;
	cp->cache_align = align;
	cp->cache_constructor = constructor;
	cp->cache_destructor = destructor;
	cp->cache_reclaim = reclaim;
	cp->cache_private = private;
	cp->cache_arena = vmp;
	cp->cache_cflags = cflags;
	cp->cache_cpu_mask = umem_cpu_mask;

	/*
	 * Determine the chunk size.
	 */
	chunksize = bufsize;

	if (align >= UMEM_ALIGN) {
		chunksize = P2ROUNDUP(chunksize, UMEM_ALIGN);
		cp->cache_bufctl = chunksize - UMEM_ALIGN;
	}

	if (cp->cache_flags & UMF_BUFTAG) {
		cp->cache_bufctl = chunksize;
		cp->cache_buftag = chunksize;
		chunksize += sizeof (umem_buftag_t);
	}

	if (cp->cache_flags & UMF_DEADBEEF) {
		cp->cache_verify = MIN(cp->cache_buftag, umem_maxverify);
		if (cp->cache_flags & UMF_LITE)
			cp->cache_verify = MIN(cp->cache_verify, UMEM_ALIGN);
	}

	cp->cache_contents = MIN(cp->cache_bufctl, umem_content_maxsave);

	cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);

	if (chunksize < bufsize) {
		errno = ENOMEM;
		goto fail;
	}

	/*
	 * Now that we know the chunk size, determine the optimal slab size.
	 */
	if (vmp == umem_firewall_arena) {
		cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
		cp->cache_mincolor = cp->cache_slabsize - chunksize;
		cp->cache_maxcolor = cp->cache_mincolor;
		cp->cache_flags |= UMF_HASH;
		ASSERT(!(cp->cache_flags & UMF_BUFTAG));
	} else if ((cflags & UMC_NOHASH) || (!(cflags & UMC_NOTOUCH) &&
	    !(cp->cache_flags & UMF_AUDIT) &&
	    chunksize < vmp->vm_quantum / UMEM_VOID_FRACTION)) {
		cp->cache_slabsize = vmp->vm_quantum;
		cp->cache_mincolor = 0;
		cp->cache_maxcolor =
		    (cp->cache_slabsize - sizeof (umem_slab_t)) % chunksize;

		if (chunksize + sizeof (umem_slab_t) > cp->cache_slabsize) {
			errno = EINVAL;
			goto fail;
		}
		ASSERT(!(cp->cache_flags & UMF_AUDIT));
	} else {
		size_t chunks, bestfit, waste, slabsize;
		size_t minwaste = LONG_MAX;

		for (chunks = 1; chunks <= UMEM_VOID_FRACTION; chunks++) {
			slabsize = P2ROUNDUP(chunksize * chunks,
			    vmp->vm_quantum);
			/*
			 * check for overflow
			 */
			if ((slabsize / chunks) < chunksize) {
				errno = ENOMEM;
				goto fail;
			}
			chunks = slabsize / chunksize;
			waste = (slabsize % chunksize) / chunks;
			if (waste < minwaste) {
				minwaste = waste;
				bestfit = slabsize;
			}
		}
		if (cflags & UMC_QCACHE)
			bestfit = MAX(1 << highbit(3 * vmp->vm_qcache_max), 64);
		cp->cache_slabsize = bestfit;
		cp->cache_mincolor = 0;
		cp->cache_maxcolor = bestfit % chunksize;
		cp->cache_flags |= UMF_HASH;
	}

	if (cp->cache_flags & UMF_HASH) {
		ASSERT(!(cflags & UMC_NOHASH));
		cp->cache_bufctl_cache = (cp->cache_flags & UMF_AUDIT) ?
		    umem_bufctl_audit_cache : umem_bufctl_cache;
	}

	if (cp->cache_maxcolor >= vmp->vm_quantum)
		cp->cache_maxcolor = vmp->vm_quantum - 1;

	cp->cache_color = cp->cache_mincolor;

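	/*
	 * (A worked example of the slab sizing above, not from the original
	 * comments -- assume a 4096-byte vm_quantum and a 440-byte chunk:
	 * a one-page slab holds 9 chunks with 136 bytes left over, about
	 * 15 bytes of waste per chunk; a two-page slab holds 18 chunks with
	 * 272 bytes left over, the same per-chunk waste; so the one-page
	 * slab wins, and cache_maxcolor becomes 4096 % 440 == 136, giving
	 * the cache 136 bytes of slab coloring to play with.)
	 */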
	/*
	 * Initialize the rest of the slab layer.
	 */
	(void) mutex_init(&cp->cache_lock, USYNC_THREAD, NULL);

	cp->cache_freelist = &cp->cache_nullslab;
	cp->cache_nullslab.slab_cache = cp;
	cp->cache_nullslab.slab_refcnt = -1;
	cp->cache_nullslab.slab_next = &cp->cache_nullslab;
	cp->cache_nullslab.slab_prev = &cp->cache_nullslab;

	if (cp->cache_flags & UMF_HASH) {
		cp->cache_hash_table = vmem_alloc(umem_hash_arena,
		    UMEM_HASH_INITIAL * sizeof (void *), VM_NOSLEEP);
		if (cp->cache_hash_table == NULL) {
			errno = EAGAIN;
			goto fail_lock;
		}
		bzero(cp->cache_hash_table,
		    UMEM_HASH_INITIAL * sizeof (void *));
		cp->cache_hash_mask = UMEM_HASH_INITIAL - 1;
		cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
	}

	/*
	 * Initialize the depot.
	 */
	(void) mutex_init(&cp->cache_depot_lock, USYNC_THREAD, NULL);

	for (mtp = umem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
		continue;

	cp->cache_magtype = mtp;

	/*
	 * Initialize the CPU layer.
	 */
	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
		umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
		(void) mutex_init(&ccp->cc_lock, USYNC_THREAD, NULL);
		ccp->cc_flags = cp->cache_flags;
		ccp->cc_rounds = -1;
		ccp->cc_prounds = -1;
	}

	/*
	 * Add the cache to the global list.  This makes it visible
	 * to umem_update(), so the cache must be ready for business.
	 */
	(void) mutex_lock(&umem_cache_lock);
	cp->cache_next = cnext = &umem_null_cache;
	cp->cache_prev = cprev = umem_null_cache.cache_prev;
	cnext->cache_prev = cp;
	cprev->cache_next = cp;
	(void) mutex_unlock(&umem_cache_lock);

	if (umem_ready == UMEM_READY)
		umem_cache_magazine_enable(cp);

	return (cp);

fail_lock:
	(void) mutex_destroy(&cp->cache_lock);
fail:
	vmem_xfree(umem_cache_arena, cp, csize);
	return (NULL);
}

void
umem_cache_destroy(umem_cache_t *cp)
{
	int cpu_seqid;

	/*
	 * Remove the cache from the global cache list so that no new updates
	 * will be scheduled on its behalf, wait for any pending tasks to
	 * complete, purge the cache, and then destroy it.
	 */
	(void) mutex_lock(&umem_cache_lock);
	cp->cache_prev->cache_next = cp->cache_next;
	cp->cache_next->cache_prev = cp->cache_prev;
	cp->cache_prev = cp->cache_next = NULL;
	(void) mutex_unlock(&umem_cache_lock);

	umem_remove_updates(cp);

	umem_cache_magazine_purge(cp);

	(void) mutex_lock(&cp->cache_lock);
	if (cp->cache_buftotal != 0)
		log_message("umem_cache_destroy: '%s' (%p) not empty\n",
		    cp->cache_name, (void *)cp);
	cp->cache_reclaim = NULL;
	/*
	 * The cache is now dead.  There should be no further activity.
	 * We enforce this by setting land mines in the constructor and
	 * destructor routines that induce a segmentation fault if invoked.
	 */
	cp->cache_constructor = (umem_constructor_t *)1;
	cp->cache_destructor = (umem_destructor_t *)2;
	(void) mutex_unlock(&cp->cache_lock);

	if (cp->cache_hash_table != NULL)
		vmem_free(umem_hash_arena, cp->cache_hash_table,
		    (cp->cache_hash_mask + 1) * sizeof (void *));

	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++)
		(void) mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);

	(void) mutex_destroy(&cp->cache_depot_lock);
	(void) mutex_destroy(&cp->cache_lock);

	vmem_free(umem_cache_arena, cp, UMEM_CACHE_SIZE(umem_max_ncpus));
}

void
umem_alloc_sizes_clear(void)
{
	int i;

	umem_alloc_sizes[0] = UMEM_MAXBUF;
	for (i = 1; i < NUM_ALLOC_SIZES; i++)
		umem_alloc_sizes[i] = 0;
}

void
umem_alloc_sizes_add(size_t size_arg)
{
	int i, j;
	size_t size = size_arg;

	if (size == 0) {
		log_message("size_add: cannot add zero-sized cache\n");
		return;
	}

	if (size > UMEM_MAXBUF) {
		log_message("size_add: %ld > %d, cannot add\n", size,
		    UMEM_MAXBUF);
		return;
	}

	if (umem_alloc_sizes[NUM_ALLOC_SIZES - 1] != 0) {
		log_message("size_add: no space in alloc_table for %d\n",
		    size);
		return;
	}

	if (P2PHASE(size, UMEM_ALIGN) != 0) {
		size = P2ROUNDUP(size, UMEM_ALIGN);
		log_message("size_add: rounding %d up to %d\n", size_arg,
		    size);
	}

	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
		int cur = umem_alloc_sizes[i];
		if (cur == size) {
			log_message("size_add: %ld already in table\n",
			    size);
			return;
		}
		if (cur > size)
			break;
	}

	for (j = NUM_ALLOC_SIZES - 1; j > i; j--)
		umem_alloc_sizes[j] = umem_alloc_sizes[j-1];
	umem_alloc_sizes[i] = size;
}

void
umem_alloc_sizes_remove(size_t size)
{
	int i;

	if (size == UMEM_MAXBUF) {
		log_message("size_remove: cannot remove %ld\n", size);
		return;
	}

	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
		int cur = umem_alloc_sizes[i];
		if (cur == size)
			break;
		else if (cur > size || cur == 0) {
			log_message("size_remove: %ld not found in table\n",
			    size);
			return;
		}
	}

	for (; i + 1 < NUM_ALLOC_SIZES; i++)
		umem_alloc_sizes[i] = umem_alloc_sizes[i+1];
	umem_alloc_sizes[i] = 0;
}

static int
umem_cache_init(void)
{
	int i;
	size_t size, max_size;
	umem_cache_t *cp;
	umem_magtype_t *mtp;
	char name[UMEM_CACHE_NAMELEN + 1];
	umem_cache_t *umem_alloc_caches[NUM_ALLOC_SIZES];

	for (i = 0; i < sizeof (umem_magtype) / sizeof (*mtp); i++) {
		mtp = &umem_magtype[i];
		(void) snprintf(name, sizeof (name), "umem_magazine_%d",
		    mtp->mt_magsize);
		/*
		 * Each magazine holds mt_magsize rounds plus one pointer
		 * of overhead for the magazine's next link.
		 */
		mtp->mt_cache = umem_cache_create(name,
		    (mtp->mt_magsize + 1) * sizeof (void *),
		    mtp->mt_align, NULL, NULL, NULL, NULL,
		    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
		if (mtp->mt_cache == NULL)
			return (0);
	}

	umem_slab_cache = umem_cache_create("umem_slab_cache",
	    sizeof (umem_slab_t), 0, NULL, NULL, NULL, NULL,
	    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);

	if (umem_slab_cache == NULL)
		return (0);

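	/*
	 * (Background, not from the original comments: umem_slab_cache and
	 * umem_bufctl_cache supply the slab headers and per-buffer control
	 * structures for caches that keep their metadata out of line --
	 * the UMF_HASH caches; UMC_NOHASH caches embed this bookkeeping in
	 * the slab itself.)
	 */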
	umem_bufctl_cache = umem_cache_create("umem_bufctl_cache",
	    sizeof (umem_bufctl_t), 0, NULL, NULL, NULL, NULL,
	    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);

	if (umem_bufctl_cache == NULL)
		return (0);

	/*
	 * The size of the umem_bufctl_audit structure depends upon
	 * umem_stack_depth.  See umem_impl.h for details on the size
	 * restrictions.
	 */

	size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth);
	max_size = UMEM_BUFCTL_AUDIT_MAX_SIZE;

	if (size > max_size) {			/* too large -- truncate */
		int max_frames = UMEM_MAX_STACK_DEPTH;

		ASSERT(UMEM_BUFCTL_AUDIT_SIZE_DEPTH(max_frames) <= max_size);

		umem_stack_depth = max_frames;
		size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth);
	}

	umem_bufctl_audit_cache = umem_cache_create("umem_bufctl_audit_cache",
	    size, 0, NULL, NULL, NULL, NULL, umem_internal_arena,
	    UMC_NOHASH | UMC_INTERNAL);

	if (umem_bufctl_audit_cache == NULL)
		return (0);

	if (vmem_backend & VMEM_BACKEND_MMAP)
		umem_va_arena = vmem_create("umem_va",
		    NULL, 0, pagesize,
		    vmem_alloc, vmem_free, heap_arena,
		    8 * pagesize, VM_NOSLEEP);
	else
		umem_va_arena = heap_arena;

	if (umem_va_arena == NULL)
		return (0);

	umem_default_arena = vmem_create("umem_default",
	    NULL, 0, pagesize,
	    heap_alloc, heap_free, umem_va_arena,
	    0, VM_NOSLEEP);

	if (umem_default_arena == NULL)
		return (0);

	/*
	 * make sure the umem_alloc table initializer is correct
	 */
	i = sizeof (umem_alloc_table) / sizeof (*umem_alloc_table);
	ASSERT(umem_alloc_table[i - 1] == &umem_null_cache);

	/*
	 * Create the default caches to back umem_alloc()
	 */
	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
		size_t cache_size = umem_alloc_sizes[i];
		size_t align = 0;

		if (cache_size == 0)
			break;		/* 0 terminates the list */

		/*
		 * If they allocate a multiple of the coherency granularity,
		 * they get a coherency-granularity-aligned address.
		 */
		if (IS_P2ALIGNED(cache_size, 64))
			align = 64;
		if (IS_P2ALIGNED(cache_size, pagesize))
			align = pagesize;
		(void) snprintf(name, sizeof (name), "umem_alloc_%lu",
		    (long)cache_size);

		cp = umem_cache_create(name, cache_size, align,
		    NULL, NULL, NULL, NULL, NULL, UMC_INTERNAL);
		if (cp == NULL)
			return (0);

		umem_alloc_caches[i] = cp;
	}

	/*
	 * Initialization cannot fail at this point.  Make the caches
	 * visible to umem_alloc() and friends.
	 */
	size = UMEM_ALIGN;
	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
		size_t cache_size = umem_alloc_sizes[i];

		if (cache_size == 0)
			break;		/* 0 terminates the list */

		cp = umem_alloc_caches[i];

		while (size <= cache_size) {
			umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT] = cp;
			size += UMEM_ALIGN;
		}
	}
	ASSERT(size - UMEM_ALIGN == UMEM_MAXBUF);
	return (1);
}

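/*
 * To illustrate the table fill above (with the default size list): the
 * 8-byte cache lands in slot 0, the 16-byte cache in slot 1, and a
 * 112-byte cache fills slots 12 and 13 (sizes 97-104 and 105-112), since
 * every 8-byte size class between the previous cache size and its own
 * maps to it.
 */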
/*
 * umem_startup() is called early on, and must be called explicitly if we're
 * the standalone version.
 */
#ifdef UMEM_STANDALONE
void
#else
#pragma init(umem_startup)
static void
#endif
umem_startup(caddr_t start, size_t len, size_t pagesize, caddr_t minstack,
    caddr_t maxstack)
{
#ifdef UMEM_STANDALONE
	int idx;
	/* Standalone doesn't fork */
#else
	umem_forkhandler_init();	/* register the fork handler */
#endif

#ifdef __lint
	/* make lint happy */
	minstack = maxstack;
#endif

#ifdef UMEM_STANDALONE
	umem_ready = UMEM_READY_STARTUP;
	umem_init_env_ready = 0;

	umem_min_stack = minstack;
	umem_max_stack = maxstack;

	nofail_callback = NULL;
	umem_slab_cache = NULL;
	umem_bufctl_cache = NULL;
	umem_bufctl_audit_cache = NULL;
	heap_arena = NULL;
	heap_alloc = NULL;
	heap_free = NULL;
	umem_internal_arena = NULL;
	umem_cache_arena = NULL;
	umem_hash_arena = NULL;
	umem_log_arena = NULL;
	umem_oversize_arena = NULL;
	umem_va_arena = NULL;
	umem_default_arena = NULL;
	umem_firewall_va_arena = NULL;
	umem_firewall_arena = NULL;
	umem_memalign_arena = NULL;
	umem_transaction_log = NULL;
	umem_content_log = NULL;
	umem_failure_log = NULL;
	umem_slab_log = NULL;
	umem_cpu_mask = 0;

	umem_cpus = &umem_startup_cpu;
	umem_startup_cpu.cpu_cache_offset = UMEM_CACHE_SIZE(0);
	umem_startup_cpu.cpu_number = 0;

	bcopy(&umem_null_cache_template, &umem_null_cache,
	    sizeof (umem_cache_t));

	for (idx = 0; idx < (UMEM_MAXBUF >> UMEM_ALIGN_SHIFT); idx++)
		umem_alloc_table[idx] = &umem_null_cache;
#endif

	/*
	 * Perform initialization specific to the way we've been compiled
	 * (library or standalone)
	 */
	umem_type_init(start, len, pagesize);

	vmem_startup();
}

int
umem_init(void)
{
	size_t maxverify, minfirewall;
	size_t size;
	int idx;
	umem_cpu_t *new_cpus;

	vmem_t *memalign_arena, *oversize_arena;

	if (thr_self() != umem_init_thr) {
		/*
		 * The usual case -- non-recursive invocation of umem_init().
		 */
		(void) mutex_lock(&umem_init_lock);
		if (umem_ready != UMEM_READY_STARTUP) {
			/*
			 * Someone else beat us to initializing umem.  Wait
			 * for them to complete, then return.
			 */
			while (umem_ready == UMEM_READY_INITING)
				(void) _cond_wait(&umem_init_cv,
				    &umem_init_lock);
			ASSERT(umem_ready == UMEM_READY ||
			    umem_ready == UMEM_READY_INIT_FAILED);
			(void) mutex_unlock(&umem_init_lock);
			return (umem_ready == UMEM_READY);
		}

		ASSERT(umem_ready == UMEM_READY_STARTUP);
		ASSERT(umem_init_env_ready == 0);

		umem_ready = UMEM_READY_INITING;
		umem_init_thr = thr_self();

		(void) mutex_unlock(&umem_init_lock);
		umem_setup_envvars(0);	/* can recurse -- see below */
		if (umem_init_env_ready) {
			/*
			 * Initialization was already completed.
			 */
			ASSERT(umem_ready == UMEM_READY ||
			    umem_ready == UMEM_READY_INIT_FAILED);
			ASSERT(umem_init_thr == 0);
			return (umem_ready == UMEM_READY);
		}
	} else if (!umem_init_env_ready) {
		/*
		 * The umem_setup_envvars() call (above) makes calls into
		 * the dynamic linker and directly into user-supplied code.
		 * Since we cannot know what that code will do, we could be
		 * recursively invoked (by, say, a malloc() call in the code
		 * itself, or in a (C++) _init section it causes to be fired).
		 *
		 * This code is where we end up if such recursion occurs.  We
		 * first clean up any partial results in the envvar code, then
		 * proceed to finish initialization processing in the recursive
		 * call.  The original call will notice this, and return
		 * immediately.
		 */
		umem_setup_envvars(1);	/* clean up any partial state */
	} else {
		umem_panic(
		    "recursive allocation while initializing umem\n");
	}
	umem_init_env_ready = 1;

	/*
	 * From this point until we finish, recursion into umem_init() will
	 * cause a umem_panic().
	 */
	maxverify = minfirewall = ULONG_MAX;

	/* LINTED constant condition */
	if (sizeof (umem_cpu_cache_t) != UMEM_CPU_CACHE_SIZE) {
		umem_panic("sizeof (umem_cpu_cache_t) = %d, should be %d\n",
		    sizeof (umem_cpu_cache_t), UMEM_CPU_CACHE_SIZE);
	}

	umem_max_ncpus = umem_get_max_ncpus();

	/*
	 * load tunables from environment
	 */
	umem_process_envvars();

	if (issetugid())
		umem_mtbf = 0;

	/*
	 * set up vmem
	 */
	if (!(umem_flags & UMF_AUDIT))
		vmem_no_debug();

	heap_arena = vmem_heap_arena(&heap_alloc, &heap_free);

	pagesize = heap_arena->vm_quantum;

	umem_internal_arena = vmem_create("umem_internal", NULL, 0, pagesize,
	    heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP);

	umem_default_arena = umem_internal_arena;

	if (umem_internal_arena == NULL)
		goto fail;

	umem_cache_arena = vmem_create("umem_cache", NULL, 0, UMEM_ALIGN,
	    vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP);

	umem_hash_arena = vmem_create("umem_hash", NULL, 0, UMEM_ALIGN,
	    vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP);

	umem_log_arena = vmem_create("umem_log", NULL, 0, UMEM_ALIGN,
	    heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP);

	umem_firewall_va_arena = vmem_create("umem_firewall_va",
	    NULL, 0, pagesize,
	    umem_firewall_va_alloc, umem_firewall_va_free, heap_arena,
	    0, VM_NOSLEEP);

	if (umem_cache_arena == NULL || umem_hash_arena == NULL ||
	    umem_log_arena == NULL || umem_firewall_va_arena == NULL)
		goto fail;

	umem_firewall_arena = vmem_create("umem_firewall", NULL, 0, pagesize,
	    heap_alloc, heap_free, umem_firewall_va_arena, 0,
	    VM_NOSLEEP);

	if (umem_firewall_arena == NULL)
		goto fail;

	oversize_arena = vmem_create("umem_oversize", NULL, 0, pagesize,
	    heap_alloc, heap_free, minfirewall < ULONG_MAX ?
	    umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP);

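	/*
	 * (Note, not from the original comments: when a firewall is in
	 * effect (minfirewall < ULONG_MAX), oversize and memalign
	 * allocations import from umem_firewall_va_arena so that each one
	 * can be followed by an unmapped guard page; otherwise both arenas
	 * import directly from heap_arena.)
	 */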
	memalign_arena = vmem_create("umem_memalign", NULL, 0, UMEM_ALIGN,
	    heap_alloc, heap_free, minfirewall < ULONG_MAX ?
	    umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP);

	if (oversize_arena == NULL || memalign_arena == NULL)
		goto fail;

	if (umem_max_ncpus > CPUHINT_MAX())
		umem_max_ncpus = CPUHINT_MAX();

	/*
	 * Round umem_max_ncpus up to a power of two, since a cpu cache is
	 * selected by bitwise-ANDing the CPU hint with umem_cpu_mask below.
	 */
	while ((umem_max_ncpus & (umem_max_ncpus - 1)) != 0)
		umem_max_ncpus++;

	if (umem_max_ncpus == 0)
		umem_max_ncpus = 1;

	size = umem_max_ncpus * sizeof (umem_cpu_t);
	new_cpus = vmem_alloc(umem_internal_arena, size, VM_NOSLEEP);
	if (new_cpus == NULL)
		goto fail;

	bzero(new_cpus, size);
	for (idx = 0; idx < umem_max_ncpus; idx++) {
		new_cpus[idx].cpu_number = idx;
		new_cpus[idx].cpu_cache_offset = UMEM_CACHE_SIZE(idx);
	}
	umem_cpus = new_cpus;
	umem_cpu_mask = (umem_max_ncpus - 1);

	if (umem_maxverify == 0)
		umem_maxverify = maxverify;

	if (umem_minfirewall == 0)
		umem_minfirewall = minfirewall;

	/*
	 * Set up updating and reaping
	 */
	umem_reap_next = gethrtime() + NANOSEC;

#ifndef UMEM_STANDALONE
	(void) gettimeofday(&umem_update_next, NULL);
#endif

	/*
	 * Set up logging -- failure here is okay, since it will just disable
	 * the logs
	 */
	if (umem_logging) {
		umem_transaction_log = umem_log_init(umem_transaction_log_size);
		umem_content_log = umem_log_init(umem_content_log_size);
		umem_failure_log = umem_log_init(umem_failure_log_size);
		umem_slab_log = umem_log_init(umem_slab_log_size);
	}

	/*
	 * Set up caches -- if successful, initialization cannot fail, since
	 * allocations from other threads can now succeed.
	 */
	if (umem_cache_init() == 0) {
		log_message("unable to create initial caches\n");
		goto fail;
	}
	umem_oversize_arena = oversize_arena;
	umem_memalign_arena = memalign_arena;

	umem_cache_applyall(umem_cache_magazine_enable);

	/*
	 * initialization done, ready to go
	 */
	(void) mutex_lock(&umem_init_lock);
	umem_ready = UMEM_READY;
	umem_init_thr = 0;
	(void) cond_broadcast(&umem_init_cv);
	(void) mutex_unlock(&umem_init_lock);
	return (1);

fail:
	log_message("umem initialization failed\n");

	(void) mutex_lock(&umem_init_lock);
	umem_ready = UMEM_READY_INIT_FAILED;
	umem_init_thr = 0;
	(void) cond_broadcast(&umem_init_cv);
	(void) mutex_unlock(&umem_init_lock);
	return (0);
}