/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2014 Joyent, Inc.  All rights reserved.
 */

/*
 * based on usr/src/uts/common/os/kmem.c r1.64 from 2001/12/18
 *
 * The slab allocator, as described in the following two papers:
 *
 *	Jeff Bonwick,
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator.
 *	Proceedings of the Summer 1994 Usenix Conference.
 *	Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
 *
 *	Jeff Bonwick and Jonathan Adams,
 *	Magazines and vmem: Extending the Slab Allocator to Many CPUs and
 *	Arbitrary Resources.
 *	Proceedings of the 2001 Usenix Conference.
 *	Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
 *
 * 1. Overview
 * -----------
 * umem is very close to kmem in implementation.  There are seven major
 * areas of divergence:
 *
 *	* Initialization
 *
 *	* CPU handling
 *
 *	* umem_update()
 *
 *	* KM_SLEEP vs. UMEM_NOFAIL
 *
 *	* lock ordering
 *
 *	* changing UMEM_MAXBUF
 *
 *	* Per-thread caching for malloc/free
 *
 * 2. Initialization
 * -----------------
 * kmem is initialized early on in boot, and knows that no one will call
 * into it before it is ready.  umem does not have these luxuries.  Instead,
 * initialization is divided into two phases:
 *
 *	* library initialization, and
 *
 *	* first use
 *
 * umem's full initialization happens at the time of the first allocation
 * request (via malloc() and friends, umem_alloc(), or umem_zalloc()),
 * or the first call to umem_cache_create().
 *
 * umem_free() and umem_cache_alloc() do not require special handling,
 * since the only way to get valid arguments for them is to successfully
 * call a function from the first group.
 *
 * 2.1. Library Initialization: umem_startup()
 * -------------------------------------------
 * umem_startup() is libumem.so's .init section.  It calls pthread_atfork()
 * to install the handlers necessary for umem's Fork1-Safety.  Because of
 * race condition issues, all other pre-umem_init() initialization is done
 * statically (i.e. by the dynamic linker).
 *
 * For standalone use, umem_startup() returns everything to its initial
 * state.
 *
 * 2.2. First use: umem_init()
 * ------------------------------
 * The first time any memory allocation function is used, we have to
 * create the backing caches and vmem arenas which are needed for it.
 * umem_init() is the central point for that task.  When it completes,
 * umem_ready is either UMEM_READY (all set) or UMEM_READY_INIT_FAILED (unable
 * to initialize, probably due to lack of memory).
 *
 * There are four different paths from which umem_init() is called:
 *
 *	* from umem_alloc() or umem_zalloc(), with 0 < size < UMEM_MAXBUF,
 *
 *	* from umem_alloc() or umem_zalloc(), with size > UMEM_MAXBUF,
 *
 *	* from umem_cache_create(), and
 *
 *	* from memalign(), with align > UMEM_ALIGN.
 *
 * The last three just check if umem is initialized, and call umem_init()
 * if it is not.  For performance reasons, the first case is more complicated.
 *
 * 2.2.1. umem_alloc()/umem_zalloc(), with 0 < size < UMEM_MAXBUF
 * -----------------------------------------------------------------
 * In this case, umem_cache_alloc(&umem_null_cache, ...) is called.
 * There is special case code which causes any allocation on
 * &umem_null_cache to fail by returning (NULL), regardless of the
 * flags argument.
 *
 * So umem_cache_alloc() returns NULL, and umem_alloc()/umem_zalloc() call
 * umem_alloc_retry().  umem_alloc_retry() sees that the allocation
 * was against &umem_null_cache, and calls umem_init().
 *
 * If initialization is successful, umem_alloc_retry() returns 1, which
 * causes umem_alloc()/umem_zalloc() to start over, which causes it to load
 * the (now valid) cache pointer from umem_alloc_table.
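 *
 * To make the lazy-initialization mechanics concrete, the fast path looks
 * roughly like the following sketch (simplified; the handling of size == 0
 * and of oversize allocations is elided here):
 *
 *	void *
 *	umem_alloc(size_t size, int umflag)
 *	{
 *		umem_cache_t *cp;
 *		void *buf;
 *	retry:
 *		cp = umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT];
 *		buf = _umem_cache_alloc(cp, umflag);	(NULL before init)
 *		if (buf == NULL && umem_alloc_retry(cp, umflag))
 *			goto retry;	(umem_init() filled in the table)
 *		return (buf);
 *	}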
 *
 * 2.2.2. Dealing with race conditions
 * -----------------------------------
 * There are a couple race conditions resulting from the initialization
 * code that we have to guard against:
 *
 *	* In umem_cache_create(), there is a special UMC_INTERNAL cflag
 *	that is passed for caches created during initialization.  It
 *	is illegal for a user to try to create a UMC_INTERNAL cache.
 *	This allows initialization to proceed, but any other
 *	umem_cache_create()s will block by calling umem_init().
 *
 *	* Since umem_null_cache has a 1-element cache_cpu, its cache_cpu_mask
 *	is always zero.  umem_cache_alloc uses cp->cache_cpu_mask to
 *	mask the cpu number.  This prevents a race between grabbing a
 *	cache pointer out of umem_alloc_table and growing the cpu array.
 *
 *
 * 3. CPU handling
 * ---------------
 * kmem uses the CPU's sequence number to determine which "cpu cache" to
 * use for an allocation.  Currently, there is no way to get the sequence
 * number in userspace.
 *
 * umem keeps track of cpu information in umem_cpus, an array of umem_max_ncpus
 * umem_cpu_t structures.  CURCPU() is a "hint" function, which we then mask
 * with either umem_cpu_mask or cp->cache_cpu_mask to find the actual "cpu" id.
 * The mechanics of this are all in the CPU(mask) macro.
 *
 * Currently, umem uses _lwp_self() as its hint.
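 *
 * To make the masking concrete: assuming umem_max_ncpus has been rounded
 * up to a power of two (say 4, making umem_cpu_mask 3), a thread whose
 * hint is 11 hashes to "cpu" 11 & 3 == 3, and so uses umem_cpus[3].  The
 * CPU(mask) macro defined later in this file encapsulates exactly this:
 *
 *	umem_cpu_t *cpu = CPU(umem_cpu_mask);
 *		(expands to umem_cpus + (CPUHINT() & umem_cpu_mask))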
 *
 *
 * 4. The update thread
 * --------------------
 * kmem uses a task queue, kmem_taskq, to do periodic maintenance on
 * every kmem cache.  vmem has a periodic timeout for hash table resizing.
 * The kmem_taskq also provides a separate context for kmem_cache_reap()'s
 * to be done in, avoiding issues of the context of kmem_reap() callers.
 *
 * Instead, umem has the concept of "updates", which are asynchronous requests
 * for work attached to single caches.  All caches with pending work are
 * on a doubly linked list rooted at the umem_null_cache.  All update state
 * is protected by the umem_update_lock mutex, and the umem_update_cv is used
 * for notification between threads.
 *
 * 4.1. Cache states with regards to updates
 * -----------------------------------------
 * A given cache is in one of three states:
 *
 * Inactive		cache_uflags is zero, cache_u{next,prev} are NULL
 *
 * Work Requested	cache_uflags is non-zero (but UMU_ACTIVE is not set),
 *			cache_u{next,prev} link the cache onto the global
 *			update list
 *
 * Active		cache_uflags has UMU_ACTIVE set, cache_u{next,prev}
 *			are NULL, and either umem_update_thr or
 *			umem_st_update_thr are actively doing work on the
 *			cache.
 *
 * An update can be added to any cache in any state -- if the cache is
 * Inactive, it transitions to being Work Requested.  If the cache is
 * Active, the worker will notice the new update and act on it before
 * transitioning the cache to the Inactive state.
 *
 * If a cache is in the Active state, UMU_NOTIFY can be set, which asks
 * the worker to broadcast the umem_update_cv when it has finished.
 *
 * 4.2. Update interface
 * ---------------------
 * umem_add_update() adds an update to a particular cache.
 * umem_updateall() adds an update to all caches.
 * umem_remove_updates() returns a cache to the Inactive state.
 *
 * umem_process_updates() processes all caches in the Work Requested state.
 *
 * 4.3. Reaping
 * ------------
 * When umem_reap() is called (at the time of heap growth), it schedules
 * UMU_REAP updates on every cache.  It then checks to see if the update
 * thread exists (umem_update_thr != 0).  If it does, it broadcasts
 * the umem_update_cv to wake the update thread up, and returns.
 *
 * If the update thread does not exist (umem_update_thr == 0), and the
 * program currently has multiple threads, umem_reap() attempts to create
 * a new update thread.
 *
 * If the process is not multithreaded, or the creation fails, umem_reap()
 * calls umem_st_update() to do an inline update.
 *
 * 4.4. The update thread
 * ----------------------
 * The update thread spends most of its time in cond_timedwait() on the
 * umem_update_cv.  It wakes up under two conditions:
 *
 *	* The timedwait times out, in which case it needs to run a global
 *	update, or
 *
 *	* someone cond_broadcast(3THR)s the umem_update_cv, in which case
 *	it needs to check if there are any caches in the Work Requested
 *	state.
 *
 * When it is time for another global update, umem calls umem_cache_update()
 * on every cache, then calls vmem_update(), which tunes the vmem structures.
 * umem_cache_update() can request further work using umem_add_update().
 *
 * After any work from the global update completes, the update timer is
 * reset to umem_reap_interval seconds in the future.  This makes the
 * updates self-throttling.
 *
 * Reaps are similarly self-throttling.  After a UMU_REAP update has
 * been scheduled on all caches, umem_reap() sets a flag and wakes up the
 * update thread.  The update thread notices the flag, and resets the
 * reap state.
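 *
 * The update thread's main loop therefore has roughly the following shape
 * (a sketch only -- thread exit, the reap flag handling described above,
 * and the timestamp conversion are all elided):
 *
 *	(void) mutex_lock(&umem_update_lock);
 *	for (;;) {
 *		timestruc_t abstime;	(computed from umem_update_next)
 *		if (cond_timedwait(&umem_update_cv, &umem_update_lock,
 *		    &abstime) == ETIME)
 *			run the global update, reset umem_update_next;
 *		umem_process_updates();	(drains Work Requested caches)
 *	}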
 *
 * 4.5. Inline updates
 * -------------------
 * If the update thread is not running, umem_st_update() is used instead.  It
 * immediately does a global update (as above), then calls
 * umem_process_updates() to process both the reaps that umem_reap() added and
 * any work generated by the global update.  Afterwards, it resets the reap
 * state.
 *
 * While umem_st_update() is running, umem_st_update_thr holds the thread
 * id of the thread performing the update.
 *
 * 4.6. Updates and fork1()
 * ------------------------
 * umem has fork1() pre- and post-handlers which lock up (and release) every
 * mutex in every cache.  They also lock up the umem_update_lock.  Since
 * fork1() only copies over a single lwp, other threads (including the update
 * thread) could have been actively using a cache in the parent.  This
 * can lead to inconsistencies in the child process.
 *
 * Because we locked all of the mutexes, the only possible inconsistencies are:
 *
 *	* a umem_cache_alloc() could leak its buffer.
 *
 *	* a caller of umem_depot_alloc() could leak a magazine, and all the
 *	buffers contained in it.
 *
 *	* a cache could be in the Active update state.  In the child, there
 *	would be no thread actually working on it.
 *
 *	* a umem_hash_rescale() could leak the new hash table.
 *
 *	* a umem_magazine_resize() could be in progress.
 *
 *	* a umem_reap() could be in progress.
 *
 * The memory leaks we can't do anything about.  umem_release_child() resets
 * the update state, and moves any caches in the Active state to the Work
 * Requested state.  This might cause some updates to be re-run, but UMU_REAP
 * and UMU_HASH_RESCALE are effectively idempotent, and the worst that can
 * happen from umem_magazine_resize() is resizing the magazine twice in close
 * succession.
 *
 * Much of the cleanup in umem_release_child() is skipped if
 * umem_st_update_thr == thr_self().  This is so that applications which call
 * fork1() from a cache callback do not break.  Needless to say, any such
 * application is tremendously broken.
 *
 *
 * 5. KM_SLEEP vs. UMEM_NOFAIL
 * ----------------------------
 * Allocations against kmem and vmem have two basic modes: SLEEP and
 * NOSLEEP.  A sleeping allocation will go to sleep (waiting for
 * more memory) instead of failing (returning NULL).
 *
 * SLEEP allocations presume an extremely multithreaded model, with
 * a lot of allocation and deallocation activity.  umem cannot presume
 * that its clients have any particular type of behavior.  Instead,
 * it provides two types of allocations:
 *
 *	* UMEM_DEFAULT, equivalent to KM_NOSLEEP (i.e. return NULL on
 *	failure)
 *
 *	* UMEM_NOFAIL, which, on failure, calls an optional callback
 *	(registered with umem_nofail_callback()).
 *
 * The callback is invoked with no locks held, and can do an arbitrary
 * amount of work.  It then has a choice between:
 *
 *	* Returning UMEM_CALLBACK_RETRY, which will cause the allocation
 *	to be restarted.
 *
 *	* Returning UMEM_CALLBACK_EXIT(status), which will cause exit(2)
 *	to be invoked with status.  If multiple threads attempt to do
 *	this simultaneously, only one will call exit(2).
 *
 *	* Doing some kind of non-local exit (thr_exit(3thr), longjmp(3C),
 *	etc.)
 *
 * The default callback returns UMEM_CALLBACK_EXIT(255).
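 *
 * A typical registration looks like the following sketch (the callback
 * body here is illustrative, not prescribed; release_application_caches()
 * is a hypothetical helper):
 *
 *	static int
 *	my_nofail_cb(void)
 *	{
 *		if (release_application_caches() != 0)
 *			return (UMEM_CALLBACK_RETRY);
 *		return (UMEM_CALLBACK_EXIT(1));
 *	}
 *	...
 *	umem_nofail_callback(my_nofail_cb);
 *	buf = umem_alloc(sz, UMEM_NOFAIL);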
 *
 * To have these callbacks without risk of state corruption (in the case of
 * a non-local exit), we have to ensure that the callbacks get invoked
 * close to the original allocation, with no inconsistent state or held
 * locks.  The following steps are taken:
 *
 *	* All invocations of vmem are VM_NOSLEEP.
 *
 *	* All constructor callbacks (which can themselves do allocations)
 *	are passed UMEM_DEFAULT as their required allocation argument.  This
 *	way, the constructor will fail, allowing the highest-level allocation
 *	to invoke the nofail callback.
 *
 *	If a constructor callback _does_ do a UMEM_NOFAIL allocation, and
 *	the nofail callback does a non-local exit, we will leak the
 *	partially-constructed buffer.
 *
 *
 * 6. Lock Ordering
 * ----------------
 * umem has a few more locks than kmem does, mostly in the update path.  The
 * overall lock ordering (earlier locks must be acquired first) is:
 *
 *	umem_init_lock
 *
 *	vmem_list_lock
 *	vmem_nosleep_lock.vmpl_mutex
 *	vmem_t's:
 *		vm_lock
 *	sbrk_lock
 *
 *	umem_cache_lock
 *	umem_update_lock
 *	umem_flags_lock
 *	umem_cache_t's:
 *		cache_cpu[*].cc_lock
 *		cache_depot_lock
 *		cache_lock
 *	umem_log_header_t's:
 *		lh_cpu[*].clh_lock
 *		lh_lock
 *
 * 7. Changing UMEM_MAXBUF
 * -----------------------
 *
 * When changing UMEM_MAXBUF, extra care has to be taken.  It is not
 * sufficient to simply increase this number.  First, one must update the
 * umem_alloc_table to have the appropriate number of entries based upon
 * the new size.  If this is not done, this will lead to libumem blowing
 * an assertion.
 *
 * The second place to update, which is not required, is the umem_alloc_sizes.
 * These determine the default cache sizes that we're going to support.
 *
 * 8. Per-thread caching for malloc/free
 * -------------------------------------
 *
 * "Time is an illusion. Lunchtime doubly so." -- Douglas Adams
 *
 * Time may be an illusion, but CPU cycles aren't.  While libumem is designed
 * to be a highly scalable allocator, that scalability comes with a fixed cycle
 * penalty even in the absence of contention: libumem must acquire (and
 * release) a per-CPU lock for each allocation.  When contention is low and
 * malloc(3C) frequency is high, this overhead can dominate execution time.
 * To alleviate this, we allow for per-thread caching, a lock-free means of
 * caching recent deallocations on a per-thread basis for use in satisfying
 * subsequent calls to malloc(3C).
 *
 * In addition to improving performance, we also want to:
 *	* Minimize fragmentation
 *	* Not add additional memory overhead (no larger malloc tags)
 *
 * In the ulwp_t of each thread there is a private data structure called a
 * tmem_t that looks like:
 *
 *	typedef struct {
 *		size_t	tm_size;
 *		void	*tm_roots[NTMEMBASE];  (Currently 16)
 *	} tmem_t;
 *
 * Each of the roots is treated as the head of a linked list.  Each entry in
 * the list can be thought of as a void ** which points to the next entry,
 * until one of them points to NULL.  If the head points to NULL, the list is
 * empty.
 *
 * Each head corresponds to a umem_cache.  Currently there is a linear mapping
 * where the first root corresponds to the first cache, second root to the
 * second cache, etc.  This works because every allocation that malloc makes to
 * umem_alloc that can be satisfied by a umem_cache will actually return a
 * number of bytes equal to the size of that cache.  Because of this property
 * and a one-to-one mapping between caches and roots we can guarantee that
 * every entry in a given root's list will be able to satisfy the same requests
 * as the corresponding cache.
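 *
 * Because the tmem_t is private to its thread, the list operations need no
 * locks; they are simple pointer manipulations, sketched below (local names
 * are illustrative -- the real ptcmalloc/ptcfree are generated, not written
 * this way):
 *
 *	void *head = tm->tm_roots[root];
 *	if (head != NULL) {			(pop, for malloc)
 *		tm->tm_roots[root] = *(void **)head;
 *		tm->tm_size -= cache_size;
 *		return (head);
 *	}
 *	...
 *	*(void **)buf = tm->tm_roots[root];	(push, for free)
 *	tm->tm_roots[root] = buf;
 *	tm->tm_size += cache_size;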
 *
 * The choice of sixteen roots is based on where we believe we get the biggest
 * bang for our buck.  The per-thread caches will cache up to 256 byte and 448
 * byte allocations on ILP32 and LP64 respectively.  Generally applications
 * plan more carefully how they do larger allocations than smaller ones.
 * Therefore sixteen roots is a reasonable compromise between the amount of
 * additional overhead per thread, and the likelihood of a program benefiting
 * from it.
 *
 * The maximum amount of memory that can be cached in each thread is determined
 * by the perthread_cache UMEM_OPTION.  It corresponds to the umem_ptc_size
 * value.  The default value for this is currently 1 MB.  Once umem_init() has
 * finished, this cannot be directly tuned without directly modifying the
 * instruction text.  If, upon calling free(3C), the amount cached would exceed
 * this maximum, we instead return the buffer to the umem_cache rather than
 * holding onto it in the thread.
 *
 * When a thread calls malloc(3C) it first determines which umem_cache it
 * would be serviced by.  If the allocation is not covered by ptcumem it goes
 * to the normal malloc instead.  Next, it checks if the appropriate tm_root's
 * list is empty or not.  If it is empty, we allocate the memory from
 * umem_alloc.  If it is not empty, we remove the head of the list, set the
 * appropriate malloc tags, and return that buffer.
 *
 * When a thread calls free(3C) it first looks at the malloc tag; if it is
 * invalid or the allocation exceeds the largest cache in ptcumem, it sends it
 * off to the original free() to handle and clean up appropriately.  Next, it
 * checks if the allocation size is covered by one of the per-thread roots and
 * if it isn't, it passes it off to the original free() to be released.
 * Finally, before it inserts this buffer as the head, it checks if adding
 * this buffer would put the thread over its maximum cache size.  If it would,
 * it frees the buffer back to the umem_cache.  Otherwise it increments the
 * thread's total cached amount and makes the buffer the new head of the
 * appropriate tm_root.
 *
 * When a thread exits, all of the buffers that it has in its per-thread cache
 * will be passed to umem_free() and returned to the appropriate umem_cache.
 *
 * 8.1 Handling addition and removal of umem_caches
 * ------------------------------------------------
 *
 * The set of umem_caches that are used to back calls to umem_alloc() and
 * ultimately malloc() are determined at program execution time.  The default
 * set of caches is defined below in umem_alloc_sizes[].  Various umem_options
 * exist that modify the set of caches: size_add, size_clear, and size_remove.
 * Because the set of caches can only be determined once umem_init() has been
 * called and we have the additional goals of minimizing additional
 * fragmentation and metadata space overhead in the malloc tags, this forces
 * our hand to go down a slightly different path: the one trod by fasttrap and
 * trapstat.
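 *
 * (As a usage aside: these options arrive through the UMEM_OPTIONS
 * environment variable, along the lines of
 *
 *	UMEM_OPTIONS=size_add=1040,size_remove=8192 ./myprog
 *
 * The specific values and the exact option syntax above are illustrative
 * only; see the libumem documentation for the authoritative forms.)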
 *
 * During umem_init we're going to dynamically construct a new version of
 * malloc(3C) and free(3C) that utilizes the known cache sizes and then ensure
 * that ptcmalloc and ptcfree replace malloc and free as entries in the plt.
 * If ptcmalloc and ptcfree cannot handle a request, they simply jump to the
 * original libumem implementations.
 *
 * After creating all of the umem_caches, but before making them visible,
 * umem_cache_init checks that umem_genasm_supported is non-zero.  This value
 * is set by each architecture in $ARCH/umem_genasm.c to indicate whether or
 * not they support this.  If the value is zero, then this process is skipped.
 * Similarly, if the cache size has been tuned to zero by UMEM_OPTIONS, then
 * this is also skipped.
 *
 * In umem_genasm.c, each architecture's implementation implements a single
 * function called umem_genasm() that is responsible for generating the
 * appropriate versions of ptcmalloc() and ptcfree(), placing them in the
 * appropriate memory location, and finally doing the switch from malloc() and
 * free() to ptcmalloc() and ptcfree().  Once the change has been made, there
 * is no way to switch back, short of restarting the program or modifying
 * program text with mdb.
 *
 * 8.2 Modifying the Procedure Linkage Table (PLT)
 * -----------------------------------------------
 *
 * The last piece of this puzzle is how we actually jam ptcmalloc() into the
 * PLT.  To handle this, we have defined two functions, _malloc and _free, and
 * used a special mapfile directive to place them into a readable, writable,
 * and executable segment.  Next we use a standard #pragma weak for malloc and
 * free and direct them to those symbols.  By default, those symbols have text
 * defined as nops for our generated functions and when they're invoked, they
 * jump to the default malloc and free functions.
 *
 * When umem_genasm() is called, it goes through and generates new malloc()
 * and free() functions in the text provided for by _malloc and _free just
 * after the jump.  Once both have been successfully generated, umem_genasm()
 * nops over the original jump so that we now call into the genasm versions
 * of these functions.
 *
 * 8.3 umem_genasm()
 * -----------------
 *
 * umem_genasm() is currently implemented for i386 and amd64.  This section
 * describes the theory behind the construction.  For specific byte code to
 * assembly instructions and niceish C and asm versions of ptcmalloc and
 * ptcfree, see the individual umem_genasm.c files.  The layout consists of
 * the following sections:
 *
 *	o. function-specific prologue
 *	o. function-generic cache-selecting elements
 *	o. function-specific epilogue
 *
 * There are three different generic cache elements that exist:
 *
 *	o. the last or only cache
 *	o. the intermediary caches if more than two
 *	o. the first one if more than one cache
 *
 * The malloc and free prologues and epilogues mimic the necessary portions of
 * libumem's malloc and free.  This includes things like checking for size
 * overflow, and setting and verifying the malloc tags.
 *
 * It is an important constraint that these functions do not make use of the
 * call instruction.  The only jmp outside of the individual functions is to
 * the original libumem malloc and free respectively.  Because doing things
 * like setting errno or raising an internal umem error on improper malloc
 * tags would require using calls into the PLT, whenever we encounter one of
 * those cases we just jump to the original malloc and free functions reusing
 * the same stack frame.
 *
 * Each of the above sections, the three caches, and the malloc and free
 * prologue and epilogue are implemented as blocks of machine code with the
 * corresponding assembly in comments.  There are known offsets into each
 * block that correspond to locations of data and addresses that we only know
 * at run time.  These blocks are copied as necessary and the blanks filled
 * in appropriately.
 *
 * As mentioned in section 8.2, the trampoline library uses specifically named
 * variables to communicate the buffers and size to use.  These variables are:
 *
 *	o. umem_genasm_mptr: The buffer for ptcmalloc
 *	o. umem_genasm_msize: The size in bytes of the above buffer
 *	o. umem_genasm_fptr: The buffer for ptcfree
 *	o. umem_genasm_fsize: The size in bytes of the above buffer
 *
 * Finally, to enable the generated assembly we need to remove the previous
 * jump to the actual malloc that exists at the start of these buffers.  On
 * x86, this is a five byte region.  We could zero out the jump offset to be a
 * jmp +0, but using nops can be faster.  We specifically use a single five
 * byte nop on x86 as it is faster.  When porting ptcumem to other
 * architectures, the various opcode changes and options should be analyzed.
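 *
 * Concretely, on x86 that last step amounts to something like the following
 * sketch (the five-byte nop encoding is the canonical one from the processor
 * manuals; the variable names match the list in section 8.3):
 *
 *	static const uint8_t nop5[] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
 *	(void) memcpy(umem_genasm_mptr, nop5, sizeof (nop5));
 *	(void) memcpy(umem_genasm_fptr, nop5, sizeof (nop5));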
 *
 * 8.4 Interface with libc.so
 * --------------------------
 *
 * The tmem_t structure as described in the beginning of section 8 is part of
 * a private interface with libc.  There are three functions that exist to
 * cover this.  They are not documented in man pages or header files.  They
 * are in the SUNWprivate part of libc's mapfile.
 *
 *	o. _tmem_get_base(void)
 *
 *	Returns the offset from the ulwp_t (curthread) to the tmem_t
 *	structure.  This is a constant for all threads and is effectively a
 *	way to do ::offsetof ulwp_t ul_tmem without having to know the
 *	specifics of the structure outside of libc.
 *
 *	o. _tmem_get_nentries(void)
 *
 *	Returns the number of roots that exist in the tmem_t.  This is one
 *	part of the cap on the number of umem_caches that we can back with
 *	tmem.
 *
 *	o. _tmem_set_cleanup(void (*)(void *, int))
 *
 *	This sets a cleanup handler that gets called back when a thread
 *	exits.  There is one call per buffer; the void * is a pointer to the
 *	buffer on the list, and the int is the index into the roots array for
 *	this buffer.
 *
 * 8.5 Tuning and disabling per-thread caching
 * -------------------------------------------
 *
 * There is only one tunable for per-thread caching: the amount of memory
 * each thread should be able to cache.  This is specified via the
 * perthread_cache UMEM_OPTION option.  No attempt is made to sanity check
 * the specified value; the limit is simply the maximum value of a size_t.
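 *
 * For example, per-thread caching can be disabled outright (see the next
 * paragraph) by running a program with the option set to zero:
 *
 *	UMEM_OPTIONS=perthread_cache=0 ./myprog
 *
 * (The option syntax shown is illustrative; the value is parsed as a size.)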
 *
 * If the perthread_cache UMEM_OPTION is set to zero, nomagazines was
 * requested, or UMEM_DEBUG has been turned on, then we will never call into
 * umem_genasm; however, the trampoline audit library and jump will still be
 * in place.
 *
 * 8.6 Observing efficacy of per-thread caching
 * --------------------------------------------
 *
 * To understand the efficacy of per-thread caching, use the ::umastat dcmd
 * to see the percentage of capacity consumed on a per-thread basis, the
 * degree to which each umem cache contributes to per-thread cache
 * consumption, and the number of buffers in per-thread caches on a per-umem
 * cache basis.  If more detail is required, the specific buffers in a
 * per-thread cache can be iterated over with the umem_ptc_* walkers.  (These
 * walkers allow an optional ulwp_t to be specified to iterate only over a
 * particular thread's cache.)
 */

#include <umem_impl.h>
#include <sys/vmem_impl_user.h>
#include "umem_base.h"
#include "vmem_base.h"

#include <sys/processor.h>
#include <sys/sysmacros.h>

#include <alloca.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <signal.h>
#include <unistd.h>
#include <atomic.h>

#include "misc.h"

#define	UMEM_VMFLAGS(umflag)	(VM_NOSLEEP)

size_t pagesize;

/*
 * The default set of caches to back umem_alloc().
 * These sizes should be reevaluated periodically.
 *
 * We want allocations that are multiples of the coherency granularity
 * (64 bytes) to be satisfied from a cache which is a multiple of 64
 * bytes, so that it will be 64-byte aligned.  For all multiples of 64,
 * the next kmem_cache_size greater than or equal to it must be a
 * multiple of 64.
 *
 * This table must be in sorted order, from smallest to largest.  The
 * highest slot must be UMEM_MAXBUF, and every slot afterwards must be
 * zero.
 */
static int umem_alloc_sizes[] = {
#ifdef _LP64
	1 * 8,
	1 * 16,
	2 * 16,
	3 * 16,
#else
	1 * 8,
	2 * 8,
	3 * 8,
	4 * 8,		5 * 8,		6 * 8,		7 * 8,
#endif
	4 * 16,		5 * 16,		6 * 16,		7 * 16,
	4 * 32,		5 * 32,		6 * 32,		7 * 32,
	4 * 64,		5 * 64,		6 * 64,		7 * 64,
	4 * 128,	5 * 128,	6 * 128,	7 * 128,
	P2ALIGN(8192 / 7, 64),
	P2ALIGN(8192 / 6, 64),
	P2ALIGN(8192 / 5, 64),
	P2ALIGN(8192 / 4, 64), 2304,
	P2ALIGN(8192 / 3, 64),
	P2ALIGN(8192 / 2, 64), 4544,
	P2ALIGN(8192 / 1, 64), 9216,
	4096 * 3,
	8192 * 2,				/* = 8192 * 2 */
	24576, 32768, 40960, 49152, 57344, 65536, 73728, 81920,
	90112, 98304, 106496, 114688, 122880, UMEM_MAXBUF,	/* 128k */
	/* 24 slots for user expansion */
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
};
#define	NUM_ALLOC_SIZES (sizeof (umem_alloc_sizes) / sizeof (*umem_alloc_sizes))

static umem_magtype_t umem_magtype[] = {
	{ 1,	8,	3200,	65536	},
	{ 3,	16,	256,	32768	},
	{ 7,	32,	64,	16384	},
	{ 15,	64,	0,	8192	},
	{ 31,	64,	0,	4096	},
	{ 47,	64,	0,	2048	},
	{ 63,	64,	0,	1024	},
	{ 95,	64,	0,	512	},
	{ 143,	64,	0,	0	},
};

/*
 * umem tunables
 */
uint32_t umem_max_ncpus;	/* # of CPU caches. */

uint32_t umem_stack_depth = 15; /* # stack frames in a bufctl_audit */
uint32_t umem_reap_interval = 10; /* max reaping rate (seconds) */
uint_t umem_depot_contention = 2; /* max failed trylocks per real interval */
uint_t umem_abort = 1;		/* whether to abort on error */
uint_t umem_output = 0;		/* whether to write to standard error */
uint_t umem_logging = 0;	/* umem_log_enter() override */
uint32_t umem_mtbf = 0;		/* mean time between failures [default: off] */
size_t umem_transaction_log_size; /* size of transaction log */
size_t umem_content_log_size;	/* size of content log */
size_t umem_failure_log_size;	/* failure log [4 pages per CPU] */
size_t umem_slab_log_size;	/* slab create log [4 pages per CPU] */
size_t umem_content_maxsave = 256; /* UMF_CONTENTS max bytes to log */
size_t umem_lite_minsize = 0;	/* minimum buffer size for UMF_LITE */
size_t umem_lite_maxalign = 1024; /* maximum buffer alignment for UMF_LITE */
size_t umem_maxverify;		/* maximum bytes to inspect in debug routines */
size_t umem_minfirewall;	/* hardware-enforced redzone threshold */
size_t umem_ptc_size = 1048576;	/* size of per-thread cache (in bytes) */

uint_t umem_flags = 0;
uintptr_t umem_tmem_off;

mutex_t			umem_init_lock;		/* locks initialization */
cond_t			umem_init_cv;		/* initialization CV */
thread_t		umem_init_thr;		/* thread initializing */
int			umem_init_env_ready;	/* environ pre-initted */
int			umem_ready = UMEM_READY_STARTUP;

int			umem_ptc_enabled;	/* per-thread caching enabled */

static umem_nofail_callback_t *nofail_callback;
static mutex_t		umem_nofail_exit_lock;
static thread_t		umem_nofail_exit_thr;

static umem_cache_t	*umem_slab_cache;
static umem_cache_t	*umem_bufctl_cache;
static umem_cache_t	*umem_bufctl_audit_cache;

mutex_t			umem_flags_lock;

static vmem_t		*heap_arena;
static vmem_alloc_t	*heap_alloc;
static vmem_free_t	*heap_free;

static vmem_t		*umem_internal_arena;
static vmem_t		*umem_cache_arena;
static vmem_t		*umem_hash_arena;
static vmem_t		*umem_log_arena;
static vmem_t		*umem_oversize_arena;
static vmem_t		*umem_va_arena;
static vmem_t		*umem_default_arena;
static vmem_t		*umem_firewall_va_arena;
static vmem_t		*umem_firewall_arena;

vmem_t			*umem_memalign_arena;

umem_log_header_t *umem_transaction_log;
umem_log_header_t *umem_content_log;
umem_log_header_t *umem_failure_log;
umem_log_header_t *umem_slab_log;

#define	CPUHINT()		(thr_self())
#define	CPUHINT_MAX()		INT_MAX

#define	CPU(mask)		(umem_cpus + (CPUHINT() & (mask)))
static umem_cpu_t umem_startup_cpu = {	/* initial, single, cpu */
	UMEM_CACHE_SIZE(0),
	0
};

static uint32_t umem_cpu_mask = 0;			/* global cpu mask */
static umem_cpu_t *umem_cpus = &umem_startup_cpu;	/* cpu list */

volatile uint32_t umem_reaping;

thread_t		umem_update_thr;
struct timeval		umem_update_next;	/* timeofday of next update */
volatile thread_t	umem_st_update_thr;	/* only used when single-thd */

#define	IN_UPDATE()	(thr_self() == umem_update_thr || \
			    thr_self() == umem_st_update_thr)
#define	IN_REAP()	IN_UPDATE()

mutex_t			umem_update_lock;	/* cache_u{next,prev,flags} */
cond_t			umem_update_cv;

volatile hrtime_t umem_reap_next;	/* min hrtime of next reap */

mutex_t			umem_cache_lock;	/* inter-cache linkage only */

#ifdef UMEM_STANDALONE
umem_cache_t		umem_null_cache;
static const umem_cache_t umem_null_cache_template = {
#else
umem_cache_t		umem_null_cache = {
#endif
	0, 0, 0, 0, 0,
	0, 0,
	0, 0,
	0, 0,
	"invalid_cache",
	0, 0,
	NULL, NULL, NULL, NULL,
	NULL,
	0, 0, 0, 0,
	&umem_null_cache, &umem_null_cache,
	&umem_null_cache, &umem_null_cache,
	0,
	DEFAULTMUTEX,				/* start of slab layer */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	&umem_null_cache.cache_nullslab,
	{
		&umem_null_cache,
		NULL,
		&umem_null_cache.cache_nullslab,
		&umem_null_cache.cache_nullslab,
		NULL,
		-1,
		0
	},
	NULL,
	NULL,
	DEFAULTMUTEX,				/* start of depot layer */
	NULL, {
		NULL, 0, 0, 0, 0
	}, {
		NULL, 0, 0, 0, 0
	}, {
		{
			DEFAULTMUTEX,		/* start of CPU cache */
			0, 0, NULL, NULL, -1, -1, 0
		}
	}
};

#define	ALLOC_TABLE_4 \
	&umem_null_cache, &umem_null_cache, &umem_null_cache, &umem_null_cache

#define	ALLOC_TABLE_64 \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4

#define	ALLOC_TABLE_1024 \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64

static umem_cache_t *umem_alloc_table[UMEM_MAXBUF >> UMEM_ALIGN_SHIFT] = {
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024
};


/* Used to constrain audit-log stack traces */
caddr_t			umem_min_stack;
caddr_t			umem_max_stack;


#define	UMERR_MODIFIED	0	/* buffer modified while on freelist */
#define	UMERR_REDZONE	1	/* redzone violation (write past end of buf) */
#define	UMERR_DUPFREE	2	/* freed a buffer twice */
#define	UMERR_BADADDR	3	/* freed a bad (unallocated) address */
#define	UMERR_BADBUFTAG	4	/* buftag corrupted */
#define	UMERR_BADBUFCTL	5	/* bufctl corrupted */
#define	UMERR_BADCACHE	6	/* freed a buffer to the wrong cache */
#define	UMERR_BADSIZE	7	/* alloc size != free size */
#define	UMERR_BADBASE	8	/* buffer base address wrong */

struct {
	hrtime_t	ump_timestamp;	/* timestamp of error */
	int		ump_error;	/* type of umem error (UMERR_*) */
	void		*ump_buffer;	/* buffer that induced abort */
	void		*ump_realbuf;	/* real start address for buffer */
	umem_cache_t	*ump_cache;	/* buffer's cache according to client */
	umem_cache_t	*ump_realcache;	/* actual cache containing buffer */
	umem_slab_t	*ump_slab;	/* slab according to umem_findslab() */
	umem_bufctl_t	*ump_bufctl;	/* bufctl */
} umem_abort_info;

static void
copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
	uint64_t *buf = buf_arg;

	while (buf < bufend)
		*buf++ = pattern;
}

static void *
verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
	uint64_t *buf;

	for (buf = buf_arg; buf < bufend; buf++)
		if (*buf != pattern)
			return (buf);
	return (NULL);
}

static void *
verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
{
	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
	uint64_t *buf;

	for (buf = buf_arg; buf < bufend; buf++) {
		if (*buf != old) {
			copy_pattern(old, buf_arg,
			    (char *)buf - (char *)buf_arg);
			return (buf);
		}
		*buf = new;
	}

	return (NULL);
}

void
umem_cache_applyall(void (*func)(umem_cache_t *))
{
	umem_cache_t *cp;

	(void) mutex_lock(&umem_cache_lock);
	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
	    cp = cp->cache_next)
		func(cp);
	(void) mutex_unlock(&umem_cache_lock);
}

static void
umem_add_update_unlocked(umem_cache_t *cp, int flags)
{
	umem_cache_t *cnext, *cprev;

	flags &= ~UMU_ACTIVE;

	if (!flags)
		return;

	if (cp->cache_uflags & UMU_ACTIVE) {
		cp->cache_uflags |= flags;
	} else {
		if (cp->cache_unext != NULL) {
			ASSERT(cp->cache_uflags != 0);
			cp->cache_uflags |= flags;
		} else {
			ASSERT(cp->cache_uflags == 0);
			cp->cache_uflags = flags;
			cp->cache_unext = cnext = &umem_null_cache;
			cp->cache_uprev = cprev = umem_null_cache.cache_uprev;
			cnext->cache_uprev = cp;
			cprev->cache_unext = cp;
		}
	}
}

static void
umem_add_update(umem_cache_t *cp, int flags)
{
	(void) mutex_lock(&umem_update_lock);

	umem_add_update_unlocked(cp, flags);

	if (!IN_UPDATE())
		(void) cond_broadcast(&umem_update_cv);

	(void) mutex_unlock(&umem_update_lock);
}

/*
 * Remove a cache from the update list, waiting for any in-progress work to
 * complete first.
 */
static void
umem_remove_updates(umem_cache_t *cp)
{
	(void) mutex_lock(&umem_update_lock);

	/*
	 * Get it out of the active state
	 */
	while (cp->cache_uflags & UMU_ACTIVE) {
		int cancel_state;

		ASSERT(cp->cache_unext == NULL);

		cp->cache_uflags |= UMU_NOTIFY;

		/*
		 * Make sure the update state is sane before we wait
		 */
		ASSERT(umem_update_thr != 0 || umem_st_update_thr != 0);
		ASSERT(umem_update_thr != thr_self() &&
		    umem_st_update_thr != thr_self());

		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
		    &cancel_state);
		(void) cond_wait(&umem_update_cv, &umem_update_lock);
		(void) pthread_setcancelstate(cancel_state, NULL);
	}
	/*
	 * Get it out of the Work Requested state
	 */
	if (cp->cache_unext != NULL) {
		cp->cache_uprev->cache_unext = cp->cache_unext;
		cp->cache_unext->cache_uprev = cp->cache_uprev;
		cp->cache_uprev = cp->cache_unext = NULL;
		cp->cache_uflags = 0;
	}
	/*
	 * Make sure it is in the Inactive state
	 */
	ASSERT(cp->cache_unext == NULL && cp->cache_uflags == 0);
	(void) mutex_unlock(&umem_update_lock);
}

static void
umem_updateall(int flags)
{
	umem_cache_t *cp;

	/*
	 * NOTE: To prevent deadlock, umem_cache_lock is always acquired first.
	 *
	 * (umem_add_update is called from things run via umem_cache_applyall)
	 */
	(void) mutex_lock(&umem_cache_lock);
	(void) mutex_lock(&umem_update_lock);

	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
	    cp = cp->cache_next)
		umem_add_update_unlocked(cp, flags);

	if (!IN_UPDATE())
		(void) cond_broadcast(&umem_update_cv);

	(void) mutex_unlock(&umem_update_lock);
	(void) mutex_unlock(&umem_cache_lock);
}

/*
 * Debugging support.  Given a buffer address, find its slab.
 */
static umem_slab_t *
umem_findslab(umem_cache_t *cp, void *buf)
{
	umem_slab_t *sp;

	(void) mutex_lock(&cp->cache_lock);
	for (sp = cp->cache_nullslab.slab_next;
	    sp != &cp->cache_nullslab; sp = sp->slab_next) {
		if (UMEM_SLAB_MEMBER(sp, buf)) {
			(void) mutex_unlock(&cp->cache_lock);
			return (sp);
		}
	}
	(void) mutex_unlock(&cp->cache_lock);

	return (NULL);
}

static void
umem_error(int error, umem_cache_t *cparg, void *bufarg)
{
	umem_buftag_t *btp = NULL;
	umem_bufctl_t *bcp = NULL;
	umem_cache_t *cp = cparg;
	umem_slab_t *sp;
	uint64_t *off;
	void *buf = bufarg;

	int old_logging = umem_logging;

	umem_logging = 0;	/* stop logging when a bad thing happens */

	umem_abort_info.ump_timestamp = gethrtime();

	sp = umem_findslab(cp, buf);
	if (sp == NULL) {
		for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
		    cp = cp->cache_prev) {
			if ((sp = umem_findslab(cp, buf)) != NULL)
				break;
		}
	}

	if (sp == NULL) {
		cp = NULL;
		error = UMERR_BADADDR;
	} else {
		if (cp != cparg)
			error = UMERR_BADCACHE;
		else
			buf = (char *)bufarg - ((uintptr_t)bufarg -
			    (uintptr_t)sp->slab_base) % cp->cache_chunksize;
		if (buf != bufarg)
			error = UMERR_BADBASE;
		if (cp->cache_flags & UMF_BUFTAG)
			btp = UMEM_BUFTAG(cp, buf);
		if (cp->cache_flags & UMF_HASH) {
			(void) mutex_lock(&cp->cache_lock);
			for (bcp = *UMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
				if (bcp->bc_addr == buf)
					break;
			(void) mutex_unlock(&cp->cache_lock);
			if (bcp == NULL && btp != NULL)
				bcp = btp->bt_bufctl;
			if (umem_findslab(cp->cache_bufctl_cache, bcp) ==
			    NULL || P2PHASE((uintptr_t)bcp, UMEM_ALIGN) ||
			    bcp->bc_addr != buf) {
				error = UMERR_BADBUFCTL;
				bcp = NULL;
			}
		}
	}

	umem_abort_info.ump_error = error;
	umem_abort_info.ump_buffer = bufarg;
	umem_abort_info.ump_realbuf = buf;
	umem_abort_info.ump_cache = cparg;
	umem_abort_info.ump_realcache = cp;
	umem_abort_info.ump_slab = sp;
	umem_abort_info.ump_bufctl = bcp;

	umem_printf("umem allocator: ");

	switch (error) {

	case UMERR_MODIFIED:
		umem_printf("buffer modified after being freed\n");
		off = verify_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
		if (off == NULL)	/* shouldn't happen */
			off = buf;
		umem_printf("modification occurred at offset 0x%lx "
		    "(0x%llx replaced by 0x%llx)\n",
		    (uintptr_t)off - (uintptr_t)buf,
		    (longlong_t)UMEM_FREE_PATTERN, (longlong_t)*off);
		break;

	case UMERR_REDZONE:
		umem_printf("redzone violation: write past end of buffer\n");
		break;

	case UMERR_BADADDR:
		umem_printf("invalid free: buffer not in cache\n");
		break;

	case UMERR_DUPFREE:
umem_printf("duplicate free: buffer freed twice\n"); 1160 break; 1161 1162 case UMERR_BADBUFTAG: 1163 umem_printf("boundary tag corrupted\n"); 1164 umem_printf("bcp ^ bxstat = %lx, should be %lx\n", 1165 (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat, 1166 UMEM_BUFTAG_FREE); 1167 break; 1168 1169 case UMERR_BADBUFCTL: 1170 umem_printf("bufctl corrupted\n"); 1171 break; 1172 1173 case UMERR_BADCACHE: 1174 umem_printf("buffer freed to wrong cache\n"); 1175 umem_printf("buffer was allocated from %s,\n", cp->cache_name); 1176 umem_printf("caller attempting free to %s.\n", 1177 cparg->cache_name); 1178 break; 1179 1180 case UMERR_BADSIZE: 1181 umem_printf("bad free: free size (%u) != alloc size (%u)\n", 1182 UMEM_SIZE_DECODE(((uint32_t *)btp)[0]), 1183 UMEM_SIZE_DECODE(((uint32_t *)btp)[1])); 1184 break; 1185 1186 case UMERR_BADBASE: 1187 umem_printf("bad free: free address (%p) != alloc address " 1188 "(%p)\n", bufarg, buf); 1189 break; 1190 } 1191 1192 umem_printf("buffer=%p bufctl=%p cache: %s\n", 1193 bufarg, (void *)bcp, cparg->cache_name); 1194 1195 if (bcp != NULL && (cp->cache_flags & UMF_AUDIT) && 1196 error != UMERR_BADBUFCTL) { 1197 int d; 1198 timespec_t ts; 1199 hrtime_t diff; 1200 umem_bufctl_audit_t *bcap = (umem_bufctl_audit_t *)bcp; 1201 1202 diff = umem_abort_info.ump_timestamp - bcap->bc_timestamp; 1203 ts.tv_sec = diff / NANOSEC; 1204 ts.tv_nsec = diff % NANOSEC; 1205 1206 umem_printf("previous transaction on buffer %p:\n", buf); 1207 umem_printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n", 1208 (void *)(intptr_t)bcap->bc_thread, ts.tv_sec, ts.tv_nsec, 1209 (void *)sp, cp->cache_name); 1210 for (d = 0; d < MIN(bcap->bc_depth, umem_stack_depth); d++) { 1211 (void) print_sym((void *)bcap->bc_stack[d]); 1212 umem_printf("\n"); 1213 } 1214 } 1215 1216 umem_err_recoverable("umem: heap corruption detected"); 1217 1218 umem_logging = old_logging; /* resume logging */ 1219 } 1220 1221 void 1222 umem_nofail_callback(umem_nofail_callback_t *cb) 1223 { 1224 nofail_callback = cb; 1225 } 1226 1227 static int 1228 umem_alloc_retry(umem_cache_t *cp, int umflag) 1229 { 1230 if (cp == &umem_null_cache) { 1231 if (umem_init()) 1232 return (1); /* retry */ 1233 /* 1234 * Initialization failed. Do normal failure processing. 1235 */ 1236 } 1237 if (umflag & UMEM_NOFAIL) { 1238 int def_result = UMEM_CALLBACK_EXIT(255); 1239 int result = def_result; 1240 umem_nofail_callback_t *callback = nofail_callback; 1241 1242 if (callback != NULL) 1243 result = callback(); 1244 1245 if (result == UMEM_CALLBACK_RETRY) 1246 return (1); 1247 1248 if ((result & ~0xFF) != UMEM_CALLBACK_EXIT(0)) { 1249 log_message("nofail callback returned %x\n", result); 1250 result = def_result; 1251 } 1252 1253 /* 1254 * only one thread will call exit 1255 */ 1256 if (umem_nofail_exit_thr == thr_self()) 1257 umem_panic("recursive UMEM_CALLBACK_EXIT()\n"); 1258 1259 (void) mutex_lock(&umem_nofail_exit_lock); 1260 umem_nofail_exit_thr = thr_self(); 1261 exit(result & 0xFF); 1262 /*NOTREACHED*/ 1263 } 1264 return (0); 1265 } 1266 1267 static umem_log_header_t * 1268 umem_log_init(size_t logsize) 1269 { 1270 umem_log_header_t *lhp; 1271 int nchunks = 4 * umem_max_ncpus; 1272 size_t lhsize = offsetof(umem_log_header_t, lh_cpu[umem_max_ncpus]); 1273 int i; 1274 1275 if (logsize == 0) 1276 return (NULL); 1277 1278 /* 1279 * Make sure that lhp->lh_cpu[] is nicely aligned 1280 * to prevent false sharing of cache lines. 
	 */
	lhsize = P2ROUNDUP(lhsize, UMEM_ALIGN);
	lhp = vmem_xalloc(umem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
	    NULL, NULL, VM_NOSLEEP);
	if (lhp == NULL)
		goto fail;

	bzero(lhp, lhsize);

	(void) mutex_init(&lhp->lh_lock, USYNC_THREAD, NULL);
	lhp->lh_nchunks = nchunks;
	lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks, PAGESIZE);
	if (lhp->lh_chunksize == 0)
		lhp->lh_chunksize = PAGESIZE;

	lhp->lh_base = vmem_alloc(umem_log_arena,
	    lhp->lh_chunksize * nchunks, VM_NOSLEEP);
	if (lhp->lh_base == NULL)
		goto fail;

	lhp->lh_free = vmem_alloc(umem_log_arena,
	    nchunks * sizeof (int), VM_NOSLEEP);
	if (lhp->lh_free == NULL)
		goto fail;

	bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);

	for (i = 0; i < umem_max_ncpus; i++) {
		umem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
		(void) mutex_init(&clhp->clh_lock, USYNC_THREAD, NULL);
		clhp->clh_chunk = i;
	}

	for (i = umem_max_ncpus; i < nchunks; i++)
		lhp->lh_free[i] = i;

	lhp->lh_head = umem_max_ncpus;
	lhp->lh_tail = 0;

	return (lhp);

fail:
	if (lhp != NULL) {
		if (lhp->lh_base != NULL)
			vmem_free(umem_log_arena, lhp->lh_base,
			    lhp->lh_chunksize * nchunks);

		vmem_xfree(umem_log_arena, lhp, lhsize);
	}
	return (NULL);
}

static void *
umem_log_enter(umem_log_header_t *lhp, void *data, size_t size)
{
	void *logspace;
	umem_cpu_log_header_t *clhp;

	/* check for a disabled log before touching the header */
	if (lhp == NULL || umem_logging == 0)
		return (NULL);

	clhp = &lhp->lh_cpu[CPU(umem_cpu_mask)->cpu_number];

	(void) mutex_lock(&clhp->clh_lock);
	clhp->clh_hits++;
	if (size > clhp->clh_avail) {
		(void) mutex_lock(&lhp->lh_lock);
		lhp->lh_hits++;
		lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
		lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
		clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
		lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
		clhp->clh_current = lhp->lh_base +
		    clhp->clh_chunk * lhp->lh_chunksize;
		clhp->clh_avail = lhp->lh_chunksize;
		if (size > lhp->lh_chunksize)
			size = lhp->lh_chunksize;
		(void) mutex_unlock(&lhp->lh_lock);
	}
	logspace = clhp->clh_current;
	clhp->clh_current += size;
	clhp->clh_avail -= size;
	bcopy(data, logspace, size);
	(void) mutex_unlock(&clhp->clh_lock);
	return (logspace);
}

#define	UMEM_AUDIT(lp, cp, bcp)						\
{									\
	umem_bufctl_audit_t *_bcp = (umem_bufctl_audit_t *)(bcp);	\
	_bcp->bc_timestamp = gethrtime();				\
	_bcp->bc_thread = thr_self();					\
	_bcp->bc_depth = getpcstack(_bcp->bc_stack, umem_stack_depth,	\
	    (cp != NULL) && (cp->cache_flags & UMF_CHECKSIGNAL));	\
	_bcp->bc_lastlog = umem_log_enter((lp), _bcp,			\
	    UMEM_BUFCTL_AUDIT_SIZE);					\
}

static void
umem_log_event(umem_log_header_t *lp, umem_cache_t *cp,
	umem_slab_t *sp, void *addr)
{
	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	bzero(bcp, UMEM_BUFCTL_AUDIT_SIZE);
	bcp->bc_addr = addr;
	bcp->bc_slab = sp;
	bcp->bc_cache = cp;
	UMEM_AUDIT(lp, cp, bcp);
}

/*
 * Create a new slab for cache cp.
 */
static umem_slab_t *
umem_slab_create(umem_cache_t *cp, int umflag)
{
	size_t slabsize = cp->cache_slabsize;
	size_t chunksize = cp->cache_chunksize;
	int cache_flags = cp->cache_flags;
	size_t color, chunks;
	char *buf, *slab;
	umem_slab_t *sp;
	umem_bufctl_t *bcp;
	vmem_t *vmp = cp->cache_arena;

	color = cp->cache_color + cp->cache_align;
	if (color > cp->cache_maxcolor)
		color = cp->cache_mincolor;
	cp->cache_color = color;

	slab = vmem_alloc(vmp, slabsize, UMEM_VMFLAGS(umflag));

	if (slab == NULL)
		goto vmem_alloc_failure;

	ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);

	if (!(cp->cache_cflags & UMC_NOTOUCH) &&
	    (cp->cache_flags & UMF_DEADBEEF))
		copy_pattern(UMEM_UNINITIALIZED_PATTERN, slab, slabsize);

	if (cache_flags & UMF_HASH) {
		if ((sp = _umem_cache_alloc(umem_slab_cache, umflag)) == NULL)
			goto slab_alloc_failure;
		chunks = (slabsize - color) / chunksize;
	} else {
		sp = UMEM_SLAB(cp, slab);
		chunks = (slabsize - sizeof (umem_slab_t) - color) / chunksize;
	}

	sp->slab_cache = cp;
	sp->slab_head = NULL;
	sp->slab_refcnt = 0;
	sp->slab_base = buf = slab + color;
	sp->slab_chunks = chunks;

	ASSERT(chunks > 0);
	while (chunks-- != 0) {
		if (cache_flags & UMF_HASH) {
			bcp = _umem_cache_alloc(cp->cache_bufctl_cache, umflag);
			if (bcp == NULL)
				goto bufctl_alloc_failure;
			if (cache_flags & UMF_AUDIT) {
				umem_bufctl_audit_t *bcap =
				    (umem_bufctl_audit_t *)bcp;
				bzero(bcap, UMEM_BUFCTL_AUDIT_SIZE);
				bcap->bc_cache = cp;
			}
			bcp->bc_addr = buf;
			bcp->bc_slab = sp;
		} else {
			bcp = UMEM_BUFCTL(cp, buf);
		}
		if (cache_flags & UMF_BUFTAG) {
			umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
			btp->bt_redzone = UMEM_REDZONE_PATTERN;
			btp->bt_bufctl = bcp;
			btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
			if (cache_flags & UMF_DEADBEEF) {
				copy_pattern(UMEM_FREE_PATTERN, buf,
				    cp->cache_verify);
			}
		}
		bcp->bc_next = sp->slab_head;
		sp->slab_head = bcp;
		buf += chunksize;
	}

	umem_log_event(umem_slab_log, cp, sp, slab);

	return (sp);

bufctl_alloc_failure:

	while ((bcp = sp->slab_head) != NULL) {
		sp->slab_head = bcp->bc_next;
		_umem_cache_free(cp->cache_bufctl_cache, bcp);
	}
	_umem_cache_free(umem_slab_cache, sp);

slab_alloc_failure:

	vmem_free(vmp, slab, slabsize);

vmem_alloc_failure:

	umem_log_event(umem_failure_log, cp, NULL, NULL);
	atomic_add_64(&cp->cache_alloc_fail, 1);

	return (NULL);
}

/*
 * Destroy a slab.
 */
static void
umem_slab_destroy(umem_cache_t *cp, umem_slab_t *sp)
{
	vmem_t *vmp = cp->cache_arena;
	void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);

	if (cp->cache_flags & UMF_HASH) {
		umem_bufctl_t *bcp;
		while ((bcp = sp->slab_head) != NULL) {
			sp->slab_head = bcp->bc_next;
			_umem_cache_free(cp->cache_bufctl_cache, bcp);
		}
		_umem_cache_free(umem_slab_cache, sp);
	}
	vmem_free(vmp, slab, cp->cache_slabsize);
}

/*
 * Allocate a raw (unconstructed) buffer from cp's slab layer.
 */
static void *
umem_slab_alloc(umem_cache_t *cp, int umflag)
{
	umem_bufctl_t *bcp, **hash_bucket;
	umem_slab_t *sp;
	void *buf;

	(void) mutex_lock(&cp->cache_lock);
	cp->cache_slab_alloc++;
	sp = cp->cache_freelist;
	ASSERT(sp->slab_cache == cp);
	if (sp->slab_head == NULL) {
		/*
		 * The freelist is empty.  Create a new slab.
		 */
		(void) mutex_unlock(&cp->cache_lock);
		if (cp == &umem_null_cache)
			return (NULL);
		if ((sp = umem_slab_create(cp, umflag)) == NULL)
			return (NULL);
		(void) mutex_lock(&cp->cache_lock);
		cp->cache_slab_create++;
		if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
			cp->cache_bufmax = cp->cache_buftotal;
		sp->slab_next = cp->cache_freelist;
		sp->slab_prev = cp->cache_freelist->slab_prev;
		sp->slab_next->slab_prev = sp;
		sp->slab_prev->slab_next = sp;
		cp->cache_freelist = sp;
	}

	sp->slab_refcnt++;
	ASSERT(sp->slab_refcnt <= sp->slab_chunks);

	/*
	 * If we're taking the last buffer in the slab,
	 * remove the slab from the cache's freelist.
	 */
	bcp = sp->slab_head;
	if ((sp->slab_head = bcp->bc_next) == NULL) {
		cp->cache_freelist = sp->slab_next;
		ASSERT(sp->slab_refcnt == sp->slab_chunks);
	}

	if (cp->cache_flags & UMF_HASH) {
		/*
		 * Add buffer to allocated-address hash table.
		 */
		buf = bcp->bc_addr;
		hash_bucket = UMEM_HASH(cp, buf);
		bcp->bc_next = *hash_bucket;
		*hash_bucket = bcp;
		if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
			UMEM_AUDIT(umem_transaction_log, cp, bcp);
		}
	} else {
		buf = UMEM_BUF(cp, bcp);
	}

	ASSERT(UMEM_SLAB_MEMBER(sp, buf));

	(void) mutex_unlock(&cp->cache_lock);

	return (buf);
}

/*
 * Free a raw (unconstructed) buffer to cp's slab layer.
 */
static void
umem_slab_free(umem_cache_t *cp, void *buf)
{
	umem_slab_t *sp;
	umem_bufctl_t *bcp, **prev_bcpp;

	ASSERT(buf != NULL);

	(void) mutex_lock(&cp->cache_lock);
	cp->cache_slab_free++;

	if (cp->cache_flags & UMF_HASH) {
		/*
		 * Look up buffer in allocated-address hash table.
		 */
		prev_bcpp = UMEM_HASH(cp, buf);
		while ((bcp = *prev_bcpp) != NULL) {
			if (bcp->bc_addr == buf) {
				*prev_bcpp = bcp->bc_next;
				sp = bcp->bc_slab;
				break;
			}
			cp->cache_lookup_depth++;
			prev_bcpp = &bcp->bc_next;
		}
	} else {
		bcp = UMEM_BUFCTL(cp, buf);
		sp = UMEM_SLAB(cp, buf);
	}

	if (bcp == NULL || sp->slab_cache != cp || !UMEM_SLAB_MEMBER(sp, buf)) {
		(void) mutex_unlock(&cp->cache_lock);
		umem_error(UMERR_BADADDR, cp, buf);
		return;
	}

	if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
		if (cp->cache_flags & UMF_CONTENTS)
			((umem_bufctl_audit_t *)bcp)->bc_contents =
			    umem_log_enter(umem_content_log, buf,
			    cp->cache_contents);
		UMEM_AUDIT(umem_transaction_log, cp, bcp);
	}

	/*
	 * If this slab isn't currently on the freelist, put it there.
1632 */ 1633 if (sp->slab_head == NULL) { 1634 ASSERT(sp->slab_refcnt == sp->slab_chunks); 1635 ASSERT(cp->cache_freelist != sp); 1636 sp->slab_next->slab_prev = sp->slab_prev; 1637 sp->slab_prev->slab_next = sp->slab_next; 1638 sp->slab_next = cp->cache_freelist; 1639 sp->slab_prev = cp->cache_freelist->slab_prev; 1640 sp->slab_next->slab_prev = sp; 1641 sp->slab_prev->slab_next = sp; 1642 cp->cache_freelist = sp; 1643 } 1644 1645 bcp->bc_next = sp->slab_head; 1646 sp->slab_head = bcp; 1647 1648 ASSERT(sp->slab_refcnt >= 1); 1649 if (--sp->slab_refcnt == 0) { 1650 /* 1651 * There are no outstanding allocations from this slab, 1652 * so we can reclaim the memory. 1653 */ 1654 sp->slab_next->slab_prev = sp->slab_prev; 1655 sp->slab_prev->slab_next = sp->slab_next; 1656 if (sp == cp->cache_freelist) 1657 cp->cache_freelist = sp->slab_next; 1658 cp->cache_slab_destroy++; 1659 cp->cache_buftotal -= sp->slab_chunks; 1660 (void) mutex_unlock(&cp->cache_lock); 1661 umem_slab_destroy(cp, sp); 1662 return; 1663 } 1664 (void) mutex_unlock(&cp->cache_lock); 1665 } 1666 1667 static int 1668 umem_cache_alloc_debug(umem_cache_t *cp, void *buf, int umflag) 1669 { 1670 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 1671 umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl; 1672 uint32_t mtbf; 1673 int flags_nfatal; 1674 1675 if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) { 1676 umem_error(UMERR_BADBUFTAG, cp, buf); 1677 return (-1); 1678 } 1679 1680 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_ALLOC; 1681 1682 if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) { 1683 umem_error(UMERR_BADBUFCTL, cp, buf); 1684 return (-1); 1685 } 1686 1687 btp->bt_redzone = UMEM_REDZONE_PATTERN; 1688 1689 if (cp->cache_flags & UMF_DEADBEEF) { 1690 if (verify_and_copy_pattern(UMEM_FREE_PATTERN, 1691 UMEM_UNINITIALIZED_PATTERN, buf, cp->cache_verify)) { 1692 umem_error(UMERR_MODIFIED, cp, buf); 1693 return (-1); 1694 } 1695 } 1696 1697 if ((mtbf = umem_mtbf | cp->cache_mtbf) != 0 && 1698 gethrtime() % mtbf == 0 && 1699 (umflag & (UMEM_FATAL_FLAGS)) == 0) { 1700 umem_log_event(umem_failure_log, cp, NULL, NULL); 1701 } else { 1702 mtbf = 0; 1703 } 1704 1705 /* 1706 * We do not pass fatal flags on to the constructor. This prevents 1707 * leaking buffers in the event of a subordinate constructor failing. 
1708 */ 1709 flags_nfatal = UMEM_DEFAULT; 1710 if (mtbf || (cp->cache_constructor != NULL && 1711 cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0)) { 1712 atomic_add_64(&cp->cache_alloc_fail, 1); 1713 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE; 1714 copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify); 1715 umem_slab_free(cp, buf); 1716 return (-1); 1717 } 1718 1719 if (cp->cache_flags & UMF_AUDIT) { 1720 UMEM_AUDIT(umem_transaction_log, cp, bcp); 1721 } 1722 1723 return (0); 1724 } 1725 1726 static int 1727 umem_cache_free_debug(umem_cache_t *cp, void *buf) 1728 { 1729 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 1730 umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl; 1731 umem_slab_t *sp; 1732 1733 if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_ALLOC)) { 1734 if (btp->bt_bxstat == ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) { 1735 umem_error(UMERR_DUPFREE, cp, buf); 1736 return (-1); 1737 } 1738 sp = umem_findslab(cp, buf); 1739 if (sp == NULL || sp->slab_cache != cp) 1740 umem_error(UMERR_BADADDR, cp, buf); 1741 else 1742 umem_error(UMERR_REDZONE, cp, buf); 1743 return (-1); 1744 } 1745 1746 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE; 1747 1748 if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) { 1749 umem_error(UMERR_BADBUFCTL, cp, buf); 1750 return (-1); 1751 } 1752 1753 if (btp->bt_redzone != UMEM_REDZONE_PATTERN) { 1754 umem_error(UMERR_REDZONE, cp, buf); 1755 return (-1); 1756 } 1757 1758 if (cp->cache_flags & UMF_AUDIT) { 1759 if (cp->cache_flags & UMF_CONTENTS) 1760 bcp->bc_contents = umem_log_enter(umem_content_log, 1761 buf, cp->cache_contents); 1762 UMEM_AUDIT(umem_transaction_log, cp, bcp); 1763 } 1764 1765 if (cp->cache_destructor != NULL) 1766 cp->cache_destructor(buf, cp->cache_private); 1767 1768 if (cp->cache_flags & UMF_DEADBEEF) 1769 copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify); 1770 1771 return (0); 1772 } 1773 1774 /* 1775 * Free each object in magazine mp to cp's slab layer, and free mp itself. 1776 */ 1777 static void 1778 umem_magazine_destroy(umem_cache_t *cp, umem_magazine_t *mp, int nrounds) 1779 { 1780 int round; 1781 1782 ASSERT(cp->cache_next == NULL || IN_UPDATE()); 1783 1784 for (round = 0; round < nrounds; round++) { 1785 void *buf = mp->mag_round[round]; 1786 1787 if ((cp->cache_flags & UMF_DEADBEEF) && 1788 verify_pattern(UMEM_FREE_PATTERN, buf, 1789 cp->cache_verify) != NULL) { 1790 umem_error(UMERR_MODIFIED, cp, buf); 1791 continue; 1792 } 1793 1794 if (!(cp->cache_flags & UMF_BUFTAG) && 1795 cp->cache_destructor != NULL) 1796 cp->cache_destructor(buf, cp->cache_private); 1797 1798 umem_slab_free(cp, buf); 1799 } 1800 ASSERT(UMEM_MAGAZINE_VALID(cp, mp)); 1801 _umem_cache_free(cp->cache_magtype->mt_cache, mp); 1802 } 1803 1804 /* 1805 * Allocate a magazine from the depot. 1806 */ 1807 static umem_magazine_t * 1808 umem_depot_alloc(umem_cache_t *cp, umem_maglist_t *mlp) 1809 { 1810 umem_magazine_t *mp; 1811 1812 /* 1813 * If we can't get the depot lock without contention, 1814 * update our contention count. We use the depot 1815 * contention rate to determine whether we need to 1816 * increase the magazine size for better scalability. 
1817 */ 1818 if (mutex_trylock(&cp->cache_depot_lock) != 0) { 1819 (void) mutex_lock(&cp->cache_depot_lock); 1820 cp->cache_depot_contention++; 1821 } 1822 1823 if ((mp = mlp->ml_list) != NULL) { 1824 ASSERT(UMEM_MAGAZINE_VALID(cp, mp)); 1825 mlp->ml_list = mp->mag_next; 1826 if (--mlp->ml_total < mlp->ml_min) 1827 mlp->ml_min = mlp->ml_total; 1828 mlp->ml_alloc++; 1829 } 1830 1831 (void) mutex_unlock(&cp->cache_depot_lock); 1832 1833 return (mp); 1834 } 1835 1836 /* 1837 * Free a magazine to the depot. 1838 */ 1839 static void 1840 umem_depot_free(umem_cache_t *cp, umem_maglist_t *mlp, umem_magazine_t *mp) 1841 { 1842 (void) mutex_lock(&cp->cache_depot_lock); 1843 ASSERT(UMEM_MAGAZINE_VALID(cp, mp)); 1844 mp->mag_next = mlp->ml_list; 1845 mlp->ml_list = mp; 1846 mlp->ml_total++; 1847 (void) mutex_unlock(&cp->cache_depot_lock); 1848 } 1849 1850 /* 1851 * Update the working set statistics for cp's depot. 1852 */ 1853 static void 1854 umem_depot_ws_update(umem_cache_t *cp) 1855 { 1856 (void) mutex_lock(&cp->cache_depot_lock); 1857 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min; 1858 cp->cache_full.ml_min = cp->cache_full.ml_total; 1859 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min; 1860 cp->cache_empty.ml_min = cp->cache_empty.ml_total; 1861 (void) mutex_unlock(&cp->cache_depot_lock); 1862 } 1863 1864 /* 1865 * Reap all magazines that have fallen out of the depot's working set. 1866 */ 1867 static void 1868 umem_depot_ws_reap(umem_cache_t *cp) 1869 { 1870 long reap; 1871 umem_magazine_t *mp; 1872 1873 ASSERT(cp->cache_next == NULL || IN_REAP()); 1874 1875 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); 1876 while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_full)) != NULL) 1877 umem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize); 1878 1879 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min); 1880 while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_empty)) != NULL) 1881 umem_magazine_destroy(cp, mp, 0); 1882 } 1883 1884 static void 1885 umem_cpu_reload(umem_cpu_cache_t *ccp, umem_magazine_t *mp, int rounds) 1886 { 1887 ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) || 1888 (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize)); 1889 ASSERT(ccp->cc_magsize > 0); 1890 1891 ccp->cc_ploaded = ccp->cc_loaded; 1892 ccp->cc_prounds = ccp->cc_rounds; 1893 ccp->cc_loaded = mp; 1894 ccp->cc_rounds = rounds; 1895 } 1896 1897 /* 1898 * Allocate a constructed object from cache cp. 1899 */ 1900 #pragma weak umem_cache_alloc = _umem_cache_alloc 1901 void * 1902 _umem_cache_alloc(umem_cache_t *cp, int umflag) 1903 { 1904 umem_cpu_cache_t *ccp; 1905 umem_magazine_t *fmp; 1906 void *buf; 1907 int flags_nfatal; 1908 1909 retry: 1910 ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask)); 1911 (void) mutex_lock(&ccp->cc_lock); 1912 for (;;) { 1913 /* 1914 * If there's an object available in the current CPU's 1915 * loaded magazine, just take it and return. 1916 */ 1917 if (ccp->cc_rounds > 0) { 1918 buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds]; 1919 ccp->cc_alloc++; 1920 (void) mutex_unlock(&ccp->cc_lock); 1921 if ((ccp->cc_flags & UMF_BUFTAG) && 1922 umem_cache_alloc_debug(cp, buf, umflag) == -1) { 1923 if (umem_alloc_retry(cp, umflag)) { 1924 goto retry; 1925 } 1926 1927 return (NULL); 1928 } 1929 return (buf); 1930 } 1931 1932 /* 1933 * The loaded magazine is empty. If the previously loaded 1934 * magazine was full, exchange them and try again. 
1935 */ 1936 if (ccp->cc_prounds > 0) { 1937 umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds); 1938 continue; 1939 } 1940 1941 /* 1942 * If the magazine layer is disabled, break out now. 1943 */ 1944 if (ccp->cc_magsize == 0) 1945 break; 1946 1947 /* 1948 * Try to get a full magazine from the depot. 1949 */ 1950 fmp = umem_depot_alloc(cp, &cp->cache_full); 1951 if (fmp != NULL) { 1952 if (ccp->cc_ploaded != NULL) 1953 umem_depot_free(cp, &cp->cache_empty, 1954 ccp->cc_ploaded); 1955 umem_cpu_reload(ccp, fmp, ccp->cc_magsize); 1956 continue; 1957 } 1958 1959 /* 1960 * There are no full magazines in the depot, 1961 * so fall through to the slab layer. 1962 */ 1963 break; 1964 } 1965 (void) mutex_unlock(&ccp->cc_lock); 1966 1967 /* 1968 * We couldn't allocate a constructed object from the magazine layer, 1969 * so get a raw buffer from the slab layer and apply its constructor. 1970 */ 1971 buf = umem_slab_alloc(cp, umflag); 1972 1973 if (buf == NULL) { 1974 if (cp == &umem_null_cache) 1975 return (NULL); 1976 if (umem_alloc_retry(cp, umflag)) { 1977 goto retry; 1978 } 1979 1980 return (NULL); 1981 } 1982 1983 if (cp->cache_flags & UMF_BUFTAG) { 1984 /* 1985 * Let umem_cache_alloc_debug() apply the constructor for us. 1986 */ 1987 if (umem_cache_alloc_debug(cp, buf, umflag) == -1) { 1988 if (umem_alloc_retry(cp, umflag)) { 1989 goto retry; 1990 } 1991 return (NULL); 1992 } 1993 return (buf); 1994 } 1995 1996 /* 1997 * We do not pass fatal flags on to the constructor. This prevents 1998 * leaking buffers in the event of a subordinate constructor failing. 1999 */ 2000 flags_nfatal = UMEM_DEFAULT; 2001 if (cp->cache_constructor != NULL && 2002 cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0) { 2003 atomic_add_64(&cp->cache_alloc_fail, 1); 2004 umem_slab_free(cp, buf); 2005 2006 if (umem_alloc_retry(cp, umflag)) { 2007 goto retry; 2008 } 2009 return (NULL); 2010 } 2011 2012 return (buf); 2013 } 2014 2015 /* 2016 * Free a constructed object to cache cp. 2017 */ 2018 #pragma weak umem_cache_free = _umem_cache_free 2019 void 2020 _umem_cache_free(umem_cache_t *cp, void *buf) 2021 { 2022 umem_cpu_cache_t *ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask)); 2023 umem_magazine_t *emp; 2024 umem_magtype_t *mtp; 2025 2026 if (ccp->cc_flags & UMF_BUFTAG) 2027 if (umem_cache_free_debug(cp, buf) == -1) 2028 return; 2029 2030 (void) mutex_lock(&ccp->cc_lock); 2031 for (;;) { 2032 /* 2033 * If there's a slot available in the current CPU's 2034 * loaded magazine, just put the object there and return. 2035 */ 2036 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) { 2037 ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf; 2038 ccp->cc_free++; 2039 (void) mutex_unlock(&ccp->cc_lock); 2040 return; 2041 } 2042 2043 /* 2044 * The loaded magazine is full. If the previously loaded 2045 * magazine was empty, exchange them and try again. 2046 */ 2047 if (ccp->cc_prounds == 0) { 2048 umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds); 2049 continue; 2050 } 2051 2052 /* 2053 * If the magazine layer is disabled, break out now. 2054 */ 2055 if (ccp->cc_magsize == 0) 2056 break; 2057 2058 /* 2059 * Try to get an empty magazine from the depot. 2060 */ 2061 emp = umem_depot_alloc(cp, &cp->cache_empty); 2062 if (emp != NULL) { 2063 if (ccp->cc_ploaded != NULL) 2064 umem_depot_free(cp, &cp->cache_full, 2065 ccp->cc_ploaded); 2066 umem_cpu_reload(ccp, emp, 0); 2067 continue; 2068 } 2069 2070 /* 2071 * There are no empty magazines in the depot, 2072 * so try to allocate a new one. 
We must drop all locks 2073 * across umem_cache_alloc() because lower layers may 2074 * attempt to allocate from this cache. 2075 */ 2076 mtp = cp->cache_magtype; 2077 (void) mutex_unlock(&ccp->cc_lock); 2078 emp = _umem_cache_alloc(mtp->mt_cache, UMEM_DEFAULT); 2079 (void) mutex_lock(&ccp->cc_lock); 2080 2081 if (emp != NULL) { 2082 /* 2083 * We successfully allocated an empty magazine. 2084 * However, we had to drop ccp->cc_lock to do it, 2085 * so the cache's magazine size may have changed. 2086 * If so, free the magazine and try again. 2087 */ 2088 if (ccp->cc_magsize != mtp->mt_magsize) { 2089 (void) mutex_unlock(&ccp->cc_lock); 2090 _umem_cache_free(mtp->mt_cache, emp); 2091 (void) mutex_lock(&ccp->cc_lock); 2092 continue; 2093 } 2094 2095 /* 2096 * We got a magazine of the right size. Add it to 2097 * the depot and try the whole dance again. 2098 */ 2099 umem_depot_free(cp, &cp->cache_empty, emp); 2100 continue; 2101 } 2102 2103 /* 2104 * We couldn't allocate an empty magazine, 2105 * so fall through to the slab layer. 2106 */ 2107 break; 2108 } 2109 (void) mutex_unlock(&ccp->cc_lock); 2110 2111 /* 2112 * We couldn't free our constructed object to the magazine layer, 2113 * so apply its destructor and free it to the slab layer. 2114 * Note that if UMF_BUFTAG is in effect, umem_cache_free_debug() 2115 * will have already applied the destructor. 2116 */ 2117 if (!(cp->cache_flags & UMF_BUFTAG) && cp->cache_destructor != NULL) 2118 cp->cache_destructor(buf, cp->cache_private); 2119 2120 umem_slab_free(cp, buf); 2121 } 2122 2123 #pragma weak umem_zalloc = _umem_zalloc 2124 void * 2125 _umem_zalloc(size_t size, int umflag) 2126 { 2127 size_t index = (size - 1) >> UMEM_ALIGN_SHIFT; 2128 void *buf; 2129 2130 retry: 2131 if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) { 2132 umem_cache_t *cp = umem_alloc_table[index]; 2133 buf = _umem_cache_alloc(cp, umflag); 2134 if (buf != NULL) { 2135 if (cp->cache_flags & UMF_BUFTAG) { 2136 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 2137 ((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE; 2138 ((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size); 2139 } 2140 bzero(buf, size); 2141 } else if (umem_alloc_retry(cp, umflag)) 2142 goto retry; 2143 } else { 2144 buf = _umem_alloc(size, umflag); /* handles failure */ 2145 if (buf != NULL) 2146 bzero(buf, size); 2147 } 2148 return (buf); 2149 } 2150 2151 #pragma weak umem_alloc = _umem_alloc 2152 void * 2153 _umem_alloc(size_t size, int umflag) 2154 { 2155 size_t index = (size - 1) >> UMEM_ALIGN_SHIFT; 2156 void *buf; 2157 umem_alloc_retry: 2158 if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) { 2159 umem_cache_t *cp = umem_alloc_table[index]; 2160 buf = _umem_cache_alloc(cp, umflag); 2161 if ((cp->cache_flags & UMF_BUFTAG) && buf != NULL) { 2162 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 2163 ((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE; 2164 ((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size); 2165 } 2166 if (buf == NULL && umem_alloc_retry(cp, umflag)) 2167 goto umem_alloc_retry; 2168 return (buf); 2169 } 2170 if (size == 0) 2171 return (NULL); 2172 if (umem_oversize_arena == NULL) { 2173 if (umem_init()) 2174 ASSERT(umem_oversize_arena != NULL); 2175 else 2176 return (NULL); 2177 } 2178 buf = vmem_alloc(umem_oversize_arena, size, UMEM_VMFLAGS(umflag)); 2179 if (buf == NULL) { 2180 umem_log_event(umem_failure_log, NULL, NULL, (void *)size); 2181 if (umem_alloc_retry(NULL, umflag)) 2182 goto umem_alloc_retry; 2183 } 2184 return (buf); 2185 } 2186 2187 #pragma weak umem_alloc_align = _umem_alloc_align 2188 void * 2189 
_umem_alloc_align(size_t size, size_t align, int umflag) 2190 { 2191 void *buf; 2192 2193 if (size == 0) 2194 return (NULL); 2195 if ((align & (align - 1)) != 0) 2196 return (NULL); 2197 if (align < UMEM_ALIGN) 2198 align = UMEM_ALIGN; 2199 2200 umem_alloc_align_retry: 2201 if (umem_memalign_arena == NULL) { 2202 if (umem_init()) 2203 ASSERT(umem_oversize_arena != NULL); 2204 else 2205 return (NULL); 2206 } 2207 buf = vmem_xalloc(umem_memalign_arena, size, align, 0, 0, NULL, NULL, 2208 UMEM_VMFLAGS(umflag)); 2209 if (buf == NULL) { 2210 umem_log_event(umem_failure_log, NULL, NULL, (void *)size); 2211 if (umem_alloc_retry(NULL, umflag)) 2212 goto umem_alloc_align_retry; 2213 } 2214 return (buf); 2215 } 2216 2217 #pragma weak umem_free = _umem_free 2218 void 2219 _umem_free(void *buf, size_t size) 2220 { 2221 size_t index = (size - 1) >> UMEM_ALIGN_SHIFT; 2222 2223 if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) { 2224 umem_cache_t *cp = umem_alloc_table[index]; 2225 if (cp->cache_flags & UMF_BUFTAG) { 2226 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 2227 uint32_t *ip = (uint32_t *)btp; 2228 if (ip[1] != UMEM_SIZE_ENCODE(size)) { 2229 if (*(uint64_t *)buf == UMEM_FREE_PATTERN) { 2230 umem_error(UMERR_DUPFREE, cp, buf); 2231 return; 2232 } 2233 if (UMEM_SIZE_VALID(ip[1])) { 2234 ip[0] = UMEM_SIZE_ENCODE(size); 2235 umem_error(UMERR_BADSIZE, cp, buf); 2236 } else { 2237 umem_error(UMERR_REDZONE, cp, buf); 2238 } 2239 return; 2240 } 2241 if (((uint8_t *)buf)[size] != UMEM_REDZONE_BYTE) { 2242 umem_error(UMERR_REDZONE, cp, buf); 2243 return; 2244 } 2245 btp->bt_redzone = UMEM_REDZONE_PATTERN; 2246 } 2247 _umem_cache_free(cp, buf); 2248 } else { 2249 if (buf == NULL && size == 0) 2250 return; 2251 vmem_free(umem_oversize_arena, buf, size); 2252 } 2253 } 2254 2255 #pragma weak umem_free_align = _umem_free_align 2256 void 2257 _umem_free_align(void *buf, size_t size) 2258 { 2259 if (buf == NULL && size == 0) 2260 return; 2261 vmem_xfree(umem_memalign_arena, buf, size); 2262 } 2263 2264 static void * 2265 umem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag) 2266 { 2267 size_t realsize = size + vmp->vm_quantum; 2268 2269 /* 2270 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding 2271 * vm_quantum will cause integer wraparound. Check for this, and 2272 * blow off the firewall page in this case. Note that such a 2273 * giant allocation (the entire address space) can never be 2274 * satisfied, so it will either fail immediately (VM_NOSLEEP) 2275 * or sleep forever (VM_SLEEP). Thus, there is no need for a 2276 * corresponding check in umem_firewall_va_free(). 2277 */ 2278 if (realsize < size) 2279 realsize = size; 2280 2281 return (vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT)); 2282 } 2283 2284 static void 2285 umem_firewall_va_free(vmem_t *vmp, void *addr, size_t size) 2286 { 2287 vmem_free(vmp, addr, size + vmp->vm_quantum); 2288 } 2289 2290 /* 2291 * Reclaim all unused memory from a cache. 2292 */ 2293 static void 2294 umem_cache_reap(umem_cache_t *cp) 2295 { 2296 /* 2297 * Ask the cache's owner to free some memory if possible. 2298 * The idea is to handle things like the inode cache, which 2299 * typically sits on a bunch of memory that it doesn't truly 2300 * *need*. Reclaim policy is entirely up to the owner; this 2301 * callback is just an advisory plea for help. 2302 */ 2303 if (cp->cache_reclaim != NULL) 2304 cp->cache_reclaim(cp->cache_private); 2305 2306 umem_depot_ws_reap(cp); 2307 } 2308 2309 /* 2310 * Purge all magazines from a cache and set its magazine limit to zero. 
2311 * All calls are serialized by being done by the update thread, except for 2312 * the final call from umem_cache_destroy(). 2313 */ 2314 static void 2315 umem_cache_magazine_purge(umem_cache_t *cp) 2316 { 2317 umem_cpu_cache_t *ccp; 2318 umem_magazine_t *mp, *pmp; 2319 int rounds, prounds, cpu_seqid; 2320 2321 ASSERT(cp->cache_next == NULL || IN_UPDATE()); 2322 2323 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) { 2324 ccp = &cp->cache_cpu[cpu_seqid]; 2325 2326 (void) mutex_lock(&ccp->cc_lock); 2327 mp = ccp->cc_loaded; 2328 pmp = ccp->cc_ploaded; 2329 rounds = ccp->cc_rounds; 2330 prounds = ccp->cc_prounds; 2331 ccp->cc_loaded = NULL; 2332 ccp->cc_ploaded = NULL; 2333 ccp->cc_rounds = -1; 2334 ccp->cc_prounds = -1; 2335 ccp->cc_magsize = 0; 2336 (void) mutex_unlock(&ccp->cc_lock); 2337 2338 if (mp) 2339 umem_magazine_destroy(cp, mp, rounds); 2340 if (pmp) 2341 umem_magazine_destroy(cp, pmp, prounds); 2342 } 2343 2344 /* 2345 * Updating the working set statistics twice in a row has the 2346 * effect of setting the working set size to zero, so everything 2347 * is eligible for reaping. 2348 */ 2349 umem_depot_ws_update(cp); 2350 umem_depot_ws_update(cp); 2351 2352 umem_depot_ws_reap(cp); 2353 } 2354 2355 /* 2356 * Enable per-cpu magazines on a cache. 2357 */ 2358 static void 2359 umem_cache_magazine_enable(umem_cache_t *cp) 2360 { 2361 int cpu_seqid; 2362 2363 if (cp->cache_flags & UMF_NOMAGAZINE) 2364 return; 2365 2366 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) { 2367 umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; 2368 (void) mutex_lock(&ccp->cc_lock); 2369 ccp->cc_magsize = cp->cache_magtype->mt_magsize; 2370 (void) mutex_unlock(&ccp->cc_lock); 2371 } 2372 2373 } 2374 2375 /* 2376 * Recompute a cache's magazine size. The trade-off is that larger magazines 2377 * provide a higher transfer rate with the depot, while smaller magazines 2378 * reduce memory consumption. Magazine resizing is an expensive operation; 2379 * it should not be done frequently. 2380 * 2381 * Changes to the magazine size are serialized by having only one thread 2382 * (the update thread) perform updates. 2383 * 2384 * Note: at present this only grows the magazine size. It might be useful 2385 * to allow shrinkage too. 2386 */ 2387 static void 2388 umem_cache_magazine_resize(umem_cache_t *cp) 2389 { 2390 umem_magtype_t *mtp = cp->cache_magtype; 2391 2392 ASSERT(IN_UPDATE()); 2393 2394 if (cp->cache_chunksize < mtp->mt_maxbuf) { 2395 umem_cache_magazine_purge(cp); 2396 (void) mutex_lock(&cp->cache_depot_lock); 2397 cp->cache_magtype = ++mtp; 2398 cp->cache_depot_contention_prev = 2399 cp->cache_depot_contention + INT_MAX; 2400 (void) mutex_unlock(&cp->cache_depot_lock); 2401 umem_cache_magazine_enable(cp); 2402 } 2403 } 2404 2405 /* 2406 * Rescale a cache's hash table, so that the table size is roughly the 2407 * cache size. We want the average lookup time to be extremely small. 
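 *
 * A worked example of the sizing rule below (illustrative): with
 * cache_buftotal == 1000, new_size = 1 << (highbit(3 * 1000 + 4) - 2) =
 * 1 << 10 = 1024 buckets (subject to the UMEM_HASH_INITIAL floor) --
 * roughly one hash chain per buffer, so the expected chain length stays
 * near one.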
2408 */ 2409 static void 2410 umem_hash_rescale(umem_cache_t *cp) 2411 { 2412 umem_bufctl_t **old_table, **new_table, *bcp; 2413 size_t old_size, new_size, h; 2414 2415 ASSERT(IN_UPDATE()); 2416 2417 new_size = MAX(UMEM_HASH_INITIAL, 2418 1 << (highbit(3 * cp->cache_buftotal + 4) - 2)); 2419 old_size = cp->cache_hash_mask + 1; 2420 2421 if ((old_size >> 1) <= new_size && new_size <= (old_size << 1)) 2422 return; 2423 2424 new_table = vmem_alloc(umem_hash_arena, new_size * sizeof (void *), 2425 VM_NOSLEEP); 2426 if (new_table == NULL) 2427 return; 2428 bzero(new_table, new_size * sizeof (void *)); 2429 2430 (void) mutex_lock(&cp->cache_lock); 2431 2432 old_size = cp->cache_hash_mask + 1; 2433 old_table = cp->cache_hash_table; 2434 2435 cp->cache_hash_mask = new_size - 1; 2436 cp->cache_hash_table = new_table; 2437 cp->cache_rescale++; 2438 2439 for (h = 0; h < old_size; h++) { 2440 bcp = old_table[h]; 2441 while (bcp != NULL) { 2442 void *addr = bcp->bc_addr; 2443 umem_bufctl_t *next_bcp = bcp->bc_next; 2444 umem_bufctl_t **hash_bucket = UMEM_HASH(cp, addr); 2445 bcp->bc_next = *hash_bucket; 2446 *hash_bucket = bcp; 2447 bcp = next_bcp; 2448 } 2449 } 2450 2451 (void) mutex_unlock(&cp->cache_lock); 2452 2453 vmem_free(umem_hash_arena, old_table, old_size * sizeof (void *)); 2454 } 2455 2456 /* 2457 * Perform periodic maintenance on a cache: hash rescaling, 2458 * depot working-set update, and magazine resizing. 2459 */ 2460 void 2461 umem_cache_update(umem_cache_t *cp) 2462 { 2463 int update_flags = 0; 2464 2465 ASSERT(MUTEX_HELD(&umem_cache_lock)); 2466 2467 /* 2468 * If the cache has become much larger or smaller than its hash table, 2469 * fire off a request to rescale the hash table. 2470 */ 2471 (void) mutex_lock(&cp->cache_lock); 2472 2473 if ((cp->cache_flags & UMF_HASH) && 2474 (cp->cache_buftotal > (cp->cache_hash_mask << 1) || 2475 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) && 2476 cp->cache_hash_mask > UMEM_HASH_INITIAL))) 2477 update_flags |= UMU_HASH_RESCALE; 2478 2479 (void) mutex_unlock(&cp->cache_lock); 2480 2481 /* 2482 * Update the depot working set statistics. 2483 */ 2484 umem_depot_ws_update(cp); 2485 2486 /* 2487 * If there's a lot of contention in the depot, 2488 * increase the magazine size. 2489 */ 2490 (void) mutex_lock(&cp->cache_depot_lock); 2491 2492 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf && 2493 (int)(cp->cache_depot_contention - 2494 cp->cache_depot_contention_prev) > umem_depot_contention) 2495 update_flags |= UMU_MAGAZINE_RESIZE; 2496 2497 cp->cache_depot_contention_prev = cp->cache_depot_contention; 2498 2499 (void) mutex_unlock(&cp->cache_depot_lock); 2500 2501 if (update_flags) 2502 umem_add_update(cp, update_flags); 2503 } 2504 2505 /* 2506 * Runs all pending updates. 2507 * 2508 * The update lock must be held on entrance, and will be held on exit. 2509 */ 2510 void 2511 umem_process_updates(void) 2512 { 2513 ASSERT(MUTEX_HELD(&umem_update_lock)); 2514 2515 while (umem_null_cache.cache_unext != &umem_null_cache) { 2516 int notify = 0; 2517 umem_cache_t *cp = umem_null_cache.cache_unext; 2518 2519 cp->cache_uprev->cache_unext = cp->cache_unext; 2520 cp->cache_unext->cache_uprev = cp->cache_uprev; 2521 cp->cache_uprev = cp->cache_unext = NULL; 2522 2523 ASSERT(!(cp->cache_uflags & UMU_ACTIVE)); 2524 2525 while (cp->cache_uflags) { 2526 int uflags = (cp->cache_uflags |= UMU_ACTIVE); 2527 (void) mutex_unlock(&umem_update_lock); 2528 2529 /* 2530 * The order here is important. Each step can speed up 2531 * later steps. 
2532 */ 2533 2534 if (uflags & UMU_HASH_RESCALE) 2535 umem_hash_rescale(cp); 2536 2537 if (uflags & UMU_MAGAZINE_RESIZE) 2538 umem_cache_magazine_resize(cp); 2539 2540 if (uflags & UMU_REAP) 2541 umem_cache_reap(cp); 2542 2543 (void) mutex_lock(&umem_update_lock); 2544 2545 /* 2546 * check if anyone has requested notification 2547 */ 2548 if (cp->cache_uflags & UMU_NOTIFY) { 2549 uflags |= UMU_NOTIFY; 2550 notify = 1; 2551 } 2552 cp->cache_uflags &= ~uflags; 2553 } 2554 if (notify) 2555 (void) cond_broadcast(&umem_update_cv); 2556 } 2557 } 2558 2559 #ifndef UMEM_STANDALONE 2560 static void 2561 umem_st_update(void) 2562 { 2563 ASSERT(MUTEX_HELD(&umem_update_lock)); 2564 ASSERT(umem_update_thr == 0 && umem_st_update_thr == 0); 2565 2566 umem_st_update_thr = thr_self(); 2567 2568 (void) mutex_unlock(&umem_update_lock); 2569 2570 vmem_update(NULL); 2571 umem_cache_applyall(umem_cache_update); 2572 2573 (void) mutex_lock(&umem_update_lock); 2574 2575 umem_process_updates(); /* does all of the requested work */ 2576 2577 umem_reap_next = gethrtime() + 2578 (hrtime_t)umem_reap_interval * NANOSEC; 2579 2580 umem_reaping = UMEM_REAP_DONE; 2581 2582 umem_st_update_thr = 0; 2583 } 2584 #endif 2585 2586 /* 2587 * Reclaim all unused memory from all caches. Called from vmem when memory 2588 * gets tight. Must be called with no locks held. 2589 * 2590 * This just requests a reap on all caches, and notifies the update thread. 2591 */ 2592 void 2593 umem_reap(void) 2594 { 2595 #ifndef UMEM_STANDALONE 2596 extern int __nthreads(void); 2597 #endif 2598 2599 if (umem_ready != UMEM_READY || umem_reaping != UMEM_REAP_DONE || 2600 gethrtime() < umem_reap_next) 2601 return; 2602 2603 (void) mutex_lock(&umem_update_lock); 2604 2605 if (umem_reaping != UMEM_REAP_DONE || gethrtime() < umem_reap_next) { 2606 (void) mutex_unlock(&umem_update_lock); 2607 return; 2608 } 2609 umem_reaping = UMEM_REAP_ADDING; /* lock out other reaps */ 2610 2611 (void) mutex_unlock(&umem_update_lock); 2612 2613 umem_updateall(UMU_REAP); 2614 2615 (void) mutex_lock(&umem_update_lock); 2616 2617 umem_reaping = UMEM_REAP_ACTIVE; 2618 2619 /* Standalone is single-threaded */ 2620 #ifndef UMEM_STANDALONE 2621 if (umem_update_thr == 0) { 2622 /* 2623 * The update thread does not exist. If the process is 2624 * multi-threaded, create it. If not, or the creation fails, 2625 * do the update processing inline. 2626 */ 2627 ASSERT(umem_st_update_thr == 0); 2628 2629 if (__nthreads() <= 1 || umem_create_update_thread() == 0) 2630 umem_st_update(); 2631 } 2632 2633 (void) cond_broadcast(&umem_update_cv); /* wake up the update thread */ 2634 #endif 2635 2636 (void) mutex_unlock(&umem_update_lock); 2637 } 2638 2639 umem_cache_t * 2640 umem_cache_create( 2641 char *name, /* descriptive name for this cache */ 2642 size_t bufsize, /* size of the objects it manages */ 2643 size_t align, /* required object alignment */ 2644 umem_constructor_t *constructor, /* object constructor */ 2645 umem_destructor_t *destructor, /* object destructor */ 2646 umem_reclaim_t *reclaim, /* memory reclaim callback */ 2647 void *private, /* pass-thru arg for constr/destr/reclaim */ 2648 vmem_t *vmp, /* vmem source for slab allocation */ 2649 int cflags) /* cache creation flags */ 2650 { 2651 int cpu_seqid; 2652 size_t chunksize; 2653 umem_cache_t *cp, *cnext, *cprev; 2654 umem_magtype_t *mtp; 2655 size_t csize; 2656 size_t phase; 2657 2658 /* 2659 * The init thread is allowed to create internal and quantum caches. 
2660 * 2661 * Other threads must wait until until initialization is complete. 2662 */ 2663 if (umem_init_thr == thr_self()) 2664 ASSERT((cflags & (UMC_INTERNAL | UMC_QCACHE)) != 0); 2665 else { 2666 ASSERT(!(cflags & UMC_INTERNAL)); 2667 if (umem_ready != UMEM_READY && umem_init() == 0) { 2668 errno = EAGAIN; 2669 return (NULL); 2670 } 2671 } 2672 2673 csize = UMEM_CACHE_SIZE(umem_max_ncpus); 2674 phase = P2NPHASE(csize, UMEM_CPU_CACHE_SIZE); 2675 2676 if (vmp == NULL) 2677 vmp = umem_default_arena; 2678 2679 ASSERT(P2PHASE(phase, UMEM_ALIGN) == 0); 2680 2681 /* 2682 * Check that the arguments are reasonable 2683 */ 2684 if ((align & (align - 1)) != 0 || align > vmp->vm_quantum || 2685 ((cflags & UMC_NOHASH) && (cflags & UMC_NOTOUCH)) || 2686 name == NULL || bufsize == 0) { 2687 errno = EINVAL; 2688 return (NULL); 2689 } 2690 2691 /* 2692 * If align == 0, we set it to the minimum required alignment. 2693 * 2694 * If align < UMEM_ALIGN, we round it up to UMEM_ALIGN, unless 2695 * UMC_NOTOUCH was passed. 2696 */ 2697 if (align == 0) { 2698 if (P2ROUNDUP(bufsize, UMEM_ALIGN) >= UMEM_SECOND_ALIGN) 2699 align = UMEM_SECOND_ALIGN; 2700 else 2701 align = UMEM_ALIGN; 2702 } else if (align < UMEM_ALIGN && (cflags & UMC_NOTOUCH) == 0) 2703 align = UMEM_ALIGN; 2704 2705 2706 /* 2707 * Get a umem_cache structure. We arrange that cp->cache_cpu[] 2708 * is aligned on a UMEM_CPU_CACHE_SIZE boundary to prevent 2709 * false sharing of per-CPU data. 2710 */ 2711 cp = vmem_xalloc(umem_cache_arena, csize, UMEM_CPU_CACHE_SIZE, phase, 2712 0, NULL, NULL, VM_NOSLEEP); 2713 2714 if (cp == NULL) { 2715 errno = EAGAIN; 2716 return (NULL); 2717 } 2718 2719 bzero(cp, csize); 2720 2721 (void) mutex_lock(&umem_flags_lock); 2722 if (umem_flags & UMF_RANDOMIZE) 2723 umem_flags = (((umem_flags | ~UMF_RANDOM) + 1) & UMF_RANDOM) | 2724 UMF_RANDOMIZE; 2725 cp->cache_flags = umem_flags | (cflags & UMF_DEBUG); 2726 (void) mutex_unlock(&umem_flags_lock); 2727 2728 /* 2729 * Make sure all the various flags are reasonable. 2730 */ 2731 if (cp->cache_flags & UMF_LITE) { 2732 if (bufsize >= umem_lite_minsize && 2733 align <= umem_lite_maxalign && 2734 P2PHASE(bufsize, umem_lite_maxalign) != 0) { 2735 cp->cache_flags |= UMF_BUFTAG; 2736 cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL); 2737 } else { 2738 cp->cache_flags &= ~UMF_DEBUG; 2739 } 2740 } 2741 2742 if ((cflags & UMC_QCACHE) && (cp->cache_flags & UMF_AUDIT)) 2743 cp->cache_flags |= UMF_NOMAGAZINE; 2744 2745 if (cflags & UMC_NODEBUG) 2746 cp->cache_flags &= ~UMF_DEBUG; 2747 2748 if (cflags & UMC_NOTOUCH) 2749 cp->cache_flags &= ~UMF_TOUCH; 2750 2751 if (cflags & UMC_NOHASH) 2752 cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL); 2753 2754 if (cflags & UMC_NOMAGAZINE) 2755 cp->cache_flags |= UMF_NOMAGAZINE; 2756 2757 if ((cp->cache_flags & UMF_AUDIT) && !(cflags & UMC_NOTOUCH)) 2758 cp->cache_flags |= UMF_REDZONE; 2759 2760 if ((cp->cache_flags & UMF_BUFTAG) && bufsize >= umem_minfirewall && 2761 !(cp->cache_flags & UMF_LITE) && !(cflags & UMC_NOHASH)) 2762 cp->cache_flags |= UMF_FIREWALL; 2763 2764 if (vmp != umem_default_arena || umem_firewall_arena == NULL) 2765 cp->cache_flags &= ~UMF_FIREWALL; 2766 2767 if (cp->cache_flags & UMF_FIREWALL) { 2768 cp->cache_flags &= ~UMF_BUFTAG; 2769 cp->cache_flags |= UMF_NOMAGAZINE; 2770 ASSERT(vmp == umem_default_arena); 2771 vmp = umem_firewall_arena; 2772 } 2773 2774 /* 2775 * Set cache properties. 
2776 */ 2777 (void) strncpy(cp->cache_name, name, sizeof (cp->cache_name) - 1); 2778 cp->cache_bufsize = bufsize; 2779 cp->cache_align = align; 2780 cp->cache_constructor = constructor; 2781 cp->cache_destructor = destructor; 2782 cp->cache_reclaim = reclaim; 2783 cp->cache_private = private; 2784 cp->cache_arena = vmp; 2785 cp->cache_cflags = cflags; 2786 cp->cache_cpu_mask = umem_cpu_mask; 2787 2788 /* 2789 * Determine the chunk size. 2790 */ 2791 chunksize = bufsize; 2792 2793 if (align >= UMEM_ALIGN) { 2794 chunksize = P2ROUNDUP(chunksize, UMEM_ALIGN); 2795 cp->cache_bufctl = chunksize - UMEM_ALIGN; 2796 } 2797 2798 if (cp->cache_flags & UMF_BUFTAG) { 2799 cp->cache_bufctl = chunksize; 2800 cp->cache_buftag = chunksize; 2801 chunksize += sizeof (umem_buftag_t); 2802 } 2803 2804 if (cp->cache_flags & UMF_DEADBEEF) { 2805 cp->cache_verify = MIN(cp->cache_buftag, umem_maxverify); 2806 if (cp->cache_flags & UMF_LITE) 2807 cp->cache_verify = MIN(cp->cache_verify, UMEM_ALIGN); 2808 } 2809 2810 cp->cache_contents = MIN(cp->cache_bufctl, umem_content_maxsave); 2811 2812 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align); 2813 2814 if (chunksize < bufsize) { 2815 errno = ENOMEM; 2816 goto fail; 2817 } 2818 2819 /* 2820 * Now that we know the chunk size, determine the optimal slab size. 2821 */ 2822 if (vmp == umem_firewall_arena) { 2823 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum); 2824 cp->cache_mincolor = cp->cache_slabsize - chunksize; 2825 cp->cache_maxcolor = cp->cache_mincolor; 2826 cp->cache_flags |= UMF_HASH; 2827 ASSERT(!(cp->cache_flags & UMF_BUFTAG)); 2828 } else if ((cflags & UMC_NOHASH) || (!(cflags & UMC_NOTOUCH) && 2829 !(cp->cache_flags & UMF_AUDIT) && 2830 chunksize < vmp->vm_quantum / UMEM_VOID_FRACTION)) { 2831 cp->cache_slabsize = vmp->vm_quantum; 2832 cp->cache_mincolor = 0; 2833 cp->cache_maxcolor = 2834 (cp->cache_slabsize - sizeof (umem_slab_t)) % chunksize; 2835 2836 if (chunksize + sizeof (umem_slab_t) > cp->cache_slabsize) { 2837 errno = EINVAL; 2838 goto fail; 2839 } 2840 ASSERT(!(cp->cache_flags & UMF_AUDIT)); 2841 } else { 2842 size_t chunks, bestfit, waste, slabsize; 2843 size_t minwaste = LONG_MAX; 2844 2845 for (chunks = 1; chunks <= UMEM_VOID_FRACTION; chunks++) { 2846 slabsize = P2ROUNDUP(chunksize * chunks, 2847 vmp->vm_quantum); 2848 /* 2849 * check for overflow 2850 */ 2851 if ((slabsize / chunks) < chunksize) { 2852 errno = ENOMEM; 2853 goto fail; 2854 } 2855 chunks = slabsize / chunksize; 2856 waste = (slabsize % chunksize) / chunks; 2857 if (waste < minwaste) { 2858 minwaste = waste; 2859 bestfit = slabsize; 2860 } 2861 } 2862 if (cflags & UMC_QCACHE) 2863 bestfit = MAX(1 << highbit(3 * vmp->vm_qcache_max), 64); 2864 cp->cache_slabsize = bestfit; 2865 cp->cache_mincolor = 0; 2866 cp->cache_maxcolor = bestfit % chunksize; 2867 cp->cache_flags |= UMF_HASH; 2868 } 2869 2870 if (cp->cache_flags & UMF_HASH) { 2871 ASSERT(!(cflags & UMC_NOHASH)); 2872 cp->cache_bufctl_cache = (cp->cache_flags & UMF_AUDIT) ? 2873 umem_bufctl_audit_cache : umem_bufctl_cache; 2874 } 2875 2876 if (cp->cache_maxcolor >= vmp->vm_quantum) 2877 cp->cache_maxcolor = vmp->vm_quantum - 1; 2878 2879 cp->cache_color = cp->cache_mincolor; 2880 2881 /* 2882 * Initialize the rest of the slab layer. 
2883 */ 2884 (void) mutex_init(&cp->cache_lock, USYNC_THREAD, NULL); 2885 2886 cp->cache_freelist = &cp->cache_nullslab; 2887 cp->cache_nullslab.slab_cache = cp; 2888 cp->cache_nullslab.slab_refcnt = -1; 2889 cp->cache_nullslab.slab_next = &cp->cache_nullslab; 2890 cp->cache_nullslab.slab_prev = &cp->cache_nullslab; 2891 2892 if (cp->cache_flags & UMF_HASH) { 2893 cp->cache_hash_table = vmem_alloc(umem_hash_arena, 2894 UMEM_HASH_INITIAL * sizeof (void *), VM_NOSLEEP); 2895 if (cp->cache_hash_table == NULL) { 2896 errno = EAGAIN; 2897 goto fail_lock; 2898 } 2899 bzero(cp->cache_hash_table, 2900 UMEM_HASH_INITIAL * sizeof (void *)); 2901 cp->cache_hash_mask = UMEM_HASH_INITIAL - 1; 2902 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1; 2903 } 2904 2905 /* 2906 * Initialize the depot. 2907 */ 2908 (void) mutex_init(&cp->cache_depot_lock, USYNC_THREAD, NULL); 2909 2910 for (mtp = umem_magtype; chunksize <= mtp->mt_minbuf; mtp++) 2911 continue; 2912 2913 cp->cache_magtype = mtp; 2914 2915 /* 2916 * Initialize the CPU layer. 2917 */ 2918 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) { 2919 umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; 2920 (void) mutex_init(&ccp->cc_lock, USYNC_THREAD, NULL); 2921 ccp->cc_flags = cp->cache_flags; 2922 ccp->cc_rounds = -1; 2923 ccp->cc_prounds = -1; 2924 } 2925 2926 /* 2927 * Add the cache to the global list. This makes it visible 2928 * to umem_update(), so the cache must be ready for business. 2929 */ 2930 (void) mutex_lock(&umem_cache_lock); 2931 cp->cache_next = cnext = &umem_null_cache; 2932 cp->cache_prev = cprev = umem_null_cache.cache_prev; 2933 cnext->cache_prev = cp; 2934 cprev->cache_next = cp; 2935 (void) mutex_unlock(&umem_cache_lock); 2936 2937 if (umem_ready == UMEM_READY) 2938 umem_cache_magazine_enable(cp); 2939 2940 return (cp); 2941 2942 fail_lock: 2943 (void) mutex_destroy(&cp->cache_lock); 2944 fail: 2945 vmem_xfree(umem_cache_arena, cp, csize); 2946 return (NULL); 2947 } 2948 2949 void 2950 umem_cache_destroy(umem_cache_t *cp) 2951 { 2952 int cpu_seqid; 2953 2954 /* 2955 * Remove the cache from the global cache list so that no new updates 2956 * will be scheduled on its behalf, wait for any pending tasks to 2957 * complete, purge the cache, and then destroy it. 2958 */ 2959 (void) mutex_lock(&umem_cache_lock); 2960 cp->cache_prev->cache_next = cp->cache_next; 2961 cp->cache_next->cache_prev = cp->cache_prev; 2962 cp->cache_prev = cp->cache_next = NULL; 2963 (void) mutex_unlock(&umem_cache_lock); 2964 2965 umem_remove_updates(cp); 2966 2967 umem_cache_magazine_purge(cp); 2968 2969 (void) mutex_lock(&cp->cache_lock); 2970 if (cp->cache_buftotal != 0) 2971 log_message("umem_cache_destroy: '%s' (%p) not empty\n", 2972 cp->cache_name, (void *)cp); 2973 cp->cache_reclaim = NULL; 2974 /* 2975 * The cache is now dead. There should be no further activity. 2976 * We enforce this by setting land mines in the constructor and 2977 * destructor routines that induce a segmentation fault if invoked. 
2978 */ 2979 cp->cache_constructor = (umem_constructor_t *)1; 2980 cp->cache_destructor = (umem_destructor_t *)2; 2981 (void) mutex_unlock(&cp->cache_lock); 2982 2983 if (cp->cache_hash_table != NULL) 2984 vmem_free(umem_hash_arena, cp->cache_hash_table, 2985 (cp->cache_hash_mask + 1) * sizeof (void *)); 2986 2987 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) 2988 (void) mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock); 2989 2990 (void) mutex_destroy(&cp->cache_depot_lock); 2991 (void) mutex_destroy(&cp->cache_lock); 2992 2993 vmem_free(umem_cache_arena, cp, UMEM_CACHE_SIZE(umem_max_ncpus)); 2994 } 2995 2996 void 2997 umem_alloc_sizes_clear(void) 2998 { 2999 int i; 3000 3001 umem_alloc_sizes[0] = UMEM_MAXBUF; 3002 for (i = 1; i < NUM_ALLOC_SIZES; i++) 3003 umem_alloc_sizes[i] = 0; 3004 } 3005 3006 void 3007 umem_alloc_sizes_add(size_t size_arg) 3008 { 3009 int i, j; 3010 size_t size = size_arg; 3011 3012 if (size == 0) { 3013 log_message("size_add: cannot add zero-sized cache\n", 3014 size, UMEM_MAXBUF); 3015 return; 3016 } 3017 3018 if (size > UMEM_MAXBUF) { 3019 log_message("size_add: %ld > %d, cannot add\n", size, 3020 UMEM_MAXBUF); 3021 return; 3022 } 3023 3024 if (umem_alloc_sizes[NUM_ALLOC_SIZES - 1] != 0) { 3025 log_message("size_add: no space in alloc_table for %d\n", 3026 size); 3027 return; 3028 } 3029 3030 if (P2PHASE(size, UMEM_ALIGN) != 0) { 3031 size = P2ROUNDUP(size, UMEM_ALIGN); 3032 log_message("size_add: rounding %d up to %d\n", size_arg, 3033 size); 3034 } 3035 3036 for (i = 0; i < NUM_ALLOC_SIZES; i++) { 3037 int cur = umem_alloc_sizes[i]; 3038 if (cur == size) { 3039 log_message("size_add: %ld already in table\n", 3040 size); 3041 return; 3042 } 3043 if (cur > size) 3044 break; 3045 } 3046 3047 for (j = NUM_ALLOC_SIZES - 1; j > i; j--) 3048 umem_alloc_sizes[j] = umem_alloc_sizes[j-1]; 3049 umem_alloc_sizes[i] = size; 3050 } 3051 3052 void 3053 umem_alloc_sizes_remove(size_t size) 3054 { 3055 int i; 3056 3057 if (size == UMEM_MAXBUF) { 3058 log_message("size_remove: cannot remove %ld\n", size); 3059 return; 3060 } 3061 3062 for (i = 0; i < NUM_ALLOC_SIZES; i++) { 3063 int cur = umem_alloc_sizes[i]; 3064 if (cur == size) 3065 break; 3066 else if (cur > size || cur == 0) { 3067 log_message("size_remove: %ld not found in table\n", 3068 size); 3069 return; 3070 } 3071 } 3072 3073 for (; i + 1 < NUM_ALLOC_SIZES; i++) 3074 umem_alloc_sizes[i] = umem_alloc_sizes[i+1]; 3075 umem_alloc_sizes[i] = 0; 3076 } 3077 3078 /* 3079 * We've been called back from libc to indicate that thread is terminating and 3080 * that it needs to release the per-thread memory that it has. We get to know 3081 * which entry in the thread's tmem array the allocation came from. Currently 3082 * this refers to first n umem_caches which makes this a pretty simple indexing 3083 * job. 
3084 */ 3085 static void 3086 umem_cache_tmem_cleanup(void *buf, int entry) 3087 { 3088 size_t size; 3089 umem_cache_t *cp; 3090 3091 size = umem_alloc_sizes[entry]; 3092 cp = umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT]; 3093 _umem_cache_free(cp, buf); 3094 } 3095 3096 static int 3097 umem_cache_init(void) 3098 { 3099 int i; 3100 size_t size, max_size; 3101 umem_cache_t *cp; 3102 umem_magtype_t *mtp; 3103 char name[UMEM_CACHE_NAMELEN + 1]; 3104 umem_cache_t *umem_alloc_caches[NUM_ALLOC_SIZES]; 3105 3106 for (i = 0; i < sizeof (umem_magtype) / sizeof (*mtp); i++) { 3107 mtp = &umem_magtype[i]; 3108 (void) snprintf(name, sizeof (name), "umem_magazine_%d", 3109 mtp->mt_magsize); 3110 mtp->mt_cache = umem_cache_create(name, 3111 (mtp->mt_magsize + 1) * sizeof (void *), 3112 mtp->mt_align, NULL, NULL, NULL, NULL, 3113 umem_internal_arena, UMC_NOHASH | UMC_INTERNAL); 3114 if (mtp->mt_cache == NULL) 3115 return (0); 3116 } 3117 3118 umem_slab_cache = umem_cache_create("umem_slab_cache", 3119 sizeof (umem_slab_t), 0, NULL, NULL, NULL, NULL, 3120 umem_internal_arena, UMC_NOHASH | UMC_INTERNAL); 3121 3122 if (umem_slab_cache == NULL) 3123 return (0); 3124 3125 umem_bufctl_cache = umem_cache_create("umem_bufctl_cache", 3126 sizeof (umem_bufctl_t), 0, NULL, NULL, NULL, NULL, 3127 umem_internal_arena, UMC_NOHASH | UMC_INTERNAL); 3128 3129 if (umem_bufctl_cache == NULL) 3130 return (0); 3131 3132 /* 3133 * The size of the umem_bufctl_audit structure depends upon 3134 * umem_stack_depth. See umem_impl.h for details on the size 3135 * restrictions. 3136 */ 3137 3138 size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth); 3139 max_size = UMEM_BUFCTL_AUDIT_MAX_SIZE; 3140 3141 if (size > max_size) { /* too large -- truncate */ 3142 int max_frames = UMEM_MAX_STACK_DEPTH; 3143 3144 ASSERT(UMEM_BUFCTL_AUDIT_SIZE_DEPTH(max_frames) <= max_size); 3145 3146 umem_stack_depth = max_frames; 3147 size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth); 3148 } 3149 3150 umem_bufctl_audit_cache = umem_cache_create("umem_bufctl_audit_cache", 3151 size, 0, NULL, NULL, NULL, NULL, umem_internal_arena, 3152 UMC_NOHASH | UMC_INTERNAL); 3153 3154 if (umem_bufctl_audit_cache == NULL) 3155 return (0); 3156 3157 if (vmem_backend & VMEM_BACKEND_MMAP) 3158 umem_va_arena = vmem_create("umem_va", 3159 NULL, 0, pagesize, 3160 vmem_alloc, vmem_free, heap_arena, 3161 8 * pagesize, VM_NOSLEEP); 3162 else 3163 umem_va_arena = heap_arena; 3164 3165 if (umem_va_arena == NULL) 3166 return (0); 3167 3168 umem_default_arena = vmem_create("umem_default", 3169 NULL, 0, pagesize, 3170 heap_alloc, heap_free, umem_va_arena, 3171 0, VM_NOSLEEP); 3172 3173 if (umem_default_arena == NULL) 3174 return (0); 3175 3176 /* 3177 * make sure the umem_alloc table initializer is correct 3178 */ 3179 i = sizeof (umem_alloc_table) / sizeof (*umem_alloc_table); 3180 ASSERT(umem_alloc_table[i - 1] == &umem_null_cache); 3181 3182 /* 3183 * Create the default caches to back umem_alloc() 3184 */ 3185 for (i = 0; i < NUM_ALLOC_SIZES; i++) { 3186 size_t cache_size = umem_alloc_sizes[i]; 3187 size_t align = 0; 3188 3189 if (cache_size == 0) 3190 break; /* 0 terminates the list */ 3191 3192 /* 3193 * If they allocate a multiple of the coherency granularity, 3194 * they get a coherency-granularity-aligned address. 
3195 */ 3196 if (IS_P2ALIGNED(cache_size, 64)) 3197 align = 64; 3198 if (IS_P2ALIGNED(cache_size, pagesize)) 3199 align = pagesize; 3200 (void) snprintf(name, sizeof (name), "umem_alloc_%lu", 3201 (long)cache_size); 3202 3203 cp = umem_cache_create(name, cache_size, align, 3204 NULL, NULL, NULL, NULL, NULL, UMC_INTERNAL); 3205 if (cp == NULL) 3206 return (0); 3207 3208 umem_alloc_caches[i] = cp; 3209 } 3210 3211 umem_tmem_off = _tmem_get_base(); 3212 _tmem_set_cleanup(umem_cache_tmem_cleanup); 3213 3214 if (umem_genasm_supported && !(umem_flags & UMF_DEBUG) && 3215 !(umem_flags & UMF_NOMAGAZINE) && 3216 umem_ptc_size > 0) { 3217 umem_ptc_enabled = umem_genasm(umem_alloc_sizes, 3218 umem_alloc_caches, i) == 0 ? 1 : 0; 3219 } 3220 3221 /* 3222 * Initialization cannot fail at this point. Make the caches 3223 * visible to umem_alloc() and friends. 3224 */ 3225 size = UMEM_ALIGN; 3226 for (i = 0; i < NUM_ALLOC_SIZES; i++) { 3227 size_t cache_size = umem_alloc_sizes[i]; 3228 3229 if (cache_size == 0) 3230 break; /* 0 terminates the list */ 3231 3232 cp = umem_alloc_caches[i]; 3233 3234 while (size <= cache_size) { 3235 umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT] = cp; 3236 size += UMEM_ALIGN; 3237 } 3238 } 3239 ASSERT(size - UMEM_ALIGN == UMEM_MAXBUF); 3240 return (1); 3241 } 3242 3243 /* 3244 * umem_startup() is called early on, and must be called explicitly if we're 3245 * the standalone version. 3246 */ 3247 #ifdef UMEM_STANDALONE 3248 void 3249 #else 3250 #pragma init(umem_startup) 3251 static void 3252 #endif 3253 umem_startup(caddr_t start, size_t len, size_t pagesize, caddr_t minstack, 3254 caddr_t maxstack) 3255 { 3256 #ifdef UMEM_STANDALONE 3257 int idx; 3258 /* Standalone doesn't fork */ 3259 #else 3260 umem_forkhandler_init(); /* register the fork handler */ 3261 #endif 3262 3263 #ifdef __lint 3264 /* make lint happy */ 3265 minstack = maxstack; 3266 #endif 3267 3268 #ifdef UMEM_STANDALONE 3269 umem_ready = UMEM_READY_STARTUP; 3270 umem_init_env_ready = 0; 3271 3272 umem_min_stack = minstack; 3273 umem_max_stack = maxstack; 3274 3275 nofail_callback = NULL; 3276 umem_slab_cache = NULL; 3277 umem_bufctl_cache = NULL; 3278 umem_bufctl_audit_cache = NULL; 3279 heap_arena = NULL; 3280 heap_alloc = NULL; 3281 heap_free = NULL; 3282 umem_internal_arena = NULL; 3283 umem_cache_arena = NULL; 3284 umem_hash_arena = NULL; 3285 umem_log_arena = NULL; 3286 umem_oversize_arena = NULL; 3287 umem_va_arena = NULL; 3288 umem_default_arena = NULL; 3289 umem_firewall_va_arena = NULL; 3290 umem_firewall_arena = NULL; 3291 umem_memalign_arena = NULL; 3292 umem_transaction_log = NULL; 3293 umem_content_log = NULL; 3294 umem_failure_log = NULL; 3295 umem_slab_log = NULL; 3296 umem_cpu_mask = 0; 3297 3298 umem_cpus = &umem_startup_cpu; 3299 umem_startup_cpu.cpu_cache_offset = UMEM_CACHE_SIZE(0); 3300 umem_startup_cpu.cpu_number = 0; 3301 3302 bcopy(&umem_null_cache_template, &umem_null_cache, 3303 sizeof (umem_cache_t)); 3304 3305 for (idx = 0; idx < (UMEM_MAXBUF >> UMEM_ALIGN_SHIFT); idx++) 3306 umem_alloc_table[idx] = &umem_null_cache; 3307 #endif 3308 3309 /* 3310 * Perform initialization specific to the way we've been compiled 3311 * (library or standalone) 3312 */ 3313 umem_type_init(start, len, pagesize); 3314 3315 vmem_startup(); 3316 } 3317 3318 int 3319 umem_init(void) 3320 { 3321 size_t maxverify, minfirewall; 3322 size_t size; 3323 int idx; 3324 umem_cpu_t *new_cpus; 3325 3326 vmem_t *memalign_arena, *oversize_arena; 3327 3328 if (thr_self() != umem_init_thr) { 3329 /* 3330 * The usual case 
-- non-recursive invocation of umem_init(). 3331 */ 3332 (void) mutex_lock(&umem_init_lock); 3333 if (umem_ready != UMEM_READY_STARTUP) { 3334 /* 3335 * someone else beat us to initializing umem. Wait 3336 * for them to complete, then return. 3337 */ 3338 while (umem_ready == UMEM_READY_INITING) { 3339 int cancel_state; 3340 3341 (void) pthread_setcancelstate( 3342 PTHREAD_CANCEL_DISABLE, &cancel_state); 3343 (void) cond_wait(&umem_init_cv, 3344 &umem_init_lock); 3345 (void) pthread_setcancelstate( 3346 cancel_state, NULL); 3347 } 3348 ASSERT(umem_ready == UMEM_READY || 3349 umem_ready == UMEM_READY_INIT_FAILED); 3350 (void) mutex_unlock(&umem_init_lock); 3351 return (umem_ready == UMEM_READY); 3352 } 3353 3354 ASSERT(umem_ready == UMEM_READY_STARTUP); 3355 ASSERT(umem_init_env_ready == 0); 3356 3357 umem_ready = UMEM_READY_INITING; 3358 umem_init_thr = thr_self(); 3359 3360 (void) mutex_unlock(&umem_init_lock); 3361 umem_setup_envvars(0); /* can recurse -- see below */ 3362 if (umem_init_env_ready) { 3363 /* 3364 * initialization was completed already 3365 */ 3366 ASSERT(umem_ready == UMEM_READY || 3367 umem_ready == UMEM_READY_INIT_FAILED); 3368 ASSERT(umem_init_thr == 0); 3369 return (umem_ready == UMEM_READY); 3370 } 3371 } else if (!umem_init_env_ready) { 3372 /* 3373 * The umem_setup_envvars() call (above) makes calls into 3374 * the dynamic linker and directly into user-supplied code. 3375 * Since we cannot know what that code will do, we could be 3376 * recursively invoked (by, say, a malloc() call in the code 3377 * itself, or in a (C++) _init section it causes to be fired). 3378 * 3379 * This code is where we end up if such recursion occurs. We 3380 * first clean up any partial results in the envvar code, then 3381 * proceed to finish initialization processing in the recursive 3382 * call. The original call will notice this, and return 3383 * immediately. 3384 */ 3385 umem_setup_envvars(1); /* clean up any partial state */ 3386 } else { 3387 umem_panic( 3388 "recursive allocation while initializing umem\n"); 3389 } 3390 umem_init_env_ready = 1; 3391 3392 /* 3393 * From this point until we finish, recursion into umem_init() will 3394 * cause a umem_panic(). 
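	 * (A recursive caller would now find umem_init_env_ready set and
	 * umem_init_thr equal to its own thread, which is exactly the
	 * umem_panic() arm of the if/else chain above.)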
3395 */ 3396 maxverify = minfirewall = ULONG_MAX; 3397 3398 /* LINTED constant condition */ 3399 if (sizeof (umem_cpu_cache_t) != UMEM_CPU_CACHE_SIZE) { 3400 umem_panic("sizeof (umem_cpu_cache_t) = %d, should be %d\n", 3401 sizeof (umem_cpu_cache_t), UMEM_CPU_CACHE_SIZE); 3402 } 3403 3404 umem_max_ncpus = umem_get_max_ncpus(); 3405 3406 /* 3407 * load tunables from environment 3408 */ 3409 umem_process_envvars(); 3410 3411 if (issetugid()) 3412 umem_mtbf = 0; 3413 3414 /* 3415 * set up vmem 3416 */ 3417 if (!(umem_flags & UMF_AUDIT)) 3418 vmem_no_debug(); 3419 3420 heap_arena = vmem_heap_arena(&heap_alloc, &heap_free); 3421 3422 pagesize = heap_arena->vm_quantum; 3423 3424 umem_internal_arena = vmem_create("umem_internal", NULL, 0, pagesize, 3425 heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP); 3426 3427 umem_default_arena = umem_internal_arena; 3428 3429 if (umem_internal_arena == NULL) 3430 goto fail; 3431 3432 umem_cache_arena = vmem_create("umem_cache", NULL, 0, UMEM_ALIGN, 3433 vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP); 3434 3435 umem_hash_arena = vmem_create("umem_hash", NULL, 0, UMEM_ALIGN, 3436 vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP); 3437 3438 umem_log_arena = vmem_create("umem_log", NULL, 0, UMEM_ALIGN, 3439 heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP); 3440 3441 umem_firewall_va_arena = vmem_create("umem_firewall_va", 3442 NULL, 0, pagesize, 3443 umem_firewall_va_alloc, umem_firewall_va_free, heap_arena, 3444 0, VM_NOSLEEP); 3445 3446 if (umem_cache_arena == NULL || umem_hash_arena == NULL || 3447 umem_log_arena == NULL || umem_firewall_va_arena == NULL) 3448 goto fail; 3449 3450 umem_firewall_arena = vmem_create("umem_firewall", NULL, 0, pagesize, 3451 heap_alloc, heap_free, umem_firewall_va_arena, 0, 3452 VM_NOSLEEP); 3453 3454 if (umem_firewall_arena == NULL) 3455 goto fail; 3456 3457 oversize_arena = vmem_create("umem_oversize", NULL, 0, pagesize, 3458 heap_alloc, heap_free, minfirewall < ULONG_MAX ? 3459 umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP); 3460 3461 memalign_arena = vmem_create("umem_memalign", NULL, 0, UMEM_ALIGN, 3462 heap_alloc, heap_free, minfirewall < ULONG_MAX ? 
3463 umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP); 3464 3465 if (oversize_arena == NULL || memalign_arena == NULL) 3466 goto fail; 3467 3468 if (umem_max_ncpus > CPUHINT_MAX()) 3469 umem_max_ncpus = CPUHINT_MAX(); 3470 3471 while ((umem_max_ncpus & (umem_max_ncpus - 1)) != 0) 3472 umem_max_ncpus++; 3473 3474 if (umem_max_ncpus == 0) 3475 umem_max_ncpus = 1; 3476 3477 size = umem_max_ncpus * sizeof (umem_cpu_t); 3478 new_cpus = vmem_alloc(umem_internal_arena, size, VM_NOSLEEP); 3479 if (new_cpus == NULL) 3480 goto fail; 3481 3482 bzero(new_cpus, size); 3483 for (idx = 0; idx < umem_max_ncpus; idx++) { 3484 new_cpus[idx].cpu_number = idx; 3485 new_cpus[idx].cpu_cache_offset = UMEM_CACHE_SIZE(idx); 3486 } 3487 umem_cpus = new_cpus; 3488 umem_cpu_mask = (umem_max_ncpus - 1); 3489 3490 if (umem_maxverify == 0) 3491 umem_maxverify = maxverify; 3492 3493 if (umem_minfirewall == 0) 3494 umem_minfirewall = minfirewall; 3495 3496 /* 3497 * Set up updating and reaping 3498 */ 3499 umem_reap_next = gethrtime() + NANOSEC; 3500 3501 #ifndef UMEM_STANDALONE 3502 (void) gettimeofday(&umem_update_next, NULL); 3503 #endif 3504 3505 /* 3506 * Set up logging -- failure here is okay, since it will just disable 3507 * the logs 3508 */ 3509 if (umem_logging) { 3510 umem_transaction_log = umem_log_init(umem_transaction_log_size); 3511 umem_content_log = umem_log_init(umem_content_log_size); 3512 umem_failure_log = umem_log_init(umem_failure_log_size); 3513 umem_slab_log = umem_log_init(umem_slab_log_size); 3514 } 3515 3516 /* 3517 * Set up caches -- if successful, initialization cannot fail, since 3518 * allocations from other threads can now succeed. 3519 */ 3520 if (umem_cache_init() == 0) { 3521 log_message("unable to create initial caches\n"); 3522 goto fail; 3523 } 3524 umem_oversize_arena = oversize_arena; 3525 umem_memalign_arena = memalign_arena; 3526 3527 umem_cache_applyall(umem_cache_magazine_enable); 3528 3529 /* 3530 * initialization done, ready to go 3531 */ 3532 (void) mutex_lock(&umem_init_lock); 3533 umem_ready = UMEM_READY; 3534 umem_init_thr = 0; 3535 (void) cond_broadcast(&umem_init_cv); 3536 (void) mutex_unlock(&umem_init_lock); 3537 return (1); 3538 3539 fail: 3540 log_message("umem initialization failed\n"); 3541 3542 (void) mutex_lock(&umem_init_lock); 3543 umem_ready = UMEM_READY_INIT_FAILED; 3544 umem_init_thr = 0; 3545 (void) cond_broadcast(&umem_init_cv); 3546 (void) mutex_unlock(&umem_init_lock); 3547 return (0); 3548 } 3549