1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * Copyright (c) 2019 Joyent, Inc.
29 * Copyright (c) 2015 by Delphix. All rights reserved.
30 */
31
32 /*
33 * based on usr/src/uts/common/os/kmem.c r1.64 from 2001/12/18
34 *
35 * The slab allocator, as described in the following two papers:
36 *
37 * Jeff Bonwick,
38 * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
39 * Proceedings of the Summer 1994 Usenix Conference.
40 * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
41 *
42 * Jeff Bonwick and Jonathan Adams,
43 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
44 * Arbitrary Resources.
45 * Proceedings of the 2001 Usenix Conference.
46 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
47 *
48 * 1. Overview
49 * -----------
50 * umem is very close to kmem in implementation. There are seven major
51 * areas of divergence:
52 *
53 * * Initialization
54 *
55 * * CPU handling
56 *
57 * * umem_update()
58 *
59 * * KM_SLEEP vs. UMEM_NOFAIL
60 *
61 * * lock ordering
62 *
63 * * changing UMEM_MAXBUF
64 *
65 * * Per-thread caching for malloc/free
66 *
67 * 2. Initialization
68 * -----------------
69 * kmem is initialized early on in boot, and knows that no one will call
70 * into it before it is ready. umem does not have these luxuries. Instead,
71 * initialization is divided into two phases:
72 *
73 * * library initialization, and
74 *
75 * * first use
76 *
77 * umem's full initialization happens at the time of the first allocation
78 * request (via malloc() and friends, umem_alloc(), or umem_zalloc()),
79 * or the first call to umem_cache_create().
80 *
81 * umem_free() and umem_cache_alloc() do not require special handling,
82 * since the only way to get valid arguments for them is to successfully
83 * call a function from the first group.
84 *
85 * 2.1. Library Initialization: umem_startup()
86 * -------------------------------------------
87 * umem_startup() is libumem.so's .init section. It calls pthread_atfork()
88 * to install the handlers necessary for umem's Fork1-Safety. Because of
89 * race condition issues, all other pre-umem_init() initialization is done
90 * statically (i.e. by the dynamic linker).
91 *
92 * For standalone use, umem_startup() returns everything to its initial
93 * state.
94 *
95 * 2.2. First use: umem_init()
96 * ------------------------------
97 * The first time any memory allocation function is used, we have to
98 * create the backing caches and vmem arenas which are needed for it.
99 * umem_init() is the central point for that task. When it completes,
100 * umem_ready is either UMEM_READY (all set) or UMEM_READY_INIT_FAILED (unable
101 * to initialize, probably due to lack of memory).
102 *
103 * There are four different paths from which umem_init() is called:
104 *
105 * * from umem_alloc() or umem_zalloc(), with 0 < size < UMEM_MAXBUF,
106 *
107 * * from umem_alloc() or umem_zalloc(), with size > UMEM_MAXBUF,
108 *
109 * * from umem_cache_create(), and
110 *
111 * * from memalign(), with align > UMEM_ALIGN.
112 *
113 * The last three just check if umem is initialized, and call umem_init()
114 * if it is not. For performance reasons, the first case is more complicated.
115 *
116 * 2.2.1. umem_alloc()/umem_zalloc(), with 0 < size < UMEM_MAXBUF
117 * -----------------------------------------------------------------
118 * In this case, umem_cache_alloc(&umem_null_cache, ...) is called.
119 * There is special-case code which causes any allocation from
120 * &umem_null_cache to fail by returning (NULL), regardless of the
121 * flags argument.
122 *
123 * So umem_cache_alloc() returns NULL, and umem_alloc()/umem_zalloc() call
124 * umem_alloc_retry(). umem_alloc_retry() sees that the allocation
125 * was against &umem_null_cache, and calls umem_init().
126 *
127 * If initialization is successful, umem_alloc_retry() returns 1, which
128 * causes umem_alloc()/umem_zalloc() to start over and load the (now
129 * valid) cache pointer from umem_alloc_table.
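 *
 * As a hedged sketch (the authoritative logic lives in _umem_alloc() and
 * umem_alloc_retry() below; "buf" and "umflag" are just illustrative names),
 * the retry pattern looks roughly like:
 *
 *	retry:
 *		cp = umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT];
 *		buf = _umem_cache_alloc(cp, umflag);
 *		if (buf == NULL && umem_alloc_retry(cp, umflag))
 *			goto retry;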
130 *
131 * 2.2.2. Dealing with race conditions
132 * -----------------------------------
133 * There are a couple of race conditions resulting from the initialization
134 * code that we have to guard against:
135 *
136 * * In umem_cache_create(), there is a special UMC_INTERNAL cflag
137 * that is passed for caches created during initialization. It
138 * is illegal for a user to try to create a UMC_INTERNAL cache.
139 * This allows initialization to proceed, but any other
140 * umem_cache_create()s will block by calling umem_init().
141 *
142 * * Since umem_null_cache has a 1-element cache_cpu, its cache_cpu_mask
143 * is always zero. umem_cache_alloc() uses cp->cache_cpu_mask to
144 * mask the cpu number. This prevents a race between grabbing a
145 * cache pointer out of umem_alloc_table and growing the cpu array.
146 *
147 *
148 * 3. CPU handling
149 * ---------------
150 * kmem uses the CPU's sequence number to determine which "cpu cache" to
151 * use for an allocation. Currently, there is no way to get the sequence
152 * number in userspace.
153 *
154 * umem keeps track of cpu information in umem_cpus, an array of umem_max_ncpus
155 * umem_cpu_t structures. CPUHINT() is a "hint" function, which we then mask
156 * with either umem_cpu_mask or cp->cache_cpu_mask to find the actual "cpu" id.
157 * The mechanics of this are all in the CPU(mask) macro.
158 *
159 * Currently, umem uses _lwp_self() as its hint.
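 *
 * For example, umem_log_enter() below selects a per-CPU log buffer with:
 *
 *	clhp = &lhp->lh_cpu[CPU(umem_cpu_mask)->cpu_number];
 *
 * and a cache's per-CPU layer is selected the same way, using
 * cp->cache_cpu_mask instead of umem_cpu_mask.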
160 *
161 *
162 * 4. The update thread
163 * --------------------
164 * kmem uses a task queue, kmem_taskq, to do periodic maintenance on
165 * every kmem cache. vmem has a periodic timeout for hash table resizing.
166 * The kmem_taskq also provides a separate context for kmem_cache_reap() calls
167 * to run in, avoiding issues with the context of kmem_reap() callers.
168 *
169 * Instead, umem has the concept of "updates", which are asynchronous requests
170 * for work attached to single caches. All caches with pending work are
171 * on a doubly linked list rooted at the umem_null_cache. All update state
172 * is protected by the umem_update_lock mutex, and the umem_update_cv is used
173 * for notification between threads.
174 *
175 * 4.1. Cache states with regards to updates
176 * -----------------------------------------
177 * A given cache is in one of three states:
178 *
179 * Inactive cache_uflags is zero, cache_u{next,prev} are NULL
180 *
181 * Work Requested cache_uflags is non-zero (but UMU_ACTIVE is not set),
182 * cache_u{next,prev} link the cache onto the global
183 * update list
184 *
185 * Active cache_uflags has UMU_ACTIVE set, cache_u{next,prev}
186 * are NULL, and either umem_update_thr or
187 * umem_st_update_thr are actively doing work on the
188 * cache.
189 *
190 * An update can be added to any cache in any state -- if the cache is
191 * Inactive, it transitions to being Work Requested. If the cache is
192 * Active, the worker will notice the new update and act on it before
193 * transitioning the cache to the Inactive state.
194 *
195 * If a cache is in the Active state, UMU_NOTIFY can be set, which asks
196 * the worker to broadcast the umem_update_cv when it has finished.
197 *
198 * 4.2. Update interface
199 * ---------------------
200 * umem_add_update() adds an update to a particular cache.
201 * umem_updateall() adds an update to all caches.
202 * umem_remove_updates() returns a cache to the Inactive state.
203 *
204 * umem_process_updates() processes all caches in the Work Requested state.
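 *
 * For example, a hedged sketch of scheduling a reap on one cache and later
 * forcing it back to the Inactive state (cp is any cache pointer):
 *
 *	umem_add_update(cp, UMU_REAP);
 *	...
 *	umem_remove_updates(cp);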
205 *
206 * 4.3. Reaping
207 * ------------
208 * When umem_reap() is called (at the time of heap growth), it schedules
209 * UMU_REAP updates on every cache. It then checks to see if the update
210 * thread exists (umem_update_thr != 0). If it does, it broadcasts
211 * the umem_update_cv to wake the update thread up, and returns.
212 *
213 * If the update thread does not exist (umem_update_thr == 0), and the
214 * program currently has multiple threads, umem_reap() attempts to create
215 * a new update thread.
216 *
217 * If the process is not multithreaded, or the creation fails, umem_reap()
218 * calls umem_st_update() to do an inline update.
219 *
220 * 4.4. The update thread
221 * ----------------------
222 * The update thread spends most of its time in cond_timedwait() on the
223 * umem_update_cv. It wakes up under two conditions:
224 *
225 * * The timedwait times out, in which case it needs to run a global
226 * update, or
227 *
228 * * someone cond_broadcast(3THR)s the umem_update_cv, in which case
229 * it needs to check if there are any caches in the Work Requested
230 * state.
231 *
232 * When it is time for another global update, umem calls umem_cache_update()
233 * on every cache, then calls vmem_update(), which tunes the vmem structures.
234 * umem_cache_update() can request further work using umem_add_update().
235 *
236 * After any work from the global update completes, the update timer is
237 * reset to umem_reap_interval seconds in the future. This makes the
238 * updates self-throttling.
239 *
240 * Reaps are similarly self-throttling. After a UMU_REAP update has
241 * been scheduled on all caches, umem_reap() sets a flag and wakes up the
242 * update thread. The update thread notices the flag, and resets the
243 * reap state.
244 *
245 * 4.5. Inline updates
246 * -------------------
247 * If the update thread is not running, umem_st_update() is used instead. It
248 * immediately does a global update (as above), then calls
249 * umem_process_updates() to process both the reaps that umem_reap() added and
250 * any work generated by the global update. Afterwards, it resets the reap
251 * state.
252 *
253 * While umem_st_update() is running, umem_st_update_thr holds the thread
254 * id of the thread performing the update.
255 *
256 * 4.6. Updates and fork1()
257 * ------------------------
258 * umem has fork1() pre- and post-handlers which lock up (and release) every
259 * mutex in every cache. They also lock up the umem_update_lock. Since
260 * fork1() only copies over a single lwp, other threads (including the update
261 * thread) could have been actively using a cache in the parent. This
262 * can lead to inconsistencies in the child process.
263 *
264 * Because we locked all of the mutexes, the only possible inconsistencies are:
265 *
266 * * a umem_cache_alloc() could leak its buffer.
267 *
268 * * a caller of umem_depot_alloc() could leak a magazine, and all the
269 * buffers contained in it.
270 *
271 * * a cache could be in the Active update state. In the child, there
272 * would be no thread actually working on it.
273 *
274 * * a umem_hash_rescale() could leak the new hash table.
275 *
276 * * a umem_magazine_resize() could be in progress.
277 *
278 * * a umem_reap() could be in progress.
279 *
280 * The memory leaks we can't do anything about. umem_release_child() resets
281 * the update state and moves any caches in the Active state to the Work
282 * Requested state. This might cause some updates to be re-run, but UMU_REAP and
283 * UMU_HASH_RESCALE are effectively idempotent, and the worst that can
284 * happen from umem_magazine_resize() is resizing the magazine twice in close
285 * succession.
286 *
287 * Much of the cleanup in umem_release_child() is skipped if
288 * umem_st_update_thr == thr_self(). This is so that applications which call
289 * fork1() from a cache callback do not break. Needless to say, any such
290 * application is tremendously broken.
291 *
292 *
293 * 5. KM_SLEEP vs. UMEM_NOFAIL
294 * ---------------------------
295 * Allocations against kmem and vmem have two basic modes: SLEEP and
296 * NOSLEEP. A sleeping allocation will go to sleep (waiting for
297 * more memory) instead of failing (returning NULL).
298 *
299 * SLEEP allocations presume an extremely multithreaded model, with
300 * a lot of allocation and deallocation activity. umem cannot presume
301 * that its clients have any particular type of behavior. Instead,
302 * it provides two types of allocations:
303 *
304 * * UMEM_DEFAULT, equivalent to KM_NOSLEEP (i.e. return NULL on
305 * failure)
306 *
307 * * UMEM_NOFAIL, which, on failure, calls an optional callback
308 * (registered with umem_nofail_callback()).
309 *
310 * The callback is invoked with no locks held, and can do an arbitrary
311 * amount of work. It then has a choice between:
312 *
313 * * Returning UMEM_CALLBACK_RETRY, which will cause the allocation
314 * to be restarted.
315 *
316 * * Returning UMEM_CALLBACK_EXIT(status), which will cause exit(2)
317 * to be invoked with status. If multiple threads attempt to do
318 * this simultaneously, only one will call exit(2).
319 *
320 * * Doing some kind of non-local exit (thr_exit(3thr), longjmp(3C),
321 * etc.)
322 *
323 * The default callback returns UMEM_CALLBACK_EXIT(255).
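 *
 * For example, a minimal sketch of installing a retrying callback (the
 * callback name and what it does are purely illustrative):
 *
 *	static int
 *	nomem_cb(void)
 *	{
 *		(void) fprintf(stderr, "out of memory; retrying\n");
 *		(void) sleep(1);
 *		return (UMEM_CALLBACK_RETRY);
 *	}
 *
 *	...
 *	umem_nofail_callback(nomem_cb);
 *	buf = umem_alloc(len, UMEM_NOFAIL);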
324 *
325 * To have these callbacks without risk of state corruption (in the case of
326 * a non-local exit), we have to ensure that the callbacks get invoked
327 * close to the original allocation, with no inconsistent state or held
328 * locks. The following steps are taken:
329 *
330 * * All invocations of vmem are VM_NOSLEEP.
331 *
332 * * All constructor callbacks (which can themselves do allocations)
333 * are passed UMEM_DEFAULT as their required allocation argument. This
334 * way, the constructor will fail, allowing the highest-level allocation
335 * to invoke the nofail callback.
336 *
337 * If a constructor callback _does_ do a UMEM_NOFAIL allocation, and
338 * the nofail callback does a non-local exit, we will leak the
339 * partially-constructed buffer.
340 *
341 *
342 * 6. Lock Ordering
343 * ----------------
344 * umem has a few more locks than kmem does, mostly in the update path. The
345 * overall lock ordering (earlier locks must be acquired first) is:
346 *
347 * umem_init_lock
348 *
349 * vmem_list_lock
350 * vmem_nosleep_lock.vmpl_mutex
351 * vmem_t's:
352 * vm_lock
353 * sbrk_lock
354 *
355 * umem_cache_lock
356 * umem_update_lock
357 * umem_flags_lock
358 * umem_cache_t's:
359 * cache_cpu[*].cc_lock
360 * cache_depot_lock
361 * cache_lock
362 * umem_log_header_t's:
363 * lh_cpu[*].clh_lock
364 * lh_lock
365 *
366 * 7. Changing UMEM_MAXBUF
367 * -----------------------
368 *
369 * When changing UMEM_MAXBUF, extra care has to be taken. It is not sufficient
370 * to simply increase this number. First, one must update the umem_alloc_table
371 * to have the appropriate number of entries based upon the new size. If this
372 * is not done, libumem will blow an assertion.
373 *
374 * The second place to update, which is optional, is umem_alloc_sizes. These
375 * determine the default cache sizes that we're going to support.
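 *
 * For example (assuming UMEM_ALIGN is 8, i.e. UMEM_ALIGN_SHIFT is 3):
 * umem_alloc_table[] below has UMEM_MAXBUF >> UMEM_ALIGN_SHIFT entries,
 * i.e. 131072 / 8 = 16384, which is the sixteen ALLOC_TABLE_1024 rows
 * defined below for the current 128k UMEM_MAXBUF. Doubling UMEM_MAXBUF to
 * 256k would therefore require thirty-two such rows.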
376 *
377 * 8. Per-thread caching for malloc/free
378 * -------------------------------------
379 *
380 * "Time is an illusion. Lunchtime doubly so." -- Douglas Adams
381 *
382 * Time may be an illusion, but CPU cycles aren't. While libumem is designed
383 * to be a highly scalable allocator, that scalability comes with a fixed cycle
384 * penalty even in the absence of contention: libumem must acquire (and
385 * release) a per-CPU lock for each allocation. When contention is low and
386 * malloc(3C) frequency is high, this overhead can dominate execution time. To
387 * alleviate this, we allow for per-thread caching, a lock-free means of caching
388 * recent deallocations on a per-thread basis for satisfying subsequent calls.
389 *
390 * In addition to improving performance, we also want to:
391 * * Minimize fragmentation
392 * * Not add additional memory overhead (no larger malloc tags)
393 *
394 * In the ulwp_t of each thread there is a private data structure called a
395 * tmem_t that looks like:
396 *
397 * typedef struct {
398 * size_t tm_size;
399 * void *tm_roots[NTMEMBASE]; (Currently 16)
400 * } tmem_t;
401 *
402 * Each of the roots is treated as the head of a linked list. Each entry in the
403 * list can be thought of as a void ** which points to the next entry, until one
404 * of them points to NULL. If the head points to NULL, the list is empty.
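 *
 * For illustration (tm and i are illustrative names), the basic list
 * operations look roughly like:
 *
 *	Pop (malloc side):	buf = tm->tm_roots[i];
 *				tm->tm_roots[i] = *(void **)buf;
 *
 *	Push (free side):	*(void **)buf = tm->tm_roots[i];
 *				tm->tm_roots[i] = buf;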
405 *
406 * Each head corresponds to a umem_cache. Currently there is a linear mapping
407 * where the first root corresponds to the first cache, second root to the
408 * second cache, etc. This works because every allocation that malloc makes to
409 * umem_alloc that can be satisfied by a umem_cache will actually return a
410 * number of bytes equal to the size of that cache. Because of this property and
411 * a one-to-one mapping between caches and roots we can guarantee that every
412 * entry in a given root's list will be able to satisfy the same requests as the
413 * corresponding cache.
414 *
415 * The choice of sixteen roots is based on where we believe we get the biggest
416 * bang for our buck. The per-thread caches will cache up to 256 byte and 448
417 * byte allocations on ILP32 and LP64 respectively. Generally applications plan
418 * more carefully how they do larger allocations than smaller ones. Therefore
419 * sixteen roots is a reasonable compromise between the amount of additional
420 * overhead per thread and the likelihood that a program will benefit from it.
421 *
422 * The maximum amount of memory that can be cached in each thread is determined
423 * by the perthread_cache UMEM_OPTION. It corresponds to the umem_ptc_size
424 * value. The default value for this is currently 1 MB. Once umem_init() has
425 * finished, this cannot be tuned without directly modifying the
426 * instruction text. If, upon calling free(3C), the amount cached would exceed
427 * this maximum, we return the buffer to the umem_cache instead
428 * of holding onto it in the thread.
429 *
430 * When a thread calls malloc(3C) it first determines which umem_cache it
431 * would be serviced by. If the allocation is not covered by ptcumem it goes to
432 * the normal malloc instead. Next, it checks whether the corresponding
433 * tm_roots list is empty. If it is empty, we go and allocate the memory from
434 * umem_alloc. If it is not empty, we remove the head of the list, set the
435 * appropriate malloc tags, and return that buffer.
436 *
437 * When a thread calls free(3C) it first looks at the malloc tag; if the tag is
438 * invalid or the allocation exceeds the largest cache in ptcumem, it sends the
439 * buffer off to the original free() to handle and clean up appropriately. Next,
440 * it checks if the allocation size is covered by one of the per-thread roots;
441 * if it isn't, it passes the buffer off to the original free() to be released.
442 * Finally, before inserting this buffer as the head, it checks if adding it
443 * would put the thread over its maximum cache size. If it would, it frees the
444 * buffer back to the umem_cache. Otherwise it increments the thread's total
445 * cached amount and makes the buffer the new head of the appropriate tm_root.
446 *
447 * When a thread exits, all of the buffers that it has in its per-thread cache
448 * will be passed to umem_free() and returned to the appropriate umem_cache.
449 *
450 * 8.1 Handling addition and removal of umem_caches
451 * ------------------------------------------------
452 *
453 * The set of umem_caches that are used to back calls to umem_alloc() and
454 * ultimately malloc() is determined at program execution time. The default set
455 * of caches is defined below in umem_alloc_sizes[]. Various umem_options exist
456 * that modify the set of caches: size_add, size_clear, and size_remove. Because
457 * the set of caches can only be determined once umem_init() has been called and
458 * we have the additional goals of minimizing fragmentation and metadata space
459 * overhead in the malloc tags, this forces our hand to go down a slightly
460 * different path: the one trodden by fasttrap and trapstat.
461 *
462 * During umem_init() we dynamically construct new versions of
463 * malloc(3C) and free(3C) that utilize the known cache sizes and then ensure
464 * that ptcmalloc and ptcfree replace malloc and free as entries in the PLT. If
465 * ptcmalloc and ptcfree cannot handle a request, they simply jump to the
466 * original libumem implementations.
467 *
468 * After creating all of the umem_caches, but before making them visible,
469 * umem_cache_init checks that umem_genasm_supported is non-zero. This value is
470 * set by each architecture in $ARCH/umem_genasm.c to indicate whether or not
471 * it supports this. If the value is zero, then this process is skipped.
472 * Similarly, if the cache size has been tuned to zero by UMEM_OPTIONS, then
473 * this is also skipped.
474 *
475 * In umem_genasm.c, each architecture implements a single
476 * function called umem_genasm() that is responsible for generating the
477 * appropriate versions of ptcmalloc() and ptcfree(), placing them in the
478 * appropriate memory location, and finally doing the switch from malloc() and
479 * free() to ptcmalloc() and ptcfree(). Once the change has been made, there is
480 * no way to switch back, short of restarting the program or modifying program
481 * text with mdb.
482 *
483 * 8.2 Modifying the Procedure Linkage Table (PLT)
484 * -----------------------------------------------
485 *
486 * The last piece of this puzzle is how we actually jam ptcmalloc() into the
487 * PLT. To handle this, we have defined two functions, _malloc and _free; we
488 * use a standard #pragma weak for malloc and free to direct them to those
489 * symbols. By default, those symbols have text defined as nops (reserved for
490 * our generated functions), and when they're invoked, they jump to the default
491 * malloc and free functions.
492 *
493 * When umem_genasm() is called, it makes _malloc and _free writable and goes
494 * through and updates the text provided by _malloc and _free just after
495 * the jump. Once both have been successfully generated, umem_genasm() nops
496 * over the original jump so that we now call into the genasm versions of
497 * these functions, and makes the functions read-only once again.
498 *
499 * 8.3 umem_genasm()
500 * -----------------
501 *
502 * umem_genasm() is currently implemented for i386 and amd64. This section
503 * describes the theory behind the construction. For specific byte code to
504 * assembly instructions and niceish C and asm versions of ptcmalloc and
505 * ptcfree, see the individual umem_genasm.c files. The layout consists of the
506 * following sections:
507 *
508 * o. function-specific prologue
509 * o. function-generic cache-selecting elements
510 * o. function-specific epilogue
511 *
512 * There are three different generic cache elements that exist:
513 *
514 * o. the last or only cache
515 * o. the intermediary caches if more than two
516 * o. the first one if more than one cache
517 *
518 * The malloc and free prologues and epilogues mimic the necessary portions of
519 * libumem's malloc and free. This includes things like checking for size
520 * overflow, and setting and verifying the malloc tags.
521 *
522 * It is an important constraint that these functions do not make use of the
523 * call instruction. The only jmp outside of the individual functions is to the
524 * original libumem malloc and free respectively. Because doing things like
525 * setting errno or raising an internal umem error on improper malloc tags would
526 * require using calls into the PLT, whenever we encounter one of those cases we
527 * just jump to the original malloc and free functions reusing the same stack
528 * frame.
529 *
530 * Each of the above sections, the three caches, and the malloc and free
531 * prologue and epilogue are implemented as blocks of machine code with the
532 * corresponding assembly in comments. There are known offsets into each block
533 * that correspond to locations of data and addresses that we only know at run
534 * time. These blocks are copied as necessary and the blanks filled in
535 * appropriately.
536 *
537 * As mentioned in section 8.2, the trampoline library uses specifically named
538 * variables to communicate the buffers and size to use. These variables are:
539 *
540 * o. umem_genasm_mptr: The buffer for ptcmalloc
541 * o. umem_genasm_msize: The size in bytes of the above buffer
542 * o. umem_genasm_fptr: The buffer for ptcfree
543 * o. umem_genasm_fsize: The size in bytes of the above buffer
544 *
545 * Finally, to enable the generated assembly we need to remove the previous jump
546 * to the actual malloc that exists at the start of these buffers. On x86, this
547 * is a five byte region. We could zero out the jump offset to be a jmp +0, but
548 * using nops can be faster, so we specifically use a single five byte nop
549 * on x86. When porting ptcumem to other architectures, the various
550 * opcode changes and options should be analyzed.
551 *
552 * 8.4 Interface with libc.so
553 * --------------------------
554 *
555 * The tmem_t structure, as described in the beginning of section 8, is part of a
556 * private interface with libc. There are three functions that exist to cover
557 * this. They are not documented in man pages or header files. They are in the
558 * SUNWprivate part of libc's mapfile.
559 *
560 * o. _tmem_get_base(void)
561 *
562 * Returns the offset from the ulwp_t (curthread) to the tmem_t structure.
563 * This is a constant for all threads and is effectively a way to do
564 * ::offsetof ulwp_t ul_tmem without having to know the specifics of the
565 * structure outside of libc.
566 *
567 * o. _tmem_get_nentries(void)
568 *
569 * Returns the number of roots that exist in the tmem_t. This is one part
570 * of the cap on the number of umem_caches that we can back with tmem.
571 *
572 * o. _tmem_set_cleanup(void (*)(void *, int))
573 *
574 * This sets a cleanup handler that gets called back when a thread exits.
575 * There is one call per buffer; the void * is a pointer to the buffer on
576 * the list, and the int is the index into the roots array for this buffer.
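 *
 * A hedged sketch of how libumem consumes this interface (ulwp is the
 * current thread's ulwp_t pointer and tm is an illustrative name; the real
 * logic lives in the startup code and the generated ptcmalloc/ptcfree):
 *
 *	umem_tmem_off = _tmem_get_base();
 *	tm = (tmem_t *)((uintptr_t)ulwp + umem_tmem_off);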
577 *
578 * 8.5 Tuning and disabling per-thread caching
579 * -------------------------------------------
580 *
581 * There is only one tunable for per-thread caching: the amount of memory each
582 * thread should be able to cache. This is specified via the perthread_cache
583 * UMEM_OPTION. No attempt is made to sanity check the specified
584 * value; the limit is simply the maximum value of a size_t.
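 *
 * For example, to cap each thread's cache at 64K for a single run (the
 * shell syntax and the size value here are illustrative):
 *
 *	$ UMEM_OPTIONS=perthread_cache=64k ./myprog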
585 *
586 * If the perthread_cache UMEM_OPTION is set to zero, nomagazines was requested,
587 * or UMEM_DEBUG has been turned on then we will never call into umem_genasm;
588 * however, the trampoline audit library and jump will still be in place.
589 *
590 * 8.6 Observing efficacy of per-thread caching
591 * --------------------------------------------
592 *
593 * To understand the efficacy of per-thread caching, use the ::umastat dcmd
594 * to see the percentage of capacity consumed on a per-thread basis, the
595 * degree to which each umem cache contributes to per-thread cache consumption,
596 * and the number of buffers in per-thread caches on a per-umem cache basis.
597 * If more detail is required, the specific buffers in a per-thread cache can
598 * be iterated over with the umem_ptc_* walkers. (These walkers allow an
599 * optional ulwp_t to be specified to iterate only over a particular thread's
600 * cache.)
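 *
 * For example, from mdb attached to a running process (the specific
 * per-cache walker name below is illustrative):
 *
 *	> ::umastat
 *	> ::walk umem_ptc_64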
601 */
602
603 #include <umem_impl.h>
604 #include <sys/vmem_impl_user.h>
605 #include "umem_base.h"
606 #include "vmem_base.h"
607
608 #include <sys/processor.h>
609 #include <sys/sysmacros.h>
610
611 #include <alloca.h>
612 #include <errno.h>
613 #include <limits.h>
614 #include <stdio.h>
615 #include <stdlib.h>
616 #include <string.h>
617 #include <strings.h>
618 #include <signal.h>
619 #include <unistd.h>
620 #include <atomic.h>
621
622 #include "misc.h"
623
624 #define UMEM_VMFLAGS(umflag) (VM_NOSLEEP)
625
626 size_t pagesize;
627
628 /*
629 * The default set of caches to back umem_alloc().
630 * These sizes should be reevaluated periodically.
631 *
632 * We want allocations that are multiples of the coherency granularity
633 * (64 bytes) to be satisfied from a cache which is a multiple of 64
634 * bytes, so that it will be 64-byte aligned. For all multiples of 64,
635 * the next umem cache size greater than or equal to it must be a
636 * multiple of 64.
637 *
638 * This table must be in sorted order, from smallest to highest. The
639 * highest slot must be UMEM_MAXBUF, and every slot afterwards must be
640 * zero.
641 */
642 static int umem_alloc_sizes[] = {
643 #ifdef _LP64
644 1 * 8,
645 1 * 16,
646 2 * 16,
647 3 * 16,
648 #else
649 1 * 8,
650 2 * 8,
651 3 * 8,
652 4 * 8, 5 * 8, 6 * 8, 7 * 8,
653 #endif
654 4 * 16, 5 * 16, 6 * 16, 7 * 16,
655 4 * 32, 5 * 32, 6 * 32, 7 * 32,
656 4 * 64, 5 * 64, 6 * 64, 7 * 64,
657 4 * 128, 5 * 128, 6 * 128, 7 * 128,
658 P2ALIGN(8192 / 7, 64),
659 P2ALIGN(8192 / 6, 64),
660 P2ALIGN(8192 / 5, 64),
661 P2ALIGN(8192 / 4, 64), 2304,
662 P2ALIGN(8192 / 3, 64),
663 P2ALIGN(8192 / 2, 64), 4544,
664 P2ALIGN(8192 / 1, 64), 9216,
665 4096 * 3,
666 8192 * 2, /* = 8192 * 2 */
667 24576, 32768, 40960, 49152, 57344, 65536, 73728, 81920,
668 90112, 98304, 106496, 114688, 122880, UMEM_MAXBUF, /* 128k */
669 /* 24 slots for user expansion */
670 0, 0, 0, 0, 0, 0, 0, 0,
671 0, 0, 0, 0, 0, 0, 0, 0,
672 0, 0, 0, 0, 0, 0, 0, 0,
673 };
674 #define NUM_ALLOC_SIZES (sizeof (umem_alloc_sizes) / sizeof (*umem_alloc_sizes))
675
676 static umem_magtype_t umem_magtype[] = {
677 { 1, 8, 3200, 65536 },
678 { 3, 16, 256, 32768 },
679 { 7, 32, 64, 16384 },
680 { 15, 64, 0, 8192 },
681 { 31, 64, 0, 4096 },
682 { 47, 64, 0, 2048 },
683 { 63, 64, 0, 1024 },
684 { 95, 64, 0, 512 },
685 { 143, 64, 0, 0 },
686 };
687
688 /*
689 * umem tunables
690 */
691 uint32_t umem_max_ncpus; /* # of CPU caches. */
692
693 uint32_t umem_stack_depth = 15; /* # stack frames in a bufctl_audit */
694 uint32_t umem_reap_interval = 10; /* max reaping rate (seconds) */
695 uint_t umem_depot_contention = 2; /* max failed trylocks per real interval */
696 uint_t umem_abort = 1; /* whether to abort on error */
697 uint_t umem_output = 0; /* whether to write to standard error */
698 uint_t umem_logging = 0; /* umem_log_enter() override */
699 uint32_t umem_mtbf = 0; /* mean time between failures [default: off] */
700 size_t umem_transaction_log_size; /* size of transaction log */
701 size_t umem_content_log_size; /* size of content log */
702 size_t umem_failure_log_size; /* failure log [4 pages per CPU] */
703 size_t umem_slab_log_size; /* slab create log [4 pages per CPU] */
704 size_t umem_content_maxsave = 256; /* UMF_CONTENTS max bytes to log */
705 size_t umem_lite_minsize = 0; /* minimum buffer size for UMF_LITE */
706 size_t umem_lite_maxalign = 1024; /* maximum buffer alignment for UMF_LITE */
707 size_t umem_maxverify; /* maximum bytes to inspect in debug routines */
708 size_t umem_minfirewall; /* hardware-enforced redzone threshold */
709 size_t umem_ptc_size = 1048576; /* size of per-thread cache (in bytes) */
710
711 uint_t umem_flags = 0;
712 uintptr_t umem_tmem_off;
713
714 mutex_t umem_init_lock; /* locks initialization */
715 cond_t umem_init_cv; /* initialization CV */
716 thread_t umem_init_thr; /* thread initializing */
717 int umem_init_env_ready; /* environ pre-initted */
718 int umem_ready = UMEM_READY_STARTUP;
719
720 int umem_ptc_enabled; /* per-thread caching enabled */
721
722 static umem_nofail_callback_t *nofail_callback;
723 static mutex_t umem_nofail_exit_lock;
724 static thread_t umem_nofail_exit_thr;
725
726 static umem_cache_t *umem_slab_cache;
727 static umem_cache_t *umem_bufctl_cache;
728 static umem_cache_t *umem_bufctl_audit_cache;
729
730 mutex_t umem_flags_lock;
731
732 static vmem_t *heap_arena;
733 static vmem_alloc_t *heap_alloc;
734 static vmem_free_t *heap_free;
735
736 static vmem_t *umem_internal_arena;
737 static vmem_t *umem_cache_arena;
738 static vmem_t *umem_hash_arena;
739 static vmem_t *umem_log_arena;
740 static vmem_t *umem_oversize_arena;
741 static vmem_t *umem_va_arena;
742 static vmem_t *umem_default_arena;
743 static vmem_t *umem_firewall_va_arena;
744 static vmem_t *umem_firewall_arena;
745
746 vmem_t *umem_memalign_arena;
747
748 umem_log_header_t *umem_transaction_log;
749 umem_log_header_t *umem_content_log;
750 umem_log_header_t *umem_failure_log;
751 umem_log_header_t *umem_slab_log;
752
753 #define CPUHINT() (thr_self())
754 #define CPUHINT_MAX() INT_MAX
755
756 #define CPU(mask) (umem_cpus + (CPUHINT() & (mask)))
757 static umem_cpu_t umem_startup_cpu = { /* initial, single CPU */
758 UMEM_CACHE_SIZE(0),
759 0
760 };
761
762 static uint32_t umem_cpu_mask = 0; /* global cpu mask */
763 static umem_cpu_t *umem_cpus = &umem_startup_cpu; /* cpu list */
764
765 volatile uint32_t umem_reaping;
766
767 thread_t umem_update_thr;
768 struct timeval umem_update_next; /* timeofday of next update */
769 volatile thread_t umem_st_update_thr; /* only used when single-thd */
770
771 #define IN_UPDATE() (thr_self() == umem_update_thr || \
772 thr_self() == umem_st_update_thr)
773 #define IN_REAP() IN_UPDATE()
774
775 mutex_t umem_update_lock; /* cache_u{next,prev,flags} */
776 cond_t umem_update_cv;
777
778 volatile hrtime_t umem_reap_next; /* min hrtime of next reap */
779
780 mutex_t umem_cache_lock; /* inter-cache linkage only */
781
782 #ifdef UMEM_STANDALONE
783 umem_cache_t umem_null_cache;
784 static const umem_cache_t umem_null_cache_template = {
785 #else
786 umem_cache_t umem_null_cache = {
787 #endif
788 0, 0, 0, 0, 0,
789 0, 0,
790 0, 0,
791 0, 0,
792 "invalid_cache",
793 0, 0,
794 NULL, NULL, NULL, NULL,
795 NULL,
796 0, 0, 0, 0,
797 &umem_null_cache, &umem_null_cache,
798 &umem_null_cache, &umem_null_cache,
799 0,
800 DEFAULTMUTEX, /* start of slab layer */
801 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
802 &umem_null_cache.cache_nullslab,
803 {
804 &umem_null_cache,
805 NULL,
806 &umem_null_cache.cache_nullslab,
807 &umem_null_cache.cache_nullslab,
808 NULL,
809 -1,
810 0
811 },
812 NULL,
813 NULL,
814 DEFAULTMUTEX, /* start of depot layer */
815 NULL, {
816 NULL, 0, 0, 0, 0
817 }, {
818 NULL, 0, 0, 0, 0
819 }, {
820 {
821 DEFAULTMUTEX, /* start of CPU cache */
822 0, 0, NULL, NULL, -1, -1, 0
823 }
824 }
825 };
826
827 #define ALLOC_TABLE_4 \
828 &umem_null_cache, &umem_null_cache, &umem_null_cache, &umem_null_cache
829
830 #define ALLOC_TABLE_64 \
831 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
832 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
833 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
834 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4
835
836 #define ALLOC_TABLE_1024 \
837 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
838 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
839 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
840 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64
841
842 static umem_cache_t *umem_alloc_table[UMEM_MAXBUF >> UMEM_ALIGN_SHIFT] = {
843 ALLOC_TABLE_1024,
844 ALLOC_TABLE_1024,
845 ALLOC_TABLE_1024,
846 ALLOC_TABLE_1024,
847 ALLOC_TABLE_1024,
848 ALLOC_TABLE_1024,
849 ALLOC_TABLE_1024,
850 ALLOC_TABLE_1024,
851 ALLOC_TABLE_1024,
852 ALLOC_TABLE_1024,
853 ALLOC_TABLE_1024,
854 ALLOC_TABLE_1024,
855 ALLOC_TABLE_1024,
856 ALLOC_TABLE_1024,
857 ALLOC_TABLE_1024,
858 ALLOC_TABLE_1024
859 };
860
861
862 /* Used to constrain audit-log stack traces */
863 caddr_t umem_min_stack;
864 caddr_t umem_max_stack;
865
866
867 #define UMERR_MODIFIED 0 /* buffer modified while on freelist */
868 #define UMERR_REDZONE 1 /* redzone violation (write past end of buf) */
869 #define UMERR_DUPFREE 2 /* freed a buffer twice */
870 #define UMERR_BADADDR 3 /* freed a bad (unallocated) address */
871 #define UMERR_BADBUFTAG 4 /* buftag corrupted */
872 #define UMERR_BADBUFCTL 5 /* bufctl corrupted */
873 #define UMERR_BADCACHE 6 /* freed a buffer to the wrong cache */
874 #define UMERR_BADSIZE 7 /* alloc size != free size */
875 #define UMERR_BADBASE 8 /* buffer base address wrong */
876
877 struct {
878 hrtime_t ump_timestamp; /* timestamp of error */
879 int ump_error; /* type of umem error (UMERR_*) */
880 void *ump_buffer; /* buffer that induced abort */
881 void *ump_realbuf; /* real start address for buffer */
882 umem_cache_t *ump_cache; /* buffer's cache according to client */
883 umem_cache_t *ump_realcache; /* actual cache containing buffer */
884 umem_slab_t *ump_slab; /* slab according to umem_findslab() */
885 umem_bufctl_t *ump_bufctl; /* bufctl */
886 } umem_abort_info;
887
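/*
 * Debugging pattern support: copy_pattern() fills a buffer with a repeated
 * 64-bit pattern; verify_pattern() checks that a buffer still holds the
 * pattern; verify_and_copy_pattern() checks for the old pattern while
 * replacing it with the new one.  The verify functions return the address
 * of the first mismatched word, or NULL if the buffer matches.
 */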
888 static void
889 copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
890 {
891 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
892 uint64_t *buf = buf_arg;
893
894 while (buf < bufend)
895 *buf++ = pattern;
896 }
897
898 static void *
899 verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
900 {
901 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
902 uint64_t *buf;
903
904 for (buf = buf_arg; buf < bufend; buf++)
905 if (*buf != pattern)
906 return (buf);
907 return (NULL);
908 }
909
910 static void *
911 verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
912 {
913 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
914 uint64_t *buf;
915
916 for (buf = buf_arg; buf < bufend; buf++) {
917 if (*buf != old) {
918 copy_pattern(old, buf_arg,
919 (char *)buf - (char *)buf_arg);
920 return (buf);
921 }
922 *buf = new;
923 }
924
925 return (NULL);
926 }
927
928 void
929 umem_cache_applyall(void (*func)(umem_cache_t *))
930 {
931 umem_cache_t *cp;
932
933 (void) mutex_lock(&umem_cache_lock);
934 for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
935 cp = cp->cache_next)
936 func(cp);
937 (void) mutex_unlock(&umem_cache_lock);
938 }
939
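/*
 * Request asynchronous work (UMU_* flags) on a cache, linking it onto the
 * global update list if it is not already Active or Work Requested.
 * umem_add_update_unlocked() requires umem_update_lock to be held;
 * umem_add_update() acquires it and wakes the update thread as needed.
 */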
940 static void
941 umem_add_update_unlocked(umem_cache_t *cp, int flags)
942 {
943 umem_cache_t *cnext, *cprev;
944
945 flags &= ~UMU_ACTIVE;
946
947 if (!flags)
948 return;
949
950 if (cp->cache_uflags & UMU_ACTIVE) {
951 cp->cache_uflags |= flags;
952 } else {
953 if (cp->cache_unext != NULL) {
954 ASSERT(cp->cache_uflags != 0);
955 cp->cache_uflags |= flags;
956 } else {
957 ASSERT(cp->cache_uflags == 0);
958 cp->cache_uflags = flags;
959 cp->cache_unext = cnext = &umem_null_cache;
960 cp->cache_uprev = cprev = umem_null_cache.cache_uprev;
961 cnext->cache_uprev = cp;
962 cprev->cache_unext = cp;
963 }
964 }
965 }
966
967 static void
968 umem_add_update(umem_cache_t *cp, int flags)
969 {
970 (void) mutex_lock(&umem_update_lock);
971
972 umem_add_update_unlocked(cp, flags);
973
974 if (!IN_UPDATE())
975 (void) cond_broadcast(&umem_update_cv);
976
977 (void) mutex_unlock(&umem_update_lock);
978 }
979
980 /*
981 * Remove a cache from the update list, waiting for any in-progress work to
982 * complete first.
983 */
984 static void
985 umem_remove_updates(umem_cache_t *cp)
986 {
987 (void) mutex_lock(&umem_update_lock);
988
989 /*
990 * Get it out of the active state
991 */
992 while (cp->cache_uflags & UMU_ACTIVE) {
993 int cancel_state;
994
995 ASSERT(cp->cache_unext == NULL);
996
997 cp->cache_uflags |= UMU_NOTIFY;
998
999 /*
1000 * Make sure the update state is sane, before we wait
1001 */
1002 ASSERT(umem_update_thr != 0 || umem_st_update_thr != 0);
1003 ASSERT(umem_update_thr != thr_self() &&
1004 umem_st_update_thr != thr_self());
1005
1006 (void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
1007 &cancel_state);
1008 (void) cond_wait(&umem_update_cv, &umem_update_lock);
1009 (void) pthread_setcancelstate(cancel_state, NULL);
1010 }
1011 /*
1012 * Get it out of the Work Requested state
1013 */
1014 if (cp->cache_unext != NULL) {
1015 cp->cache_uprev->cache_unext = cp->cache_unext;
1016 cp->cache_unext->cache_uprev = cp->cache_uprev;
1017 cp->cache_uprev = cp->cache_unext = NULL;
1018 cp->cache_uflags = 0;
1019 }
1020 /*
1021 * Make sure it is in the Inactive state
1022 */
1023 ASSERT(cp->cache_unext == NULL && cp->cache_uflags == 0);
1024 (void) mutex_unlock(&umem_update_lock);
1025 }
1026
1027 static void
1028 umem_updateall(int flags)
1029 {
1030 umem_cache_t *cp;
1031
1032 /*
1033 * NOTE: To prevent deadlock, umem_cache_lock is always acquired first.
1034 *
1035 * (umem_add_update is called from things run via umem_cache_applyall)
1036 */
1037 (void) mutex_lock(&umem_cache_lock);
1038 (void) mutex_lock(&umem_update_lock);
1039
1040 for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
1041 cp = cp->cache_next)
1042 umem_add_update_unlocked(cp, flags);
1043
1044 if (!IN_UPDATE())
1045 (void) cond_broadcast(&umem_update_cv);
1046
1047 (void) mutex_unlock(&umem_update_lock);
1048 (void) mutex_unlock(&umem_cache_lock);
1049 }
1050
1051 /*
1052 * Debugging support. Given a buffer address, find its slab.
1053 */
1054 static umem_slab_t *
1055 umem_findslab(umem_cache_t *cp, void *buf)
1056 {
1057 umem_slab_t *sp;
1058
1059 (void) mutex_lock(&cp->cache_lock);
1060 for (sp = cp->cache_nullslab.slab_next;
1061 sp != &cp->cache_nullslab; sp = sp->slab_next) {
1062 if (UMEM_SLAB_MEMBER(sp, buf)) {
1063 (void) mutex_unlock(&cp->cache_lock);
1064 return (sp);
1065 }
1066 }
1067 (void) mutex_unlock(&cp->cache_lock);
1068
1069 return (NULL);
1070 }
1071
1072 static void
1073 umem_error(int error, umem_cache_t *cparg, void *bufarg)
1074 {
1075 umem_buftag_t *btp = NULL;
1076 umem_bufctl_t *bcp = NULL;
1077 umem_cache_t *cp = cparg;
1078 umem_slab_t *sp;
1079 uint64_t *off;
1080 void *buf = bufarg;
1081
1082 int old_logging = umem_logging;
1083
1084 umem_logging = 0; /* stop logging when a bad thing happens */
1085
1086 umem_abort_info.ump_timestamp = gethrtime();
1087
1088 sp = umem_findslab(cp, buf);
1089 if (sp == NULL) {
1090 for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
1091 cp = cp->cache_prev) {
1092 if ((sp = umem_findslab(cp, buf)) != NULL)
1093 break;
1094 }
1095 }
1096
1097 if (sp == NULL) {
1098 cp = NULL;
1099 error = UMERR_BADADDR;
1100 } else {
1101 if (cp != cparg)
1102 error = UMERR_BADCACHE;
1103 else
1104 buf = (char *)bufarg - ((uintptr_t)bufarg -
1105 (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1106 if (buf != bufarg)
1107 error = UMERR_BADBASE;
1108 if (cp->cache_flags & UMF_BUFTAG)
1109 btp = UMEM_BUFTAG(cp, buf);
1110 if (cp->cache_flags & UMF_HASH) {
1111 (void) mutex_lock(&cp->cache_lock);
1112 for (bcp = *UMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1113 if (bcp->bc_addr == buf)
1114 break;
1115 (void) mutex_unlock(&cp->cache_lock);
1116 if (bcp == NULL && btp != NULL)
1117 bcp = btp->bt_bufctl;
1118 if (umem_findslab(cp->cache_bufctl_cache, bcp) ==
1119 NULL || P2PHASE((uintptr_t)bcp, UMEM_ALIGN) ||
1120 bcp->bc_addr != buf) {
1121 error = UMERR_BADBUFCTL;
1122 bcp = NULL;
1123 }
1124 }
1125 }
1126
1127 umem_abort_info.ump_error = error;
1128 umem_abort_info.ump_buffer = bufarg;
1129 umem_abort_info.ump_realbuf = buf;
1130 umem_abort_info.ump_cache = cparg;
1131 umem_abort_info.ump_realcache = cp;
1132 umem_abort_info.ump_slab = sp;
1133 umem_abort_info.ump_bufctl = bcp;
1134
1135 umem_printf("umem allocator: ");
1136
1137 switch (error) {
1138
1139 case UMERR_MODIFIED:
1140 umem_printf("buffer modified after being freed\n");
1141 off = verify_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
1142 if (off == NULL) /* shouldn't happen */
1143 off = buf;
1144 umem_printf("modification occurred at offset 0x%lx "
1145 "(0x%llx replaced by 0x%llx)\n",
1146 (uintptr_t)off - (uintptr_t)buf,
1147 (longlong_t)UMEM_FREE_PATTERN, (longlong_t)*off);
1148 break;
1149
1150 case UMERR_REDZONE:
1151 umem_printf("redzone violation: write past end of buffer\n");
1152 break;
1153
1154 case UMERR_BADADDR:
1155 umem_printf("invalid free: buffer not in cache\n");
1156 break;
1157
1158 case UMERR_DUPFREE:
1159 umem_printf("duplicate free: buffer freed twice\n");
1160 break;
1161
1162 case UMERR_BADBUFTAG:
1163 umem_printf("boundary tag corrupted\n");
1164 umem_printf("bcp ^ bxstat = %lx, should be %lx\n",
1165 (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
1166 UMEM_BUFTAG_FREE);
1167 break;
1168
1169 case UMERR_BADBUFCTL:
1170 umem_printf("bufctl corrupted\n");
1171 break;
1172
1173 case UMERR_BADCACHE:
1174 umem_printf("buffer freed to wrong cache\n");
1175 umem_printf("buffer was allocated from %s,\n", cp->cache_name);
1176 umem_printf("caller attempting free to %s.\n",
1177 cparg->cache_name);
1178 break;
1179
1180 case UMERR_BADSIZE:
1181 umem_printf("bad free: free size (%u) != alloc size (%u)\n",
1182 UMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
1183 UMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
1184 break;
1185
1186 case UMERR_BADBASE:
1187 umem_printf("bad free: free address (%p) != alloc address "
1188 "(%p)\n", bufarg, buf);
1189 break;
1190 }
1191
1192 umem_printf("buffer=%p bufctl=%p cache: %s\n",
1193 bufarg, (void *)bcp, cparg->cache_name);
1194
1195 if (bcp != NULL && (cp->cache_flags & UMF_AUDIT) &&
1196 error != UMERR_BADBUFCTL) {
1197 int d;
1198 timespec_t ts;
1199 hrtime_t diff;
1200 umem_bufctl_audit_t *bcap = (umem_bufctl_audit_t *)bcp;
1201
1202 diff = umem_abort_info.ump_timestamp - bcap->bc_timestamp;
1203 ts.tv_sec = diff / NANOSEC;
1204 ts.tv_nsec = diff % NANOSEC;
1205
1206 umem_printf("previous transaction on buffer %p:\n", buf);
1207 umem_printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n",
1208 (void *)(intptr_t)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
1209 (void *)sp, cp->cache_name);
1210 for (d = 0; d < MIN(bcap->bc_depth, umem_stack_depth); d++) {
1211 (void) print_sym((void *)bcap->bc_stack[d]);
1212 umem_printf("\n");
1213 }
1214 }
1215
1216 umem_err_recoverable("umem: heap corruption detected");
1217
1218 umem_logging = old_logging; /* resume logging */
1219 }
1220
1221 void
1222 umem_nofail_callback(umem_nofail_callback_t *cb)
1223 {
1224 nofail_callback = cb;
1225 }
1226
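/*
 * Common failure handling for allocations.  Triggers umem_init() if the
 * allocation was against &umem_null_cache (i.e. this is the first use) and
 * implements UMEM_NOFAIL semantics via the registered callback.  Returns
 * nonzero if the caller should retry the allocation.
 */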
1227 static int
1228 umem_alloc_retry(umem_cache_t *cp, int umflag)
1229 {
1230 if (cp == &umem_null_cache) {
1231 if (umem_init())
1232 return (1); /* retry */
1233 /*
1234 * Initialization failed. Do normal failure processing.
1235 */
1236 }
1237 if (umem_flags & UMF_CHECKNULL) {
1238 umem_err_recoverable("umem: out of heap space");
1239 }
1240 if (umflag & UMEM_NOFAIL) {
1241 int def_result = UMEM_CALLBACK_EXIT(255);
1242 int result = def_result;
1243 umem_nofail_callback_t *callback = nofail_callback;
1244
1245 if (callback != NULL)
1246 result = callback();
1247
1248 if (result == UMEM_CALLBACK_RETRY)
1249 return (1);
1250
1251 if ((result & ~0xFF) != UMEM_CALLBACK_EXIT(0)) {
1252 log_message("nofail callback returned %x\n", result);
1253 result = def_result;
1254 }
1255
1256 /*
1257 * only one thread will call exit
1258 */
1259 if (umem_nofail_exit_thr == thr_self())
1260 umem_panic("recursive UMEM_CALLBACK_EXIT()\n");
1261
1262 (void) mutex_lock(&umem_nofail_exit_lock);
1263 umem_nofail_exit_thr = thr_self();
1264 exit(result & 0xFF);
1265 /*NOTREACHED*/
1266 }
1267 return (0);
1268 }
1269
1270 static umem_log_header_t *
1271 umem_log_init(size_t logsize)
1272 {
1273 umem_log_header_t *lhp;
1274 int nchunks = 4 * umem_max_ncpus;
1275 size_t lhsize = offsetof(umem_log_header_t, lh_cpu[umem_max_ncpus]);
1276 int i;
1277
1278 if (logsize == 0)
1279 return (NULL);
1280
1281 /*
1282 * Make sure that lhp->lh_cpu[] is nicely aligned
1283 * to prevent false sharing of cache lines.
1284 */
1285 lhsize = P2ROUNDUP(lhsize, UMEM_ALIGN);
1286 lhp = vmem_xalloc(umem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
1287 NULL, NULL, VM_NOSLEEP);
1288 if (lhp == NULL)
1289 goto fail;
1290
1291 bzero(lhp, lhsize);
1292
1293 (void) mutex_init(&lhp->lh_lock, USYNC_THREAD, NULL);
1294 lhp->lh_nchunks = nchunks;
1295 lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks, PAGESIZE);
1296 if (lhp->lh_chunksize == 0)
1297 lhp->lh_chunksize = PAGESIZE;
1298
1299 lhp->lh_base = vmem_alloc(umem_log_arena,
1300 lhp->lh_chunksize * nchunks, VM_NOSLEEP);
1301 if (lhp->lh_base == NULL)
1302 goto fail;
1303
1304 lhp->lh_free = vmem_alloc(umem_log_arena,
1305 nchunks * sizeof (int), VM_NOSLEEP);
1306 if (lhp->lh_free == NULL)
1307 goto fail;
1308
1309 bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
1310
1311 for (i = 0; i < umem_max_ncpus; i++) {
1312 umem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
1313 (void) mutex_init(&clhp->clh_lock, USYNC_THREAD, NULL);
1314 clhp->clh_chunk = i;
1315 }
1316
1317 for (i = umem_max_ncpus; i < nchunks; i++)
1318 lhp->lh_free[i] = i;
1319
1320 lhp->lh_head = umem_max_ncpus;
1321 lhp->lh_tail = 0;
1322
1323 return (lhp);
1324
1325 fail:
1326 if (lhp != NULL) {
1327 if (lhp->lh_base != NULL)
1328 vmem_free(umem_log_arena, lhp->lh_base,
1329 lhp->lh_chunksize * nchunks);
1330
1331 vmem_xfree(umem_log_arena, lhp, lhsize);
1332 }
1333 return (NULL);
1334 }
1335
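/*
 * Copy size bytes of data into the current per-CPU chunk of the given log,
 * moving to a fresh chunk if necessary.  Returns the address at which the
 * data was logged, or NULL if logging is disabled.
 */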
1336 static void *
1337 umem_log_enter(umem_log_header_t *lhp, void *data, size_t size)
1338 {
1339 void *logspace;
1340 umem_cpu_log_header_t *clhp;
1341
1342 if (lhp == NULL || umem_logging == 0)
1343 return (NULL);
1344
1345 clhp = &lhp->lh_cpu[CPU(umem_cpu_mask)->cpu_number];
1346
1347 (void) mutex_lock(&clhp->clh_lock);
1348 clhp->clh_hits++;
1349 if (size > clhp->clh_avail) {
1350 (void) mutex_lock(&lhp->lh_lock);
1351 lhp->lh_hits++;
1352 lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
1353 lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
1354 clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
1355 lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
1356 clhp->clh_current = lhp->lh_base +
1357 clhp->clh_chunk * lhp->lh_chunksize;
1358 clhp->clh_avail = lhp->lh_chunksize;
1359 if (size > lhp->lh_chunksize)
1360 size = lhp->lh_chunksize;
1361 (void) mutex_unlock(&lhp->lh_lock);
1362 }
1363 logspace = clhp->clh_current;
1364 clhp->clh_current += size;
1365 clhp->clh_avail -= size;
1366 bcopy(data, logspace, size);
1367 (void) mutex_unlock(&clhp->clh_lock);
1368 return (logspace);
1369 }
1370
1371 #define UMEM_AUDIT(lp, cp, bcp) \
1372 { \
1373 umem_bufctl_audit_t *_bcp = (umem_bufctl_audit_t *)(bcp); \
1374 _bcp->bc_timestamp = gethrtime(); \
1375 _bcp->bc_thread = thr_self(); \
1376 _bcp->bc_depth = getpcstack(_bcp->bc_stack, umem_stack_depth, \
1377 (cp != NULL) && (cp->cache_flags & UMF_CHECKSIGNAL)); \
1378 _bcp->bc_lastlog = umem_log_enter((lp), _bcp, \
1379 UMEM_BUFCTL_AUDIT_SIZE); \
1380 }
1381
1382 static void
1383 umem_log_event(umem_log_header_t *lp, umem_cache_t *cp,
1384 umem_slab_t *sp, void *addr)
1385 {
1386 umem_bufctl_audit_t *bcp;
1387 UMEM_LOCAL_BUFCTL_AUDIT(&bcp);
1388
1389 bzero(bcp, UMEM_BUFCTL_AUDIT_SIZE);
1390 bcp->bc_addr = addr;
1391 bcp->bc_slab = sp;
1392 bcp->bc_cache = cp;
1393 UMEM_AUDIT(lp, cp, bcp);
1394 }
1395
1396 /*
1397 * Create a new slab for cache cp.
1398 */
1399 static umem_slab_t *
1400 umem_slab_create(umem_cache_t *cp, int umflag)
1401 {
1402 size_t slabsize = cp->cache_slabsize;
1403 size_t chunksize = cp->cache_chunksize;
1404 int cache_flags = cp->cache_flags;
1405 size_t color, chunks;
1406 char *buf, *slab;
1407 umem_slab_t *sp;
1408 umem_bufctl_t *bcp;
1409 vmem_t *vmp = cp->cache_arena;
1410
1411 color = cp->cache_color + cp->cache_align;
1412 if (color > cp->cache_maxcolor)
1413 color = cp->cache_mincolor;
1414 cp->cache_color = color;
1415
1416 slab = vmem_alloc(vmp, slabsize, UMEM_VMFLAGS(umflag));
1417
1418 if (slab == NULL)
1419 goto vmem_alloc_failure;
1420
1421 ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
1422
1423 if (!(cp->cache_cflags & UMC_NOTOUCH) &&
1424 (cp->cache_flags & UMF_DEADBEEF))
1425 copy_pattern(UMEM_UNINITIALIZED_PATTERN, slab, slabsize);
1426
1427 if (cache_flags & UMF_HASH) {
1428 if ((sp = _umem_cache_alloc(umem_slab_cache, umflag)) == NULL)
1429 goto slab_alloc_failure;
1430 chunks = (slabsize - color) / chunksize;
1431 } else {
1432 sp = UMEM_SLAB(cp, slab);
1433 chunks = (slabsize - sizeof (umem_slab_t) - color) / chunksize;
1434 }
1435
1436 sp->slab_cache = cp;
1437 sp->slab_head = NULL;
1438 sp->slab_refcnt = 0;
1439 sp->slab_base = buf = slab + color;
1440 sp->slab_chunks = chunks;
1441
1442 ASSERT(chunks > 0);
1443 while (chunks-- != 0) {
1444 if (cache_flags & UMF_HASH) {
1445 bcp = _umem_cache_alloc(cp->cache_bufctl_cache, umflag);
1446 if (bcp == NULL)
1447 goto bufctl_alloc_failure;
1448 if (cache_flags & UMF_AUDIT) {
1449 umem_bufctl_audit_t *bcap =
1450 (umem_bufctl_audit_t *)bcp;
1451 bzero(bcap, UMEM_BUFCTL_AUDIT_SIZE);
1452 bcap->bc_cache = cp;
1453 }
1454 bcp->bc_addr = buf;
1455 bcp->bc_slab = sp;
1456 } else {
1457 bcp = UMEM_BUFCTL(cp, buf);
1458 }
1459 if (cache_flags & UMF_BUFTAG) {
1460 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1461 btp->bt_redzone = UMEM_REDZONE_PATTERN;
1462 btp->bt_bufctl = bcp;
1463 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
1464 if (cache_flags & UMF_DEADBEEF) {
1465 copy_pattern(UMEM_FREE_PATTERN, buf,
1466 cp->cache_verify);
1467 }
1468 }
1469 bcp->bc_next = sp->slab_head;
1470 sp->slab_head = bcp;
1471 buf += chunksize;
1472 }
1473
1474 umem_log_event(umem_slab_log, cp, sp, slab);
1475
1476 return (sp);
1477
1478 bufctl_alloc_failure:
1479
1480 while ((bcp = sp->slab_head) != NULL) {
1481 sp->slab_head = bcp->bc_next;
1482 _umem_cache_free(cp->cache_bufctl_cache, bcp);
1483 }
1484 _umem_cache_free(umem_slab_cache, sp);
1485
1486 slab_alloc_failure:
1487
1488 vmem_free(vmp, slab, slabsize);
1489
1490 vmem_alloc_failure:
1491
1492 umem_log_event(umem_failure_log, cp, NULL, NULL);
1493 atomic_add_64(&cp->cache_alloc_fail, 1);
1494
1495 return (NULL);
1496 }
1497
1498 /*
1499 * Destroy a slab.
1500 */
1501 static void
1502 umem_slab_destroy(umem_cache_t *cp, umem_slab_t *sp)
1503 {
1504 vmem_t *vmp = cp->cache_arena;
1505 void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1506
1507 if (cp->cache_flags & UMF_HASH) {
1508 umem_bufctl_t *bcp;
1509 while ((bcp = sp->slab_head) != NULL) {
1510 sp->slab_head = bcp->bc_next;
1511 _umem_cache_free(cp->cache_bufctl_cache, bcp);
1512 }
1513 _umem_cache_free(umem_slab_cache, sp);
1514 }
1515 vmem_free(vmp, slab, cp->cache_slabsize);
1516 }
1517
1518 /*
1519 * Allocate a raw (unconstructed) buffer from cp's slab layer.
1520 */
1521 static void *
1522 umem_slab_alloc(umem_cache_t *cp, int umflag)
1523 {
1524 umem_bufctl_t *bcp, **hash_bucket;
1525 umem_slab_t *sp;
1526 void *buf;
1527
1528 (void) mutex_lock(&cp->cache_lock);
1529 cp->cache_slab_alloc++;
1530 sp = cp->cache_freelist;
1531 ASSERT(sp->slab_cache == cp);
1532 if (sp->slab_head == NULL) {
1533 /*
1534 * The freelist is empty. Create a new slab.
1535 */
1536 (void) mutex_unlock(&cp->cache_lock);
1537 if (cp == &umem_null_cache)
1538 return (NULL);
1539 if ((sp = umem_slab_create(cp, umflag)) == NULL)
1540 return (NULL);
1541 (void) mutex_lock(&cp->cache_lock);
1542 cp->cache_slab_create++;
1543 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1544 cp->cache_bufmax = cp->cache_buftotal;
1545 sp->slab_next = cp->cache_freelist;
1546 sp->slab_prev = cp->cache_freelist->slab_prev;
1547 sp->slab_next->slab_prev = sp;
1548 sp->slab_prev->slab_next = sp;
1549 cp->cache_freelist = sp;
1550 }
1551
1552 sp->slab_refcnt++;
1553 ASSERT(sp->slab_refcnt <= sp->slab_chunks);
1554
1555 /*
1556 * If we're taking the last buffer in the slab,
1557 * remove the slab from the cache's freelist.
1558 */
1559 bcp = sp->slab_head;
1560 if ((sp->slab_head = bcp->bc_next) == NULL) {
1561 cp->cache_freelist = sp->slab_next;
1562 ASSERT(sp->slab_refcnt == sp->slab_chunks);
1563 }
1564
1565 if (cp->cache_flags & UMF_HASH) {
1566 /*
1567 * Add buffer to allocated-address hash table.
1568 */
1569 buf = bcp->bc_addr;
1570 hash_bucket = UMEM_HASH(cp, buf);
1571 bcp->bc_next = *hash_bucket;
1572 *hash_bucket = bcp;
1573 if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
1574 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1575 }
1576 } else {
1577 buf = UMEM_BUF(cp, bcp);
1578 }
1579
1580 ASSERT(UMEM_SLAB_MEMBER(sp, buf));
1581
1582 (void) mutex_unlock(&cp->cache_lock);
1583
1584 return (buf);
1585 }
1586
1587 /*
1588 * Free a raw (unconstructed) buffer to cp's slab layer.
1589 */
1590 static void
1591 umem_slab_free(umem_cache_t *cp, void *buf)
1592 {
1593 umem_slab_t *sp;
1594 umem_bufctl_t *bcp, **prev_bcpp;
1595
1596 ASSERT(buf != NULL);
1597
1598 (void) mutex_lock(&cp->cache_lock);
1599 cp->cache_slab_free++;
1600
1601 if (cp->cache_flags & UMF_HASH) {
1602 /*
1603 * Look up buffer in allocated-address hash table.
1604 */
1605 prev_bcpp = UMEM_HASH(cp, buf);
1606 while ((bcp = *prev_bcpp) != NULL) {
1607 if (bcp->bc_addr == buf) {
1608 *prev_bcpp = bcp->bc_next;
1609 sp = bcp->bc_slab;
1610 break;
1611 }
1612 cp->cache_lookup_depth++;
1613 prev_bcpp = &bcp->bc_next;
1614 }
1615 } else {
1616 bcp = UMEM_BUFCTL(cp, buf);
1617 sp = UMEM_SLAB(cp, buf);
1618 }
1619
1620 if (bcp == NULL || sp->slab_cache != cp || !UMEM_SLAB_MEMBER(sp, buf)) {
1621 (void) mutex_unlock(&cp->cache_lock);
1622 umem_error(UMERR_BADADDR, cp, buf);
1623 return;
1624 }
1625
1626 if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
1627 if (cp->cache_flags & UMF_CONTENTS)
1628 ((umem_bufctl_audit_t *)bcp)->bc_contents =
1629 umem_log_enter(umem_content_log, buf,
1630 cp->cache_contents);
1631 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1632 }
1633
1634 /*
1635 * If this slab isn't currently on the freelist, put it there.
1636 */
1637 if (sp->slab_head == NULL) {
1638 ASSERT(sp->slab_refcnt == sp->slab_chunks);
1639 ASSERT(cp->cache_freelist != sp);
1640 sp->slab_next->slab_prev = sp->slab_prev;
1641 sp->slab_prev->slab_next = sp->slab_next;
1642 sp->slab_next = cp->cache_freelist;
1643 sp->slab_prev = cp->cache_freelist->slab_prev;
1644 sp->slab_next->slab_prev = sp;
1645 sp->slab_prev->slab_next = sp;
1646 cp->cache_freelist = sp;
1647 }
1648
1649 bcp->bc_next = sp->slab_head;
1650 sp->slab_head = bcp;
1651
1652 ASSERT(sp->slab_refcnt >= 1);
1653 if (--sp->slab_refcnt == 0) {
1654 /*
1655 * There are no outstanding allocations from this slab,
1656 * so we can reclaim the memory.
1657 */
1658 sp->slab_next->slab_prev = sp->slab_prev;
1659 sp->slab_prev->slab_next = sp->slab_next;
1660 if (sp == cp->cache_freelist)
1661 cp->cache_freelist = sp->slab_next;
1662 cp->cache_slab_destroy++;
1663 cp->cache_buftotal -= sp->slab_chunks;
1664 (void) mutex_unlock(&cp->cache_lock);
1665 umem_slab_destroy(cp, sp);
1666 return;
1667 }
1668 (void) mutex_unlock(&cp->cache_lock);
1669 }
1670
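/*
 * Debug-path allocation (UMF_BUFTAG): verify that the buffer's buftag is in
 * the free state (bt_bxstat == bufctl pointer ^ UMEM_BUFTAG_FREE), flip it to
 * the allocated state, verify the free pattern if UMF_DEADBEEF is set,
 * optionally inject an artificial failure based on the cache's mtbf settings,
 * and apply the constructor.  Returns 0 on success, -1 on any failure.
 */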
1671 static int
1672 umem_cache_alloc_debug(umem_cache_t *cp, void *buf, int umflag)
1673 {
1674 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1675 umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl;
1676 uint32_t mtbf;
1677 int flags_nfatal;
1678
1679 if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) {
1680 umem_error(UMERR_BADBUFTAG, cp, buf);
1681 return (-1);
1682 }
1683
1684 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_ALLOC;
1685
1686 if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
1687 umem_error(UMERR_BADBUFCTL, cp, buf);
1688 return (-1);
1689 }
1690
1691 btp->bt_redzone = UMEM_REDZONE_PATTERN;
1692
1693 if (cp->cache_flags & UMF_DEADBEEF) {
1694 if (verify_and_copy_pattern(UMEM_FREE_PATTERN,
1695 UMEM_UNINITIALIZED_PATTERN, buf, cp->cache_verify)) {
1696 umem_error(UMERR_MODIFIED, cp, buf);
1697 return (-1);
1698 }
1699 }
1700
1701 if ((mtbf = umem_mtbf | cp->cache_mtbf) != 0 &&
1702 gethrtime() % mtbf == 0 &&
1703 (umflag & (UMEM_FATAL_FLAGS)) == 0) {
1704 umem_log_event(umem_failure_log, cp, NULL, NULL);
1705 } else {
1706 mtbf = 0;
1707 }
1708
1709 /*
1710 * We do not pass fatal flags on to the constructor. This prevents
1711 * leaking buffers in the event of a subordinate constructor failing.
1712 */
1713 flags_nfatal = UMEM_DEFAULT;
1714 if (mtbf || (cp->cache_constructor != NULL &&
1715 cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0)) {
1716 atomic_add_64(&cp->cache_alloc_fail, 1);
1717 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
1718 copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
1719 umem_slab_free(cp, buf);
1720 return (-1);
1721 }
1722
1723 if (cp->cache_flags & UMF_AUDIT) {
1724 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1725 }
1726
1727 return (0);
1728 }
1729
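/*
 * Debug-path free (UMF_BUFTAG): verify that the buftag is in the allocated
 * state (a tag already in the free state indicates a duplicate free), check
 * the redzone, log the transaction if auditing, apply the destructor, and
 * rewrite the free pattern if UMF_DEADBEEF is set.  Returns 0 on success,
 * -1 on any failure.
 */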
1730 static int
1731 umem_cache_free_debug(umem_cache_t *cp, void *buf)
1732 {
1733 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1734 umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl;
1735 umem_slab_t *sp;
1736
1737 if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_ALLOC)) {
1738 if (btp->bt_bxstat == ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) {
1739 umem_error(UMERR_DUPFREE, cp, buf);
1740 return (-1);
1741 }
1742 sp = umem_findslab(cp, buf);
1743 if (sp == NULL || sp->slab_cache != cp)
1744 umem_error(UMERR_BADADDR, cp, buf);
1745 else
1746 umem_error(UMERR_REDZONE, cp, buf);
1747 return (-1);
1748 }
1749
1750 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
1751
1752 if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
1753 umem_error(UMERR_BADBUFCTL, cp, buf);
1754 return (-1);
1755 }
1756
1757 if (btp->bt_redzone != UMEM_REDZONE_PATTERN) {
1758 umem_error(UMERR_REDZONE, cp, buf);
1759 return (-1);
1760 }
1761
1762 if (cp->cache_flags & UMF_AUDIT) {
1763 if (cp->cache_flags & UMF_CONTENTS)
1764 bcp->bc_contents = umem_log_enter(umem_content_log,
1765 buf, cp->cache_contents);
1766 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1767 }
1768
1769 if (cp->cache_destructor != NULL)
1770 cp->cache_destructor(buf, cp->cache_private);
1771
1772 if (cp->cache_flags & UMF_DEADBEEF)
1773 copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
1774
1775 return (0);
1776 }
1777
1778 /*
1779 * Free each object in magazine mp to cp's slab layer, and free mp itself.
1780 */
1781 static void
1782 umem_magazine_destroy(umem_cache_t *cp, umem_magazine_t *mp, int nrounds)
1783 {
1784 int round;
1785
1786 ASSERT(cp->cache_next == NULL || IN_UPDATE());
1787
1788 for (round = 0; round < nrounds; round++) {
1789 void *buf = mp->mag_round[round];
1790
1791 if ((cp->cache_flags & UMF_DEADBEEF) &&
1792 verify_pattern(UMEM_FREE_PATTERN, buf,
1793 cp->cache_verify) != NULL) {
1794 umem_error(UMERR_MODIFIED, cp, buf);
1795 continue;
1796 }
1797
1798 if (!(cp->cache_flags & UMF_BUFTAG) &&
1799 cp->cache_destructor != NULL)
1800 cp->cache_destructor(buf, cp->cache_private);
1801
1802 umem_slab_free(cp, buf);
1803 }
1804 ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1805 _umem_cache_free(cp->cache_magtype->mt_cache, mp);
1806 }
1807
1808 /*
1809 * Allocate a magazine from the depot.
1810 */
1811 static umem_magazine_t *
1812 umem_depot_alloc(umem_cache_t *cp, umem_maglist_t *mlp)
1813 {
1814 umem_magazine_t *mp;
1815
1816 /*
1817 * If we can't get the depot lock without contention,
1818 * update our contention count. We use the depot
1819 * contention rate to determine whether we need to
1820 * increase the magazine size for better scalability.
1821 */
1822 if (mutex_trylock(&cp->cache_depot_lock) != 0) {
1823 (void) mutex_lock(&cp->cache_depot_lock);
1824 cp->cache_depot_contention++;
1825 }
1826
1827 if ((mp = mlp->ml_list) != NULL) {
1828 ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1829 mlp->ml_list = mp->mag_next;
1830 if (--mlp->ml_total < mlp->ml_min)
1831 mlp->ml_min = mlp->ml_total;
1832 mlp->ml_alloc++;
1833 }
1834
1835 (void) mutex_unlock(&cp->cache_depot_lock);
1836
1837 return (mp);
1838 }
1839
1840 /*
1841 * Free a magazine to the depot.
1842 */
1843 static void
1844 umem_depot_free(umem_cache_t *cp, umem_maglist_t *mlp, umem_magazine_t *mp)
1845 {
1846 (void) mutex_lock(&cp->cache_depot_lock);
1847 ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1848 mp->mag_next = mlp->ml_list;
1849 mlp->ml_list = mp;
1850 mlp->ml_total++;
1851 (void) mutex_unlock(&cp->cache_depot_lock);
1852 }
1853
1854 /*
1855 * Update the working set statistics for cp's depot.
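 *
 * ml_min tracks the smallest number of magazines on the list since the last
 * update; copying it into ml_reaplimit and resetting ml_min to the current
 * total means the reaper will only take magazines that sat unused for an
 * entire update interval.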
1856 */
1857 static void
1858 umem_depot_ws_update(umem_cache_t *cp)
1859 {
1860 (void) mutex_lock(&cp->cache_depot_lock);
1861 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
1862 cp->cache_full.ml_min = cp->cache_full.ml_total;
1863 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
1864 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
1865 (void) mutex_unlock(&cp->cache_depot_lock);
1866 }
1867
1868 /*
1869 * Reap all magazines that have fallen out of the depot's working set.
1870 */
1871 static void
1872 umem_depot_ws_reap(umem_cache_t *cp)
1873 {
1874 long reap;
1875 umem_magazine_t *mp;
1876
1877 ASSERT(cp->cache_next == NULL || IN_REAP());
1878
1879 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
1880 while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_full)) != NULL)
1881 umem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
1882
1883 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
1884 while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_empty)) != NULL)
1885 umem_magazine_destroy(cp, mp, 0);
1886 }
1887
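/*
 * Install magazine mp (holding 'rounds' objects) as the CPU cache's loaded
 * magazine, demoting the current loaded magazine to the previously-loaded
 * slot.  Called with ccp->cc_lock held.
 */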
1888 static void
1889 umem_cpu_reload(umem_cpu_cache_t *ccp, umem_magazine_t *mp, int rounds)
1890 {
1891 ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
1892 (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
1893 ASSERT(ccp->cc_magsize > 0);
1894
1895 ccp->cc_ploaded = ccp->cc_loaded;
1896 ccp->cc_prounds = ccp->cc_rounds;
1897 ccp->cc_loaded = mp;
1898 ccp->cc_rounds = rounds;
1899 }
1900
1901 /*
1902 * Allocate a constructed object from cache cp.
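 *
 * The fast path takes an object from the CPU's loaded magazine.  Failing
 * that, we swap in the previously-loaded magazine if it is full, then try to
 * exchange the empty magazine for a full one from the depot.  Only when the
 * magazine layer is exhausted (or disabled) do we fall back to the slab
 * layer and apply the constructor ourselves.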
1903 */
1904 #pragma weak umem_cache_alloc = _umem_cache_alloc
1905 void *
1906 _umem_cache_alloc(umem_cache_t *cp, int umflag)
1907 {
1908 umem_cpu_cache_t *ccp;
1909 umem_magazine_t *fmp;
1910 void *buf;
1911 int flags_nfatal;
1912
1913 retry:
1914 ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
1915 (void) mutex_lock(&ccp->cc_lock);
1916 for (;;) {
1917 /*
1918 * If there's an object available in the current CPU's
1919 * loaded magazine, just take it and return.
1920 */
1921 if (ccp->cc_rounds > 0) {
1922 buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
1923 ccp->cc_alloc++;
1924 (void) mutex_unlock(&ccp->cc_lock);
1925 if ((ccp->cc_flags & UMF_BUFTAG) &&
1926 umem_cache_alloc_debug(cp, buf, umflag) == -1) {
1927 if (umem_alloc_retry(cp, umflag)) {
1928 goto retry;
1929 }
1930
1931 return (NULL);
1932 }
1933 return (buf);
1934 }
1935
1936 /*
1937 * The loaded magazine is empty. If the previously loaded
1938 * magazine was full, exchange them and try again.
1939 */
1940 if (ccp->cc_prounds > 0) {
1941 umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
1942 continue;
1943 }
1944
1945 /*
1946 * If the magazine layer is disabled, break out now.
1947 */
1948 if (ccp->cc_magsize == 0)
1949 break;
1950
1951 /*
1952 * Try to get a full magazine from the depot.
1953 */
1954 fmp = umem_depot_alloc(cp, &cp->cache_full);
1955 if (fmp != NULL) {
1956 if (ccp->cc_ploaded != NULL)
1957 umem_depot_free(cp, &cp->cache_empty,
1958 ccp->cc_ploaded);
1959 umem_cpu_reload(ccp, fmp, ccp->cc_magsize);
1960 continue;
1961 }
1962
1963 /*
1964 * There are no full magazines in the depot,
1965 * so fall through to the slab layer.
1966 */
1967 break;
1968 }
1969 (void) mutex_unlock(&ccp->cc_lock);
1970
1971 /*
1972 * We couldn't allocate a constructed object from the magazine layer,
1973 * so get a raw buffer from the slab layer and apply its constructor.
1974 */
1975 buf = umem_slab_alloc(cp, umflag);
1976
1977 if (buf == NULL) {
1978 if (cp == &umem_null_cache)
1979 return (NULL);
1980 if (umem_alloc_retry(cp, umflag)) {
1981 goto retry;
1982 }
1983
1984 return (NULL);
1985 }
1986
1987 if (cp->cache_flags & UMF_BUFTAG) {
1988 /*
1989 * Let umem_cache_alloc_debug() apply the constructor for us.
1990 */
1991 if (umem_cache_alloc_debug(cp, buf, umflag) == -1) {
1992 if (umem_alloc_retry(cp, umflag)) {
1993 goto retry;
1994 }
1995 return (NULL);
1996 }
1997 return (buf);
1998 }
1999
2000 /*
2001 * We do not pass fatal flags on to the constructor. This prevents
2002 * leaking buffers in the event of a subordinate constructor failing.
2003 */
2004 flags_nfatal = UMEM_DEFAULT;
2005 if (cp->cache_constructor != NULL &&
2006 cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0) {
2007 atomic_add_64(&cp->cache_alloc_fail, 1);
2008 umem_slab_free(cp, buf);
2009
2010 if (umem_alloc_retry(cp, umflag)) {
2011 goto retry;
2012 }
2013 return (NULL);
2014 }
2015
2016 return (buf);
2017 }
2018
2019 /*
2020 * Free a constructed object to cache cp.
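 *
 * The fast path places the object into a free slot in the CPU's loaded
 * magazine.  Failing that, we swap in the previously-loaded magazine if it
 * is empty, then try to fetch (or allocate) an empty magazine via the depot.
 * Only when the magazine layer is exhausted (or disabled) do we apply the
 * destructor and return the buffer to the slab layer.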
2021 */
2022 #pragma weak umem_cache_free = _umem_cache_free
2023 void
2024 _umem_cache_free(umem_cache_t *cp, void *buf)
2025 {
2026 umem_cpu_cache_t *ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
2027 umem_magazine_t *emp;
2028 umem_magtype_t *mtp;
2029
2030 if (ccp->cc_flags & UMF_BUFTAG)
2031 if (umem_cache_free_debug(cp, buf) == -1)
2032 return;
2033
2034 (void) mutex_lock(&ccp->cc_lock);
2035 for (;;) {
2036 /*
2037 * If there's a slot available in the current CPU's
2038 * loaded magazine, just put the object there and return.
2039 */
2040 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2041 ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
2042 ccp->cc_free++;
2043 (void) mutex_unlock(&ccp->cc_lock);
2044 return;
2045 }
2046
2047 /*
2048 * The loaded magazine is full. If the previously loaded
2049 * magazine was empty, exchange them and try again.
2050 */
2051 if (ccp->cc_prounds == 0) {
2052 umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2053 continue;
2054 }
2055
2056 /*
2057 * If the magazine layer is disabled, break out now.
2058 */
2059 if (ccp->cc_magsize == 0)
2060 break;
2061
2062 /*
2063 * Try to get an empty magazine from the depot.
2064 */
2065 emp = umem_depot_alloc(cp, &cp->cache_empty);
2066 if (emp != NULL) {
2067 if (ccp->cc_ploaded != NULL)
2068 umem_depot_free(cp, &cp->cache_full,
2069 ccp->cc_ploaded);
2070 umem_cpu_reload(ccp, emp, 0);
2071 continue;
2072 }
2073
2074 /*
2075 * There are no empty magazines in the depot,
2076 * so try to allocate a new one. We must drop all locks
2077 * across umem_cache_alloc() because lower layers may
2078 * attempt to allocate from this cache.
2079 */
2080 mtp = cp->cache_magtype;
2081 (void) mutex_unlock(&ccp->cc_lock);
2082 emp = _umem_cache_alloc(mtp->mt_cache, UMEM_DEFAULT);
2083 (void) mutex_lock(&ccp->cc_lock);
2084
2085 if (emp != NULL) {
2086 /*
2087 * We successfully allocated an empty magazine.
2088 * However, we had to drop ccp->cc_lock to do it,
2089 * so the cache's magazine size may have changed.
2090 * If so, free the magazine and try again.
2091 */
2092 if (ccp->cc_magsize != mtp->mt_magsize) {
2093 (void) mutex_unlock(&ccp->cc_lock);
2094 _umem_cache_free(mtp->mt_cache, emp);
2095 (void) mutex_lock(&ccp->cc_lock);
2096 continue;
2097 }
2098
2099 /*
2100 * We got a magazine of the right size. Add it to
2101 * the depot and try the whole dance again.
2102 */
2103 umem_depot_free(cp, &cp->cache_empty, emp);
2104 continue;
2105 }
2106
2107 /*
2108 * We couldn't allocate an empty magazine,
2109 * so fall through to the slab layer.
2110 */
2111 break;
2112 }
2113 (void) mutex_unlock(&ccp->cc_lock);
2114
2115 /*
2116 * We couldn't free our constructed object to the magazine layer,
2117 * so apply its destructor and free it to the slab layer.
2118 * Note that if UMF_BUFTAG is in effect, umem_cache_free_debug()
2119 * will have already applied the destructor.
2120 */
2121 if (!(cp->cache_flags & UMF_BUFTAG) && cp->cache_destructor != NULL)
2122 cp->cache_destructor(buf, cp->cache_private);
2123
2124 umem_slab_free(cp, buf);
2125 }
2126
2127 #pragma weak umem_zalloc = _umem_zalloc
2128 void *
2129 _umem_zalloc(size_t size, int umflag)
2130 {
2131 size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
2132 void *buf;
2133
2134 retry:
2135 if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
2136 umem_cache_t *cp = umem_alloc_table[index];
2137 buf = _umem_cache_alloc(cp, umflag);
2138 if (buf != NULL) {
2139 if (cp->cache_flags & UMF_BUFTAG) {
2140 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
2141 ((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE;
2142 ((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size);
2143 }
2144 bzero(buf, size);
2145 } else if (umem_alloc_retry(cp, umflag))
2146 goto retry;
2147 } else {
2148 buf = _umem_alloc(size, umflag); /* handles failure */
2149 if (buf != NULL)
2150 bzero(buf, size);
2151 }
2152 return (buf);
2153 }
2154
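/*
 * umem_alloc() entry point.  Sizes up to UMEM_MAXBUF are satisfied from the
 * cache at umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT]; larger requests
 * go to the umem_oversize arena (and a zero-byte request returns NULL).
 * Under UMF_BUFTAG, the byte just past the requested size is set to
 * UMEM_REDZONE_BYTE and the requested size is encoded in the buftag so that
 * umem_free() can verify both.
 */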
2155 #pragma weak umem_alloc = _umem_alloc
2156 void *
2157 _umem_alloc(size_t size, int umflag)
2158 {
2159 size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
2160 void *buf;
2161 umem_alloc_retry:
2162 if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
2163 umem_cache_t *cp = umem_alloc_table[index];
2164 buf = _umem_cache_alloc(cp, umflag);
2165 if ((cp->cache_flags & UMF_BUFTAG) && buf != NULL) {
2166 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
2167 ((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE;
2168 ((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size);
2169 }
2170 if (buf == NULL && umem_alloc_retry(cp, umflag))
2171 goto umem_alloc_retry;
2172 return (buf);
2173 }
2174 if (size == 0)
2175 return (NULL);
2176 if (umem_oversize_arena == NULL) {
2177 if (umem_init())
2178 ASSERT(umem_oversize_arena != NULL);
2179 else
2180 return (NULL);
2181 }
2182 buf = vmem_alloc(umem_oversize_arena, size, UMEM_VMFLAGS(umflag));
2183 if (buf == NULL) {
2184 umem_log_event(umem_failure_log, NULL, NULL, (void *)size);
2185 if (umem_alloc_retry(NULL, umflag))
2186 goto umem_alloc_retry;
2187 }
2188 return (buf);
2189 }
2190
2191 #pragma weak umem_alloc_align = _umem_alloc_align
2192 void *
2193 _umem_alloc_align(size_t size, size_t align, int umflag)
2194 {
2195 void *buf;
2196
2197 if (size == 0)
2198 return (NULL);
2199 if ((align & (align - 1)) != 0)
2200 return (NULL);
2201 if (align < UMEM_ALIGN)
2202 align = UMEM_ALIGN;
2203
2204 umem_alloc_align_retry:
2205 if (umem_memalign_arena == NULL) {
2206 if (umem_init())
2207 ASSERT(umem_memalign_arena != NULL);
2208 else
2209 return (NULL);
2210 }
2211 buf = vmem_xalloc(umem_memalign_arena, size, align, 0, 0, NULL, NULL,
2212 UMEM_VMFLAGS(umflag));
2213 if (buf == NULL) {
2214 umem_log_event(umem_failure_log, NULL, NULL, (void *)size);
2215 if (umem_alloc_retry(NULL, umflag))
2216 goto umem_alloc_align_retry;
2217 }
2218 return (buf);
2219 }
2220
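/*
 * umem_free() entry point.  For cache-backed sizes under UMF_BUFTAG, the
 * encoded size in the buftag and the redzone byte written by umem_alloc()
 * are checked first: a stale free pattern in the buffer reports a duplicate
 * free, a valid-but-different encoded size reports a bad size, and anything
 * else reports a redzone violation.  Oversize buffers go straight back to
 * the umem_oversize arena.
 */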
2221 #pragma weak umem_free = _umem_free
2222 void
2223 _umem_free(void *buf, size_t size)
2224 {
2225 size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
2226
2227 if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
2228 umem_cache_t *cp = umem_alloc_table[index];
2229 if (cp->cache_flags & UMF_BUFTAG) {
2230 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
2231 uint32_t *ip = (uint32_t *)btp;
2232 if (ip[1] != UMEM_SIZE_ENCODE(size)) {
2233 if (*(uint64_t *)buf == UMEM_FREE_PATTERN) {
2234 umem_error(UMERR_DUPFREE, cp, buf);
2235 return;
2236 }
2237 if (UMEM_SIZE_VALID(ip[1])) {
2238 ip[0] = UMEM_SIZE_ENCODE(size);
2239 umem_error(UMERR_BADSIZE, cp, buf);
2240 } else {
2241 umem_error(UMERR_REDZONE, cp, buf);
2242 }
2243 return;
2244 }
2245 if (((uint8_t *)buf)[size] != UMEM_REDZONE_BYTE) {
2246 umem_error(UMERR_REDZONE, cp, buf);
2247 return;
2248 }
2249 btp->bt_redzone = UMEM_REDZONE_PATTERN;
2250 }
2251 _umem_cache_free(cp, buf);
2252 } else {
2253 if (buf == NULL && size == 0)
2254 return;
2255 vmem_free(umem_oversize_arena, buf, size);
2256 }
2257 }
2258
2259 #pragma weak umem_free_align = _umem_free_align
2260 void
2261 _umem_free_align(void *buf, size_t size)
2262 {
2263 if (buf == NULL && size == 0)
2264 return;
2265 vmem_xfree(umem_memalign_arena, buf, size);
2266 }
2267
2268 static void *
2269 umem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
2270 {
2271 size_t realsize = size + vmp->vm_quantum;
2272
2273 /*
2274 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
2275 * vm_quantum will cause integer wraparound. Check for this, and
2276 * blow off the firewall page in this case. Note that such a
2277 * giant allocation (the entire address space) can never be
2278 * satisfied, so it will either fail immediately (VM_NOSLEEP)
2279 * or sleep forever (VM_SLEEP). Thus, there is no need for a
2280 * corresponding check in umem_firewall_va_free().
2281 */
2282 if (realsize < size)
2283 realsize = size;
2284
2285 return (vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT));
2286 }
2287
2288 static void
2289 umem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
2290 {
2291 vmem_free(vmp, addr, size + vmp->vm_quantum);
2292 }
2293
2294 /*
2295 * Reclaim all unused memory from a cache.
2296 */
2297 static void
2298 umem_cache_reap(umem_cache_t *cp)
2299 {
2300 /*
2301 * Ask the cache's owner to free some memory if possible.
2302 * The idea is to handle things like the inode cache, which
2303 * typically sits on a bunch of memory that it doesn't truly
2304 * *need*. Reclaim policy is entirely up to the owner; this
2305 * callback is just an advisory plea for help.
2306 */
2307 if (cp->cache_reclaim != NULL)
2308 cp->cache_reclaim(cp->cache_private);
2309
2310 umem_depot_ws_reap(cp);
2311 }
2312
2313 /*
2314 * Purge all magazines from a cache and set its magazine limit to zero.
2315 * All calls are serialized by being done by the update thread, except for
2316 * the final call from umem_cache_destroy().
2317 */
2318 static void
2319 umem_cache_magazine_purge(umem_cache_t *cp)
2320 {
2321 umem_cpu_cache_t *ccp;
2322 umem_magazine_t *mp, *pmp;
2323 int rounds, prounds, cpu_seqid;
2324
2325 ASSERT(cp->cache_next == NULL || IN_UPDATE());
2326
2327 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
2328 ccp = &cp->cache_cpu[cpu_seqid];
2329
2330 (void) mutex_lock(&ccp->cc_lock);
2331 mp = ccp->cc_loaded;
2332 pmp = ccp->cc_ploaded;
2333 rounds = ccp->cc_rounds;
2334 prounds = ccp->cc_prounds;
2335 ccp->cc_loaded = NULL;
2336 ccp->cc_ploaded = NULL;
2337 ccp->cc_rounds = -1;
2338 ccp->cc_prounds = -1;
2339 ccp->cc_magsize = 0;
2340 (void) mutex_unlock(&ccp->cc_lock);
2341
2342 if (mp)
2343 umem_magazine_destroy(cp, mp, rounds);
2344 if (pmp)
2345 umem_magazine_destroy(cp, pmp, prounds);
2346 }
2347
2348 /*
2349 * Updating the working set statistics twice in a row has the
2350 * effect of setting the working set size to zero, so everything
2351 * is eligible for reaping.
2352 */
2353 umem_depot_ws_update(cp);
2354 umem_depot_ws_update(cp);
2355
2356 umem_depot_ws_reap(cp);
2357 }
2358
2359 /*
2360 * Enable per-cpu magazines on a cache.
2361 */
2362 static void
2363 umem_cache_magazine_enable(umem_cache_t *cp)
2364 {
2365 int cpu_seqid;
2366
2367 if (cp->cache_flags & UMF_NOMAGAZINE)
2368 return;
2369
2370 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
2371 umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
2372 (void) mutex_lock(&ccp->cc_lock);
2373 ccp->cc_magsize = cp->cache_magtype->mt_magsize;
2374 (void) mutex_unlock(&ccp->cc_lock);
2375 }
2376
2377 }
2378
2379 /*
2380 * Recompute a cache's magazine size. The trade-off is that larger magazines
2381 * provide a higher transfer rate with the depot, while smaller magazines
2382 * reduce memory consumption. Magazine resizing is an expensive operation;
2383 * it should not be done frequently.
2384 *
2385 * Changes to the magazine size are serialized by having only one thread
2386 * (the update thread) perform updates.
2387 *
2388 * Note: at present this only grows the magazine size. It might be useful
2389 * to allow shrinkage too.
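 *
 * After stepping up to the next magazine type, cache_depot_contention_prev
 * is set INT_MAX beyond the current contention count, which effectively
 * keeps the resize trigger in umem_cache_update() from firing again until a
 * full update interval of fresh contention data has been collected.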
2390 */
2391 static void
2392 umem_cache_magazine_resize(umem_cache_t *cp)
2393 {
2394 umem_magtype_t *mtp = cp->cache_magtype;
2395
2396 ASSERT(IN_UPDATE());
2397
2398 if (cp->cache_chunksize < mtp->mt_maxbuf) {
2399 umem_cache_magazine_purge(cp);
2400 (void) mutex_lock(&cp->cache_depot_lock);
2401 cp->cache_magtype = ++mtp;
2402 cp->cache_depot_contention_prev =
2403 cp->cache_depot_contention + INT_MAX;
2404 (void) mutex_unlock(&cp->cache_depot_lock);
2405 umem_cache_magazine_enable(cp);
2406 }
2407 }
2408
2409 /*
2410 * Rescale a cache's hash table, so that the table size is roughly the
2411 * cache size. We want the average lookup time to be extremely small.
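 *
 * The target size is a power of two comparable to cache_buftotal; rescaling
 * is skipped unless the new size differs from the current size by more than
 * a factor of two, so a cache oscillating around a power-of-two boundary
 * does not thrash its table.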
2412 */
2413 static void
2414 umem_hash_rescale(umem_cache_t *cp)
2415 {
2416 umem_bufctl_t **old_table, **new_table, *bcp;
2417 size_t old_size, new_size, h;
2418
2419 ASSERT(IN_UPDATE());
2420
2421 new_size = MAX(UMEM_HASH_INITIAL,
2422 1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
2423 old_size = cp->cache_hash_mask + 1;
2424
2425 if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
2426 return;
2427
2428 new_table = vmem_alloc(umem_hash_arena, new_size * sizeof (void *),
2429 VM_NOSLEEP);
2430 if (new_table == NULL)
2431 return;
2432 bzero(new_table, new_size * sizeof (void *));
2433
2434 (void) mutex_lock(&cp->cache_lock);
2435
2436 old_size = cp->cache_hash_mask + 1;
2437 old_table = cp->cache_hash_table;
2438
2439 cp->cache_hash_mask = new_size - 1;
2440 cp->cache_hash_table = new_table;
2441 cp->cache_rescale++;
2442
2443 for (h = 0; h < old_size; h++) {
2444 bcp = old_table[h];
2445 while (bcp != NULL) {
2446 void *addr = bcp->bc_addr;
2447 umem_bufctl_t *next_bcp = bcp->bc_next;
2448 umem_bufctl_t **hash_bucket = UMEM_HASH(cp, addr);
2449 bcp->bc_next = *hash_bucket;
2450 *hash_bucket = bcp;
2451 bcp = next_bcp;
2452 }
2453 }
2454
2455 (void) mutex_unlock(&cp->cache_lock);
2456
2457 vmem_free(umem_hash_arena, old_table, old_size * sizeof (void *));
2458 }
2459
2460 /*
2461 * Perform periodic maintenance on a cache: hash rescaling,
2462 * depot working-set update, and magazine resizing.
2463 */
2464 void
2465 umem_cache_update(umem_cache_t *cp)
2466 {
2467 int update_flags = 0;
2468
2469 ASSERT(MUTEX_HELD(&umem_cache_lock));
2470
2471 /*
2472 * If the cache has become much larger or smaller than its hash table,
2473 * fire off a request to rescale the hash table.
2474 */
2475 (void) mutex_lock(&cp->cache_lock);
2476
2477 if ((cp->cache_flags & UMF_HASH) &&
2478 (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
2479 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
2480 cp->cache_hash_mask > UMEM_HASH_INITIAL)))
2481 update_flags |= UMU_HASH_RESCALE;
2482
2483 (void) mutex_unlock(&cp->cache_lock);
2484
2485 /*
2486 * Update the depot working set statistics.
2487 */
2488 umem_depot_ws_update(cp);
2489
2490 /*
2491 * If there's a lot of contention in the depot,
2492 * increase the magazine size.
2493 */
2494 (void) mutex_lock(&cp->cache_depot_lock);
2495
2496 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
2497 (int)(cp->cache_depot_contention -
2498 cp->cache_depot_contention_prev) > umem_depot_contention)
2499 update_flags |= UMU_MAGAZINE_RESIZE;
2500
2501 cp->cache_depot_contention_prev = cp->cache_depot_contention;
2502
2503 (void) mutex_unlock(&cp->cache_depot_lock);
2504
2505 if (update_flags)
2506 umem_add_update(cp, update_flags);
2507 }
2508
2509 /*
2510 * Runs all pending updates.
2511 *
2512 * The update lock must be held on entrance, and will be held on exit.
2513 */
2514 void
2515 umem_process_updates(void)
2516 {
2517 ASSERT(MUTEX_HELD(&umem_update_lock));
2518
2519 while (umem_null_cache.cache_unext != &umem_null_cache) {
2520 int notify = 0;
2521 umem_cache_t *cp = umem_null_cache.cache_unext;
2522
2523 cp->cache_uprev->cache_unext = cp->cache_unext;
2524 cp->cache_unext->cache_uprev = cp->cache_uprev;
2525 cp->cache_uprev = cp->cache_unext = NULL;
2526
2527 ASSERT(!(cp->cache_uflags & UMU_ACTIVE));
2528
2529 while (cp->cache_uflags) {
2530 int uflags = (cp->cache_uflags |= UMU_ACTIVE);
2531 (void) mutex_unlock(&umem_update_lock);
2532
2533 /*
2534 * The order here is important. Each step can speed up
2535 * later steps.
2536 */
2537
2538 if (uflags & UMU_HASH_RESCALE)
2539 umem_hash_rescale(cp);
2540
2541 if (uflags & UMU_MAGAZINE_RESIZE)
2542 umem_cache_magazine_resize(cp);
2543
2544 if (uflags & UMU_REAP)
2545 umem_cache_reap(cp);
2546
2547 (void) mutex_lock(&umem_update_lock);
2548
2549 /*
2550 * check if anyone has requested notification
2551 */
2552 if (cp->cache_uflags & UMU_NOTIFY) {
2553 uflags |= UMU_NOTIFY;
2554 notify = 1;
2555 }
2556 cp->cache_uflags &= ~uflags;
2557 }
2558 if (notify)
2559 (void) cond_broadcast(&umem_update_cv);
2560 }
2561 }
2562
2563 #ifndef UMEM_STANDALONE
2564 static void
2565 umem_st_update(void)
2566 {
2567 ASSERT(MUTEX_HELD(&umem_update_lock));
2568 ASSERT(umem_update_thr == 0 && umem_st_update_thr == 0);
2569
2570 umem_st_update_thr = thr_self();
2571
2572 (void) mutex_unlock(&umem_update_lock);
2573
2574 vmem_update(NULL);
2575 umem_cache_applyall(umem_cache_update);
2576
2577 (void) mutex_lock(&umem_update_lock);
2578
2579 umem_process_updates(); /* does all of the requested work */
2580
2581 umem_reap_next = gethrtime() +
2582 (hrtime_t)umem_reap_interval * NANOSEC;
2583
2584 umem_reaping = UMEM_REAP_DONE;
2585
2586 umem_st_update_thr = 0;
2587 }
2588 #endif
2589
2590 /*
2591 * Reclaim all unused memory from all caches. Called from vmem when memory
2592 * gets tight. Must be called with no locks held.
2593 *
2594 * This just requests a reap on all caches, and notifies the update thread.
2595 */
2596 void
2597 umem_reap(void)
2598 {
2599 #ifndef UMEM_STANDALONE
2600 extern int __nthreads(void);
2601 #endif
2602
2603 if (umem_ready != UMEM_READY || umem_reaping != UMEM_REAP_DONE ||
2604 gethrtime() < umem_reap_next)
2605 return;
2606
2607 (void) mutex_lock(&umem_update_lock);
2608
2609 if (umem_reaping != UMEM_REAP_DONE || gethrtime() < umem_reap_next) {
2610 (void) mutex_unlock(&umem_update_lock);
2611 return;
2612 }
2613 umem_reaping = UMEM_REAP_ADDING; /* lock out other reaps */
2614
2615 (void) mutex_unlock(&umem_update_lock);
2616
2617 umem_updateall(UMU_REAP);
2618
2619 (void) mutex_lock(&umem_update_lock);
2620
2621 umem_reaping = UMEM_REAP_ACTIVE;
2622
2623 /* Standalone is single-threaded */
2624 #ifndef UMEM_STANDALONE
2625 if (umem_update_thr == 0) {
2626 /*
2627 * The update thread does not exist. If the process is
2628 * multi-threaded, create it. If not, or the creation fails,
2629 * do the update processing inline.
2630 */
2631 ASSERT(umem_st_update_thr == 0);
2632
2633 if (__nthreads() <= 1 || umem_create_update_thread() == 0)
2634 umem_st_update();
2635 }
2636
2637 (void) cond_broadcast(&umem_update_cv); /* wake up the update thread */
2638 #endif
2639
2640 (void) mutex_unlock(&umem_update_lock);
2641 }
2642
2643 umem_cache_t *
2644 umem_cache_create(
2645 char *name, /* descriptive name for this cache */
2646 size_t bufsize, /* size of the objects it manages */
2647 size_t align, /* required object alignment */
2648 umem_constructor_t *constructor, /* object constructor */
2649 umem_destructor_t *destructor, /* object destructor */
2650 umem_reclaim_t *reclaim, /* memory reclaim callback */
2651 void *private, /* pass-thru arg for constr/destr/reclaim */
2652 vmem_t *vmp, /* vmem source for slab allocation */
2653 int cflags) /* cache creation flags */
2654 {
2655 int cpu_seqid;
2656 size_t chunksize;
2657 umem_cache_t *cp, *cnext, *cprev;
2658 umem_magtype_t *mtp;
2659 size_t csize;
2660 size_t phase;
2661
2662 /*
2663 * The init thread is allowed to create internal and quantum caches.
2664 *
2665 * Other threads must wait until initialization is complete.
2666 */
2667 if (umem_init_thr == thr_self())
2668 ASSERT((cflags & (UMC_INTERNAL | UMC_QCACHE)) != 0);
2669 else {
2670 ASSERT(!(cflags & UMC_INTERNAL));
2671 if (umem_ready != UMEM_READY && umem_init() == 0) {
2672 errno = EAGAIN;
2673 return (NULL);
2674 }
2675 }
2676
2677 csize = UMEM_CACHE_SIZE(umem_max_ncpus);
2678 phase = P2NPHASE(csize, UMEM_CPU_CACHE_SIZE);
2679
2680 if (vmp == NULL)
2681 vmp = umem_default_arena;
2682
2683 ASSERT(P2PHASE(phase, UMEM_ALIGN) == 0);
2684
2685 /*
2686 * Check that the arguments are reasonable
2687 */
2688 if ((align & (align - 1)) != 0 || align > vmp->vm_quantum ||
2689 ((cflags & UMC_NOHASH) && (cflags & UMC_NOTOUCH)) ||
2690 name == NULL || bufsize == 0) {
2691 errno = EINVAL;
2692 return (NULL);
2693 }
2694
2695 /*
2696 * If align == 0, we set it to the minimum required alignment.
2697 *
2698 * If align < UMEM_ALIGN, we round it up to UMEM_ALIGN, unless
2699 * UMC_NOTOUCH was passed.
2700 */
2701 if (align == 0) {
2702 if (P2ROUNDUP(bufsize, UMEM_ALIGN) >= UMEM_SECOND_ALIGN)
2703 align = UMEM_SECOND_ALIGN;
2704 else
2705 align = UMEM_ALIGN;
2706 } else if (align < UMEM_ALIGN && (cflags & UMC_NOTOUCH) == 0)
2707 align = UMEM_ALIGN;
2708
2709
2710 /*
2711 * Get a umem_cache structure. We arrange that cp->cache_cpu[]
2712 * is aligned on a UMEM_CPU_CACHE_SIZE boundary to prevent
2713 * false sharing of per-CPU data.
2714 */
2715 cp = vmem_xalloc(umem_cache_arena, csize, UMEM_CPU_CACHE_SIZE, phase,
2716 0, NULL, NULL, VM_NOSLEEP);
2717
2718 if (cp == NULL) {
2719 errno = EAGAIN;
2720 return (NULL);
2721 }
2722
2723 bzero(cp, csize);
2724
2725 (void) mutex_lock(&umem_flags_lock);
2726 if (umem_flags & UMF_RANDOMIZE)
2727 umem_flags = (((umem_flags | ~UMF_RANDOM) + 1) & UMF_RANDOM) |
2728 UMF_RANDOMIZE;
2729 cp->cache_flags = umem_flags | (cflags & UMF_DEBUG);
2730 (void) mutex_unlock(&umem_flags_lock);
2731
2732 /*
2733 * Make sure all the various flags are reasonable.
2734 */
2735 if (cp->cache_flags & UMF_LITE) {
2736 if (bufsize >= umem_lite_minsize &&
2737 align <= umem_lite_maxalign &&
2738 P2PHASE(bufsize, umem_lite_maxalign) != 0) {
2739 cp->cache_flags |= UMF_BUFTAG;
2740 cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);
2741 } else {
2742 cp->cache_flags &= ~UMF_DEBUG;
2743 }
2744 }
2745
2746 if ((cflags & UMC_QCACHE) && (cp->cache_flags & UMF_AUDIT))
2747 cp->cache_flags |= UMF_NOMAGAZINE;
2748
2749 if (cflags & UMC_NODEBUG)
2750 cp->cache_flags &= ~UMF_DEBUG;
2751
2752 if (cflags & UMC_NOTOUCH)
2753 cp->cache_flags &= ~UMF_TOUCH;
2754
2755 if (cflags & UMC_NOHASH)
2756 cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);
2757
2758 if (cflags & UMC_NOMAGAZINE)
2759 cp->cache_flags |= UMF_NOMAGAZINE;
2760
2761 if ((cp->cache_flags & UMF_AUDIT) && !(cflags & UMC_NOTOUCH))
2762 cp->cache_flags |= UMF_REDZONE;
2763
2764 if ((cp->cache_flags & UMF_BUFTAG) && bufsize >= umem_minfirewall &&
2765 !(cp->cache_flags & UMF_LITE) && !(cflags & UMC_NOHASH))
2766 cp->cache_flags |= UMF_FIREWALL;
2767
2768 if (vmp != umem_default_arena || umem_firewall_arena == NULL)
2769 cp->cache_flags &= ~UMF_FIREWALL;
2770
2771 if (cp->cache_flags & UMF_FIREWALL) {
2772 cp->cache_flags &= ~UMF_BUFTAG;
2773 cp->cache_flags |= UMF_NOMAGAZINE;
2774 ASSERT(vmp == umem_default_arena);
2775 vmp = umem_firewall_arena;
2776 }
2777
2778 /*
2779 * Set cache properties.
2780 */
2781 (void) strncpy(cp->cache_name, name, sizeof (cp->cache_name) - 1);
2782 cp->cache_bufsize = bufsize;
2783 cp->cache_align = align;
2784 cp->cache_constructor = constructor;
2785 cp->cache_destructor = destructor;
2786 cp->cache_reclaim = reclaim;
2787 cp->cache_private = private;
2788 cp->cache_arena = vmp;
2789 cp->cache_cflags = cflags;
2790 cp->cache_cpu_mask = umem_cpu_mask;
2791
2792 /*
2793 * Determine the chunk size.
2794 */
2795 chunksize = bufsize;
2796
2797 if (align >= UMEM_ALIGN) {
2798 chunksize = P2ROUNDUP(chunksize, UMEM_ALIGN);
2799 cp->cache_bufctl = chunksize - UMEM_ALIGN;
2800 }
2801
2802 if (cp->cache_flags & UMF_BUFTAG) {
2803 cp->cache_bufctl = chunksize;
2804 cp->cache_buftag = chunksize;
2805 chunksize += sizeof (umem_buftag_t);
2806 }
2807
2808 if (cp->cache_flags & UMF_DEADBEEF) {
2809 cp->cache_verify = MIN(cp->cache_buftag, umem_maxverify);
2810 if (cp->cache_flags & UMF_LITE)
2811 cp->cache_verify = MIN(cp->cache_verify, UMEM_ALIGN);
2812 }
2813
2814 cp->cache_contents = MIN(cp->cache_bufctl, umem_content_maxsave);
2815
2816 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
2817
2818 if (chunksize < bufsize) {
2819 errno = ENOMEM;
2820 goto fail;
2821 }
2822
2823 /*
2824 * Now that we know the chunk size, determine the optimal slab size.
2825 */
2826 if (vmp == umem_firewall_arena) {
2827 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
2828 cp->cache_mincolor = cp->cache_slabsize - chunksize;
2829 cp->cache_maxcolor = cp->cache_mincolor;
2830 cp->cache_flags |= UMF_HASH;
2831 ASSERT(!(cp->cache_flags & UMF_BUFTAG));
2832 } else if ((cflags & UMC_NOHASH) || (!(cflags & UMC_NOTOUCH) &&
2833 !(cp->cache_flags & UMF_AUDIT) &&
2834 chunksize < vmp->vm_quantum / UMEM_VOID_FRACTION)) {
2835 cp->cache_slabsize = vmp->vm_quantum;
2836 cp->cache_mincolor = 0;
2837 cp->cache_maxcolor =
2838 (cp->cache_slabsize - sizeof (umem_slab_t)) % chunksize;
2839
2840 if (chunksize + sizeof (umem_slab_t) > cp->cache_slabsize) {
2841 errno = EINVAL;
2842 goto fail;
2843 }
2844 ASSERT(!(cp->cache_flags & UMF_AUDIT));
2845 } else {
2846 size_t chunks, waste, slabsize;
2847 size_t minwaste = LONG_MAX;
2848 size_t bestfit = SIZE_MAX;
2849
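/*
 * Search for the slab size (a multiple of the arena quantum, holding up
 * to UMEM_VOID_FRACTION chunks) that minimizes the per-chunk waste left
 * by the final partial chunk.
 */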
2850 for (chunks = 1; chunks <= UMEM_VOID_FRACTION; chunks++) {
2851 slabsize = P2ROUNDUP(chunksize * chunks,
2852 vmp->vm_quantum);
2853 /*
2854 * check for overflow
2855 */
2856 if ((slabsize / chunks) < chunksize) {
2857 errno = ENOMEM;
2858 goto fail;
2859 }
2860 chunks = slabsize / chunksize;
2861 waste = (slabsize % chunksize) / chunks;
2862 if (waste < minwaste) {
2863 minwaste = waste;
2864 bestfit = slabsize;
2865 }
2866 }
2867 if (cflags & UMC_QCACHE)
2868 bestfit = MAX(1 << highbit(3 * vmp->vm_qcache_max), 64);
2869 if (bestfit == SIZE_MAX) {
2870 errno = ENOMEM;
2871 goto fail;
2872 }
2873 cp->cache_slabsize = bestfit;
2874 cp->cache_mincolor = 0;
2875 cp->cache_maxcolor = bestfit % chunksize;
2876 cp->cache_flags |= UMF_HASH;
2877 }
2878
2879 if (cp->cache_flags & UMF_HASH) {
2880 ASSERT(!(cflags & UMC_NOHASH));
2881 cp->cache_bufctl_cache = (cp->cache_flags & UMF_AUDIT) ?
2882 umem_bufctl_audit_cache : umem_bufctl_cache;
2883 }
2884
2885 if (cp->cache_maxcolor >= vmp->vm_quantum)
2886 cp->cache_maxcolor = vmp->vm_quantum - 1;
2887
2888 cp->cache_color = cp->cache_mincolor;
2889
2890 /*
2891 * Initialize the rest of the slab layer.
2892 */
2893 (void) mutex_init(&cp->cache_lock, USYNC_THREAD, NULL);
2894
2895 cp->cache_freelist = &cp->cache_nullslab;
2896 cp->cache_nullslab.slab_cache = cp;
2897 cp->cache_nullslab.slab_refcnt = -1;
2898 cp->cache_nullslab.slab_next = &cp->cache_nullslab;
2899 cp->cache_nullslab.slab_prev = &cp->cache_nullslab;
2900
2901 if (cp->cache_flags & UMF_HASH) {
2902 cp->cache_hash_table = vmem_alloc(umem_hash_arena,
2903 UMEM_HASH_INITIAL * sizeof (void *), VM_NOSLEEP);
2904 if (cp->cache_hash_table == NULL) {
2905 errno = EAGAIN;
2906 goto fail_lock;
2907 }
2908 bzero(cp->cache_hash_table,
2909 UMEM_HASH_INITIAL * sizeof (void *));
2910 cp->cache_hash_mask = UMEM_HASH_INITIAL - 1;
2911 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
2912 }
2913
2914 /*
2915 * Initialize the depot.
2916 */
2917 (void) mutex_init(&cp->cache_depot_lock, USYNC_THREAD, NULL);
2918
2919 for (mtp = umem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
2920 continue;
2921
2922 cp->cache_magtype = mtp;
2923
2924 /*
2925 * Initialize the CPU layer.
2926 */
2927 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
2928 umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
2929 (void) mutex_init(&ccp->cc_lock, USYNC_THREAD, NULL);
2930 ccp->cc_flags = cp->cache_flags;
2931 ccp->cc_rounds = -1;
2932 ccp->cc_prounds = -1;
2933 }
2934
2935 /*
2936 * Add the cache to the global list. This makes it visible
2937 * to umem_update(), so the cache must be ready for business.
2938 */
2939 (void) mutex_lock(&umem_cache_lock);
2940 cp->cache_next = cnext = &umem_null_cache;
2941 cp->cache_prev = cprev = umem_null_cache.cache_prev;
2942 cnext->cache_prev = cp;
2943 cprev->cache_next = cp;
2944 (void) mutex_unlock(&umem_cache_lock);
2945
2946 if (umem_ready == UMEM_READY)
2947 umem_cache_magazine_enable(cp);
2948
2949 return (cp);
2950
2951 fail_lock:
2952 (void) mutex_destroy(&cp->cache_lock);
2953 fail:
2954 vmem_xfree(umem_cache_arena, cp, csize);
2955 return (NULL);
2956 }
2957
2958 void
2959 umem_cache_destroy(umem_cache_t *cp)
2960 {
2961 int cpu_seqid;
2962
2963 /*
2964 * Remove the cache from the global cache list so that no new updates
2965 * will be scheduled on its behalf, wait for any pending tasks to
2966 * complete, purge the cache, and then destroy it.
2967 */
2968 (void) mutex_lock(&umem_cache_lock);
2969 cp->cache_prev->cache_next = cp->cache_next;
2970 cp->cache_next->cache_prev = cp->cache_prev;
2971 cp->cache_prev = cp->cache_next = NULL;
2972 (void) mutex_unlock(&umem_cache_lock);
2973
2974 umem_remove_updates(cp);
2975
2976 umem_cache_magazine_purge(cp);
2977
2978 (void) mutex_lock(&cp->cache_lock);
2979 if (cp->cache_buftotal != 0)
2980 log_message("umem_cache_destroy: '%s' (%p) not empty\n",
2981 cp->cache_name, (void *)cp);
2982 cp->cache_reclaim = NULL;
2983 /*
2984 * The cache is now dead. There should be no further activity.
2985 * We enforce this by setting land mines in the constructor and
2986 * destructor routines that induce a segmentation fault if invoked.
2987 */
2988 cp->cache_constructor = (umem_constructor_t *)1;
2989 cp->cache_destructor = (umem_destructor_t *)2;
2990 (void) mutex_unlock(&cp->cache_lock);
2991
2992 if (cp->cache_hash_table != NULL)
2993 vmem_free(umem_hash_arena, cp->cache_hash_table,
2994 (cp->cache_hash_mask + 1) * sizeof (void *));
2995
2996 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++)
2997 (void) mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
2998
2999 (void) mutex_destroy(&cp->cache_depot_lock);
3000 (void) mutex_destroy(&cp->cache_lock);
3001
3002 vmem_free(umem_cache_arena, cp, UMEM_CACHE_SIZE(umem_max_ncpus));
3003 }
3004
3005 void
3006 umem_alloc_sizes_clear(void)
3007 {
3008 int i;
3009
3010 umem_alloc_sizes[0] = UMEM_MAXBUF;
3011 for (i = 1; i < NUM_ALLOC_SIZES; i++)
3012 umem_alloc_sizes[i] = 0;
3013 }
3014
3015 void
3016 umem_alloc_sizes_add(size_t size_arg)
3017 {
3018 int i, j;
3019 size_t size = size_arg;
3020
3021 if (size == 0) {
3022 log_message("size_add: cannot add zero-sized cache\n");
3024 return;
3025 }
3026
3027 if (size > UMEM_MAXBUF) {
3028 log_message("size_add: %ld > %d, cannot add\n", size,
3029 UMEM_MAXBUF);
3030 return;
3031 }
3032
3033 if (umem_alloc_sizes[NUM_ALLOC_SIZES - 1] != 0) {
3034 log_message("size_add: no space in alloc_table for %ld\n",
3035 size);
3036 return;
3037 }
3038
3039 if (P2PHASE(size, UMEM_ALIGN) != 0) {
3040 size = P2ROUNDUP(size, UMEM_ALIGN);
3041 log_message("size_add: rounding %ld up to %ld\n", size_arg,
3042 size);
3043 }
3044
3045 for (i = 0; i < NUM_ALLOC_SIZES; i++) {
3046 int cur = umem_alloc_sizes[i];
3047 if (cur == size) {
3048 log_message("size_add: %ld already in table\n",
3049 size);
3050 return;
3051 }
3052 if (cur > size)
3053 break;
3054 }
3055
3056 for (j = NUM_ALLOC_SIZES - 1; j > i; j--)
3057 umem_alloc_sizes[j] = umem_alloc_sizes[j-1];
3058 umem_alloc_sizes[i] = size;
3059 }
3060
3061 void
3062 umem_alloc_sizes_remove(size_t size)
3063 {
3064 int i;
3065
3066 if (size == UMEM_MAXBUF) {
3067 log_message("size_remove: cannot remove %ld\n", size);
3068 return;
3069 }
3070
3071 for (i = 0; i < NUM_ALLOC_SIZES; i++) {
3072 int cur = umem_alloc_sizes[i];
3073 if (cur == size)
3074 break;
3075 else if (cur > size || cur == 0) {
3076 log_message("size_remove: %ld not found in table\n",
3077 size);
3078 return;
3079 }
3080 }
3081
3082 for (; i + 1 < NUM_ALLOC_SIZES; i++)
3083 umem_alloc_sizes[i] = umem_alloc_sizes[i+1];
3084 umem_alloc_sizes[i] = 0;
3085 }
3086
3087 /*
3088 * We've been called back from libc to indicate that a thread is terminating
3089 * and that it needs to release the per-thread memory it holds. We are told
3090 * which entry in the thread's tmem array the allocation came from. Currently
3091 * this maps directly to the first n umem caches, which makes the cleanup a
3092 * simple indexing job.
3093 */
3094 static void
3095 umem_cache_tmem_cleanup(void *buf, int entry)
3096 {
3097 size_t size;
3098 umem_cache_t *cp;
3099
3100 size = umem_alloc_sizes[entry];
3101 cp = umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT];
3102 _umem_cache_free(cp, buf);
3103 }
3104
3105 static int
3106 umem_cache_init(void)
3107 {
3108 int i;
3109 size_t size, max_size;
3110 umem_cache_t *cp;
3111 umem_magtype_t *mtp;
3112 char name[UMEM_CACHE_NAMELEN + 1];
3113 umem_cache_t *umem_alloc_caches[NUM_ALLOC_SIZES];
3114
3115 for (i = 0; i < sizeof (umem_magtype) / sizeof (*mtp); i++) {
3116 mtp = &umem_magtype[i];
3117 (void) snprintf(name, sizeof (name), "umem_magazine_%d",
3118 mtp->mt_magsize);
3119 mtp->mt_cache = umem_cache_create(name,
3120 (mtp->mt_magsize + 1) * sizeof (void *),
3121 mtp->mt_align, NULL, NULL, NULL, NULL,
3122 umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
3123 if (mtp->mt_cache == NULL)
3124 return (0);
3125 }
3126
3127 umem_slab_cache = umem_cache_create("umem_slab_cache",
3128 sizeof (umem_slab_t), 0, NULL, NULL, NULL, NULL,
3129 umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
3130
3131 if (umem_slab_cache == NULL)
3132 return (0);
3133
3134 umem_bufctl_cache = umem_cache_create("umem_bufctl_cache",
3135 sizeof (umem_bufctl_t), 0, NULL, NULL, NULL, NULL,
3136 umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
3137
3138 if (umem_bufctl_cache == NULL)
3139 return (0);
3140
3141 /*
3142 * The size of the umem_bufctl_audit structure depends upon
3143 * umem_stack_depth. See umem_impl.h for details on the size
3144 * restrictions.
3145 */
3146
3147 size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth);
3148 max_size = UMEM_BUFCTL_AUDIT_MAX_SIZE;
3149
3150 if (size > max_size) { /* too large -- truncate */
3151 int max_frames = UMEM_MAX_STACK_DEPTH;
3152
3153 ASSERT(UMEM_BUFCTL_AUDIT_SIZE_DEPTH(max_frames) <= max_size);
3154
3155 umem_stack_depth = max_frames;
3156 size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth);
3157 }
3158
3159 umem_bufctl_audit_cache = umem_cache_create("umem_bufctl_audit_cache",
3160 size, 0, NULL, NULL, NULL, NULL, umem_internal_arena,
3161 UMC_NOHASH | UMC_INTERNAL);
3162
3163 if (umem_bufctl_audit_cache == NULL)
3164 return (0);
3165
3166 if (vmem_backend & VMEM_BACKEND_MMAP)
3167 umem_va_arena = vmem_create("umem_va",
3168 NULL, 0, pagesize,
3169 vmem_alloc, vmem_free, heap_arena,
3170 8 * pagesize, VM_NOSLEEP);
3171 else
3172 umem_va_arena = heap_arena;
3173
3174 if (umem_va_arena == NULL)
3175 return (0);
3176
3177 umem_default_arena = vmem_create("umem_default",
3178 NULL, 0, pagesize,
3179 heap_alloc, heap_free, umem_va_arena,
3180 0, VM_NOSLEEP);
3181
3182 if (umem_default_arena == NULL)
3183 return (0);
3184
3185 /*
3186 * make sure the umem_alloc table initializer is correct
3187 */
3188 i = sizeof (umem_alloc_table) / sizeof (*umem_alloc_table);
3189 ASSERT(umem_alloc_table[i - 1] == &umem_null_cache);
3190
3191 /*
3192 * Create the default caches to back umem_alloc()
3193 */
3194 for (i = 0; i < NUM_ALLOC_SIZES; i++) {
3195 size_t cache_size = umem_alloc_sizes[i];
3196 size_t align = 0;
3197
3198 if (cache_size == 0)
3199 break; /* 0 terminates the list */
3200
3201 /*
3202 * If they allocate a multiple of the coherency granularity,
3203 * they get a coherency-granularity-aligned address.
3204 */
3205 if (IS_P2ALIGNED(cache_size, 64))
3206 align = 64;
3207 if (IS_P2ALIGNED(cache_size, pagesize))
3208 align = pagesize;
3209 (void) snprintf(name, sizeof (name), "umem_alloc_%lu",
3210 (long)cache_size);
3211
3212 cp = umem_cache_create(name, cache_size, align,
3213 NULL, NULL, NULL, NULL, NULL, UMC_INTERNAL);
3214 if (cp == NULL)
3215 return (0);
3216
3217 umem_alloc_caches[i] = cp;
3218 }
3219
3220 umem_tmem_off = _tmem_get_base();
3221 _tmem_set_cleanup(umem_cache_tmem_cleanup);
3222
3223 #ifndef UMEM_STANDALONE
3224 if (umem_genasm_supported && !(umem_flags & UMF_DEBUG) &&
3225 !(umem_flags & UMF_NOMAGAZINE) &&
3226 umem_ptc_size > 0) {
3227 umem_ptc_enabled = umem_genasm(umem_alloc_sizes,
3228 umem_alloc_caches, i) ? 1 : 0;
3229 }
3230 #else
3231 umem_ptc_enabled = 0;
3232 #endif
3233
3234 /*
3235 * Initialization cannot fail at this point. Make the caches
3236 * visible to umem_alloc() and friends.
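 *
 * Every UMEM_ALIGN-sized bucket of umem_alloc_table up to UMEM_MAXBUF is
 * pointed at the smallest cache that can satisfy requests of that size.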
3237 */
3238 size = UMEM_ALIGN;
3239 for (i = 0; i < NUM_ALLOC_SIZES; i++) {
3240 size_t cache_size = umem_alloc_sizes[i];
3241
3242 if (cache_size == 0)
3243 break; /* 0 terminates the list */
3244
3245 cp = umem_alloc_caches[i];
3246
3247 while (size <= cache_size) {
3248 umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT] = cp;
3249 size += UMEM_ALIGN;
3250 }
3251 }
3252 ASSERT(size - UMEM_ALIGN == UMEM_MAXBUF);
3253 return (1);
3254 }
3255
3256 /*
3257 * umem_startup() is called early on, and must be called explicitly if we're
3258 * the standalone version.
3259 */
3260 #ifdef UMEM_STANDALONE
3261 void
3262 #else
3263 #pragma init(umem_startup)
3264 static void
3265 #endif
3266 umem_startup(caddr_t start, size_t len, size_t pagesize, caddr_t minstack,
3267 caddr_t maxstack)
3268 {
3269 #ifdef UMEM_STANDALONE
3270 int idx;
3271 /* Standalone doesn't fork */
3272 #else
3273 umem_forkhandler_init(); /* register the fork handler */
3274 #endif
3275
3276 #ifdef __lint
3277 /* make lint happy */
3278 minstack = maxstack;
3279 #endif
3280
3281 #ifdef UMEM_STANDALONE
3282 umem_ready = UMEM_READY_STARTUP;
3283 umem_init_env_ready = 0;
3284
3285 umem_min_stack = minstack;
3286 umem_max_stack = maxstack;
3287
3288 nofail_callback = NULL;
3289 umem_slab_cache = NULL;
3290 umem_bufctl_cache = NULL;
3291 umem_bufctl_audit_cache = NULL;
3292 heap_arena = NULL;
3293 heap_alloc = NULL;
3294 heap_free = NULL;
3295 umem_internal_arena = NULL;
3296 umem_cache_arena = NULL;
3297 umem_hash_arena = NULL;
3298 umem_log_arena = NULL;
3299 umem_oversize_arena = NULL;
3300 umem_va_arena = NULL;
3301 umem_default_arena = NULL;
3302 umem_firewall_va_arena = NULL;
3303 umem_firewall_arena = NULL;
3304 umem_memalign_arena = NULL;
3305 umem_transaction_log = NULL;
3306 umem_content_log = NULL;
3307 umem_failure_log = NULL;
3308 umem_slab_log = NULL;
3309 umem_cpu_mask = 0;
3310
3311 umem_cpus = &umem_startup_cpu;
3312 umem_startup_cpu.cpu_cache_offset = UMEM_CACHE_SIZE(0);
3313 umem_startup_cpu.cpu_number = 0;
3314
3315 bcopy(&umem_null_cache_template, &umem_null_cache,
3316 sizeof (umem_cache_t));
3317
3318 for (idx = 0; idx < (UMEM_MAXBUF >> UMEM_ALIGN_SHIFT); idx++)
3319 umem_alloc_table[idx] = &umem_null_cache;
3320 #endif
3321
3322 /*
3323 * Perform initialization specific to the way we've been compiled
3324 * (library or standalone)
3325 */
3326 umem_type_init(start, len, pagesize);
3327
3328 vmem_startup();
3329 }
3330
3331 int
3332 umem_init(void)
3333 {
3334 size_t maxverify, minfirewall;
3335 size_t size;
3336 int idx;
3337 umem_cpu_t *new_cpus;
3338
3339 vmem_t *memalign_arena, *oversize_arena;
3340
3341 if (thr_self() != umem_init_thr) {
3342 /*
3343 * The usual case -- non-recursive invocation of umem_init().
3344 */
3345 (void) mutex_lock(&umem_init_lock);
3346 if (umem_ready != UMEM_READY_STARTUP) {
3347 /*
3348 * someone else beat us to initializing umem. Wait
3349 * for them to complete, then return.
3350 */
3351 while (umem_ready == UMEM_READY_INITING) {
3352 int cancel_state;
3353
3354 (void) pthread_setcancelstate(
3355 PTHREAD_CANCEL_DISABLE, &cancel_state);
3356 (void) cond_wait(&umem_init_cv,
3357 &umem_init_lock);
3358 (void) pthread_setcancelstate(
3359 cancel_state, NULL);
3360 }
3361 ASSERT(umem_ready == UMEM_READY ||
3362 umem_ready == UMEM_READY_INIT_FAILED);
3363 (void) mutex_unlock(&umem_init_lock);
3364 return (umem_ready == UMEM_READY);
3365 }
3366
3367 ASSERT(umem_ready == UMEM_READY_STARTUP);
3368 ASSERT(umem_init_env_ready == 0);
3369
3370 umem_ready = UMEM_READY_INITING;
3371 umem_init_thr = thr_self();
3372
3373 (void) mutex_unlock(&umem_init_lock);
3374 umem_setup_envvars(0); /* can recurse -- see below */
3375 if (umem_init_env_ready) {
3376 /*
3377 * initialization was completed already
3378 */
3379 ASSERT(umem_ready == UMEM_READY ||
3380 umem_ready == UMEM_READY_INIT_FAILED);
3381 ASSERT(umem_init_thr == 0);
3382 return (umem_ready == UMEM_READY);
3383 }
3384 } else if (!umem_init_env_ready) {
3385 /*
3386 * The umem_setup_envvars() call (above) makes calls into
3387 * the dynamic linker and directly into user-supplied code.
3388 * Since we cannot know what that code will do, we could be
3389 * recursively invoked (by, say, a malloc() call in the code
3390 * itself, or in a (C++) _init section it causes to be fired).
3391 *
3392 * This code is where we end up if such recursion occurs. We
3393 * first clean up any partial results in the envvar code, then
3394 * proceed to finish initialization processing in the recursive
3395 * call. The original call will notice this, and return
3396 * immediately.
3397 */
3398 umem_setup_envvars(1); /* clean up any partial state */
3399 } else {
3400 umem_panic(
3401 "recursive allocation while initializing umem\n");
3402 }
3403 umem_init_env_ready = 1;
3404
3405 /*
3406 * From this point until we finish, recursion into umem_init() will
3407 * cause a umem_panic().
3408 */
3409 maxverify = minfirewall = ULONG_MAX;
3410
3411 /* LINTED constant condition */
3412 if (sizeof (umem_cpu_cache_t) != UMEM_CPU_CACHE_SIZE) {
3413 umem_panic("sizeof (umem_cpu_cache_t) = %d, should be %d\n",
3414 sizeof (umem_cpu_cache_t), UMEM_CPU_CACHE_SIZE);
3415 }
3416
3417 umem_max_ncpus = umem_get_max_ncpus();
3418
3419 /*
3420 * load tunables from environment
3421 */
3422 umem_process_envvars();
3423
3424 if (issetugid())
3425 umem_mtbf = 0;
3426
3427 /*
3428 * set up vmem
3429 */
3430 if (!(umem_flags & UMF_AUDIT))
3431 vmem_no_debug();
3432
3433 heap_arena = vmem_heap_arena(&heap_alloc, &heap_free);
3434
3435 pagesize = heap_arena->vm_quantum;
3436
3437 umem_internal_arena = vmem_create("umem_internal", NULL, 0, pagesize,
3438 heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP);
3439
3440 umem_default_arena = umem_internal_arena;
3441
3442 if (umem_internal_arena == NULL)
3443 goto fail;
3444
3445 umem_cache_arena = vmem_create("umem_cache", NULL, 0, UMEM_ALIGN,
3446 vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP);
3447
3448 umem_hash_arena = vmem_create("umem_hash", NULL, 0, UMEM_ALIGN,
3449 vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP);
3450
3451 umem_log_arena = vmem_create("umem_log", NULL, 0, UMEM_ALIGN,
3452 heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP);
3453
3454 umem_firewall_va_arena = vmem_create("umem_firewall_va",
3455 NULL, 0, pagesize,
3456 umem_firewall_va_alloc, umem_firewall_va_free, heap_arena,
3457 0, VM_NOSLEEP);
3458
3459 if (umem_cache_arena == NULL || umem_hash_arena == NULL ||
3460 umem_log_arena == NULL || umem_firewall_va_arena == NULL)
3461 goto fail;
3462
3463 umem_firewall_arena = vmem_create("umem_firewall", NULL, 0, pagesize,
3464 heap_alloc, heap_free, umem_firewall_va_arena, 0,
3465 VM_NOSLEEP);
3466
3467 if (umem_firewall_arena == NULL)
3468 goto fail;
3469
3470 oversize_arena = vmem_create("umem_oversize", NULL, 0, pagesize,
3471 heap_alloc, heap_free, minfirewall < ULONG_MAX ?
3472 umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP);
3473
3474 memalign_arena = vmem_create("umem_memalign", NULL, 0, UMEM_ALIGN,
3475 heap_alloc, heap_free, minfirewall < ULONG_MAX ?
3476 umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP);
3477
3478 if (oversize_arena == NULL || memalign_arena == NULL)
3479 goto fail;
3480
3481 if (umem_max_ncpus > CPUHINT_MAX())
3482 umem_max_ncpus = CPUHINT_MAX();
3483
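/*
 * Round umem_max_ncpus up to a power of two so that umem_cpu_mask
 * (umem_max_ncpus - 1, set below) can be used as a simple bitmask when
 * picking a CPU cache.
 */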
3484 while ((umem_max_ncpus & (umem_max_ncpus - 1)) != 0)
3485 umem_max_ncpus++;
3486
3487 if (umem_max_ncpus == 0)
3488 umem_max_ncpus = 1;
3489
3490 size = umem_max_ncpus * sizeof (umem_cpu_t);
3491 new_cpus = vmem_alloc(umem_internal_arena, size, VM_NOSLEEP);
3492 if (new_cpus == NULL)
3493 goto fail;
3494
3495 bzero(new_cpus, size);
3496 for (idx = 0; idx < umem_max_ncpus; idx++) {
3497 new_cpus[idx].cpu_number = idx;
3498 new_cpus[idx].cpu_cache_offset = UMEM_CACHE_SIZE(idx);
3499 }
3500 umem_cpus = new_cpus;
3501 umem_cpu_mask = (umem_max_ncpus - 1);
3502
3503 if (umem_maxverify == 0)
3504 umem_maxverify = maxverify;
3505
3506 if (umem_minfirewall == 0)
3507 umem_minfirewall = minfirewall;
3508
3509 /*
3510 * Set up updating and reaping
3511 */
3512 umem_reap_next = gethrtime() + NANOSEC;
3513
3514 #ifndef UMEM_STANDALONE
3515 (void) gettimeofday(&umem_update_next, NULL);
3516 #endif
3517
3518 /*
3519 * Set up logging -- failure here is okay, since it will just disable
3520 * the logs
3521 */
3522 if (umem_logging) {
3523 umem_transaction_log = umem_log_init(umem_transaction_log_size);
3524 umem_content_log = umem_log_init(umem_content_log_size);
3525 umem_failure_log = umem_log_init(umem_failure_log_size);
3526 umem_slab_log = umem_log_init(umem_slab_log_size);
3527 }
3528
3529 /*
3530 * Set up caches -- if successful, initialization cannot fail, since
3531 * allocations from other threads can now succeed.
3532 */
3533 if (umem_cache_init() == 0) {
3534 log_message("unable to create initial caches\n");
3535 goto fail;
3536 }
3537 umem_oversize_arena = oversize_arena;
3538 umem_memalign_arena = memalign_arena;
3539
3540 umem_cache_applyall(umem_cache_magazine_enable);
3541
3542 /*
3543 * initialization done, ready to go
3544 */
3545 (void) mutex_lock(&umem_init_lock);
3546 umem_ready = UMEM_READY;
3547 umem_init_thr = 0;
3548 (void) cond_broadcast(&umem_init_cv);
3549 (void) mutex_unlock(&umem_init_lock);
3550 return (1);
3551
3552 fail:
3553 log_message("umem initialization failed\n");
3554
3555 (void) mutex_lock(&umem_init_lock);
3556 umem_ready = UMEM_READY_INIT_FAILED;
3557 umem_init_thr = 0;
3558 (void) cond_broadcast(&umem_init_cv);
3559 (void) mutex_unlock(&umem_init_lock);
3560 return (0);
3561 }
3562
3563 void
3564 umem_setmtbf(uint32_t mtbf)
3565 {
3566 extern uint32_t vmem_mtbf;
3567
3568 umem_mtbf = mtbf;
3569 vmem_mtbf = mtbf;
3570 }
3571