/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2020 Oxide Computer Company
 */

/*
 * Kernel memory allocator, as described in the following two papers and a
 * statement about the consolidator:
 *
 * Jeff Bonwick,
 * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
 * Proceedings of the Summer 1994 Usenix Conference.
 * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
 *
 * Jeff Bonwick and Jonathan Adams,
 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
 * Arbitrary Resources.
 * Proceedings of the 2001 Usenix Conference.
 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
 *
 * kmem Slab Consolidator Big Theory Statement:
 *
 * 1. Motivation
 *
 * As stated in Bonwick94, slabs provide the following advantages over other
 * allocation structures in terms of memory fragmentation:
 *
 *  - Internal fragmentation (per-buffer wasted space) is minimal.
 *  - Severe external fragmentation (unused buffers on the free list) is
 *    unlikely.
 *
 * Segregating objects by size eliminates one source of external fragmentation,
 * and according to Bonwick:
 *
 *   The other reason that slabs reduce external fragmentation is that all
 *   objects in a slab are of the same type, so they have the same lifetime
 *   distribution. The resulting segregation of short-lived and long-lived
 *   objects at slab granularity reduces the likelihood of an entire page being
 *   held hostage due to a single long-lived allocation [Barrett93, Hanson90].
 *
 * While unlikely, severe external fragmentation remains possible. Clients that
 * allocate both short- and long-lived objects from the same cache cannot
 * anticipate the distribution of long-lived objects within the allocator's slab
 * implementation. Even a small percentage of long-lived objects distributed
 * randomly across many slabs can lead to a worst case scenario where the client
 * frees the majority of its objects and the system gets back almost none of the
 * slabs. Despite the client doing what it reasonably can to help the system
 * reclaim memory, the allocator cannot shake free enough slabs because of
 * lonely allocations stubbornly hanging on. Although the allocator is in a
 * position to diagnose the fragmentation, there is nothing that the allocator
 * by itself can do about it. It only takes a single allocated object to prevent
 * an entire slab from being reclaimed, and any object handed out by
 * kmem_cache_alloc() is by definition in the client's control. Conversely,
 * although the client is in a position to move a long-lived object, it has no
 * way of knowing if the object is causing fragmentation, and if so, where to
 * move it. A solution necessarily requires further cooperation between the
 * allocator and the client.
 *
 * 2. Move Callback
 *
 * The kmem slab consolidator therefore adds a move callback to the
 * allocator/client interface, improving worst-case external fragmentation in
 * kmem caches that supply a function to move objects from one memory location
 * to another. In a situation of low memory kmem attempts to consolidate all of
 * a cache's slabs at once; otherwise it works slowly to bring external
 * fragmentation within the 1/8 limit guaranteed for internal fragmentation,
 * thereby helping to avoid a low memory situation in the future.
 *
 * The callback has the following signature:
 *
 *      kmem_cbrc_t move(void *old, void *new, size_t size, void *user_arg)
 *
 * It supplies the kmem client with two addresses: the allocated object that
 * kmem wants to move and a buffer selected by kmem for the client to use as the
 * copy destination. The callback is kmem's way of saying "Please get off of
 * this buffer and use this one instead." kmem knows where it wants to move the
 * object in order to best reduce fragmentation. All the client needs to know
 * about the second argument (void *new) is that it is an allocated, constructed
 * object ready to take the contents of the old object. When the move function
 * is called, the system is likely to be low on memory, and the new object
 * spares the client from having to worry about allocating memory for the
 * requested move. The third argument supplies the size of the object, in case a
 * single move function handles multiple caches whose objects differ only in
 * size (such as zio_buf_512, zio_buf_1024, etc). Finally, the same optional
 * user argument passed to the constructor, destructor, and reclaim functions is
 * also passed to the move callback.
 *
 * 2.1 Setting the Move Callback
 *
 * The client sets the move callback after creating the cache and before
 * allocating from it:
 *
 *      object_cache = kmem_cache_create(...);
 *      kmem_cache_set_move(object_cache, object_move);
 *
 * 2.2 Move Callback Return Values
 *
 * Only the client knows about its own data and when is a good time to move it.
 * The client is cooperating with kmem to return unused memory to the system,
 * and kmem respectfully accepts this help at the client's convenience. When
 * asked to move an object, the client can respond with any of the following:
 *
 *      typedef enum kmem_cbrc {
 *              KMEM_CBRC_YES,
 *              KMEM_CBRC_NO,
 *              KMEM_CBRC_LATER,
 *              KMEM_CBRC_DONT_NEED,
 *              KMEM_CBRC_DONT_KNOW
 *      } kmem_cbrc_t;
 *
 * The client must not explicitly kmem_cache_free() either of the objects passed
 * to the callback, since kmem wants to free them directly to the slab layer
 * (bypassing the per-CPU magazine layer). The response tells kmem which of the
 * objects to free:
 *
 * YES:        (Did it) The client moved the object, so kmem frees the old one.
 * NO:         (Never) The client refused, so kmem frees the new object (the
 *             unused copy destination). kmem also marks the slab of the old
 *             object so as not to bother the client with further callbacks for
 *             that object as long as the slab remains on the partial slab list.
 *             (The system won't be getting the slab back as long as the
 *             immovable object holds it hostage, so there's no point in moving
 *             any of its objects.)
 * LATER:      The client is using the object and cannot move it now, so kmem
 *             frees the new object (the unused copy destination). kmem still
 *             attempts to move other objects off the slab, since it expects to
 *             succeed in clearing the slab in a later callback. The client
 *             should use LATER instead of NO if the object is likely to become
 *             movable very soon.
 * DONT_NEED:  The client no longer needs the object, so kmem frees the old along
 *             with the new object (the unused copy destination). This response
 *             is the client's opportunity to be a model citizen and give back as
 *             much as it can.
 * DONT_KNOW:  The client does not know about the object because
 *             a) the client has just allocated the object and not yet put it
 *                wherever it expects to find known objects
 *             b) the client has removed the object from wherever it expects to
 *                find known objects and is about to free it, or
 *             c) the client has freed the object.
 *             In all these cases (a, b, and c) kmem frees the new object (the
 *             unused copy destination). In the first case, the object is in
 *             use and the correct action is that for LATER; in the latter two
 *             cases, we know that the object is either freed or about to be
 *             freed, in which case it is either already in a magazine or about
 *             to be in one. In these cases, we know that the object will either
 *             be reallocated and reused, or it will end up in a full magazine
 *             that will be reaped (thereby liberating the slab). Because it
 *             is prohibitively expensive to differentiate these cases, and
 *             because the defrag code is executed when we're low on memory
 *             (thereby biasing the system to reclaim full magazines) we treat
 *             all DONT_KNOW cases as LATER and rely on cache reaping to
 *             generally clean up full magazines. While we take the same action
 *             for these cases, we maintain their semantic distinction: if
 *             defragmentation is not occurring, it is useful to know if this
 *             is due to objects in use (LATER) or objects in an unknown state
 *             of transition (DONT_KNOW).
 *
 * 2.3 Object States
 *
 * Neither kmem nor the client can be assumed to know the object's whereabouts
 * at the time of the callback. An object belonging to a kmem cache may be in
 * any of the following states:
 *
 * 1. Uninitialized on the slab
 * 2. Allocated from the slab but not constructed (still uninitialized)
 * 3. Allocated from the slab, constructed, but not yet ready for business
 *    (not in a valid state for the move callback)
 * 4. In use (valid and known to the client)
 * 5. About to be freed (no longer in a valid state for the move callback)
 * 6. Freed to a magazine (still constructed)
 * 7. Allocated from a magazine, not yet ready for business (not in a valid
 *    state for the move callback), and about to return to state #4
 * 8. Deconstructed on a magazine that is about to be freed
 * 9. Freed to the slab
 *
 * Since the move callback may be called at any time while the object is in any
 * of the above states (except state #1), the client needs a safe way to
 * determine whether or not it knows about the object. Specifically, the client
 * needs to know whether or not the object is in state #4, the only state in
 * which a move is valid. If the object is in any other state, the client should
 * immediately return KMEM_CBRC_DONT_KNOW, since it is unsafe to access any of
 * the object's fields.
 *
 * Note that although an object may be in state #4 when kmem initiates the move
 * request, the object may no longer be in that state by the time kmem actually
 * calls the move function. Not only does the client free objects
 * asynchronously, kmem itself puts move requests on a queue where they are
 * pending until kmem processes them from another context. Also, objects freed
 * to a magazine appear allocated from the point of view of the slab layer, so
 * kmem may even initiate requests for objects in a state other than state #4.
 *
 * 2.3.1 Magazine Layer
 *
 * An important insight revealed by the states listed above is that the magazine
 * layer is populated only by kmem_cache_free(). Magazines of constructed
 * objects are never populated directly from the slab layer (which contains raw,
 * unconstructed objects). Whenever an allocation request cannot be satisfied
 * from the magazine layer, the magazines are bypassed and the request is
 * satisfied from the slab layer (creating a new slab if necessary). kmem calls
 * the object constructor only when allocating from the slab layer, and only in
 * response to kmem_cache_alloc() or to prepare the destination buffer passed in
 * the move callback. kmem does not preconstruct objects in anticipation of
 * kmem_cache_alloc().
 *
 * 2.3.2 Object Constructor and Destructor
 *
 * If the client supplies a destructor, it must be valid to call the destructor
 * on a newly created object (immediately after the constructor).
 *
 * 2.4 Recognizing Known Objects
 *
 * There is a simple test to determine safely whether or not the client knows
 * about a given object in the move callback. It relies on the fact that kmem
 * guarantees that the object of the move callback has only been touched by the
 * client itself or else by kmem. kmem does this by ensuring that none of the
 * cache's slabs are freed to the virtual memory (VM) subsystem while a move
 * callback is pending. When the last object on a slab is freed, if there is a
 * pending move, kmem puts the slab on a per-cache dead list and defers freeing
 * slabs on that list until all pending callbacks are completed. That way,
 * clients can be certain that the object of a move callback is in one of the
 * states listed above, making it possible to distinguish known objects (in
 * state #4) using the two low order bits of any pointer member (with the
 * exception of 'char *' or 'short *' which may not be 4-byte aligned on some
 * platforms).
 *
 * The test works as long as the client always transitions objects from state #4
 * (known, in use) to state #5 (about to be freed, invalid) by setting the low
 * order bit of the client-designated pointer member. Since kmem only writes
 * invalid memory patterns, such as 0xbaddcafe to uninitialized memory and
 * 0xdeadbeef to freed memory, any scribbling on the object done by kmem is
 * guaranteed to set at least one of the two low order bits. Therefore, given an
 * object with a back pointer to a 'container_t *o_container', the client can
 * test
 *
 *      container_t *container = object->o_container;
 *      if ((uintptr_t)container & 0x3) {
 *              return (KMEM_CBRC_DONT_KNOW);
 *      }
 *
 * Typically, an object will have a pointer to some structure with a list or
 * hash where objects from the cache are kept while in use. Assuming that the
 * client has some way of knowing that the container structure is valid and will
 * not go away during the move, and assuming that the structure includes a lock
 * to protect whatever collection is used, then the client would continue as
 * follows:
 *
 *      // Ensure that the container structure does not go away.
 *      if (container_hold(container) == 0) {
 *              return (KMEM_CBRC_DONT_KNOW);
 *      }
 *      mutex_enter(&container->c_objects_lock);
 *      if (container != object->o_container) {
 *              mutex_exit(&container->c_objects_lock);
 *              container_rele(container);
 *              return (KMEM_CBRC_DONT_KNOW);
 *      }
 *
 * At this point the client knows that the object cannot be freed as long as
 * c_objects_lock is held. Note that after acquiring the lock, the client must
 * recheck the o_container pointer in case the object was removed just before
 * acquiring the lock.
 *
 * When the client is about to free an object, it must first remove that object
 * from the list, hash, or other structure where it is kept. At that time, to
 * mark the object so it can be distinguished from the remaining, known objects,
 * the client sets the designated low order bit:
 *
 *      mutex_enter(&container->c_objects_lock);
 *      object->o_container = (void *)((uintptr_t)object->o_container | 0x1);
 *      list_remove(&container->c_objects, object);
 *      mutex_exit(&container->c_objects_lock);
 *
 * In the common case, the object is freed to the magazine layer, where it may
 * be reused on a subsequent allocation without the overhead of calling the
 * constructor. While in the magazine it appears allocated from the point of
 * view of the slab layer, making it a candidate for the move callback. Most
 * objects unrecognized by the client in the move callback fall into this
 * category and are cheaply distinguished from known objects by the test
 * described earlier. Because searching magazines is prohibitively expensive
 * for kmem, clients that do not mark freed objects (and therefore return
 * KMEM_CBRC_DONT_KNOW for large numbers of objects) may find defragmentation
 * efficacy reduced.
 *
 * Invalidating the designated pointer member before freeing the object marks
 * the object to be avoided in the callback, and conversely, assigning a valid
 * value to the designated pointer member after allocating the object makes the
 * object fair game for the callback:
 *
 *      ... allocate object ...
 *      ... set any initial state not set by the constructor ...
 *
 *      mutex_enter(&container->c_objects_lock);
 *      list_insert_tail(&container->c_objects, object);
 *      membar_producer();
 *      object->o_container = container;
 *      mutex_exit(&container->c_objects_lock);
 *
 * Note that everything else must be valid before setting o_container makes the
 * object fair game for the move callback. The membar_producer() call ensures
 * that all the object's state is written to memory before setting the pointer
 * that transitions the object from state #3 or #7 (allocated, constructed, not
 * yet in use) to state #4 (in use, valid). That's important because the move
 * function has to check the validity of the pointer before it can safely
 * acquire the lock protecting the collection where it expects to find known
 * objects.
 *
 * This method of distinguishing known objects observes the usual symmetry:
 * invalidating the designated pointer is the first thing the client does before
 * freeing the object, and setting the designated pointer is the last thing the
 * client does after allocating the object. Of course, the client is not
 * required to use this method. Fundamentally, how the client recognizes known
 * objects is completely up to the client, but this method is recommended as an
 * efficient and safe way to take advantage of the guarantees made by kmem. If
 * the entire object is arbitrary data without any markable bits from a suitable
 * pointer member, then the client must find some other method, such as
 * searching a hash table of known objects.
 *
 * 2.5 Preventing Objects From Moving
 *
 * Besides a way to distinguish known objects, the other thing that the client
 * needs is a strategy to ensure that an object will not move while the client
 * is actively using it. The details of satisfying this requirement tend to be
 * highly cache-specific. It might seem that the same rules that let a client
 * remove an object safely should also decide when an object can be moved
 * safely. However, any object state that makes a removal attempt invalid is
 * likely to be long-lasting for objects that the client does not expect to
 * remove. kmem knows nothing about the object state and is equally likely (from
 * the client's point of view) to request a move for any object in the cache,
 * whether prepared for removal or not. Even a low percentage of objects stuck
 * in place by unremovability will defeat the consolidator if the stuck objects
 * are the same long-lived allocations likely to hold slabs hostage.
 * Fundamentally, the consolidator is not aimed at common cases. Severe external
 * fragmentation is a worst case scenario manifested as sparsely allocated
 * slabs, by definition a low percentage of the cache's objects. When deciding
 * what makes an object movable, keep in mind the goal of the consolidator: to
 * bring worst-case external fragmentation within the limits guaranteed for
 * internal fragmentation. Removability is a poor criterion if it is likely to
 * exclude more than an insignificant percentage of objects for long periods of
 * time.
 *
 * A tricky general solution exists, and it has the advantage of letting you
 * move any object at almost any moment, practically eliminating the likelihood
 * that an object can hold a slab hostage. However, if there is a cache-specific
 * way to ensure that an object is not actively in use in the vast majority of
 * cases, a simpler solution that leverages this cache-specific knowledge is
 * preferred.
 *
 * 2.5.1 Cache-Specific Solution
 *
 * As an example of a cache-specific solution, the ZFS znode cache takes
 * advantage of the fact that the vast majority of znodes are only being
 * referenced from the DNLC. (A typical case might be a few hundred in active
 * use and a hundred thousand in the DNLC.) In the move callback, after the ZFS
 * client has established that it recognizes the znode and can access its fields
 * safely (using the method described earlier), it then tests whether the znode
 * is referenced by anything other than the DNLC. If so, it assumes that the
 * znode may be in active use and is unsafe to move, so it drops its locks and
 * returns KMEM_CBRC_LATER. The advantage of this strategy is that everywhere
 * else znodes are used, no change is needed to protect against the possibility
 * of the znode moving. The disadvantage is that it remains possible for an
 * application to hold a znode slab hostage with an open file descriptor.
 * However, this case ought to be rare and the consolidator has a way to deal
 * with it: If the client responds KMEM_CBRC_LATER repeatedly for the same
 * object, kmem eventually stops believing it and treats the slab as if the
 * client had responded KMEM_CBRC_NO. Having marked the hostage slab, kmem can
 * then focus on getting it off of the partial slab list by allocating rather
 * than freeing all of its objects. (Either way of getting a slab off the
 * free list reduces fragmentation.)
 *
 * 2.5.2 General Solution
 *
 * The general solution, on the other hand, requires an explicit hold everywhere
 * the object is used to prevent it from moving. To keep the client locking
 * strategy as uncomplicated as possible, kmem guarantees the simplifying
 * assumption that move callbacks are sequential, even across multiple caches.
 * Internally, a global queue processed by a single thread supports all caches
 * implementing the callback function. No matter how many caches supply a move
 * function, the consolidator never moves more than one object at a time, so the
 * client does not have to worry about tricky lock ordering involving several
 * related objects from different kmem caches.
 *
 * The general solution implements the explicit hold as a read-write lock, which
 * allows multiple readers to access an object from the cache simultaneously
 * while a single writer is excluded from moving it. A single rwlock for the
 * entire cache would lock out all threads from using any of the cache's objects
 * even though only a single object is being moved, so to reduce contention,
 * the client can fan out the single rwlock into an array of rwlocks hashed by
 * the object address, making it probable that moving one object will not
 * prevent other threads from using a different object. The rwlock cannot be a
 * member of the object itself, because the possibility of the object moving
 * makes it unsafe to access any of the object's fields until the lock is
 * acquired.
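 *
 * As a concrete illustration of that fan-out (only a sketch: the names
 * OBJECT_HASH_SIZE, object_rwlocks, and OBJECT_RWLOCK are illustrative and
 * not part of the kmem interface), the hashed lock array might look like
 * this; the OBJECT_RWLOCK macro is the one used by the examples below:
 *
 *      #define OBJECT_HASH_SIZE        64      // illustrative power of two
 *      static krwlock_t object_rwlocks[OBJECT_HASH_SIZE];
 *
 *      // Hash an object address to one of the rwlocks. The shift discards
 *      // low order address bits (largely fixed by object alignment); the
 *      // exact shift and table size are arbitrary for this sketch.
 *      #define OBJECT_RWLOCK(op)                                       \
 *              (&object_rwlocks[((uintptr_t)(op) >> 4) &              \
 *              (OBJECT_HASH_SIZE - 1)])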
 *
 * Assuming a small, fixed number of locks, it's possible that multiple objects
 * will hash to the same lock. A thread that needs to use multiple objects in
 * the same function may acquire the same lock multiple times. Since rwlocks are
 * reentrant for readers, and since there is never more than a single writer at
 * a time (assuming that the client acquires the lock as a writer only when
 * moving an object inside the callback), there would seem to be no problem.
 * However, a client locking multiple objects in the same function must handle
 * one case of potential deadlock: Assume that thread A needs to prevent both
 * object 1 and object 2 from moving, and thread B, the callback, meanwhile
 * tries to move object 3. It's possible, if objects 1, 2, and 3 all hash to the
 * same lock, that thread A will acquire the lock for object 1 as a reader
 * before thread B sets the lock's write-wanted bit, preventing thread A from
 * reacquiring the lock for object 2 as a reader. Unable to make forward
 * progress, thread A will never release the lock for object 1, resulting in
 * deadlock.
 *
 * There are two ways of avoiding the deadlock just described. The first is to
 * use rw_tryenter() rather than rw_enter() in the callback function when
 * attempting to acquire the lock as a writer. If tryenter discovers that the
 * same object (or another object hashed to the same lock) is already in use, it
 * aborts the callback and returns KMEM_CBRC_LATER. The second way is to use
 * rprwlock_t (declared in common/fs/zfs/sys/rprwlock.h) instead of rwlock_t,
 * since it allows a thread to acquire the lock as a reader in spite of a
 * waiting writer. This second approach insists on moving the object now, no
 * matter how many readers the move function must wait for in order to do so,
 * and could delay the completion of the callback indefinitely (blocking
 * callbacks to other clients). In practice, a less insistent callback using
 * rw_tryenter() returns KMEM_CBRC_LATER infrequently enough that there seems
 * little reason to use anything else.
 *
 * Avoiding deadlock is not the only problem that an implementation using an
 * explicit hold needs to solve. Locking the object in the first place (to
 * prevent it from moving) remains a problem, since the object could move
 * between the time you obtain a pointer to the object and the time you acquire
 * the rwlock hashed to that pointer value. Therefore the client needs to
 * recheck the value of the pointer after acquiring the lock, drop the lock if
 * the value has changed, and try again. This requires a level of indirection:
 * something that points to the object rather than the object itself, that the
 * client can access safely while attempting to acquire the lock. (The object
 * itself cannot be referenced safely because it can move at any time.)
 * The following lock-acquisition function takes whatever is safe to reference
 * (arg), follows its pointer to the object (using function f), and tries as
 * often as necessary to acquire the hashed lock and verify that the object
 * still has not moved:
 *
 *      object_t *
 *      object_hold(object_f f, void *arg)
 *      {
 *              object_t *op;
 *
 *              op = f(arg);
 *              if (op == NULL) {
 *                      return (NULL);
 *              }
 *
 *              rw_enter(OBJECT_RWLOCK(op), RW_READER);
 *              while (op != f(arg)) {
 *                      rw_exit(OBJECT_RWLOCK(op));
 *                      op = f(arg);
 *                      if (op == NULL) {
 *                              break;
 *                      }
 *                      rw_enter(OBJECT_RWLOCK(op), RW_READER);
 *              }
 *
 *              return (op);
 *      }
 *
 * The OBJECT_RWLOCK macro hashes the object address to obtain the rwlock. The
 * lock reacquisition loop, while necessary, almost never executes. The function
 * pointer f (used to obtain the object pointer from arg) has the following type
 * definition:
 *
 *      typedef object_t *(*object_f)(void *arg);
 *
 * An object_f implementation is likely to be as simple as accessing a structure
 * member:
 *
 *      object_t *
 *      s_object(void *arg)
 *      {
 *              something_t *sp = arg;
 *              return (sp->s_object);
 *      }
 *
 * The flexibility of a function pointer allows the path to the object to be
 * arbitrarily complex and also supports the notion that depending on where you
 * are using the object, you may need to get it from someplace different.
 *
 * The function that releases the explicit hold is simpler because it does not
 * have to worry about the object moving:
 *
 *      void
 *      object_rele(object_t *op)
 *      {
 *              rw_exit(OBJECT_RWLOCK(op));
 *      }
 *
 * The caller is spared these details so that obtaining and releasing an
 * explicit hold feels like a simple mutex_enter()/mutex_exit() pair. The caller
 * of object_hold() only needs to know that the returned object pointer is valid
 * if not NULL and that the object will not move until released.
 *
 * Although object_hold() prevents an object from moving, it does not prevent it
 * from being freed. The caller must take measures before calling object_hold()
 * (afterwards is too late) to ensure that the held object cannot be freed. The
 * caller must do so without accessing the unsafe object reference, so any lock
 * or reference count used to ensure the continued existence of the object must
 * live outside the object itself.
 *
 * Obtaining a new object is a special case where an explicit hold is impossible
 * for the caller. Any function that returns a newly allocated object (either as
 * a return value, or as an in-out parameter) must return it already held; after
 * the caller gets it is too late, since the object cannot be safely accessed
 * without the level of indirection described earlier. The following
 * object_alloc() example uses the same code shown earlier to transition a new
 * object into the state of being recognized (by the client) as a known object.
 * The function must acquire the hold (rw_enter) before that state transition
 * makes the object movable:
 *
 *      static object_t *
 *      object_alloc(container_t *container)
 *      {
 *              object_t *object = kmem_cache_alloc(object_cache, 0);
 *              ... set any initial state not set by the constructor ...
 *              rw_enter(OBJECT_RWLOCK(object), RW_READER);
 *              mutex_enter(&container->c_objects_lock);
 *              list_insert_tail(&container->c_objects, object);
 *              membar_producer();
 *              object->o_container = container;
 *              mutex_exit(&container->c_objects_lock);
 *              return (object);
 *      }
 *
 * Functions that implicitly acquire an object hold (any function that calls
 * object_alloc() to supply an object for the caller) need to be carefully noted
 * so that the matching object_rele() is not neglected. Otherwise, leaked holds
 * prevent all objects hashed to the affected rwlocks from ever being moved.
 *
 * The pointer to a held object can be hashed to the holding rwlock even after
 * the object has been freed. Although it is possible to release the hold
 * after freeing the object, you may decide to release the hold implicitly in
 * whatever function frees the object, so as to release the hold as soon as
 * possible, and for the sake of symmetry with the function that implicitly
 * acquires the hold when it allocates the object. Here, object_free() releases
 * the hold acquired by object_alloc(). Its implicit object_rele() forms a
 * matching pair with object_hold():
 *
 *      void
 *      object_free(object_t *object)
 *      {
 *              container_t *container;
 *
 *              ASSERT(object_held(object));
 *              container = object->o_container;
 *              mutex_enter(&container->c_objects_lock);
 *              object->o_container =
 *                  (void *)((uintptr_t)object->o_container | 0x1);
 *              list_remove(&container->c_objects, object);
 *              mutex_exit(&container->c_objects_lock);
 *              object_rele(object);
 *              kmem_cache_free(object_cache, object);
 *      }
 *
 * Note that object_free() cannot safely accept an object pointer as an argument
 * unless the object is already held. Any function that calls object_free()
 * needs to be carefully noted since it similarly forms a matching pair with
 * object_hold().
 *
 * To complete the picture, the following callback function implements the
 * general solution by moving objects only if they are currently unheld:
 *
 *      static kmem_cbrc_t
 *      object_move(void *buf, void *newbuf, size_t size, void *arg)
 *      {
 *              object_t *op = buf, *np = newbuf;
 *              container_t *container;
 *
 *              container = op->o_container;
 *              if ((uintptr_t)container & 0x3) {
 *                      return (KMEM_CBRC_DONT_KNOW);
 *              }
 *
 *              // Ensure that the container structure does not go away.
 *              if (container_hold(container) == 0) {
 *                      return (KMEM_CBRC_DONT_KNOW);
 *              }
 *
 *              mutex_enter(&container->c_objects_lock);
 *              if (container != op->o_container) {
 *                      mutex_exit(&container->c_objects_lock);
 *                      container_rele(container);
 *                      return (KMEM_CBRC_DONT_KNOW);
 *              }
 *
 *              if (rw_tryenter(OBJECT_RWLOCK(op), RW_WRITER) == 0) {
 *                      mutex_exit(&container->c_objects_lock);
 *                      container_rele(container);
 *                      return (KMEM_CBRC_LATER);
 *              }
 *
 *              object_move_impl(op, np);       // critical section
 *              rw_exit(OBJECT_RWLOCK(op));
 *
 *              op->o_container = (void *)((uintptr_t)op->o_container | 0x1);
 *              list_link_replace(&op->o_link_node, &np->o_link_node);
 *              mutex_exit(&container->c_objects_lock);
 *              container_rele(container);
 *              return (KMEM_CBRC_YES);
 *      }
 *
 * Note that object_move() must invalidate the designated o_container pointer of
 * the old object in the same way that object_free() does, since kmem will free
 * the object in response to the KMEM_CBRC_YES return value.
 *
 * The lock order in object_move() differs from object_alloc(), which locks
 * OBJECT_RWLOCK first and &container->c_objects_lock second, but as long as the
 * callback uses rw_tryenter() (preventing the deadlock described earlier), it's
 * not a problem. Holding the lock on the object list in the example above
 * through the entire callback not only prevents the object from going away, it
 * also allows you to lock the list elsewhere and know that none of its elements
 * will move during iteration.
 *
 * Adding an explicit hold everywhere an object from the cache is used is tricky
 * and involves much more change to client code than a cache-specific solution
 * that leverages existing state to decide whether or not an object is
 * movable. However, this approach has the advantage that no object remains
 * immovable for any significant length of time, making it extremely unlikely
 * that long-lived allocations can continue holding slabs hostage; and it works
 * for any cache.
 *
 * 3. Consolidator Implementation
 *
 * Once the client supplies a move function that a) recognizes known objects and
 * b) avoids moving objects that are actively in use, the remaining work is up
 * to the consolidator to decide which objects to move and when to issue
 * callbacks.
 *
 * The consolidator relies on the fact that a cache's slabs are ordered by
 * usage. Each slab has a fixed number of objects. Depending on the slab's
 * "color" (the offset of the first object from the beginning of the slab;
 * offsets are staggered to mitigate false sharing of cache lines) it is either
 * the maximum number of objects per slab determined at cache creation time or
 * else the number closest to the maximum that fits within the space remaining
 * after the initial offset. A completely allocated slab may contribute some
 * internal fragmentation (per-slab overhead) but no external fragmentation, so
 * it is of no interest to the consolidator. At the other extreme, slabs whose
 * objects have all been freed to the slab are released to the virtual memory
 * (VM) subsystem (objects freed to magazines are still allocated as far as the
 * slab is concerned). External fragmentation exists when there are slabs
 * somewhere between these extremes. A partial slab has at least one but not all
 * of its objects allocated. The more partial slabs, and the fewer allocated
 * objects on each of them, the higher the fragmentation. Hence the
 * consolidator's overall strategy is to reduce the number of partial slabs by
 * moving allocated objects from the least allocated slabs to the most allocated
 * slabs.
 *
 * Partial slabs are kept in an AVL tree ordered by usage. Completely allocated
 * slabs are kept separately in an unordered list. Since the majority of slabs
 * tend to be completely allocated (a typical unfragmented cache may have
 * thousands of complete slabs and only a single partial slab), separating
 * complete slabs improves the efficiency of partial slab ordering, since the
 * complete slabs do not affect the depth or balance of the AVL tree. This
 * ordered sequence of partial slabs acts as a "free list" supplying objects for
 * allocation requests.
 *
 * Objects are always allocated from the first partial slab in the free list,
 * where the allocation is most likely to eliminate a partial slab (by
 * completely allocating it). Conversely, when a single object from a completely
 * allocated slab is freed to the slab, that slab is added to the front of the
 * free list. Since most free list activity involves highly allocated slabs
 * coming and going at the front of the list, slabs tend naturally toward the
 * ideal order: highly allocated at the front, sparsely allocated at the back.
 * Slabs with few allocated objects are likely to become completely free if they
 * keep a safe distance away from the front of the free list. Slab misorders
 * interfere with the natural tendency of slabs to become completely free or
 * completely allocated. For example, a slab with a single allocated object
 * needs only a single free to escape the cache; its natural desire is
 * frustrated when it finds itself at the front of the list where a second
 * allocation happens just before the free could have released it. Another slab
 * with all but one object allocated might have supplied the buffer instead, so
 * that both (as opposed to neither) of the slabs would have been taken off the
 * free list.
 *
 * Although slabs tend naturally toward the ideal order, misorders allowed by a
 * simple list implementation defeat the consolidator's strategy of merging
 * least- and most-allocated slabs. Without an AVL tree to guarantee order, kmem
 * needs another way to fix misorders to optimize its callback strategy. One
 * approach is to periodically scan a limited number of slabs, advancing a
 * marker to hold the current scan position, and to move extreme misorders to
 * the front or back of the free list and to the front or back of the current
 * scan range. By making consecutive scan ranges overlap by one slab, the least
 * allocated slab in the current range can be carried along from the end of one
 * scan to the start of the next.
 *
 * Maintaining partial slabs in an AVL tree relieves kmem of this additional
 * task, however. Since most of the cache's activity is in the magazine layer,
 * and allocations from the slab layer represent only a startup cost, the
 * overhead of maintaining a balanced tree is not a significant concern compared
 * to the opportunity of reducing complexity by eliminating the partial slab
 * scanner just described. The overhead of an AVL tree is minimized by
 * maintaining only partial slabs in the tree and keeping completely allocated
 * slabs separately in a list. To avoid increasing the size of the slab
 * structure the AVL linkage pointers are reused for the slab's list linkage,
 * since the slab will always be either partial or complete, never stored both
 * ways at the same time. To further minimize the overhead of the AVL tree the
 * compare function that orders partial slabs by usage divides the range of
 * allocated object counts into bins such that counts within the same bin are
 * considered equal. Binning partial slabs makes it less likely that allocating
 * or freeing a single object will change the slab's order, requiring a tree
 * reinsertion (an avl_remove() followed by an avl_add(), both potentially
 * requiring some rebalancing of the tree). Allocation counts closest to
 * completely free and completely allocated are left unbinned (finely sorted) to
 * better support the consolidator's strategy of merging slabs at either
 * extreme.
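 *
 * The shape of such a binned comparison is sketched below. This is only an
 * illustration: the type and member names (sample_slab_t, s_refcnt) and the
 * SAMPLE_BIN_WIDTH constant are made up for the example, and the unbinned
 * treatment of the extremes described above is omitted for brevity.
 *
 *      #define SAMPLE_BIN_WIDTH        8       // illustrative bin size
 *
 *      typedef struct sample_slab {
 *              avl_node_t s_link;              // AVL linkage
 *              size_t s_refcnt;                // allocated objects on slab
 *      } sample_slab_t;
 *
 *      // Order two partial slabs by usage, most allocated first. Counts
 *      // that fall in the same bin compare equal so that allocating or
 *      // freeing a single object rarely forces a tree reinsertion.
 *      static int
 *      sample_slab_cmp(const void *l, const void *r)
 *      {
 *              const sample_slab_t *s0 = l;
 *              const sample_slab_t *s1 = r;
 *              size_t b0 = s0->s_refcnt / SAMPLE_BIN_WIDTH;
 *              size_t b1 = s1->s_refcnt / SAMPLE_BIN_WIDTH;
 *
 *              if (b0 > b1)                    // more allocated sorts first
 *                      return (-1);
 *              if (b0 < b1)
 *                      return (1);
 *
 *              // Within a bin, fall back to the slab address so that the
 *              // ordering is total and AVL insertion is deterministic.
 *              if ((uintptr_t)s0 < (uintptr_t)s1)
 *                      return (-1);
 *              if ((uintptr_t)s0 > (uintptr_t)s1)
 *                      return (1);
 *              return (0);
 *      }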
 *
 * 3.1 Assessing Fragmentation and Selecting Candidate Slabs
 *
 * The consolidator piggybacks on the kmem maintenance thread and is called on
 * the same interval as kmem_cache_update(), once per cache every fifteen
 * seconds. kmem maintains a running count of unallocated objects in the slab
 * layer (cache_bufslab). The consolidator checks whether that number exceeds
 * 12.5% (1/8) of the total objects in the cache (cache_buftotal), and whether
 * there is a significant number of slabs in the cache (arbitrarily a minimum
 * 101 total slabs). Unused objects that have fallen out of the magazine layer's
 * working set are included in the assessment, and magazines in the depot are
 * reaped if those objects would lift cache_bufslab above the fragmentation
 * threshold. Once the consolidator decides that a cache is fragmented, it looks
 * for a candidate slab to reclaim, starting at the end of the partial slab free
 * list and scanning backwards. At first the consolidator is choosy: only a slab
 * with fewer than 12.5% (1/8) of its objects allocated qualifies (or else a
 * single allocated object, regardless of percentage). If there is difficulty
 * finding a candidate slab, kmem raises the allocation threshold incrementally,
 * up to a maximum 87.5% (7/8), so that eventually the consolidator will reduce
 * external fragmentation (unused objects on the free list) below 12.5% (1/8),
 * even in the worst case of every slab in the cache being almost 7/8 allocated.
 * The threshold can also be lowered incrementally when candidate slabs are easy
 * to find, and the threshold is reset to the minimum 1/8 as soon as the cache
 * is no longer fragmented.
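 *
 * Restated as a sketch (the function below only illustrates the two
 * conditions just described; it is not the actual test in this file):
 *
 *      // A cache is considered fragmented when unused objects in the slab
 *      // layer exceed 1/8 of all objects and the cache has a meaningful
 *      // number of slabs (at least 101, per the arbitrary minimum above).
 *      static int
 *      sample_cache_is_fragmented(uint64_t bufslab, uint64_t buftotal,
 *          uint64_t slab_count)
 *      {
 *              return (slab_count >= 101 && bufslab > buftotal / 8);
 *      }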
 *
 * 3.2 Generating Callbacks
 *
 * Once an eligible slab is chosen, a callback is generated for every allocated
 * object on the slab, in the hope that the client will move everything off the
 * slab and make it reclaimable. Objects selected as move destinations are
 * chosen from slabs at the front of the free list. Assuming slabs in the ideal
 * order (most allocated at the front, least allocated at the back) and a
 * cooperative client, the consolidator will succeed in removing slabs from both
 * ends of the free list, completely allocating on the one hand and completely
 * freeing on the other. Objects selected as move destinations are allocated in
 * the kmem maintenance thread where move requests are enqueued. A separate
 * callback thread removes pending callbacks from the queue and calls the
 * client. The separate thread ensures that client code (the move function) does
 * not interfere with internal kmem maintenance tasks. A map of pending
 * callbacks keyed by object address (the object to be moved) is checked to
 * ensure that duplicate callbacks are not generated for the same object.
 * Allocating the move destination (the object to move to) prevents subsequent
 * callbacks from selecting the same destination as an earlier pending callback.
 *
 * Move requests can also be generated by kmem_cache_reap() when the system is
 * desperate for memory and by kmem_cache_move_notify(), called by the client to
 * notify kmem that a move refused earlier with KMEM_CBRC_LATER is now possible.
 * The map of pending callbacks is protected by the same lock that protects the
 * slab layer.
 *
 * When the system is desperate for memory, kmem does not bother to determine
 * whether or not the cache exceeds the fragmentation threshold, but tries to
 * consolidate as many slabs as possible. Normally, the consolidator chews
 * slowly, one sparsely allocated slab at a time during each maintenance
 * interval that the cache is fragmented. When desperate, the consolidator
 * starts at the last partial slab and enqueues callbacks for every allocated
 * object on every partial slab, working backwards until it reaches the first
 * partial slab. The first partial slab, meanwhile, advances in pace with the
 * consolidator as allocations to supply move destinations for the enqueued
 * callbacks use up the highly allocated slabs at the front of the free list.
 * Ideally, the overgrown free list collapses like an accordion, starting at
 * both ends and ending at the center with a single partial slab.
 *
 * 3.3 Client Responses
 *
 * When the client returns KMEM_CBRC_NO in response to the move callback, kmem
 * marks the slab that supplied the stuck object non-reclaimable and moves it to
798b5fca8f8Stomee * the front of the free list. The slab remains marked as long as it stays on the
799b5fca8f8Stomee * free list, and it appears more allocated to the partial slab compare function
800b5fca8f8Stomee * than any unmarked slab, no matter how many of its objects are allocated.
801b5fca8f8Stomee * Since even one immovable object ties up the entire slab, the goal is to
802b5fca8f8Stomee * completely allocate any slab that cannot be completely freed. kmem does not
803b5fca8f8Stomee * bother generating callbacks to move objects from a marked slab unless the
804b5fca8f8Stomee * system is desperate.
805b5fca8f8Stomee *
806b5fca8f8Stomee * When the client responds KMEM_CBRC_LATER, kmem increments a count for the
807b5fca8f8Stomee * slab. If the client responds LATER too many times, kmem disbelieves and
808b5fca8f8Stomee * treats the response as a NO. The count is cleared when the slab is taken off
809b5fca8f8Stomee * the partial slab list or when the client moves one of the slab's objects.
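 *
 * To make the response protocol concrete, a cooperative client's move
 * callback (registered with kmem_cache_set_move()) might look roughly like
 * the sketch below. It is illustrative only: foo_t, foo_is_known(),
 * fp->foo_wired and the four-argument signature are assumptions made for the
 * example, not a specification of the interface.
 *
 *	static kmem_cbrc_t
 *	foo_move(void *old, void *new, size_t bufsize, void *arg)
 *	{
 *		foo_t *fp = old;
 *
 *		if (!foo_is_known(fp))			(object unrecognized)
 *			return (KMEM_CBRC_DONT_KNOW);
 *		if (!mutex_tryenter(&fp->foo_lock))	(try again later)
 *			return (KMEM_CBRC_LATER);
 *		if (fp->foo_wired) {			(can never be moved)
 *			mutex_exit(&fp->foo_lock);
 *			return (KMEM_CBRC_NO);
 *		}
 *		bcopy(old, new, bufsize);
 *		(repoint any references from 'old' to 'new')
 *		mutex_exit(&fp->foo_lock);
 *		return (KMEM_CBRC_YES);
 *	}
 *
 * Each response is tallied in the cache's kstats (kmem_cache_kstat below).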
810b5fca8f8Stomee *
811b5fca8f8Stomee * 4. Observability
812b5fca8f8Stomee *
813b5fca8f8Stomee * A kmem cache's external fragmentation is best observed with 'mdb -k' using
814b5fca8f8Stomee * the ::kmem_slabs dcmd. For a complete description of the command, enter
815b5fca8f8Stomee * '::help kmem_slabs' at the mdb prompt.
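 *
 * For example, from within 'mdb -k':
 *
 *	> ::kmem_slabs
 *
 * summarizes slab usage for each kmem cache. The exact columns and any
 * additional options (verbose output, restricting the dcmd to a single cache)
 * should be taken from '::help kmem_slabs' rather than from this comment.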
8167c478bd9Sstevel@tonic-gate */
8177c478bd9Sstevel@tonic-gate
8187c478bd9Sstevel@tonic-gate #include <sys/kmem_impl.h>
8197c478bd9Sstevel@tonic-gate #include <sys/vmem_impl.h>
8207c478bd9Sstevel@tonic-gate #include <sys/param.h>
8217c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
8227c478bd9Sstevel@tonic-gate #include <sys/vm.h>
8237c478bd9Sstevel@tonic-gate #include <sys/proc.h>
8247c478bd9Sstevel@tonic-gate #include <sys/tuneable.h>
8257c478bd9Sstevel@tonic-gate #include <sys/systm.h>
8267c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
8277c478bd9Sstevel@tonic-gate #include <sys/debug.h>
828b5fca8f8Stomee #include <sys/sdt.h>
8297c478bd9Sstevel@tonic-gate #include <sys/mutex.h>
8307c478bd9Sstevel@tonic-gate #include <sys/bitmap.h>
8317c478bd9Sstevel@tonic-gate #include <sys/atomic.h>
8327c478bd9Sstevel@tonic-gate #include <sys/kobj.h>
8337c478bd9Sstevel@tonic-gate #include <sys/disp.h>
8347c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h>
8357c478bd9Sstevel@tonic-gate #include <sys/log.h>
8367c478bd9Sstevel@tonic-gate #include <sys/callb.h>
8377c478bd9Sstevel@tonic-gate #include <sys/taskq.h>
8387c478bd9Sstevel@tonic-gate #include <sys/modctl.h>
8397c478bd9Sstevel@tonic-gate #include <sys/reboot.h>
8407c478bd9Sstevel@tonic-gate #include <sys/id32.h>
8417c478bd9Sstevel@tonic-gate #include <sys/zone.h>
842f4b3ec61Sdh155122 #include <sys/netstack.h>
843b5fca8f8Stomee #ifdef DEBUG
844b5fca8f8Stomee #include <sys/random.h>
845b5fca8f8Stomee #endif
8467c478bd9Sstevel@tonic-gate
8477c478bd9Sstevel@tonic-gate extern void streams_msg_init(void);
8487c478bd9Sstevel@tonic-gate extern int segkp_fromheap;
8497c478bd9Sstevel@tonic-gate extern void segkp_cache_free(void);
8506e00b116SPeter Telford extern int callout_init_done;
8517c478bd9Sstevel@tonic-gate
8527c478bd9Sstevel@tonic-gate struct kmem_cache_kstat {
8537c478bd9Sstevel@tonic-gate kstat_named_t kmc_buf_size;
8547c478bd9Sstevel@tonic-gate kstat_named_t kmc_align;
8557c478bd9Sstevel@tonic-gate kstat_named_t kmc_chunk_size;
8567c478bd9Sstevel@tonic-gate kstat_named_t kmc_slab_size;
8577c478bd9Sstevel@tonic-gate kstat_named_t kmc_alloc;
8587c478bd9Sstevel@tonic-gate kstat_named_t kmc_alloc_fail;
8597c478bd9Sstevel@tonic-gate kstat_named_t kmc_free;
8607c478bd9Sstevel@tonic-gate kstat_named_t kmc_depot_alloc;
8617c478bd9Sstevel@tonic-gate kstat_named_t kmc_depot_free;
8627c478bd9Sstevel@tonic-gate kstat_named_t kmc_depot_contention;
8637c478bd9Sstevel@tonic-gate kstat_named_t kmc_slab_alloc;
8647c478bd9Sstevel@tonic-gate kstat_named_t kmc_slab_free;
8657c478bd9Sstevel@tonic-gate kstat_named_t kmc_buf_constructed;
8667c478bd9Sstevel@tonic-gate kstat_named_t kmc_buf_avail;
8677c478bd9Sstevel@tonic-gate kstat_named_t kmc_buf_inuse;
8687c478bd9Sstevel@tonic-gate kstat_named_t kmc_buf_total;
8697c478bd9Sstevel@tonic-gate kstat_named_t kmc_buf_max;
8707c478bd9Sstevel@tonic-gate kstat_named_t kmc_slab_create;
8717c478bd9Sstevel@tonic-gate kstat_named_t kmc_slab_destroy;
8727c478bd9Sstevel@tonic-gate kstat_named_t kmc_vmem_source;
8737c478bd9Sstevel@tonic-gate kstat_named_t kmc_hash_size;
8747c478bd9Sstevel@tonic-gate kstat_named_t kmc_hash_lookup_depth;
8757c478bd9Sstevel@tonic-gate kstat_named_t kmc_hash_rescale;
8767c478bd9Sstevel@tonic-gate kstat_named_t kmc_full_magazines;
8777c478bd9Sstevel@tonic-gate kstat_named_t kmc_empty_magazines;
8787c478bd9Sstevel@tonic-gate kstat_named_t kmc_magazine_size;
879686031edSTom Erickson kstat_named_t kmc_reap; /* number of kmem_cache_reap() calls */
880686031edSTom Erickson kstat_named_t kmc_defrag; /* attempts to defrag all partial slabs */
881686031edSTom Erickson kstat_named_t kmc_scan; /* attempts to defrag one partial slab */
882686031edSTom Erickson kstat_named_t kmc_move_callbacks; /* sum of yes, no, later, dn, dk */
883b5fca8f8Stomee kstat_named_t kmc_move_yes;
884b5fca8f8Stomee kstat_named_t kmc_move_no;
885b5fca8f8Stomee kstat_named_t kmc_move_later;
886b5fca8f8Stomee kstat_named_t kmc_move_dont_need;
887686031edSTom Erickson kstat_named_t kmc_move_dont_know; /* obj unrecognized by client ... */
888686031edSTom Erickson kstat_named_t kmc_move_hunt_found; /* ... but found in mag layer */
889686031edSTom Erickson kstat_named_t kmc_move_slabs_freed; /* slabs freed by consolidator */
890686031edSTom Erickson kstat_named_t kmc_move_reclaimable; /* buffers, if consolidator ran */
8917c478bd9Sstevel@tonic-gate } kmem_cache_kstat = {
8927c478bd9Sstevel@tonic-gate { "buf_size", KSTAT_DATA_UINT64 },
8937c478bd9Sstevel@tonic-gate { "align", KSTAT_DATA_UINT64 },
8947c478bd9Sstevel@tonic-gate { "chunk_size", KSTAT_DATA_UINT64 },
8957c478bd9Sstevel@tonic-gate { "slab_size", KSTAT_DATA_UINT64 },
8967c478bd9Sstevel@tonic-gate { "alloc", KSTAT_DATA_UINT64 },
8977c478bd9Sstevel@tonic-gate { "alloc_fail", KSTAT_DATA_UINT64 },
8987c478bd9Sstevel@tonic-gate { "free", KSTAT_DATA_UINT64 },
8997c478bd9Sstevel@tonic-gate { "depot_alloc", KSTAT_DATA_UINT64 },
9007c478bd9Sstevel@tonic-gate { "depot_free", KSTAT_DATA_UINT64 },
9017c478bd9Sstevel@tonic-gate { "depot_contention", KSTAT_DATA_UINT64 },
9027c478bd9Sstevel@tonic-gate { "slab_alloc", KSTAT_DATA_UINT64 },
9037c478bd9Sstevel@tonic-gate { "slab_free", KSTAT_DATA_UINT64 },
9047c478bd9Sstevel@tonic-gate { "buf_constructed", KSTAT_DATA_UINT64 },
9057c478bd9Sstevel@tonic-gate { "buf_avail", KSTAT_DATA_UINT64 },
9067c478bd9Sstevel@tonic-gate { "buf_inuse", KSTAT_DATA_UINT64 },
9077c478bd9Sstevel@tonic-gate { "buf_total", KSTAT_DATA_UINT64 },
9087c478bd9Sstevel@tonic-gate { "buf_max", KSTAT_DATA_UINT64 },
9097c478bd9Sstevel@tonic-gate { "slab_create", KSTAT_DATA_UINT64 },
9107c478bd9Sstevel@tonic-gate { "slab_destroy", KSTAT_DATA_UINT64 },
9117c478bd9Sstevel@tonic-gate { "vmem_source", KSTAT_DATA_UINT64 },
9127c478bd9Sstevel@tonic-gate { "hash_size", KSTAT_DATA_UINT64 },
9137c478bd9Sstevel@tonic-gate { "hash_lookup_depth", KSTAT_DATA_UINT64 },
9147c478bd9Sstevel@tonic-gate { "hash_rescale", KSTAT_DATA_UINT64 },
9157c478bd9Sstevel@tonic-gate { "full_magazines", KSTAT_DATA_UINT64 },
9167c478bd9Sstevel@tonic-gate { "empty_magazines", KSTAT_DATA_UINT64 },
9177c478bd9Sstevel@tonic-gate { "magazine_size", KSTAT_DATA_UINT64 },
918686031edSTom Erickson { "reap", KSTAT_DATA_UINT64 },
919686031edSTom Erickson { "defrag", KSTAT_DATA_UINT64 },
920686031edSTom Erickson { "scan", KSTAT_DATA_UINT64 },
921b5fca8f8Stomee { "move_callbacks", KSTAT_DATA_UINT64 },
922b5fca8f8Stomee { "move_yes", KSTAT_DATA_UINT64 },
923b5fca8f8Stomee { "move_no", KSTAT_DATA_UINT64 },
924b5fca8f8Stomee { "move_later", KSTAT_DATA_UINT64 },
925b5fca8f8Stomee { "move_dont_need", KSTAT_DATA_UINT64 },
926b5fca8f8Stomee { "move_dont_know", KSTAT_DATA_UINT64 },
927b5fca8f8Stomee { "move_hunt_found", KSTAT_DATA_UINT64 },
928686031edSTom Erickson { "move_slabs_freed", KSTAT_DATA_UINT64 },
929686031edSTom Erickson { "move_reclaimable", KSTAT_DATA_UINT64 },
9307c478bd9Sstevel@tonic-gate };
9317c478bd9Sstevel@tonic-gate
9327c478bd9Sstevel@tonic-gate static kmutex_t kmem_cache_kstat_lock;
9337c478bd9Sstevel@tonic-gate
9347c478bd9Sstevel@tonic-gate /*
9357c478bd9Sstevel@tonic-gate * The default set of caches to back kmem_alloc().
9367c478bd9Sstevel@tonic-gate * These sizes should be reevaluated periodically.
9377c478bd9Sstevel@tonic-gate *
9387c478bd9Sstevel@tonic-gate * We want allocations that are multiples of the coherency granularity
9397c478bd9Sstevel@tonic-gate * (64 bytes) to be satisfied from a cache which is a multiple of 64
9407c478bd9Sstevel@tonic-gate * bytes, so that it will be 64-byte aligned. For all multiples of 64,
9417c478bd9Sstevel@tonic-gate * the next kmem_cache_size greater than or equal to it must be a
9427c478bd9Sstevel@tonic-gate * multiple of 64.
943dce01e3fSJonathan W Adams *
944dce01e3fSJonathan W Adams * We split the table into two sections: size <= 4k and size > 4k. This
945dce01e3fSJonathan W Adams * saves a lot of space and cache footprint in our cache tables.
9467c478bd9Sstevel@tonic-gate */
9477c478bd9Sstevel@tonic-gate static const int kmem_alloc_sizes[] = {
9487c478bd9Sstevel@tonic-gate 1 * 8,
9497c478bd9Sstevel@tonic-gate 2 * 8,
9507c478bd9Sstevel@tonic-gate 3 * 8,
9517c478bd9Sstevel@tonic-gate 4 * 8, 5 * 8, 6 * 8, 7 * 8,
9527c478bd9Sstevel@tonic-gate 4 * 16, 5 * 16, 6 * 16, 7 * 16,
9537c478bd9Sstevel@tonic-gate 4 * 32, 5 * 32, 6 * 32, 7 * 32,
9547c478bd9Sstevel@tonic-gate 4 * 64, 5 * 64, 6 * 64, 7 * 64,
9557c478bd9Sstevel@tonic-gate 4 * 128, 5 * 128, 6 * 128, 7 * 128,
9567c478bd9Sstevel@tonic-gate P2ALIGN(8192 / 7, 64),
9577c478bd9Sstevel@tonic-gate P2ALIGN(8192 / 6, 64),
9587c478bd9Sstevel@tonic-gate P2ALIGN(8192 / 5, 64),
9597c478bd9Sstevel@tonic-gate P2ALIGN(8192 / 4, 64),
9607c478bd9Sstevel@tonic-gate P2ALIGN(8192 / 3, 64),
9617c478bd9Sstevel@tonic-gate P2ALIGN(8192 / 2, 64),
9627c478bd9Sstevel@tonic-gate };
9637c478bd9Sstevel@tonic-gate
964dce01e3fSJonathan W Adams static const int kmem_big_alloc_sizes[] = {
965dce01e3fSJonathan W Adams 2 * 4096, 3 * 4096,
966dce01e3fSJonathan W Adams 2 * 8192, 3 * 8192,
967dce01e3fSJonathan W Adams 4 * 8192, 5 * 8192, 6 * 8192, 7 * 8192,
968dce01e3fSJonathan W Adams 8 * 8192, 9 * 8192, 10 * 8192, 11 * 8192,
969dce01e3fSJonathan W Adams 12 * 8192, 13 * 8192, 14 * 8192, 15 * 8192,
970dce01e3fSJonathan W Adams 16 * 8192
971dce01e3fSJonathan W Adams };
972dce01e3fSJonathan W Adams
973dce01e3fSJonathan W Adams #define KMEM_MAXBUF 4096
974dce01e3fSJonathan W Adams #define KMEM_BIG_MAXBUF_32BIT 32768
975dce01e3fSJonathan W Adams #define KMEM_BIG_MAXBUF 131072
976dce01e3fSJonathan W Adams
977dce01e3fSJonathan W Adams #define KMEM_BIG_MULTIPLE 4096 /* big_alloc_sizes must be a multiple */
978dce01e3fSJonathan W Adams #define KMEM_BIG_SHIFT 12 /* lg(KMEM_BIG_MULTIPLE) */
9797c478bd9Sstevel@tonic-gate
9807c478bd9Sstevel@tonic-gate static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT];
981dce01e3fSJonathan W Adams static kmem_cache_t *kmem_big_alloc_table[KMEM_BIG_MAXBUF >> KMEM_BIG_SHIFT];
982dce01e3fSJonathan W Adams
983dce01e3fSJonathan W Adams #define KMEM_ALLOC_TABLE_MAX (KMEM_MAXBUF >> KMEM_ALIGN_SHIFT)
984dce01e3fSJonathan W Adams static size_t kmem_big_alloc_table_max = 0; /* # of filled elements */
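/*
 * For illustration only: a simplified sketch of how kmem_alloc() (later in
 * this file) is expected to consult these tables; the real fast path is the
 * authority.
 *
 *	size_t index;
 *	kmem_cache_t *cp;
 *
 *	if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX)
 *		cp = kmem_alloc_table[index];		(small allocation)
 *	else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
 *	    kmem_big_alloc_table_max)
 *		cp = kmem_big_alloc_table[index];	(big allocation)
 *	else
 *		satisfy the request directly from kmem_oversize_arena.
 */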
9857c478bd9Sstevel@tonic-gate
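/*
 * Magazine types, ordered by magazine size (rounds per magazine). Assuming
 * the kmem_magtype_t field order in <sys/kmem_impl.h>, each row supplies
 * { mt_magsize, mt_align, mt_minbuf, mt_maxbuf }; the minbuf/maxbuf bounds
 * (in bytes of buffer size) govern which type a cache starts with and
 * whether its magazines may later be grown to the next larger type.
 */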
9867c478bd9Sstevel@tonic-gate static kmem_magtype_t kmem_magtype[] = {
9877c478bd9Sstevel@tonic-gate { 1, 8, 3200, 65536 },
9887c478bd9Sstevel@tonic-gate { 3, 16, 256, 32768 },
9897c478bd9Sstevel@tonic-gate { 7, 32, 64, 16384 },
9907c478bd9Sstevel@tonic-gate { 15, 64, 0, 8192 },
9917c478bd9Sstevel@tonic-gate { 31, 64, 0, 4096 },
9927c478bd9Sstevel@tonic-gate { 47, 64, 0, 2048 },
9937c478bd9Sstevel@tonic-gate { 63, 64, 0, 1024 },
9947c478bd9Sstevel@tonic-gate { 95, 64, 0, 512 },
9957c478bd9Sstevel@tonic-gate { 143, 64, 0, 0 },
9967c478bd9Sstevel@tonic-gate };
9977c478bd9Sstevel@tonic-gate
9987c478bd9Sstevel@tonic-gate static uint32_t kmem_reaping;
9997c478bd9Sstevel@tonic-gate static uint32_t kmem_reaping_idspace;
10007c478bd9Sstevel@tonic-gate
10017c478bd9Sstevel@tonic-gate /*
10027c478bd9Sstevel@tonic-gate * kmem tunables
10037c478bd9Sstevel@tonic-gate */
10047c478bd9Sstevel@tonic-gate clock_t kmem_reap_interval; /* cache reaping rate [15 * HZ ticks] */
10057c478bd9Sstevel@tonic-gate int kmem_depot_contention = 3; /* max failed tryenters per real interval */
10067c478bd9Sstevel@tonic-gate pgcnt_t kmem_reapahead = 0; /* start reaping N pages before pageout */
10077c478bd9Sstevel@tonic-gate int kmem_panic = 1; /* whether to panic on error */
10087c478bd9Sstevel@tonic-gate int kmem_logging = 1; /* kmem_log_enter() override */
10097c478bd9Sstevel@tonic-gate uint32_t kmem_mtbf = 0; /* mean time between failures [default: off] */
10107c478bd9Sstevel@tonic-gate size_t kmem_transaction_log_size; /* transaction log size [2% of memory] */
10117c478bd9Sstevel@tonic-gate size_t kmem_content_log_size; /* content log size [2% of memory] */
10127c478bd9Sstevel@tonic-gate size_t kmem_failure_log_size; /* failure log [4 pages per CPU] */
10137c478bd9Sstevel@tonic-gate size_t kmem_slab_log_size; /* slab create log [4 pages per CPU] */
10147c478bd9Sstevel@tonic-gate size_t kmem_content_maxsave = 256; /* KMF_CONTENTS max bytes to log */
10157c478bd9Sstevel@tonic-gate size_t kmem_lite_minsize = 0; /* minimum buffer size for KMF_LITE */
10167c478bd9Sstevel@tonic-gate size_t kmem_lite_maxalign = 1024; /* maximum buffer alignment for KMF_LITE */
10177c478bd9Sstevel@tonic-gate int kmem_lite_pcs = 4; /* number of PCs to store in KMF_LITE mode */
10187c478bd9Sstevel@tonic-gate size_t kmem_maxverify; /* maximum bytes to inspect in debug routines */
10197c478bd9Sstevel@tonic-gate size_t kmem_minfirewall; /* hardware-enforced redzone threshold */
10207c478bd9Sstevel@tonic-gate
1021dce01e3fSJonathan W Adams #ifdef _LP64
1022dce01e3fSJonathan W Adams size_t kmem_max_cached = KMEM_BIG_MAXBUF; /* maximum kmem_alloc cache */
1023dce01e3fSJonathan W Adams #else
1024dce01e3fSJonathan W Adams size_t kmem_max_cached = KMEM_BIG_MAXBUF_32BIT; /* maximum kmem_alloc cache */
1025dce01e3fSJonathan W Adams #endif
1026dce01e3fSJonathan W Adams
10277c478bd9Sstevel@tonic-gate #ifdef DEBUG
10287c478bd9Sstevel@tonic-gate int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS;
10297c478bd9Sstevel@tonic-gate #else
10307c478bd9Sstevel@tonic-gate int kmem_flags = 0;
10317c478bd9Sstevel@tonic-gate #endif
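/*
 * On a non-DEBUG kernel the same checks can be enabled for the next boot by
 * setting kmem_flags in /etc/system, conventionally
 *
 *	set kmem_flags = 0xf
 *
 * which is intended to be the OR of the four flags above; confirm the flag
 * values in <sys/kmem_impl.h> before relying on the constant.
 */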
10327c478bd9Sstevel@tonic-gate int kmem_ready;
10337c478bd9Sstevel@tonic-gate
10347c478bd9Sstevel@tonic-gate static kmem_cache_t *kmem_slab_cache;
10357c478bd9Sstevel@tonic-gate static kmem_cache_t *kmem_bufctl_cache;
10367c478bd9Sstevel@tonic-gate static kmem_cache_t *kmem_bufctl_audit_cache;
10377c478bd9Sstevel@tonic-gate
10387c478bd9Sstevel@tonic-gate static kmutex_t kmem_cache_lock; /* inter-cache linkage only */
1039b5fca8f8Stomee static list_t kmem_caches;
10407c478bd9Sstevel@tonic-gate
10417c478bd9Sstevel@tonic-gate static taskq_t *kmem_taskq;
10427c478bd9Sstevel@tonic-gate static kmutex_t kmem_flags_lock;
10437c478bd9Sstevel@tonic-gate static vmem_t *kmem_metadata_arena;
10447c478bd9Sstevel@tonic-gate static vmem_t *kmem_msb_arena; /* arena for metadata caches */
10457c478bd9Sstevel@tonic-gate static vmem_t *kmem_cache_arena;
10467c478bd9Sstevel@tonic-gate static vmem_t *kmem_hash_arena;
10477c478bd9Sstevel@tonic-gate static vmem_t *kmem_log_arena;
10487c478bd9Sstevel@tonic-gate static vmem_t *kmem_oversize_arena;
10497c478bd9Sstevel@tonic-gate static vmem_t *kmem_va_arena;
10507c478bd9Sstevel@tonic-gate static vmem_t *kmem_default_arena;
10517c478bd9Sstevel@tonic-gate static vmem_t *kmem_firewall_va_arena;
10527c478bd9Sstevel@tonic-gate static vmem_t *kmem_firewall_arena;
10537c478bd9Sstevel@tonic-gate
1054b5fca8f8Stomee /*
1055b5fca8f8Stomee * kmem slab consolidator thresholds (tunables)
1056b5fca8f8Stomee */
1057686031edSTom Erickson size_t kmem_frag_minslabs = 101; /* minimum total slabs */
1058686031edSTom Erickson size_t kmem_frag_numer = 1; /* free buffers (numerator) */
1059686031edSTom Erickson size_t kmem_frag_denom = KMEM_VOID_FRACTION; /* buffers (denominator) */
1060b5fca8f8Stomee /*
1061b5fca8f8Stomee * Maximum number of slabs from which to move buffers during a single
1062b5fca8f8Stomee * maintenance interval while the system is not low on memory.
1063b5fca8f8Stomee */
1064686031edSTom Erickson size_t kmem_reclaim_max_slabs = 1;
1065b5fca8f8Stomee /*
1066b5fca8f8Stomee * Number of slabs to scan backwards from the end of the partial slab list
1067b5fca8f8Stomee * when searching for buffers to relocate.
1068b5fca8f8Stomee */
1069686031edSTom Erickson size_t kmem_reclaim_scan_range = 12;
1070b5fca8f8Stomee
1071b5fca8f8Stomee /* consolidator knobs */
1072b5fca8f8Stomee static boolean_t kmem_move_noreap;
1073b5fca8f8Stomee static boolean_t kmem_move_blocked;
1074b5fca8f8Stomee static boolean_t kmem_move_fulltilt;
1075b5fca8f8Stomee static boolean_t kmem_move_any_partial;
1076b5fca8f8Stomee
1077b5fca8f8Stomee #ifdef DEBUG
1078b5fca8f8Stomee /*
1079686031edSTom Erickson * kmem consolidator debug tunables:
1080b5fca8f8Stomee * Ensure code coverage by occasionally running the consolidator even when the
1081b5fca8f8Stomee * caches are not fragmented (they may never be). These intervals are mean time
1082b5fca8f8Stomee * in cache maintenance intervals (kmem_cache_update).
1083b5fca8f8Stomee */
1084686031edSTom Erickson uint32_t kmem_mtb_move = 60; /* defrag 1 slab (~15min) */
1085686031edSTom Erickson uint32_t kmem_mtb_reap = 1800; /* defrag all slabs (~7.5hrs) */
1086b5fca8f8Stomee #endif /* DEBUG */
1087b5fca8f8Stomee
1088b5fca8f8Stomee static kmem_cache_t *kmem_defrag_cache;
1089b5fca8f8Stomee static kmem_cache_t *kmem_move_cache;
1090b5fca8f8Stomee static taskq_t *kmem_move_taskq;
1091b5fca8f8Stomee
1092b5fca8f8Stomee static void kmem_cache_scan(kmem_cache_t *);
1093b5fca8f8Stomee static void kmem_cache_defrag(kmem_cache_t *);
1094b942e89bSDavid Valin static void kmem_slab_prefill(kmem_cache_t *, kmem_slab_t *);
1095b5fca8f8Stomee
1096b5fca8f8Stomee
10977c478bd9Sstevel@tonic-gate kmem_log_header_t *kmem_transaction_log;
10987c478bd9Sstevel@tonic-gate kmem_log_header_t *kmem_content_log;
10997c478bd9Sstevel@tonic-gate kmem_log_header_t *kmem_failure_log;
11007c478bd9Sstevel@tonic-gate kmem_log_header_t *kmem_slab_log;
11017c478bd9Sstevel@tonic-gate
11027c478bd9Sstevel@tonic-gate static int kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */
11037c478bd9Sstevel@tonic-gate
11047c478bd9Sstevel@tonic-gate #define KMEM_BUFTAG_LITE_ENTER(bt, count, caller) \
11057c478bd9Sstevel@tonic-gate if ((count) > 0) { \
11067c478bd9Sstevel@tonic-gate pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history; \
11077c478bd9Sstevel@tonic-gate pc_t *_e; \
11087c478bd9Sstevel@tonic-gate /* memmove() the old entries down one notch */ \
11097c478bd9Sstevel@tonic-gate for (_e = &_s[(count) - 1]; _e > _s; _e--) \
11107c478bd9Sstevel@tonic-gate *_e = *(_e - 1); \
11117c478bd9Sstevel@tonic-gate *_s = (uintptr_t)(caller); \
11127c478bd9Sstevel@tonic-gate }
11137c478bd9Sstevel@tonic-gate
11147c478bd9Sstevel@tonic-gate #define KMERR_MODIFIED 0 /* buffer modified while on freelist */
11157c478bd9Sstevel@tonic-gate #define KMERR_REDZONE 1 /* redzone violation (write past end of buf) */
11167c478bd9Sstevel@tonic-gate #define KMERR_DUPFREE 2 /* freed a buffer twice */
11177c478bd9Sstevel@tonic-gate #define KMERR_BADADDR 3 /* freed a bad (unallocated) address */
11187c478bd9Sstevel@tonic-gate #define KMERR_BADBUFTAG 4 /* buftag corrupted */
11197c478bd9Sstevel@tonic-gate #define KMERR_BADBUFCTL 5 /* bufctl corrupted */
11207c478bd9Sstevel@tonic-gate #define KMERR_BADCACHE 6 /* freed a buffer to the wrong cache */
11217c478bd9Sstevel@tonic-gate #define KMERR_BADSIZE 7 /* alloc size != free size */
11227c478bd9Sstevel@tonic-gate #define KMERR_BADBASE 8 /* buffer base address wrong */
11237c478bd9Sstevel@tonic-gate
11247c478bd9Sstevel@tonic-gate struct {
11257c478bd9Sstevel@tonic-gate hrtime_t kmp_timestamp; /* timestamp of panic */
11267c478bd9Sstevel@tonic-gate int kmp_error; /* type of kmem error */
11277c478bd9Sstevel@tonic-gate void *kmp_buffer; /* buffer that induced panic */
11287c478bd9Sstevel@tonic-gate void *kmp_realbuf; /* real start address for buffer */
11297c478bd9Sstevel@tonic-gate kmem_cache_t *kmp_cache; /* buffer's cache according to client */
11307c478bd9Sstevel@tonic-gate kmem_cache_t *kmp_realcache; /* actual cache containing buffer */
11317c478bd9Sstevel@tonic-gate kmem_slab_t *kmp_slab; /* slab according to kmem_findslab() */
11327c478bd9Sstevel@tonic-gate kmem_bufctl_t *kmp_bufctl; /* bufctl */
11337c478bd9Sstevel@tonic-gate } kmem_panic_info;
11347c478bd9Sstevel@tonic-gate
11357c478bd9Sstevel@tonic-gate
11367c478bd9Sstevel@tonic-gate static void
11377c478bd9Sstevel@tonic-gate copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
11387c478bd9Sstevel@tonic-gate {
11397c478bd9Sstevel@tonic-gate uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
11407c478bd9Sstevel@tonic-gate uint64_t *buf = buf_arg;
11417c478bd9Sstevel@tonic-gate
11427c478bd9Sstevel@tonic-gate while (buf < bufend)
11437c478bd9Sstevel@tonic-gate *buf++ = pattern;
11447c478bd9Sstevel@tonic-gate }
11457c478bd9Sstevel@tonic-gate
11467c478bd9Sstevel@tonic-gate static void *
11477c478bd9Sstevel@tonic-gate verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
11487c478bd9Sstevel@tonic-gate {
11497c478bd9Sstevel@tonic-gate uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
11507c478bd9Sstevel@tonic-gate uint64_t *buf;
11517c478bd9Sstevel@tonic-gate
11527c478bd9Sstevel@tonic-gate for (buf = buf_arg; buf < bufend; buf++)
11537c478bd9Sstevel@tonic-gate if (*buf != pattern)
11547c478bd9Sstevel@tonic-gate return (buf);
11557c478bd9Sstevel@tonic-gate return (NULL);
11567c478bd9Sstevel@tonic-gate }
11577c478bd9Sstevel@tonic-gate
11587c478bd9Sstevel@tonic-gate static void *
11597c478bd9Sstevel@tonic-gate verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
11607c478bd9Sstevel@tonic-gate {
11617c478bd9Sstevel@tonic-gate uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
11627c478bd9Sstevel@tonic-gate uint64_t *buf;
11637c478bd9Sstevel@tonic-gate
11647c478bd9Sstevel@tonic-gate for (buf = buf_arg; buf < bufend; buf++) {
11657c478bd9Sstevel@tonic-gate if (*buf != old) {
11667c478bd9Sstevel@tonic-gate copy_pattern(old, buf_arg,
11677c478bd9Sstevel@tonic-gate (char *)buf - (char *)buf_arg);
11687c478bd9Sstevel@tonic-gate return (buf);
11697c478bd9Sstevel@tonic-gate }
11707c478bd9Sstevel@tonic-gate *buf = new;
11717c478bd9Sstevel@tonic-gate }
11727c478bd9Sstevel@tonic-gate
11737c478bd9Sstevel@tonic-gate return (NULL);
11747c478bd9Sstevel@tonic-gate }
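/*
 * Illustrative use of the helpers above, mirroring the KMF_DEADBEEF checks
 * made elsewhere in this file: on allocation, walk the buffer verifying the
 * free pattern and rewriting it with the uninitialized pattern in one pass:
 *
 *	if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
 *	    KMEM_UNINITIALIZED_PATTERN, buf, cp->cache_verify) != NULL)
 *		the buffer was modified while free; report KMERR_MODIFIED
 */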
11757c478bd9Sstevel@tonic-gate
11767c478bd9Sstevel@tonic-gate static void
11777c478bd9Sstevel@tonic-gate kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
11787c478bd9Sstevel@tonic-gate {
11797c478bd9Sstevel@tonic-gate kmem_cache_t *cp;
11807c478bd9Sstevel@tonic-gate
11817c478bd9Sstevel@tonic-gate mutex_enter(&kmem_cache_lock);
1182b5fca8f8Stomee for (cp = list_head(&kmem_caches); cp != NULL;
1183b5fca8f8Stomee cp = list_next(&kmem_caches, cp))
11847c478bd9Sstevel@tonic-gate if (tq != NULL)
11857c478bd9Sstevel@tonic-gate (void) taskq_dispatch(tq, (task_func_t *)func, cp,
11867c478bd9Sstevel@tonic-gate tqflag);
11877c478bd9Sstevel@tonic-gate else
11887c478bd9Sstevel@tonic-gate func(cp);
11897c478bd9Sstevel@tonic-gate mutex_exit(&kmem_cache_lock);
11907c478bd9Sstevel@tonic-gate }
11917c478bd9Sstevel@tonic-gate
11927c478bd9Sstevel@tonic-gate static void
11937c478bd9Sstevel@tonic-gate kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
11947c478bd9Sstevel@tonic-gate {
11957c478bd9Sstevel@tonic-gate kmem_cache_t *cp;
11967c478bd9Sstevel@tonic-gate
11977c478bd9Sstevel@tonic-gate mutex_enter(&kmem_cache_lock);
1198b5fca8f8Stomee for (cp = list_head(&kmem_caches); cp != NULL;
1199b5fca8f8Stomee cp = list_next(&kmem_caches, cp)) {
12007c478bd9Sstevel@tonic-gate if (!(cp->cache_cflags & KMC_IDENTIFIER))
12017c478bd9Sstevel@tonic-gate continue;
12027c478bd9Sstevel@tonic-gate if (tq != NULL)
12037c478bd9Sstevel@tonic-gate (void) taskq_dispatch(tq, (task_func_t *)func, cp,
12047c478bd9Sstevel@tonic-gate tqflag);
12057c478bd9Sstevel@tonic-gate else
12067c478bd9Sstevel@tonic-gate func(cp);
12077c478bd9Sstevel@tonic-gate }
12087c478bd9Sstevel@tonic-gate mutex_exit(&kmem_cache_lock);
12097c478bd9Sstevel@tonic-gate }
12107c478bd9Sstevel@tonic-gate
12117c478bd9Sstevel@tonic-gate /*
12127c478bd9Sstevel@tonic-gate * Debugging support. Given a buffer address, find its slab.
12137c478bd9Sstevel@tonic-gate */
12147c478bd9Sstevel@tonic-gate static kmem_slab_t *
12157c478bd9Sstevel@tonic-gate kmem_findslab(kmem_cache_t *cp, void *buf)
12167c478bd9Sstevel@tonic-gate {
12177c478bd9Sstevel@tonic-gate kmem_slab_t *sp;
12187c478bd9Sstevel@tonic-gate
12197c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_lock);
1220b5fca8f8Stomee for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
1221b5fca8f8Stomee sp = list_next(&cp->cache_complete_slabs, sp)) {
1222b5fca8f8Stomee if (KMEM_SLAB_MEMBER(sp, buf)) {
1223b5fca8f8Stomee mutex_exit(&cp->cache_lock);
1224b5fca8f8Stomee return (sp);
1225b5fca8f8Stomee }
1226b5fca8f8Stomee }
1227b5fca8f8Stomee for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
1228b5fca8f8Stomee sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
12297c478bd9Sstevel@tonic-gate if (KMEM_SLAB_MEMBER(sp, buf)) {
12307c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock);
12317c478bd9Sstevel@tonic-gate return (sp);
12327c478bd9Sstevel@tonic-gate }
12337c478bd9Sstevel@tonic-gate }
12347c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock);
12357c478bd9Sstevel@tonic-gate
12367c478bd9Sstevel@tonic-gate return (NULL);
12377c478bd9Sstevel@tonic-gate }
12387c478bd9Sstevel@tonic-gate
12397c478bd9Sstevel@tonic-gate static void
12407c478bd9Sstevel@tonic-gate kmem_error(int error, kmem_cache_t *cparg, void *bufarg)
12417c478bd9Sstevel@tonic-gate {
12427c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = NULL;
12437c478bd9Sstevel@tonic-gate kmem_bufctl_t *bcp = NULL;
12447c478bd9Sstevel@tonic-gate kmem_cache_t *cp = cparg;
12457c478bd9Sstevel@tonic-gate kmem_slab_t *sp;
12467c478bd9Sstevel@tonic-gate uint64_t *off;
12477c478bd9Sstevel@tonic-gate void *buf = bufarg;
12487c478bd9Sstevel@tonic-gate
12497c478bd9Sstevel@tonic-gate kmem_logging = 0; /* stop logging when a bad thing happens */
12507c478bd9Sstevel@tonic-gate
12517c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_timestamp = gethrtime();
12527c478bd9Sstevel@tonic-gate
12537c478bd9Sstevel@tonic-gate sp = kmem_findslab(cp, buf);
12547c478bd9Sstevel@tonic-gate if (sp == NULL) {
1255b5fca8f8Stomee for (cp = list_tail(&kmem_caches); cp != NULL;
1256b5fca8f8Stomee cp = list_prev(&kmem_caches, cp)) {
12577c478bd9Sstevel@tonic-gate if ((sp = kmem_findslab(cp, buf)) != NULL)
12587c478bd9Sstevel@tonic-gate break;
12597c478bd9Sstevel@tonic-gate }
12607c478bd9Sstevel@tonic-gate }
12617c478bd9Sstevel@tonic-gate
12627c478bd9Sstevel@tonic-gate if (sp == NULL) {
12637c478bd9Sstevel@tonic-gate cp = NULL;
12647c478bd9Sstevel@tonic-gate error = KMERR_BADADDR;
12657c478bd9Sstevel@tonic-gate } else {
12667c478bd9Sstevel@tonic-gate if (cp != cparg)
12677c478bd9Sstevel@tonic-gate error = KMERR_BADCACHE;
12687c478bd9Sstevel@tonic-gate else
12697c478bd9Sstevel@tonic-gate buf = (char *)bufarg - ((uintptr_t)bufarg -
12707c478bd9Sstevel@tonic-gate (uintptr_t)sp->slab_base) % cp->cache_chunksize;
12717c478bd9Sstevel@tonic-gate if (buf != bufarg)
12727c478bd9Sstevel@tonic-gate error = KMERR_BADBASE;
12737c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_BUFTAG)
12747c478bd9Sstevel@tonic-gate btp = KMEM_BUFTAG(cp, buf);
12757c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) {
12767c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_lock);
12777c478bd9Sstevel@tonic-gate for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
12787c478bd9Sstevel@tonic-gate if (bcp->bc_addr == buf)
12797c478bd9Sstevel@tonic-gate break;
12807c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock);
12817c478bd9Sstevel@tonic-gate if (bcp == NULL && btp != NULL)
12827c478bd9Sstevel@tonic-gate bcp = btp->bt_bufctl;
12837c478bd9Sstevel@tonic-gate if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
12847c478bd9Sstevel@tonic-gate NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) ||
12857c478bd9Sstevel@tonic-gate bcp->bc_addr != buf) {
12867c478bd9Sstevel@tonic-gate error = KMERR_BADBUFCTL;
12877c478bd9Sstevel@tonic-gate bcp = NULL;
12887c478bd9Sstevel@tonic-gate }
12897c478bd9Sstevel@tonic-gate }
12907c478bd9Sstevel@tonic-gate }
12917c478bd9Sstevel@tonic-gate
12927c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_error = error;
12937c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_buffer = bufarg;
12947c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_realbuf = buf;
12957c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_cache = cparg;
12967c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_realcache = cp;
12977c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_slab = sp;
12987c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_bufctl = bcp;
12997c478bd9Sstevel@tonic-gate
13007c478bd9Sstevel@tonic-gate printf("kernel memory allocator: ");
13017c478bd9Sstevel@tonic-gate
13027c478bd9Sstevel@tonic-gate switch (error) {
13037c478bd9Sstevel@tonic-gate
13047c478bd9Sstevel@tonic-gate case KMERR_MODIFIED:
13057c478bd9Sstevel@tonic-gate printf("buffer modified after being freed\n");
13067c478bd9Sstevel@tonic-gate off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
13077c478bd9Sstevel@tonic-gate if (off == NULL) /* shouldn't happen */
13087c478bd9Sstevel@tonic-gate off = buf;
13097c478bd9Sstevel@tonic-gate printf("modification occurred at offset 0x%lx "
13107c478bd9Sstevel@tonic-gate "(0x%llx replaced by 0x%llx)\n",
13117c478bd9Sstevel@tonic-gate (uintptr_t)off - (uintptr_t)buf,
13127c478bd9Sstevel@tonic-gate (longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off);
13137c478bd9Sstevel@tonic-gate break;
13147c478bd9Sstevel@tonic-gate
13157c478bd9Sstevel@tonic-gate case KMERR_REDZONE:
13167c478bd9Sstevel@tonic-gate printf("redzone violation: write past end of buffer\n");
13177c478bd9Sstevel@tonic-gate break;
13187c478bd9Sstevel@tonic-gate
13197c478bd9Sstevel@tonic-gate case KMERR_BADADDR:
13207c478bd9Sstevel@tonic-gate printf("invalid free: buffer not in cache\n");
13217c478bd9Sstevel@tonic-gate break;
13227c478bd9Sstevel@tonic-gate
13237c478bd9Sstevel@tonic-gate case KMERR_DUPFREE:
13247c478bd9Sstevel@tonic-gate printf("duplicate free: buffer freed twice\n");
13257c478bd9Sstevel@tonic-gate break;
13267c478bd9Sstevel@tonic-gate
13277c478bd9Sstevel@tonic-gate case KMERR_BADBUFTAG:
13287c478bd9Sstevel@tonic-gate printf("boundary tag corrupted\n");
13297c478bd9Sstevel@tonic-gate printf("bcp ^ bxstat = %lx, should be %lx\n",
13307c478bd9Sstevel@tonic-gate (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
13317c478bd9Sstevel@tonic-gate KMEM_BUFTAG_FREE);
13327c478bd9Sstevel@tonic-gate break;
13337c478bd9Sstevel@tonic-gate
13347c478bd9Sstevel@tonic-gate case KMERR_BADBUFCTL:
13357c478bd9Sstevel@tonic-gate printf("bufctl corrupted\n");
13367c478bd9Sstevel@tonic-gate break;
13377c478bd9Sstevel@tonic-gate
13387c478bd9Sstevel@tonic-gate case KMERR_BADCACHE:
13397c478bd9Sstevel@tonic-gate printf("buffer freed to wrong cache\n");
13407c478bd9Sstevel@tonic-gate printf("buffer was allocated from %s,\n", cp->cache_name);
13417c478bd9Sstevel@tonic-gate printf("caller attempting free to %s.\n", cparg->cache_name);
13427c478bd9Sstevel@tonic-gate break;
13437c478bd9Sstevel@tonic-gate
13447c478bd9Sstevel@tonic-gate case KMERR_BADSIZE:
13457c478bd9Sstevel@tonic-gate printf("bad free: free size (%u) != alloc size (%u)\n",
13467c478bd9Sstevel@tonic-gate KMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
13477c478bd9Sstevel@tonic-gate KMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
13487c478bd9Sstevel@tonic-gate break;
13497c478bd9Sstevel@tonic-gate
13507c478bd9Sstevel@tonic-gate case KMERR_BADBASE:
13517c478bd9Sstevel@tonic-gate printf("bad free: free address (%p) != alloc address (%p)\n",
13527c478bd9Sstevel@tonic-gate bufarg, buf);
13537c478bd9Sstevel@tonic-gate break;
13547c478bd9Sstevel@tonic-gate }
13557c478bd9Sstevel@tonic-gate
13567c478bd9Sstevel@tonic-gate printf("buffer=%p bufctl=%p cache: %s\n",
13577c478bd9Sstevel@tonic-gate bufarg, (void *)bcp, cparg->cache_name);
13587c478bd9Sstevel@tonic-gate
13597c478bd9Sstevel@tonic-gate if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
13607c478bd9Sstevel@tonic-gate error != KMERR_BADBUFCTL) {
13617c478bd9Sstevel@tonic-gate int d;
13627c478bd9Sstevel@tonic-gate timestruc_t ts;
13637c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp;
13647c478bd9Sstevel@tonic-gate
13657c478bd9Sstevel@tonic-gate hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts);
13667c478bd9Sstevel@tonic-gate printf("previous transaction on buffer %p:\n", buf);
13677c478bd9Sstevel@tonic-gate printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n",
13687c478bd9Sstevel@tonic-gate (void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
13697c478bd9Sstevel@tonic-gate (void *)sp, cp->cache_name);
13707c478bd9Sstevel@tonic-gate for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) {
13717c478bd9Sstevel@tonic-gate ulong_t off;
13727c478bd9Sstevel@tonic-gate char *sym = kobj_getsymname(bcap->bc_stack[d], &off);
13737c478bd9Sstevel@tonic-gate printf("%s+%lx\n", sym ? sym : "?", off);
13747c478bd9Sstevel@tonic-gate }
13757c478bd9Sstevel@tonic-gate }
13767c478bd9Sstevel@tonic-gate if (kmem_panic > 0)
13777c478bd9Sstevel@tonic-gate panic("kernel heap corruption detected");
13787c478bd9Sstevel@tonic-gate if (kmem_panic == 0)
13797c478bd9Sstevel@tonic-gate debug_enter(NULL);
13807c478bd9Sstevel@tonic-gate kmem_logging = 1; /* resume logging */
13817c478bd9Sstevel@tonic-gate }
13827c478bd9Sstevel@tonic-gate
13837c478bd9Sstevel@tonic-gate static kmem_log_header_t *
13847c478bd9Sstevel@tonic-gate kmem_log_init(size_t logsize)
13857c478bd9Sstevel@tonic-gate {
13867c478bd9Sstevel@tonic-gate kmem_log_header_t *lhp;
13877c478bd9Sstevel@tonic-gate int nchunks = 4 * max_ncpus;
13887c478bd9Sstevel@tonic-gate size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus];
13897c478bd9Sstevel@tonic-gate int i;
13907c478bd9Sstevel@tonic-gate
13917c478bd9Sstevel@tonic-gate /*
13927c478bd9Sstevel@tonic-gate * Make sure that lhp->lh_cpu[] is nicely aligned
13937c478bd9Sstevel@tonic-gate * to prevent false sharing of cache lines.
13947c478bd9Sstevel@tonic-gate */
13957c478bd9Sstevel@tonic-gate lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN);
13967c478bd9Sstevel@tonic-gate lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
13977c478bd9Sstevel@tonic-gate NULL, NULL, VM_SLEEP);
13987c478bd9Sstevel@tonic-gate bzero(lhp, lhsize);
13997c478bd9Sstevel@tonic-gate
14007c478bd9Sstevel@tonic-gate mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
14017c478bd9Sstevel@tonic-gate lhp->lh_nchunks = nchunks;
14027c478bd9Sstevel@tonic-gate lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE);
14037c478bd9Sstevel@tonic-gate lhp->lh_base = vmem_alloc(kmem_log_arena,
14047c478bd9Sstevel@tonic-gate lhp->lh_chunksize * nchunks, VM_SLEEP);
14057c478bd9Sstevel@tonic-gate lhp->lh_free = vmem_alloc(kmem_log_arena,
14067c478bd9Sstevel@tonic-gate nchunks * sizeof (int), VM_SLEEP);
14077c478bd9Sstevel@tonic-gate bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
14087c478bd9Sstevel@tonic-gate
14097c478bd9Sstevel@tonic-gate for (i = 0; i < max_ncpus; i++) {
14107c478bd9Sstevel@tonic-gate kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
14117c478bd9Sstevel@tonic-gate mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
14127c478bd9Sstevel@tonic-gate clhp->clh_chunk = i;
14137c478bd9Sstevel@tonic-gate }
14147c478bd9Sstevel@tonic-gate
14157c478bd9Sstevel@tonic-gate for (i = max_ncpus; i < nchunks; i++)
14167c478bd9Sstevel@tonic-gate lhp->lh_free[i] = i;
14177c478bd9Sstevel@tonic-gate
14187c478bd9Sstevel@tonic-gate lhp->lh_head = max_ncpus;
14197c478bd9Sstevel@tonic-gate lhp->lh_tail = 0;
14207c478bd9Sstevel@tonic-gate
14217c478bd9Sstevel@tonic-gate return (lhp);
14227c478bd9Sstevel@tonic-gate }
14237c478bd9Sstevel@tonic-gate
14247c478bd9Sstevel@tonic-gate static void *
14257c478bd9Sstevel@tonic-gate kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
14267c478bd9Sstevel@tonic-gate {
14277c478bd9Sstevel@tonic-gate void *logspace;
1428d8f51c15SJohn Levon kmem_cpu_log_header_t *clhp;
14297c478bd9Sstevel@tonic-gate
14307c478bd9Sstevel@tonic-gate if (lhp == NULL || kmem_logging == 0 || panicstr)
14317c478bd9Sstevel@tonic-gate return (NULL);
14327c478bd9Sstevel@tonic-gate
1433d8f51c15SJohn Levon clhp = &lhp->lh_cpu[CPU->cpu_seqid];
1434d8f51c15SJohn Levon
14357c478bd9Sstevel@tonic-gate mutex_enter(&clhp->clh_lock);
14367c478bd9Sstevel@tonic-gate clhp->clh_hits++;
14377c478bd9Sstevel@tonic-gate if (size > clhp->clh_avail) {
14387c478bd9Sstevel@tonic-gate mutex_enter(&lhp->lh_lock);
14397c478bd9Sstevel@tonic-gate lhp->lh_hits++;
14407c478bd9Sstevel@tonic-gate lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
14417c478bd9Sstevel@tonic-gate lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
14427c478bd9Sstevel@tonic-gate clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
14437c478bd9Sstevel@tonic-gate lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
14447c478bd9Sstevel@tonic-gate clhp->clh_current = lhp->lh_base +
14457c478bd9Sstevel@tonic-gate clhp->clh_chunk * lhp->lh_chunksize;
14467c478bd9Sstevel@tonic-gate clhp->clh_avail = lhp->lh_chunksize;
14477c478bd9Sstevel@tonic-gate if (size > lhp->lh_chunksize)
14487c478bd9Sstevel@tonic-gate size = lhp->lh_chunksize;
14497c478bd9Sstevel@tonic-gate mutex_exit(&lhp->lh_lock);
14507c478bd9Sstevel@tonic-gate }
14517c478bd9Sstevel@tonic-gate logspace = clhp->clh_current;
14527c478bd9Sstevel@tonic-gate clhp->clh_current += size;
14537c478bd9Sstevel@tonic-gate clhp->clh_avail -= size;
14547c478bd9Sstevel@tonic-gate bcopy(data, logspace, size);
14557c478bd9Sstevel@tonic-gate mutex_exit(&clhp->clh_lock);
14567c478bd9Sstevel@tonic-gate return (logspace);
14577c478bd9Sstevel@tonic-gate }
14587c478bd9Sstevel@tonic-gate
14597c478bd9Sstevel@tonic-gate #define KMEM_AUDIT(lp, cp, bcp) \
14607c478bd9Sstevel@tonic-gate { \
14617c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp); \
14627c478bd9Sstevel@tonic-gate _bcp->bc_timestamp = gethrtime(); \
14637c478bd9Sstevel@tonic-gate _bcp->bc_thread = curthread; \
14647c478bd9Sstevel@tonic-gate _bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH); \
14657c478bd9Sstevel@tonic-gate _bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp)); \
14667c478bd9Sstevel@tonic-gate }
14677c478bd9Sstevel@tonic-gate
14687c478bd9Sstevel@tonic-gate static void
14697c478bd9Sstevel@tonic-gate kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
14707c478bd9Sstevel@tonic-gate kmem_slab_t *sp, void *addr)
14717c478bd9Sstevel@tonic-gate {
14727c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t bca;
14737c478bd9Sstevel@tonic-gate
14747c478bd9Sstevel@tonic-gate bzero(&bca, sizeof (kmem_bufctl_audit_t));
14757c478bd9Sstevel@tonic-gate bca.bc_addr = addr;
14767c478bd9Sstevel@tonic-gate bca.bc_slab = sp;
14777c478bd9Sstevel@tonic-gate bca.bc_cache = cp;
14787c478bd9Sstevel@tonic-gate KMEM_AUDIT(lp, cp, &bca);
14797c478bd9Sstevel@tonic-gate }
14807c478bd9Sstevel@tonic-gate
14817c478bd9Sstevel@tonic-gate /*
14827c478bd9Sstevel@tonic-gate * Create a new slab for cache cp.
14837c478bd9Sstevel@tonic-gate */
14847c478bd9Sstevel@tonic-gate static kmem_slab_t *
14857c478bd9Sstevel@tonic-gate kmem_slab_create(kmem_cache_t *cp, int kmflag)
14867c478bd9Sstevel@tonic-gate {
14877c478bd9Sstevel@tonic-gate size_t slabsize = cp->cache_slabsize;
14887c478bd9Sstevel@tonic-gate size_t chunksize = cp->cache_chunksize;
14897c478bd9Sstevel@tonic-gate int cache_flags = cp->cache_flags;
14907c478bd9Sstevel@tonic-gate size_t color, chunks;
14917c478bd9Sstevel@tonic-gate char *buf, *slab;
14927c478bd9Sstevel@tonic-gate kmem_slab_t *sp;
14937c478bd9Sstevel@tonic-gate kmem_bufctl_t *bcp;
14947c478bd9Sstevel@tonic-gate vmem_t *vmp = cp->cache_arena;
14957c478bd9Sstevel@tonic-gate
1496b5fca8f8Stomee ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1497b5fca8f8Stomee
14987c478bd9Sstevel@tonic-gate color = cp->cache_color + cp->cache_align;
14997c478bd9Sstevel@tonic-gate if (color > cp->cache_maxcolor)
15007c478bd9Sstevel@tonic-gate color = cp->cache_mincolor;
15017c478bd9Sstevel@tonic-gate cp->cache_color = color;
15027c478bd9Sstevel@tonic-gate
15037c478bd9Sstevel@tonic-gate slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS);
15047c478bd9Sstevel@tonic-gate
15057c478bd9Sstevel@tonic-gate if (slab == NULL)
15067c478bd9Sstevel@tonic-gate goto vmem_alloc_failure;
15077c478bd9Sstevel@tonic-gate
15087c478bd9Sstevel@tonic-gate ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
15097c478bd9Sstevel@tonic-gate
1510b5fca8f8Stomee /*
1511b5fca8f8Stomee * Reverify what was already checked in kmem_cache_set_move(), since the
1512b5fca8f8Stomee * consolidator depends (for correctness) on slabs being initialized
1513b5fca8f8Stomee * with the 0xbaddcafe memory pattern (setting a low order bit usable by
1514b5fca8f8Stomee * clients to distinguish uninitialized memory from known objects).
1515b5fca8f8Stomee */
1516b5fca8f8Stomee ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
15177c478bd9Sstevel@tonic-gate if (!(cp->cache_cflags & KMC_NOTOUCH))
15187c478bd9Sstevel@tonic-gate copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize);
15197c478bd9Sstevel@tonic-gate
15207c478bd9Sstevel@tonic-gate if (cache_flags & KMF_HASH) {
15217c478bd9Sstevel@tonic-gate if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL)
15227c478bd9Sstevel@tonic-gate goto slab_alloc_failure;
15237c478bd9Sstevel@tonic-gate chunks = (slabsize - color) / chunksize;
15247c478bd9Sstevel@tonic-gate } else {
15257c478bd9Sstevel@tonic-gate sp = KMEM_SLAB(cp, slab);
15267c478bd9Sstevel@tonic-gate chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize;
15277c478bd9Sstevel@tonic-gate }
15287c478bd9Sstevel@tonic-gate
15297c478bd9Sstevel@tonic-gate sp->slab_cache = cp;
15307c478bd9Sstevel@tonic-gate sp->slab_head = NULL;
15317c478bd9Sstevel@tonic-gate sp->slab_refcnt = 0;
15327c478bd9Sstevel@tonic-gate sp->slab_base = buf = slab + color;
15337c478bd9Sstevel@tonic-gate sp->slab_chunks = chunks;
1534b5fca8f8Stomee sp->slab_stuck_offset = (uint32_t)-1;
1535b5fca8f8Stomee sp->slab_later_count = 0;
1536b5fca8f8Stomee sp->slab_flags = 0;
15377c478bd9Sstevel@tonic-gate
15387c478bd9Sstevel@tonic-gate ASSERT(chunks > 0);
15397c478bd9Sstevel@tonic-gate while (chunks-- != 0) {
15407c478bd9Sstevel@tonic-gate if (cache_flags & KMF_HASH) {
15417c478bd9Sstevel@tonic-gate bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
15427c478bd9Sstevel@tonic-gate if (bcp == NULL)
15437c478bd9Sstevel@tonic-gate goto bufctl_alloc_failure;
15447c478bd9Sstevel@tonic-gate if (cache_flags & KMF_AUDIT) {
15457c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *bcap =
15467c478bd9Sstevel@tonic-gate (kmem_bufctl_audit_t *)bcp;
15477c478bd9Sstevel@tonic-gate bzero(bcap, sizeof (kmem_bufctl_audit_t));
15487c478bd9Sstevel@tonic-gate bcap->bc_cache = cp;
15497c478bd9Sstevel@tonic-gate }
15507c478bd9Sstevel@tonic-gate bcp->bc_addr = buf;
15517c478bd9Sstevel@tonic-gate bcp->bc_slab = sp;
15527c478bd9Sstevel@tonic-gate } else {
15537c478bd9Sstevel@tonic-gate bcp = KMEM_BUFCTL(cp, buf);
15547c478bd9Sstevel@tonic-gate }
15557c478bd9Sstevel@tonic-gate if (cache_flags & KMF_BUFTAG) {
15567c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
15577c478bd9Sstevel@tonic-gate btp->bt_redzone = KMEM_REDZONE_PATTERN;
15587c478bd9Sstevel@tonic-gate btp->bt_bufctl = bcp;
15597c478bd9Sstevel@tonic-gate btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
15607c478bd9Sstevel@tonic-gate if (cache_flags & KMF_DEADBEEF) {
15617c478bd9Sstevel@tonic-gate copy_pattern(KMEM_FREE_PATTERN, buf,
15627c478bd9Sstevel@tonic-gate cp->cache_verify);
15637c478bd9Sstevel@tonic-gate }
15647c478bd9Sstevel@tonic-gate }
15657c478bd9Sstevel@tonic-gate bcp->bc_next = sp->slab_head;
15667c478bd9Sstevel@tonic-gate sp->slab_head = bcp;
15677c478bd9Sstevel@tonic-gate buf += chunksize;
15687c478bd9Sstevel@tonic-gate }
15697c478bd9Sstevel@tonic-gate
15707c478bd9Sstevel@tonic-gate kmem_log_event(kmem_slab_log, cp, sp, slab);
15717c478bd9Sstevel@tonic-gate
15727c478bd9Sstevel@tonic-gate return (sp);
15737c478bd9Sstevel@tonic-gate
15747c478bd9Sstevel@tonic-gate bufctl_alloc_failure:
15757c478bd9Sstevel@tonic-gate
15767c478bd9Sstevel@tonic-gate while ((bcp = sp->slab_head) != NULL) {
15777c478bd9Sstevel@tonic-gate sp->slab_head = bcp->bc_next;
15787c478bd9Sstevel@tonic-gate kmem_cache_free(cp->cache_bufctl_cache, bcp);
15797c478bd9Sstevel@tonic-gate }
15807c478bd9Sstevel@tonic-gate kmem_cache_free(kmem_slab_cache, sp);
15817c478bd9Sstevel@tonic-gate
15827c478bd9Sstevel@tonic-gate slab_alloc_failure:
15837c478bd9Sstevel@tonic-gate
15847c478bd9Sstevel@tonic-gate vmem_free(vmp, slab, slabsize);
15857c478bd9Sstevel@tonic-gate
15867c478bd9Sstevel@tonic-gate vmem_alloc_failure:
15877c478bd9Sstevel@tonic-gate
15887c478bd9Sstevel@tonic-gate kmem_log_event(kmem_failure_log, cp, NULL, NULL);
15891a5e258fSJosef 'Jeff' Sipek atomic_inc_64(&cp->cache_alloc_fail);
15907c478bd9Sstevel@tonic-gate
15917c478bd9Sstevel@tonic-gate return (NULL);
15927c478bd9Sstevel@tonic-gate }
15937c478bd9Sstevel@tonic-gate
15947c478bd9Sstevel@tonic-gate /*
15957c478bd9Sstevel@tonic-gate * Destroy a slab.
15967c478bd9Sstevel@tonic-gate */
15977c478bd9Sstevel@tonic-gate static void
15987c478bd9Sstevel@tonic-gate kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
15997c478bd9Sstevel@tonic-gate {
16007c478bd9Sstevel@tonic-gate vmem_t *vmp = cp->cache_arena;
16017c478bd9Sstevel@tonic-gate void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
16027c478bd9Sstevel@tonic-gate
1603b5fca8f8Stomee ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1604b5fca8f8Stomee ASSERT(sp->slab_refcnt == 0);
1605b5fca8f8Stomee
16067c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) {
16077c478bd9Sstevel@tonic-gate kmem_bufctl_t *bcp;
16087c478bd9Sstevel@tonic-gate while ((bcp = sp->slab_head) != NULL) {
16097c478bd9Sstevel@tonic-gate sp->slab_head = bcp->bc_next;
16107c478bd9Sstevel@tonic-gate kmem_cache_free(cp->cache_bufctl_cache, bcp);
16117c478bd9Sstevel@tonic-gate }
16127c478bd9Sstevel@tonic-gate kmem_cache_free(kmem_slab_cache, sp);
16137c478bd9Sstevel@tonic-gate }
16147c478bd9Sstevel@tonic-gate vmem_free(vmp, slab, cp->cache_slabsize);
16157c478bd9Sstevel@tonic-gate }
16167c478bd9Sstevel@tonic-gate
16177c478bd9Sstevel@tonic-gate static void *
1618b942e89bSDavid Valin kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
16197c478bd9Sstevel@tonic-gate {
16207c478bd9Sstevel@tonic-gate kmem_bufctl_t *bcp, **hash_bucket;
16217c478bd9Sstevel@tonic-gate void *buf;
1622b942e89bSDavid Valin boolean_t new_slab = (sp->slab_refcnt == 0);
16237c478bd9Sstevel@tonic-gate
1624b5fca8f8Stomee ASSERT(MUTEX_HELD(&cp->cache_lock));
16257c478bd9Sstevel@tonic-gate /*
1626b5fca8f8Stomee * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we
1627b5fca8f8Stomee * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
1628b942e89bSDavid Valin * slab is newly created.
16297c478bd9Sstevel@tonic-gate */
1630b942e89bSDavid Valin ASSERT(new_slab || (KMEM_SLAB_IS_PARTIAL(sp) &&
1631b5fca8f8Stomee (sp == avl_first(&cp->cache_partial_slabs))));
1632b5fca8f8Stomee ASSERT(sp->slab_cache == cp);
16337c478bd9Sstevel@tonic-gate
1634b5fca8f8Stomee cp->cache_slab_alloc++;
16359f1b636aStomee cp->cache_bufslab--;
16367c478bd9Sstevel@tonic-gate sp->slab_refcnt++;
16377c478bd9Sstevel@tonic-gate
16387c478bd9Sstevel@tonic-gate bcp = sp->slab_head;
1639b942e89bSDavid Valin sp->slab_head = bcp->bc_next;
16407c478bd9Sstevel@tonic-gate
16417c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) {
16427c478bd9Sstevel@tonic-gate /*
16437c478bd9Sstevel@tonic-gate * Add buffer to allocated-address hash table.
16447c478bd9Sstevel@tonic-gate */
16457c478bd9Sstevel@tonic-gate buf = bcp->bc_addr;
16467c478bd9Sstevel@tonic-gate hash_bucket = KMEM_HASH(cp, buf);
16477c478bd9Sstevel@tonic-gate bcp->bc_next = *hash_bucket;
16487c478bd9Sstevel@tonic-gate *hash_bucket = bcp;
16497c478bd9Sstevel@tonic-gate if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
16507c478bd9Sstevel@tonic-gate KMEM_AUDIT(kmem_transaction_log, cp, bcp);
16517c478bd9Sstevel@tonic-gate }
16527c478bd9Sstevel@tonic-gate } else {
16537c478bd9Sstevel@tonic-gate buf = KMEM_BUF(cp, bcp);
16547c478bd9Sstevel@tonic-gate }
16557c478bd9Sstevel@tonic-gate
16567c478bd9Sstevel@tonic-gate ASSERT(KMEM_SLAB_MEMBER(sp, buf));
1657b942e89bSDavid Valin
1658b942e89bSDavid Valin if (sp->slab_head == NULL) {
1659b942e89bSDavid Valin ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
1660b942e89bSDavid Valin if (new_slab) {
1661b942e89bSDavid Valin ASSERT(sp->slab_chunks == 1);
1662b942e89bSDavid Valin } else {
1663b942e89bSDavid Valin ASSERT(sp->slab_chunks > 1); /* the slab was partial */
1664b942e89bSDavid Valin avl_remove(&cp->cache_partial_slabs, sp);
1665b942e89bSDavid Valin sp->slab_later_count = 0; /* clear history */
1666b942e89bSDavid Valin sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
1667b942e89bSDavid Valin sp->slab_stuck_offset = (uint32_t)-1;
1668b942e89bSDavid Valin }
1669b942e89bSDavid Valin list_insert_head(&cp->cache_complete_slabs, sp);
1670b942e89bSDavid Valin cp->cache_complete_slab_count++;
1671b942e89bSDavid Valin return (buf);
1672b942e89bSDavid Valin }
1673b942e89bSDavid Valin
1674b942e89bSDavid Valin ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
1675b942e89bSDavid Valin /*
1676b942e89bSDavid Valin * Peek to see if the magazine layer is enabled before
1677b942e89bSDavid Valin * we prefill. We're not holding the cpu cache lock,
1678b942e89bSDavid Valin * so the peek could be wrong, but there's no harm in it.
1679b942e89bSDavid Valin */
1680b942e89bSDavid Valin if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
1681b942e89bSDavid Valin (KMEM_CPU_CACHE(cp)->cc_magsize != 0)) {
1682b942e89bSDavid Valin kmem_slab_prefill(cp, sp);
1683b942e89bSDavid Valin return (buf);
1684b942e89bSDavid Valin }
1685b942e89bSDavid Valin
1686b942e89bSDavid Valin if (new_slab) {
1687b942e89bSDavid Valin avl_add(&cp->cache_partial_slabs, sp);
1688b942e89bSDavid Valin return (buf);
1689b942e89bSDavid Valin }
1690b942e89bSDavid Valin
1691b942e89bSDavid Valin /*
1692b942e89bSDavid Valin * The slab is now more allocated than it was, so the
1693b942e89bSDavid Valin * order remains unchanged.
1694b942e89bSDavid Valin */
1695b942e89bSDavid Valin ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
1696b5fca8f8Stomee return (buf);
1697b5fca8f8Stomee }
16987c478bd9Sstevel@tonic-gate
1699b5fca8f8Stomee /*
1700b5fca8f8Stomee * Allocate a raw (unconstructed) buffer from cp's slab layer.
1701b5fca8f8Stomee */
1702b5fca8f8Stomee static void *
1703b5fca8f8Stomee kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
1704b5fca8f8Stomee {
1705b5fca8f8Stomee kmem_slab_t *sp;
1706b5fca8f8Stomee void *buf;
17074d4c4c43STom Erickson boolean_t test_destructor;
1708b5fca8f8Stomee
1709b5fca8f8Stomee mutex_enter(&cp->cache_lock);
17104d4c4c43STom Erickson test_destructor = (cp->cache_slab_alloc == 0);
1711b5fca8f8Stomee sp = avl_first(&cp->cache_partial_slabs);
1712b5fca8f8Stomee if (sp == NULL) {
1713b5fca8f8Stomee ASSERT(cp->cache_bufslab == 0);
1714b5fca8f8Stomee
1715b5fca8f8Stomee /*
1716b5fca8f8Stomee * The freelist is empty. Create a new slab.
1717b5fca8f8Stomee */
1718b5fca8f8Stomee mutex_exit(&cp->cache_lock);
1719b5fca8f8Stomee if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
1720b5fca8f8Stomee return (NULL);
1721b5fca8f8Stomee }
1722b5fca8f8Stomee mutex_enter(&cp->cache_lock);
1723b5fca8f8Stomee cp->cache_slab_create++;
1724b5fca8f8Stomee if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1725b5fca8f8Stomee cp->cache_bufmax = cp->cache_buftotal;
1726b5fca8f8Stomee cp->cache_bufslab += sp->slab_chunks;
1727b5fca8f8Stomee }
1728b5fca8f8Stomee
1729b942e89bSDavid Valin buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
1730b5fca8f8Stomee ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1731b5fca8f8Stomee (cp->cache_complete_slab_count +
1732b5fca8f8Stomee avl_numnodes(&cp->cache_partial_slabs) +
1733b5fca8f8Stomee (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
17347c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock);
17357c478bd9Sstevel@tonic-gate
17364d4c4c43STom Erickson if (test_destructor && cp->cache_destructor != NULL) {
17374d4c4c43STom Erickson /*
17384d4c4c43STom Erickson * On the first kmem_slab_alloc(), assert that it is valid to
17394d4c4c43STom Erickson * call the destructor on a newly constructed object without any
17404d4c4c43STom Erickson * client involvement.
17414d4c4c43STom Erickson */
17424d4c4c43STom Erickson if ((cp->cache_constructor == NULL) ||
17434d4c4c43STom Erickson cp->cache_constructor(buf, cp->cache_private,
17444d4c4c43STom Erickson kmflag) == 0) {
17454d4c4c43STom Erickson cp->cache_destructor(buf, cp->cache_private);
17464d4c4c43STom Erickson }
17474d4c4c43STom Erickson copy_pattern(KMEM_UNINITIALIZED_PATTERN, buf,
17484d4c4c43STom Erickson cp->cache_bufsize);
17494d4c4c43STom Erickson if (cp->cache_flags & KMF_DEADBEEF) {
17504d4c4c43STom Erickson copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
17514d4c4c43STom Erickson }
17524d4c4c43STom Erickson }
17534d4c4c43STom Erickson
17547c478bd9Sstevel@tonic-gate return (buf);
17557c478bd9Sstevel@tonic-gate }
17567c478bd9Sstevel@tonic-gate
1757b5fca8f8Stomee static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *);
1758b5fca8f8Stomee
17597c478bd9Sstevel@tonic-gate /*
17607c478bd9Sstevel@tonic-gate * Free a raw (unconstructed) buffer to cp's slab layer.
17617c478bd9Sstevel@tonic-gate */
17627c478bd9Sstevel@tonic-gate static void
17637c478bd9Sstevel@tonic-gate kmem_slab_free(kmem_cache_t *cp, void *buf)
17647c478bd9Sstevel@tonic-gate {
17657c478bd9Sstevel@tonic-gate kmem_slab_t *sp;
17667c478bd9Sstevel@tonic-gate kmem_bufctl_t *bcp, **prev_bcpp;
17677c478bd9Sstevel@tonic-gate
17687c478bd9Sstevel@tonic-gate ASSERT(buf != NULL);
17697c478bd9Sstevel@tonic-gate
17707c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_lock);
17717c478bd9Sstevel@tonic-gate cp->cache_slab_free++;
17727c478bd9Sstevel@tonic-gate
17737c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) {
17747c478bd9Sstevel@tonic-gate /*
17757c478bd9Sstevel@tonic-gate * Look up buffer in allocated-address hash table.
17767c478bd9Sstevel@tonic-gate */
17777c478bd9Sstevel@tonic-gate prev_bcpp = KMEM_HASH(cp, buf);
17787c478bd9Sstevel@tonic-gate while ((bcp = *prev_bcpp) != NULL) {
17797c478bd9Sstevel@tonic-gate if (bcp->bc_addr == buf) {
17807c478bd9Sstevel@tonic-gate *prev_bcpp = bcp->bc_next;
17817c478bd9Sstevel@tonic-gate sp = bcp->bc_slab;
17827c478bd9Sstevel@tonic-gate break;
17837c478bd9Sstevel@tonic-gate }
17847c478bd9Sstevel@tonic-gate cp->cache_lookup_depth++;
17857c478bd9Sstevel@tonic-gate prev_bcpp = &bcp->bc_next;
17867c478bd9Sstevel@tonic-gate }
17877c478bd9Sstevel@tonic-gate } else {
17887c478bd9Sstevel@tonic-gate bcp = KMEM_BUFCTL(cp, buf);
17897c478bd9Sstevel@tonic-gate sp = KMEM_SLAB(cp, buf);
17907c478bd9Sstevel@tonic-gate }
17917c478bd9Sstevel@tonic-gate
17927c478bd9Sstevel@tonic-gate if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
17937c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock);
17947c478bd9Sstevel@tonic-gate kmem_error(KMERR_BADADDR, cp, buf);
17957c478bd9Sstevel@tonic-gate return;
17967c478bd9Sstevel@tonic-gate }
17977c478bd9Sstevel@tonic-gate
1798b5fca8f8Stomee if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) {
1799b5fca8f8Stomee /*
1800b5fca8f8Stomee * If this is the buffer that prevented the consolidator from
1801b5fca8f8Stomee * clearing the slab, we can reset the slab flags now that the
1802b5fca8f8Stomee * buffer is freed. (It makes sense to do this in
1803b5fca8f8Stomee * kmem_cache_free(), where the client gives up ownership of the
1804b5fca8f8Stomee * buffer, but on the hot path the test is too expensive.)
1805b5fca8f8Stomee */
1806b5fca8f8Stomee kmem_slab_move_yes(cp, sp, buf);
1807b5fca8f8Stomee }
1808b5fca8f8Stomee
18097c478bd9Sstevel@tonic-gate if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
18107c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_CONTENTS)
18117c478bd9Sstevel@tonic-gate ((kmem_bufctl_audit_t *)bcp)->bc_contents =
18127c478bd9Sstevel@tonic-gate kmem_log_enter(kmem_content_log, buf,
18137c478bd9Sstevel@tonic-gate cp->cache_contents);
18147c478bd9Sstevel@tonic-gate KMEM_AUDIT(kmem_transaction_log, cp, bcp);
18157c478bd9Sstevel@tonic-gate }
18167c478bd9Sstevel@tonic-gate
18177c478bd9Sstevel@tonic-gate bcp->bc_next = sp->slab_head;
18187c478bd9Sstevel@tonic-gate sp->slab_head = bcp;
18197c478bd9Sstevel@tonic-gate
18209f1b636aStomee cp->cache_bufslab++;
18217c478bd9Sstevel@tonic-gate ASSERT(sp->slab_refcnt >= 1);
1822b5fca8f8Stomee
18237c478bd9Sstevel@tonic-gate if (--sp->slab_refcnt == 0) {
18247c478bd9Sstevel@tonic-gate /*
18257c478bd9Sstevel@tonic-gate * There are no outstanding allocations from this slab,
18267c478bd9Sstevel@tonic-gate * so we can reclaim the memory.
18277c478bd9Sstevel@tonic-gate */
1828b5fca8f8Stomee if (sp->slab_chunks == 1) {
1829b5fca8f8Stomee list_remove(&cp->cache_complete_slabs, sp);
1830b5fca8f8Stomee cp->cache_complete_slab_count--;
1831b5fca8f8Stomee } else {
1832b5fca8f8Stomee avl_remove(&cp->cache_partial_slabs, sp);
1833b5fca8f8Stomee }
1834b5fca8f8Stomee
18357c478bd9Sstevel@tonic-gate cp->cache_buftotal -= sp->slab_chunks;
18369f1b636aStomee cp->cache_bufslab -= sp->slab_chunks;
1837b5fca8f8Stomee /*
1838b5fca8f8Stomee * Defer releasing the slab to the virtual memory subsystem
1839b5fca8f8Stomee * while there is a pending move callback, since we guarantee
1840b5fca8f8Stomee * that buffers passed to the move callback have only been
1841b5fca8f8Stomee * touched by kmem or by the client itself. Since the memory
1842b5fca8f8Stomee * patterns baddcafe (uninitialized) and deadbeef (freed) both
1843b5fca8f8Stomee * set at least one of the two lowest order bits, the client can
1844b5fca8f8Stomee * test those bits in the move callback to determine whether or
1845b5fca8f8Stomee * not it knows about the buffer (assuming that the client also
1846b5fca8f8Stomee * sets one of those low order bits whenever it frees a buffer).
1847b5fca8f8Stomee */
1848b5fca8f8Stomee if (cp->cache_defrag == NULL ||
1849b5fca8f8Stomee (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
1850b5fca8f8Stomee !(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) {
1851b5fca8f8Stomee cp->cache_slab_destroy++;
18527c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock);
18537c478bd9Sstevel@tonic-gate kmem_slab_destroy(cp, sp);
1854b5fca8f8Stomee } else {
1855b5fca8f8Stomee list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
1856b5fca8f8Stomee /*
1857b5fca8f8Stomee * Slabs are inserted at both ends of the deadlist to
1858b5fca8f8Stomee * distinguish between slabs freed while move callbacks
1859b5fca8f8Stomee * are pending (list head) and a slab freed while the
1860b5fca8f8Stomee * lock is dropped in kmem_move_buffers() (list tail) so
1861b5fca8f8Stomee * that in both cases slab_destroy() is called from the
1862b5fca8f8Stomee * right context.
1863b5fca8f8Stomee */
1864b5fca8f8Stomee if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
1865b5fca8f8Stomee list_insert_tail(deadlist, sp);
1866b5fca8f8Stomee } else {
1867b5fca8f8Stomee list_insert_head(deadlist, sp);
1868b5fca8f8Stomee }
1869b5fca8f8Stomee cp->cache_defrag->kmd_deadcount++;
1870b5fca8f8Stomee mutex_exit(&cp->cache_lock);
1871b5fca8f8Stomee }
18727c478bd9Sstevel@tonic-gate return;
18737c478bd9Sstevel@tonic-gate }
1874b5fca8f8Stomee
1875b5fca8f8Stomee if (bcp->bc_next == NULL) {
1876b5fca8f8Stomee /* Transition the slab from completely allocated to partial. */
1877b5fca8f8Stomee ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1));
1878b5fca8f8Stomee ASSERT(sp->slab_chunks > 1);
1879b5fca8f8Stomee list_remove(&cp->cache_complete_slabs, sp);
1880b5fca8f8Stomee cp->cache_complete_slab_count--;
1881b5fca8f8Stomee avl_add(&cp->cache_partial_slabs, sp);
1882b5fca8f8Stomee } else {
1883b5fca8f8Stomee (void) avl_update_gt(&cp->cache_partial_slabs, sp);
1884b5fca8f8Stomee }
1885b5fca8f8Stomee
1886b5fca8f8Stomee ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1887b5fca8f8Stomee (cp->cache_complete_slab_count +
1888b5fca8f8Stomee avl_numnodes(&cp->cache_partial_slabs) +
1889b5fca8f8Stomee (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
18907c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock);
18917c478bd9Sstevel@tonic-gate }
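/*
 * A minimal sketch of the low-order-bit convention described in
 * kmem_slab_free() above, as a hypothetical client might apply it in its
 * move callback.  The object layout, field name, and callback body are
 * illustrative assumptions; the point is only the bit test: both the
 * baddcafe (uninitialized) and deadbeef (freed) patterns set one of the
 * two lowest-order bits, so a pointer-sized field that the client keeps
 * word-aligned while it owns the object (and marks with a low-order bit
 * when it frees the object) reveals whether the client knows the buffer.
 *
 *	typedef struct object {
 *		void	*o_next;	(kept word-aligned while owned)
 *	} object_t;
 *
 *	static kmem_cbrc_t
 *	object_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		object_t *op = old;
 *
 *		if ((uintptr_t)op->o_next & 0x3)
 *			return (KMEM_CBRC_DONT_KNOW);
 *		...
 *	}
 */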
18927c478bd9Sstevel@tonic-gate
1893b5fca8f8Stomee /*
1894b5fca8f8Stomee * Return -1 if kmem_error() was called, 1 if the constructor failed, 0 on success.
1895b5fca8f8Stomee */
18967c478bd9Sstevel@tonic-gate static int
18977c478bd9Sstevel@tonic-gate kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
18987c478bd9Sstevel@tonic-gate caddr_t caller)
18997c478bd9Sstevel@tonic-gate {
19007c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
19017c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
19027c478bd9Sstevel@tonic-gate uint32_t mtbf;
19037c478bd9Sstevel@tonic-gate
19047c478bd9Sstevel@tonic-gate if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
19057c478bd9Sstevel@tonic-gate kmem_error(KMERR_BADBUFTAG, cp, buf);
19067c478bd9Sstevel@tonic-gate return (-1);
19077c478bd9Sstevel@tonic-gate }
19087c478bd9Sstevel@tonic-gate
19097c478bd9Sstevel@tonic-gate btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC;
19107c478bd9Sstevel@tonic-gate
19117c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
19127c478bd9Sstevel@tonic-gate kmem_error(KMERR_BADBUFCTL, cp, buf);
19137c478bd9Sstevel@tonic-gate return (-1);
19147c478bd9Sstevel@tonic-gate }
19157c478bd9Sstevel@tonic-gate
19167c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_DEADBEEF) {
19177c478bd9Sstevel@tonic-gate if (!construct && (cp->cache_flags & KMF_LITE)) {
19187c478bd9Sstevel@tonic-gate if (*(uint64_t *)buf != KMEM_FREE_PATTERN) {
19197c478bd9Sstevel@tonic-gate kmem_error(KMERR_MODIFIED, cp, buf);
19207c478bd9Sstevel@tonic-gate return (-1);
19217c478bd9Sstevel@tonic-gate }
19227c478bd9Sstevel@tonic-gate if (cp->cache_constructor != NULL)
19237c478bd9Sstevel@tonic-gate *(uint64_t *)buf = btp->bt_redzone;
19247c478bd9Sstevel@tonic-gate else
19257c478bd9Sstevel@tonic-gate *(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN;
19267c478bd9Sstevel@tonic-gate } else {
19277c478bd9Sstevel@tonic-gate construct = 1;
19287c478bd9Sstevel@tonic-gate if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
19297c478bd9Sstevel@tonic-gate KMEM_UNINITIALIZED_PATTERN, buf,
19307c478bd9Sstevel@tonic-gate cp->cache_verify)) {
19317c478bd9Sstevel@tonic-gate kmem_error(KMERR_MODIFIED, cp, buf);
19327c478bd9Sstevel@tonic-gate return (-1);
19337c478bd9Sstevel@tonic-gate }
19347c478bd9Sstevel@tonic-gate }
19357c478bd9Sstevel@tonic-gate }
19367c478bd9Sstevel@tonic-gate btp->bt_redzone = KMEM_REDZONE_PATTERN;
19377c478bd9Sstevel@tonic-gate
19387c478bd9Sstevel@tonic-gate if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
19397c478bd9Sstevel@tonic-gate gethrtime() % mtbf == 0 &&
19407c478bd9Sstevel@tonic-gate (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
19417c478bd9Sstevel@tonic-gate kmem_log_event(kmem_failure_log, cp, NULL, NULL);
19427c478bd9Sstevel@tonic-gate if (!construct && cp->cache_destructor != NULL)
19437c478bd9Sstevel@tonic-gate cp->cache_destructor(buf, cp->cache_private);
19447c478bd9Sstevel@tonic-gate } else {
19457c478bd9Sstevel@tonic-gate mtbf = 0;
19467c478bd9Sstevel@tonic-gate }
19477c478bd9Sstevel@tonic-gate
19487c478bd9Sstevel@tonic-gate if (mtbf || (construct && cp->cache_constructor != NULL &&
19497c478bd9Sstevel@tonic-gate cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
19501a5e258fSJosef 'Jeff' Sipek atomic_inc_64(&cp->cache_alloc_fail);
19517c478bd9Sstevel@tonic-gate btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
19527c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_DEADBEEF)
19537c478bd9Sstevel@tonic-gate copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
19547c478bd9Sstevel@tonic-gate kmem_slab_free(cp, buf);
1955b5fca8f8Stomee return (1);
19567c478bd9Sstevel@tonic-gate }
19577c478bd9Sstevel@tonic-gate
19587c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_AUDIT) {
19597c478bd9Sstevel@tonic-gate KMEM_AUDIT(kmem_transaction_log, cp, bcp);
19607c478bd9Sstevel@tonic-gate }
19617c478bd9Sstevel@tonic-gate
19627c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_LITE) &&
19637c478bd9Sstevel@tonic-gate !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
19647c478bd9Sstevel@tonic-gate KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
19657c478bd9Sstevel@tonic-gate }
19667c478bd9Sstevel@tonic-gate
19677c478bd9Sstevel@tonic-gate return (0);
19687c478bd9Sstevel@tonic-gate }
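/*
 * A worked example of the bt_bxstat encoding checked above and in
 * kmem_cache_free_debug().  The pointer and constant values below are
 * illustrative only; what matters is the XOR relationship:
 *
 *	bcp (bufctl address)	0xffffff0123456780
 *	KMEM_BUFTAG_FREE	0x00000000f4eef4ee	(illustrative value)
 *	KMEM_BUFTAG_ALLOC	0x00000000a110c8ed	(illustrative value)
 *
 * A free buffer carries bt_bxstat == bcp ^ KMEM_BUFTAG_FREE.  On allocation
 * that value is recomputed from the buftag's own bufctl pointer and
 * compared, so a scribbled-on tag or a buffer that is not actually free
 * fails the check and is reported through kmem_error().  The state is then
 * flipped to bcp ^ KMEM_BUFTAG_ALLOC, which is exactly what
 * kmem_cache_free_debug() expects; a second free finds the FREE encoding
 * already in place and is reported as KMERR_DUPFREE.
 */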
19697c478bd9Sstevel@tonic-gate
19707c478bd9Sstevel@tonic-gate static int
19717c478bd9Sstevel@tonic-gate kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
19727c478bd9Sstevel@tonic-gate {
19737c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
19747c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
19757c478bd9Sstevel@tonic-gate kmem_slab_t *sp;
19767c478bd9Sstevel@tonic-gate
19777c478bd9Sstevel@tonic-gate if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) {
19787c478bd9Sstevel@tonic-gate if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
19797c478bd9Sstevel@tonic-gate kmem_error(KMERR_DUPFREE, cp, buf);
19807c478bd9Sstevel@tonic-gate return (-1);
19817c478bd9Sstevel@tonic-gate }
19827c478bd9Sstevel@tonic-gate sp = kmem_findslab(cp, buf);
19837c478bd9Sstevel@tonic-gate if (sp == NULL || sp->slab_cache != cp)
19847c478bd9Sstevel@tonic-gate kmem_error(KMERR_BADADDR, cp, buf);
19857c478bd9Sstevel@tonic-gate else
19867c478bd9Sstevel@tonic-gate kmem_error(KMERR_REDZONE, cp, buf);
19877c478bd9Sstevel@tonic-gate return (-1);
19887c478bd9Sstevel@tonic-gate }
19897c478bd9Sstevel@tonic-gate
19907c478bd9Sstevel@tonic-gate btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
19917c478bd9Sstevel@tonic-gate
19927c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
19937c478bd9Sstevel@tonic-gate kmem_error(KMERR_BADBUFCTL, cp, buf);
19947c478bd9Sstevel@tonic-gate return (-1);
19957c478bd9Sstevel@tonic-gate }
19967c478bd9Sstevel@tonic-gate
19977c478bd9Sstevel@tonic-gate if (btp->bt_redzone != KMEM_REDZONE_PATTERN) {
19987c478bd9Sstevel@tonic-gate kmem_error(KMERR_REDZONE, cp, buf);
19997c478bd9Sstevel@tonic-gate return (-1);
20007c478bd9Sstevel@tonic-gate }
20017c478bd9Sstevel@tonic-gate
20027c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_AUDIT) {
20037c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_CONTENTS)
20047c478bd9Sstevel@tonic-gate bcp->bc_contents = kmem_log_enter(kmem_content_log,
20057c478bd9Sstevel@tonic-gate buf, cp->cache_contents);
20067c478bd9Sstevel@tonic-gate KMEM_AUDIT(kmem_transaction_log, cp, bcp);
20077c478bd9Sstevel@tonic-gate }
20087c478bd9Sstevel@tonic-gate
20097c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_LITE) &&
20107c478bd9Sstevel@tonic-gate !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
20117c478bd9Sstevel@tonic-gate KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
20127c478bd9Sstevel@tonic-gate }
20137c478bd9Sstevel@tonic-gate
20147c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_DEADBEEF) {
20157c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_LITE)
20167c478bd9Sstevel@tonic-gate btp->bt_redzone = *(uint64_t *)buf;
20177c478bd9Sstevel@tonic-gate else if (cp->cache_destructor != NULL)
20187c478bd9Sstevel@tonic-gate cp->cache_destructor(buf, cp->cache_private);
20197c478bd9Sstevel@tonic-gate
20207c478bd9Sstevel@tonic-gate copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
20217c478bd9Sstevel@tonic-gate }
20227c478bd9Sstevel@tonic-gate
20237c478bd9Sstevel@tonic-gate return (0);
20247c478bd9Sstevel@tonic-gate }
20257c478bd9Sstevel@tonic-gate
20267c478bd9Sstevel@tonic-gate /*
20277c478bd9Sstevel@tonic-gate * Free each object in magazine mp to cp's slab layer, and free mp itself.
20287c478bd9Sstevel@tonic-gate */
20297c478bd9Sstevel@tonic-gate static void
20307c478bd9Sstevel@tonic-gate kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
20317c478bd9Sstevel@tonic-gate {
20327c478bd9Sstevel@tonic-gate int round;
20337c478bd9Sstevel@tonic-gate
2034b5fca8f8Stomee ASSERT(!list_link_active(&cp->cache_link) ||
2035b5fca8f8Stomee taskq_member(kmem_taskq, curthread));
20367c478bd9Sstevel@tonic-gate
20377c478bd9Sstevel@tonic-gate for (round = 0; round < nrounds; round++) {
20387c478bd9Sstevel@tonic-gate void *buf = mp->mag_round[round];
20397c478bd9Sstevel@tonic-gate
20407c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_DEADBEEF) {
20417c478bd9Sstevel@tonic-gate if (verify_pattern(KMEM_FREE_PATTERN, buf,
20427c478bd9Sstevel@tonic-gate cp->cache_verify) != NULL) {
20437c478bd9Sstevel@tonic-gate kmem_error(KMERR_MODIFIED, cp, buf);
20447c478bd9Sstevel@tonic-gate continue;
20457c478bd9Sstevel@tonic-gate }
20467c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_LITE) &&
20477c478bd9Sstevel@tonic-gate cp->cache_destructor != NULL) {
20487c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
20497c478bd9Sstevel@tonic-gate *(uint64_t *)buf = btp->bt_redzone;
20507c478bd9Sstevel@tonic-gate cp->cache_destructor(buf, cp->cache_private);
20517c478bd9Sstevel@tonic-gate *(uint64_t *)buf = KMEM_FREE_PATTERN;
20527c478bd9Sstevel@tonic-gate }
20537c478bd9Sstevel@tonic-gate } else if (cp->cache_destructor != NULL) {
20547c478bd9Sstevel@tonic-gate cp->cache_destructor(buf, cp->cache_private);
20557c478bd9Sstevel@tonic-gate }
20567c478bd9Sstevel@tonic-gate
20577c478bd9Sstevel@tonic-gate kmem_slab_free(cp, buf);
20587c478bd9Sstevel@tonic-gate }
20597c478bd9Sstevel@tonic-gate ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
20607c478bd9Sstevel@tonic-gate kmem_cache_free(cp->cache_magtype->mt_cache, mp);
20617c478bd9Sstevel@tonic-gate }
20627c478bd9Sstevel@tonic-gate
20637c478bd9Sstevel@tonic-gate /*
20647c478bd9Sstevel@tonic-gate * Allocate a magazine from the depot.
20657c478bd9Sstevel@tonic-gate */
20667c478bd9Sstevel@tonic-gate static kmem_magazine_t *
20677c478bd9Sstevel@tonic-gate kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
20687c478bd9Sstevel@tonic-gate {
20697c478bd9Sstevel@tonic-gate kmem_magazine_t *mp;
20707c478bd9Sstevel@tonic-gate
20717c478bd9Sstevel@tonic-gate /*
20727c478bd9Sstevel@tonic-gate * If we can't get the depot lock without contention,
20737c478bd9Sstevel@tonic-gate * update our contention count. We use the depot
20747c478bd9Sstevel@tonic-gate * contention rate to determine whether we need to
20757c478bd9Sstevel@tonic-gate * increase the magazine size for better scalability.
20767c478bd9Sstevel@tonic-gate */
20777c478bd9Sstevel@tonic-gate if (!mutex_tryenter(&cp->cache_depot_lock)) {
20787c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_depot_lock);
20797c478bd9Sstevel@tonic-gate cp->cache_depot_contention++;
20807c478bd9Sstevel@tonic-gate }
20817c478bd9Sstevel@tonic-gate
20827c478bd9Sstevel@tonic-gate if ((mp = mlp->ml_list) != NULL) {
20837c478bd9Sstevel@tonic-gate ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
20847c478bd9Sstevel@tonic-gate mlp->ml_list = mp->mag_next;
20857c478bd9Sstevel@tonic-gate if (--mlp->ml_total < mlp->ml_min)
20867c478bd9Sstevel@tonic-gate mlp->ml_min = mlp->ml_total;
20877c478bd9Sstevel@tonic-gate mlp->ml_alloc++;
20887c478bd9Sstevel@tonic-gate }
20897c478bd9Sstevel@tonic-gate
20907c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_depot_lock);
20917c478bd9Sstevel@tonic-gate
20927c478bd9Sstevel@tonic-gate return (mp);
20937c478bd9Sstevel@tonic-gate }
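/*
 * The trylock-then-block pattern above is a cheap way to sample contention
 * without slowing the common (uncontended) case.  A standalone sketch of
 * the same idiom, with illustrative names:
 *
 *	if (!mutex_tryenter(&stats_lock)) {
 *		mutex_enter(&stats_lock);
 *		stats_contention++;	(protected by stats_lock)
 *	}
 *	... critical section ...
 *	mutex_exit(&stats_lock);
 *
 * The count is only an estimate (it misses contention that resolved an
 * instant earlier), but that is sufficient for deciding whether a larger
 * magazine size is warranted.
 */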
20947c478bd9Sstevel@tonic-gate
20957c478bd9Sstevel@tonic-gate /*
20967c478bd9Sstevel@tonic-gate * Free a magazine to the depot.
20977c478bd9Sstevel@tonic-gate */
20987c478bd9Sstevel@tonic-gate static void
20997c478bd9Sstevel@tonic-gate kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
21007c478bd9Sstevel@tonic-gate {
21017c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_depot_lock);
21027c478bd9Sstevel@tonic-gate ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
21037c478bd9Sstevel@tonic-gate mp->mag_next = mlp->ml_list;
21047c478bd9Sstevel@tonic-gate mlp->ml_list = mp;
21057c478bd9Sstevel@tonic-gate mlp->ml_total++;
21067c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_depot_lock);
21077c478bd9Sstevel@tonic-gate }
21087c478bd9Sstevel@tonic-gate
21097c478bd9Sstevel@tonic-gate /*
21107c478bd9Sstevel@tonic-gate * Update the working set statistics for cp's depot.
21117c478bd9Sstevel@tonic-gate */
21127c478bd9Sstevel@tonic-gate static void
21137c478bd9Sstevel@tonic-gate kmem_depot_ws_update(kmem_cache_t *cp)
21147c478bd9Sstevel@tonic-gate {
21157c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_depot_lock);
21167c478bd9Sstevel@tonic-gate cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
21177c478bd9Sstevel@tonic-gate cp->cache_full.ml_min = cp->cache_full.ml_total;
21187c478bd9Sstevel@tonic-gate cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
21197c478bd9Sstevel@tonic-gate cp->cache_empty.ml_min = cp->cache_empty.ml_total;
21207c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_depot_lock);
21217c478bd9Sstevel@tonic-gate }
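/*
 * A worked example of the working-set bookkeeping above, with illustrative
 * numbers: suppose the full-magazine list holds ml_total = 10 magazines at
 * the start of an update interval, dips to 4 at its busiest point, and ends
 * back at 10.  kmem_depot_alloc() will have tracked the dip, so ml_min == 4
 * when kmem_depot_ws_update() runs; ml_reaplimit becomes 4 and ml_min is
 * reset to the current ml_total (10).  The 4 magazines that were never
 * needed during the interval are the ones kmem_depot_ws_reap() may later
 * give back; anything the cache actually dipped into is treated as part of
 * the working set and left alone.
 */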
21227c478bd9Sstevel@tonic-gate
21237c478bd9Sstevel@tonic-gate /*
21240c833d64SJosef 'Jeff' Sipek * Set the working set statistics for cp's depot to zero. (Everything is
21250c833d64SJosef 'Jeff' Sipek * eligible for reaping.)
21260c833d64SJosef 'Jeff' Sipek */
21270c833d64SJosef 'Jeff' Sipek static void
21280c833d64SJosef 'Jeff' Sipek kmem_depot_ws_zero(kmem_cache_t *cp)
21290c833d64SJosef 'Jeff' Sipek {
21300c833d64SJosef 'Jeff' Sipek mutex_enter(&cp->cache_depot_lock);
21310c833d64SJosef 'Jeff' Sipek cp->cache_full.ml_reaplimit = cp->cache_full.ml_total;
21320c833d64SJosef 'Jeff' Sipek cp->cache_full.ml_min = cp->cache_full.ml_total;
21330c833d64SJosef 'Jeff' Sipek cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total;
21340c833d64SJosef 'Jeff' Sipek cp->cache_empty.ml_min = cp->cache_empty.ml_total;
21350c833d64SJosef 'Jeff' Sipek mutex_exit(&cp->cache_depot_lock);
21360c833d64SJosef 'Jeff' Sipek }
21370c833d64SJosef 'Jeff' Sipek
21380c833d64SJosef 'Jeff' Sipek /*
213947bb2664SMatthew Ahrens * The number of bytes to reap before we call kpreempt(). The default (1MB)
214047bb2664SMatthew Ahrens * causes us to preempt reaping up to hundreds of times per second. Using a
214147bb2664SMatthew Ahrens * larger value (1GB) causes this to have virtually no effect.
214247bb2664SMatthew Ahrens */
214347bb2664SMatthew Ahrens size_t kmem_reap_preempt_bytes = 1024 * 1024;
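/*
 * For example (sizes chosen only for illustration): reaping a cache of 8K
 * buffers whose magazines hold 15 rounds releases 15 * 8192 = 122880 bytes
 * per magazine, so with the 1MB default above kmem_depot_ws_reap() calls
 * kpreempt(KPREEMPT_SYNC) roughly every 1048576 / 122880 ~= 9 magazines.
 * With the tunable raised to 1GB the preemption point is effectively never
 * reached for most caches, as noted above.
 */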
214447bb2664SMatthew Ahrens
214547bb2664SMatthew Ahrens /*
21467c478bd9Sstevel@tonic-gate * Reap all magazines that have fallen out of the depot's working set.
21477c478bd9Sstevel@tonic-gate */
21487c478bd9Sstevel@tonic-gate static void
21497c478bd9Sstevel@tonic-gate kmem_depot_ws_reap(kmem_cache_t *cp)
21507c478bd9Sstevel@tonic-gate {
215147bb2664SMatthew Ahrens size_t bytes = 0;
21527c478bd9Sstevel@tonic-gate long reap;
21537c478bd9Sstevel@tonic-gate kmem_magazine_t *mp;
21547c478bd9Sstevel@tonic-gate
2155b5fca8f8Stomee ASSERT(!list_link_active(&cp->cache_link) ||
2156b5fca8f8Stomee taskq_member(kmem_taskq, curthread));
21577c478bd9Sstevel@tonic-gate
21587c478bd9Sstevel@tonic-gate reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
215947bb2664SMatthew Ahrens while (reap-- &&
216047bb2664SMatthew Ahrens (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) {
21617c478bd9Sstevel@tonic-gate kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
216247bb2664SMatthew Ahrens bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
216347bb2664SMatthew Ahrens if (bytes > kmem_reap_preempt_bytes) {
216447bb2664SMatthew Ahrens kpreempt(KPREEMPT_SYNC);
216547bb2664SMatthew Ahrens bytes = 0;
216647bb2664SMatthew Ahrens }
216747bb2664SMatthew Ahrens }
21687c478bd9Sstevel@tonic-gate
21697c478bd9Sstevel@tonic-gate reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
217047bb2664SMatthew Ahrens while (reap-- &&
217147bb2664SMatthew Ahrens (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) {
21727c478bd9Sstevel@tonic-gate kmem_magazine_destroy(cp, mp, 0);
217347bb2664SMatthew Ahrens bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
217447bb2664SMatthew Ahrens if (bytes > kmem_reap_preempt_bytes) {
217547bb2664SMatthew Ahrens kpreempt(KPREEMPT_SYNC);
217647bb2664SMatthew Ahrens bytes = 0;
217747bb2664SMatthew Ahrens }
217847bb2664SMatthew Ahrens }
21797c478bd9Sstevel@tonic-gate }
21807c478bd9Sstevel@tonic-gate
21817c478bd9Sstevel@tonic-gate static void
21827c478bd9Sstevel@tonic-gate kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds)
21837c478bd9Sstevel@tonic-gate {
21847c478bd9Sstevel@tonic-gate ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
21857c478bd9Sstevel@tonic-gate (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
21867c478bd9Sstevel@tonic-gate ASSERT(ccp->cc_magsize > 0);
21877c478bd9Sstevel@tonic-gate
21887c478bd9Sstevel@tonic-gate ccp->cc_ploaded = ccp->cc_loaded;
21897c478bd9Sstevel@tonic-gate ccp->cc_prounds = ccp->cc_rounds;
21907c478bd9Sstevel@tonic-gate ccp->cc_loaded = mp;
21917c478bd9Sstevel@tonic-gate ccp->cc_rounds = rounds;
21927c478bd9Sstevel@tonic-gate }
21937c478bd9Sstevel@tonic-gate
21947c478bd9Sstevel@tonic-gate /*
21959dd77bc8SDave Plauger * Intercept kmem alloc/free calls during crash dump in order to avoid
21969dd77bc8SDave Plauger * changing kmem state while memory is being saved to the dump device.
21979dd77bc8SDave Plauger * Otherwise, ::kmem_verify will report "corrupt buffers". Note that
21989dd77bc8SDave Plauger * there are no locks because only one CPU calls kmem during a crash
21999dd77bc8SDave Plauger * dump. To enable this feature, first create the associated vmem
22009dd77bc8SDave Plauger * arena with VMC_DUMPSAFE.
22019dd77bc8SDave Plauger */
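/*
 * A minimal sketch of opting a cache into dump-safe allocation, assuming
 * the usual vmem_create()/kmem_cache_create() interfaces; the names, the
 * segkmem backing functions, and the object type are illustrative only:
 *
 *	vmem_t *my_arena = vmem_create("my_dumpsafe_arena", NULL, 0,
 *	    PAGESIZE, segkmem_alloc, segkmem_free, heap_arena, 0,
 *	    VMC_DUMPSAFE | VM_SLEEP);
 *
 *	my_cache = kmem_cache_create("my_cache", sizeof (my_obj_t), 0,
 *	    NULL, NULL, NULL, NULL, my_arena, 0);
 *
 * Caches whose source arena was created with VMC_DUMPSAFE are diverted to
 * the pre-reserved heap below by kmem_dump_begin().
 */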
22029dd77bc8SDave Plauger static void *kmem_dump_start; /* start of pre-reserved heap */
22039dd77bc8SDave Plauger static void *kmem_dump_end; /* end of heap area */
22049dd77bc8SDave Plauger static void *kmem_dump_curr; /* current free heap pointer */
22059dd77bc8SDave Plauger static size_t kmem_dump_size; /* size of heap area */
22069dd77bc8SDave Plauger
22079dd77bc8SDave Plauger /* appended to each buf created in the pre-reserved heap */
22089dd77bc8SDave Plauger typedef struct kmem_dumpctl {
22099dd77bc8SDave Plauger void *kdc_next; /* cache dump free list linkage */
22109dd77bc8SDave Plauger } kmem_dumpctl_t;
22119dd77bc8SDave Plauger
22129dd77bc8SDave Plauger #define KMEM_DUMPCTL(cp, buf) \
22139dd77bc8SDave Plauger ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
22149dd77bc8SDave Plauger sizeof (void *)))
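/*
 * A worked example of the KMEM_DUMPCTL() arithmetic above, assuming a
 * 64-bit kernel (sizeof (void *) == 8) and illustrative addresses: for a
 * cache with cache_bufsize == 20 and a dump-heap buffer at 0x1000, the
 * dumpctl is placed at P2ROUNDUP(0x1000 + 20, 8) = P2ROUNDUP(0x1014, 8) =
 * 0x1018, i.e. immediately after the buffer, rounded up to pointer
 * alignment so that kdc_next can be stored there.
 */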
22159dd77bc8SDave Plauger
22169dd77bc8SDave Plauger /* set non-zero for a full report */
22179dd77bc8SDave Plauger uint_t kmem_dump_verbose = 0;
22189dd77bc8SDave Plauger
22199dd77bc8SDave Plauger /* stats for oversize heap */
22209dd77bc8SDave Plauger uint_t kmem_dump_oversize_allocs = 0;
22219dd77bc8SDave Plauger uint_t kmem_dump_oversize_max = 0;
22229dd77bc8SDave Plauger
22239dd77bc8SDave Plauger static void
22249dd77bc8SDave Plauger kmem_dumppr(char **pp, char *e, const char *format, ...)
22259dd77bc8SDave Plauger {
22269dd77bc8SDave Plauger char *p = *pp;
22279dd77bc8SDave Plauger
22289dd77bc8SDave Plauger if (p < e) {
22299dd77bc8SDave Plauger int n;
22309dd77bc8SDave Plauger va_list ap;
22319dd77bc8SDave Plauger
22329dd77bc8SDave Plauger va_start(ap, format);
22339dd77bc8SDave Plauger n = vsnprintf(p, e - p, format, ap);
22349dd77bc8SDave Plauger va_end(ap);
22359dd77bc8SDave Plauger *pp = p + n;
22369dd77bc8SDave Plauger }
22379dd77bc8SDave Plauger }
22389dd77bc8SDave Plauger
22399dd77bc8SDave Plauger /*
22409dd77bc8SDave Plauger * Called when dumpadm(1M) configures dump parameters.
22419dd77bc8SDave Plauger */
22429dd77bc8SDave Plauger void
22439dd77bc8SDave Plauger kmem_dump_init(size_t size)
22449dd77bc8SDave Plauger {
22453608e2e0SJohn Levon /* Our caller ensures size is always set. */
22463608e2e0SJohn Levon ASSERT3U(size, >, 0);
22473608e2e0SJohn Levon
22489dd77bc8SDave Plauger if (kmem_dump_start != NULL)
22499dd77bc8SDave Plauger kmem_free(kmem_dump_start, kmem_dump_size);
22509dd77bc8SDave Plauger
22519dd77bc8SDave Plauger kmem_dump_start = kmem_alloc(size, KM_SLEEP);
22529dd77bc8SDave Plauger kmem_dump_size = size;
22539dd77bc8SDave Plauger kmem_dump_curr = kmem_dump_start;
22549dd77bc8SDave Plauger kmem_dump_end = (void *)((char *)kmem_dump_start + size);
22559dd77bc8SDave Plauger copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
22569dd77bc8SDave Plauger }
22579dd77bc8SDave Plauger
22589dd77bc8SDave Plauger /*
22599dd77bc8SDave Plauger * Set a flag on each kmem_cache_t indicating whether it is safe to use
22609dd77bc8SDave Plauger * alternate dump memory.  Called just before the panic crash dump starts.
22619dd77bc8SDave Plauger * The per-CPU flags are set for the calling CPU only.
22629dd77bc8SDave Plauger */
22639dd77bc8SDave Plauger void
22649dd77bc8SDave Plauger kmem_dump_begin(void)
22659dd77bc8SDave Plauger {
22669dd77bc8SDave Plauger kmem_cache_t *cp;
22679dd77bc8SDave Plauger
22683608e2e0SJohn Levon ASSERT(panicstr != NULL);
22693608e2e0SJohn Levon
22709dd77bc8SDave Plauger for (cp = list_head(&kmem_caches); cp != NULL;
22719dd77bc8SDave Plauger cp = list_next(&kmem_caches, cp)) {
22729dd77bc8SDave Plauger kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
22739dd77bc8SDave Plauger
22749dd77bc8SDave Plauger if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
22759dd77bc8SDave Plauger cp->cache_flags |= KMF_DUMPDIVERT;
22769dd77bc8SDave Plauger ccp->cc_flags |= KMF_DUMPDIVERT;
22779dd77bc8SDave Plauger ccp->cc_dump_rounds = ccp->cc_rounds;
22789dd77bc8SDave Plauger ccp->cc_dump_prounds = ccp->cc_prounds;
22799dd77bc8SDave Plauger ccp->cc_rounds = ccp->cc_prounds = -1;
22809dd77bc8SDave Plauger } else {
22819dd77bc8SDave Plauger cp->cache_flags |= KMF_DUMPUNSAFE;
22829dd77bc8SDave Plauger ccp->cc_flags |= KMF_DUMPUNSAFE;
22839dd77bc8SDave Plauger }
22849dd77bc8SDave Plauger }
22859dd77bc8SDave Plauger }
22869dd77bc8SDave Plauger
22879dd77bc8SDave Plauger /*
22889dd77bc8SDave Plauger * Finish the dump intercept: print any warnings on the console and
22899dd77bc8SDave Plauger * return verbose information to dumpsys() in the given buffer.  Returns
22909dd77bc8SDave Plauger * the number of bytes of verbose output written, or 0 if none.
22919dd77bc8SDave Plauger */
22929dd77bc8SDave Plauger size_t
22939dd77bc8SDave Plauger kmem_dump_finish(char *buf, size_t size)
22949dd77bc8SDave Plauger {
22959dd77bc8SDave Plauger int percent = 0;
22969dd77bc8SDave Plauger size_t used;
22979dd77bc8SDave Plauger char *e = buf + size;
22989dd77bc8SDave Plauger char *p = buf;
22999dd77bc8SDave Plauger
23003608e2e0SJohn Levon if (kmem_dump_curr == kmem_dump_end) {
23013608e2e0SJohn Levon cmn_err(CE_WARN, "exceeded kmem_dump space of %lu "
23023608e2e0SJohn Levon "bytes: kmem state in dump may be inconsistent",
23033608e2e0SJohn Levon kmem_dump_size);
23043608e2e0SJohn Levon }
23053608e2e0SJohn Levon
23063608e2e0SJohn Levon if (kmem_dump_verbose == 0)
23079dd77bc8SDave Plauger return (0);
23089dd77bc8SDave Plauger
23099dd77bc8SDave Plauger used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
23109dd77bc8SDave Plauger percent = (used * 100) / kmem_dump_size;
23119dd77bc8SDave Plauger
23129dd77bc8SDave Plauger kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
23139dd77bc8SDave Plauger kmem_dumppr(&p, e, "used bytes,%ld\n", used);
23149dd77bc8SDave Plauger kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
23159dd77bc8SDave Plauger kmem_dumppr(&p, e, "Oversize allocs,%d\n",
23169dd77bc8SDave Plauger kmem_dump_oversize_allocs);
23179dd77bc8SDave Plauger kmem_dumppr(&p, e, "Oversize max size,%ld\n",
23189dd77bc8SDave Plauger kmem_dump_oversize_max);
23199dd77bc8SDave Plauger
23209dd77bc8SDave Plauger /* return buffer size used */
23219dd77bc8SDave Plauger if (p < e)
23229dd77bc8SDave Plauger bzero(p, e - p);
23239dd77bc8SDave Plauger return (p - buf);
23249dd77bc8SDave Plauger }
23259dd77bc8SDave Plauger
23269dd77bc8SDave Plauger /*
23279dd77bc8SDave Plauger * Allocate a constructed object from alternate dump memory.
23289dd77bc8SDave Plauger */
23299dd77bc8SDave Plauger void *
23309dd77bc8SDave Plauger kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
23319dd77bc8SDave Plauger {
23329dd77bc8SDave Plauger void *buf;
23339dd77bc8SDave Plauger void *curr;
23349dd77bc8SDave Plauger char *bufend;
23359dd77bc8SDave Plauger
23369dd77bc8SDave Plauger /* return a constructed object */
23373608e2e0SJohn Levon if ((buf = cp->cache_dump.kd_freelist) != NULL) {
23383608e2e0SJohn Levon cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
23399dd77bc8SDave Plauger return (buf);
23409dd77bc8SDave Plauger }
23419dd77bc8SDave Plauger
23429dd77bc8SDave Plauger /* create a new constructed object */
23439dd77bc8SDave Plauger curr = kmem_dump_curr;
23449dd77bc8SDave Plauger buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
23459dd77bc8SDave Plauger bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
23469dd77bc8SDave Plauger
23479dd77bc8SDave Plauger /* hat layer objects cannot cross a page boundary */
23489dd77bc8SDave Plauger if (cp->cache_align < PAGESIZE) {
23499dd77bc8SDave Plauger char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
23509dd77bc8SDave Plauger if (bufend > page) {
23519dd77bc8SDave Plauger bufend += page - (char *)buf;
23529dd77bc8SDave Plauger buf = (void *)page;
23539dd77bc8SDave Plauger }
23549dd77bc8SDave Plauger }
23559dd77bc8SDave Plauger
23569dd77bc8SDave Plauger /* fall back to normal alloc if reserved area is used up */
23579dd77bc8SDave Plauger if (bufend > (char *)kmem_dump_end) {
23589dd77bc8SDave Plauger kmem_dump_curr = kmem_dump_end;
23593608e2e0SJohn Levon cp->cache_dump.kd_alloc_fails++;
23609dd77bc8SDave Plauger return (NULL);
23619dd77bc8SDave Plauger }
23629dd77bc8SDave Plauger
23639dd77bc8SDave Plauger /*
23649dd77bc8SDave Plauger * Must advance curr pointer before calling a constructor that
23659dd77bc8SDave Plauger * may also allocate memory.
23669dd77bc8SDave Plauger */
23679dd77bc8SDave Plauger kmem_dump_curr = bufend;
23689dd77bc8SDave Plauger
23699dd77bc8SDave Plauger /* run constructor */
23709dd77bc8SDave Plauger if (cp->cache_constructor != NULL &&
23719dd77bc8SDave Plauger cp->cache_constructor(buf, cp->cache_private, kmflag)
23729dd77bc8SDave Plauger != 0) {
23739dd77bc8SDave Plauger #ifdef DEBUG
23749dd77bc8SDave Plauger printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
23759dd77bc8SDave Plauger cp->cache_name, (void *)cp);
23769dd77bc8SDave Plauger #endif
23779dd77bc8SDave Plauger /* reset curr pointer iff no allocs were done */
23789dd77bc8SDave Plauger if (kmem_dump_curr == bufend)
23799dd77bc8SDave Plauger kmem_dump_curr = curr;
23809dd77bc8SDave Plauger
23813608e2e0SJohn Levon cp->cache_dump.kd_alloc_fails++;
23829dd77bc8SDave Plauger /* fall back to normal alloc if the constructor fails */
23839dd77bc8SDave Plauger return (NULL);
23849dd77bc8SDave Plauger }
23859dd77bc8SDave Plauger
23869dd77bc8SDave Plauger return (buf);
23879dd77bc8SDave Plauger }
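/*
 * A worked example of the page-boundary adjustment above, with illustrative
 * numbers (PAGESIZE assumed to be 4096): suppose kmem_dump_curr ends up at
 * offset 0xfe0 within a page and the aligned object plus its dumpctl needs
 * 0x50 bytes, so bufend would land at offset 0x1030.  That range straddles
 * the page boundary at 0x1000, so buf is advanced to the boundary and
 * bufend to 0x1050; the 0x20 bytes left at the end of the previous page are
 * simply skipped.  This preserves the requirement that hat layer objects
 * never cross a page.
 */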
23889dd77bc8SDave Plauger
23899dd77bc8SDave Plauger /*
23909dd77bc8SDave Plauger * Free a constructed object in alternate dump memory.
23919dd77bc8SDave Plauger */
23929dd77bc8SDave Plauger int
23939dd77bc8SDave Plauger kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
23949dd77bc8SDave Plauger {
23959dd77bc8SDave Plauger /* save constructed buffers for next time */
23969dd77bc8SDave Plauger if ((char *)buf >= (char *)kmem_dump_start &&
23979dd77bc8SDave Plauger (char *)buf < (char *)kmem_dump_end) {
23983608e2e0SJohn Levon KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist;
23993608e2e0SJohn Levon cp->cache_dump.kd_freelist = buf;
24009dd77bc8SDave Plauger return (0);
24019dd77bc8SDave Plauger }
24029dd77bc8SDave Plauger
24039dd77bc8SDave Plauger /* just drop buffers that were allocated before dump started */
24049dd77bc8SDave Plauger if (kmem_dump_curr < kmem_dump_end)
24059dd77bc8SDave Plauger return (0);
24069dd77bc8SDave Plauger
24079dd77bc8SDave Plauger /* fall back to normal free if reserved area is used up */
24089dd77bc8SDave Plauger return (1);
24099dd77bc8SDave Plauger }
24109dd77bc8SDave Plauger
24119dd77bc8SDave Plauger /*
24127c478bd9Sstevel@tonic-gate * Allocate a constructed object from cache cp.
24137c478bd9Sstevel@tonic-gate */
24147c478bd9Sstevel@tonic-gate void *
24157c478bd9Sstevel@tonic-gate kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
24167c478bd9Sstevel@tonic-gate {
24177c478bd9Sstevel@tonic-gate kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
24187c478bd9Sstevel@tonic-gate kmem_magazine_t *fmp;
24197c478bd9Sstevel@tonic-gate void *buf;
24207c478bd9Sstevel@tonic-gate
24217c478bd9Sstevel@tonic-gate mutex_enter(&ccp->cc_lock);
24227c478bd9Sstevel@tonic-gate for (;;) {
24237c478bd9Sstevel@tonic-gate /*
24247c478bd9Sstevel@tonic-gate * If there's an object available in the current CPU's
24257c478bd9Sstevel@tonic-gate * loaded magazine, just take it and return.
24267c478bd9Sstevel@tonic-gate */
24277c478bd9Sstevel@tonic-gate if (ccp->cc_rounds > 0) {
24287c478bd9Sstevel@tonic-gate buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
24297c478bd9Sstevel@tonic-gate ccp->cc_alloc++;
24307c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock);
24319dd77bc8SDave Plauger if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
24329dd77bc8SDave Plauger if (ccp->cc_flags & KMF_DUMPUNSAFE) {
24339dd77bc8SDave Plauger ASSERT(!(ccp->cc_flags &
24349dd77bc8SDave Plauger KMF_DUMPDIVERT));
24353608e2e0SJohn Levon cp->cache_dump.kd_unsafe++;
24369dd77bc8SDave Plauger }
24377c478bd9Sstevel@tonic-gate if ((ccp->cc_flags & KMF_BUFTAG) &&
24387c478bd9Sstevel@tonic-gate kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2439b5fca8f8Stomee caller()) != 0) {
24407c478bd9Sstevel@tonic-gate if (kmflag & KM_NOSLEEP)
24417c478bd9Sstevel@tonic-gate return (NULL);
24427c478bd9Sstevel@tonic-gate mutex_enter(&ccp->cc_lock);
24437c478bd9Sstevel@tonic-gate continue;
24447c478bd9Sstevel@tonic-gate }
24459dd77bc8SDave Plauger }
24467c478bd9Sstevel@tonic-gate return (buf);
24477c478bd9Sstevel@tonic-gate }
24487c478bd9Sstevel@tonic-gate
24497c478bd9Sstevel@tonic-gate /*
24507c478bd9Sstevel@tonic-gate * The loaded magazine is empty. If the previously loaded
24517c478bd9Sstevel@tonic-gate * magazine was full, exchange them and try again.
24527c478bd9Sstevel@tonic-gate */
24537c478bd9Sstevel@tonic-gate if (ccp->cc_prounds > 0) {
24547c478bd9Sstevel@tonic-gate kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
24557c478bd9Sstevel@tonic-gate continue;
24567c478bd9Sstevel@tonic-gate }
24577c478bd9Sstevel@tonic-gate
24587c478bd9Sstevel@tonic-gate /*
24599dd77bc8SDave Plauger * Return an alternate buffer at dump time to preserve
24609dd77bc8SDave Plauger * the heap.
24619dd77bc8SDave Plauger */
24629dd77bc8SDave Plauger if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
24639dd77bc8SDave Plauger if (ccp->cc_flags & KMF_DUMPUNSAFE) {
24649dd77bc8SDave Plauger ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
24659dd77bc8SDave Plauger /* log it so that we can warn about it */
24663608e2e0SJohn Levon cp->cache_dump.kd_unsafe++;
24679dd77bc8SDave Plauger } else {
24689dd77bc8SDave Plauger if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
24699dd77bc8SDave Plauger NULL) {
24709dd77bc8SDave Plauger mutex_exit(&ccp->cc_lock);
24719dd77bc8SDave Plauger return (buf);
24729dd77bc8SDave Plauger }
24739dd77bc8SDave Plauger break; /* fall back to slab layer */
24749dd77bc8SDave Plauger }
24759dd77bc8SDave Plauger }
24769dd77bc8SDave Plauger
24779dd77bc8SDave Plauger /*
24787c478bd9Sstevel@tonic-gate * If the magazine layer is disabled, break out now.
24797c478bd9Sstevel@tonic-gate */
24807c478bd9Sstevel@tonic-gate if (ccp->cc_magsize == 0)
24817c478bd9Sstevel@tonic-gate break;
24827c478bd9Sstevel@tonic-gate
24837c478bd9Sstevel@tonic-gate /*
24847c478bd9Sstevel@tonic-gate * Try to get a full magazine from the depot.
24857c478bd9Sstevel@tonic-gate */
24867c478bd9Sstevel@tonic-gate fmp = kmem_depot_alloc(cp, &cp->cache_full);
24877c478bd9Sstevel@tonic-gate if (fmp != NULL) {
24887c478bd9Sstevel@tonic-gate if (ccp->cc_ploaded != NULL)
24897c478bd9Sstevel@tonic-gate kmem_depot_free(cp, &cp->cache_empty,
24907c478bd9Sstevel@tonic-gate ccp->cc_ploaded);
24917c478bd9Sstevel@tonic-gate kmem_cpu_reload(ccp, fmp, ccp->cc_magsize);
24927c478bd9Sstevel@tonic-gate continue;
24937c478bd9Sstevel@tonic-gate }
24947c478bd9Sstevel@tonic-gate
24957c478bd9Sstevel@tonic-gate /*
24967c478bd9Sstevel@tonic-gate * There are no full magazines in the depot,
24977c478bd9Sstevel@tonic-gate * so fall through to the slab layer.
24987c478bd9Sstevel@tonic-gate */
24997c478bd9Sstevel@tonic-gate break;
25007c478bd9Sstevel@tonic-gate }
25017c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock);
25027c478bd9Sstevel@tonic-gate
25037c478bd9Sstevel@tonic-gate /*
25047c478bd9Sstevel@tonic-gate * We couldn't allocate a constructed object from the magazine layer,
25057c478bd9Sstevel@tonic-gate * so get a raw buffer from the slab layer and apply its constructor.
25067c478bd9Sstevel@tonic-gate */
25077c478bd9Sstevel@tonic-gate buf = kmem_slab_alloc(cp, kmflag);
25087c478bd9Sstevel@tonic-gate
25097c478bd9Sstevel@tonic-gate if (buf == NULL)
25107c478bd9Sstevel@tonic-gate return (NULL);
25117c478bd9Sstevel@tonic-gate
25127c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_BUFTAG) {
25137c478bd9Sstevel@tonic-gate /*
25147c478bd9Sstevel@tonic-gate * Make kmem_cache_alloc_debug() apply the constructor for us.
25157c478bd9Sstevel@tonic-gate */
2516b5fca8f8Stomee int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2517b5fca8f8Stomee if (rc != 0) {
25187c478bd9Sstevel@tonic-gate if (kmflag & KM_NOSLEEP)
25197c478bd9Sstevel@tonic-gate return (NULL);
25207c478bd9Sstevel@tonic-gate /*
25217c478bd9Sstevel@tonic-gate * kmem_cache_alloc_debug() detected corruption
2522b5fca8f8Stomee * but didn't panic (kmem_panic <= 0). We should not be
2523b5fca8f8Stomee * here because the constructor failed (indicated by a
2524b5fca8f8Stomee * return code of 1). Try again.
25257c478bd9Sstevel@tonic-gate */
2526b5fca8f8Stomee ASSERT(rc == -1);
25277c478bd9Sstevel@tonic-gate return (kmem_cache_alloc(cp, kmflag));
25287c478bd9Sstevel@tonic-gate }
25297c478bd9Sstevel@tonic-gate return (buf);
25307c478bd9Sstevel@tonic-gate }
25317c478bd9Sstevel@tonic-gate
25327c478bd9Sstevel@tonic-gate if (cp->cache_constructor != NULL &&
25337c478bd9Sstevel@tonic-gate cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
25341a5e258fSJosef 'Jeff' Sipek atomic_inc_64(&cp->cache_alloc_fail);
25357c478bd9Sstevel@tonic-gate kmem_slab_free(cp, buf);
25367c478bd9Sstevel@tonic-gate return (NULL);
25377c478bd9Sstevel@tonic-gate }
25387c478bd9Sstevel@tonic-gate
25397c478bd9Sstevel@tonic-gate return (buf);
25407c478bd9Sstevel@tonic-gate }
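/*
 * A minimal sketch of the client side of the interfaces above; the cache
 * name, object type, and constructor are illustrative assumptions:
 *
 *	static int
 *	thing_construct(void *buf, void *private, int kmflag)
 *	{
 *		thing_t *tp = buf;
 *
 *		mutex_init(&tp->t_lock, NULL, MUTEX_DEFAULT, NULL);
 *		return (0);
 *	}
 *
 *	static void
 *	thing_destruct(void *buf, void *private)
 *	{
 *		mutex_destroy(&((thing_t *)buf)->t_lock);
 *	}
 *
 *	thing_cache = kmem_cache_create("thing_cache", sizeof (thing_t), 0,
 *	    thing_construct, thing_destruct, NULL, NULL, NULL, 0);
 *
 *	thing_t *tp = kmem_cache_alloc(thing_cache, KM_SLEEP);
 *	...
 *	kmem_cache_free(thing_cache, tp);
 *
 * With KM_SLEEP the allocation cannot fail; KM_NOSLEEP callers must be
 * prepared for a NULL return when neither the magazine layer nor the slab
 * layer can satisfy the request.
 */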
25417c478bd9Sstevel@tonic-gate
25427c478bd9Sstevel@tonic-gate /*
2543b5fca8f8Stomee * The freed argument tells whether or not kmem_cache_free_debug() has already
2544b5fca8f8Stomee * been called so that we can avoid the duplicate free error. For example, a
2545b5fca8f8Stomee * buffer on a magazine has already been freed by the client but is still
2546b5fca8f8Stomee * constructed.
2547b5fca8f8Stomee */
2548b5fca8f8Stomee static void
2549b5fca8f8Stomee kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2550b5fca8f8Stomee {
2551b5fca8f8Stomee if (!freed && (cp->cache_flags & KMF_BUFTAG))
2552b5fca8f8Stomee if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2553b5fca8f8Stomee return;
2554b5fca8f8Stomee
2555b5fca8f8Stomee /*
2556b5fca8f8Stomee * Note that if KMF_DEADBEEF is in effect and KMF_LITE is not,
2557b5fca8f8Stomee * kmem_cache_free_debug() will have already applied the destructor.
2558b5fca8f8Stomee */
2559b5fca8f8Stomee if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
2560b5fca8f8Stomee cp->cache_destructor != NULL) {
2561b5fca8f8Stomee if (cp->cache_flags & KMF_DEADBEEF) { /* KMF_LITE implied */
2562b5fca8f8Stomee kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2563b5fca8f8Stomee *(uint64_t *)buf = btp->bt_redzone;
2564b5fca8f8Stomee cp->cache_destructor(buf, cp->cache_private);
2565b5fca8f8Stomee *(uint64_t *)buf = KMEM_FREE_PATTERN;
2566b5fca8f8Stomee } else {
2567b5fca8f8Stomee cp->cache_destructor(buf, cp->cache_private);
2568b5fca8f8Stomee }
2569b5fca8f8Stomee }
2570b5fca8f8Stomee
2571b5fca8f8Stomee kmem_slab_free(cp, buf);
2572b5fca8f8Stomee }
2573b5fca8f8Stomee
2574b5fca8f8Stomee /*
2575b942e89bSDavid Valin * Used when there's no room to free a buffer to the per-CPU cache.
2576b942e89bSDavid Valin * Drops and re-acquires &ccp->cc_lock, and returns non-zero if the
2577b942e89bSDavid Valin * caller should try freeing to the per-CPU cache again.
2578b942e89bSDavid Valin * Note that we don't directly install the magazine in the cpu cache,
2579b942e89bSDavid Valin * since its state may have changed wildly while the lock was dropped.
2580b942e89bSDavid Valin */
2581b942e89bSDavid Valin static int
2582b942e89bSDavid Valin kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
2583b942e89bSDavid Valin {
2584b942e89bSDavid Valin kmem_magazine_t *emp;
2585b942e89bSDavid Valin kmem_magtype_t *mtp;
2586b942e89bSDavid Valin
2587b942e89bSDavid Valin ASSERT(MUTEX_HELD(&ccp->cc_lock));
2588b942e89bSDavid Valin ASSERT(((uint_t)ccp->cc_rounds == ccp->cc_magsize ||
2589b942e89bSDavid Valin ((uint_t)ccp->cc_rounds == -1)) &&
2590b942e89bSDavid Valin ((uint_t)ccp->cc_prounds == ccp->cc_magsize ||
2591b942e89bSDavid Valin ((uint_t)ccp->cc_prounds == -1)));
2592b942e89bSDavid Valin
2593b942e89bSDavid Valin emp = kmem_depot_alloc(cp, &cp->cache_empty);
2594b942e89bSDavid Valin if (emp != NULL) {
2595b942e89bSDavid Valin if (ccp->cc_ploaded != NULL)
2596b942e89bSDavid Valin kmem_depot_free(cp, &cp->cache_full,
2597b942e89bSDavid Valin ccp->cc_ploaded);
2598b942e89bSDavid Valin kmem_cpu_reload(ccp, emp, 0);
2599b942e89bSDavid Valin return (1);
2600b942e89bSDavid Valin }
2601b942e89bSDavid Valin /*
2602b942e89bSDavid Valin * There are no empty magazines in the depot,
2603b942e89bSDavid Valin * so try to allocate a new one. We must drop all locks
2604b942e89bSDavid Valin * across kmem_cache_alloc() because lower layers may
2605b942e89bSDavid Valin * attempt to allocate from this cache.
2606b942e89bSDavid Valin */
2607b942e89bSDavid Valin mtp = cp->cache_magtype;
2608b942e89bSDavid Valin mutex_exit(&ccp->cc_lock);
2609b942e89bSDavid Valin emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
2610b942e89bSDavid Valin mutex_enter(&ccp->cc_lock);
2611b942e89bSDavid Valin
2612b942e89bSDavid Valin if (emp != NULL) {
2613b942e89bSDavid Valin /*
2614b942e89bSDavid Valin * We successfully allocated an empty magazine.
2615b942e89bSDavid Valin * However, we had to drop ccp->cc_lock to do it,
2616b942e89bSDavid Valin * so the cache's magazine size may have changed.
2617b942e89bSDavid Valin * If so, free the magazine and try again.
2618b942e89bSDavid Valin */
2619b942e89bSDavid Valin if (ccp->cc_magsize != mtp->mt_magsize) {
2620b942e89bSDavid Valin mutex_exit(&ccp->cc_lock);
2621b942e89bSDavid Valin kmem_cache_free(mtp->mt_cache, emp);
2622b942e89bSDavid Valin mutex_enter(&ccp->cc_lock);
2623b942e89bSDavid Valin return (1);
2624b942e89bSDavid Valin }
2625b942e89bSDavid Valin
2626b942e89bSDavid Valin /*
2627b942e89bSDavid Valin * We got a magazine of the right size. Add it to
2628b942e89bSDavid Valin * the depot and try the whole dance again.
2629b942e89bSDavid Valin */
2630b942e89bSDavid Valin kmem_depot_free(cp, &cp->cache_empty, emp);
2631b942e89bSDavid Valin return (1);
2632b942e89bSDavid Valin }
2633b942e89bSDavid Valin
2634b942e89bSDavid Valin /*
2635b942e89bSDavid Valin * We couldn't allocate an empty magazine,
2636b942e89bSDavid Valin * so fall through to the slab layer.
2637b942e89bSDavid Valin */
2638b942e89bSDavid Valin return (0);
2639b942e89bSDavid Valin }
2640b942e89bSDavid Valin
2641b942e89bSDavid Valin /*
26427c478bd9Sstevel@tonic-gate * Free a constructed object to cache cp.
26437c478bd9Sstevel@tonic-gate */
26447c478bd9Sstevel@tonic-gate void
26457c478bd9Sstevel@tonic-gate kmem_cache_free(kmem_cache_t *cp, void *buf)
26467c478bd9Sstevel@tonic-gate {
26477c478bd9Sstevel@tonic-gate kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
26487c478bd9Sstevel@tonic-gate
2649b5fca8f8Stomee /*
2650b5fca8f8Stomee * The client must not free either of the buffers passed to the move
2651b5fca8f8Stomee * callback function.
2652b5fca8f8Stomee */
2653b5fca8f8Stomee ASSERT(cp->cache_defrag == NULL ||
2654b5fca8f8Stomee cp->cache_defrag->kmd_thread != curthread ||
2655b5fca8f8Stomee (buf != cp->cache_defrag->kmd_from_buf &&
2656b5fca8f8Stomee buf != cp->cache_defrag->kmd_to_buf));
2657b5fca8f8Stomee
26589dd77bc8SDave Plauger if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
26599dd77bc8SDave Plauger if (ccp->cc_flags & KMF_DUMPUNSAFE) {
26609dd77bc8SDave Plauger ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
26619dd77bc8SDave Plauger /* log it so that we can warn about it */
26623608e2e0SJohn Levon cp->cache_dump.kd_unsafe++;
26639dd77bc8SDave Plauger } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
26649dd77bc8SDave Plauger return;
26659dd77bc8SDave Plauger }
26669dd77bc8SDave Plauger if (ccp->cc_flags & KMF_BUFTAG) {
26677c478bd9Sstevel@tonic-gate if (kmem_cache_free_debug(cp, buf, caller()) == -1)
26687c478bd9Sstevel@tonic-gate return;
26699dd77bc8SDave Plauger }
26709dd77bc8SDave Plauger }
26717c478bd9Sstevel@tonic-gate
26727c478bd9Sstevel@tonic-gate mutex_enter(&ccp->cc_lock);
2673b942e89bSDavid Valin /*
2674b942e89bSDavid Valin * Any changes to this logic should be reflected in kmem_slab_prefill()
2675b942e89bSDavid Valin */
26767c478bd9Sstevel@tonic-gate for (;;) {
26777c478bd9Sstevel@tonic-gate /*
26787c478bd9Sstevel@tonic-gate * If there's a slot available in the current CPU's
26797c478bd9Sstevel@tonic-gate * loaded magazine, just put the object there and return.
26807c478bd9Sstevel@tonic-gate */
26817c478bd9Sstevel@tonic-gate if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
26827c478bd9Sstevel@tonic-gate ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
26837c478bd9Sstevel@tonic-gate ccp->cc_free++;
26847c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock);
26857c478bd9Sstevel@tonic-gate return;
26867c478bd9Sstevel@tonic-gate }
26877c478bd9Sstevel@tonic-gate
26887c478bd9Sstevel@tonic-gate /*
26897c478bd9Sstevel@tonic-gate * The loaded magazine is full. If the previously loaded
26907c478bd9Sstevel@tonic-gate * magazine was empty, exchange them and try again.
26917c478bd9Sstevel@tonic-gate */
26927c478bd9Sstevel@tonic-gate if (ccp->cc_prounds == 0) {
26937c478bd9Sstevel@tonic-gate kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
26947c478bd9Sstevel@tonic-gate continue;
26957c478bd9Sstevel@tonic-gate }
26967c478bd9Sstevel@tonic-gate
26977c478bd9Sstevel@tonic-gate /*
26987c478bd9Sstevel@tonic-gate * If the magazine layer is disabled, break out now.
26997c478bd9Sstevel@tonic-gate */
27007c478bd9Sstevel@tonic-gate if (ccp->cc_magsize == 0)
27017c478bd9Sstevel@tonic-gate break;
27027c478bd9Sstevel@tonic-gate
2703b942e89bSDavid Valin if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
27047c478bd9Sstevel@tonic-gate /*
2705b942e89bSDavid Valin * We couldn't free our constructed object to the
2706b942e89bSDavid Valin * magazine layer, so apply its destructor and free it
2707b942e89bSDavid Valin * to the slab layer.
27087c478bd9Sstevel@tonic-gate */
27097c478bd9Sstevel@tonic-gate break;
27107c478bd9Sstevel@tonic-gate }
2711b942e89bSDavid Valin }
27127c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock);
2713b942e89bSDavid Valin kmem_slab_free_constructed(cp, buf, B_TRUE);
2714b942e89bSDavid Valin }
2715b942e89bSDavid Valin
2716b942e89bSDavid Valin static void
2717b942e89bSDavid Valin kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
2718b942e89bSDavid Valin {
2719b942e89bSDavid Valin kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2720b942e89bSDavid Valin int cache_flags = cp->cache_flags;
2721b942e89bSDavid Valin
2722b942e89bSDavid Valin kmem_bufctl_t *next, *head;
2723b942e89bSDavid Valin size_t nbufs;
27247c478bd9Sstevel@tonic-gate
27257c478bd9Sstevel@tonic-gate /*
2726b942e89bSDavid Valin * Completely allocate the newly created slab and put the pre-allocated
2727b942e89bSDavid Valin * buffers in magazines. Any of the buffers that cannot be put in
2728b942e89bSDavid Valin * magazines must be returned to the slab.
27297c478bd9Sstevel@tonic-gate */
2730b942e89bSDavid Valin ASSERT(MUTEX_HELD(&cp->cache_lock));
2731b942e89bSDavid Valin ASSERT((cache_flags & (KMF_PREFILL|KMF_BUFTAG)) == KMF_PREFILL);
2732b942e89bSDavid Valin ASSERT(cp->cache_constructor == NULL);
2733b942e89bSDavid Valin ASSERT(sp->slab_cache == cp);
2734b942e89bSDavid Valin ASSERT(sp->slab_refcnt == 1);
2735b942e89bSDavid Valin ASSERT(sp->slab_head != NULL && sp->slab_chunks > sp->slab_refcnt);
2736b942e89bSDavid Valin ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
2737b942e89bSDavid Valin
2738b942e89bSDavid Valin head = sp->slab_head;
2739b942e89bSDavid Valin nbufs = (sp->slab_chunks - sp->slab_refcnt);
2740b942e89bSDavid Valin sp->slab_head = NULL;
2741b942e89bSDavid Valin sp->slab_refcnt += nbufs;
2742b942e89bSDavid Valin cp->cache_bufslab -= nbufs;
2743b942e89bSDavid Valin cp->cache_slab_alloc += nbufs;
2744b942e89bSDavid Valin list_insert_head(&cp->cache_complete_slabs, sp);
2745b942e89bSDavid Valin cp->cache_complete_slab_count++;
2746b942e89bSDavid Valin mutex_exit(&cp->cache_lock);
2747b942e89bSDavid Valin mutex_enter(&ccp->cc_lock);
2748b942e89bSDavid Valin
2749b942e89bSDavid Valin while (head != NULL) {
2750b942e89bSDavid Valin void *buf = KMEM_BUF(cp, head);
2751b942e89bSDavid Valin /*
2752b942e89bSDavid Valin * If there's a slot available in the current CPU's
2753b942e89bSDavid Valin * loaded magazine, just put the object there and
2754b942e89bSDavid Valin * continue.
2755b942e89bSDavid Valin */
2756b942e89bSDavid Valin if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2757b942e89bSDavid Valin ccp->cc_loaded->mag_round[ccp->cc_rounds++] =
2758b942e89bSDavid Valin buf;
2759b942e89bSDavid Valin ccp->cc_free++;
2760b942e89bSDavid Valin nbufs--;
2761b942e89bSDavid Valin head = head->bc_next;
2762b942e89bSDavid Valin continue;
2763b942e89bSDavid Valin }
2764b942e89bSDavid Valin
2765b942e89bSDavid Valin /*
2766b942e89bSDavid Valin * The loaded magazine is full. If the previously
2767b942e89bSDavid Valin * loaded magazine was empty, exchange them and try
2768b942e89bSDavid Valin * again.
2769b942e89bSDavid Valin */
2770b942e89bSDavid Valin if (ccp->cc_prounds == 0) {
2771b942e89bSDavid Valin kmem_cpu_reload(ccp, ccp->cc_ploaded,
2772b942e89bSDavid Valin ccp->cc_prounds);
2773b942e89bSDavid Valin continue;
2774b942e89bSDavid Valin }
2775b942e89bSDavid Valin
2776b942e89bSDavid Valin /*
2777b942e89bSDavid Valin * If the magazine layer is disabled, break out now.
2778b942e89bSDavid Valin */
2779b942e89bSDavid Valin
2780b942e89bSDavid Valin if (ccp->cc_magsize == 0) {
2781b942e89bSDavid Valin break;
2782b942e89bSDavid Valin }
2783b942e89bSDavid Valin
2784b942e89bSDavid Valin if (!kmem_cpucache_magazine_alloc(ccp, cp))
2785b942e89bSDavid Valin break;
2786b942e89bSDavid Valin }
2787b942e89bSDavid Valin mutex_exit(&ccp->cc_lock);
2788b942e89bSDavid Valin if (nbufs != 0) {
2789b942e89bSDavid Valin ASSERT(head != NULL);
2790b942e89bSDavid Valin
2791b942e89bSDavid Valin /*
2792b942e89bSDavid Valin * If there was a failure, return remaining objects to
2793b942e89bSDavid Valin * the slab
2794b942e89bSDavid Valin */
2795b942e89bSDavid Valin while (head != NULL) {
2796b942e89bSDavid Valin ASSERT(nbufs != 0);
2797b942e89bSDavid Valin next = head->bc_next;
2798b942e89bSDavid Valin head->bc_next = NULL;
2799b942e89bSDavid Valin kmem_slab_free(cp, KMEM_BUF(cp, head));
2800b942e89bSDavid Valin head = next;
2801b942e89bSDavid Valin nbufs--;
2802b942e89bSDavid Valin }
2803b942e89bSDavid Valin }
2804b942e89bSDavid Valin ASSERT(head == NULL);
2805b942e89bSDavid Valin ASSERT(nbufs == 0);
2806b942e89bSDavid Valin mutex_enter(&cp->cache_lock);
28077c478bd9Sstevel@tonic-gate }
28087c478bd9Sstevel@tonic-gate
28097c478bd9Sstevel@tonic-gate void *
28107c478bd9Sstevel@tonic-gate kmem_zalloc(size_t size, int kmflag)
28117c478bd9Sstevel@tonic-gate {
2812dce01e3fSJonathan W Adams size_t index;
28137c478bd9Sstevel@tonic-gate void *buf;
28147c478bd9Sstevel@tonic-gate
2815dce01e3fSJonathan W Adams if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
28167c478bd9Sstevel@tonic-gate kmem_cache_t *cp = kmem_alloc_table[index];
28177c478bd9Sstevel@tonic-gate buf = kmem_cache_alloc(cp, kmflag);
28187c478bd9Sstevel@tonic-gate if (buf != NULL) {
28199dd77bc8SDave Plauger if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
28207c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
28217c478bd9Sstevel@tonic-gate ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
28227c478bd9Sstevel@tonic-gate ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
28237c478bd9Sstevel@tonic-gate
28247c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_LITE) {
28257c478bd9Sstevel@tonic-gate KMEM_BUFTAG_LITE_ENTER(btp,
28267c478bd9Sstevel@tonic-gate kmem_lite_count, caller());
28277c478bd9Sstevel@tonic-gate }
28287c478bd9Sstevel@tonic-gate }
28297c478bd9Sstevel@tonic-gate bzero(buf, size);
28307c478bd9Sstevel@tonic-gate }
28317c478bd9Sstevel@tonic-gate } else {
28327c478bd9Sstevel@tonic-gate buf = kmem_alloc(size, kmflag);
28337c478bd9Sstevel@tonic-gate if (buf != NULL)
28347c478bd9Sstevel@tonic-gate bzero(buf, size);
28357c478bd9Sstevel@tonic-gate }
28367c478bd9Sstevel@tonic-gate return (buf);
28377c478bd9Sstevel@tonic-gate }
28387c478bd9Sstevel@tonic-gate
28397c478bd9Sstevel@tonic-gate void *
28407c478bd9Sstevel@tonic-gate kmem_alloc(size_t size, int kmflag)
28417c478bd9Sstevel@tonic-gate {
2842dce01e3fSJonathan W Adams size_t index;
2843dce01e3fSJonathan W Adams kmem_cache_t *cp;
28447c478bd9Sstevel@tonic-gate void *buf;
28457c478bd9Sstevel@tonic-gate
2846dce01e3fSJonathan W Adams if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2847dce01e3fSJonathan W Adams cp = kmem_alloc_table[index];
2848dce01e3fSJonathan W Adams /* fall through to kmem_cache_alloc() */
2849dce01e3fSJonathan W Adams
2850dce01e3fSJonathan W Adams } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2851dce01e3fSJonathan W Adams kmem_big_alloc_table_max) {
2852dce01e3fSJonathan W Adams cp = kmem_big_alloc_table[index];
2853dce01e3fSJonathan W Adams /* fall through to kmem_cache_alloc() */
2854dce01e3fSJonathan W Adams
2855dce01e3fSJonathan W Adams } else {
2856dce01e3fSJonathan W Adams if (size == 0)
2857dce01e3fSJonathan W Adams return (NULL);
2858dce01e3fSJonathan W Adams
2859dce01e3fSJonathan W Adams buf = vmem_alloc(kmem_oversize_arena, size,
2860dce01e3fSJonathan W Adams kmflag & KM_VMFLAGS);
2861dce01e3fSJonathan W Adams if (buf == NULL)
2862dce01e3fSJonathan W Adams kmem_log_event(kmem_failure_log, NULL, NULL,
2863dce01e3fSJonathan W Adams (void *)size);
28649dd77bc8SDave Plauger else if (KMEM_DUMP(kmem_slab_cache)) {
28659dd77bc8SDave Plauger /* stats for dump intercept */
28669dd77bc8SDave Plauger kmem_dump_oversize_allocs++;
28679dd77bc8SDave Plauger if (size > kmem_dump_oversize_max)
28689dd77bc8SDave Plauger kmem_dump_oversize_max = size;
28699dd77bc8SDave Plauger }
2870dce01e3fSJonathan W Adams return (buf);
2871dce01e3fSJonathan W Adams }
2872dce01e3fSJonathan W Adams
28737c478bd9Sstevel@tonic-gate buf = kmem_cache_alloc(cp, kmflag);
28749dd77bc8SDave Plauger if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
28757c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
28767c478bd9Sstevel@tonic-gate ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
28777c478bd9Sstevel@tonic-gate ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
28787c478bd9Sstevel@tonic-gate
28797c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_LITE) {
2880dce01e3fSJonathan W Adams KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller());
28817c478bd9Sstevel@tonic-gate }
28827c478bd9Sstevel@tonic-gate }
28837c478bd9Sstevel@tonic-gate return (buf);
28847c478bd9Sstevel@tonic-gate }
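/*
 * A worked sketch of the table lookup above (illustrative only; it assumes
 * KMEM_ALIGN_SHIFT is 3, i.e. 8-byte alignment; see kmem_impl.h for the
 * authoritative constants):
 *
 *	kmem_alloc(40, KM_SLEEP)
 *		index = (40 - 1) >> 3 = 4
 *		cp = kmem_alloc_table[4]	(a fixed-size cache such as kmem_alloc_40)
 *
 * Sizes past KMEM_ALLOC_TABLE_MAX map the same way into kmem_big_alloc_table
 * using KMEM_BIG_SHIFT, and anything larger still is handed to vmem_alloc()
 * from kmem_oversize_arena, as coded above.
 */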
28857c478bd9Sstevel@tonic-gate
28867c478bd9Sstevel@tonic-gate void
28877c478bd9Sstevel@tonic-gate kmem_free(void *buf, size_t size)
28887c478bd9Sstevel@tonic-gate {
2889dce01e3fSJonathan W Adams size_t index;
2890dce01e3fSJonathan W Adams kmem_cache_t *cp;
28917c478bd9Sstevel@tonic-gate
2892dce01e3fSJonathan W Adams if ((index = (size - 1) >> KMEM_ALIGN_SHIFT) < KMEM_ALLOC_TABLE_MAX) {
2893dce01e3fSJonathan W Adams cp = kmem_alloc_table[index];
2894dce01e3fSJonathan W Adams /* fall through to kmem_cache_free() */
2895dce01e3fSJonathan W Adams
2896dce01e3fSJonathan W Adams } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2897dce01e3fSJonathan W Adams kmem_big_alloc_table_max) {
2898dce01e3fSJonathan W Adams cp = kmem_big_alloc_table[index];
2899dce01e3fSJonathan W Adams /* fall through to kmem_cache_free() */
2900dce01e3fSJonathan W Adams
2901dce01e3fSJonathan W Adams } else {
290296992ee7SEthindra Ramamurthy EQUIV(buf == NULL, size == 0);
2903dce01e3fSJonathan W Adams if (buf == NULL && size == 0)
2904dce01e3fSJonathan W Adams return;
2905dce01e3fSJonathan W Adams vmem_free(kmem_oversize_arena, buf, size);
2906dce01e3fSJonathan W Adams return;
2907dce01e3fSJonathan W Adams }
2908dce01e3fSJonathan W Adams
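/*
 * With KMF_BUFTAG debugging enabled, verify the caller-supplied size against
 * the size encoded at allocation time and check the redzone byte before the
 * buffer is returned to the cache; mismatches are reported as duplicate-free,
 * bad-size, or redzone errors as appropriate.
 */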
29099dd77bc8SDave Plauger if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
29107c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
29117c478bd9Sstevel@tonic-gate uint32_t *ip = (uint32_t *)btp;
29127c478bd9Sstevel@tonic-gate if (ip[1] != KMEM_SIZE_ENCODE(size)) {
29137c478bd9Sstevel@tonic-gate if (*(uint64_t *)buf == KMEM_FREE_PATTERN) {
29147c478bd9Sstevel@tonic-gate kmem_error(KMERR_DUPFREE, cp, buf);
29157c478bd9Sstevel@tonic-gate return;
29167c478bd9Sstevel@tonic-gate }
29177c478bd9Sstevel@tonic-gate if (KMEM_SIZE_VALID(ip[1])) {
29187c478bd9Sstevel@tonic-gate ip[0] = KMEM_SIZE_ENCODE(size);
29197c478bd9Sstevel@tonic-gate kmem_error(KMERR_BADSIZE, cp, buf);
29207c478bd9Sstevel@tonic-gate } else {
29217c478bd9Sstevel@tonic-gate kmem_error(KMERR_REDZONE, cp, buf);
29227c478bd9Sstevel@tonic-gate }
29237c478bd9Sstevel@tonic-gate return;
29247c478bd9Sstevel@tonic-gate }
29257c478bd9Sstevel@tonic-gate if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) {
29267c478bd9Sstevel@tonic-gate kmem_error(KMERR_REDZONE, cp, buf);
29277c478bd9Sstevel@tonic-gate return;
29287c478bd9Sstevel@tonic-gate }
29297c478bd9Sstevel@tonic-gate btp->bt_redzone = KMEM_REDZONE_PATTERN;
29307c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_LITE) {
29317c478bd9Sstevel@tonic-gate KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count,
29327c478bd9Sstevel@tonic-gate caller());
29337c478bd9Sstevel@tonic-gate }
29347c478bd9Sstevel@tonic-gate }
29357c478bd9Sstevel@tonic-gate kmem_cache_free(cp, buf);
29367c478bd9Sstevel@tonic-gate }
29377c478bd9Sstevel@tonic-gate
29387c478bd9Sstevel@tonic-gate void *
29397c478bd9Sstevel@tonic-gate kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
29407c478bd9Sstevel@tonic-gate {
29417c478bd9Sstevel@tonic-gate size_t realsize = size + vmp->vm_quantum;
29427c478bd9Sstevel@tonic-gate void *addr;
29437c478bd9Sstevel@tonic-gate
29447c478bd9Sstevel@tonic-gate /*
29457c478bd9Sstevel@tonic-gate * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
29467c478bd9Sstevel@tonic-gate * vm_quantum will cause integer wraparound. Check for this, and
29477c478bd9Sstevel@tonic-gate * blow off the firewall page in this case. Note that such a
29487c478bd9Sstevel@tonic-gate * giant allocation (the entire kernel address space) can never
29497c478bd9Sstevel@tonic-gate * be satisfied, so it will either fail immediately (VM_NOSLEEP)
29507c478bd9Sstevel@tonic-gate * or sleep forever (VM_SLEEP). Thus, there is no need for a
29517c478bd9Sstevel@tonic-gate * corresponding check in kmem_firewall_va_free().
29527c478bd9Sstevel@tonic-gate */
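/*
 * For example (purely illustrative, 64-bit values): with a size of
 * ULONG_MAX - 0x1000 and an 8K vm_quantum, realsize wraps around to 0xfff,
 * which is less than size; the check below catches exactly this unsigned
 * overflow and simply drops the extra quantum.
 */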
29537c478bd9Sstevel@tonic-gate if (realsize < size)
29547c478bd9Sstevel@tonic-gate realsize = size;
29557c478bd9Sstevel@tonic-gate
29567c478bd9Sstevel@tonic-gate /*
29577c478bd9Sstevel@tonic-gate * While boot still owns resource management, make sure that this
29587c478bd9Sstevel@tonic-gate * redzone virtual address allocation is properly accounted for in
29597c478bd9Sstevel@tonic-gate * OBP's "virtual-memory" "available" lists because we're
29607c478bd9Sstevel@tonic-gate * effectively claiming them for a red zone. If we don't do this,
29617c478bd9Sstevel@tonic-gate * the available lists become too fragmented and too large for the
29627c478bd9Sstevel@tonic-gate * current boot/kernel memory list interface.
29637c478bd9Sstevel@tonic-gate */
29647c478bd9Sstevel@tonic-gate addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT);
29657c478bd9Sstevel@tonic-gate
29667c478bd9Sstevel@tonic-gate if (addr != NULL && kvseg.s_base == NULL && realsize != size)
29677c478bd9Sstevel@tonic-gate (void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum);
29687c478bd9Sstevel@tonic-gate
29697c478bd9Sstevel@tonic-gate return (addr);
29707c478bd9Sstevel@tonic-gate }
29717c478bd9Sstevel@tonic-gate
29727c478bd9Sstevel@tonic-gate void
29737c478bd9Sstevel@tonic-gate kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
29747c478bd9Sstevel@tonic-gate {
29757c478bd9Sstevel@tonic-gate ASSERT((kvseg.s_base == NULL ?
29767c478bd9Sstevel@tonic-gate va_to_pfn((char *)addr + size) :
29777c478bd9Sstevel@tonic-gate hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID);
29787c478bd9Sstevel@tonic-gate
29797c478bd9Sstevel@tonic-gate vmem_free(vmp, addr, size + vmp->vm_quantum);
29807c478bd9Sstevel@tonic-gate }
29817c478bd9Sstevel@tonic-gate
29827c478bd9Sstevel@tonic-gate /*
29837c478bd9Sstevel@tonic-gate * Try to allocate at least `size' bytes of memory without sleeping or
29847c478bd9Sstevel@tonic-gate * panicking. Return actual allocated size in `asize'. If allocation failed,
29857c478bd9Sstevel@tonic-gate * try final allocation with sleep or panic allowed.
29867c478bd9Sstevel@tonic-gate */
29877c478bd9Sstevel@tonic-gate void *
29887c478bd9Sstevel@tonic-gate kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag)
29897c478bd9Sstevel@tonic-gate {
29907c478bd9Sstevel@tonic-gate void *p;
29917c478bd9Sstevel@tonic-gate
29927c478bd9Sstevel@tonic-gate *asize = P2ROUNDUP(size, KMEM_ALIGN);
29937c478bd9Sstevel@tonic-gate do {
29947c478bd9Sstevel@tonic-gate p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC);
29957c478bd9Sstevel@tonic-gate if (p != NULL)
29967c478bd9Sstevel@tonic-gate return (p);
29977c478bd9Sstevel@tonic-gate *asize += KMEM_ALIGN;
29987c478bd9Sstevel@tonic-gate } while (*asize <= PAGESIZE);
29997c478bd9Sstevel@tonic-gate
30007c478bd9Sstevel@tonic-gate *asize = P2ROUNDUP(size, KMEM_ALIGN);
30017c478bd9Sstevel@tonic-gate return (kmem_alloc(*asize, kmflag));
30027c478bd9Sstevel@tonic-gate }
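/*
 * A usage sketch (hypothetical caller; the names are illustrative only):
 *
 *	size_t asize;
 *	void *buf = kmem_alloc_tryhard(len, &asize, KM_SLEEP);
 *	...
 *	kmem_free(buf, asize);
 *
 * The caller must free with the returned 'asize', not the requested 'len',
 * since the allocation may have been rounded up.
 */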
30037c478bd9Sstevel@tonic-gate
30047c478bd9Sstevel@tonic-gate /*
30057c478bd9Sstevel@tonic-gate * Reclaim all unused memory from a cache.
30067c478bd9Sstevel@tonic-gate */
30077c478bd9Sstevel@tonic-gate static void
30087c478bd9Sstevel@tonic-gate kmem_cache_reap(kmem_cache_t *cp)
30097c478bd9Sstevel@tonic-gate {
3010b5fca8f8Stomee ASSERT(taskq_member(kmem_taskq, curthread));
3011686031edSTom Erickson cp->cache_reap++;
3012b5fca8f8Stomee
30137c478bd9Sstevel@tonic-gate /*
30147c478bd9Sstevel@tonic-gate * Ask the cache's owner to free some memory if possible.
30157c478bd9Sstevel@tonic-gate * The idea is to handle things like the inode cache, which
30167c478bd9Sstevel@tonic-gate * typically sits on a bunch of memory that it doesn't truly
30177c478bd9Sstevel@tonic-gate * *need*. Reclaim policy is entirely up to the owner; this
30187c478bd9Sstevel@tonic-gate * callback is just an advisory plea for help.
30197c478bd9Sstevel@tonic-gate */
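/*
 * Such a callback is supplied as the 'reclaim' argument to
 * kmem_cache_create().  A minimal sketch (hypothetical client names):
 *
 *	static void
 *	foo_reclaim(void *private)
 *	{
 *		foo_state_t *fsp = private;
 *
 *		foo_trim_idle_objects(fsp);
 *	}
 *
 * The callback receives the cache's 'private' pointer and should release
 * whatever cached objects the client can easily give back.
 */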
3020b5fca8f8Stomee if (cp->cache_reclaim != NULL) {
3021b5fca8f8Stomee long delta;
3022b5fca8f8Stomee
3023b5fca8f8Stomee /*
3024b5fca8f8Stomee * Reclaimed memory should be reapable (not included in the
3025b5fca8f8Stomee * depot's working set).
3026b5fca8f8Stomee */
3027b5fca8f8Stomee delta = cp->cache_full.ml_total;
30287c478bd9Sstevel@tonic-gate cp->cache_reclaim(cp->cache_private);
3029b5fca8f8Stomee delta = cp->cache_full.ml_total - delta;
3030b5fca8f8Stomee if (delta > 0) {
3031b5fca8f8Stomee mutex_enter(&cp->cache_depot_lock);
3032b5fca8f8Stomee cp->cache_full.ml_reaplimit += delta;
3033b5fca8f8Stomee cp->cache_full.ml_min += delta;
3034b5fca8f8Stomee mutex_exit(&cp->cache_depot_lock);
3035b5fca8f8Stomee }
3036b5fca8f8Stomee }
30377c478bd9Sstevel@tonic-gate
30387c478bd9Sstevel@tonic-gate kmem_depot_ws_reap(cp);
3039b5fca8f8Stomee
3040b5fca8f8Stomee if (cp->cache_defrag != NULL && !kmem_move_noreap) {
3041b5fca8f8Stomee kmem_cache_defrag(cp);
3042b5fca8f8Stomee }
30437c478bd9Sstevel@tonic-gate }
30447c478bd9Sstevel@tonic-gate
30457c478bd9Sstevel@tonic-gate static void
30467c478bd9Sstevel@tonic-gate kmem_reap_timeout(void *flag_arg)
30477c478bd9Sstevel@tonic-gate {
30487c478bd9Sstevel@tonic-gate uint32_t *flag = (uint32_t *)flag_arg;
30497c478bd9Sstevel@tonic-gate
30507c478bd9Sstevel@tonic-gate ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
30517c478bd9Sstevel@tonic-gate *flag = 0;
30527c478bd9Sstevel@tonic-gate }
30537c478bd9Sstevel@tonic-gate
30547c478bd9Sstevel@tonic-gate static void
30557c478bd9Sstevel@tonic-gate kmem_reap_done(void *flag)
30567c478bd9Sstevel@tonic-gate {
30576e00b116SPeter Telford if (!callout_init_done) {
30586e00b116SPeter Telford /* can't schedule a timeout at this point */
30596e00b116SPeter Telford kmem_reap_timeout(flag);
30606e00b116SPeter Telford } else {
30617c478bd9Sstevel@tonic-gate (void) timeout(kmem_reap_timeout, flag, kmem_reap_interval);
30627c478bd9Sstevel@tonic-gate }
30636e00b116SPeter Telford }
30647c478bd9Sstevel@tonic-gate
30657c478bd9Sstevel@tonic-gate static void
30667c478bd9Sstevel@tonic-gate kmem_reap_start(void *flag)
30677c478bd9Sstevel@tonic-gate {
30687c478bd9Sstevel@tonic-gate ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
30697c478bd9Sstevel@tonic-gate
30707c478bd9Sstevel@tonic-gate if (flag == &kmem_reaping) {
30717c478bd9Sstevel@tonic-gate kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
30727c478bd9Sstevel@tonic-gate /*
30737c478bd9Sstevel@tonic-gate * if we have segkp under heap, reap segkp cache.
30747c478bd9Sstevel@tonic-gate */
30757c478bd9Sstevel@tonic-gate if (segkp_fromheap)
30767c478bd9Sstevel@tonic-gate segkp_cache_free();
30777c478bd9Sstevel@tonic-gate }
30787c478bd9Sstevel@tonic-gate else
30797c478bd9Sstevel@tonic-gate kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
30807c478bd9Sstevel@tonic-gate
30817c478bd9Sstevel@tonic-gate /*
30827c478bd9Sstevel@tonic-gate * We use taskq_dispatch() to schedule a timeout to clear
30837c478bd9Sstevel@tonic-gate * the flag so that kmem_reap() becomes self-throttling:
30847c478bd9Sstevel@tonic-gate * we won't reap again until the current reap completes *and*
30857c478bd9Sstevel@tonic-gate * at least kmem_reap_interval ticks have elapsed.
30867c478bd9Sstevel@tonic-gate */
30877c478bd9Sstevel@tonic-gate if (!taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP))
30887c478bd9Sstevel@tonic-gate kmem_reap_done(flag);
30897c478bd9Sstevel@tonic-gate }
30907c478bd9Sstevel@tonic-gate
30917c478bd9Sstevel@tonic-gate static void
30927c478bd9Sstevel@tonic-gate kmem_reap_common(void *flag_arg)
30937c478bd9Sstevel@tonic-gate {
30947c478bd9Sstevel@tonic-gate uint32_t *flag = (uint32_t *)flag_arg;
30957c478bd9Sstevel@tonic-gate
30967c478bd9Sstevel@tonic-gate if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
309775d94465SJosef 'Jeff' Sipek atomic_cas_32(flag, 0, 1) != 0)
30987c478bd9Sstevel@tonic-gate return;
30997c478bd9Sstevel@tonic-gate
31007c478bd9Sstevel@tonic-gate /*
31017c478bd9Sstevel@tonic-gate * It may not be kosher to do memory allocation when a reap is called
31029321cd04SJosef 'Jeff' Sipek * (for example, if vmem_populate() is in the call chain). So we
31039321cd04SJosef 'Jeff' Sipek * start the reap going with a TQ_NOALLOC dispatch. If the dispatch
31049321cd04SJosef 'Jeff' Sipek * fails, we reset the flag, and the next reap will try again.
31057c478bd9Sstevel@tonic-gate */
31067c478bd9Sstevel@tonic-gate if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC))
31077c478bd9Sstevel@tonic-gate *flag = 0;
31087c478bd9Sstevel@tonic-gate }
31097c478bd9Sstevel@tonic-gate
31107c478bd9Sstevel@tonic-gate /*
31117c478bd9Sstevel@tonic-gate * Reclaim all unused memory from all caches. Called from the VM system
31127c478bd9Sstevel@tonic-gate * when memory gets tight.
31137c478bd9Sstevel@tonic-gate */
31147c478bd9Sstevel@tonic-gate void
31157c478bd9Sstevel@tonic-gate kmem_reap(void)
31167c478bd9Sstevel@tonic-gate {
31177c478bd9Sstevel@tonic-gate kmem_reap_common(&kmem_reaping);
31187c478bd9Sstevel@tonic-gate }
31197c478bd9Sstevel@tonic-gate
31207c478bd9Sstevel@tonic-gate /*
31217c478bd9Sstevel@tonic-gate * Reclaim all unused memory from identifier arenas, called when a vmem
31227c478bd9Sstevel@tonic-gate * arena not backed by memory is exhausted. Since reaping memory-backed caches
31237c478bd9Sstevel@tonic-gate * cannot help with identifier exhaustion, we avoid both a large amount of
31247c478bd9Sstevel@tonic-gate * work and unwanted side-effects from reclaim callbacks.
31257c478bd9Sstevel@tonic-gate */
31267c478bd9Sstevel@tonic-gate void
31277c478bd9Sstevel@tonic-gate kmem_reap_idspace(void)
31287c478bd9Sstevel@tonic-gate {
31297c478bd9Sstevel@tonic-gate kmem_reap_common(&kmem_reaping_idspace);
31307c478bd9Sstevel@tonic-gate }
31317c478bd9Sstevel@tonic-gate
31327c478bd9Sstevel@tonic-gate /*
31337c478bd9Sstevel@tonic-gate * Purge all magazines from a cache and set its magazine limit to zero.
31347c478bd9Sstevel@tonic-gate * All calls are serialized by the kmem_taskq lock, except for the final
31357c478bd9Sstevel@tonic-gate * call from kmem_cache_destroy().
31367c478bd9Sstevel@tonic-gate */
31377c478bd9Sstevel@tonic-gate static void
31387c478bd9Sstevel@tonic-gate kmem_cache_magazine_purge(kmem_cache_t *cp)
31397c478bd9Sstevel@tonic-gate {
31407c478bd9Sstevel@tonic-gate kmem_cpu_cache_t *ccp;
31417c478bd9Sstevel@tonic-gate kmem_magazine_t *mp, *pmp;
31427c478bd9Sstevel@tonic-gate int rounds, prounds, cpu_seqid;
31437c478bd9Sstevel@tonic-gate
3144b5fca8f8Stomee ASSERT(!list_link_active(&cp->cache_link) ||
3145b5fca8f8Stomee taskq_member(kmem_taskq, curthread));
31467c478bd9Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
31477c478bd9Sstevel@tonic-gate
31487c478bd9Sstevel@tonic-gate for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
31497c478bd9Sstevel@tonic-gate ccp = &cp->cache_cpu[cpu_seqid];
31507c478bd9Sstevel@tonic-gate
31517c478bd9Sstevel@tonic-gate mutex_enter(&ccp->cc_lock);
31527c478bd9Sstevel@tonic-gate mp = ccp->cc_loaded;
31537c478bd9Sstevel@tonic-gate pmp = ccp->cc_ploaded;
31547c478bd9Sstevel@tonic-gate rounds = ccp->cc_rounds;
31557c478bd9Sstevel@tonic-gate prounds = ccp->cc_prounds;
31567c478bd9Sstevel@tonic-gate ccp->cc_loaded = NULL;
31577c478bd9Sstevel@tonic-gate ccp->cc_ploaded = NULL;
31587c478bd9Sstevel@tonic-gate ccp->cc_rounds = -1;
31597c478bd9Sstevel@tonic-gate ccp->cc_prounds = -1;
31607c478bd9Sstevel@tonic-gate ccp->cc_magsize = 0;
31617c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock);
31627c478bd9Sstevel@tonic-gate
31637c478bd9Sstevel@tonic-gate if (mp)
31647c478bd9Sstevel@tonic-gate kmem_magazine_destroy(cp, mp, rounds);
31657c478bd9Sstevel@tonic-gate if (pmp)
31667c478bd9Sstevel@tonic-gate kmem_magazine_destroy(cp, pmp, prounds);
31677c478bd9Sstevel@tonic-gate }
31687c478bd9Sstevel@tonic-gate
31690c833d64SJosef 'Jeff' Sipek kmem_depot_ws_zero(cp);
31707c478bd9Sstevel@tonic-gate kmem_depot_ws_reap(cp);
31717c478bd9Sstevel@tonic-gate }
31727c478bd9Sstevel@tonic-gate
31737c478bd9Sstevel@tonic-gate /*
31747c478bd9Sstevel@tonic-gate * Enable per-cpu magazines on a cache.
31757c478bd9Sstevel@tonic-gate */
31767c478bd9Sstevel@tonic-gate static void
31777c478bd9Sstevel@tonic-gate kmem_cache_magazine_enable(kmem_cache_t *cp)
31787c478bd9Sstevel@tonic-gate {
31797c478bd9Sstevel@tonic-gate int cpu_seqid;
31807c478bd9Sstevel@tonic-gate
31817c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_NOMAGAZINE)
31827c478bd9Sstevel@tonic-gate return;
31837c478bd9Sstevel@tonic-gate
31847c478bd9Sstevel@tonic-gate for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
31857c478bd9Sstevel@tonic-gate kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
31867c478bd9Sstevel@tonic-gate mutex_enter(&ccp->cc_lock);
31877c478bd9Sstevel@tonic-gate ccp->cc_magsize = cp->cache_magtype->mt_magsize;
31887c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock);
31897c478bd9Sstevel@tonic-gate }
31907c478bd9Sstevel@tonic-gate
31917c478bd9Sstevel@tonic-gate }
31927c478bd9Sstevel@tonic-gate
31937c478bd9Sstevel@tonic-gate /*
31940c833d64SJosef 'Jeff' Sipek * Reap (almost) everything right now.
3195fa9e4066Sahrens */
3196fa9e4066Sahrens void
3197fa9e4066Sahrens kmem_cache_reap_now(kmem_cache_t *cp)
3198fa9e4066Sahrens {
3199b5fca8f8Stomee ASSERT(list_link_active(&cp->cache_link));
3200b5fca8f8Stomee
32010c833d64SJosef 'Jeff' Sipek kmem_depot_ws_zero(cp);
3202fa9e4066Sahrens
3203fa9e4066Sahrens (void) taskq_dispatch(kmem_taskq,
3204fa9e4066Sahrens (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
3205fa9e4066Sahrens taskq_wait(kmem_taskq);
3206fa9e4066Sahrens }
3207fa9e4066Sahrens
3208fa9e4066Sahrens /*
32097c478bd9Sstevel@tonic-gate * Recompute a cache's magazine size. The trade-off is that larger magazines
32107c478bd9Sstevel@tonic-gate * provide a higher transfer rate with the depot, while smaller magazines
32117c478bd9Sstevel@tonic-gate * reduce memory consumption. Magazine resizing is an expensive operation;
32127c478bd9Sstevel@tonic-gate * it should not be done frequently.
32137c478bd9Sstevel@tonic-gate *
32147c478bd9Sstevel@tonic-gate * Changes to the magazine size are serialized by the kmem_taskq lock.
32157c478bd9Sstevel@tonic-gate *
32167c478bd9Sstevel@tonic-gate * Note: at present this only grows the magazine size. It might be useful
32177c478bd9Sstevel@tonic-gate * to allow shrinkage too.
32187c478bd9Sstevel@tonic-gate */
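/*
 * Note that growing simply steps to the next entry of the kmem_magtype[]
 * table (assumed here to be ordered by increasing mt_magsize), after purging
 * the magazines that were built at the old size.
 */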
32197c478bd9Sstevel@tonic-gate static void
32207c478bd9Sstevel@tonic-gate kmem_cache_magazine_resize(kmem_cache_t *cp)
32217c478bd9Sstevel@tonic-gate {
32227c478bd9Sstevel@tonic-gate kmem_magtype_t *mtp = cp->cache_magtype;
32237c478bd9Sstevel@tonic-gate
32247c478bd9Sstevel@tonic-gate ASSERT(taskq_member(kmem_taskq, curthread));
32257c478bd9Sstevel@tonic-gate
32267c478bd9Sstevel@tonic-gate if (cp->cache_chunksize < mtp->mt_maxbuf) {
32277c478bd9Sstevel@tonic-gate kmem_cache_magazine_purge(cp);
32287c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_depot_lock);
32297c478bd9Sstevel@tonic-gate cp->cache_magtype = ++mtp;
32307c478bd9Sstevel@tonic-gate cp->cache_depot_contention_prev =
32317c478bd9Sstevel@tonic-gate cp->cache_depot_contention + INT_MAX;
32327c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_depot_lock);
32337c478bd9Sstevel@tonic-gate kmem_cache_magazine_enable(cp);
32347c478bd9Sstevel@tonic-gate }
32357c478bd9Sstevel@tonic-gate }
32367c478bd9Sstevel@tonic-gate
32377c478bd9Sstevel@tonic-gate /*
32387c478bd9Sstevel@tonic-gate * Rescale a cache's hash table, so that the table size is roughly the
32397c478bd9Sstevel@tonic-gate * cache size. We want the average lookup time to be extremely small.
32407c478bd9Sstevel@tonic-gate */
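/*
 * A worked example of the sizing rule below (illustrative; it assumes
 * KMEM_HASH_INITIAL is 64): with cache_buftotal of 10000 buffers,
 * highbit(3 * 10000 + 4) = highbit(30004) = 15, so
 * new_size = 1 << (15 - 2) = 8192 buckets, roughly the cache size; the table
 * is only rebuilt when it drifts outside a factor of two of that.
 */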
32417c478bd9Sstevel@tonic-gate static void
32427c478bd9Sstevel@tonic-gate kmem_hash_rescale(kmem_cache_t *cp)
32437c478bd9Sstevel@tonic-gate {
32447c478bd9Sstevel@tonic-gate kmem_bufctl_t **old_table, **new_table, *bcp;
32457c478bd9Sstevel@tonic-gate size_t old_size, new_size, h;
32467c478bd9Sstevel@tonic-gate
32477c478bd9Sstevel@tonic-gate ASSERT(taskq_member(kmem_taskq, curthread));
32487c478bd9Sstevel@tonic-gate
32497c478bd9Sstevel@tonic-gate new_size = MAX(KMEM_HASH_INITIAL,
32507c478bd9Sstevel@tonic-gate 1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
32517c478bd9Sstevel@tonic-gate old_size = cp->cache_hash_mask + 1;
32527c478bd9Sstevel@tonic-gate
32537c478bd9Sstevel@tonic-gate if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
32547c478bd9Sstevel@tonic-gate return;
32557c478bd9Sstevel@tonic-gate
32567c478bd9Sstevel@tonic-gate new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *),
32577c478bd9Sstevel@tonic-gate VM_NOSLEEP);
32587c478bd9Sstevel@tonic-gate if (new_table == NULL)
32597c478bd9Sstevel@tonic-gate return;
32607c478bd9Sstevel@tonic-gate bzero(new_table, new_size * sizeof (void *));
32617c478bd9Sstevel@tonic-gate
32627c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_lock);
32637c478bd9Sstevel@tonic-gate
32647c478bd9Sstevel@tonic-gate old_size = cp->cache_hash_mask + 1;
32657c478bd9Sstevel@tonic-gate old_table = cp->cache_hash_table;
32667c478bd9Sstevel@tonic-gate
32677c478bd9Sstevel@tonic-gate cp->cache_hash_mask = new_size - 1;
32687c478bd9Sstevel@tonic-gate cp->cache_hash_table = new_table;
32697c478bd9Sstevel@tonic-gate cp->cache_rescale++;
32707c478bd9Sstevel@tonic-gate
32717c478bd9Sstevel@tonic-gate for (h = 0; h < old_size; h++) {
32727c478bd9Sstevel@tonic-gate bcp = old_table[h];
32737c478bd9Sstevel@tonic-gate while (bcp != NULL) {
32747c478bd9Sstevel@tonic-gate void *addr = bcp->bc_addr;
32757c478bd9Sstevel@tonic-gate kmem_bufctl_t *next_bcp = bcp->bc_next;
32767c478bd9Sstevel@tonic-gate kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
32777c478bd9Sstevel@tonic-gate bcp->bc_next = *hash_bucket;
32787c478bd9Sstevel@tonic-gate *hash_bucket = bcp;
32797c478bd9Sstevel@tonic-gate bcp = next_bcp;
32807c478bd9Sstevel@tonic-gate }
32817c478bd9Sstevel@tonic-gate }
32827c478bd9Sstevel@tonic-gate
32837c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock);
32847c478bd9Sstevel@tonic-gate
32857c478bd9Sstevel@tonic-gate vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *));
32867c478bd9Sstevel@tonic-gate }
32877c478bd9Sstevel@tonic-gate
32887c478bd9Sstevel@tonic-gate /*
3289b5fca8f8Stomee * Perform periodic maintenance on a cache: hash rescaling, depot working-set
3290b5fca8f8Stomee * update, magazine resizing, and slab consolidation.
32917c478bd9Sstevel@tonic-gate */
32927c478bd9Sstevel@tonic-gate static void
32937c478bd9Sstevel@tonic-gate kmem_cache_update(kmem_cache_t *cp)
32947c478bd9Sstevel@tonic-gate {
32957c478bd9Sstevel@tonic-gate int need_hash_rescale = 0;
32967c478bd9Sstevel@tonic-gate int need_magazine_resize = 0;
32977c478bd9Sstevel@tonic-gate
32987c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&kmem_cache_lock));
32997c478bd9Sstevel@tonic-gate
33007c478bd9Sstevel@tonic-gate /*
33017c478bd9Sstevel@tonic-gate * If the cache has become much larger or smaller than its hash table,
33027c478bd9Sstevel@tonic-gate * fire off a request to rescale the hash table.
33037c478bd9Sstevel@tonic-gate */
33047c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_lock);
33057c478bd9Sstevel@tonic-gate
33067c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_HASH) &&
33077c478bd9Sstevel@tonic-gate (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
33087c478bd9Sstevel@tonic-gate (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
33097c478bd9Sstevel@tonic-gate cp->cache_hash_mask > KMEM_HASH_INITIAL)))
33107c478bd9Sstevel@tonic-gate need_hash_rescale = 1;
33117c478bd9Sstevel@tonic-gate
33127c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock);
33137c478bd9Sstevel@tonic-gate
33147c478bd9Sstevel@tonic-gate /*
33157c478bd9Sstevel@tonic-gate * Update the depot working set statistics.
33167c478bd9Sstevel@tonic-gate */
33177c478bd9Sstevel@tonic-gate kmem_depot_ws_update(cp);
33187c478bd9Sstevel@tonic-gate
33197c478bd9Sstevel@tonic-gate /*
33207c478bd9Sstevel@tonic-gate * If there's a lot of contention in the depot,
33217c478bd9Sstevel@tonic-gate * increase the magazine size.
33227c478bd9Sstevel@tonic-gate */
33237c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_depot_lock);
33247c478bd9Sstevel@tonic-gate
33257c478bd9Sstevel@tonic-gate if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
33267c478bd9Sstevel@tonic-gate (int)(cp->cache_depot_contention -
33277c478bd9Sstevel@tonic-gate cp->cache_depot_contention_prev) > kmem_depot_contention)
33287c478bd9Sstevel@tonic-gate need_magazine_resize = 1;
33297c478bd9Sstevel@tonic-gate
33307c478bd9Sstevel@tonic-gate cp->cache_depot_contention_prev = cp->cache_depot_contention;
33317c478bd9Sstevel@tonic-gate
33327c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_depot_lock);
33337c478bd9Sstevel@tonic-gate
33347c478bd9Sstevel@tonic-gate if (need_hash_rescale)
33357c478bd9Sstevel@tonic-gate (void) taskq_dispatch(kmem_taskq,
33367c478bd9Sstevel@tonic-gate (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
33377c478bd9Sstevel@tonic-gate
33387c478bd9Sstevel@tonic-gate if (need_magazine_resize)
33397c478bd9Sstevel@tonic-gate (void) taskq_dispatch(kmem_taskq,
33407c478bd9Sstevel@tonic-gate (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
3341b5fca8f8Stomee
3342b5fca8f8Stomee if (cp->cache_defrag != NULL)
3343b5fca8f8Stomee (void) taskq_dispatch(kmem_taskq,
3344b5fca8f8Stomee (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
33457c478bd9Sstevel@tonic-gate }
33467c478bd9Sstevel@tonic-gate
3347d67944fbSScott Rotondo static void kmem_update(void *);
3348d67944fbSScott Rotondo
33497c478bd9Sstevel@tonic-gate static void
33507c478bd9Sstevel@tonic-gate kmem_update_timeout(void *dummy)
33517c478bd9Sstevel@tonic-gate {
33527c478bd9Sstevel@tonic-gate (void) timeout(kmem_update, dummy, kmem_reap_interval);
33537c478bd9Sstevel@tonic-gate }
33547c478bd9Sstevel@tonic-gate
33557c478bd9Sstevel@tonic-gate static void
33567c478bd9Sstevel@tonic-gate kmem_update(void *dummy)
33577c478bd9Sstevel@tonic-gate {
33587c478bd9Sstevel@tonic-gate kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP);
33597c478bd9Sstevel@tonic-gate
33607c478bd9Sstevel@tonic-gate /*
33617c478bd9Sstevel@tonic-gate * We use taskq_dispatch() to reschedule the timeout so that
33627c478bd9Sstevel@tonic-gate * kmem_update() becomes self-throttling: it won't schedule
33637c478bd9Sstevel@tonic-gate * new tasks until all previous tasks have completed.
33647c478bd9Sstevel@tonic-gate */
33657c478bd9Sstevel@tonic-gate if (!taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP))
33667c478bd9Sstevel@tonic-gate kmem_update_timeout(NULL);
33677c478bd9Sstevel@tonic-gate }
33687c478bd9Sstevel@tonic-gate
33697c478bd9Sstevel@tonic-gate static int
33707c478bd9Sstevel@tonic-gate kmem_cache_kstat_update(kstat_t *ksp, int rw)
33717c478bd9Sstevel@tonic-gate {
33727c478bd9Sstevel@tonic-gate struct kmem_cache_kstat *kmcp = &kmem_cache_kstat;
33737c478bd9Sstevel@tonic-gate kmem_cache_t *cp = ksp->ks_private;
33747c478bd9Sstevel@tonic-gate uint64_t cpu_buf_avail;
33757c478bd9Sstevel@tonic-gate uint64_t buf_avail = 0;
33767c478bd9Sstevel@tonic-gate int cpu_seqid;
3377686031edSTom Erickson long reap;
33787c478bd9Sstevel@tonic-gate
33797c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock));
33807c478bd9Sstevel@tonic-gate
33817c478bd9Sstevel@tonic-gate if (rw == KSTAT_WRITE)
33827c478bd9Sstevel@tonic-gate return (EACCES);
33837c478bd9Sstevel@tonic-gate
33847c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_lock);
33857c478bd9Sstevel@tonic-gate
33867c478bd9Sstevel@tonic-gate kmcp->kmc_alloc_fail.value.ui64 = cp->cache_alloc_fail;
33877c478bd9Sstevel@tonic-gate kmcp->kmc_alloc.value.ui64 = cp->cache_slab_alloc;
33887c478bd9Sstevel@tonic-gate kmcp->kmc_free.value.ui64 = cp->cache_slab_free;
33897c478bd9Sstevel@tonic-gate kmcp->kmc_slab_alloc.value.ui64 = cp->cache_slab_alloc;
33907c478bd9Sstevel@tonic-gate kmcp->kmc_slab_free.value.ui64 = cp->cache_slab_free;
33917c478bd9Sstevel@tonic-gate
33927c478bd9Sstevel@tonic-gate for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
33937c478bd9Sstevel@tonic-gate kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
33947c478bd9Sstevel@tonic-gate
33957c478bd9Sstevel@tonic-gate mutex_enter(&ccp->cc_lock);
33967c478bd9Sstevel@tonic-gate
33977c478bd9Sstevel@tonic-gate cpu_buf_avail = 0;
33987c478bd9Sstevel@tonic-gate if (ccp->cc_rounds > 0)
33997c478bd9Sstevel@tonic-gate cpu_buf_avail += ccp->cc_rounds;
34007c478bd9Sstevel@tonic-gate if (ccp->cc_prounds > 0)
34017c478bd9Sstevel@tonic-gate cpu_buf_avail += ccp->cc_prounds;
34027c478bd9Sstevel@tonic-gate
34037c478bd9Sstevel@tonic-gate kmcp->kmc_alloc.value.ui64 += ccp->cc_alloc;
34047c478bd9Sstevel@tonic-gate kmcp->kmc_free.value.ui64 += ccp->cc_free;
34057c478bd9Sstevel@tonic-gate buf_avail += cpu_buf_avail;
34067c478bd9Sstevel@tonic-gate
34077c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock);
34087c478bd9Sstevel@tonic-gate }
34097c478bd9Sstevel@tonic-gate
34107c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_depot_lock);
34117c478bd9Sstevel@tonic-gate
34127c478bd9Sstevel@tonic-gate kmcp->kmc_depot_alloc.value.ui64 = cp->cache_full.ml_alloc;
34137c478bd9Sstevel@tonic-gate kmcp->kmc_depot_free.value.ui64 = cp->cache_empty.ml_alloc;
34147c478bd9Sstevel@tonic-gate kmcp->kmc_depot_contention.value.ui64 = cp->cache_depot_contention;
34157c478bd9Sstevel@tonic-gate kmcp->kmc_full_magazines.value.ui64 = cp->cache_full.ml_total;
34167c478bd9Sstevel@tonic-gate kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total;
34177c478bd9Sstevel@tonic-gate kmcp->kmc_magazine_size.value.ui64 =
34187c478bd9Sstevel@tonic-gate (cp->cache_flags & KMF_NOMAGAZINE) ?
34197c478bd9Sstevel@tonic-gate 0 : cp->cache_magtype->mt_magsize;
34207c478bd9Sstevel@tonic-gate
34217c478bd9Sstevel@tonic-gate kmcp->kmc_alloc.value.ui64 += cp->cache_full.ml_alloc;
34227c478bd9Sstevel@tonic-gate kmcp->kmc_free.value.ui64 += cp->cache_empty.ml_alloc;
34237c478bd9Sstevel@tonic-gate buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
34247c478bd9Sstevel@tonic-gate
3425686031edSTom Erickson reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
3426686031edSTom Erickson reap = MIN(reap, cp->cache_full.ml_total);
3427686031edSTom Erickson
34287c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_depot_lock);
34297c478bd9Sstevel@tonic-gate
34307c478bd9Sstevel@tonic-gate kmcp->kmc_buf_size.value.ui64 = cp->cache_bufsize;
34317c478bd9Sstevel@tonic-gate kmcp->kmc_align.value.ui64 = cp->cache_align;
34327c478bd9Sstevel@tonic-gate kmcp->kmc_chunk_size.value.ui64 = cp->cache_chunksize;
34337c478bd9Sstevel@tonic-gate kmcp->kmc_slab_size.value.ui64 = cp->cache_slabsize;
34347c478bd9Sstevel@tonic-gate kmcp->kmc_buf_constructed.value.ui64 = buf_avail;
34359f1b636aStomee buf_avail += cp->cache_bufslab;
34367c478bd9Sstevel@tonic-gate kmcp->kmc_buf_avail.value.ui64 = buf_avail;
34377c478bd9Sstevel@tonic-gate kmcp->kmc_buf_inuse.value.ui64 = cp->cache_buftotal - buf_avail;
34387c478bd9Sstevel@tonic-gate kmcp->kmc_buf_total.value.ui64 = cp->cache_buftotal;
34397c478bd9Sstevel@tonic-gate kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax;
34407c478bd9Sstevel@tonic-gate kmcp->kmc_slab_create.value.ui64 = cp->cache_slab_create;
34417c478bd9Sstevel@tonic-gate kmcp->kmc_slab_destroy.value.ui64 = cp->cache_slab_destroy;
34427c478bd9Sstevel@tonic-gate kmcp->kmc_hash_size.value.ui64 = (cp->cache_flags & KMF_HASH) ?
34437c478bd9Sstevel@tonic-gate cp->cache_hash_mask + 1 : 0;
34447c478bd9Sstevel@tonic-gate kmcp->kmc_hash_lookup_depth.value.ui64 = cp->cache_lookup_depth;
34457c478bd9Sstevel@tonic-gate kmcp->kmc_hash_rescale.value.ui64 = cp->cache_rescale;
34467c478bd9Sstevel@tonic-gate kmcp->kmc_vmem_source.value.ui64 = cp->cache_arena->vm_id;
3447686031edSTom Erickson kmcp->kmc_reap.value.ui64 = cp->cache_reap;
34487c478bd9Sstevel@tonic-gate
3449b5fca8f8Stomee if (cp->cache_defrag == NULL) {
3450b5fca8f8Stomee kmcp->kmc_move_callbacks.value.ui64 = 0;
3451b5fca8f8Stomee kmcp->kmc_move_yes.value.ui64 = 0;
3452b5fca8f8Stomee kmcp->kmc_move_no.value.ui64 = 0;
3453b5fca8f8Stomee kmcp->kmc_move_later.value.ui64 = 0;
3454b5fca8f8Stomee kmcp->kmc_move_dont_need.value.ui64 = 0;
3455b5fca8f8Stomee kmcp->kmc_move_dont_know.value.ui64 = 0;
3456b5fca8f8Stomee kmcp->kmc_move_hunt_found.value.ui64 = 0;
3457686031edSTom Erickson kmcp->kmc_move_slabs_freed.value.ui64 = 0;
3458686031edSTom Erickson kmcp->kmc_defrag.value.ui64 = 0;
3459686031edSTom Erickson kmcp->kmc_scan.value.ui64 = 0;
3460686031edSTom Erickson kmcp->kmc_move_reclaimable.value.ui64 = 0;
3461b5fca8f8Stomee } else {
3462686031edSTom Erickson int64_t reclaimable;
3463686031edSTom Erickson
3464b5fca8f8Stomee kmem_defrag_t *kd = cp->cache_defrag;
3465b5fca8f8Stomee kmcp->kmc_move_callbacks.value.ui64 = kd->kmd_callbacks;
3466b5fca8f8Stomee kmcp->kmc_move_yes.value.ui64 = kd->kmd_yes;
3467b5fca8f8Stomee kmcp->kmc_move_no.value.ui64 = kd->kmd_no;
3468b5fca8f8Stomee kmcp->kmc_move_later.value.ui64 = kd->kmd_later;
3469b5fca8f8Stomee kmcp->kmc_move_dont_need.value.ui64 = kd->kmd_dont_need;
3470b5fca8f8Stomee kmcp->kmc_move_dont_know.value.ui64 = kd->kmd_dont_know;
3471aa7175abSBryan Cantrill kmcp->kmc_move_hunt_found.value.ui64 = 0;
3472686031edSTom Erickson kmcp->kmc_move_slabs_freed.value.ui64 = kd->kmd_slabs_freed;
3473686031edSTom Erickson kmcp->kmc_defrag.value.ui64 = kd->kmd_defrags;
3474686031edSTom Erickson kmcp->kmc_scan.value.ui64 = kd->kmd_scans;
3475686031edSTom Erickson
3476686031edSTom Erickson reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1);
3477686031edSTom Erickson reclaimable = MAX(reclaimable, 0);
3478686031edSTom Erickson reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
3479686031edSTom Erickson kmcp->kmc_move_reclaimable.value.ui64 = reclaimable;
3480b5fca8f8Stomee }
3481b5fca8f8Stomee
34827c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock);
34837c478bd9Sstevel@tonic-gate return (0);
34847c478bd9Sstevel@tonic-gate }
34857c478bd9Sstevel@tonic-gate
34867c478bd9Sstevel@tonic-gate /*
34877c478bd9Sstevel@tonic-gate * Return a named statistic about a particular cache.
34887c478bd9Sstevel@tonic-gate * This shouldn't be called very often, so it's currently designed for
34897c478bd9Sstevel@tonic-gate * simplicity (leverages existing kstat support) rather than efficiency.
34907c478bd9Sstevel@tonic-gate */
34917c478bd9Sstevel@tonic-gate uint64_t
34927c478bd9Sstevel@tonic-gate kmem_cache_stat(kmem_cache_t *cp, char *name)
34937c478bd9Sstevel@tonic-gate {
34947c478bd9Sstevel@tonic-gate int i;
34957c478bd9Sstevel@tonic-gate kstat_t *ksp = cp->cache_kstat;
34967c478bd9Sstevel@tonic-gate kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat;
34977c478bd9Sstevel@tonic-gate uint64_t value = 0;
34987c478bd9Sstevel@tonic-gate
34997c478bd9Sstevel@tonic-gate if (ksp != NULL) {
35007c478bd9Sstevel@tonic-gate mutex_enter(&kmem_cache_kstat_lock);
35017c478bd9Sstevel@tonic-gate (void) kmem_cache_kstat_update(ksp, KSTAT_READ);
35027c478bd9Sstevel@tonic-gate for (i = 0; i < ksp->ks_ndata; i++) {
35037c478bd9Sstevel@tonic-gate if (strcmp(knp[i].name, name) == 0) {
35047c478bd9Sstevel@tonic-gate value = knp[i].value.ui64;
35057c478bd9Sstevel@tonic-gate break;
35067c478bd9Sstevel@tonic-gate }
35077c478bd9Sstevel@tonic-gate }
35087c478bd9Sstevel@tonic-gate mutex_exit(&kmem_cache_kstat_lock);
35097c478bd9Sstevel@tonic-gate }
35107c478bd9Sstevel@tonic-gate return (value);
35117c478bd9Sstevel@tonic-gate }
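/*
 * A usage sketch (the name must match one of the kstat_named_t entries in
 * kmem_cache_kstat; "alloc_fail" is shown purely as an example):
 *
 *	uint64_t fails = kmem_cache_stat(cp, "alloc_fail");
 */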
35127c478bd9Sstevel@tonic-gate
35137c478bd9Sstevel@tonic-gate /*
35147c478bd9Sstevel@tonic-gate * Return an estimate of currently available kernel heap memory.
35157c478bd9Sstevel@tonic-gate * On 32-bit systems, physical memory may exceed virtual memory,
35167c478bd9Sstevel@tonic-gate * so we just truncate the result at 1GB.
35177c478bd9Sstevel@tonic-gate */
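/*
 * The 1GB clamp below works out as follows (for example, with 4K pages,
 * PAGESHIFT is 12): 1 << (30 - 12) = 262144 pages, and ptob(262144) = 1GB.
 */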
35187c478bd9Sstevel@tonic-gate size_t
35197c478bd9Sstevel@tonic-gate kmem_avail(void)
35207c478bd9Sstevel@tonic-gate {
35217c478bd9Sstevel@tonic-gate spgcnt_t rmem = availrmem - tune.t_minarmem;
35227c478bd9Sstevel@tonic-gate spgcnt_t fmem = freemem - minfree;
35237c478bd9Sstevel@tonic-gate
35247c478bd9Sstevel@tonic-gate return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0),
35257c478bd9Sstevel@tonic-gate 1 << (30 - PAGESHIFT))));
35267c478bd9Sstevel@tonic-gate }
35277c478bd9Sstevel@tonic-gate
35287c478bd9Sstevel@tonic-gate /*
35297c478bd9Sstevel@tonic-gate * Return the maximum amount of memory that is (in theory) allocatable
35307c478bd9Sstevel@tonic-gate * from the heap. This may be used as an estimate only since there
35317c478bd9Sstevel@tonic-gate * is no guarantee this space will still be available when an allocation
35327c478bd9Sstevel@tonic-gate * request is made, nor that the space can be allocated in one big request
35337c478bd9Sstevel@tonic-gate * due to kernel heap fragmentation.
35347c478bd9Sstevel@tonic-gate */
35357c478bd9Sstevel@tonic-gate size_t
35367c478bd9Sstevel@tonic-gate kmem_maxavail(void)
35377c478bd9Sstevel@tonic-gate {
35387c478bd9Sstevel@tonic-gate spgcnt_t pmem = availrmem - tune.t_minarmem;
35397c478bd9Sstevel@tonic-gate spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE));
35407c478bd9Sstevel@tonic-gate
35417c478bd9Sstevel@tonic-gate return ((size_t)ptob(MAX(MIN(pmem, vmem), 0)));
35427c478bd9Sstevel@tonic-gate }
35437c478bd9Sstevel@tonic-gate
3544fa9e4066Sahrens /*
3545fa9e4066Sahrens * Indicate whether memory-intensive kmem debugging is enabled.
3546fa9e4066Sahrens */
3547fa9e4066Sahrens int
3548fa9e4066Sahrens kmem_debugging(void)
3549fa9e4066Sahrens {
3550fa9e4066Sahrens return (kmem_flags & (KMF_AUDIT | KMF_REDZONE));
3551fa9e4066Sahrens }
3552fa9e4066Sahrens
3553b5fca8f8Stomee /* binning function, sorts finely at the two extremes */
3554b5fca8f8Stomee #define KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift) \
3555b5fca8f8Stomee ((((sp)->slab_refcnt <= (binshift)) || \
3556b5fca8f8Stomee (((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift))) \
3557b5fca8f8Stomee ? -(sp)->slab_refcnt \
3558b5fca8f8Stomee : -((binshift) + ((sp)->slab_refcnt >> (binshift))))
3559b5fca8f8Stomee
3560b5fca8f8Stomee /*
3561b5fca8f8Stomee * Minimizing the number of partial slabs on the freelist minimizes
3562b5fca8f8Stomee * fragmentation (the ratio of unused buffers held by the slab layer). There are
3563b5fca8f8Stomee * two ways to get a slab off of the freelist: 1) free all the buffers on the
3564b5fca8f8Stomee * slab, and 2) allocate all the buffers on the slab. It follows that we want
3565b5fca8f8Stomee * the most-used slabs at the front of the list where they have the best chance
3566b5fca8f8Stomee * of being completely allocated, and the least-used slabs at a safe distance
3567b5fca8f8Stomee * from the front to improve the odds that the few remaining buffers will all be
3568b5fca8f8Stomee * freed before another allocation can tie up the slab. For that reason a slab
3569b5fca8f8Stomee * with a higher slab_refcnt sorts less than a slab with a lower
3570b5fca8f8Stomee * slab_refcnt.
3571b5fca8f8Stomee *
3572b5fca8f8Stomee * However, if a slab has at least one buffer that is deemed unfreeable, we
3573b5fca8f8Stomee * would rather have that slab at the front of the list regardless of
3574b5fca8f8Stomee * slab_refcnt, since even one unfreeable buffer makes the entire slab
3575b5fca8f8Stomee * unfreeable. If the client returns KMEM_CBRC_NO in response to a cache_move()
3576b5fca8f8Stomee * callback, the slab is marked unfreeable for as long as it remains on the
3577b5fca8f8Stomee * freelist.
3578b5fca8f8Stomee */
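/*
 * A worked example of the weighting (illustrative values): with a
 * cache_partial_binshift of 4 and 128-chunk slabs, a slab with
 * slab_refcnt == 100 falls in neither extreme, so its weight is
 * -(4 + (100 >> 4)) = -10, while a mostly free slab with slab_refcnt == 2
 * gets weight -2.  Since -10 < -2, the heavily used slab sorts toward the
 * front of the AVL tree, and a KMEM_SLAB_NOMOVE slab is pushed further
 * forward still by subtracting cache_maxchunks from its weight.
 */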
3579b5fca8f8Stomee static int
3580b5fca8f8Stomee kmem_partial_slab_cmp(const void *p0, const void *p1)
3581b5fca8f8Stomee {
3582b5fca8f8Stomee const kmem_cache_t *cp;
3583b5fca8f8Stomee const kmem_slab_t *s0 = p0;
3584b5fca8f8Stomee const kmem_slab_t *s1 = p1;
3585b5fca8f8Stomee int w0, w1;
3586b5fca8f8Stomee size_t binshift;
3587b5fca8f8Stomee
3588b5fca8f8Stomee ASSERT(KMEM_SLAB_IS_PARTIAL(s0));
3589b5fca8f8Stomee ASSERT(KMEM_SLAB_IS_PARTIAL(s1));
3590b5fca8f8Stomee ASSERT(s0->slab_cache == s1->slab_cache);
3591b5fca8f8Stomee cp = s1->slab_cache;
3592b5fca8f8Stomee ASSERT(MUTEX_HELD(&cp->cache_lock));
3593b5fca8f8Stomee binshift = cp->cache_partial_binshift;
3594b5fca8f8Stomee
3595b5fca8f8Stomee /* weight of first slab */
3596b5fca8f8Stomee w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift);
3597b5fca8f8Stomee if (s0->slab_flags & KMEM_SLAB_NOMOVE) {
3598b5fca8f8Stomee w0 -= cp->cache_maxchunks;
3599b5fca8f8Stomee }
3600b5fca8f8Stomee
3601b5fca8f8Stomee /* weight of second slab */
3602b5fca8f8Stomee w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift);
3603b5fca8f8Stomee if (s1->slab_flags & KMEM_SLAB_NOMOVE) {
3604b5fca8f8Stomee w1 -= cp->cache_maxchunks;
3605b5fca8f8Stomee }
3606b5fca8f8Stomee
3607b5fca8f8Stomee if (w0 < w1)
3608b5fca8f8Stomee return (-1);
3609b5fca8f8Stomee if (w0 > w1)
3610b5fca8f8Stomee return (1);
3611b5fca8f8Stomee
3612b5fca8f8Stomee /* compare pointer values */
3613b5fca8f8Stomee if ((uintptr_t)s0 < (uintptr_t)s1)
3614b5fca8f8Stomee return (-1);
3615b5fca8f8Stomee if ((uintptr_t)s0 > (uintptr_t)s1)
3616b5fca8f8Stomee return (1);
3617b5fca8f8Stomee
3618b5fca8f8Stomee return (0);
3619b5fca8f8Stomee }
3620b5fca8f8Stomee
3621b5fca8f8Stomee /*
3622b5fca8f8Stomee * It must be valid to call the destructor (if any) on a newly created object.
3623b5fca8f8Stomee * That is, the constructor (if any) must leave the object in a valid state for
3624b5fca8f8Stomee * the destructor.
3625b5fca8f8Stomee */
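/*
 * A minimal sketch of a client honoring that contract (hypothetical names):
 *
 *	static int
 *	foo_construct(void *buf, void *arg, int kmflag)
 *	{
 *		foo_t *fp = buf;
 *
 *		mutex_init(&fp->foo_lock, NULL, MUTEX_DEFAULT, NULL);
 *		fp->foo_refcnt = 0;
 *		return (0);
 *	}
 *
 *	static void
 *	foo_destruct(void *buf, void *arg)
 *	{
 *		foo_t *fp = buf;
 *
 *		ASSERT(fp->foo_refcnt == 0);
 *		mutex_destroy(&fp->foo_lock);
 *	}
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
 *	    foo_construct, foo_destruct, NULL, NULL, NULL, 0);
 *
 * Because foo_construct() leaves the object fully initialized, it is safe
 * for the allocator to invoke foo_destruct() on a newly constructed object,
 * as required above.
 */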
36267c478bd9Sstevel@tonic-gate kmem_cache_t *
36277c478bd9Sstevel@tonic-gate kmem_cache_create(
36287c478bd9Sstevel@tonic-gate char *name, /* descriptive name for this cache */
36297c478bd9Sstevel@tonic-gate size_t bufsize, /* size of the objects it manages */
36307c478bd9Sstevel@tonic-gate size_t align, /* required object alignment */
36317c478bd9Sstevel@tonic-gate int (*constructor)(void *, void *, int), /* object constructor */
36327c478bd9Sstevel@tonic-gate void (*destructor)(void *, void *), /* object destructor */
36337c478bd9Sstevel@tonic-gate void (*reclaim)(void *), /* memory reclaim callback */
36347c478bd9Sstevel@tonic-gate void *private, /* pass-thru arg for constr/destr/reclaim */
36357c478bd9Sstevel@tonic-gate vmem_t *vmp, /* vmem source for slab allocation */
36367c478bd9Sstevel@tonic-gate int cflags) /* cache creation flags */
36377c478bd9Sstevel@tonic-gate {
36387c478bd9Sstevel@tonic-gate int cpu_seqid;
36397c478bd9Sstevel@tonic-gate size_t chunksize;
3640b5fca8f8Stomee kmem_cache_t *cp;
36417c478bd9Sstevel@tonic-gate kmem_magtype_t *mtp;
36427c478bd9Sstevel@tonic-gate size_t csize = KMEM_CACHE_SIZE(max_ncpus);
36437c478bd9Sstevel@tonic-gate
36447c478bd9Sstevel@tonic-gate #ifdef DEBUG
36457c478bd9Sstevel@tonic-gate /*
36467c478bd9Sstevel@tonic-gate * Cache names should conform to the rules for valid C identifiers
36477c478bd9Sstevel@tonic-gate */
36487c478bd9Sstevel@tonic-gate if (!strident_valid(name)) {
36497c478bd9Sstevel@tonic-gate cmn_err(CE_CONT,
36507c478bd9Sstevel@tonic-gate "kmem_cache_create: '%s' is an invalid cache name\n"
36517c478bd9Sstevel@tonic-gate "cache names must conform to the rules for "
36527c478bd9Sstevel@tonic-gate "C identifiers\n", name);
36537c478bd9Sstevel@tonic-gate }
36547c478bd9Sstevel@tonic-gate #endif /* DEBUG */
36557c478bd9Sstevel@tonic-gate
36567c478bd9Sstevel@tonic-gate if (vmp == NULL)
36577c478bd9Sstevel@tonic-gate vmp = kmem_default_arena;
36587c478bd9Sstevel@tonic-gate
36597c478bd9Sstevel@tonic-gate /*
36607c478bd9Sstevel@tonic-gate * If this kmem cache has an identifier vmem arena as its source, mark
36617c478bd9Sstevel@tonic-gate * it such to allow kmem_reap_idspace().
36627c478bd9Sstevel@tonic-gate */
36637c478bd9Sstevel@tonic-gate ASSERT(!(cflags & KMC_IDENTIFIER)); /* consumer should not set this */
36647c478bd9Sstevel@tonic-gate if (vmp->vm_cflags & VMC_IDENTIFIER)
36657c478bd9Sstevel@tonic-gate cflags |= KMC_IDENTIFIER;
36667c478bd9Sstevel@tonic-gate
36677c478bd9Sstevel@tonic-gate /*
36687c478bd9Sstevel@tonic-gate * Get a kmem_cache structure. We arrange that cp->cache_cpu[]
36697c478bd9Sstevel@tonic-gate * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent
36707c478bd9Sstevel@tonic-gate * false sharing of per-CPU data.
36717c478bd9Sstevel@tonic-gate */
36727c478bd9Sstevel@tonic-gate cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
36737c478bd9Sstevel@tonic-gate P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP);
36747c478bd9Sstevel@tonic-gate bzero(cp, csize);
3675b5fca8f8Stomee list_link_init(&cp->cache_link);
36767c478bd9Sstevel@tonic-gate
36777c478bd9Sstevel@tonic-gate if (align == 0)
36787c478bd9Sstevel@tonic-gate align = KMEM_ALIGN;
36797c478bd9Sstevel@tonic-gate
36807c478bd9Sstevel@tonic-gate /*
36817c478bd9Sstevel@tonic-gate * If we're not at least KMEM_ALIGN aligned, we can't use free
36827c478bd9Sstevel@tonic-gate * memory to hold bufctl information (because we can't safely
36837c478bd9Sstevel@tonic-gate * perform word loads and stores on it).
36847c478bd9Sstevel@tonic-gate */
36857c478bd9Sstevel@tonic-gate if (align < KMEM_ALIGN)
36867c478bd9Sstevel@tonic-gate cflags |= KMC_NOTOUCH;
36877c478bd9Sstevel@tonic-gate
3688de710d24SJosef 'Jeff' Sipek if (!ISP2(align) || align > vmp->vm_quantum)
36897c478bd9Sstevel@tonic-gate panic("kmem_cache_create: bad alignment %lu", align);
36907c478bd9Sstevel@tonic-gate
36917c478bd9Sstevel@tonic-gate mutex_enter(&kmem_flags_lock);
36927c478bd9Sstevel@tonic-gate if (kmem_flags & KMF_RANDOMIZE)
36937c478bd9Sstevel@tonic-gate kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) |
36947c478bd9Sstevel@tonic-gate KMF_RANDOMIZE;
36957c478bd9Sstevel@tonic-gate cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
36967c478bd9Sstevel@tonic-gate mutex_exit(&kmem_flags_lock);
36977c478bd9Sstevel@tonic-gate
36987c478bd9Sstevel@tonic-gate /*
36997c478bd9Sstevel@tonic-gate * Make sure all the various flags are reasonable.
37007c478bd9Sstevel@tonic-gate */
37017c478bd9Sstevel@tonic-gate ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH));
37027c478bd9Sstevel@tonic-gate
37037c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_LITE) {
37047c478bd9Sstevel@tonic-gate if (bufsize >= kmem_lite_minsize &&
37057c478bd9Sstevel@tonic-gate align <= kmem_lite_maxalign &&
37067c478bd9Sstevel@tonic-gate P2PHASE(bufsize, kmem_lite_maxalign) != 0) {
37077c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_BUFTAG;
37087c478bd9Sstevel@tonic-gate cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
37097c478bd9Sstevel@tonic-gate } else {
37107c478bd9Sstevel@tonic-gate cp->cache_flags &= ~KMF_DEBUG;
37117c478bd9Sstevel@tonic-gate }
37127c478bd9Sstevel@tonic-gate }
37137c478bd9Sstevel@tonic-gate
37147c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_DEADBEEF)
37157c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_REDZONE;
37167c478bd9Sstevel@tonic-gate
37177c478bd9Sstevel@tonic-gate if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
37187c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_NOMAGAZINE;
37197c478bd9Sstevel@tonic-gate
37207c478bd9Sstevel@tonic-gate if (cflags & KMC_NODEBUG)
37217c478bd9Sstevel@tonic-gate cp->cache_flags &= ~KMF_DEBUG;
37227c478bd9Sstevel@tonic-gate
37237c478bd9Sstevel@tonic-gate if (cflags & KMC_NOTOUCH)
37247c478bd9Sstevel@tonic-gate cp->cache_flags &= ~KMF_TOUCH;
37257c478bd9Sstevel@tonic-gate
3726b942e89bSDavid Valin if (cflags & KMC_PREFILL)
3727b942e89bSDavid Valin cp->cache_flags |= KMF_PREFILL;
3728b942e89bSDavid Valin
37297c478bd9Sstevel@tonic-gate if (cflags & KMC_NOHASH)
37307c478bd9Sstevel@tonic-gate cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
37317c478bd9Sstevel@tonic-gate
37327c478bd9Sstevel@tonic-gate if (cflags & KMC_NOMAGAZINE)
37337c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_NOMAGAZINE;
37347c478bd9Sstevel@tonic-gate
37357c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
37367c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_REDZONE;
37377c478bd9Sstevel@tonic-gate
37387c478bd9Sstevel@tonic-gate if (!(cp->cache_flags & KMF_AUDIT))
37397c478bd9Sstevel@tonic-gate cp->cache_flags &= ~KMF_CONTENTS;
37407c478bd9Sstevel@tonic-gate
37417c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
37427c478bd9Sstevel@tonic-gate !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
37437c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_FIREWALL;
37447c478bd9Sstevel@tonic-gate
37457c478bd9Sstevel@tonic-gate if (vmp != kmem_default_arena || kmem_firewall_arena == NULL)
37467c478bd9Sstevel@tonic-gate cp->cache_flags &= ~KMF_FIREWALL;
37477c478bd9Sstevel@tonic-gate
37487c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_FIREWALL) {
37497c478bd9Sstevel@tonic-gate cp->cache_flags &= ~KMF_BUFTAG;
37507c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_NOMAGAZINE;
37517c478bd9Sstevel@tonic-gate ASSERT(vmp == kmem_default_arena);
37527c478bd9Sstevel@tonic-gate vmp = kmem_firewall_arena;
37537c478bd9Sstevel@tonic-gate }
37547c478bd9Sstevel@tonic-gate
37557c478bd9Sstevel@tonic-gate /*
37567c478bd9Sstevel@tonic-gate * Set cache properties.
37577c478bd9Sstevel@tonic-gate */
37587c478bd9Sstevel@tonic-gate (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
3759b5fca8f8Stomee strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
37607c478bd9Sstevel@tonic-gate cp->cache_bufsize = bufsize;
37617c478bd9Sstevel@tonic-gate cp->cache_align = align;
37627c478bd9Sstevel@tonic-gate cp->cache_constructor = constructor;
37637c478bd9Sstevel@tonic-gate cp->cache_destructor = destructor;
37647c478bd9Sstevel@tonic-gate cp->cache_reclaim = reclaim;
37657c478bd9Sstevel@tonic-gate cp->cache_private = private;
37667c478bd9Sstevel@tonic-gate cp->cache_arena = vmp;
37677c478bd9Sstevel@tonic-gate cp->cache_cflags = cflags;
37687c478bd9Sstevel@tonic-gate
37697c478bd9Sstevel@tonic-gate /*
37707c478bd9Sstevel@tonic-gate * Determine the chunk size.
37717c478bd9Sstevel@tonic-gate */
37727c478bd9Sstevel@tonic-gate chunksize = bufsize;
37737c478bd9Sstevel@tonic-gate
37747c478bd9Sstevel@tonic-gate if (align >= KMEM_ALIGN) {
37757c478bd9Sstevel@tonic-gate chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN);
37767c478bd9Sstevel@tonic-gate cp->cache_bufctl = chunksize - KMEM_ALIGN;
37777c478bd9Sstevel@tonic-gate }
37787c478bd9Sstevel@tonic-gate
37797c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_BUFTAG) {
37807c478bd9Sstevel@tonic-gate cp->cache_bufctl = chunksize;
37817c478bd9Sstevel@tonic-gate cp->cache_buftag = chunksize;
37827c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_LITE)
37837c478bd9Sstevel@tonic-gate chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count);
37847c478bd9Sstevel@tonic-gate else
37857c478bd9Sstevel@tonic-gate chunksize += sizeof (kmem_buftag_t);
37867c478bd9Sstevel@tonic-gate }
37877c478bd9Sstevel@tonic-gate
37887c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_DEADBEEF) {
37897c478bd9Sstevel@tonic-gate cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
37907c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_LITE)
37917c478bd9Sstevel@tonic-gate cp->cache_verify = sizeof (uint64_t);
37927c478bd9Sstevel@tonic-gate }
37937c478bd9Sstevel@tonic-gate
37947c478bd9Sstevel@tonic-gate cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
37957c478bd9Sstevel@tonic-gate
37967c478bd9Sstevel@tonic-gate cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
37977c478bd9Sstevel@tonic-gate
37987c478bd9Sstevel@tonic-gate /*
37997c478bd9Sstevel@tonic-gate * Now that we know the chunk size, determine the optimal slab size.
38007c478bd9Sstevel@tonic-gate */
38017c478bd9Sstevel@tonic-gate if (vmp == kmem_firewall_arena) {
38027c478bd9Sstevel@tonic-gate cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
38037c478bd9Sstevel@tonic-gate cp->cache_mincolor = cp->cache_slabsize - chunksize;
38047c478bd9Sstevel@tonic-gate cp->cache_maxcolor = cp->cache_mincolor;
38057c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_HASH;
38067c478bd9Sstevel@tonic-gate ASSERT(!(cp->cache_flags & KMF_BUFTAG));
38077c478bd9Sstevel@tonic-gate } else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) &&
38087c478bd9Sstevel@tonic-gate !(cp->cache_flags & KMF_AUDIT) &&
38097c478bd9Sstevel@tonic-gate chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) {
38107c478bd9Sstevel@tonic-gate cp->cache_slabsize = vmp->vm_quantum;
38117c478bd9Sstevel@tonic-gate cp->cache_mincolor = 0;
38127c478bd9Sstevel@tonic-gate cp->cache_maxcolor =
38137c478bd9Sstevel@tonic-gate (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
38147c478bd9Sstevel@tonic-gate ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
38157c478bd9Sstevel@tonic-gate ASSERT(!(cp->cache_flags & KMF_AUDIT));
38167c478bd9Sstevel@tonic-gate } else {
38177c478bd9Sstevel@tonic-gate size_t chunks, bestfit, waste, slabsize;
38187c478bd9Sstevel@tonic-gate size_t minwaste = LONG_MAX;
38197c478bd9Sstevel@tonic-gate
38207c478bd9Sstevel@tonic-gate for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) {
38217c478bd9Sstevel@tonic-gate slabsize = P2ROUNDUP(chunksize * chunks,
38227c478bd9Sstevel@tonic-gate vmp->vm_quantum);
38237c478bd9Sstevel@tonic-gate chunks = slabsize / chunksize;
38247c478bd9Sstevel@tonic-gate waste = (slabsize % chunksize) / chunks;
38257c478bd9Sstevel@tonic-gate if (waste < minwaste) {
38267c478bd9Sstevel@tonic-gate minwaste = waste;
38277c478bd9Sstevel@tonic-gate bestfit = slabsize;
38287c478bd9Sstevel@tonic-gate }
38297c478bd9Sstevel@tonic-gate }
38307c478bd9Sstevel@tonic-gate if (cflags & KMC_QCACHE)
38317c478bd9Sstevel@tonic-gate bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max);
38327c478bd9Sstevel@tonic-gate cp->cache_slabsize = bestfit;
38337c478bd9Sstevel@tonic-gate cp->cache_mincolor = 0;
38347c478bd9Sstevel@tonic-gate cp->cache_maxcolor = bestfit % chunksize;
38357c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_HASH;
38367c478bd9Sstevel@tonic-gate }
38377c478bd9Sstevel@tonic-gate
3838b5fca8f8Stomee cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
3839b5fca8f8Stomee cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
3840b5fca8f8Stomee
3841b942e89bSDavid Valin /*
3842b942e89bSDavid Valin * Disallowing prefill when either the DEBUG or HASH flag is set or when
3843b942e89bSDavid Valin * there is a constructor avoids some tricky issues with debug setup
3844b942e89bSDavid Valin * that may be revisited later. We cannot allow prefill in a
3845b942e89bSDavid Valin * metadata cache because of potential recursion.
3846b942e89bSDavid Valin */
3847b942e89bSDavid Valin if (vmp == kmem_msb_arena ||
3848b942e89bSDavid Valin cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
3849b942e89bSDavid Valin cp->cache_constructor != NULL)
3850b942e89bSDavid Valin cp->cache_flags &= ~KMF_PREFILL;
3851b942e89bSDavid Valin
38527c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) {
38537c478bd9Sstevel@tonic-gate ASSERT(!(cflags & KMC_NOHASH));
38547c478bd9Sstevel@tonic-gate cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
38557c478bd9Sstevel@tonic-gate kmem_bufctl_audit_cache : kmem_bufctl_cache;
38567c478bd9Sstevel@tonic-gate }
38577c478bd9Sstevel@tonic-gate
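	/*
	 * A color of a full arena quantum or more would only repeat the same
	 * relative offsets, so clamp the color range to less than one quantum.
	 */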
38587c478bd9Sstevel@tonic-gate if (cp->cache_maxcolor >= vmp->vm_quantum)
38597c478bd9Sstevel@tonic-gate cp->cache_maxcolor = vmp->vm_quantum - 1;
38607c478bd9Sstevel@tonic-gate
38617c478bd9Sstevel@tonic-gate cp->cache_color = cp->cache_mincolor;
38627c478bd9Sstevel@tonic-gate
38637c478bd9Sstevel@tonic-gate /*
38647c478bd9Sstevel@tonic-gate * Initialize the rest of the slab layer.
38657c478bd9Sstevel@tonic-gate */
38667c478bd9Sstevel@tonic-gate mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
38677c478bd9Sstevel@tonic-gate
3868b5fca8f8Stomee avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
3869b5fca8f8Stomee sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3870b5fca8f8Stomee /* LINTED: E_TRUE_LOGICAL_EXPR */
3871b5fca8f8Stomee ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
3872b5fca8f8Stomee /* reuse partial slab AVL linkage for complete slab list linkage */
3873b5fca8f8Stomee list_create(&cp->cache_complete_slabs,
3874b5fca8f8Stomee sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
38757c478bd9Sstevel@tonic-gate
38767c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) {
38777c478bd9Sstevel@tonic-gate cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
38787c478bd9Sstevel@tonic-gate KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP);
38797c478bd9Sstevel@tonic-gate bzero(cp->cache_hash_table,
38807c478bd9Sstevel@tonic-gate KMEM_HASH_INITIAL * sizeof (void *));
38817c478bd9Sstevel@tonic-gate cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
38827c478bd9Sstevel@tonic-gate cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
38837c478bd9Sstevel@tonic-gate }
38847c478bd9Sstevel@tonic-gate
38857c478bd9Sstevel@tonic-gate /*
38867c478bd9Sstevel@tonic-gate * Initialize the depot.
38877c478bd9Sstevel@tonic-gate */
38887c478bd9Sstevel@tonic-gate mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
38897c478bd9Sstevel@tonic-gate
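	/*
	 * Walk the magazine-type table until we reach the first type whose
	 * mt_minbuf is below this cache's chunk size; the table is ordered so
	 * that small buffers get magazines with many rounds while large
	 * buffers get fewer rounds, keeping the per-CPU caches bounded.
	 */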
38907c478bd9Sstevel@tonic-gate for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
38917c478bd9Sstevel@tonic-gate continue;
38927c478bd9Sstevel@tonic-gate
38937c478bd9Sstevel@tonic-gate cp->cache_magtype = mtp;
38947c478bd9Sstevel@tonic-gate
38957c478bd9Sstevel@tonic-gate /*
38967c478bd9Sstevel@tonic-gate * Initialize the CPU layer.
38977c478bd9Sstevel@tonic-gate */
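	/*
	 * A rounds count of -1 indicates that no magazine is currently loaded
	 * in that position.
	 */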
38987c478bd9Sstevel@tonic-gate for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
38997c478bd9Sstevel@tonic-gate kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
39007c478bd9Sstevel@tonic-gate mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL);
39017c478bd9Sstevel@tonic-gate ccp->cc_flags = cp->cache_flags;
39027c478bd9Sstevel@tonic-gate ccp->cc_rounds = -1;
39037c478bd9Sstevel@tonic-gate ccp->cc_prounds = -1;
39047c478bd9Sstevel@tonic-gate }
39057c478bd9Sstevel@tonic-gate
39067c478bd9Sstevel@tonic-gate /*
39077c478bd9Sstevel@tonic-gate * Create the cache's kstats.
39087c478bd9Sstevel@tonic-gate */
39097c478bd9Sstevel@tonic-gate if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
39107c478bd9Sstevel@tonic-gate "kmem_cache", KSTAT_TYPE_NAMED,
39117c478bd9Sstevel@tonic-gate sizeof (kmem_cache_kstat) / sizeof (kstat_named_t),
39127c478bd9Sstevel@tonic-gate KSTAT_FLAG_VIRTUAL)) != NULL) {
39137c478bd9Sstevel@tonic-gate cp->cache_kstat->ks_data = &kmem_cache_kstat;
39147c478bd9Sstevel@tonic-gate cp->cache_kstat->ks_update = kmem_cache_kstat_update;
39157c478bd9Sstevel@tonic-gate cp->cache_kstat->ks_private = cp;
39167c478bd9Sstevel@tonic-gate cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
39177c478bd9Sstevel@tonic-gate kstat_install(cp->cache_kstat);
39187c478bd9Sstevel@tonic-gate }
39197c478bd9Sstevel@tonic-gate
39207c478bd9Sstevel@tonic-gate /*
39217c478bd9Sstevel@tonic-gate * Add the cache to the global list. This makes it visible
39227c478bd9Sstevel@tonic-gate * to kmem_update(), so the cache must be ready for business.
39237c478bd9Sstevel@tonic-gate */
39247c478bd9Sstevel@tonic-gate mutex_enter(&kmem_cache_lock);
3925b5fca8f8Stomee list_insert_tail(&kmem_caches, cp);
39267c478bd9Sstevel@tonic-gate mutex_exit(&kmem_cache_lock);
39277c478bd9Sstevel@tonic-gate
39287c478bd9Sstevel@tonic-gate if (kmem_ready)
39297c478bd9Sstevel@tonic-gate kmem_cache_magazine_enable(cp);
39307c478bd9Sstevel@tonic-gate
39317c478bd9Sstevel@tonic-gate return (cp);
39327c478bd9Sstevel@tonic-gate }
39337c478bd9Sstevel@tonic-gate
3934b5fca8f8Stomee static int
3935b5fca8f8Stomee kmem_move_cmp(const void *buf, const void *p)
3936b5fca8f8Stomee {
3937b5fca8f8Stomee const kmem_move_t *kmm = p;
3938b5fca8f8Stomee uintptr_t v1 = (uintptr_t)buf;
3939b5fca8f8Stomee uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf;
3940b5fca8f8Stomee return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0));
3941b5fca8f8Stomee }
3942b5fca8f8Stomee
3943b5fca8f8Stomee static void
3944b5fca8f8Stomee kmem_reset_reclaim_threshold(kmem_defrag_t *kmd)
3945b5fca8f8Stomee {
3946b5fca8f8Stomee kmd->kmd_reclaim_numer = 1;
3947b5fca8f8Stomee }
3948b5fca8f8Stomee
3949b5fca8f8Stomee /*
3950b5fca8f8Stomee * Initially, when choosing candidate slabs for buffers to move, we want to be
3951b5fca8f8Stomee * very selective and take only slabs that are less than
3952b5fca8f8Stomee * (1 / KMEM_VOID_FRACTION) allocated. If we have difficulty finding candidate
3953b5fca8f8Stomee * slabs, then we raise the allocation ceiling incrementally. The reclaim
3954b5fca8f8Stomee * threshold is reset to (1 / KMEM_VOID_FRACTION) as soon as the cache is no
3955b5fca8f8Stomee * longer fragmented.
3956b5fca8f8Stomee */
3957b5fca8f8Stomee static void
3958b5fca8f8Stomee kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction)
3959b5fca8f8Stomee {
3960b5fca8f8Stomee if (direction > 0) {
3961b5fca8f8Stomee /* make it easier to find a candidate slab */
3962b5fca8f8Stomee if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) {
3963b5fca8f8Stomee kmd->kmd_reclaim_numer++;
3964b5fca8f8Stomee }
3965b5fca8f8Stomee } else {
3966b5fca8f8Stomee /* be more selective */
3967b5fca8f8Stomee if (kmd->kmd_reclaim_numer > 1) {
3968b5fca8f8Stomee kmd->kmd_reclaim_numer--;
3969b5fca8f8Stomee }
3970b5fca8f8Stomee }
3971b5fca8f8Stomee }
3972b5fca8f8Stomee
3973b5fca8f8Stomee void
3974b5fca8f8Stomee kmem_cache_set_move(kmem_cache_t *cp,
3975b5fca8f8Stomee kmem_cbrc_t (*move)(void *, void *, size_t, void *))
3976b5fca8f8Stomee {
3977b5fca8f8Stomee kmem_defrag_t *defrag;
3978b5fca8f8Stomee
3979b5fca8f8Stomee ASSERT(move != NULL);
3980b5fca8f8Stomee /*
3981b5fca8f8Stomee * The consolidator does not support NOTOUCH caches because kmem cannot
3982b5fca8f8Stomee * initialize their slabs with the 0xbaddcafe memory pattern, which sets
3983b5fca8f8Stomee * a low order bit usable by clients to distinguish uninitialized memory
3984b5fca8f8Stomee * from known objects (see kmem_slab_create).
3985b5fca8f8Stomee */
3986b5fca8f8Stomee ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
3987b5fca8f8Stomee ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
3988b5fca8f8Stomee
3989b5fca8f8Stomee /*
3990b5fca8f8Stomee * We should not be holding anyone's cache lock when calling
3991b5fca8f8Stomee * kmem_cache_alloc(), so allocate in all cases before acquiring the
3992b5fca8f8Stomee * lock.
3993b5fca8f8Stomee */
3994b5fca8f8Stomee defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP);
3995b5fca8f8Stomee
3996b5fca8f8Stomee mutex_enter(&cp->cache_lock);
3997b5fca8f8Stomee
3998b5fca8f8Stomee if (KMEM_IS_MOVABLE(cp)) {
3999b5fca8f8Stomee if (cp->cache_move == NULL) {
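			/*
			 * The move callback must be registered before the
			 * cache allocates its first slab.
			 */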
40004d4c4c43STom Erickson ASSERT(cp->cache_slab_alloc == 0);
4001b5fca8f8Stomee
4002b5fca8f8Stomee cp->cache_defrag = defrag;
4003b5fca8f8Stomee defrag = NULL; /* nothing to free */
4004b5fca8f8Stomee bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
4005b5fca8f8Stomee avl_create(&cp->cache_defrag->kmd_moves_pending,
4006b5fca8f8Stomee kmem_move_cmp, sizeof (kmem_move_t),
4007b5fca8f8Stomee offsetof(kmem_move_t, kmm_entry));
4008b5fca8f8Stomee /* LINTED: E_TRUE_LOGICAL_EXPR */
4009b5fca8f8Stomee ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
4010b5fca8f8Stomee /* reuse the slab's AVL linkage for deadlist linkage */
4011b5fca8f8Stomee list_create(&cp->cache_defrag->kmd_deadlist,
4012b5fca8f8Stomee sizeof (kmem_slab_t),
4013b5fca8f8Stomee offsetof(kmem_slab_t, slab_link));
4014b5fca8f8Stomee kmem_reset_reclaim_threshold(cp->cache_defrag);
4015b5fca8f8Stomee }
4016b5fca8f8Stomee cp->cache_move = move;
4017b5fca8f8Stomee }
4018b5fca8f8Stomee
4019b5fca8f8Stomee mutex_exit(&cp->cache_lock);
4020b5fca8f8Stomee
4021b5fca8f8Stomee if (defrag != NULL) {
4022b5fca8f8Stomee kmem_cache_free(kmem_defrag_cache, defrag); /* unused */
4023b5fca8f8Stomee }
4024b5fca8f8Stomee }
4025b5fca8f8Stomee
40267c478bd9Sstevel@tonic-gate void
40277c478bd9Sstevel@tonic-gate kmem_cache_destroy(kmem_cache_t *cp)
40287c478bd9Sstevel@tonic-gate {
40297c478bd9Sstevel@tonic-gate int cpu_seqid;
40307c478bd9Sstevel@tonic-gate
40317c478bd9Sstevel@tonic-gate /*
40327c478bd9Sstevel@tonic-gate * Remove the cache from the global cache list so that no one else
40337c478bd9Sstevel@tonic-gate * can schedule tasks on its behalf, wait for any pending tasks to
40347c478bd9Sstevel@tonic-gate * complete, purge the cache, and then destroy it.
40357c478bd9Sstevel@tonic-gate */
40367c478bd9Sstevel@tonic-gate mutex_enter(&kmem_cache_lock);
4037b5fca8f8Stomee list_remove(&kmem_caches, cp);
40387c478bd9Sstevel@tonic-gate mutex_exit(&kmem_cache_lock);
40397c478bd9Sstevel@tonic-gate
40407c478bd9Sstevel@tonic-gate if (kmem_taskq != NULL)
40417c478bd9Sstevel@tonic-gate taskq_wait(kmem_taskq);
4042aa7175abSBryan Cantrill
4043aa7175abSBryan Cantrill if (kmem_move_taskq != NULL && cp->cache_defrag != NULL)
4044b5fca8f8Stomee taskq_wait(kmem_move_taskq);
40457c478bd9Sstevel@tonic-gate
40467c478bd9Sstevel@tonic-gate kmem_cache_magazine_purge(cp);
40477c478bd9Sstevel@tonic-gate
40487c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_lock);
40497c478bd9Sstevel@tonic-gate if (cp->cache_buftotal != 0)
40507c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty",
40517c478bd9Sstevel@tonic-gate cp->cache_name, (void *)cp);
4052b5fca8f8Stomee if (cp->cache_defrag != NULL) {
4053b5fca8f8Stomee avl_destroy(&cp->cache_defrag->kmd_moves_pending);
4054b5fca8f8Stomee list_destroy(&cp->cache_defrag->kmd_deadlist);
4055b5fca8f8Stomee kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
4056b5fca8f8Stomee cp->cache_defrag = NULL;
4057b5fca8f8Stomee }
40587c478bd9Sstevel@tonic-gate /*
4059b5fca8f8Stomee * The cache is now dead. There should be no further activity. We
4060b5fca8f8Stomee * enforce this by setting land mines in the constructor, destructor,
4061b5fca8f8Stomee * reclaim, and move routines that induce a kernel text fault if
4062b5fca8f8Stomee * invoked.
40637c478bd9Sstevel@tonic-gate */
40647c478bd9Sstevel@tonic-gate cp->cache_constructor = (int (*)(void *, void *, int))1;
40657c478bd9Sstevel@tonic-gate cp->cache_destructor = (void (*)(void *, void *))2;
4066b5fca8f8Stomee cp->cache_reclaim = (void (*)(void *))3;
4067b5fca8f8Stomee cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
40687c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock);
40697c478bd9Sstevel@tonic-gate
40707c478bd9Sstevel@tonic-gate kstat_delete(cp->cache_kstat);
40717c478bd9Sstevel@tonic-gate
40727c478bd9Sstevel@tonic-gate if (cp->cache_hash_table != NULL)
40737c478bd9Sstevel@tonic-gate vmem_free(kmem_hash_arena, cp->cache_hash_table,
40747c478bd9Sstevel@tonic-gate (cp->cache_hash_mask + 1) * sizeof (void *));
40757c478bd9Sstevel@tonic-gate
40767c478bd9Sstevel@tonic-gate for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++)
40777c478bd9Sstevel@tonic-gate mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
40787c478bd9Sstevel@tonic-gate
40797c478bd9Sstevel@tonic-gate mutex_destroy(&cp->cache_depot_lock);
40807c478bd9Sstevel@tonic-gate mutex_destroy(&cp->cache_lock);
40817c478bd9Sstevel@tonic-gate
40827c478bd9Sstevel@tonic-gate vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
40837c478bd9Sstevel@tonic-gate }
40847c478bd9Sstevel@tonic-gate
40857c478bd9Sstevel@tonic-gate /*ARGSUSED*/
40867c478bd9Sstevel@tonic-gate static int
40877c478bd9Sstevel@tonic-gate kmem_cpu_setup(cpu_setup_t what, int id, void *arg)
40887c478bd9Sstevel@tonic-gate {
40897c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
40907c478bd9Sstevel@tonic-gate if (what == CPU_UNCONFIG) {
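		/*
		 * When a CPU is unconfigured, purge each cache's magazine
		 * layer (releasing the departing CPU's loaded magazines) and
		 * then re-enable magazines for the remaining CPUs.
		 */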
40917c478bd9Sstevel@tonic-gate kmem_cache_applyall(kmem_cache_magazine_purge,
40927c478bd9Sstevel@tonic-gate kmem_taskq, TQ_SLEEP);
40937c478bd9Sstevel@tonic-gate kmem_cache_applyall(kmem_cache_magazine_enable,
40947c478bd9Sstevel@tonic-gate kmem_taskq, TQ_SLEEP);
40957c478bd9Sstevel@tonic-gate }
40967c478bd9Sstevel@tonic-gate return (0);
40977c478bd9Sstevel@tonic-gate }
40987c478bd9Sstevel@tonic-gate
40997c478bd9Sstevel@tonic-gate static void
4100dce01e3fSJonathan W Adams kmem_alloc_caches_create(const int *array, size_t count,
4101dce01e3fSJonathan W Adams kmem_cache_t **alloc_table, size_t maxbuf, uint_t shift)
4102dce01e3fSJonathan W Adams {
4103dce01e3fSJonathan W Adams char name[KMEM_CACHE_NAMELEN + 1];
4104dce01e3fSJonathan W Adams size_t table_unit = (1 << shift); /* range of one alloc_table entry */
4105dce01e3fSJonathan W Adams size_t size = table_unit;
4106dce01e3fSJonathan W Adams int i;
4107dce01e3fSJonathan W Adams
4108dce01e3fSJonathan W Adams for (i = 0; i < count; i++) {
4109dce01e3fSJonathan W Adams size_t cache_size = array[i];
4110dce01e3fSJonathan W Adams size_t align = KMEM_ALIGN;
4111dce01e3fSJonathan W Adams kmem_cache_t *cp;
4112dce01e3fSJonathan W Adams
4113dce01e3fSJonathan W Adams /* if the table has an entry for maxbuf, we're done */
4114dce01e3fSJonathan W Adams if (size > maxbuf)
4115dce01e3fSJonathan W Adams break;
4116dce01e3fSJonathan W Adams
4117dce01e3fSJonathan W Adams /* cache size must be a multiple of the table unit */
4118dce01e3fSJonathan W Adams ASSERT(P2PHASE(cache_size, table_unit) == 0);
4119dce01e3fSJonathan W Adams
4120dce01e3fSJonathan W Adams /*
4121dce01e3fSJonathan W Adams * If they allocate a multiple of the coherency granularity,
4122dce01e3fSJonathan W Adams * they get a coherency-granularity-aligned address.
4123dce01e3fSJonathan W Adams */
4124dce01e3fSJonathan W Adams if (IS_P2ALIGNED(cache_size, 64))
4125dce01e3fSJonathan W Adams align = 64;
4126dce01e3fSJonathan W Adams if (IS_P2ALIGNED(cache_size, PAGESIZE))
4127dce01e3fSJonathan W Adams align = PAGESIZE;
4128dce01e3fSJonathan W Adams (void) snprintf(name, sizeof (name),
4129dce01e3fSJonathan W Adams "kmem_alloc_%lu", cache_size);
4130dce01e3fSJonathan W Adams cp = kmem_cache_create(name, cache_size, align,
4131dce01e3fSJonathan W Adams NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC);
4132dce01e3fSJonathan W Adams
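		/*
		 * Fill in every alloc_table slot not already covered by a
		 * smaller cache, up to and including cache_size.
		 */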
4133dce01e3fSJonathan W Adams while (size <= cache_size) {
4134dce01e3fSJonathan W Adams alloc_table[(size - 1) >> shift] = cp;
4135dce01e3fSJonathan W Adams size += table_unit;
4136dce01e3fSJonathan W Adams }
4137dce01e3fSJonathan W Adams }
4138dce01e3fSJonathan W Adams
4139dce01e3fSJonathan W Adams ASSERT(size > maxbuf); /* i.e. maxbuf <= max(cache_size) */
4140dce01e3fSJonathan W Adams }
4141dce01e3fSJonathan W Adams
4142dce01e3fSJonathan W Adams static void
41437c478bd9Sstevel@tonic-gate kmem_cache_init(int pass, int use_large_pages)
41447c478bd9Sstevel@tonic-gate {
41457c478bd9Sstevel@tonic-gate int i;
4146dce01e3fSJonathan W Adams size_t maxbuf;
41477c478bd9Sstevel@tonic-gate kmem_magtype_t *mtp;
41487c478bd9Sstevel@tonic-gate
41497c478bd9Sstevel@tonic-gate for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) {
4150dce01e3fSJonathan W Adams char name[KMEM_CACHE_NAMELEN + 1];
4151dce01e3fSJonathan W Adams
41527c478bd9Sstevel@tonic-gate mtp = &kmem_magtype[i];
41537c478bd9Sstevel@tonic-gate (void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize);
41547c478bd9Sstevel@tonic-gate mtp->mt_cache = kmem_cache_create(name,
41557c478bd9Sstevel@tonic-gate (mtp->mt_magsize + 1) * sizeof (void *),
41567c478bd9Sstevel@tonic-gate mtp->mt_align, NULL, NULL, NULL, NULL,
41577c478bd9Sstevel@tonic-gate kmem_msb_arena, KMC_NOHASH);
41587c478bd9Sstevel@tonic-gate }
41597c478bd9Sstevel@tonic-gate
41607c478bd9Sstevel@tonic-gate kmem_slab_cache = kmem_cache_create("kmem_slab_cache",
41617c478bd9Sstevel@tonic-gate sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL,
41627c478bd9Sstevel@tonic-gate kmem_msb_arena, KMC_NOHASH);
41637c478bd9Sstevel@tonic-gate
41647c478bd9Sstevel@tonic-gate kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache",
41657c478bd9Sstevel@tonic-gate sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL,
41667c478bd9Sstevel@tonic-gate kmem_msb_arena, KMC_NOHASH);
41677c478bd9Sstevel@tonic-gate
41687c478bd9Sstevel@tonic-gate kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache",
41697c478bd9Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL,
41707c478bd9Sstevel@tonic-gate kmem_msb_arena, KMC_NOHASH);
41717c478bd9Sstevel@tonic-gate
41727c478bd9Sstevel@tonic-gate if (pass == 2) {
41737c478bd9Sstevel@tonic-gate kmem_va_arena = vmem_create("kmem_va",
41747c478bd9Sstevel@tonic-gate NULL, 0, PAGESIZE,
41757c478bd9Sstevel@tonic-gate vmem_alloc, vmem_free, heap_arena,
41767c478bd9Sstevel@tonic-gate 8 * PAGESIZE, VM_SLEEP);
41777c478bd9Sstevel@tonic-gate
41787c478bd9Sstevel@tonic-gate if (use_large_pages) {
41797c478bd9Sstevel@tonic-gate kmem_default_arena = vmem_xcreate("kmem_default",
41807c478bd9Sstevel@tonic-gate NULL, 0, PAGESIZE,
41817c478bd9Sstevel@tonic-gate segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena,
41829dd77bc8SDave Plauger 0, VMC_DUMPSAFE | VM_SLEEP);
41837c478bd9Sstevel@tonic-gate } else {
41847c478bd9Sstevel@tonic-gate kmem_default_arena = vmem_create("kmem_default",
41857c478bd9Sstevel@tonic-gate NULL, 0, PAGESIZE,
41867c478bd9Sstevel@tonic-gate segkmem_alloc, segkmem_free, kmem_va_arena,
41879dd77bc8SDave Plauger 0, VMC_DUMPSAFE | VM_SLEEP);
41887c478bd9Sstevel@tonic-gate }
4189dce01e3fSJonathan W Adams
4190dce01e3fSJonathan W Adams /* Figure out what our maximum cache size is */
4191dce01e3fSJonathan W Adams maxbuf = kmem_max_cached;
4192dce01e3fSJonathan W Adams if (maxbuf <= KMEM_MAXBUF) {
4193dce01e3fSJonathan W Adams maxbuf = 0;
4194dce01e3fSJonathan W Adams kmem_max_cached = KMEM_MAXBUF;
4195dce01e3fSJonathan W Adams } else {
4196dce01e3fSJonathan W Adams size_t size = 0;
4197dce01e3fSJonathan W Adams size_t max =
4198dce01e3fSJonathan W Adams sizeof (kmem_big_alloc_sizes) / sizeof (int);
4199dce01e3fSJonathan W Adams /*
4200dce01e3fSJonathan W Adams * Round maxbuf up to an existing cache size. If maxbuf
4201dce01e3fSJonathan W Adams * is larger than the largest cache, we truncate it to
4202dce01e3fSJonathan W Adams * the largest cache's size.
4203dce01e3fSJonathan W Adams */
4204dce01e3fSJonathan W Adams for (i = 0; i < max; i++) {
4205dce01e3fSJonathan W Adams size = kmem_big_alloc_sizes[i];
4206dce01e3fSJonathan W Adams if (maxbuf <= size)
4207dce01e3fSJonathan W Adams break;
4208dce01e3fSJonathan W Adams }
4209dce01e3fSJonathan W Adams kmem_max_cached = maxbuf = size;
4210dce01e3fSJonathan W Adams }
4211dce01e3fSJonathan W Adams
4212dce01e3fSJonathan W Adams /*
4213dce01e3fSJonathan W Adams * The big alloc table may not be completely overwritten, so
4214dce01e3fSJonathan W Adams * we clear out any stale cache pointers from the first pass.
4215dce01e3fSJonathan W Adams */
4216dce01e3fSJonathan W Adams bzero(kmem_big_alloc_table, sizeof (kmem_big_alloc_table));
42177c478bd9Sstevel@tonic-gate } else {
42187c478bd9Sstevel@tonic-gate /*
42197c478bd9Sstevel@tonic-gate * During the first pass, the kmem_alloc_* caches
42207c478bd9Sstevel@tonic-gate * are treated as metadata.
42217c478bd9Sstevel@tonic-gate */
42227c478bd9Sstevel@tonic-gate kmem_default_arena = kmem_msb_arena;
4223dce01e3fSJonathan W Adams maxbuf = KMEM_BIG_MAXBUF_32BIT;
42247c478bd9Sstevel@tonic-gate }
42257c478bd9Sstevel@tonic-gate
42267c478bd9Sstevel@tonic-gate /*
42277c478bd9Sstevel@tonic-gate * Set up the default caches to back kmem_alloc()
42287c478bd9Sstevel@tonic-gate */
4229dce01e3fSJonathan W Adams kmem_alloc_caches_create(
4230dce01e3fSJonathan W Adams kmem_alloc_sizes, sizeof (kmem_alloc_sizes) / sizeof (int),
4231dce01e3fSJonathan W Adams kmem_alloc_table, KMEM_MAXBUF, KMEM_ALIGN_SHIFT);
4232dce01e3fSJonathan W Adams
4233dce01e3fSJonathan W Adams kmem_alloc_caches_create(
4234dce01e3fSJonathan W Adams kmem_big_alloc_sizes, sizeof (kmem_big_alloc_sizes) / sizeof (int),
4235dce01e3fSJonathan W Adams kmem_big_alloc_table, maxbuf, KMEM_BIG_SHIFT);
4236dce01e3fSJonathan W Adams
4237dce01e3fSJonathan W Adams kmem_big_alloc_table_max = maxbuf >> KMEM_BIG_SHIFT;
42387c478bd9Sstevel@tonic-gate }
42397c478bd9Sstevel@tonic-gate
42407c478bd9Sstevel@tonic-gate void
42417c478bd9Sstevel@tonic-gate kmem_init(void)
42427c478bd9Sstevel@tonic-gate {
42437c478bd9Sstevel@tonic-gate kmem_cache_t *cp;
42447c478bd9Sstevel@tonic-gate int old_kmem_flags = kmem_flags;
42457c478bd9Sstevel@tonic-gate int use_large_pages = 0;
42467c478bd9Sstevel@tonic-gate size_t maxverify, minfirewall;
42477c478bd9Sstevel@tonic-gate
42487c478bd9Sstevel@tonic-gate kstat_init();
42497c478bd9Sstevel@tonic-gate
42507c478bd9Sstevel@tonic-gate /*
42517c478bd9Sstevel@tonic-gate * Don't do firewalled allocations if the heap is less than 1TB
42527c478bd9Sstevel@tonic-gate * (i.e. on a 32-bit kernel).
42537c478bd9Sstevel@tonic-gate * The resulting VM_NEXTFIT allocations would create too much
42547c478bd9Sstevel@tonic-gate * fragmentation in a small heap.
42557c478bd9Sstevel@tonic-gate */
42567c478bd9Sstevel@tonic-gate #if defined(_LP64)
42577c478bd9Sstevel@tonic-gate maxverify = minfirewall = PAGESIZE / 2;
42587c478bd9Sstevel@tonic-gate #else
42597c478bd9Sstevel@tonic-gate maxverify = minfirewall = ULONG_MAX;
42607c478bd9Sstevel@tonic-gate #endif
42617c478bd9Sstevel@tonic-gate
42627c478bd9Sstevel@tonic-gate /* LINTED */
42637c478bd9Sstevel@tonic-gate ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE);
42647c478bd9Sstevel@tonic-gate
4265b5fca8f8Stomee list_create(&kmem_caches, sizeof (kmem_cache_t),
4266b5fca8f8Stomee offsetof(kmem_cache_t, cache_link));
42677c478bd9Sstevel@tonic-gate
42687c478bd9Sstevel@tonic-gate kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE,
42697c478bd9Sstevel@tonic-gate vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE,
42707c478bd9Sstevel@tonic-gate VM_SLEEP | VMC_NO_QCACHE);
42717c478bd9Sstevel@tonic-gate
42727c478bd9Sstevel@tonic-gate kmem_msb_arena = vmem_create("kmem_msb", NULL, 0,
42737c478bd9Sstevel@tonic-gate PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0,
42749dd77bc8SDave Plauger VMC_DUMPSAFE | VM_SLEEP);
42757c478bd9Sstevel@tonic-gate
42767c478bd9Sstevel@tonic-gate kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN,
42777c478bd9Sstevel@tonic-gate segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
42787c478bd9Sstevel@tonic-gate
42797c478bd9Sstevel@tonic-gate kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN,
42807c478bd9Sstevel@tonic-gate segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
42817c478bd9Sstevel@tonic-gate
42827c478bd9Sstevel@tonic-gate kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN,
42837c478bd9Sstevel@tonic-gate segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
42847c478bd9Sstevel@tonic-gate
42857c478bd9Sstevel@tonic-gate kmem_firewall_va_arena = vmem_create("kmem_firewall_va",
42867c478bd9Sstevel@tonic-gate NULL, 0, PAGESIZE,
42877c478bd9Sstevel@tonic-gate kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena,
42887c478bd9Sstevel@tonic-gate 0, VM_SLEEP);
42897c478bd9Sstevel@tonic-gate
42907c478bd9Sstevel@tonic-gate kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE,
42919dd77bc8SDave Plauger segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0,
42929dd77bc8SDave Plauger VMC_DUMPSAFE | VM_SLEEP);
42937c478bd9Sstevel@tonic-gate
42947c478bd9Sstevel@tonic-gate /* temporary oversize arena for mod_read_system_file */
42957c478bd9Sstevel@tonic-gate kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE,
42967c478bd9Sstevel@tonic-gate segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
42977c478bd9Sstevel@tonic-gate
42987c478bd9Sstevel@tonic-gate kmem_reap_interval = 15 * hz;
42997c478bd9Sstevel@tonic-gate
43007c478bd9Sstevel@tonic-gate /*
43017c478bd9Sstevel@tonic-gate * Read /etc/system. This is a chicken-and-egg problem because
43027c478bd9Sstevel@tonic-gate * kmem_flags may be set in /etc/system, but mod_read_system_file()
43037c478bd9Sstevel@tonic-gate * needs to use the allocator. The simplest solution is to create
43047c478bd9Sstevel@tonic-gate * all the standard kmem caches, read /etc/system, destroy all the
43057c478bd9Sstevel@tonic-gate * caches we just created, and then create them all again in light
43067c478bd9Sstevel@tonic-gate * of the (possibly) new kmem_flags and other kmem tunables.
43077c478bd9Sstevel@tonic-gate */
43087c478bd9Sstevel@tonic-gate kmem_cache_init(1, 0);
43097c478bd9Sstevel@tonic-gate
43107c478bd9Sstevel@tonic-gate mod_read_system_file(boothowto & RB_ASKNAME);
43117c478bd9Sstevel@tonic-gate
4312b5fca8f8Stomee while ((cp = list_tail(&kmem_caches)) != NULL)
43137c478bd9Sstevel@tonic-gate kmem_cache_destroy(cp);
43147c478bd9Sstevel@tonic-gate
43157c478bd9Sstevel@tonic-gate vmem_destroy(kmem_oversize_arena);
43167c478bd9Sstevel@tonic-gate
43177c478bd9Sstevel@tonic-gate if (old_kmem_flags & KMF_STICKY)
43187c478bd9Sstevel@tonic-gate kmem_flags = old_kmem_flags;
43197c478bd9Sstevel@tonic-gate
43207c478bd9Sstevel@tonic-gate if (!(kmem_flags & KMF_AUDIT))
43217c478bd9Sstevel@tonic-gate vmem_seg_size = offsetof(vmem_seg_t, vs_thread);
43227c478bd9Sstevel@tonic-gate
43237c478bd9Sstevel@tonic-gate if (kmem_maxverify == 0)
43247c478bd9Sstevel@tonic-gate kmem_maxverify = maxverify;
43257c478bd9Sstevel@tonic-gate
43267c478bd9Sstevel@tonic-gate if (kmem_minfirewall == 0)
43277c478bd9Sstevel@tonic-gate kmem_minfirewall = minfirewall;
43287c478bd9Sstevel@tonic-gate
43297c478bd9Sstevel@tonic-gate /*
43307c478bd9Sstevel@tonic-gate * give segkmem a chance to figure out if we are using large pages
43317c478bd9Sstevel@tonic-gate * for the kernel heap
43327c478bd9Sstevel@tonic-gate */
43337c478bd9Sstevel@tonic-gate use_large_pages = segkmem_lpsetup();
43347c478bd9Sstevel@tonic-gate
43357c478bd9Sstevel@tonic-gate /*
43367c478bd9Sstevel@tonic-gate * To protect against corruption, we keep the actual number of callers
43377c478bd9Sstevel@tonic-gate * KMF_LITE records separate from the tunable. We arbitrarily clamp
43387c478bd9Sstevel@tonic-gate * to 16, since the overhead for small buffers quickly gets out of
43397c478bd9Sstevel@tonic-gate * hand.
43407c478bd9Sstevel@tonic-gate *
43417c478bd9Sstevel@tonic-gate * The real limit would depend on the needs of the largest KMC_NOHASH
43427c478bd9Sstevel@tonic-gate * cache.
43437c478bd9Sstevel@tonic-gate */
43447c478bd9Sstevel@tonic-gate kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16);
43457c478bd9Sstevel@tonic-gate kmem_lite_pcs = kmem_lite_count;
43467c478bd9Sstevel@tonic-gate
43477c478bd9Sstevel@tonic-gate /*
43487c478bd9Sstevel@tonic-gate * Normally, we firewall oversized allocations when possible, but
43497c478bd9Sstevel@tonic-gate * if we are using large pages for kernel memory, and we don't have
43507c478bd9Sstevel@tonic-gate * any non-LITE debugging flags set, we want to allocate oversized
43517c478bd9Sstevel@tonic-gate * buffers from large pages, and so skip the firewalling.
43527c478bd9Sstevel@tonic-gate */
43537c478bd9Sstevel@tonic-gate if (use_large_pages &&
43547c478bd9Sstevel@tonic-gate ((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) {
43557c478bd9Sstevel@tonic-gate kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0,
43567c478bd9Sstevel@tonic-gate PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena,
43579dd77bc8SDave Plauger 0, VMC_DUMPSAFE | VM_SLEEP);
43587c478bd9Sstevel@tonic-gate } else {
43597c478bd9Sstevel@tonic-gate kmem_oversize_arena = vmem_create("kmem_oversize",
43607c478bd9Sstevel@tonic-gate NULL, 0, PAGESIZE,
43617c478bd9Sstevel@tonic-gate segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX?
43629dd77bc8SDave Plauger kmem_firewall_va_arena : heap_arena, 0, VMC_DUMPSAFE |
43639dd77bc8SDave Plauger VM_SLEEP);
43647c478bd9Sstevel@tonic-gate }
43657c478bd9Sstevel@tonic-gate
43667c478bd9Sstevel@tonic-gate kmem_cache_init(2, use_large_pages);
43677c478bd9Sstevel@tonic-gate
43687c478bd9Sstevel@tonic-gate if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) {
43697c478bd9Sstevel@tonic-gate if (kmem_transaction_log_size == 0)
43707c478bd9Sstevel@tonic-gate kmem_transaction_log_size = kmem_maxavail() / 50;
43717c478bd9Sstevel@tonic-gate kmem_transaction_log = kmem_log_init(kmem_transaction_log_size);
43727c478bd9Sstevel@tonic-gate }
43737c478bd9Sstevel@tonic-gate
43747c478bd9Sstevel@tonic-gate if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) {
43757c478bd9Sstevel@tonic-gate if (kmem_content_log_size == 0)
43767c478bd9Sstevel@tonic-gate kmem_content_log_size = kmem_maxavail() / 50;
43777c478bd9Sstevel@tonic-gate kmem_content_log = kmem_log_init(kmem_content_log_size);
43787c478bd9Sstevel@tonic-gate }
43797c478bd9Sstevel@tonic-gate
43807c478bd9Sstevel@tonic-gate kmem_failure_log = kmem_log_init(kmem_failure_log_size);
43817c478bd9Sstevel@tonic-gate
43827c478bd9Sstevel@tonic-gate kmem_slab_log = kmem_log_init(kmem_slab_log_size);
43837c478bd9Sstevel@tonic-gate
43847c478bd9Sstevel@tonic-gate /*
43857c478bd9Sstevel@tonic-gate * Initialize STREAMS message caches so allocb() is available.
43867c478bd9Sstevel@tonic-gate * This allows us to initialize the logging framework (cmn_err(9F),
43877c478bd9Sstevel@tonic-gate * strlog(9F), etc) so we can start recording messages.
43887c478bd9Sstevel@tonic-gate */
43897c478bd9Sstevel@tonic-gate streams_msg_init();
43907d692464Sdp201428
43917c478bd9Sstevel@tonic-gate /*
43927c478bd9Sstevel@tonic-gate * Initialize the ZSD framework in Zones so modules loaded henceforth
43937c478bd9Sstevel@tonic-gate * can register their callbacks.
43947c478bd9Sstevel@tonic-gate */
43957c478bd9Sstevel@tonic-gate zone_zsd_init();
4396f4b3ec61Sdh155122
43977c478bd9Sstevel@tonic-gate log_init();
43987c478bd9Sstevel@tonic-gate taskq_init();
43997c478bd9Sstevel@tonic-gate
44007d692464Sdp201428 /*
44017d692464Sdp201428 * Warn about invalid or dangerous values of kmem_flags.
44027d692464Sdp201428 * Always warn about unsupported values.
44037d692464Sdp201428 */
44047d692464Sdp201428 if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE |
44057d692464Sdp201428 KMF_CONTENTS | KMF_LITE)) != 0) ||
44067d692464Sdp201428 ((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE))
44077d692464Sdp201428 cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x. "
44087d692464Sdp201428 "See the Solaris Tunable Parameters Reference Manual.",
44097d692464Sdp201428 kmem_flags);
44107d692464Sdp201428
44117d692464Sdp201428 #ifdef DEBUG
44127d692464Sdp201428 if ((kmem_flags & KMF_DEBUG) == 0)
44137d692464Sdp201428 cmn_err(CE_NOTE, "kmem debugging disabled.");
44147d692464Sdp201428 #else
44157d692464Sdp201428 /*
44167d692464Sdp201428 * For non-debug kernels, the only "normal" flags are 0, KMF_LITE,
44177d692464Sdp201428 * KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled
44187d692464Sdp201428 * if KMF_AUDIT is set). We should warn the user about the performance
44197d692464Sdp201428 * penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE
44207d692464Sdp201428 * isn't set (since that disables AUDIT).
44217d692464Sdp201428 */
44227d692464Sdp201428 if (!(kmem_flags & KMF_LITE) &&
44237d692464Sdp201428 (kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0)
44247d692464Sdp201428 cmn_err(CE_WARN, "High-overhead kmem debugging features "
44257d692464Sdp201428 "enabled (kmem_flags = 0x%x). Performance degradation "
44267d692464Sdp201428 "and large memory overhead possible. See the Solaris "
44277d692464Sdp201428 "Tunable Parameters Reference Manual.", kmem_flags);
44287d692464Sdp201428 #endif /* not DEBUG */
44297d692464Sdp201428
44307c478bd9Sstevel@tonic-gate kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP);
44317c478bd9Sstevel@tonic-gate
44327c478bd9Sstevel@tonic-gate kmem_ready = 1;
44337c478bd9Sstevel@tonic-gate
44347c478bd9Sstevel@tonic-gate /*
44357c478bd9Sstevel@tonic-gate * Initialize the platform-specific aligned/DMA memory allocator.
44367c478bd9Sstevel@tonic-gate */
44377c478bd9Sstevel@tonic-gate ka_init();
44387c478bd9Sstevel@tonic-gate
44397c478bd9Sstevel@tonic-gate /*
44407c478bd9Sstevel@tonic-gate * Initialize 32-bit ID cache.
44417c478bd9Sstevel@tonic-gate */
44427c478bd9Sstevel@tonic-gate id32_init();
4443f4b3ec61Sdh155122
4444f4b3ec61Sdh155122 /*
4445f4b3ec61Sdh155122 * Initialize the networking stack so modules loaded can
4446f4b3ec61Sdh155122 * register their callbacks.
4447f4b3ec61Sdh155122 */
4448f4b3ec61Sdh155122 netstack_init();
44497c478bd9Sstevel@tonic-gate }
44507c478bd9Sstevel@tonic-gate
4451b5fca8f8Stomee static void
4452b5fca8f8Stomee kmem_move_init(void)
4453b5fca8f8Stomee {
4454b5fca8f8Stomee kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache",
4455b5fca8f8Stomee sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL,
4456b5fca8f8Stomee kmem_msb_arena, KMC_NOHASH);
4457b5fca8f8Stomee kmem_move_cache = kmem_cache_create("kmem_move_cache",
4458b5fca8f8Stomee sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL,
4459b5fca8f8Stomee kmem_msb_arena, KMC_NOHASH);
4460b5fca8f8Stomee
4461b5fca8f8Stomee /*
4462b5fca8f8Stomee * kmem guarantees that move callbacks are sequential and that even
4463b5fca8f8Stomee * across multiple caches no two moves ever execute simultaneously.
4464b5fca8f8Stomee * Move callbacks are processed on a separate taskq so that client code
4465b5fca8f8Stomee * does not interfere with internal maintenance tasks.
4466b5fca8f8Stomee */
4467b5fca8f8Stomee kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1,
4468b5fca8f8Stomee minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE);
4469b5fca8f8Stomee }
4470b5fca8f8Stomee
44717c478bd9Sstevel@tonic-gate void
44727c478bd9Sstevel@tonic-gate kmem_thread_init(void)
44737c478bd9Sstevel@tonic-gate {
4474b5fca8f8Stomee kmem_move_init();
4475*de58340cSJoshua M. Clulow
4476*de58340cSJoshua M. Clulow /*
4477*de58340cSJoshua M. Clulow * This taskq is used for various kmem maintenance functions, including
4478*de58340cSJoshua M. Clulow * kmem_reap(). When maintenance is required on every cache,
4479*de58340cSJoshua M. Clulow * kmem_cache_applyall() dispatches one task per cache onto this queue.
4480*de58340cSJoshua M. Clulow *
4481*de58340cSJoshua M. Clulow * In the case of kmem_reap(), the system may be under increasingly
4482*de58340cSJoshua M. Clulow * dire memory pressure and may not be able to allocate a new task
4483*de58340cSJoshua M. Clulow * entry. The count of entries to prepopulate (below) should cover at
4484*de58340cSJoshua M. Clulow * least as many caches as we generally expect to exist on the system
4485*de58340cSJoshua M. Clulow * so that they may all be scheduled for reaping under those
4486*de58340cSJoshua M. Clulow * conditions.
4487*de58340cSJoshua M. Clulow */
44887c478bd9Sstevel@tonic-gate kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri,
4489*de58340cSJoshua M. Clulow 600, INT_MAX, TASKQ_PREPOPULATE);
44907c478bd9Sstevel@tonic-gate }
44917c478bd9Sstevel@tonic-gate
44927c478bd9Sstevel@tonic-gate void
44937c478bd9Sstevel@tonic-gate kmem_mp_init(void)
44947c478bd9Sstevel@tonic-gate {
44957c478bd9Sstevel@tonic-gate mutex_enter(&cpu_lock);
44967c478bd9Sstevel@tonic-gate register_cpu_setup_func(kmem_cpu_setup, NULL);
44977c478bd9Sstevel@tonic-gate mutex_exit(&cpu_lock);
44987c478bd9Sstevel@tonic-gate
44997c478bd9Sstevel@tonic-gate kmem_update_timeout(NULL);
45002e0c549eSJonathan Adams
45012e0c549eSJonathan Adams taskq_mp_init();
45027c478bd9Sstevel@tonic-gate }
4503b5fca8f8Stomee
4504b5fca8f8Stomee /*
4505b5fca8f8Stomee * Return the slab of the allocated buffer, or NULL if the buffer is not
4506b5fca8f8Stomee * allocated. This function may be called with a known slab address to determine
4507b5fca8f8Stomee * whether or not the buffer is allocated, or with a NULL slab address to obtain
4508b5fca8f8Stomee * an allocated buffer's slab.
4509b5fca8f8Stomee */
4510b5fca8f8Stomee static kmem_slab_t *
4511b5fca8f8Stomee kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
4512b5fca8f8Stomee {
4513b5fca8f8Stomee kmem_bufctl_t *bcp, *bufbcp;
4514b5fca8f8Stomee
4515b5fca8f8Stomee ASSERT(MUTEX_HELD(&cp->cache_lock));
4516b5fca8f8Stomee ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));
4517b5fca8f8Stomee
4518b5fca8f8Stomee if (cp->cache_flags & KMF_HASH) {
4519b5fca8f8Stomee for (bcp = *KMEM_HASH(cp, buf);
4520b5fca8f8Stomee (bcp != NULL) && (bcp->bc_addr != buf);
4521b5fca8f8Stomee bcp = bcp->bc_next) {
4522b5fca8f8Stomee continue;
4523b5fca8f8Stomee }
4524b5fca8f8Stomee ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1);
4525b5fca8f8Stomee return (bcp == NULL ? NULL : bcp->bc_slab);
4526b5fca8f8Stomee }
4527b5fca8f8Stomee
4528b5fca8f8Stomee if (sp == NULL) {
4529b5fca8f8Stomee sp = KMEM_SLAB(cp, buf);
4530b5fca8f8Stomee }
4531b5fca8f8Stomee bufbcp = KMEM_BUFCTL(cp, buf);
4532b5fca8f8Stomee for (bcp = sp->slab_head;
4533b5fca8f8Stomee (bcp != NULL) && (bcp != bufbcp);
4534b5fca8f8Stomee bcp = bcp->bc_next) {
4535b5fca8f8Stomee continue;
4536b5fca8f8Stomee }
4537b5fca8f8Stomee return (bcp == NULL ? sp : NULL);
4538b5fca8f8Stomee }
4539b5fca8f8Stomee
4540b5fca8f8Stomee static boolean_t
4541b5fca8f8Stomee kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
4542b5fca8f8Stomee {
4543686031edSTom Erickson long refcnt = sp->slab_refcnt;
4544b5fca8f8Stomee
4545b5fca8f8Stomee ASSERT(cp->cache_defrag != NULL);
4546b5fca8f8Stomee
4547686031edSTom Erickson /*
4548686031edSTom Erickson * For code coverage we want to be able to move an object within the
4549686031edSTom Erickson * same slab (the only partial slab) even if allocating the destination
4550686031edSTom Erickson * buffer resulted in a completely allocated slab.
4551686031edSTom Erickson */
4552686031edSTom Erickson if (flags & KMM_DEBUG) {
4553686031edSTom Erickson return ((flags & KMM_DESPERATE) ||
4554686031edSTom Erickson ((sp->slab_flags & KMEM_SLAB_NOMOVE) == 0));
4555686031edSTom Erickson }
4556686031edSTom Erickson
4557b5fca8f8Stomee /* If we're desperate, we don't care if the client said NO. */
4558b5fca8f8Stomee if (flags & KMM_DESPERATE) {
4559b5fca8f8Stomee return (refcnt < sp->slab_chunks); /* any partial */
4560b5fca8f8Stomee }
4561b5fca8f8Stomee
4562b5fca8f8Stomee if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4563b5fca8f8Stomee return (B_FALSE);
4564b5fca8f8Stomee }
4565b5fca8f8Stomee
4566686031edSTom Erickson if ((refcnt == 1) || kmem_move_any_partial) {
4567b5fca8f8Stomee return (refcnt < sp->slab_chunks);
4568b5fca8f8Stomee }
4569b5fca8f8Stomee
4570b5fca8f8Stomee /*
4571b5fca8f8Stomee * The reclaim threshold is adjusted at each kmem_cache_scan() so that
4572b5fca8f8Stomee * slabs with a progressively higher percentage of used buffers can be
4573b5fca8f8Stomee * reclaimed until the cache as a whole is no longer fragmented.
4574b5fca8f8Stomee *
4575b5fca8f8Stomee * sp->slab_refcnt kmd_reclaim_numer
4576b5fca8f8Stomee * --------------- < ------------------
4577b5fca8f8Stomee * sp->slab_chunks KMEM_VOID_FRACTION
4578b5fca8f8Stomee */
4579b5fca8f8Stomee return ((refcnt * KMEM_VOID_FRACTION) <
4580b5fca8f8Stomee (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
4581b5fca8f8Stomee }
4582b5fca8f8Stomee
4583b5fca8f8Stomee /*
4584b5fca8f8Stomee * May be called from the kmem_move_taskq, from kmem_cache_move_notify_task(),
4585b5fca8f8Stomee * or when the buffer is freed.
4586b5fca8f8Stomee */
4587b5fca8f8Stomee static void
4588b5fca8f8Stomee kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4589b5fca8f8Stomee {
4590b5fca8f8Stomee ASSERT(MUTEX_HELD(&cp->cache_lock));
4591b5fca8f8Stomee ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4592b5fca8f8Stomee
4593b5fca8f8Stomee if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4594b5fca8f8Stomee return;
4595b5fca8f8Stomee }
4596b5fca8f8Stomee
4597b5fca8f8Stomee if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4598b5fca8f8Stomee if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) {
4599b5fca8f8Stomee avl_remove(&cp->cache_partial_slabs, sp);
4600b5fca8f8Stomee sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
4601b5fca8f8Stomee sp->slab_stuck_offset = (uint32_t)-1;
4602b5fca8f8Stomee avl_add(&cp->cache_partial_slabs, sp);
4603b5fca8f8Stomee }
4604b5fca8f8Stomee } else {
4605b5fca8f8Stomee sp->slab_later_count = 0;
4606b5fca8f8Stomee sp->slab_stuck_offset = (uint32_t)-1;
4607b5fca8f8Stomee }
4608b5fca8f8Stomee }
4609b5fca8f8Stomee
4610b5fca8f8Stomee static void
4611b5fca8f8Stomee kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4612b5fca8f8Stomee {
4613b5fca8f8Stomee ASSERT(taskq_member(kmem_move_taskq, curthread));
4614b5fca8f8Stomee ASSERT(MUTEX_HELD(&cp->cache_lock));
4615b5fca8f8Stomee ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4616b5fca8f8Stomee
4617b5fca8f8Stomee if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4618b5fca8f8Stomee return;
4619b5fca8f8Stomee }
4620b5fca8f8Stomee
4621b5fca8f8Stomee avl_remove(&cp->cache_partial_slabs, sp);
4622b5fca8f8Stomee sp->slab_later_count = 0;
4623b5fca8f8Stomee sp->slab_flags |= KMEM_SLAB_NOMOVE;
4624b5fca8f8Stomee sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf);
4625b5fca8f8Stomee avl_add(&cp->cache_partial_slabs, sp);
4626b5fca8f8Stomee }
4627b5fca8f8Stomee
4628b5fca8f8Stomee static void kmem_move_end(kmem_cache_t *, kmem_move_t *);
4629b5fca8f8Stomee
4630b5fca8f8Stomee /*
4631b5fca8f8Stomee * The move callback takes two buffer addresses, the buffer to be moved, and a
4632b5fca8f8Stomee * newly allocated and constructed buffer selected by kmem as the destination.
4633b5fca8f8Stomee * It also takes the size of the buffer and an optional user argument specified
4634b5fca8f8Stomee * at cache creation time. kmem guarantees that the buffer to be moved has not
4635b5fca8f8Stomee * been unmapped by the virtual memory subsystem. Beyond that, it cannot
4636b5fca8f8Stomee * guarantee the present whereabouts of the buffer to be moved, so it is up to
4637b5fca8f8Stomee * the client to safely determine whether or not it is still using the buffer.
4638b5fca8f8Stomee * The client must not free either of the buffers passed to the move callback,
4639b5fca8f8Stomee * since kmem wants to free them directly to the slab layer. The client response
4640b5fca8f8Stomee * tells kmem which of the two buffers to free:
4641b5fca8f8Stomee *
4642b5fca8f8Stomee * YES kmem frees the old buffer (the move was successful)
4643b5fca8f8Stomee * NO kmem frees the new buffer, marks the slab of the old buffer
4644b5fca8f8Stomee * non-reclaimable to avoid bothering the client again
4645b5fca8f8Stomee * LATER kmem frees the new buffer, increments slab_later_count
4646aa7175abSBryan Cantrill * DONT_KNOW kmem frees the new buffer
4647b5fca8f8Stomee * DONT_NEED kmem frees both the old buffer and the new buffer
4648b5fca8f8Stomee *
4649b5fca8f8Stomee * The pending callback argument now being processed contains both of the
4650b5fca8f8Stomee * buffers (old and new) passed to the move callback function, the slab of the
4651b5fca8f8Stomee * old buffer, and flags related to the move request, such as whether or not the
4652b5fca8f8Stomee * system was desperate for memory.
4653686031edSTom Erickson *
4654686031edSTom Erickson * Slabs are not freed while there is a pending callback, but instead are kept
4655686031edSTom Erickson * on a deadlist, which is drained after the last callback completes. This means
4656686031edSTom Erickson * that slabs are safe to access until kmem_move_end(), no matter how many of
4657686031edSTom Erickson * their buffers have been freed. Once slab_refcnt reaches zero, it stays at
4658686031edSTom Erickson * zero for as long as the slab remains on the deadlist and until the slab is
4659686031edSTom Erickson * freed.
4660b5fca8f8Stomee */
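/*
 * As a purely illustrative sketch of the protocol above (foo_t, foo_valid(),
 * foo_in_use(), and foo_fixup_refs() are hypothetical client names, not part
 * of kmem), a move callback might look like:
 *
 *	static kmem_cbrc_t
 *	foo_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		foo_t *fp = old;
 *
 *		if (!foo_valid(fp))
 *			return (KMEM_CBRC_DONT_KNOW);
 *		if (foo_in_use(fp))
 *			return (KMEM_CBRC_LATER);
 *		bcopy(old, new, size);
 *		foo_fixup_refs(fp, new);
 *		return (KMEM_CBRC_YES);
 *	}
 *
 * DONT_KNOW is returned when the client cannot tell whether the old buffer is
 * still a valid object (it may already be free), LATER defers a busy object to
 * a future scan, and YES tells kmem that the copy in the new buffer is now the
 * live object, so the old buffer may be freed to the slab layer.
 */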
4661b5fca8f8Stomee static void
4662b5fca8f8Stomee kmem_move_buffer(kmem_move_t *callback)
4663b5fca8f8Stomee {
4664b5fca8f8Stomee kmem_cbrc_t response;
4665b5fca8f8Stomee kmem_slab_t *sp = callback->kmm_from_slab;
4666b5fca8f8Stomee kmem_cache_t *cp = sp->slab_cache;
4667b5fca8f8Stomee boolean_t free_on_slab;
4668b5fca8f8Stomee
4669b5fca8f8Stomee ASSERT(taskq_member(kmem_move_taskq, curthread));
4670b5fca8f8Stomee ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4671b5fca8f8Stomee ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf));
4672b5fca8f8Stomee
4673b5fca8f8Stomee /*
4674b5fca8f8Stomee * The number of allocated buffers on the slab may have changed since we
4675b5fca8f8Stomee * last checked the slab's reclaimability (when the pending move was
4676b5fca8f8Stomee * enqueued), or the client may have responded NO when asked to move
4677b5fca8f8Stomee * another buffer on the same slab.
4678b5fca8f8Stomee */
4679b5fca8f8Stomee if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
4680b5fca8f8Stomee kmem_slab_free(cp, callback->kmm_to_buf);
4681b5fca8f8Stomee kmem_move_end(cp, callback);
4682b5fca8f8Stomee return;
4683b5fca8f8Stomee }
4684b5fca8f8Stomee
4685b5fca8f8Stomee /*
4686aa7175abSBryan Cantrill * Checking the slab layer is easy, so we might as well do that here
4687aa7175abSBryan Cantrill * in case we can avoid bothering the client.
4688b5fca8f8Stomee */
4689b5fca8f8Stomee mutex_enter(&cp->cache_lock);
4690b5fca8f8Stomee free_on_slab = (kmem_slab_allocated(cp, sp,
4691b5fca8f8Stomee callback->kmm_from_buf) == NULL);
4692b5fca8f8Stomee mutex_exit(&cp->cache_lock);
4693b5fca8f8Stomee
4694b5fca8f8Stomee if (free_on_slab) {
4695b5fca8f8Stomee kmem_slab_free(cp, callback->kmm_to_buf);
4696b5fca8f8Stomee kmem_move_end(cp, callback);
4697b5fca8f8Stomee return;
4698b5fca8f8Stomee }
4699b5fca8f8Stomee
4700b5fca8f8Stomee if (cp->cache_flags & KMF_BUFTAG) {
4701b5fca8f8Stomee /*
4702b5fca8f8Stomee * Make kmem_cache_alloc_debug() apply the constructor for us.
4703b5fca8f8Stomee */
4704b5fca8f8Stomee if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4705b5fca8f8Stomee KM_NOSLEEP, 1, caller()) != 0) {
4706b5fca8f8Stomee kmem_move_end(cp, callback);
4707b5fca8f8Stomee return;
4708b5fca8f8Stomee }
4709b5fca8f8Stomee } else if (cp->cache_constructor != NULL &&
4710b5fca8f8Stomee cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4711b5fca8f8Stomee KM_NOSLEEP) != 0) {
47121a5e258fSJosef 'Jeff' Sipek atomic_inc_64(&cp->cache_alloc_fail);
4713b5fca8f8Stomee kmem_slab_free(cp, callback->kmm_to_buf);
4714b5fca8f8Stomee kmem_move_end(cp, callback);
4715b5fca8f8Stomee return;
4716b5fca8f8Stomee }
4717b5fca8f8Stomee
4718b5fca8f8Stomee cp->cache_defrag->kmd_callbacks++;
4719b5fca8f8Stomee cp->cache_defrag->kmd_thread = curthread;
4720b5fca8f8Stomee cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4721b5fca8f8Stomee cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4722b5fca8f8Stomee DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4723b5fca8f8Stomee callback);
4724b5fca8f8Stomee
4725b5fca8f8Stomee response = cp->cache_move(callback->kmm_from_buf,
4726b5fca8f8Stomee callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4727b5fca8f8Stomee
4728b5fca8f8Stomee DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
4729b5fca8f8Stomee callback, kmem_cbrc_t, response);
4730b5fca8f8Stomee cp->cache_defrag->kmd_thread = NULL;
4731b5fca8f8Stomee cp->cache_defrag->kmd_from_buf = NULL;
4732b5fca8f8Stomee cp->cache_defrag->kmd_to_buf = NULL;
4733b5fca8f8Stomee
4734b5fca8f8Stomee if (response == KMEM_CBRC_YES) {
4735b5fca8f8Stomee cp->cache_defrag->kmd_yes++;
4736b5fca8f8Stomee kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4737686031edSTom Erickson /* slab safe to access until kmem_move_end() */
4738686031edSTom Erickson if (sp->slab_refcnt == 0)
4739686031edSTom Erickson cp->cache_defrag->kmd_slabs_freed++;
4740b5fca8f8Stomee mutex_enter(&cp->cache_lock);
4741b5fca8f8Stomee kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4742b5fca8f8Stomee mutex_exit(&cp->cache_lock);
4743b5fca8f8Stomee kmem_move_end(cp, callback);
4744b5fca8f8Stomee return;
4745b5fca8f8Stomee }
4746b5fca8f8Stomee
4747b5fca8f8Stomee switch (response) {
4748b5fca8f8Stomee case KMEM_CBRC_NO:
4749b5fca8f8Stomee cp->cache_defrag->kmd_no++;
4750b5fca8f8Stomee mutex_enter(&cp->cache_lock);
4751b5fca8f8Stomee kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4752b5fca8f8Stomee mutex_exit(&cp->cache_lock);
4753b5fca8f8Stomee break;
4754b5fca8f8Stomee case KMEM_CBRC_LATER:
4755b5fca8f8Stomee cp->cache_defrag->kmd_later++;
4756b5fca8f8Stomee mutex_enter(&cp->cache_lock);
4757b5fca8f8Stomee if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4758b5fca8f8Stomee mutex_exit(&cp->cache_lock);
4759b5fca8f8Stomee break;
4760b5fca8f8Stomee }
4761b5fca8f8Stomee
4762b5fca8f8Stomee if (++sp->slab_later_count >= KMEM_DISBELIEF) {
4763b5fca8f8Stomee kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4764b5fca8f8Stomee } else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) {
4765b5fca8f8Stomee sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp,
4766b5fca8f8Stomee callback->kmm_from_buf);
4767b5fca8f8Stomee }
4768b5fca8f8Stomee mutex_exit(&cp->cache_lock);
4769b5fca8f8Stomee break;
4770b5fca8f8Stomee case KMEM_CBRC_DONT_NEED:
4771b5fca8f8Stomee cp->cache_defrag->kmd_dont_need++;
4772b5fca8f8Stomee kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4773686031edSTom Erickson if (sp->slab_refcnt == 0)
4774686031edSTom Erickson cp->cache_defrag->kmd_slabs_freed++;
4775b5fca8f8Stomee mutex_enter(&cp->cache_lock);
4776b5fca8f8Stomee kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4777b5fca8f8Stomee mutex_exit(&cp->cache_lock);
4778b5fca8f8Stomee break;
4779b5fca8f8Stomee case KMEM_CBRC_DONT_KNOW:
4780aa7175abSBryan Cantrill /*
4781aa7175abSBryan Cantrill * If we don't know if we can move this buffer or not, we'll
4782aa7175abSBryan Cantrill * just assume that we can't: if the buffer is in fact free,
4783aa7175abSBryan Cantrill * then it is sitting in one of the per-CPU magazines or in
4784aa7175abSBryan Cantrill * a full magazine in the depot layer. Either way, because
4785aa7175abSBryan Cantrill * defrag is induced in the same logic that reaps a cache,
4786aa7175abSBryan Cantrill * it's likely that full magazines will be returned to the
4787aa7175abSBryan Cantrill * system soon (thereby accomplishing what we're trying to
4788aa7175abSBryan Cantrill * accomplish here: return those magazines to their slabs).
4789aa7175abSBryan Cantrill * Given this, any work that we might do now to locate a buffer
4790aa7175abSBryan Cantrill * in a magazine is wasted (and expensive!) work; we bump
4791aa7175abSBryan Cantrill * a counter in this case and otherwise assume that we can't
4792aa7175abSBryan Cantrill * move it.
4793aa7175abSBryan Cantrill */
4794b5fca8f8Stomee cp->cache_defrag->kmd_dont_know++;
4795b5fca8f8Stomee break;
4796b5fca8f8Stomee default:
4797b5fca8f8Stomee panic("'%s' (%p) unexpected move callback response %d\n",
4798b5fca8f8Stomee cp->cache_name, (void *)cp, response);
4799b5fca8f8Stomee }
4800b5fca8f8Stomee
4801b5fca8f8Stomee kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
4802b5fca8f8Stomee kmem_move_end(cp, callback);
4803b5fca8f8Stomee }
4804b5fca8f8Stomee
4805b5fca8f8Stomee /* Return B_FALSE if there is insufficient memory for the move request. */
4806b5fca8f8Stomee static boolean_t
4807b5fca8f8Stomee kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
4808b5fca8f8Stomee {
4809b5fca8f8Stomee void *to_buf;
4810b5fca8f8Stomee avl_index_t index;
4811b5fca8f8Stomee kmem_move_t *callback, *pending;
4812686031edSTom Erickson ulong_t n;
4813b5fca8f8Stomee
4814b5fca8f8Stomee ASSERT(taskq_member(kmem_taskq, curthread));
4815b5fca8f8Stomee ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4816b5fca8f8Stomee ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
4817b5fca8f8Stomee
4818b5fca8f8Stomee callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP);
4819aa7175abSBryan Cantrill
4820aa7175abSBryan Cantrill if (callback == NULL)
4821b5fca8f8Stomee return (B_FALSE);
4822b5fca8f8Stomee
4823b5fca8f8Stomee callback->kmm_from_slab = sp;
4824b5fca8f8Stomee callback->kmm_from_buf = buf;
4825b5fca8f8Stomee callback->kmm_flags = flags;
4826b5fca8f8Stomee
4827b5fca8f8Stomee mutex_enter(&cp->cache_lock);
4828b5fca8f8Stomee
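	/*
	 * With no partial slabs, or only the one we would allocate the
	 * destination buffer from, there is nothing to consolidate.
	 */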
4829686031edSTom Erickson n = avl_numnodes(&cp->cache_partial_slabs);
4830686031edSTom Erickson if ((n == 0) || ((n == 1) && !(flags & KMM_DEBUG))) {
4831b5fca8f8Stomee mutex_exit(&cp->cache_lock);
4832b5fca8f8Stomee kmem_cache_free(kmem_move_cache, callback);
4833b5fca8f8Stomee return (B_TRUE); /* there is no need for the move request */
4834b5fca8f8Stomee }
4835b5fca8f8Stomee
4836b5fca8f8Stomee pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
4837b5fca8f8Stomee if (pending != NULL) {
4838b5fca8f8Stomee /*
4839b5fca8f8Stomee * If the move is already pending and we're desperate now,
4840b5fca8f8Stomee * update the move flags.
4841b5fca8f8Stomee */
4842b5fca8f8Stomee if (flags & KMM_DESPERATE) {
4843b5fca8f8Stomee pending->kmm_flags |= KMM_DESPERATE;
4844b5fca8f8Stomee }
4845b5fca8f8Stomee mutex_exit(&cp->cache_lock);
4846b5fca8f8Stomee kmem_cache_free(kmem_move_cache, callback);
4847b5fca8f8Stomee return (B_TRUE);
4848b5fca8f8Stomee }
4849b5fca8f8Stomee
4850b942e89bSDavid Valin to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
4851b942e89bSDavid Valin B_FALSE);
4852b5fca8f8Stomee callback->kmm_to_buf = to_buf;
4853b5fca8f8Stomee avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
4854b5fca8f8Stomee
4855b5fca8f8Stomee mutex_exit(&cp->cache_lock);
4856b5fca8f8Stomee
4857b5fca8f8Stomee if (!taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer,
4858b5fca8f8Stomee callback, TQ_NOSLEEP)) {
4859b5fca8f8Stomee mutex_enter(&cp->cache_lock);
4860b5fca8f8Stomee avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4861b5fca8f8Stomee mutex_exit(&cp->cache_lock);
486225e2c9cfStomee kmem_slab_free(cp, to_buf);
4863b5fca8f8Stomee kmem_cache_free(kmem_move_cache, callback);
4864b5fca8f8Stomee return (B_FALSE);
4865b5fca8f8Stomee }
4866b5fca8f8Stomee
4867b5fca8f8Stomee return (B_TRUE);
4868b5fca8f8Stomee }
4869b5fca8f8Stomee
4870b5fca8f8Stomee static void
4871b5fca8f8Stomee kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
4872b5fca8f8Stomee {
4873b5fca8f8Stomee avl_index_t index;
4874b5fca8f8Stomee
4875b5fca8f8Stomee ASSERT(cp->cache_defrag != NULL);
4876b5fca8f8Stomee ASSERT(taskq_member(kmem_move_taskq, curthread));
4877b5fca8f8Stomee ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4878b5fca8f8Stomee
4879b5fca8f8Stomee mutex_enter(&cp->cache_lock);
4880b5fca8f8Stomee VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
4881b5fca8f8Stomee callback->kmm_from_buf, &index) != NULL);
4882b5fca8f8Stomee avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4883b5fca8f8Stomee if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
4884b5fca8f8Stomee list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
4885b5fca8f8Stomee kmem_slab_t *sp;
4886b5fca8f8Stomee
4887b5fca8f8Stomee /*
4888b5fca8f8Stomee * The last pending move completed. Release all slabs from the
4889b5fca8f8Stomee * front of the dead list except for any slab at the tail that
4890b5fca8f8Stomee * needs to be released from the context of kmem_move_buffers().
4891b5fca8f8Stomee * kmem deferred unmapping the buffers on these slabs in order
4892b5fca8f8Stomee * to guarantee that buffers passed to the move callback have
4893b5fca8f8Stomee * been touched only by kmem or by the client itself.
4894b5fca8f8Stomee */
4895b5fca8f8Stomee while ((sp = list_remove_head(deadlist)) != NULL) {
4896b5fca8f8Stomee if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
4897b5fca8f8Stomee list_insert_tail(deadlist, sp);
4898b5fca8f8Stomee break;
4899b5fca8f8Stomee }
4900b5fca8f8Stomee cp->cache_defrag->kmd_deadcount--;
4901b5fca8f8Stomee cp->cache_slab_destroy++;
4902b5fca8f8Stomee mutex_exit(&cp->cache_lock);
4903b5fca8f8Stomee kmem_slab_destroy(cp, sp);
4904b5fca8f8Stomee mutex_enter(&cp->cache_lock);
4905b5fca8f8Stomee }
4906b5fca8f8Stomee }
4907b5fca8f8Stomee mutex_exit(&cp->cache_lock);
4908b5fca8f8Stomee kmem_cache_free(kmem_move_cache, callback);
4909b5fca8f8Stomee }
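
/*
 * Illustrative sketch, not part of the kmem implementation: the dead-list
 * drain performed by kmem_move_end() above, reduced to self-contained
 * helpers. Entries are released from the front of the list until one that is
 * still move-pending is found; that entry is deferred to the tail, as in the
 * original, and the drain stops so its owner can release it later. The
 * ex_slab type and EX_MOVE_PENDING flag are hypothetical stand-ins for
 * kmem_slab_t and KMEM_SLAB_MOVE_PENDING.
 */
#include <stdlib.h>

#define	EX_MOVE_PENDING	0x1

typedef struct ex_slab {
	struct ex_slab *xs_next;
	int xs_flags;
} ex_slab_t;

/* Remove and return the head of the list, or NULL if the list is empty. */
static ex_slab_t *
ex_remove_head(ex_slab_t **listp)
{
	ex_slab_t *sp = *listp;

	if (sp != NULL)
		*listp = sp->xs_next;
	return (sp);
}

/* Append an entry at the tail of the list. */
static void
ex_insert_tail(ex_slab_t **listp, ex_slab_t *sp)
{
	while (*listp != NULL)
		listp = &(*listp)->xs_next;
	sp->xs_next = NULL;
	*listp = sp;
}

/*
 * Destroy slabs from the front of the dead list until one that is still
 * move-pending is found; defer that one to the tail and stop. Returns the
 * number of slabs destroyed.
 */
static int
ex_drain_deadlist(ex_slab_t **listp)
{
	ex_slab_t *sp;
	int destroyed = 0;

	while ((sp = ex_remove_head(listp)) != NULL) {
		if (sp->xs_flags & EX_MOVE_PENDING) {
			ex_insert_tail(listp, sp);
			break;
		}
		free(sp);		/* stands in for kmem_slab_destroy() */
		destroyed++;
	}
	return (destroyed);
}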

/*
 * Move buffers from least used slabs first by scanning backwards from the end
 * of the partial slab list. Scan at most max_scan candidate slabs and move
 * buffers from at most max_slabs slabs (0 for all partial slabs in both
 * cases). If desperate to reclaim memory, move buffers from any partial slab,
 * otherwise skip slabs with a ratio of allocated buffers at or above the
 * current threshold. Return the number of unskipped slabs (at most max_slabs,
 * -1 if the scan is aborted) so that the caller can adjust the reclaimability
 * threshold depending on how many reclaimable slabs it finds.
 *
 * kmem_move_buffers() drops and reacquires cache_lock every time it issues a
 * move request, since it is not valid for kmem_move_begin() to call
 * kmem_cache_alloc() or taskq_dispatch() with cache_lock held.
 */
static int
kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
    int flags)
{
	kmem_slab_t *sp;
	void *buf;
	int i, j;	/* slab index, buffer index */
	int s;		/* reclaimable slabs */
	int b;		/* allocated (movable) buffers on reclaimable slab */
	boolean_t success;
	int refcnt;
	int nomove;

	ASSERT(taskq_member(kmem_taskq, curthread));
	ASSERT(MUTEX_HELD(&cp->cache_lock));
	ASSERT(kmem_move_cache != NULL);
	ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
	ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) :
	    avl_numnodes(&cp->cache_partial_slabs) > 1);

	if (kmem_move_blocked) {
		return (0);
	}

	if (kmem_move_fulltilt) {
		flags |= KMM_DESPERATE;
	}

	if (max_scan == 0 || (flags & KMM_DESPERATE)) {
		/*
		 * Scan as many slabs as needed to find the desired number of
		 * candidate slabs.
		 */
		max_scan = (size_t)-1;
	}

	if (max_slabs == 0 || (flags & KMM_DESPERATE)) {
		/* Find as many candidate slabs as possible. */
		max_slabs = (size_t)-1;
	}

	sp = avl_last(&cp->cache_partial_slabs);
	ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
	for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) && (sp != NULL) &&
	    ((sp != avl_first(&cp->cache_partial_slabs)) ||
	    (flags & KMM_DEBUG));
	    sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {

		if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
			continue;
		}
		s++;

		/* Look for allocated buffers to move. */
		for (j = 0, b = 0, buf = sp->slab_base;
		    (j < sp->slab_chunks) && (b < sp->slab_refcnt);
		    buf = (((char *)buf) + cp->cache_chunksize), j++) {

			if (kmem_slab_allocated(cp, sp, buf) == NULL) {
				continue;
			}

			b++;

			/*
			 * Prevent the slab from being destroyed while we drop
			 * cache_lock and while the pending move is not yet
			 * registered. Flag the pending move while
			 * kmd_moves_pending may still be empty, since we can't
			 * yet rely on a non-zero pending move count to prevent
			 * the slab from being destroyed.
			 */
			ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
			sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
			/*
			 * Recheck refcnt and nomove after reacquiring the lock,
			 * since these control the order of partial slabs, and
			 * we want to know if we can pick up the scan where we
			 * left off.
			 */
			refcnt = sp->slab_refcnt;
			nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
			mutex_exit(&cp->cache_lock);

			success = kmem_move_begin(cp, sp, buf, flags);

			/*
			 * Now, before the lock is reacquired, kmem could
			 * process all pending move requests and purge the
			 * deadlist, so that upon reacquiring the lock, sp has
			 * been remapped. Or, the client may free all the
			 * objects on the slab while the pending moves are still
			 * on the taskq. Therefore, the KMEM_SLAB_MOVE_PENDING
			 * flag causes the slab to be put at the end of the
			 * deadlist and prevents it from being destroyed, since
			 * we plan to destroy it here after reacquiring the
			 * lock.
			 */
			mutex_enter(&cp->cache_lock);
			ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
			sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;

			if (sp->slab_refcnt == 0) {
				list_t *deadlist =
				    &cp->cache_defrag->kmd_deadlist;
				list_remove(deadlist, sp);

				if (!avl_is_empty(
				    &cp->cache_defrag->kmd_moves_pending)) {
					/*
					 * A pending move makes it unsafe to
					 * destroy the slab, because even though
					 * the move is no longer needed, the
					 * context where that is determined
					 * requires the slab to exist.
					 * Fortunately, a pending move also
					 * means we don't need to destroy the
					 * slab here, since it will get
					 * destroyed along with any other slabs
					 * on the deadlist after the last
					 * pending move completes.
					 */
					list_insert_head(deadlist, sp);
					return (-1);
				}

				/*
				 * Destroy the slab now if it was completely
				 * freed while we dropped cache_lock and there
				 * are no pending moves. Since slab_refcnt
				 * cannot change once it reaches zero, no new
				 * pending moves from that slab are possible.
				 */
				cp->cache_defrag->kmd_deadcount--;
				cp->cache_slab_destroy++;
				mutex_exit(&cp->cache_lock);
				kmem_slab_destroy(cp, sp);
				mutex_enter(&cp->cache_lock);
				/*
				 * Since we can't pick up the scan where we left
				 * off, abort the scan and say nothing about the
				 * number of reclaimable slabs.
				 */
				return (-1);
			}

			if (!success) {
				/*
				 * Abort the scan if there is not enough memory
				 * for the request and say nothing about the
				 * number of reclaimable slabs.
				 */
				return (-1);
			}

			/*
			 * The slab's position changed while the lock was
			 * dropped, so we don't know where we are in the
			 * sequence any more.
			 */
			if (sp->slab_refcnt != refcnt) {
				/*
				 * If this is a KMM_DEBUG move, the slab_refcnt
				 * may have changed because we allocated a
				 * destination buffer on the same slab. In that
				 * case, we're not interested in counting it.
				 */
				return (-1);
			}
			if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove)
				return (-1);

			/*
			 * Generating a move request allocates a destination
			 * buffer from the slab layer, bumping the first partial
			 * slab if it is completely allocated. If the current
			 * slab becomes the first partial slab as a result, we
			 * can't continue to scan backwards.
			 *
			 * If this is a KMM_DEBUG move and we allocated the
			 * destination buffer from the last partial slab, then
			 * the buffer we're moving is on the same slab and our
			 * slab_refcnt has changed, causing us to return before
			 * reaching here if there are no partial slabs left.
			 */
			ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
			if (sp == avl_first(&cp->cache_partial_slabs)) {
				/*
				 * We're not interested in a second KMM_DEBUG
				 * move.
				 */
				goto end_scan;
			}
		}
	}
end_scan:

	return (s);
}
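
/*
 * Illustrative sketch, not part of the kmem implementation: the candidate
 * selection policy described above for kmem_move_buffers(), reduced to a
 * self-contained example over an array sorted from most- to least-allocated,
 * so that scanning backwards visits the least used slabs first. Slabs whose
 * allocated ratio is at or above the threshold are skipped unless the caller
 * is desperate, and the first (fullest) slab is never visited. Unlike the
 * kernel routine, this version only counts candidates; the ex_* names and the
 * simple percentage test are hypothetical simplifications of
 * kmem_slab_is_reclaimable().
 */
#include <stddef.h>

typedef struct ex_pslab {
	size_t xp_refcnt;	/* allocated buffers; assumed <= xp_chunks */
	size_t xp_chunks;	/* total buffers; assumed nonzero */
} ex_pslab_t;

/*
 * Scan at most max_scan slabs from the tail of the sorted array and count at
 * most max_slabs reclaimable ones; return how many were found.
 */
static size_t
ex_count_reclaimable(const ex_pslab_t *slabs, size_t nslabs, size_t max_scan,
    size_t max_slabs, size_t threshold_pct, int desperate)
{
	size_t i, scanned, found;

	found = 0;
	for (i = nslabs, scanned = 0;
	    i > 1 && scanned < max_scan && found < max_slabs;
	    scanned++) {
		const ex_pslab_t *sp = &slabs[--i];
		size_t pct = (sp->xp_refcnt * 100) / sp->xp_chunks;

		if (!desperate && pct >= threshold_pct)
			continue;	/* too full to be worth reclaiming */
		found++;
	}
	return (found);
}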

typedef struct kmem_move_notify_args {
	kmem_cache_t *kmna_cache;
	void *kmna_buf;
} kmem_move_notify_args_t;

static void
kmem_cache_move_notify_task(void *arg)
{
	kmem_move_notify_args_t *args = arg;
	kmem_cache_t *cp = args->kmna_cache;
	void *buf = args->kmna_buf;
	kmem_slab_t *sp;

	ASSERT(taskq_member(kmem_taskq, curthread));
	ASSERT(list_link_active(&cp->cache_link));

	kmem_free(args, sizeof (kmem_move_notify_args_t));
	mutex_enter(&cp->cache_lock);
	sp = kmem_slab_allocated(cp, NULL, buf);

	/* Ignore the notification if the buffer is no longer allocated. */
	if (sp == NULL) {
		mutex_exit(&cp->cache_lock);
		return;
	}

	/* Ignore the notification if there's no reason to move the buffer. */
	if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
		/*
		 * So far the notification is not ignored. Ignore the
		 * notification if the slab is not marked by an earlier refusal
		 * to move a buffer.
		 */
		if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) &&
		    (sp->slab_later_count == 0)) {
			mutex_exit(&cp->cache_lock);
			return;
		}

		kmem_slab_move_yes(cp, sp, buf);
		ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
		sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
		mutex_exit(&cp->cache_lock);
		/* see kmem_move_buffers() about dropping the lock */
		(void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
		mutex_enter(&cp->cache_lock);
		ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
		sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
		if (sp->slab_refcnt == 0) {
			list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
			list_remove(deadlist, sp);

			if (!avl_is_empty(
			    &cp->cache_defrag->kmd_moves_pending)) {
				list_insert_head(deadlist, sp);
				mutex_exit(&cp->cache_lock);
				return;
			}

			cp->cache_defrag->kmd_deadcount--;
			cp->cache_slab_destroy++;
			mutex_exit(&cp->cache_lock);
			kmem_slab_destroy(cp, sp);
			return;
		}
	} else {
		kmem_slab_move_yes(cp, sp, buf);
	}
	mutex_exit(&cp->cache_lock);
}
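
/*
 * Illustrative sketch, not part of the kmem implementation: the filtering
 * applied to a move notification by kmem_cache_move_notify_task() above. A
 * notification is acted on only if the buffer is still allocated, the cache
 * has more than one partial slab to consolidate into, and the buffer's slab
 * was previously marked by a refused or deferred move. All ex_* names are
 * hypothetical stand-ins for the corresponding cache and slab state.
 */
typedef struct ex_notify_state {
	int xn_still_allocated;	/* buffer still allocated in the cache */
	int xn_partial_slabs;	/* number of partial slabs in the cache */
	int xn_slab_nomove;	/* slab marked by an earlier refusal */
	int xn_later_count;	/* moves previously deferred on this slab */
} ex_notify_state_t;

static int
ex_should_move(const ex_notify_state_t *ns)
{
	if (!ns->xn_still_allocated)
		return (0);	/* stale notification */
	if (ns->xn_partial_slabs <= 1)
		return (0);	/* nothing to consolidate into */
	/* Only bother if a move was refused or deferred here before. */
	return (ns->xn_slab_nomove || ns->xn_later_count > 0);
}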

void
kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
{
	kmem_move_notify_args_t *args;

	args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP);
	if (args != NULL) {
		args->kmna_cache = cp;
		args->kmna_buf = buf;
		if (!taskq_dispatch(kmem_taskq,
		    (task_func_t *)kmem_cache_move_notify_task, args,
		    TQ_NOSLEEP))
			kmem_free(args, sizeof (kmem_move_notify_args_t));
	}
}

static void
kmem_cache_defrag(kmem_cache_t *cp)
{
	size_t n;

	ASSERT(cp->cache_defrag != NULL);

	mutex_enter(&cp->cache_lock);
	n = avl_numnodes(&cp->cache_partial_slabs);
	if (n > 1) {
		/* kmem_move_buffers() drops and reacquires cache_lock */
		cp->cache_defrag->kmd_defrags++;
		(void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
	}
	mutex_exit(&cp->cache_lock);
}

/* Is this cache above the fragmentation threshold? */
static boolean_t
kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
{
	/*
	 *	  nfree            kmem_frag_numer
	 * ------------------  >  ---------------
	 * cp->cache_buftotal      kmem_frag_denom
	 */
	return ((nfree * kmem_frag_denom) >
	    (cp->cache_buftotal * kmem_frag_numer));
}
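
/*
 * Illustrative sketch, not part of the kmem implementation: the
 * cross-multiplied fragmentation test used above, which compares
 * nfree/buftotal against numer/denom without integer division (and therefore
 * without losing the fractional part). The sample values below, and the 1/8
 * ratio, are made up purely for illustration.
 */
#include <stdio.h>
#include <stdint.h>

static int
ex_frag_threshold(uint64_t nfree, uint64_t buftotal, uint64_t numer,
    uint64_t denom)
{
	/* nfree / buftotal > numer / denom, rewritten to avoid division */
	return (nfree * denom > buftotal * numer);
}

int
main(void)
{
	/* 200 free of 1000 total is 20%, which exceeds a 1/8 (12.5%) bound. */
	(void) printf("%d\n", ex_frag_threshold(200, 1000, 1, 8));	/* 1 */
	/* 100 free of 1000 total is 10%, which does not. */
	(void) printf("%d\n", ex_frag_threshold(100, 1000, 1, 8));	/* 0 */
	return (0);
}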

static boolean_t
kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
{
	boolean_t fragmented;
	uint64_t nfree;

	ASSERT(MUTEX_HELD(&cp->cache_lock));
	*doreap = B_FALSE;

	if (kmem_move_fulltilt) {
		if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
			return (B_TRUE);
		}
	} else {
		if ((cp->cache_complete_slab_count + avl_numnodes(
		    &cp->cache_partial_slabs)) < kmem_frag_minslabs) {
			return (B_FALSE);
		}
	}

	nfree = cp->cache_bufslab;
	fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) &&
	    kmem_cache_frag_threshold(cp, nfree));

	/*
	 * Free buffers in the magazine layer appear allocated from the point
	 * of view of the slab layer. We want to know if the slab layer would
	 * appear fragmented if we included free buffers from magazines that
	 * have fallen out of the working set.
	 */
	if (!fragmented) {
		long reap;

		mutex_enter(&cp->cache_depot_lock);
		reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
		reap = MIN(reap, cp->cache_full.ml_total);
		mutex_exit(&cp->cache_depot_lock);

		nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
		if (kmem_cache_frag_threshold(cp, nfree)) {
			*doreap = B_TRUE;
		}
	}

	return (fragmented);
}
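
/*
 * Illustrative sketch, not part of the kmem implementation: the magazine
 * adjustment made by kmem_cache_is_fragmented() above. Buffers sitting in
 * full magazines in the depot look allocated to the slab layer, so the slab
 * free count is topped up with an estimate of how many full magazines have
 * fallen out of the working set (and could therefore be reaped) before the
 * threshold test is applied again. The ex_* names are hypothetical stand-ins
 * for the depot's full-magazine list statistics.
 */
#include <stdint.h>

#define	EX_MIN(a, b)	((a) < (b) ? (a) : (b))

typedef struct ex_depot {
	long xd_reaplimit;	/* most magazines reapable in one pass */
	long xd_min;		/* fewest full magazines seen recently */
	long xd_total;		/* full magazines currently in the depot */
	long xd_magsize;	/* buffers per magazine */
} ex_depot_t;

static uint64_t
ex_adjusted_nfree(uint64_t nfree_slab, const ex_depot_t *dp)
{
	long reap;

	/* Magazines idle for the whole interval are reap candidates. */
	reap = EX_MIN(dp->xd_reaplimit, dp->xd_min);
	reap = EX_MIN(reap, dp->xd_total);
	return (nfree_slab + (uint64_t)reap * (uint64_t)dp->xd_magsize);
}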

/* Called periodically from kmem_taskq */
static void
kmem_cache_scan(kmem_cache_t *cp)
{
	boolean_t reap = B_FALSE;
	kmem_defrag_t *kmd;

	ASSERT(taskq_member(kmem_taskq, curthread));

	mutex_enter(&cp->cache_lock);

	kmd = cp->cache_defrag;
	if (kmd->kmd_consolidate > 0) {
		kmd->kmd_consolidate--;
		mutex_exit(&cp->cache_lock);
		kmem_cache_reap(cp);
		return;
	}

	if (kmem_cache_is_fragmented(cp, &reap)) {
		int slabs_found;

		/*
		 * Consolidate reclaimable slabs from the end of the partial
		 * slab list (scan at most kmem_reclaim_scan_range slabs to
		 * find reclaimable slabs). Keep track of how many candidate
		 * slabs we looked for and how many we actually found so we
		 * can adjust the definition of a candidate slab if we're
		 * having trouble finding them.
		 *
		 * kmem_move_buffers() drops and reacquires cache_lock.
		 */
		kmd->kmd_scans++;
		slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
		    kmem_reclaim_max_slabs, 0);
		if (slabs_found >= 0) {
			kmd->kmd_slabs_sought += kmem_reclaim_max_slabs;
			kmd->kmd_slabs_found += slabs_found;
		}

		if (++kmd->kmd_tries >= kmem_reclaim_scan_range) {
			kmd->kmd_tries = 0;

			/*
			 * If we had difficulty finding candidate slabs in
			 * previous scans, adjust the threshold so that
			 * candidates are easier to find.
			 */
			if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) {
				kmem_adjust_reclaim_threshold(kmd, -1);
			} else if ((kmd->kmd_slabs_found * 2) <
			    kmd->kmd_slabs_sought) {
				kmem_adjust_reclaim_threshold(kmd, 1);
			}
			kmd->kmd_slabs_sought = 0;
			kmd->kmd_slabs_found = 0;
		}
	} else {
		kmem_reset_reclaim_threshold(cp->cache_defrag);
#ifdef	DEBUG
		if (!avl_is_empty(&cp->cache_partial_slabs)) {
			/*
			 * In a debug kernel we want the consolidator to
			 * run occasionally even when there is plenty of
			 * memory.
			 */
			uint16_t debug_rand;

			(void) random_get_bytes((uint8_t *)&debug_rand, 2);
			if (!kmem_move_noreap &&
			    ((debug_rand % kmem_mtb_reap) == 0)) {
				mutex_exit(&cp->cache_lock);
				kmem_cache_reap(cp);
				return;
			} else if ((debug_rand % kmem_mtb_move) == 0) {
				kmd->kmd_scans++;
				(void) kmem_move_buffers(cp,
				    kmem_reclaim_scan_range, 1, KMM_DEBUG);
			}
		}
#endif	/* DEBUG */
	}

	mutex_exit(&cp->cache_lock);

	if (reap)
		kmem_depot_ws_reap(cp);
}
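
/*
 * Illustrative sketch, not part of the kmem implementation: the feedback loop
 * at the end of kmem_cache_scan() above, which tunes how strict the
 * "reclaimable slab" definition is based on how many candidates recent scans
 * actually produced. Every scan_range scans, if every candidate sought was
 * found the threshold is tightened, and if fewer than half were found it is
 * loosened. The ex_* names, the adjustment step, and the threshold's units
 * are hypothetical.
 */
typedef struct ex_defrag_stats {
	unsigned xd_tries;		/* scans since the last adjustment */
	unsigned xd_slabs_sought;	/* candidates asked for */
	unsigned xd_slabs_found;	/* candidates actually found */
	int xd_threshold;		/* smaller means pickier */
} ex_defrag_stats_t;

static void
ex_adjust_threshold(ex_defrag_stats_t *kd, unsigned scan_range)
{
	if (++kd->xd_tries < scan_range)
		return;
	kd->xd_tries = 0;

	if (kd->xd_slabs_found == kd->xd_slabs_sought) {
		kd->xd_threshold -= 1;	/* easy to find; be pickier */
	} else if (kd->xd_slabs_found * 2 < kd->xd_slabs_sought) {
		kd->xd_threshold += 1;	/* hard to find; be laxer */
	}
	kd->xd_slabs_sought = 0;
	kd->xd_slabs_found = 0;
}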