/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2018, Joyent, Inc.
 * Copyright 2023 Oxide Computer Company
 */

/*
 * Kernel memory allocator, as described in the following two papers and a
 * statement about the consolidator:
 *
 * Jeff Bonwick,
 * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
 * Proceedings of the Summer 1994 Usenix Conference.
 * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
 *
 * Jeff Bonwick and Jonathan Adams,
 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
 * Arbitrary Resources.
 * Proceedings of the 2001 Usenix Conference.
 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
 *
 * kmem Slab Consolidator Big Theory Statement:
 *
 * 1. Motivation
 *
 * As stated in Bonwick94, slabs provide the following advantages over other
 * allocation structures in terms of memory fragmentation:
 *
 *  - Internal fragmentation (per-buffer wasted space) is minimal.
 *  - Severe external fragmentation (unused buffers on the free list) is
 *    unlikely.
 *
 * Segregating objects by size eliminates one source of external fragmentation,
 * and according to Bonwick:
 *
 *   The other reason that slabs reduce external fragmentation is that all
 *   objects in a slab are of the same type, so they have the same lifetime
 *   distribution. The resulting segregation of short-lived and long-lived
 *   objects at slab granularity reduces the likelihood of an entire page being
 *   held hostage due to a single long-lived allocation [Barrett93, Hanson90].
 *
 * While unlikely, severe external fragmentation remains possible. Clients that
 * allocate both short- and long-lived objects from the same cache cannot
 * anticipate the distribution of long-lived objects within the allocator's slab
 * implementation. Even a small percentage of long-lived objects distributed
 * randomly across many slabs can lead to a worst case scenario where the client
 * frees the majority of its objects and the system gets back almost none of the
 * slabs. Despite the client doing what it reasonably can to help the system
 * reclaim memory, the allocator cannot shake free enough slabs because of
 * lonely allocations stubbornly hanging on. Although the allocator is in a
 * position to diagnose the fragmentation, there is nothing that the allocator
 * by itself can do about it. It only takes a single allocated object to prevent
 * an entire slab from being reclaimed, and any object handed out by
 * kmem_cache_alloc() is by definition in the client's control. Conversely,
 * although the client is in a position to move a long-lived object, it has no
 * way of knowing if the object is causing fragmentation, and if so, where to
 * move it. A solution necessarily requires further cooperation between the
 * allocator and the client.
 *
 * 2. Move Callback
 *
 * The kmem slab consolidator therefore adds a move callback to the
 * allocator/client interface, improving worst-case external fragmentation in
 * kmem caches that supply a function to move objects from one memory location
 * to another. In a situation of low memory kmem attempts to consolidate all of
 * a cache's slabs at once; otherwise it works slowly to bring external
 * fragmentation within the 1/8 limit guaranteed for internal fragmentation,
 * thereby helping to avoid a low memory situation in the future.
 *
 * The callback has the following signature:
 *
 *	kmem_cbrc_t move(void *old, void *new, size_t size, void *user_arg)
 *
 * It supplies the kmem client with two addresses: the allocated object that
 * kmem wants to move and a buffer selected by kmem for the client to use as the
 * copy destination. The callback is kmem's way of saying "Please get off of
 * this buffer and use this one instead." kmem knows where it wants to move the
 * object in order to best reduce fragmentation. All the client needs to know
 * about the second argument (void *new) is that it is an allocated, constructed
 * object ready to take the contents of the old object. When the move function
 * is called, the system is likely to be low on memory, and the new object
 * spares the client from having to worry about allocating memory for the
 * requested move. The third argument supplies the size of the object, in case a
 * single move function handles multiple caches whose objects differ only in
 * size (such as zio_buf_512, zio_buf_1024, etc). Finally, the same optional
 * user argument passed to the constructor, destructor, and reclaim functions is
 * also passed to the move callback.
 *
 * 2.1 Setting the Move Callback
 *
 * The client sets the move callback after creating the cache and before
 * allocating from it:
 *
 *	object_cache = kmem_cache_create(...);
 *	kmem_cache_set_move(object_cache, object_move);
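 *
 * As a fuller illustration, a sketch only: the cache name, object size, and
 * the object_constructor/object_destructor functions below are assumptions
 * made up for the example (sketches of a matching constructor and destructor
 * appear in section 2.3.2 below), not prescribed by the interface:
 *
 *	object_cache = kmem_cache_create("object_cache", sizeof (object_t),
 *	    0, object_constructor, object_destructor, NULL, NULL, NULL, 0);
 *	kmem_cache_set_move(object_cache, object_move);
 *	// allocations may begin once the move callback is registered
 *	object = kmem_cache_alloc(object_cache, KM_SLEEP);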
 *
 * 2.2 Move Callback Return Values
 *
 * Only the client knows about its own data and when is a good time to move it.
 * The client is cooperating with kmem to return unused memory to the system,
 * and kmem respectfully accepts this help at the client's convenience. When
 * asked to move an object, the client can respond with any of the following:
 *
 *	typedef enum kmem_cbrc {
 *		KMEM_CBRC_YES,
 *		KMEM_CBRC_NO,
 *		KMEM_CBRC_LATER,
 *		KMEM_CBRC_DONT_NEED,
 *		KMEM_CBRC_DONT_KNOW
 *	} kmem_cbrc_t;
 *
 * The client must not explicitly kmem_cache_free() either of the objects passed
 * to the callback, since kmem wants to free them directly to the slab layer
 * (bypassing the per-CPU magazine layer). The response tells kmem which of the
 * objects to free:
 *
 * YES:        (Did it) The client moved the object, so kmem frees the old one.
 * NO:         (Never) The client refused, so kmem frees the new object (the
 *             unused copy destination). kmem also marks the slab of the old
 *             object so as not to bother the client with further callbacks for
 *             that object as long as the slab remains on the partial slab list.
 *             (The system won't be getting the slab back as long as the
 *             immovable object holds it hostage, so there's no point in moving
 *             any of its objects.)
 * LATER:      The client is using the object and cannot move it now, so kmem
 *             frees the new object (the unused copy destination). kmem still
 *             attempts to move other objects off the slab, since it expects to
 *             succeed in clearing the slab in a later callback. The client
 *             should use LATER instead of NO if the object is likely to become
 *             movable very soon.
 * DONT_NEED:  The client no longer needs the object, so kmem frees the old
 *             along with the new object (the unused copy destination). This
 *             response is the client's opportunity to be a model citizen and
 *             give back as much as it can.
 * DONT_KNOW:  The client does not know about the object because
 *             a) the client has just allocated the object and not yet put it
 *                wherever it expects to find known objects
 *             b) the client has removed the object from wherever it expects to
 *                find known objects and is about to free it, or
 *             c) the client has freed the object.
 *             In all these cases (a, b, and c) kmem frees the new object (the
 *             unused copy destination). In the first case, the object is in
 *             use and the correct action is that for LATER; in the latter two
 *             cases, we know that the object is either freed or about to be
 *             freed, in which case it is either already in a magazine or about
 *             to be in one. In these cases, we know that the object will either
 *             be reallocated and reused, or it will end up in a full magazine
 *             that will be reaped (thereby liberating the slab). Because it
 *             is prohibitively expensive to differentiate these cases, and
 *             because the defrag code is executed when we're low on memory
 *             (thereby biasing the system to reclaim full magazines) we treat
 *             all DONT_KNOW cases as LATER and rely on cache reaping to
 *             generally clean up full magazines. While we take the same action
 *             for these cases, we maintain their semantic distinction: if
 *             defragmentation is not occurring, it is useful to know if this
 *             is due to objects in use (LATER) or objects in an unknown state
 *             of transition (DONT_KNOW).
 *
 * 2.3 Object States
 *
 * Neither kmem nor the client can be assumed to know the object's whereabouts
 * at the time of the callback. An object belonging to a kmem cache may be in
 * any of the following states:
 *
 * 1. Uninitialized on the slab
 * 2. Allocated from the slab but not constructed (still uninitialized)
 * 3. Allocated from the slab, constructed, but not yet ready for business
 *    (not in a valid state for the move callback)
 * 4. In use (valid and known to the client)
 * 5. About to be freed (no longer in a valid state for the move callback)
 * 6. Freed to a magazine (still constructed)
 * 7. Allocated from a magazine, not yet ready for business (not in a valid
 *    state for the move callback), and about to return to state #4
 * 8. Deconstructed on a magazine that is about to be freed
 * 9. Freed to the slab
 *
 * Since the move callback may be called at any time while the object is in any
 * of the above states (except state #1), the client needs a safe way to
 * determine whether or not it knows about the object. Specifically, the client
 * needs to know whether or not the object is in state #4, the only state in
 * which a move is valid. If the object is in any other state, the client should
 * immediately return KMEM_CBRC_DONT_KNOW, since it is unsafe to access any of
 * the object's fields.
 *
 * Note that although an object may be in state #4 when kmem initiates the move
 * request, the object may no longer be in that state by the time kmem actually
 * calls the move function. Not only does the client free objects
 * asynchronously, kmem itself puts move requests on a queue where they are
 * pending until kmem processes them from another context. Also, objects freed
 * to a magazine appear allocated from the point of view of the slab layer, so
 * kmem may even initiate requests for objects in a state other than state #4.
 *
 * 2.3.1 Magazine Layer
 *
 * An important insight revealed by the states listed above is that the magazine
 * layer is populated only by kmem_cache_free(). Magazines of constructed
 * objects are never populated directly from the slab layer (which contains raw,
 * unconstructed objects). Whenever an allocation request cannot be satisfied
 * from the magazine layer, the magazines are bypassed and the request is
 * satisfied from the slab layer (creating a new slab if necessary). kmem calls
 * the object constructor only when allocating from the slab layer, and only in
 * response to kmem_cache_alloc() or to prepare the destination buffer passed in
 * the move callback. kmem does not preconstruct objects in anticipation of
 * kmem_cache_alloc().
 *
 * 2.3.2 Object Constructor and Destructor
 *
 * If the client supplies a destructor, it must be valid to call the destructor
 * on a newly created object (immediately after the constructor).
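 *
 * For example, a constructor/destructor pair obeying that rule might look
 * like the following sketch (the object_t layout and its o_lock member are
 * assumptions made up for the example):
 *
 *	static int
 *	object_constructor(void *buf, void *user_arg, int kmflags)
 *	{
 *		object_t *op = buf;
 *
 *		mutex_init(&op->o_lock, NULL, MUTEX_DEFAULT, NULL);
 *		// invalid (low order bit set) until the object is inserted
 *		// into its container; see section 2.4
 *		op->o_container = (void *)0x1;
 *		return (0);
 *	}
 *
 *	static void
 *	object_destructor(void *buf, void *user_arg)
 *	{
 *		object_t *op = buf;
 *
 *		// relies only on state the constructor established, so it
 *		// is valid immediately after object_constructor()
 *		mutex_destroy(&op->o_lock);
 *	}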
 *
 * 2.4 Recognizing Known Objects
 *
 * There is a simple test to determine safely whether or not the client knows
 * about a given object in the move callback. It relies on the fact that kmem
 * guarantees that the object of the move callback has only been touched by the
 * client itself or else by kmem. kmem does this by ensuring that none of the
 * cache's slabs are freed to the virtual memory (VM) subsystem while a move
 * callback is pending. When the last object on a slab is freed, if there is a
 * pending move, kmem puts the slab on a per-cache dead list and defers freeing
 * slabs on that list until all pending callbacks are completed. That way,
 * clients can be certain that the object of a move callback is in one of the
 * states listed above, making it possible to distinguish known objects (in
 * state #4) using the two low order bits of any pointer member (with the
 * exception of 'char *' or 'short *' which may not be 4-byte aligned on some
 * platforms).
 *
 * The test works as long as the client always transitions objects from state #4
 * (known, in use) to state #5 (about to be freed, invalid) by setting the low
 * order bit of the client-designated pointer member. Since kmem only writes
 * invalid memory patterns, such as 0xbaddcafe to uninitialized memory and
 * 0xdeadbeef to freed memory, any scribbling on the object done by kmem is
 * guaranteed to set at least one of the two low order bits. Therefore, given an
 * object with a back pointer to a 'container_t *o_container', the client can
 * test
 *
 *	container_t *container = object->o_container;
 *	if ((uintptr_t)container & 0x3) {
 *		return (KMEM_CBRC_DONT_KNOW);
 *	}
 *
 * Typically, an object will have a pointer to some structure with a list or
 * hash where objects from the cache are kept while in use. Assuming that the
 * client has some way of knowing that the container structure is valid and will
 * not go away during the move, and assuming that the structure includes a lock
 * to protect whatever collection is used, then the client would continue as
 * follows:
 *
 *	// Ensure that the container structure does not go away.
 *	if (container_hold(container) == 0) {
 *		return (KMEM_CBRC_DONT_KNOW);
 *	}
 *	mutex_enter(&container->c_objects_lock);
 *	if (container != object->o_container) {
 *		mutex_exit(&container->c_objects_lock);
 *		container_rele(container);
 *		return (KMEM_CBRC_DONT_KNOW);
 *	}
 *
 * At this point the client knows that the object cannot be freed as long as
 * c_objects_lock is held. Note that after acquiring the lock, the client must
 * recheck the o_container pointer in case the object was removed just before
 * acquiring the lock.
 *
 * When the client is about to free an object, it must first remove that object
 * from the list, hash, or other structure where it is kept. At that time, to
 * mark the object so it can be distinguished from the remaining, known objects,
 * the client sets the designated low order bit:
 *
 *	mutex_enter(&container->c_objects_lock);
 *	object->o_container = (void *)((uintptr_t)object->o_container | 0x1);
 *	list_remove(&container->c_objects, object);
 *	mutex_exit(&container->c_objects_lock);
 *
 * In the common case, the object is freed to the magazine layer, where it may
 * be reused on a subsequent allocation without the overhead of calling the
 * constructor. While in the magazine it appears allocated from the point of
 * view of the slab layer, making it a candidate for the move callback. Most
 * objects unrecognized by the client in the move callback fall into this
 * category and are cheaply distinguished from known objects by the test
 * described earlier. Because searching magazines is prohibitively expensive
 * for kmem, clients that do not mark freed objects (and therefore return
 * KMEM_CBRC_DONT_KNOW for large numbers of objects) may find defragmentation
 * efficacy reduced.
 *
 * Invalidating the designated pointer member before freeing the object marks
 * the object to be avoided in the callback, and conversely, assigning a valid
 * value to the designated pointer member after allocating the object makes the
 * object fair game for the callback:
 *
 *	... allocate object ...
 *	... set any initial state not set by the constructor ...
 *
 *	mutex_enter(&container->c_objects_lock);
 *	list_insert_tail(&container->c_objects, object);
 *	membar_producer();
 *	object->o_container = container;
 *	mutex_exit(&container->c_objects_lock);
 *
 * Note that everything else must be valid before setting o_container makes the
 * object fair game for the move callback. The membar_producer() call ensures
 * that all the object's state is written to memory before setting the pointer
 * that transitions the object from state #3 or #7 (allocated, constructed, not
 * yet in use) to state #4 (in use, valid). That's important because the move
 * function has to check the validity of the pointer before it can safely
 * acquire the lock protecting the collection where it expects to find known
 * objects.
 *
 * This method of distinguishing known objects observes the usual symmetry:
 * invalidating the designated pointer is the first thing the client does before
 * freeing the object, and setting the designated pointer is the last thing the
 * client does after allocating the object. Of course, the client is not
 * required to use this method. Fundamentally, how the client recognizes known
 * objects is completely up to the client, but this method is recommended as an
 * efficient and safe way to take advantage of the guarantees made by kmem. If
 * the entire object is arbitrary data without any markable bits from a suitable
 * pointer member, then the client must find some other method, such as
 * searching a hash table of known objects.
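 *
 * As a sketch of that fallback (known_objects, known_objects_lock, and
 * object_lookup() are hypothetical names invented for the example), the
 * client records every object it knows in a table keyed by address, and the
 * move callback treats a lookup miss as an unknown object:
 *
 *	// the lookup keys on the address only; the object is never
 *	// dereferenced unless it is found in the table
 *	mutex_enter(&known_objects_lock);
 *	if (object_lookup(&known_objects, object) == NULL) {
 *		mutex_exit(&known_objects_lock);
 *		return (KMEM_CBRC_DONT_KNOW);
 *	}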
 *
 * 2.5 Preventing Objects From Moving
 *
 * Besides a way to distinguish known objects, the other thing that the client
 * needs is a strategy to ensure that an object will not move while the client
 * is actively using it. The details of satisfying this requirement tend to be
 * highly cache-specific. It might seem that the same rules that let a client
 * remove an object safely should also decide when an object can be moved
 * safely. However, any object state that makes a removal attempt invalid is
 * likely to be long-lasting for objects that the client does not expect to
 * remove. kmem knows nothing about the object state and is equally likely (from
 * the client's point of view) to request a move for any object in the cache,
 * whether prepared for removal or not. Even a low percentage of objects stuck
 * in place by unremovability will defeat the consolidator if the stuck objects
 * are the same long-lived allocations likely to hold slabs hostage.
 * Fundamentally, the consolidator is not aimed at common cases. Severe external
 * fragmentation is a worst case scenario manifested as sparsely allocated
 * slabs, by definition a low percentage of the cache's objects. When deciding
 * what makes an object movable, keep in mind the goal of the consolidator: to
 * bring worst-case external fragmentation within the limits guaranteed for
 * internal fragmentation. Removability is a poor criterion if it is likely to
 * exclude more than an insignificant percentage of objects for long periods of
 * time.
 *
 * A tricky general solution exists, and it has the advantage of letting you
 * move any object at almost any moment, practically eliminating the likelihood
 * that an object can hold a slab hostage. However, if there is a cache-specific
 * way to ensure that an object is not actively in use in the vast majority of
 * cases, a simpler solution that leverages this cache-specific knowledge is
 * preferred.
 *
 * 2.5.1 Cache-Specific Solution
 *
 * As an example of a cache-specific solution, the ZFS znode cache takes
 * advantage of the fact that the vast majority of znodes are only being
 * referenced from the DNLC. (A typical case might be a few hundred in active
 * use and a hundred thousand in the DNLC.) In the move callback, after the ZFS
 * client has established that it recognizes the znode and can access its fields
 * safely (using the method described earlier), it then tests whether the znode
 * is referenced by anything other than the DNLC. If so, it assumes that the
 * znode may be in active use and is unsafe to move, so it drops its locks and
 * returns KMEM_CBRC_LATER. The advantage of this strategy is that everywhere
 * else znodes are used, no change is needed to protect against the possibility
 * of the znode moving. The disadvantage is that it remains possible for an
 * application to hold a znode slab hostage with an open file descriptor.
 * However, this case ought to be rare and the consolidator has a way to deal
 * with it: If the client responds KMEM_CBRC_LATER repeatedly for the same
 * object, kmem eventually stops believing it and treats the slab as if the
 * client had responded KMEM_CBRC_NO. Having marked the hostage slab, kmem can
 * then focus on getting it off of the partial slab list by allocating rather
 * than freeing all of its objects. (Either way of getting a slab off the
 * free list reduces fragmentation.)
 *
 * 2.5.2 General Solution
 *
 * The general solution, on the other hand, requires an explicit hold everywhere
 * the object is used to prevent it from moving. To keep the client locking
 * strategy as uncomplicated as possible, kmem guarantees the simplifying
 * assumption that move callbacks are sequential, even across multiple caches.
 * Internally, a global queue processed by a single thread supports all caches
 * implementing the callback function. No matter how many caches supply a move
 * function, the consolidator never moves more than one object at a time, so the
 * client does not have to worry about tricky lock ordering involving several
 * related objects from different kmem caches.
 *
 * The general solution implements the explicit hold as a read-write lock, which
 * allows multiple readers to access an object from the cache simultaneously
 * while a single writer is excluded from moving it. A single rwlock for the
 * entire cache would lock out all threads from using any of the cache's objects
 * even though only a single object is being moved, so to reduce contention,
 * the client can fan out the single rwlock into an array of rwlocks hashed by
 * the object address, making it probable that moving one object will not
 * prevent other threads from using a different object. The rwlock cannot be a
 * member of the object itself, because the possibility of the object moving
 * makes it unsafe to access any of the object's fields until the lock is
 * acquired.
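 *
 * Such a fan-out might look like the following sketch, where the table size
 * and hash shift are illustrative choices only; OBJECT_RWLOCK is the
 * client's own macro, introduced here and used in the examples below:
 *
 *	#define	OBJECT_RWLOCK_COUNT	64	// power of two
 *	static krwlock_t object_rwlock_table[OBJECT_RWLOCK_COUNT];
 *
 *	#define	OBJECT_RWLOCK(op)	(&object_rwlock_table[	\
 *	    ((uintptr_t)(op) >> 3) & (OBJECT_RWLOCK_COUNT - 1)])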
 *
 * Assuming a small, fixed number of locks, it's possible that multiple objects
 * will hash to the same lock. A thread that needs to use multiple objects in
 * the same function may acquire the same lock multiple times. Since rwlocks are
 * reentrant for readers, and since there is never more than a single writer at
 * a time (assuming that the client acquires the lock as a writer only when
 * moving an object inside the callback), there would seem to be no problem.
 * However, a client locking multiple objects in the same function must handle
 * one case of potential deadlock: Assume that thread A needs to prevent both
 * object 1 and object 2 from moving, and thread B, the callback, meanwhile
 * tries to move object 3. It's possible, if objects 1, 2, and 3 all hash to the
 * same lock, that thread A will acquire the lock for object 1 as a reader
 * before thread B sets the lock's write-wanted bit, preventing thread A from
 * reacquiring the lock for object 2 as a reader. Unable to make forward
 * progress, thread A will never release the lock for object 1, resulting in
 * deadlock.
 *
 * There are two ways of avoiding the deadlock just described. The first is to
 * use rw_tryenter() rather than rw_enter() in the callback function when
 * attempting to acquire the lock as a writer. If tryenter discovers that the
 * same object (or another object hashed to the same lock) is already in use, it
 * aborts the callback and returns KMEM_CBRC_LATER. The second way is to use
 * rprwlock_t (declared in common/fs/zfs/sys/rprwlock.h) instead of rwlock_t,
 * since it allows a thread to acquire the lock as a reader in spite of a
 * waiting writer. This second approach insists on moving the object now, no
 * matter how many readers the move function must wait for in order to do so,
 * and could delay the completion of the callback indefinitely (blocking
 * callbacks to other clients). In practice, a less insistent callback using
 * rw_tryenter() returns KMEM_CBRC_LATER infrequently enough that there seems
 * little reason to use anything else.
 *
 * Avoiding deadlock is not the only problem that an implementation using an
 * explicit hold needs to solve. Locking the object in the first place (to
 * prevent it from moving) remains a problem, since the object could move
 * between the time you obtain a pointer to the object and the time you acquire
 * the rwlock hashed to that pointer value. Therefore the client needs to
 * recheck the value of the pointer after acquiring the lock, drop the lock if
 * the value has changed, and try again. This requires a level of indirection:
 * something that points to the object rather than the object itself, that the
 * client can access safely while attempting to acquire the lock. (The object
 * itself cannot be referenced safely because it can move at any time.)
 * The following lock-acquisition function takes whatever is safe to reference
 * (arg), follows its pointer to the object (using function f), and tries as
 * often as necessary to acquire the hashed lock and verify that the object
 * still has not moved:
 *
 *	object_t *
 *	object_hold(object_f f, void *arg)
 *	{
 *		object_t *op;
 *
 *		op = f(arg);
 *		if (op == NULL) {
 *			return (NULL);
 *		}
 *
 *		rw_enter(OBJECT_RWLOCK(op), RW_READER);
 *		while (op != f(arg)) {
 *			rw_exit(OBJECT_RWLOCK(op));
 *			op = f(arg);
 *			if (op == NULL) {
 *				break;
 *			}
 *			rw_enter(OBJECT_RWLOCK(op), RW_READER);
 *		}
 *
 *		return (op);
 *	}
 *
 * The OBJECT_RWLOCK macro hashes the object address to obtain the rwlock. The
 * lock reacquisition loop, while necessary, almost never executes. The function
 * pointer f (used to obtain the object pointer from arg) has the following type
 * definition:
 *
 *	typedef object_t *(*object_f)(void *arg);
 *
 * An object_f implementation is likely to be as simple as accessing a structure
 * member:
 *
 *	object_t *
 *	s_object(void *arg)
 *	{
 *		something_t *sp = arg;
 *		return (sp->s_object);
 *	}
 *
 * The flexibility of a function pointer allows the path to the object to be
 * arbitrarily complex and also supports the notion that depending on where you
 * are using the object, you may need to get it from someplace different.
 *
 * The function that releases the explicit hold is simpler because it does not
 * have to worry about the object moving:
 *
 *	void
 *	object_rele(object_t *op)
 *	{
 *		rw_exit(OBJECT_RWLOCK(op));
 *	}
 *
 * The caller is spared these details so that obtaining and releasing an
 * explicit hold feels like a simple mutex_enter()/mutex_exit() pair. The caller
 * of object_hold() only needs to know that the returned object pointer is valid
 * if not NULL and that the object will not move until released.
 *
 * Although object_hold() prevents an object from moving, it does not prevent it
 * from being freed. The caller must take measures before calling object_hold()
 * (afterwards is too late) to ensure that the held object cannot be freed. The
 * caller must do so without accessing the unsafe object reference, so any lock
 * or reference count used to ensure the continued existence of the object must
 * live outside the object itself.
 *
 * Obtaining a new object is a special case where an explicit hold is impossible
 * for the caller. Any function that returns a newly allocated object (either as
 * a return value, or as an in-out parameter) must return it already held; after
 * the caller gets it is too late, since the object cannot be safely accessed
 * without the level of indirection described earlier. The following
 * object_alloc() example uses the same code shown earlier to transition a new
 * object into the state of being recognized (by the client) as a known object.
 * The function must acquire the hold (rw_enter) before that state transition
 * makes the object movable:
 *
 *	static object_t *
 *	object_alloc(container_t *container)
 *	{
 *		object_t *object = kmem_cache_alloc(object_cache, 0);
 *		... set any initial state not set by the constructor ...
 *		rw_enter(OBJECT_RWLOCK(object), RW_READER);
 *		mutex_enter(&container->c_objects_lock);
 *		list_insert_tail(&container->c_objects, object);
 *		membar_producer();
 *		object->o_container = container;
 *		mutex_exit(&container->c_objects_lock);
 *		return (object);
 *	}
 *
 * Functions that implicitly acquire an object hold (any function that calls
 * object_alloc() to supply an object for the caller) need to be carefully noted
 * so that the matching object_rele() is not neglected. Otherwise, leaked holds
 * prevent all objects hashed to the affected rwlocks from ever being moved.
 *
 * The pointer to a held object can be hashed to the holding rwlock even after
 * the object has been freed. Although it is possible to release the hold
 * after freeing the object, you may decide to release the hold implicitly in
 * whatever function frees the object, so as to release the hold as soon as
 * possible, and for the sake of symmetry with the function that implicitly
 * acquires the hold when it allocates the object. Here, object_free() releases
 * the hold acquired by object_alloc(). Its implicit object_rele() forms a
 * matching pair with object_hold():
 *
 *	void
 *	object_free(object_t *object)
 *	{
 *		container_t *container;
 *
 *		ASSERT(object_held(object));
 *		container = object->o_container;
 *		mutex_enter(&container->c_objects_lock);
 *		object->o_container =
 *		    (void *)((uintptr_t)object->o_container | 0x1);
 *		list_remove(&container->c_objects, object);
 *		mutex_exit(&container->c_objects_lock);
 *		object_rele(object);
 *		kmem_cache_free(object_cache, object);
 *	}
 *
 * Note that object_free() cannot safely accept an object pointer as an argument
 * unless the object is already held. Any function that calls object_free()
 * needs to be carefully noted since it similarly forms a matching pair with
 * object_hold().
 *
 * To complete the picture, the following callback function implements the
 * general solution by moving objects only if they are currently unheld:
 *
 *	static kmem_cbrc_t
 *	object_move(void *buf, void *newbuf, size_t size, void *arg)
 *	{
 *		object_t *op = buf, *np = newbuf;
 *		container_t *container;
 *
 *		container = op->o_container;
 *		if ((uintptr_t)container & 0x3) {
 *			return (KMEM_CBRC_DONT_KNOW);
 *		}
 *
 *		// Ensure that the container structure does not go away.
 *		if (container_hold(container) == 0) {
 *			return (KMEM_CBRC_DONT_KNOW);
 *		}
 *
 *		mutex_enter(&container->c_objects_lock);
 *		if (container != op->o_container) {
 *			mutex_exit(&container->c_objects_lock);
 *			container_rele(container);
 *			return (KMEM_CBRC_DONT_KNOW);
 *		}
 *
 *		if (rw_tryenter(OBJECT_RWLOCK(op), RW_WRITER) == 0) {
 *			mutex_exit(&container->c_objects_lock);
 *			container_rele(container);
 *			return (KMEM_CBRC_LATER);
 *		}
 *
 *		object_move_impl(op, np);	// critical section
 *		rw_exit(OBJECT_RWLOCK(op));
 *
 *		op->o_container = (void *)((uintptr_t)op->o_container | 0x1);
 *		list_link_replace(&op->o_link_node, &np->o_link_node);
 *		mutex_exit(&container->c_objects_lock);
 *		container_rele(container);
 *		return (KMEM_CBRC_YES);
 *	}
 *
 * Note that object_move() must invalidate the designated o_container pointer of
 * the old object in the same way that object_free() does, since kmem will free
 * the object in response to the KMEM_CBRC_YES return value.
 *
 * The lock order in object_move() differs from object_alloc(), which locks
 * OBJECT_RWLOCK first and &container->c_objects_lock second, but as long as the
 * callback uses rw_tryenter() (preventing the deadlock described earlier), it's
 * not a problem. Holding the lock on the object list in the example above
 * through the entire callback not only prevents the object from going away, it
 * also allows you to lock the list elsewhere and know that none of its elements
 * will move during iteration.
 *
 * Adding an explicit hold everywhere an object from the cache is used is tricky
 * and involves much more change to client code than a cache-specific solution
 * that leverages existing state to decide whether or not an object is
 * movable. However, this approach has the advantage that no object remains
 * immovable for any significant length of time, making it extremely unlikely
 * that long-lived allocations can continue holding slabs hostage; and it works
 * for any cache.
 *
 * 3. Consolidator Implementation
 *
 * Once the client supplies a move function that a) recognizes known objects and
 * b) avoids moving objects that are actively in use, the remaining work is up
 * to the consolidator to decide which objects to move and when to issue
 * callbacks.
 *
 * The consolidator relies on the fact that a cache's slabs are ordered by
 * usage. Each slab has a fixed number of objects. Depending on the slab's
 * "color" (the offset of the first object from the beginning of the slab;
 * offsets are staggered to mitigate false sharing of cache lines) it is either
 * the maximum number of objects per slab determined at cache creation time or
 * else the number closest to the maximum that fits within the space remaining
 * after the initial offset. A completely allocated slab may contribute some
 * internal fragmentation (per-slab overhead) but no external fragmentation, so
 * it is of no interest to the consolidator. At the other extreme, slabs whose
 * objects have all been freed to the slab are released to the virtual memory
 * (VM) subsystem (objects freed to magazines are still allocated as far as the
 * slab is concerned). External fragmentation exists when there are slabs
 * somewhere between these extremes. A partial slab has at least one but not all
 * of its objects allocated. The more partial slabs, and the fewer allocated
 * objects on each of them, the higher the fragmentation. Hence the
 * consolidator's overall strategy is to reduce the number of partial slabs by
 * moving allocated objects from the least allocated slabs to the most allocated
 * slabs.
 *
 * Partial slabs are kept in an AVL tree ordered by usage. Completely allocated
 * slabs are kept separately in an unordered list. Since the majority of slabs
 * tend to be completely allocated (a typical unfragmented cache may have
 * thousands of complete slabs and only a single partial slab), separating
 * complete slabs improves the efficiency of partial slab ordering, since the
 * complete slabs do not affect the depth or balance of the AVL tree. This
 * ordered sequence of partial slabs acts as a "free list" supplying objects for
 * allocation requests.
 *
 * Objects are always allocated from the first partial slab in the free list,
 * where the allocation is most likely to eliminate a partial slab (by
 * completely allocating it). Conversely, when a single object from a completely
 * allocated slab is freed to the slab, that slab is added to the front of the
 * free list. Since most free list activity involves highly allocated slabs
 * coming and going at the front of the list, slabs tend naturally toward the
 * ideal order: highly allocated at the front, sparsely allocated at the back.
 * Slabs with few allocated objects are likely to become completely free if they
 * keep a safe distance away from the front of the free list. Slab misorders
 * interfere with the natural tendency of slabs to become completely free or
 * completely allocated. For example, a slab with a single allocated object
 * needs only a single free to escape the cache; its natural desire is
 * frustrated when it finds itself at the front of the list where a second
 * allocation happens just before the free could have released it. Another slab
 * with all but one object allocated might have supplied the buffer instead, so
 * that both (as opposed to neither) of the slabs would have been taken off the
 * free list.
 *
 * Although slabs tend naturally toward the ideal order, misorders allowed by a
 * simple list implementation defeat the consolidator's strategy of merging
 * least- and most-allocated slabs. Without an AVL tree to guarantee order, kmem
 * needs another way to fix misorders to optimize its callback strategy. One
 * approach is to periodically scan a limited number of slabs, advancing a
 * marker to hold the current scan position, and to move extreme misorders to
 * the front or back of the free list and to the front or back of the current
 * scan range. By making consecutive scan ranges overlap by one slab, the least
 * allocated slab in the current range can be carried along from the end of one
 * scan to the start of the next.
 *
 * Maintaining partial slabs in an AVL tree relieves kmem of this additional
 * task, however. Since most of the cache's activity is in the magazine layer,
 * and allocations from the slab layer represent only a startup cost, the
 * overhead of maintaining a balanced tree is not a significant concern compared
 * to the opportunity of reducing complexity by eliminating the partial slab
 * scanner just described. The overhead of an AVL tree is minimized by
 * maintaining only partial slabs in the tree and keeping completely allocated
 * slabs separately in a list. To avoid increasing the size of the slab
 * structure the AVL linkage pointers are reused for the slab's list linkage,
 * since the slab will always be either partial or complete, never stored both
 * ways at the same time. To further minimize the overhead of the AVL tree the
 * compare function that orders partial slabs by usage divides the range of
 * allocated object counts into bins such that counts within the same bin are
 * considered equal. Binning partial slabs makes it less likely that allocating
 * or freeing a single object will change the slab's order, requiring a tree
 * reinsertion (an avl_remove() followed by an avl_add(), both potentially
 * requiring some rebalancing of the tree). Allocation counts closest to
 * completely free and completely allocated are left unbinned (finely sorted) to
 * better support the consolidator's strategy of merging slabs at either
 * extreme.
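 *
 * A simplified sketch of such a binned comparison follows; the bin shift is
 * an arbitrary illustration, and unlike the real compare function this
 * sketch bins all counts uniformly rather than leaving the extremes finely
 * sorted or accounting for marked slabs (see section 3.3):
 *
 *	static int
 *	slab_usage_compar(const void *l, const void *r)
 *	{
 *		const kmem_slab_t *s1 = l, *s2 = r;
 *		uint_t w1 = s1->slab_refcnt >> 4;	// bins of 16 counts
 *		uint_t w2 = s2->slab_refcnt >> 4;
 *
 *		if (w1 > w2)
 *			return (-1);	// more allocated sorts toward front
 *		if (w1 < w2)
 *			return (1);
 *		// tie-break on address so that the AVL order is total
 *		if ((uintptr_t)s1 < (uintptr_t)s2)
 *			return (-1);
 *		return ((uintptr_t)s1 > (uintptr_t)s2) ? 1 : 0;
 *	}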
 *
 * 3.1 Assessing Fragmentation and Selecting Candidate Slabs
 *
 * The consolidator piggybacks on the kmem maintenance thread and is called on
 * the same interval as kmem_cache_update(), once per cache every fifteen
 * seconds. kmem maintains a running count of unallocated objects in the slab
 * layer (cache_bufslab). The consolidator checks whether that number exceeds
 * 12.5% (1/8) of the total objects in the cache (cache_buftotal), and whether
 * there is a significant number of slabs in the cache (arbitrarily a minimum
 * 101 total slabs). Unused objects that have fallen out of the magazine layer's
 * working set are included in the assessment, and magazines in the depot are
 * reaped if those objects would lift cache_bufslab above the fragmentation
 * threshold. Once the consolidator decides that a cache is fragmented, it looks
 * for a candidate slab to reclaim, starting at the end of the partial slab free
 * list and scanning backwards. At first the consolidator is choosy: only a slab
 * with fewer than 12.5% (1/8) of its objects allocated qualifies (or else a
 * single allocated object, regardless of percentage). If there is difficulty
 * finding a candidate slab, kmem raises the allocation threshold incrementally,
 * up to a maximum 87.5% (7/8), so that eventually the consolidator will reduce
 * external fragmentation (unused objects on the free list) below 12.5% (1/8),
 * even in the worst case of every slab in the cache being almost 7/8 allocated.
 * The threshold can also be lowered incrementally when candidate slabs are easy
 * to find, and the threshold is reset to the minimum 1/8 as soon as the cache
 * is no longer fragmented.
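 *
 * Restated as code, the basic ratio test described above amounts to the
 * following sketch (using the kmem_frag_numer and kmem_frag_denom tunables
 * declared later in this file; the minimum-slab check and the depot reaping
 * logic are omitted here):
 *
 *	// nfree/cache_buftotal > kmem_frag_numer/kmem_frag_denom, i.e.
 *	// more than 1/8 of the cache's buffers are unused by default
 *	boolean_t
 *	exceeds_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
 *	{
 *		return (nfree * kmem_frag_denom >
 *		    cp->cache_buftotal * kmem_frag_numer);
 *	}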
 *
 * 3.2 Generating Callbacks
 *
 * Once an eligible slab is chosen, a callback is generated for every allocated
 * object on the slab, in the hope that the client will move everything off the
 * slab and make it reclaimable. Objects selected as move destinations are
 * chosen from slabs at the front of the free list. Assuming slabs in the ideal
 * order (most allocated at the front, least allocated at the back) and a
 * cooperative client, the consolidator will succeed in removing slabs from both
 * ends of the free list, completely allocating on the one hand and completely
 * freeing on the other. Objects selected as move destinations are allocated in
 * the kmem maintenance thread where move requests are enqueued. A separate
 * callback thread removes pending callbacks from the queue and calls the
 * client. The separate thread ensures that client code (the move function) does
 * not interfere with internal kmem maintenance tasks. A map of pending
 * callbacks keyed by object address (the object to be moved) is checked to
 * ensure that duplicate callbacks are not generated for the same object.
 * Allocating the move destination (the object to move to) prevents subsequent
 * callbacks from selecting the same destination as an earlier pending callback.
 *
 * Move requests can also be generated by kmem_cache_reap() when the system is
 * desperate for memory and by kmem_cache_move_notify(), called by the client to
 * notify kmem that a move refused earlier with KMEM_CBRC_LATER is now possible.
 * The map of pending callbacks is protected by the same lock that protects the
 * slab layer.
 *
 * When the system is desperate for memory, kmem does not bother to determine
 * whether or not the cache exceeds the fragmentation threshold, but tries to
 * consolidate as many slabs as possible. Normally, the consolidator chews
 * slowly, one sparsely allocated slab at a time during each maintenance
 * interval that the cache is fragmented. When desperate, the consolidator
 * starts at the last partial slab and enqueues callbacks for every allocated
 * object on every partial slab, working backwards until it reaches the first
 * partial slab. The first partial slab, meanwhile, advances in pace with the
 * consolidator as allocations to supply move destinations for the enqueued
 * callbacks use up the highly allocated slabs at the front of the free list.
 * Ideally, the overgrown free list collapses like an accordion, starting at
 * both ends and ending at the center with a single partial slab.
 *
 * 3.3 Client Responses
 *
 * When the client returns KMEM_CBRC_NO in response to the move callback, kmem
 * marks the slab that supplied the stuck object non-reclaimable and moves it to
 * the front of the free list. The slab remains marked as long as it remains on
 * the free list, and it appears more allocated to the partial slab compare
 * function than any unmarked slab, no matter how many of its objects are
 * allocated. Since even one immovable object ties up the entire slab, the goal
 * is to completely allocate any slab that cannot be completely freed. kmem
 * does not bother generating callbacks to move objects from a marked slab
 * unless the system is desperate.
 *
 * When the client responds KMEM_CBRC_LATER, kmem increments a count for the
 * slab. If the client responds LATER too many times, kmem disbelieves and
 * treats the response as a NO. The count is cleared when the slab is taken off
 * the partial slab list or when the client moves one of the slab's objects.
 *
 * 4. Observability
 *
 * A kmem cache's external fragmentation is best observed with 'mdb -k' using
 * the ::kmem_slabs dcmd. For a complete description of the command, enter
 * '::help kmem_slabs' at the mdb prompt.
 */

#include <sys/kmem_impl.h>
#include <sys/vmem_impl.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/vm.h>
#include <sys/proc.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/mutex.h>
#include <sys/bitmap.h>
#include <sys/atomic.h>
#include <sys/kobj.h>
#include <sys/disp.h>
#include <vm/seg_kmem.h>
#include <sys/log.h>
#include <sys/callb.h>
#include <sys/taskq.h>
#include <sys/modctl.h>
#include <sys/reboot.h>
#include <sys/id32.h>
#include <sys/zone.h>
#include <sys/netstack.h>
#ifdef DEBUG
#include <sys/random.h>
#endif

extern void streams_msg_init(void);
extern int segkp_fromheap;
extern void segkp_cache_free(void);
extern int callout_init_done;

struct kmem_cache_kstat {
	kstat_named_t	kmc_buf_size;
	kstat_named_t	kmc_align;
	kstat_named_t	kmc_chunk_size;
	kstat_named_t	kmc_slab_size;
	kstat_named_t	kmc_alloc;
	kstat_named_t	kmc_alloc_fail;
	kstat_named_t	kmc_free;
	kstat_named_t	kmc_depot_alloc;
	kstat_named_t	kmc_depot_free;
	kstat_named_t	kmc_depot_contention;
	kstat_named_t	kmc_slab_alloc;
	kstat_named_t	kmc_slab_free;
	kstat_named_t	kmc_buf_constructed;
	kstat_named_t	kmc_buf_avail;
	kstat_named_t	kmc_buf_inuse;
	kstat_named_t	kmc_buf_total;
	kstat_named_t	kmc_buf_max;
	kstat_named_t	kmc_slab_create;
	kstat_named_t	kmc_slab_destroy;
	kstat_named_t	kmc_vmem_source;
	kstat_named_t	kmc_hash_size;
	kstat_named_t	kmc_hash_lookup_depth;
	kstat_named_t	kmc_hash_rescale;
	kstat_named_t	kmc_full_magazines;
	kstat_named_t	kmc_empty_magazines;
	kstat_named_t	kmc_magazine_size;
	kstat_named_t	kmc_reap;	/* number of kmem_cache_reap() calls */
	kstat_named_t	kmc_defrag;	/* attempts to defrag all partial slabs */
	kstat_named_t	kmc_scan;	/* attempts to defrag one partial slab */
	kstat_named_t	kmc_move_callbacks; /* sum of yes, no, later, dn, dk */
	kstat_named_t	kmc_move_yes;
	kstat_named_t	kmc_move_no;
	kstat_named_t	kmc_move_later;
	kstat_named_t	kmc_move_dont_need;
	kstat_named_t	kmc_move_dont_know; /* obj unrecognized by client ... */
	kstat_named_t	kmc_move_hunt_found; /* ... but found in mag layer */
	kstat_named_t	kmc_move_slabs_freed; /* slabs freed by consolidator */
	kstat_named_t	kmc_move_reclaimable; /* buffers, if consolidator ran */
} kmem_cache_kstat = {
	{ "buf_size",		KSTAT_DATA_UINT64 },
	{ "align",		KSTAT_DATA_UINT64 },
	{ "chunk_size",		KSTAT_DATA_UINT64 },
	{ "slab_size",		KSTAT_DATA_UINT64 },
	{ "alloc",		KSTAT_DATA_UINT64 },
	{ "alloc_fail",		KSTAT_DATA_UINT64 },
	{ "free",		KSTAT_DATA_UINT64 },
	{ "depot_alloc",	KSTAT_DATA_UINT64 },
	{ "depot_free",		KSTAT_DATA_UINT64 },
	{ "depot_contention",	KSTAT_DATA_UINT64 },
	{ "slab_alloc",		KSTAT_DATA_UINT64 },
	{ "slab_free",		KSTAT_DATA_UINT64 },
	{ "buf_constructed",	KSTAT_DATA_UINT64 },
	{ "buf_avail",		KSTAT_DATA_UINT64 },
	{ "buf_inuse",		KSTAT_DATA_UINT64 },
	{ "buf_total",		KSTAT_DATA_UINT64 },
	{ "buf_max",		KSTAT_DATA_UINT64 },
	{ "slab_create",	KSTAT_DATA_UINT64 },
	{ "slab_destroy",	KSTAT_DATA_UINT64 },
	{ "vmem_source",	KSTAT_DATA_UINT64 },
	{ "hash_size",		KSTAT_DATA_UINT64 },
	{ "hash_lookup_depth",	KSTAT_DATA_UINT64 },
	{ "hash_rescale",	KSTAT_DATA_UINT64 },
	{ "full_magazines",	KSTAT_DATA_UINT64 },
	{ "empty_magazines",	KSTAT_DATA_UINT64 },
	{ "magazine_size",	KSTAT_DATA_UINT64 },
	{ "reap",		KSTAT_DATA_UINT64 },
	{ "defrag",		KSTAT_DATA_UINT64 },
	{ "scan",		KSTAT_DATA_UINT64 },
	{ "move_callbacks",	KSTAT_DATA_UINT64 },
	{ "move_yes",		KSTAT_DATA_UINT64 },
	{ "move_no",		KSTAT_DATA_UINT64 },
	{ "move_later",		KSTAT_DATA_UINT64 },
	{ "move_dont_need",	KSTAT_DATA_UINT64 },
	{ "move_dont_know",	KSTAT_DATA_UINT64 },
	{ "move_hunt_found",	KSTAT_DATA_UINT64 },
	{ "move_slabs_freed",	KSTAT_DATA_UINT64 },
	{ "move_reclaimable",	KSTAT_DATA_UINT64 },
};

static kmutex_t kmem_cache_kstat_lock;

/*
 * The default set of caches to back kmem_alloc().
 * These sizes should be reevaluated periodically.
 *
 * We want allocations that are multiples of the coherency granularity
 * (64 bytes) to be satisfied from a cache which is a multiple of 64
 * bytes, so that it will be 64-byte aligned. For all multiples of 64,
 * the next kmem_cache_size greater than or equal to it must be a
 * multiple of 64.
 *
 * We split the table into two sections: size <= 4k and size > 4k. This
 * saves a lot of space and cache footprint in our cache tables.
 */
static const int kmem_alloc_sizes[] = {
	1 * 8,
	2 * 8,
	3 * 8,
	4 * 8,		5 * 8,		6 * 8,		7 * 8,
	4 * 16,		5 * 16,		6 * 16,		7 * 16,
	4 * 32,		5 * 32,		6 * 32,		7 * 32,
	4 * 64,		5 * 64,		6 * 64,		7 * 64,
	4 * 128,	5 * 128,	6 * 128,	7 * 128,
	P2ALIGN(8192 / 7, 64),
	P2ALIGN(8192 / 6, 64),
	P2ALIGN(8192 / 5, 64),
	P2ALIGN(8192 / 4, 64),
	P2ALIGN(8192 / 3, 64),
	P2ALIGN(8192 / 2, 64),
};

static const int kmem_big_alloc_sizes[] = {
	2 * 4096,	3 * 4096,
	2 * 8192,	3 * 8192,
	4 * 8192,	5 * 8192,	6 * 8192,	7 * 8192,
	8 * 8192,	9 * 8192,	10 * 8192,	11 * 8192,
	12 * 8192,	13 * 8192,	14 * 8192,	15 * 8192,
	16 * 8192
};

#define	KMEM_MAXBUF		4096
#define	KMEM_BIG_MAXBUF_32BIT	32768
#define	KMEM_BIG_MAXBUF		131072

#define	KMEM_BIG_MULTIPLE	4096	/* big_alloc_sizes must be a multiple */
#define	KMEM_BIG_SHIFT		12	/* lg(KMEM_BIG_MULTIPLE) */

static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT];
static kmem_cache_t *kmem_big_alloc_table[KMEM_BIG_MAXBUF >> KMEM_BIG_SHIFT];

#define	KMEM_ALLOC_TABLE_MAX	(KMEM_MAXBUF >> KMEM_ALIGN_SHIFT)
static size_t kmem_big_alloc_table_max = 0;	/* # of filled elements */

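/*
 * Magazine types: each entry gives {magazine size (rounds), alignment,
 * minbuf, maxbuf}. Larger buffers get smaller magazines; the thresholds
 * below select the magazine type for a cache based on its buffer size.
 */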
987 static kmem_magtype_t kmem_magtype[] = {
988 { 1, 8, 3200, 65536 },
989 { 3, 16, 256, 32768 },
990 { 7, 32, 64, 16384 },
991 { 15, 64, 0, 8192 },
992 { 31, 64, 0, 4096 },
993 { 47, 64, 0, 2048 },
994 { 63, 64, 0, 1024 },
995 { 95, 64, 0, 512 },
996 { 143, 64, 0, 0 },
997 };
998
999 static uint32_t kmem_reaping;
1000 static uint32_t kmem_reaping_idspace;
1001
1002 /*
1003 * kmem tunables
1004 */
1005 clock_t kmem_reap_interval; /* cache reaping rate [15 * HZ ticks] */
1006 int kmem_depot_contention = 3; /* max failed tryenters per real interval */
1007 pgcnt_t kmem_reapahead = 0; /* start reaping N pages before pageout */
1008 int kmem_panic = 1; /* whether to panic on error */
1009 int kmem_logging = 1; /* kmem_log_enter() override */
1010 uint32_t kmem_mtbf = 0; /* mean time between failures [default: off] */
1011 size_t kmem_transaction_log_size; /* transaction log size [2% of memory] */
1012 size_t kmem_content_log_size; /* content log size [2% of memory] */
1013 size_t kmem_failure_log_size; /* failure log [4 pages per CPU] */
1014 size_t kmem_slab_log_size; /* slab create log [4 pages per CPU] */
1015 size_t kmem_zerosized_log_size; /* zero-sized log [4 pages per CPU] */
1016 size_t kmem_content_maxsave = 256; /* KMF_CONTENTS max bytes to log */
1017 size_t kmem_lite_minsize = 0; /* minimum buffer size for KMF_LITE */
1018 size_t kmem_lite_maxalign = 1024; /* maximum buffer alignment for KMF_LITE */
1019 int kmem_lite_pcs = 4; /* number of PCs to store in KMF_LITE mode */
1020 size_t kmem_maxverify; /* maximum bytes to inspect in debug routines */
1021 size_t kmem_minfirewall; /* hardware-enforced redzone threshold */
1022
1023 #ifdef DEBUG
1024 int kmem_warn_zerosized = 1; /* whether to warn on zero-sized KM_SLEEP */
1025 #else
1026 int kmem_warn_zerosized = 0; /* whether to warn on zero-sized KM_SLEEP */
1027 #endif
1028
1029 int kmem_panic_zerosized = 0; /* whether to panic on zero-sized KM_SLEEP */
1030
1031 #ifdef _LP64
1032 size_t kmem_max_cached = KMEM_BIG_MAXBUF; /* maximum kmem_alloc cache */
1033 #else
1034 size_t kmem_max_cached = KMEM_BIG_MAXBUF_32BIT; /* maximum kmem_alloc cache */
1035 #endif
1036
1037 #ifdef DEBUG
1038 int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS;
1039 #else
1040 int kmem_flags = 0;
1041 #endif
1042 int kmem_ready;
1043
1044 static kmem_cache_t *kmem_slab_cache;
1045 static kmem_cache_t *kmem_bufctl_cache;
1046 static kmem_cache_t *kmem_bufctl_audit_cache;
1047
1048 static kmutex_t kmem_cache_lock; /* inter-cache linkage only */
1049 static list_t kmem_caches;
1050
1051 static taskq_t *kmem_taskq;
1052 static kmutex_t kmem_flags_lock;
1053 static vmem_t *kmem_metadata_arena;
1054 static vmem_t *kmem_msb_arena; /* arena for metadata caches */
1055 static vmem_t *kmem_cache_arena;
1056 static vmem_t *kmem_hash_arena;
1057 static vmem_t *kmem_log_arena;
1058 static vmem_t *kmem_oversize_arena;
1059 static vmem_t *kmem_va_arena;
1060 static vmem_t *kmem_default_arena;
1061 static vmem_t *kmem_firewall_va_arena;
1062 static vmem_t *kmem_firewall_arena;
1063
1064 static int kmem_zerosized; /* # of zero-sized allocs */
1065
1066 /*
1067 * kmem slab consolidator thresholds (tunables)
1068 */
1069 size_t kmem_frag_minslabs = 101; /* minimum total slabs */
1070 size_t kmem_frag_numer = 1; /* free buffers (numerator) */
1071 size_t kmem_frag_denom = KMEM_VOID_FRACTION; /* buffers (denominator) */
1072 /*
1073 * Maximum number of slabs from which to move buffers during a single
1074 * maintenance interval while the system is not low on memory.
1075 */
1076 size_t kmem_reclaim_max_slabs = 1;
1077 /*
1078 * Number of slabs to scan backwards from the end of the partial slab list
1079 * when searching for buffers to relocate.
1080 */
1081 size_t kmem_reclaim_scan_range = 12;
1082
1083 /* consolidator knobs */
1084 boolean_t kmem_move_noreap;
1085 boolean_t kmem_move_blocked;
1086 boolean_t kmem_move_fulltilt;
1087 boolean_t kmem_move_any_partial;
1088
1089 #ifdef DEBUG
1090 /*
1091 * kmem consolidator debug tunables:
1092 * Ensure code coverage by occasionally running the consolidator even when the
1093 * caches are not fragmented (they may never be). These intervals are mean time
1094 * in cache maintenance intervals (kmem_cache_update).
1095 */
1096 uint32_t kmem_mtb_move = 60; /* defrag 1 slab (~15min) */
1097 uint32_t kmem_mtb_reap = 1800; /* defrag all slabs (~7.5hrs) */
1098 #endif /* DEBUG */
1099
1100 static kmem_cache_t *kmem_defrag_cache;
1101 static kmem_cache_t *kmem_move_cache;
1102 static taskq_t *kmem_move_taskq;
1103
1104 static void kmem_cache_scan(kmem_cache_t *);
1105 static void kmem_cache_defrag(kmem_cache_t *);
1106 static void kmem_slab_prefill(kmem_cache_t *, kmem_slab_t *);
1107
1108
1109 kmem_log_header_t *kmem_transaction_log;
1110 kmem_log_header_t *kmem_content_log;
1111 kmem_log_header_t *kmem_failure_log;
1112 kmem_log_header_t *kmem_slab_log;
1113 kmem_log_header_t *kmem_zerosized_log;
1114
1115 static int kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */
1116
1117 #define KMEM_BUFTAG_LITE_ENTER(bt, count, caller) \
1118 if ((count) > 0) { \
1119 pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history; \
1120 pc_t *_e; \
1121 /* memmove() the old entries down one notch */ \
1122 for (_e = &_s[(count) - 1]; _e > _s; _e--) \
1123 *_e = *(_e - 1); \
1124 *_s = (uintptr_t)(caller); \
1125 }
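
/*
 * Illustrative sketch (not part of the build): KMEM_BUFTAG_LITE_ENTER keeps
 * a small, fixed-size history of program counters in the buftag, newest
 * first. With count = 3 and an existing history {A, B, C}, recording
 * caller D yields:
 *
 *	bt_history: {A, B, C}  -->  {D, A, B}	(C falls off the end)
 *
 * kmem_lite_count (below) determines how many entries are kept.
 */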
1126
1127 #define KMERR_MODIFIED 0 /* buffer modified while on freelist */
1128 #define KMERR_REDZONE 1 /* redzone violation (write past end of buf) */
1129 #define KMERR_DUPFREE 2 /* freed a buffer twice */
1130 #define KMERR_BADADDR 3 /* freed a bad (unallocated) address */
1131 #define KMERR_BADBUFTAG 4 /* buftag corrupted */
1132 #define KMERR_BADBUFCTL 5 /* bufctl corrupted */
1133 #define KMERR_BADCACHE 6 /* freed a buffer to the wrong cache */
1134 #define KMERR_BADSIZE 7 /* alloc size != free size */
1135 #define KMERR_BADBASE 8 /* buffer base address wrong */
1136
1137 struct {
1138 hrtime_t kmp_timestamp; /* timestamp of panic */
1139 int kmp_error; /* type of kmem error */
1140 void *kmp_buffer; /* buffer that induced panic */
1141 void *kmp_realbuf; /* real start address for buffer */
1142 kmem_cache_t *kmp_cache; /* buffer's cache according to client */
1143 kmem_cache_t *kmp_realcache; /* actual cache containing buffer */
1144 kmem_slab_t *kmp_slab; /* slab according to kmem_findslab() */
1145 kmem_bufctl_t *kmp_bufctl; /* bufctl */
1146 } kmem_panic_info;
1147
1148
1149 static void
1150 copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
1151 {
1152 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1153 uint64_t *buf = buf_arg;
1154
1155 while (buf < bufend)
1156 *buf++ = pattern;
1157 }
1158
1159 static void *
1160 verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
1161 {
1162 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1163 uint64_t *buf;
1164
1165 for (buf = buf_arg; buf < bufend; buf++)
1166 if (*buf != pattern)
1167 return (buf);
1168 return (NULL);
1169 }
1170
1171 static void *
1172 verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
1173 {
1174 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1175 uint64_t *buf;
1176
1177 for (buf = buf_arg; buf < bufend; buf++) {
1178 if (*buf != old) {
1179 copy_pattern(old, buf_arg,
1180 (char *)buf - (char *)buf_arg);
1181 return (buf);
1182 }
1183 *buf = new;
1184 }
1185
1186 return (NULL);
1187 }
1188
1189 static void
1190 kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1191 {
1192 kmem_cache_t *cp;
1193
1194 mutex_enter(&kmem_cache_lock);
1195 for (cp = list_head(&kmem_caches); cp != NULL;
1196 cp = list_next(&kmem_caches, cp))
1197 if (tq != NULL)
1198 (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1199 tqflag);
1200 else
1201 func(cp);
1202 mutex_exit(&kmem_cache_lock);
1203 }
1204
1205 static void
1206 kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1207 {
1208 kmem_cache_t *cp;
1209
1210 mutex_enter(&kmem_cache_lock);
1211 for (cp = list_head(&kmem_caches); cp != NULL;
1212 cp = list_next(&kmem_caches, cp)) {
1213 if (!(cp->cache_cflags & KMC_IDENTIFIER))
1214 continue;
1215 if (tq != NULL)
1216 (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1217 tqflag);
1218 else
1219 func(cp);
1220 }
1221 mutex_exit(&kmem_cache_lock);
1222 }
1223
1224 /*
1225 * Debugging support. Given a buffer address, find its slab.
1226 */
1227 static kmem_slab_t *
1228 kmem_findslab(kmem_cache_t *cp, void *buf)
1229 {
1230 kmem_slab_t *sp;
1231
1232 mutex_enter(&cp->cache_lock);
1233 for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
1234 sp = list_next(&cp->cache_complete_slabs, sp)) {
1235 if (KMEM_SLAB_MEMBER(sp, buf)) {
1236 mutex_exit(&cp->cache_lock);
1237 return (sp);
1238 }
1239 }
1240 for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
1241 sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
1242 if (KMEM_SLAB_MEMBER(sp, buf)) {
1243 mutex_exit(&cp->cache_lock);
1244 return (sp);
1245 }
1246 }
1247 mutex_exit(&cp->cache_lock);
1248
1249 return (NULL);
1250 }
1251
1252 static void
1253 kmem_error(int error, kmem_cache_t *cparg, void *bufarg)
1254 {
1255 kmem_buftag_t *btp = NULL;
1256 kmem_bufctl_t *bcp = NULL;
1257 kmem_cache_t *cp = cparg;
1258 kmem_slab_t *sp;
1259 uint64_t *off;
1260 void *buf = bufarg;
1261
1262 kmem_logging = 0; /* stop logging when a bad thing happens */
1263
1264 kmem_panic_info.kmp_timestamp = gethrtime();
1265
1266 sp = kmem_findslab(cp, buf);
1267 if (sp == NULL) {
1268 for (cp = list_tail(&kmem_caches); cp != NULL;
1269 cp = list_prev(&kmem_caches, cp)) {
1270 if ((sp = kmem_findslab(cp, buf)) != NULL)
1271 break;
1272 }
1273 }
1274
1275 if (sp == NULL) {
1276 cp = NULL;
1277 error = KMERR_BADADDR;
1278 } else {
1279 if (cp != cparg)
1280 error = KMERR_BADCACHE;
1281 else
1282 buf = (char *)bufarg - ((uintptr_t)bufarg -
1283 (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1284 if (buf != bufarg)
1285 error = KMERR_BADBASE;
1286 if (cp->cache_flags & KMF_BUFTAG)
1287 btp = KMEM_BUFTAG(cp, buf);
1288 if (cp->cache_flags & KMF_HASH) {
1289 mutex_enter(&cp->cache_lock);
1290 for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1291 if (bcp->bc_addr == buf)
1292 break;
1293 mutex_exit(&cp->cache_lock);
1294 if (bcp == NULL && btp != NULL)
1295 bcp = btp->bt_bufctl;
1296 if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
1297 NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) ||
1298 bcp->bc_addr != buf) {
1299 error = KMERR_BADBUFCTL;
1300 bcp = NULL;
1301 }
1302 }
1303 }
1304
1305 kmem_panic_info.kmp_error = error;
1306 kmem_panic_info.kmp_buffer = bufarg;
1307 kmem_panic_info.kmp_realbuf = buf;
1308 kmem_panic_info.kmp_cache = cparg;
1309 kmem_panic_info.kmp_realcache = cp;
1310 kmem_panic_info.kmp_slab = sp;
1311 kmem_panic_info.kmp_bufctl = bcp;
1312
1313 printf("kernel memory allocator: ");
1314
1315 switch (error) {
1316
1317 case KMERR_MODIFIED:
1318 printf("buffer modified after being freed\n");
1319 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1320 if (off == NULL) /* shouldn't happen */
1321 off = buf;
1322 printf("modification occurred at offset 0x%lx "
1323 "(0x%llx replaced by 0x%llx)\n",
1324 (uintptr_t)off - (uintptr_t)buf,
1325 (longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off);
1326 break;
1327
1328 case KMERR_REDZONE:
1329 printf("redzone violation: write past end of buffer\n");
1330 break;
1331
1332 case KMERR_BADADDR:
1333 printf("invalid free: buffer not in cache\n");
1334 break;
1335
1336 case KMERR_DUPFREE:
1337 printf("duplicate free: buffer freed twice\n");
1338 break;
1339
1340 case KMERR_BADBUFTAG:
1341 printf("boundary tag corrupted\n");
1342 printf("bcp ^ bxstat = %lx, should be %lx\n",
1343 (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
1344 KMEM_BUFTAG_FREE);
1345 break;
1346
1347 case KMERR_BADBUFCTL:
1348 printf("bufctl corrupted\n");
1349 break;
1350
1351 case KMERR_BADCACHE:
1352 printf("buffer freed to wrong cache\n");
1353 printf("buffer was allocated from %s,\n", cp->cache_name);
1354 printf("caller attempting free to %s.\n", cparg->cache_name);
1355 break;
1356
1357 case KMERR_BADSIZE:
1358 printf("bad free: free size (%u) != alloc size (%u)\n",
1359 KMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
1360 KMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
1361 break;
1362
1363 case KMERR_BADBASE:
1364 printf("bad free: free address (%p) != alloc address (%p)\n",
1365 bufarg, buf);
1366 break;
1367 }
1368
1369 printf("buffer=%p bufctl=%p cache: %s\n",
1370 bufarg, (void *)bcp, cparg->cache_name);
1371
1372 if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
1373 error != KMERR_BADBUFCTL) {
1374 int d;
1375 timestruc_t ts;
1376 kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp;
1377
1378 hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts);
1379 printf("previous transaction on buffer %p:\n", buf);
1380 printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n",
1381 (void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
1382 (void *)sp, cp->cache_name);
1383 for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) {
1384 ulong_t off;
1385 char *sym = kobj_getsymname(bcap->bc_stack[d], &off);
1386 printf("%s+%lx\n", sym ? sym : "?", off);
1387 }
1388 }
1389 if (kmem_panic > 0)
1390 panic("kernel heap corruption detected");
1391 if (kmem_panic == 0)
1392 debug_enter(NULL);
1393 kmem_logging = 1; /* resume logging */
1394 }
1395
1396 static kmem_log_header_t *
1397 kmem_log_init(size_t logsize)
1398 {
1399 kmem_log_header_t *lhp;
1400 int nchunks = 4 * max_ncpus;
1401 size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus];
1402 int i;
1403
1404 /*
1405 * Make sure that lhp->lh_cpu[] is nicely aligned
1406 * to prevent false sharing of cache lines.
1407 */
1408 lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN);
1409 lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
1410 NULL, NULL, VM_SLEEP);
1411 bzero(lhp, lhsize);
1412
1413 mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
1414 lhp->lh_nchunks = nchunks;
1415 lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE);
1416 lhp->lh_base = vmem_alloc(kmem_log_arena,
1417 lhp->lh_chunksize * nchunks, VM_SLEEP);
1418 lhp->lh_free = vmem_alloc(kmem_log_arena,
1419 nchunks * sizeof (int), VM_SLEEP);
1420 bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
1421
1422 for (i = 0; i < max_ncpus; i++) {
1423 kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
1424 mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
1425 clhp->clh_chunk = i;
1426 }
1427
1428 for (i = max_ncpus; i < nchunks; i++)
1429 lhp->lh_free[i] = i;
1430
1431 lhp->lh_head = max_ncpus;
1432 lhp->lh_tail = 0;
1433
1434 return (lhp);
1435 }
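
/*
 * Worked example (assumed values, for illustration only): with max_ncpus = 2
 * and logsize = 64K, kmem_log_init() creates nchunks = 8 chunks of
 * P2ROUNDUP(64K / 8 + 1, PAGESIZE) bytes each. Chunks 0 and 1 start out
 * owned by CPUs 0 and 1; chunk ids 2..7 sit on lh_free, with lh_head = 2
 * and lh_tail = 0. When a CPU fills its chunk, kmem_log_enter() retires it
 * to the tail of the free list and takes a fresh chunk from the head, so
 * the log always recycles its oldest data first.
 */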
1436
1437 static void *
1438 kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
1439 {
1440 void *logspace;
1441 kmem_cpu_log_header_t *clhp;
1442
1443 if (lhp == NULL || kmem_logging == 0 || panicstr)
1444 return (NULL);
1445
1446 clhp = &lhp->lh_cpu[CPU->cpu_seqid];
1447
1448 mutex_enter(&clhp->clh_lock);
1449 clhp->clh_hits++;
1450 if (size > clhp->clh_avail) {
1451 mutex_enter(&lhp->lh_lock);
1452 lhp->lh_hits++;
1453 lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
1454 lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
1455 clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
1456 lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
1457 clhp->clh_current = lhp->lh_base +
1458 clhp->clh_chunk * lhp->lh_chunksize;
1459 clhp->clh_avail = lhp->lh_chunksize;
1460 if (size > lhp->lh_chunksize)
1461 size = lhp->lh_chunksize;
1462 mutex_exit(&lhp->lh_lock);
1463 }
1464 logspace = clhp->clh_current;
1465 clhp->clh_current += size;
1466 clhp->clh_avail -= size;
1467 bcopy(data, logspace, size);
1468 mutex_exit(&clhp->clh_lock);
1469 return (logspace);
1470 }
1471
1472 #define KMEM_AUDIT(lp, cp, bcp) \
1473 { \
1474 kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp); \
1475 _bcp->bc_timestamp = gethrtime(); \
1476 _bcp->bc_thread = curthread; \
1477 _bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH); \
1478 _bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp)); \
1479 }
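
/*
 * Note: KMEM_AUDIT() stamps the bufctl's audit record in place (timestamp,
 * thread, stack trace) and then copies the entire record into the given
 * transaction log via kmem_log_enter(), so recent allocator activity can be
 * reconstructed from the log even after the bufctl itself is recycled.
 */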
1480
1481 static void
1482 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
1483 kmem_slab_t *sp, void *addr)
1484 {
1485 kmem_bufctl_audit_t bca;
1486
1487 bzero(&bca, sizeof (kmem_bufctl_audit_t));
1488 bca.bc_addr = addr;
1489 bca.bc_slab = sp;
1490 bca.bc_cache = cp;
1491 KMEM_AUDIT(lp, cp, &bca);
1492 }
1493
1494 /*
1495 * Create a new slab for cache cp.
1496 */
1497 static kmem_slab_t *
1498 kmem_slab_create(kmem_cache_t *cp, int kmflag)
1499 {
1500 size_t slabsize = cp->cache_slabsize;
1501 size_t chunksize = cp->cache_chunksize;
1502 int cache_flags = cp->cache_flags;
1503 size_t color, chunks;
1504 char *buf, *slab;
1505 kmem_slab_t *sp;
1506 kmem_bufctl_t *bcp;
1507 vmem_t *vmp = cp->cache_arena;
1508
1509 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1510
1511 color = cp->cache_color + cp->cache_align;
1512 if (color > cp->cache_maxcolor)
1513 color = cp->cache_mincolor;
1514 cp->cache_color = color;
1515
1516 slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS);
1517
1518 if (slab == NULL)
1519 goto vmem_alloc_failure;
1520
1521 ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
1522
1523 /*
1524 * Reverify what was already checked in kmem_cache_set_move(), since the
1525 * consolidator depends (for correctness) on slabs being initialized
1526 * with the 0xbaddcafe memory pattern (setting a low order bit usable by
1527 * clients to distinguish uninitialized memory from known objects).
1528 */
1529 ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
1530 if (!(cp->cache_cflags & KMC_NOTOUCH))
1531 copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize);
1532
1533 if (cache_flags & KMF_HASH) {
1534 if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL)
1535 goto slab_alloc_failure;
1536 chunks = (slabsize - color) / chunksize;
1537 } else {
1538 sp = KMEM_SLAB(cp, slab);
1539 chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize;
1540 }
1541
1542 sp->slab_cache = cp;
1543 sp->slab_head = NULL;
1544 sp->slab_refcnt = 0;
1545 sp->slab_base = buf = slab + color;
1546 sp->slab_chunks = chunks;
1547 sp->slab_stuck_offset = (uint32_t)-1;
1548 sp->slab_later_count = 0;
1549 sp->slab_flags = 0;
1550
1551 ASSERT(chunks > 0);
1552 while (chunks-- != 0) {
1553 if (cache_flags & KMF_HASH) {
1554 bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
1555 if (bcp == NULL)
1556 goto bufctl_alloc_failure;
1557 if (cache_flags & KMF_AUDIT) {
1558 kmem_bufctl_audit_t *bcap =
1559 (kmem_bufctl_audit_t *)bcp;
1560 bzero(bcap, sizeof (kmem_bufctl_audit_t));
1561 bcap->bc_cache = cp;
1562 }
1563 bcp->bc_addr = buf;
1564 bcp->bc_slab = sp;
1565 } else {
1566 bcp = KMEM_BUFCTL(cp, buf);
1567 }
1568 if (cache_flags & KMF_BUFTAG) {
1569 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1570 btp->bt_redzone = KMEM_REDZONE_PATTERN;
1571 btp->bt_bufctl = bcp;
1572 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1573 if (cache_flags & KMF_DEADBEEF) {
1574 copy_pattern(KMEM_FREE_PATTERN, buf,
1575 cp->cache_verify);
1576 }
1577 }
1578 bcp->bc_next = sp->slab_head;
1579 sp->slab_head = bcp;
1580 buf += chunksize;
1581 }
1582
1583 kmem_log_event(kmem_slab_log, cp, sp, slab);
1584
1585 return (sp);
1586
1587 bufctl_alloc_failure:
1588
1589 while ((bcp = sp->slab_head) != NULL) {
1590 sp->slab_head = bcp->bc_next;
1591 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1592 }
1593 kmem_cache_free(kmem_slab_cache, sp);
1594
1595 slab_alloc_failure:
1596
1597 vmem_free(vmp, slab, slabsize);
1598
1599 vmem_alloc_failure:
1600
1601 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1602 atomic_inc_64(&cp->cache_alloc_fail);
1603
1604 return (NULL);
1605 }
1606
1607 /*
1608 * Destroy a slab.
1609 */
1610 static void
1611 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1612 {
1613 vmem_t *vmp = cp->cache_arena;
1614 void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1615
1616 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1617 ASSERT(sp->slab_refcnt == 0);
1618
1619 if (cp->cache_flags & KMF_HASH) {
1620 kmem_bufctl_t *bcp;
1621 while ((bcp = sp->slab_head) != NULL) {
1622 sp->slab_head = bcp->bc_next;
1623 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1624 }
1625 kmem_cache_free(kmem_slab_cache, sp);
1626 }
1627 vmem_free(vmp, slab, cp->cache_slabsize);
1628 }
1629
1630 static void *
1631 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
1632 {
1633 kmem_bufctl_t *bcp, **hash_bucket;
1634 void *buf;
1635 boolean_t new_slab = (sp->slab_refcnt == 0);
1636
1637 ASSERT(MUTEX_HELD(&cp->cache_lock));
1638 /*
1639 * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we
1640 * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
1641 * slab is newly created.
1642 */
1643 ASSERT(new_slab || (KMEM_SLAB_IS_PARTIAL(sp) &&
1644 (sp == avl_first(&cp->cache_partial_slabs))));
1645 ASSERT(sp->slab_cache == cp);
1646
1647 cp->cache_slab_alloc++;
1648 cp->cache_bufslab--;
1649 sp->slab_refcnt++;
1650
1651 bcp = sp->slab_head;
1652 sp->slab_head = bcp->bc_next;
1653
1654 if (cp->cache_flags & KMF_HASH) {
1655 /*
1656 * Add buffer to allocated-address hash table.
1657 */
1658 buf = bcp->bc_addr;
1659 hash_bucket = KMEM_HASH(cp, buf);
1660 bcp->bc_next = *hash_bucket;
1661 *hash_bucket = bcp;
1662 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1663 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1664 }
1665 } else {
1666 buf = KMEM_BUF(cp, bcp);
1667 }
1668
1669 ASSERT(KMEM_SLAB_MEMBER(sp, buf));
1670
1671 if (sp->slab_head == NULL) {
1672 ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
1673 if (new_slab) {
1674 ASSERT(sp->slab_chunks == 1);
1675 } else {
1676 ASSERT(sp->slab_chunks > 1); /* the slab was partial */
1677 avl_remove(&cp->cache_partial_slabs, sp);
1678 sp->slab_later_count = 0; /* clear history */
1679 sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
1680 sp->slab_stuck_offset = (uint32_t)-1;
1681 }
1682 list_insert_head(&cp->cache_complete_slabs, sp);
1683 cp->cache_complete_slab_count++;
1684 return (buf);
1685 }
1686
1687 ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
1688 /*
1689 * Peek to see if the magazine layer is enabled before
1690 * we prefill. We're not holding the cpu cache lock,
1691 * so the peek could be wrong, but there's no harm in it.
1692 */
1693 if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
1694 (KMEM_CPU_CACHE(cp)->cc_magsize != 0)) {
1695 kmem_slab_prefill(cp, sp);
1696 return (buf);
1697 }
1698
1699 if (new_slab) {
1700 avl_add(&cp->cache_partial_slabs, sp);
1701 return (buf);
1702 }
1703
1704 /*
1705 * The slab is now more allocated than it was, so the
1706 * order remains unchanged.
1707 */
1708 ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
1709 return (buf);
1710 }
1711
1712 /*
1713 * Allocate a raw (unconstructed) buffer from cp's slab layer.
1714 */
1715 static void *
1716 kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
1717 {
1718 kmem_slab_t *sp;
1719 void *buf;
1720 boolean_t test_destructor;
1721
1722 mutex_enter(&cp->cache_lock);
1723 test_destructor = (cp->cache_slab_alloc == 0);
1724 sp = avl_first(&cp->cache_partial_slabs);
1725 if (sp == NULL) {
1726 ASSERT(cp->cache_bufslab == 0);
1727
1728 /*
1729 * The freelist is empty. Create a new slab.
1730 */
1731 mutex_exit(&cp->cache_lock);
1732 if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
1733 return (NULL);
1734 }
1735 mutex_enter(&cp->cache_lock);
1736 cp->cache_slab_create++;
1737 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1738 cp->cache_bufmax = cp->cache_buftotal;
1739 cp->cache_bufslab += sp->slab_chunks;
1740 }
1741
1742 buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
1743 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1744 (cp->cache_complete_slab_count +
1745 avl_numnodes(&cp->cache_partial_slabs) +
1746 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1747 mutex_exit(&cp->cache_lock);
1748
1749 if (test_destructor && cp->cache_destructor != NULL) {
1750 /*
1751 * On the first kmem_slab_alloc(), assert that it is valid to
1752 * call the destructor on a newly constructed object without any
1753 * client involvement.
1754 */
1755 if ((cp->cache_constructor == NULL) ||
1756 cp->cache_constructor(buf, cp->cache_private,
1757 kmflag) == 0) {
1758 cp->cache_destructor(buf, cp->cache_private);
1759 }
1760 copy_pattern(KMEM_UNINITIALIZED_PATTERN, buf,
1761 cp->cache_bufsize);
1762 if (cp->cache_flags & KMF_DEADBEEF) {
1763 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1764 }
1765 }
1766
1767 return (buf);
1768 }
1769
1770 static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *);
1771
1772 /*
1773 * Free a raw (unconstructed) buffer to cp's slab layer.
1774 */
1775 static void
1776 kmem_slab_free(kmem_cache_t *cp, void *buf)
1777 {
1778 kmem_slab_t *sp;
1779 kmem_bufctl_t *bcp, **prev_bcpp;
1780
1781 ASSERT(buf != NULL);
1782
1783 mutex_enter(&cp->cache_lock);
1784 cp->cache_slab_free++;
1785
1786 if (cp->cache_flags & KMF_HASH) {
1787 /*
1788 * Look up buffer in allocated-address hash table.
1789 */
1790 prev_bcpp = KMEM_HASH(cp, buf);
1791 while ((bcp = *prev_bcpp) != NULL) {
1792 if (bcp->bc_addr == buf) {
1793 *prev_bcpp = bcp->bc_next;
1794 sp = bcp->bc_slab;
1795 break;
1796 }
1797 cp->cache_lookup_depth++;
1798 prev_bcpp = &bcp->bc_next;
1799 }
1800 } else {
1801 bcp = KMEM_BUFCTL(cp, buf);
1802 sp = KMEM_SLAB(cp, buf);
1803 }
1804
1805 if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
1806 mutex_exit(&cp->cache_lock);
1807 kmem_error(KMERR_BADADDR, cp, buf);
1808 return;
1809 }
1810
1811 if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) {
1812 /*
1813 * If this is the buffer that prevented the consolidator from
1814 * clearing the slab, we can reset the slab flags now that the
1815 * buffer is freed. (It makes sense to do this in
1816 * kmem_cache_free(), where the client gives up ownership of the
1817 * buffer, but on the hot path the test is too expensive.)
1818 */
1819 kmem_slab_move_yes(cp, sp, buf);
1820 }
1821
1822 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1823 if (cp->cache_flags & KMF_CONTENTS)
1824 ((kmem_bufctl_audit_t *)bcp)->bc_contents =
1825 kmem_log_enter(kmem_content_log, buf,
1826 cp->cache_contents);
1827 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1828 }
1829
1830 bcp->bc_next = sp->slab_head;
1831 sp->slab_head = bcp;
1832
1833 cp->cache_bufslab++;
1834 ASSERT(sp->slab_refcnt >= 1);
1835
1836 if (--sp->slab_refcnt == 0) {
1837 /*
1838 * There are no outstanding allocations from this slab,
1839 * so we can reclaim the memory.
1840 */
1841 if (sp->slab_chunks == 1) {
1842 list_remove(&cp->cache_complete_slabs, sp);
1843 cp->cache_complete_slab_count--;
1844 } else {
1845 avl_remove(&cp->cache_partial_slabs, sp);
1846 }
1847
1848 cp->cache_buftotal -= sp->slab_chunks;
1849 cp->cache_bufslab -= sp->slab_chunks;
1850 /*
1851 * Defer releasing the slab to the virtual memory subsystem
1852 * while there is a pending move callback, since we guarantee
1853 * that buffers passed to the move callback have only been
1854 * touched by kmem or by the client itself. Since the memory
1855 * patterns baddcafe (uninitialized) and deadbeef (freed) both
1856 * set at least one of the two lowest order bits, the client can
1857 * test those bits in the move callback to determine whether or
1858 * not it knows about the buffer (assuming that the client also
1859 * sets one of those low order bits whenever it frees a buffer).
1860 */
1861 if (cp->cache_defrag == NULL ||
1862 (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
1863 !(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) {
1864 cp->cache_slab_destroy++;
1865 mutex_exit(&cp->cache_lock);
1866 kmem_slab_destroy(cp, sp);
1867 } else {
1868 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
1869 /*
1870 * Slabs are inserted at both ends of the deadlist to
1871 * distinguish between slabs freed while move callbacks
1872 * are pending (list head) and a slab freed while the
1873 * lock is dropped in kmem_move_buffers() (list tail) so
1874 * that in both cases slab_destroy() is called from the
1875 * right context.
1876 */
1877 if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
1878 list_insert_tail(deadlist, sp);
1879 } else {
1880 list_insert_head(deadlist, sp);
1881 }
1882 cp->cache_defrag->kmd_deadcount++;
1883 mutex_exit(&cp->cache_lock);
1884 }
1885 return;
1886 }
1887
1888 if (bcp->bc_next == NULL) {
1889 /* Transition the slab from completely allocated to partial. */
1890 ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1));
1891 ASSERT(sp->slab_chunks > 1);
1892 list_remove(&cp->cache_complete_slabs, sp);
1893 cp->cache_complete_slab_count--;
1894 avl_add(&cp->cache_partial_slabs, sp);
1895 } else {
1896 (void) avl_update_gt(&cp->cache_partial_slabs, sp);
1897 }
1898
1899 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1900 (cp->cache_complete_slab_count +
1901 avl_numnodes(&cp->cache_partial_slabs) +
1902 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1903 mutex_exit(&cp->cache_lock);
1904 }
1905
1906 /*
1907 * Return -1 if kmem_error, 1 if constructor fails, 0 if successful.
1908 */
1909 static int
1910 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
1911 caddr_t caller)
1912 {
1913 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1914 kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1915 uint32_t mtbf;
1916
1917 if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1918 kmem_error(KMERR_BADBUFTAG, cp, buf);
1919 return (-1);
1920 }
1921
1922 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC;
1923
1924 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1925 kmem_error(KMERR_BADBUFCTL, cp, buf);
1926 return (-1);
1927 }
1928
1929 if (cp->cache_flags & KMF_DEADBEEF) {
1930 if (!construct && (cp->cache_flags & KMF_LITE)) {
1931 if (*(uint64_t *)buf != KMEM_FREE_PATTERN) {
1932 kmem_error(KMERR_MODIFIED, cp, buf);
1933 return (-1);
1934 }
1935 if (cp->cache_constructor != NULL)
1936 *(uint64_t *)buf = btp->bt_redzone;
1937 else
1938 *(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN;
1939 } else {
1940 construct = 1;
1941 if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
1942 KMEM_UNINITIALIZED_PATTERN, buf,
1943 cp->cache_verify)) {
1944 kmem_error(KMERR_MODIFIED, cp, buf);
1945 return (-1);
1946 }
1947 }
1948 }
1949 btp->bt_redzone = KMEM_REDZONE_PATTERN;
1950
1951 if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1952 gethrtime() % mtbf == 0 &&
1953 (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
1954 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1955 if (!construct && cp->cache_destructor != NULL)
1956 cp->cache_destructor(buf, cp->cache_private);
1957 } else {
1958 mtbf = 0;
1959 }
1960
1961 if (mtbf || (construct && cp->cache_constructor != NULL &&
1962 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
1963 atomic_inc_64(&cp->cache_alloc_fail);
1964 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1965 if (cp->cache_flags & KMF_DEADBEEF)
1966 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1967 kmem_slab_free(cp, buf);
1968 return (1);
1969 }
1970
1971 if (cp->cache_flags & KMF_AUDIT) {
1972 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1973 }
1974
1975 if ((cp->cache_flags & KMF_LITE) &&
1976 !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
1977 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
1978 }
1979
1980 return (0);
1981 }
1982
1983 static int
1984 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
1985 {
1986 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1987 kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1988 kmem_slab_t *sp;
1989
1990 if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) {
1991 if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1992 kmem_error(KMERR_DUPFREE, cp, buf);
1993 return (-1);
1994 }
1995 sp = kmem_findslab(cp, buf);
1996 if (sp == NULL || sp->slab_cache != cp)
1997 kmem_error(KMERR_BADADDR, cp, buf);
1998 else
1999 kmem_error(KMERR_REDZONE, cp, buf);
2000 return (-1);
2001 }
2002
2003 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
2004
2005 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
2006 kmem_error(KMERR_BADBUFCTL, cp, buf);
2007 return (-1);
2008 }
2009
2010 if (btp->bt_redzone != KMEM_REDZONE_PATTERN) {
2011 kmem_error(KMERR_REDZONE, cp, buf);
2012 return (-1);
2013 }
2014
2015 if (cp->cache_flags & KMF_AUDIT) {
2016 if (cp->cache_flags & KMF_CONTENTS)
2017 bcp->bc_contents = kmem_log_enter(kmem_content_log,
2018 buf, cp->cache_contents);
2019 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2020 }
2021
2022 if ((cp->cache_flags & KMF_LITE) &&
2023 !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2024 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2025 }
2026
2027 if (cp->cache_flags & KMF_DEADBEEF) {
2028 if (cp->cache_flags & KMF_LITE)
2029 btp->bt_redzone = *(uint64_t *)buf;
2030 else if (cp->cache_destructor != NULL)
2031 cp->cache_destructor(buf, cp->cache_private);
2032
2033 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2034 }
2035
2036 return (0);
2037 }
2038
2039 /*
2040 * Free each object in magazine mp to cp's slab layer, and free mp itself.
2041 */
2042 static void
2043 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
2044 {
2045 int round;
2046
2047 ASSERT(!list_link_active(&cp->cache_link) ||
2048 taskq_member(kmem_taskq, curthread));
2049
2050 for (round = 0; round < nrounds; round++) {
2051 void *buf = mp->mag_round[round];
2052
2053 if (cp->cache_flags & KMF_DEADBEEF) {
2054 if (verify_pattern(KMEM_FREE_PATTERN, buf,
2055 cp->cache_verify) != NULL) {
2056 kmem_error(KMERR_MODIFIED, cp, buf);
2057 continue;
2058 }
2059 if ((cp->cache_flags & KMF_LITE) &&
2060 cp->cache_destructor != NULL) {
2061 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2062 *(uint64_t *)buf = btp->bt_redzone;
2063 cp->cache_destructor(buf, cp->cache_private);
2064 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2065 }
2066 } else if (cp->cache_destructor != NULL) {
2067 cp->cache_destructor(buf, cp->cache_private);
2068 }
2069
2070 kmem_slab_free(cp, buf);
2071 }
2072 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2073 kmem_cache_free(cp->cache_magtype->mt_cache, mp);
2074 }
2075
2076 /*
2077 * Allocate a magazine from the depot.
2078 */
2079 static kmem_magazine_t *
2080 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
2081 {
2082 kmem_magazine_t *mp;
2083
2084 /*
2085 * If we can't get the depot lock without contention,
2086 * update our contention count. We use the depot
2087 * contention rate to determine whether we need to
2088 * increase the magazine size for better scalability.
2089 */
2090 if (!mutex_tryenter(&cp->cache_depot_lock)) {
2091 mutex_enter(&cp->cache_depot_lock);
2092 cp->cache_depot_contention++;
2093 }
2094
2095 if ((mp = mlp->ml_list) != NULL) {
2096 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2097 mlp->ml_list = mp->mag_next;
2098 if (--mlp->ml_total < mlp->ml_min)
2099 mlp->ml_min = mlp->ml_total;
2100 mlp->ml_alloc++;
2101 }
2102
2103 mutex_exit(&cp->cache_depot_lock);
2104
2105 return (mp);
2106 }
2107
2108 /*
2109 * Free a magazine to the depot.
2110 */
2111 static void
2112 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
2113 {
2114 mutex_enter(&cp->cache_depot_lock);
2115 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2116 mp->mag_next = mlp->ml_list;
2117 mlp->ml_list = mp;
2118 mlp->ml_total++;
2119 mutex_exit(&cp->cache_depot_lock);
2120 }
2121
2122 /*
2123 * Update the working set statistics for cp's depot.
2124 */
2125 static void
2126 kmem_depot_ws_update(kmem_cache_t *cp)
2127 {
2128 mutex_enter(&cp->cache_depot_lock);
2129 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
2130 cp->cache_full.ml_min = cp->cache_full.ml_total;
2131 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
2132 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2133 mutex_exit(&cp->cache_depot_lock);
2134 }
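
/*
 * Worked example (assumed values): suppose the full-magazine list held 10
 * magazines at the last update and never dipped below 6 since (ml_min == 6).
 * kmem_depot_ws_update() then sets ml_reaplimit = 6 and restarts ml_min at
 * the current total; a subsequent kmem_depot_ws_reap() may destroy up to
 * MIN(ml_reaplimit, ml_min) magazines -- those that provably sat idle for a
 * full interval -- while the working set remains in the depot.
 */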
2135
2136 /*
2137 * Set the working set statistics for cp's depot to zero. (Everything is
2138 * eligible for reaping.)
2139 */
2140 static void
2141 kmem_depot_ws_zero(kmem_cache_t *cp)
2142 {
2143 mutex_enter(&cp->cache_depot_lock);
2144 cp->cache_full.ml_reaplimit = cp->cache_full.ml_total;
2145 cp->cache_full.ml_min = cp->cache_full.ml_total;
2146 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total;
2147 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2148 mutex_exit(&cp->cache_depot_lock);
2149 }
2150
2151 /*
2152 * The number of bytes to reap before we call kpreempt(). The default (1MB)
2153 * causes us to preempt reaping up to hundreds of times per second. Using a
2154 * larger value (1GB) causes this to have virtually no effect.
2155 */
2156 size_t kmem_reap_preempt_bytes = 1024 * 1024;
2157
2158 /*
2159 * Reap all magazines that have fallen out of the depot's working set.
2160 */
2161 static void
2162 kmem_depot_ws_reap(kmem_cache_t *cp)
2163 {
2164 size_t bytes = 0;
2165 long reap;
2166 kmem_magazine_t *mp;
2167
2168 ASSERT(!list_link_active(&cp->cache_link) ||
2169 taskq_member(kmem_taskq, curthread));
2170
2171 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
2172 while (reap-- &&
2173 (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) {
2174 kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
2175 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2176 if (bytes > kmem_reap_preempt_bytes) {
2177 kpreempt(KPREEMPT_SYNC);
2178 bytes = 0;
2179 }
2180 }
2181
2182 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
2183 while (reap-- &&
2184 (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) {
2185 kmem_magazine_destroy(cp, mp, 0);
2186 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2187 if (bytes > kmem_reap_preempt_bytes) {
2188 kpreempt(KPREEMPT_SYNC);
2189 bytes = 0;
2190 }
2191 }
2192 }
2193
2194 static void
2195 kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds)
2196 {
2197 ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
2198 (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
2199 ASSERT(ccp->cc_magsize > 0);
2200
2201 ccp->cc_ploaded = ccp->cc_loaded;
2202 ccp->cc_prounds = ccp->cc_rounds;
2203 ccp->cc_loaded = mp;
2204 ccp->cc_rounds = rounds;
2205 }
2206
2207 /*
2208 * Intercept kmem alloc/free calls during crash dump in order to avoid
2209 * changing kmem state while memory is being saved to the dump device.
2210 * Otherwise, ::kmem_verify will report "corrupt buffers". Note that
2211 * there are no locks because only one CPU calls kmem during a crash
2212 * dump. To enable this feature, first create the associated vmem
2213 * arena with VMC_DUMPSAFE.
2214 */
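
/*
 * Hypothetical sketch of enabling the dump intercept for an arena; the name
 * and argument values below are illustrative only:
 *
 *	vmem_t *arena = vmem_create("my_arena", NULL, 0, PAGESIZE,
 *	    segkmem_alloc, segkmem_free, heap_arena, 0,
 *	    VMC_DUMPSAFE | VM_SLEEP);
 *
 * kmem_dump_begin() sets KMF_DUMPDIVERT on caches built over such arenas,
 * diverting their allocations to the pre-reserved heap below while the dump
 * is written.
 */
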
2215 static void *kmem_dump_start; /* start of pre-reserved heap */
2216 static void *kmem_dump_end; /* end of heap area */
2217 static void *kmem_dump_curr; /* current free heap pointer */
2218 static size_t kmem_dump_size; /* size of heap area */
2219
2220 /* append to each buf created in the pre-reserved heap */
2221 typedef struct kmem_dumpctl {
2222 void *kdc_next; /* cache dump free list linkage */
2223 } kmem_dumpctl_t;
2224
2225 #define KMEM_DUMPCTL(cp, buf) \
2226 ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2227 sizeof (void *)))
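
/*
 * Illustrative layout (not to scale): each object carved from the dump heap
 * is followed by its kmem_dumpctl_t at the next pointer-aligned boundary:
 *
 *	+---------------------+-------+----------+
 *	|  object (bufsize)   | (pad) | kdc_next |
 *	+---------------------+-------+----------+
 *	^ buf                         ^ KMEM_DUMPCTL(cp, buf)
 *
 * kdc_next threads freed dump-time objects onto the cache's
 * cache_dump.kd_freelist for reuse by kmem_cache_alloc_dump().
 */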
2228
2229 /* set non-zero for a full report */
2230 uint_t kmem_dump_verbose = 0;
2231
2232 /* stats for oversize heap */
2233 uint_t kmem_dump_oversize_allocs = 0;
2234 uint_t kmem_dump_oversize_max = 0;
2235
2236 static void
2237 kmem_dumppr(char **pp, char *e, const char *format, ...)
2238 {
2239 char *p = *pp;
2240
2241 if (p < e) {
2242 int n;
2243 va_list ap;
2244
2245 va_start(ap, format);
2246 n = vsnprintf(p, e - p, format, ap);
2247 va_end(ap);
2248 *pp = p + n;
2249 }
2250 }
2251
2252 /*
2253 * Called when dumpadm(8) configures dump parameters.
2254 */
2255 void
2256 kmem_dump_init(size_t size)
2257 {
2258 /* Our caller ensures size is always set. */
2259 ASSERT3U(size, >, 0);
2260
2261 if (kmem_dump_start != NULL)
2262 kmem_free(kmem_dump_start, kmem_dump_size);
2263
2264 kmem_dump_start = kmem_alloc(size, KM_SLEEP);
2265 kmem_dump_size = size;
2266 kmem_dump_curr = kmem_dump_start;
2267 kmem_dump_end = (void *)((char *)kmem_dump_start + size);
2268 copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
2269 }
2270
2271 /*
2272 * Set a flag on each kmem_cache_t indicating whether it is safe to use
2273 * alternate dump memory. Called just before a panic crash dump starts.
2274 * Sets the per-CPU flags for the calling CPU.
2275 */
2276 void
2277 kmem_dump_begin(void)
2278 {
2279 kmem_cache_t *cp;
2280
2281 ASSERT(panicstr != NULL);
2282
2283 for (cp = list_head(&kmem_caches); cp != NULL;
2284 cp = list_next(&kmem_caches, cp)) {
2285 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2286
2287 if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
2288 cp->cache_flags |= KMF_DUMPDIVERT;
2289 ccp->cc_flags |= KMF_DUMPDIVERT;
2290 ccp->cc_dump_rounds = ccp->cc_rounds;
2291 ccp->cc_dump_prounds = ccp->cc_prounds;
2292 ccp->cc_rounds = ccp->cc_prounds = -1;
2293 } else {
2294 cp->cache_flags |= KMF_DUMPUNSAFE;
2295 ccp->cc_flags |= KMF_DUMPUNSAFE;
2296 }
2297 }
2298 }
2299
2300 /*
2301 * Finish the dump intercept: print any warnings on the console and
2302 * return verbose information to dumpsys() in the given buffer.
2304 */
2305 size_t
2306 kmem_dump_finish(char *buf, size_t size)
2307 {
2308 int percent = 0;
2309 size_t used;
2310 char *e = buf + size;
2311 char *p = buf;
2312
2313 if (kmem_dump_curr == kmem_dump_end) {
2314 cmn_err(CE_WARN, "exceeded kmem_dump space of %lu "
2315 "bytes: kmem state in dump may be inconsistent",
2316 kmem_dump_size);
2317 }
2318
2319 if (kmem_dump_verbose == 0)
2320 return (0);
2321
2322 used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
2323 percent = (used * 100) / kmem_dump_size;
2324
2325 kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
2326 kmem_dumppr(&p, e, "used bytes,%ld\n", used);
2327 kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
2328 kmem_dumppr(&p, e, "Oversize allocs,%d\n",
2329 kmem_dump_oversize_allocs);
2330 kmem_dumppr(&p, e, "Oversize max size,%ld\n",
2331 kmem_dump_oversize_max);
2332
2333 /* return buffer size used */
2334 if (p < e)
2335 bzero(p, e - p);
2336 return (p - buf);
2337 }
2338
2339 /*
2340 * Allocate a constructed object from alternate dump memory.
2341 */
2342 void *
2343 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
2344 {
2345 void *buf;
2346 void *curr;
2347 char *bufend;
2348
2349 /* return a constructed object */
2350 if ((buf = cp->cache_dump.kd_freelist) != NULL) {
2351 cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2352 return (buf);
2353 }
2354
2355 /* create a new constructed object */
2356 curr = kmem_dump_curr;
2357 buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
2358 bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
2359
2360 /* hat layer objects cannot cross a page boundary */
2361 if (cp->cache_align < PAGESIZE) {
2362 char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
2363 if (bufend > page) {
2364 bufend += page - (char *)buf;
2365 buf = (void *)page;
2366 }
2367 }
2368
2369 /* fall back to normal alloc if reserved area is used up */
2370 if (bufend > (char *)kmem_dump_end) {
2371 kmem_dump_curr = kmem_dump_end;
2372 cp->cache_dump.kd_alloc_fails++;
2373 return (NULL);
2374 }
2375
2376 /*
2377 * Must advance curr pointer before calling a constructor that
2378 * may also allocate memory.
2379 */
2380 kmem_dump_curr = bufend;
2381
2382 /* run constructor */
2383 if (cp->cache_constructor != NULL &&
2384 cp->cache_constructor(buf, cp->cache_private, kmflag)
2385 != 0) {
2386 #ifdef DEBUG
2387 printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
2388 cp->cache_name, (void *)cp);
2389 #endif
2390 /* reset curr pointer iff no allocs were done */
2391 if (kmem_dump_curr == bufend)
2392 kmem_dump_curr = curr;
2393
2394 cp->cache_dump.kd_alloc_fails++;
2395 /* fall back to normal alloc if the constructor fails */
2396 return (NULL);
2397 }
2398
2399 return (buf);
2400 }
2401
2402 /*
2403 * Free a constructed object in alternate dump memory.
2404 */
2405 int
2406 kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2407 {
2408 /* save constructed buffers for next time */
2409 if ((char *)buf >= (char *)kmem_dump_start &&
2410 (char *)buf < (char *)kmem_dump_end) {
2411 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist;
2412 cp->cache_dump.kd_freelist = buf;
2413 return (0);
2414 }
2415
2416 /* just drop buffers that were allocated before dump started */
2417 if (kmem_dump_curr < kmem_dump_end)
2418 return (0);
2419
2420 /* fall back to normal free if reserved area is used up */
2421 return (1);
2422 }
2423
2424 /*
2425 * Allocate a constructed object from cache cp.
2426 */
2427 void *
2428 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2429 {
2430 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2431 kmem_magazine_t *fmp;
2432 void *buf;
2433
2434 mutex_enter(&ccp->cc_lock);
2435 for (;;) {
2436 /*
2437 * If there's an object available in the current CPU's
2438 * loaded magazine, just take it and return.
2439 */
2440 if (ccp->cc_rounds > 0) {
2441 buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2442 ccp->cc_alloc++;
2443 mutex_exit(&ccp->cc_lock);
2444 if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
2445 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2446 ASSERT(!(ccp->cc_flags &
2447 KMF_DUMPDIVERT));
2448 cp->cache_dump.kd_unsafe++;
2449 }
2450 if ((ccp->cc_flags & KMF_BUFTAG) &&
2451 kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2452 caller()) != 0) {
2453 if (kmflag & KM_NOSLEEP)
2454 return (NULL);
2455 mutex_enter(&ccp->cc_lock);
2456 continue;
2457 }
2458 }
2459 return (buf);
2460 }
2461
2462 /*
2463 * The loaded magazine is empty. If the previously loaded
2464 * magazine was full, exchange them and try again.
2465 */
2466 if (ccp->cc_prounds > 0) {
2467 kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2468 continue;
2469 }
2470
2471 /*
2472 * Return an alternate buffer at dump time to preserve
2473 * the heap.
2474 */
2475 if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2476 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2477 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2478 /* log it so that we can warn about it */
2479 cp->cache_dump.kd_unsafe++;
2480 } else {
2481 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2482 NULL) {
2483 mutex_exit(&ccp->cc_lock);
2484 return (buf);
2485 }
2486 break; /* fall back to slab layer */
2487 }
2488 }
2489
2490 /*
2491 * If the magazine layer is disabled, break out now.
2492 */
2493 if (ccp->cc_magsize == 0)
2494 break;
2495
2496 /*
2497 * Try to get a full magazine from the depot.
2498 */
2499 fmp = kmem_depot_alloc(cp, &cp->cache_full);
2500 if (fmp != NULL) {
2501 if (ccp->cc_ploaded != NULL)
2502 kmem_depot_free(cp, &cp->cache_empty,
2503 ccp->cc_ploaded);
2504 kmem_cpu_reload(ccp, fmp, ccp->cc_magsize);
2505 continue;
2506 }
2507
2508 /*
2509 * There are no full magazines in the depot,
2510 * so fall through to the slab layer.
2511 */
2512 break;
2513 }
2514 mutex_exit(&ccp->cc_lock);
2515
2516 /*
2517 * We couldn't allocate a constructed object from the magazine layer,
2518 * so get a raw buffer from the slab layer and apply its constructor.
2519 */
2520 buf = kmem_slab_alloc(cp, kmflag);
2521
2522 if (buf == NULL)
2523 return (NULL);
2524
2525 if (cp->cache_flags & KMF_BUFTAG) {
2526 /*
2527 * Make kmem_cache_alloc_debug() apply the constructor for us.
2528 */
2529 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2530 if (rc != 0) {
2531 if (kmflag & KM_NOSLEEP)
2532 return (NULL);
2533 /*
2534 * kmem_cache_alloc_debug() detected corruption
2535 * but didn't panic (kmem_panic <= 0). We should not be
2536 * here because the constructor failed (indicated by a
2537 * return code of 1). Try again.
2538 */
2539 ASSERT(rc == -1);
2540 return (kmem_cache_alloc(cp, kmflag));
2541 }
2542 return (buf);
2543 }
2544
2545 if (cp->cache_constructor != NULL &&
2546 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2547 atomic_inc_64(&cp->cache_alloc_fail);
2548 kmem_slab_free(cp, buf);
2549 return (NULL);
2550 }
2551
2552 return (buf);
2553 }
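
/*
 * Hypothetical usage sketch (the cache, type, and callbacks are illustrative,
 * not from this file):
 *
 *	typedef struct foo { ... } foo_t;
 *
 *	cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
 *	    foo_constructor, foo_destructor, NULL, NULL, NULL, 0);
 *	foo_t *fp = kmem_cache_alloc(cache, KM_SLEEP);
 *	...
 *	kmem_cache_free(cache, fp);
 *
 * KM_SLEEP callers may block until memory is available; KM_NOSLEEP callers
 * must be prepared for a NULL return when, as above, the magazine and slab
 * layers both come up empty.
 */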
2554
2555 /*
2556 * The freed argument tells whether or not kmem_cache_free_debug() has already
2557 * been called so that we can avoid the duplicate free error. For example, a
2558 * buffer on a magazine has already been freed by the client but is still
2559 * constructed.
2560 */
2561 static void
2562 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2563 {
2564 if (!freed && (cp->cache_flags & KMF_BUFTAG))
2565 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2566 return;
2567
2568 /*
2569 * Note that if KMF_DEADBEEF is in effect and KMF_LITE is not,
2570 * kmem_cache_free_debug() will have already applied the destructor.
2571 */
2572 if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
2573 cp->cache_destructor != NULL) {
2574 if (cp->cache_flags & KMF_DEADBEEF) { /* KMF_LITE implied */
2575 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2576 *(uint64_t *)buf = btp->bt_redzone;
2577 cp->cache_destructor(buf, cp->cache_private);
2578 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2579 } else {
2580 cp->cache_destructor(buf, cp->cache_private);
2581 }
2582 }
2583
2584 kmem_slab_free(cp, buf);
2585 }
2586
2587 /*
2588 * Used when there's no room to free a buffer to the per-CPU cache.
2589 * Drops and re-acquires &ccp->cc_lock, and returns non-zero if the
2590 * caller should try freeing to the per-CPU cache again.
2591 * Note that we don't directly install the magazine in the cpu cache,
2592 * since its state may have changed wildly while the lock was dropped.
2593 */
2594 static int
2595 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
2596 {
2597 kmem_magazine_t *emp;
2598 kmem_magtype_t *mtp;
2599
2600 ASSERT(MUTEX_HELD(&ccp->cc_lock));
2601 ASSERT(((uint_t)ccp->cc_rounds == ccp->cc_magsize ||
2602 ((uint_t)ccp->cc_rounds == -1)) &&
2603 ((uint_t)ccp->cc_prounds == ccp->cc_magsize ||
2604 ((uint_t)ccp->cc_prounds == -1)));
2605
2606 emp = kmem_depot_alloc(cp, &cp->cache_empty);
2607 if (emp != NULL) {
2608 if (ccp->cc_ploaded != NULL)
2609 kmem_depot_free(cp, &cp->cache_full,
2610 ccp->cc_ploaded);
2611 kmem_cpu_reload(ccp, emp, 0);
2612 return (1);
2613 }
2614 /*
2615 * There are no empty magazines in the depot,
2616 * so try to allocate a new one. We must drop all locks
2617 * across kmem_cache_alloc() because lower layers may
2618 * attempt to allocate from this cache.
2619 */
2620 mtp = cp->cache_magtype;
2621 mutex_exit(&ccp->cc_lock);
2622 emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
2623 mutex_enter(&ccp->cc_lock);
2624
2625 if (emp != NULL) {
2626 /*
2627 * We successfully allocated an empty magazine.
2628 * However, we had to drop ccp->cc_lock to do it,
2629 * so the cache's magazine size may have changed.
2630 * If so, free the magazine and try again.
2631 */
2632 if (ccp->cc_magsize != mtp->mt_magsize) {
2633 mutex_exit(&ccp->cc_lock);
2634 kmem_cache_free(mtp->mt_cache, emp);
2635 mutex_enter(&ccp->cc_lock);
2636 return (1);
2637 }
2638
2639 /*
2640 * We got a magazine of the right size. Add it to
2641 * the depot and try the whole dance again.
2642 */
2643 kmem_depot_free(cp, &cp->cache_empty, emp);
2644 return (1);
2645 }
2646
2647 /*
2648 * We couldn't allocate an empty magazine,
2649 * so fall through to the slab layer.
2650 */
2651 return (0);
2652 }
2653
2654 /*
2655 * Free a constructed object to cache cp.
2656 */
2657 void
2658 kmem_cache_free(kmem_cache_t *cp, void *buf)
2659 {
2660 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2661
2662 /*
2663 * The client must not free either of the buffers passed to the move
2664 * callback function.
2665 */
2666 ASSERT(cp->cache_defrag == NULL ||
2667 cp->cache_defrag->kmd_thread != curthread ||
2668 (buf != cp->cache_defrag->kmd_from_buf &&
2669 buf != cp->cache_defrag->kmd_to_buf));
2670
2671 if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2672 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2673 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2674 /* log it so that we can warn about it */
2675 cp->cache_dump.kd_unsafe++;
2676 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2677 return;
2678 }
2679 if (ccp->cc_flags & KMF_BUFTAG) {
2680 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2681 return;
2682 }
2683 }
2684
2685 mutex_enter(&ccp->cc_lock);
2686 /*
2687 * Any changes to this logic should be reflected in kmem_slab_prefill()
2688 */
2689 for (;;) {
2690 /*
2691 * If there's a slot available in the current CPU's
2692 * loaded magazine, just put the object there and return.
2693 */
2694 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2695 ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
2696 ccp->cc_free++;
2697 mutex_exit(&ccp->cc_lock);
2698 return;
2699 }
2700
2701 /*
2702 * The loaded magazine is full. If the previously loaded
2703 * magazine was empty, exchange them and try again.
2704 */
2705 if (ccp->cc_prounds == 0) {
2706 kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2707 continue;
2708 }
2709
2710 /*
2711 * If the magazine layer is disabled, break out now.
2712 */
2713 if (ccp->cc_magsize == 0)
2714 break;
2715
2716 if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
2717 /*
2718 * We couldn't free our constructed object to the
2719 * magazine layer, so apply its destructor and free it
2720 * to the slab layer.
2721 */
2722 break;
2723 }
2724 }
2725 mutex_exit(&ccp->cc_lock);
2726 kmem_slab_free_constructed(cp, buf, B_TRUE);
2727 }
2728
2729 static void
2730 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
2731 {
2732 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2733 int cache_flags = cp->cache_flags;
2734
2735 kmem_bufctl_t *next, *head;
2736 size_t nbufs;
2737
2738 /*
2739 * Completely allocate the newly created slab and put the pre-allocated
2740 * buffers in magazines. Any of the buffers that cannot be put in
2741 * magazines must be returned to the slab.
2742 */
2743 ASSERT(MUTEX_HELD(&cp->cache_lock));
2744 ASSERT((cache_flags & (KMF_PREFILL|KMF_BUFTAG)) == KMF_PREFILL);
2745 ASSERT(cp->cache_constructor == NULL);
2746 ASSERT(sp->slab_cache == cp);
2747 ASSERT(sp->slab_refcnt == 1);
2748 ASSERT(sp->slab_head != NULL && sp->slab_chunks > sp->slab_refcnt);
2749 ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
2750
2751 head = sp->slab_head;
2752 nbufs = (sp->slab_chunks - sp->slab_refcnt);
2753 sp->slab_head = NULL;
2754 sp->slab_refcnt += nbufs;
2755 cp->cache_bufslab -= nbufs;
2756 cp->cache_slab_alloc += nbufs;
2757 list_insert_head(&cp->cache_complete_slabs, sp);
2758 cp->cache_complete_slab_count++;
2759 mutex_exit(&cp->cache_lock);
2760 mutex_enter(&ccp->cc_lock);
2761
2762 while (head != NULL) {
2763 void *buf = KMEM_BUF(cp, head);
2764 /*
2765 * If there's a slot available in the current CPU's
2766 * loaded magazine, just put the object there and
2767 * continue.
2768 */
2769 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2770 ccp->cc_loaded->mag_round[ccp->cc_rounds++] =
2771 buf;
2772 ccp->cc_free++;
2773 nbufs--;
2774 head = head->bc_next;
2775 continue;
2776 }
2777
2778 /*
2779 * The loaded magazine is full. If the previously
2780 * loaded magazine was empty, exchange them and try
2781 * again.
2782 */
2783 if (ccp->cc_prounds == 0) {
2784 kmem_cpu_reload(ccp, ccp->cc_ploaded,
2785 ccp->cc_prounds);
2786 continue;
2787 }
2788
2789 /*
2790 * If the magazine layer is disabled, break out now.
2791 */
2792
2793 if (ccp->cc_magsize == 0) {
2794 break;
2795 }
2796
2797 if (!kmem_cpucache_magazine_alloc(ccp, cp))
2798 break;
2799 }
2800 mutex_exit(&ccp->cc_lock);
2801 if (nbufs != 0) {
2802 ASSERT(head != NULL);
2803
2804 /*
2805 * If there was a failure, return remaining objects to
2806 * the slab
2807 */
2808 while (head != NULL) {
2809 ASSERT(nbufs != 0);
2810 next = head->bc_next;
2811 head->bc_next = NULL;
2812 kmem_slab_free(cp, KMEM_BUF(cp, head));
2813 head = next;
2814 nbufs--;
2815 }
2816 }
2817 ASSERT(head == NULL);
2818 ASSERT(nbufs == 0);
2819 mutex_enter(&cp->cache_lock);
2820 }
2821
2822 /*
2823 * kmem_rezalloc() is currently considered private and subject to change until
2824 * we sort out how we want to handle realloc vs. reallocf style interfaces. We
2825 * have currently chosen realloc.
2826 */
2827 void *
2828 kmem_rezalloc(void *oldbuf, size_t oldsize, size_t newsize, int kmflag)
2829 {
2830 void *newbuf = kmem_alloc(newsize, kmflag);
2831 if (newbuf == NULL) {
2832 return (NULL);
2833 }
2834
2835 bcopy(oldbuf, newbuf, MIN(oldsize, newsize));
2836 if (newsize > oldsize) {
2837 void *start = (void *)((uintptr_t)newbuf + oldsize);
2838 bzero(start, newsize - oldsize);
2839 }
2840
2841 if (oldbuf != NULL) {
2842 ASSERT3U(oldsize, !=, 0);
2843 kmem_free(oldbuf, oldsize);
2844 }
2845
2846 return (newbuf);
2847 }
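
/*
 * Hypothetical usage sketch (sizes are illustrative): growing a zeroed
 * buffer while preserving its contents. As with kmem_free(), the old size
 * must be exactly the size originally allocated:
 *
 *	buf = kmem_zalloc(32, KM_SLEEP);
 *	...
 *	buf = kmem_rezalloc(buf, 32, 64, KM_SLEEP);	(bytes 32..63 zeroed)
 *	kmem_free(buf, 64);
 */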
2848
2849 void *
2850 kmem_zalloc(size_t size, int kmflag)
2851 {
2852 size_t index;
2853 void *buf;
2854
2855 if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2856 kmem_cache_t *cp = kmem_alloc_table[index];
2857 buf = kmem_cache_alloc(cp, kmflag);
2858 if (buf != NULL) {
2859 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2860 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2861 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2862 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2863
2864 if (cp->cache_flags & KMF_LITE) {
2865 KMEM_BUFTAG_LITE_ENTER(btp,
2866 kmem_lite_count, caller());
2867 }
2868 }
2869 bzero(buf, size);
2870 }
2871 } else {
2872 buf = kmem_alloc(size, kmflag);
2873 if (buf != NULL)
2874 bzero(buf, size);
2875 }
2876 return (buf);
2877 }
2878
2879 void *
2880 kmem_alloc(size_t size, int kmflag)
2881 {
2882 size_t index;
2883 kmem_cache_t *cp;
2884 void *buf;
2885
2886 if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2887 cp = kmem_alloc_table[index];
2888 /* fall through to kmem_cache_alloc() */
2889
2890 } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2891 kmem_big_alloc_table_max) {
2892 cp = kmem_big_alloc_table[index];
2893 /* fall through to kmem_cache_alloc() */
2894
2895 } else {
2896 if (size == 0) {
2897 if (kmflag != KM_SLEEP && !(kmflag & KM_PANIC))
2898 return (NULL);
2899
2900 /*
2901 * If this is a sleeping allocation or one that has
2902 * been specified to panic on allocation failure, we
2903 * consider it to be deprecated behavior to allocate
2904 * 0 bytes. If we have been configured to panic under
2905 * this condition, we panic; if to warn, we warn -- and
2906 * regardless, we log to the kmem_zerosized_log
2907 * that this condition has occurred (which gives us
2908 * enough information to be able to debug it).
2909 */
2910 if (kmem_panic && kmem_panic_zerosized)
2911 panic("attempted to kmem_alloc() size of 0");
2912
2913 if (kmem_warn_zerosized) {
2914 cmn_err(CE_WARN, "kmem_alloc(): sleeping "
2915 "allocation with size of 0; "
2916 "see kmem_zerosized_log for details");
2917 }
2918
2919 kmem_log_event(kmem_zerosized_log, NULL, NULL, NULL);
2920
2921 return (NULL);
2922 }
2923
2924 buf = vmem_alloc(kmem_oversize_arena, size,
2925 kmflag & KM_VMFLAGS);
2926 if (buf == NULL)
2927 kmem_log_event(kmem_failure_log, NULL, NULL,
2928 (void *)size);
2929 else if (KMEM_DUMP(kmem_slab_cache)) {
2930 /* stats for dump intercept */
2931 kmem_dump_oversize_allocs++;
2932 if (size > kmem_dump_oversize_max)
2933 kmem_dump_oversize_max = size;
2934 }
2935 return (buf);
2936 }
2937
2938 buf = kmem_cache_alloc(cp, kmflag);
2939 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
2940 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2941 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2942 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2943
2944 if (cp->cache_flags & KMF_LITE) {
2945 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller());
2946 }
2947 }
2948 return (buf);
2949 }
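
/*
 * To illustrate the table lookup above: KMEM_ALIGN_SHIFT is 3 (an 8-byte
 * table unit), so a 96-byte request computes index (96 - 1) >> 3 = 11 and
 * is served from the kmem_alloc_96 cache, as is every request in the range
 * (88, 96]. Sizes beyond the largest table entry fall through to the
 * big-alloc table, and beyond that to the oversize arena.
 */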
2950
2951 void
2952 kmem_free(void *buf, size_t size)
2953 {
2954 size_t index;
2955 kmem_cache_t *cp;
2956
2957 if ((index = (size - 1) >> KMEM_ALIGN_SHIFT) < KMEM_ALLOC_TABLE_MAX) {
2958 cp = kmem_alloc_table[index];
2959 /* fall through to kmem_cache_free() */
2960
2961 } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2962 kmem_big_alloc_table_max) {
2963 cp = kmem_big_alloc_table[index];
2964 /* fall through to kmem_cache_free() */
2965
2966 } else {
2967 EQUIV(buf == NULL, size == 0);
2968 if (buf == NULL && size == 0)
2969 return;
2970 vmem_free(kmem_oversize_arena, buf, size);
2971 return;
2972 }
2973
2974 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2975 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2976 uint32_t *ip = (uint32_t *)btp;
2977 if (ip[1] != KMEM_SIZE_ENCODE(size)) {
2978 if (*(uint64_t *)buf == KMEM_FREE_PATTERN) {
2979 kmem_error(KMERR_DUPFREE, cp, buf);
2980 return;
2981 }
2982 if (KMEM_SIZE_VALID(ip[1])) {
2983 ip[0] = KMEM_SIZE_ENCODE(size);
2984 kmem_error(KMERR_BADSIZE, cp, buf);
2985 } else {
2986 kmem_error(KMERR_REDZONE, cp, buf);
2987 }
2988 return;
2989 }
2990 if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) {
2991 kmem_error(KMERR_REDZONE, cp, buf);
2992 return;
2993 }
2994 btp->bt_redzone = KMEM_REDZONE_PATTERN;
2995 if (cp->cache_flags & KMF_LITE) {
2996 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count,
2997 caller());
2998 }
2999 }
3000 kmem_cache_free(cp, buf);
3001 }
3002
3003 void *
3004 kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
3005 {
3006 size_t realsize = size + vmp->vm_quantum;
3007 void *addr;
3008
3009 /*
3010 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
3011 * vm_quantum will cause integer wraparound. Check for this, and
3012 * blow off the firewall page in this case. Note that such a
3013 * giant allocation (the entire kernel address space) can never
3014 * be satisfied, so it will either fail immediately (VM_NOSLEEP)
3015 * or sleep forever (VM_SLEEP). Thus, there is no need for a
3016 * corresponding check in kmem_firewall_va_free().
3017 */
3018 if (realsize < size)
3019 realsize = size;
3020
3021 /*
3022 * While boot still owns resource management, make sure that this
3023 * redzone virtual address allocation is properly accounted for in
3024 	 * OBP's "virtual-memory" "available" lists because we're
3025 * effectively claiming them for a red zone. If we don't do this,
3026 * the available lists become too fragmented and too large for the
3027 * current boot/kernel memory list interface.
3028 */
3029 addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT);
3030
3031 if (addr != NULL && kvseg.s_base == NULL && realsize != size)
3032 (void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum);
3033
3034 return (addr);
3035 }
3036
3037 void
3038 kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
3039 {
3040 ASSERT((kvseg.s_base == NULL ?
3041 va_to_pfn((char *)addr + size) :
3042 hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID);
3043
3044 vmem_free(vmp, addr, size + vmp->vm_quantum);
3045 }
3046
3047 /*
3048 * Try to allocate at least `size' bytes of memory without sleeping or
3049 * panicking. Return actual allocated size in `asize'. If allocation failed,
3050 * try final allocation with sleep or panic allowed.
3051 */
3052 void *
3053 kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag)
3054 {
3055 void *p;
3056
3057 *asize = P2ROUNDUP(size, KMEM_ALIGN);
3058 do {
3059 p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC);
3060 if (p != NULL)
3061 return (p);
3062 *asize += KMEM_ALIGN;
3063 } while (*asize <= PAGESIZE);
3064
3065 *asize = P2ROUNDUP(size, KMEM_ALIGN);
3066 return (kmem_alloc(*asize, kmflag));
3067 }
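
/*
 * A minimal usage sketch: a caller that can profit from any buffer of at
 * least 'len' bytes (a scratch buffer, say) would do
 *
 *	buf = kmem_alloc_tryhard(len, &asize, KM_SLEEP);
 *
 * and must later free exactly 'asize' bytes via kmem_free(buf, asize),
 * since the size actually allocated may exceed the size requested.
 */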
3068
3069 /*
3070 * Reclaim all unused memory from a cache.
3071 */
3072 static void
3073 kmem_cache_reap(kmem_cache_t *cp)
3074 {
3075 ASSERT(taskq_member(kmem_taskq, curthread));
3076 cp->cache_reap++;
3077
3078 /*
3079 * Ask the cache's owner to free some memory if possible.
3080 * The idea is to handle things like the inode cache, which
3081 * typically sits on a bunch of memory that it doesn't truly
3082 * *need*. Reclaim policy is entirely up to the owner; this
3083 * callback is just an advisory plea for help.
3084 */
3085 if (cp->cache_reclaim != NULL) {
3086 long delta;
3087
3088 /*
3089 * Reclaimed memory should be reapable (not included in the
3090 * depot's working set).
3091 */
3092 delta = cp->cache_full.ml_total;
3093 cp->cache_reclaim(cp->cache_private);
3094 delta = cp->cache_full.ml_total - delta;
3095 if (delta > 0) {
3096 mutex_enter(&cp->cache_depot_lock);
3097 cp->cache_full.ml_reaplimit += delta;
3098 cp->cache_full.ml_min += delta;
3099 mutex_exit(&cp->cache_depot_lock);
3100 }
3101 }
3102
3103 kmem_depot_ws_reap(cp);
3104
3105 if (cp->cache_defrag != NULL && !kmem_move_noreap) {
3106 kmem_cache_defrag(cp);
3107 }
3108 }
3109
3110 static void
3111 kmem_reap_timeout(void *flag_arg)
3112 {
3113 uint32_t *flag = (uint32_t *)flag_arg;
3114
3115 ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3116 *flag = 0;
3117 }
3118
3119 static void
3120 kmem_reap_done(void *flag)
3121 {
3122 if (!callout_init_done) {
3123 /* can't schedule a timeout at this point */
3124 kmem_reap_timeout(flag);
3125 } else {
3126 (void) timeout(kmem_reap_timeout, flag, kmem_reap_interval);
3127 }
3128 }
3129
3130 static void
3131 kmem_reap_start(void *flag)
3132 {
3133 ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3134
3135 if (flag == &kmem_reaping) {
3136 kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3137 /*
3138 * if we have segkp under heap, reap segkp cache.
3139 */
3140 if (segkp_fromheap)
3141 segkp_cache_free();
3142 	} else {
3143 		kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3144 	}
3145
3146 /*
3147 * We use taskq_dispatch() to schedule a timeout to clear
3148 * the flag so that kmem_reap() becomes self-throttling:
3149 * we won't reap again until the current reap completes *and*
3150 * at least kmem_reap_interval ticks have elapsed.
3151 */
3152 if (taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP) ==
3153 TASKQID_INVALID)
3154 kmem_reap_done(flag);
3155 }
3156
3157 static void
3158 kmem_reap_common(void *flag_arg)
3159 {
3160 uint32_t *flag = (uint32_t *)flag_arg;
3161
3162 if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
3163 atomic_cas_32(flag, 0, 1) != 0)
3164 return;
3165
3166 /*
3167 * It may not be kosher to do memory allocation when a reap is called
3168 * (for example, if vmem_populate() is in the call chain). So we
3169 * start the reap going with a TQ_NOALLOC dispatch. If the dispatch
3170 * fails, we reset the flag, and the next reap will try again.
3171 */
3172 if (taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC) ==
3173 TASKQID_INVALID)
3174 *flag = 0;
3175 }
3176
3177 /*
3178 * Reclaim all unused memory from all caches. Called from the VM system
3179 * when memory gets tight.
3180 */
3181 void
3182 kmem_reap(void)
3183 {
3184 kmem_reap_common(&kmem_reaping);
3185 }
3186
3187 /*
3188 * Reclaim all unused memory from identifier arenas, called when a vmem
3189  * arena not backed by memory is exhausted. Since reaping memory-backed caches
3190 * cannot help with identifier exhaustion, we avoid both a large amount of
3191 * work and unwanted side-effects from reclaim callbacks.
3192 */
3193 void
3194 kmem_reap_idspace(void)
3195 {
3196 kmem_reap_common(&kmem_reaping_idspace);
3197 }
3198
3199 /*
3200 * Purge all magazines from a cache and set its magazine limit to zero.
3201 * All calls are serialized by the kmem_taskq lock, except for the final
3202 * call from kmem_cache_destroy().
3203 */
3204 static void
3205 kmem_cache_magazine_purge(kmem_cache_t *cp)
3206 {
3207 kmem_cpu_cache_t *ccp;
3208 kmem_magazine_t *mp, *pmp;
3209 int rounds, prounds, cpu_seqid;
3210
3211 ASSERT(!list_link_active(&cp->cache_link) ||
3212 taskq_member(kmem_taskq, curthread));
3213 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
3214
3215 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3216 ccp = &cp->cache_cpu[cpu_seqid];
3217
3218 mutex_enter(&ccp->cc_lock);
3219 mp = ccp->cc_loaded;
3220 pmp = ccp->cc_ploaded;
3221 rounds = ccp->cc_rounds;
3222 prounds = ccp->cc_prounds;
3223 ccp->cc_loaded = NULL;
3224 ccp->cc_ploaded = NULL;
3225 ccp->cc_rounds = -1;
3226 ccp->cc_prounds = -1;
3227 ccp->cc_magsize = 0;
3228 mutex_exit(&ccp->cc_lock);
3229
3230 if (mp)
3231 kmem_magazine_destroy(cp, mp, rounds);
3232 if (pmp)
3233 kmem_magazine_destroy(cp, pmp, prounds);
3234 }
3235
3236 kmem_depot_ws_zero(cp);
3237 kmem_depot_ws_reap(cp);
3238 }
3239
3240 /*
3241 * Enable per-cpu magazines on a cache.
3242 */
3243 static void
3244 kmem_cache_magazine_enable(kmem_cache_t *cp)
3245 {
3246 int cpu_seqid;
3247
3248 if (cp->cache_flags & KMF_NOMAGAZINE)
3249 return;
3250
3251 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3252 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3253 mutex_enter(&ccp->cc_lock);
3254 ccp->cc_magsize = cp->cache_magtype->mt_magsize;
3255 mutex_exit(&ccp->cc_lock);
3256 }
3258 }
3259
3260 /*
3261 * Allow our caller to determine if there are running reaps.
3262 *
3263  * This call is very conservative and may return B_TRUE even when no
3264  * reaping is in progress. If it returns B_FALSE, however, reaping
3265  * activity is definitely inactive.
3266 */
3267 boolean_t
3268 kmem_cache_reap_active(void)
3269 {
3270 return (!taskq_empty(kmem_taskq));
3271 }
3272
3273 /*
3274 * Reap (almost) everything soon.
3275 *
3276 * Note: this does not wait for the reap-tasks to complete. Caller
3277 * should use kmem_cache_reap_active() (above) and/or moderation to
3278 * avoid scheduling too many reap-tasks.
3279 */
3280 void
3281 kmem_cache_reap_soon(kmem_cache_t *cp)
3282 {
3283 ASSERT(list_link_active(&cp->cache_link));
3284
3285 kmem_depot_ws_zero(cp);
3286
3287 (void) taskq_dispatch(kmem_taskq,
3288 (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
3289 }
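
/*
 * A caller in need of memory might pair the two interfaces above along
 * these lines (an illustrative sketch; the delay length is arbitrary):
 *
 *	kmem_cache_reap_soon(cp);
 *	while (kmem_cache_reap_active())
 *		delay(hz / 10);
 *
 * thereby moderating its requests instead of flooding the taskq with
 * reap-tasks.
 */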
3290
3291 /*
3292 * Recompute a cache's magazine size. The trade-off is that larger magazines
3293 * provide a higher transfer rate with the depot, while smaller magazines
3294 * reduce memory consumption. Magazine resizing is an expensive operation;
3295 * it should not be done frequently.
3296 *
3297 * Changes to the magazine size are serialized by the kmem_taskq lock.
3298 *
3299 * Note: at present this only grows the magazine size. It might be useful
3300 * to allow shrinkage too.
3301 */
3302 static void
3303 kmem_cache_magazine_resize(kmem_cache_t *cp)
3304 {
3305 kmem_magtype_t *mtp = cp->cache_magtype;
3306
3307 ASSERT(taskq_member(kmem_taskq, curthread));
3308
3309 if (cp->cache_chunksize < mtp->mt_maxbuf) {
3310 kmem_cache_magazine_purge(cp);
3311 mutex_enter(&cp->cache_depot_lock);
3312 cp->cache_magtype = ++mtp;
3313 cp->cache_depot_contention_prev =
3314 cp->cache_depot_contention + INT_MAX;
3315 mutex_exit(&cp->cache_depot_lock);
3316 kmem_cache_magazine_enable(cp);
3317 }
3318 }
3319
3320 /*
3321 * Rescale a cache's hash table, so that the table size is roughly the
3322 * cache size. We want the average lookup time to be extremely small.
3323 */
3324 static void
3325 kmem_hash_rescale(kmem_cache_t *cp)
3326 {
3327 kmem_bufctl_t **old_table, **new_table, *bcp;
3328 size_t old_size, new_size, h;
3329
3330 ASSERT(taskq_member(kmem_taskq, curthread));
3331
3332 new_size = MAX(KMEM_HASH_INITIAL,
3333 1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
3334 old_size = cp->cache_hash_mask + 1;
3335
3336 if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
3337 return;
3338
3339 new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *),
3340 VM_NOSLEEP);
3341 if (new_table == NULL)
3342 return;
3343 bzero(new_table, new_size * sizeof (void *));
3344
3345 mutex_enter(&cp->cache_lock);
3346
3347 old_size = cp->cache_hash_mask + 1;
3348 old_table = cp->cache_hash_table;
3349
3350 cp->cache_hash_mask = new_size - 1;
3351 cp->cache_hash_table = new_table;
3352 cp->cache_rescale++;
3353
3354 for (h = 0; h < old_size; h++) {
3355 bcp = old_table[h];
3356 while (bcp != NULL) {
3357 void *addr = bcp->bc_addr;
3358 kmem_bufctl_t *next_bcp = bcp->bc_next;
3359 kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
3360 bcp->bc_next = *hash_bucket;
3361 *hash_bucket = bcp;
3362 bcp = next_bcp;
3363 }
3364 }
3365
3366 mutex_exit(&cp->cache_lock);
3367
3368 vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *));
3369 }
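
/*
 * As a worked example of the sizing rule above: a cache holding 1000
 * buffers yields new_size = 1 << (highbit(3 * 1000 + 4) - 2) =
 * 1 << 10 = 1024 buckets, i.e. roughly one hash bucket per buffer, which
 * keeps the expected lookup chain short.
 */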
3370
3371 /*
3372 * Perform periodic maintenance on a cache: hash rescaling, depot working-set
3373 * update, magazine resizing, and slab consolidation.
3374 */
3375 static void
3376 kmem_cache_update(kmem_cache_t *cp)
3377 {
3378 int need_hash_rescale = 0;
3379 int need_magazine_resize = 0;
3380
3381 ASSERT(MUTEX_HELD(&kmem_cache_lock));
3382
3383 /*
3384 * If the cache has become much larger or smaller than its hash table,
3385 * fire off a request to rescale the hash table.
3386 */
3387 mutex_enter(&cp->cache_lock);
3388
3389 if ((cp->cache_flags & KMF_HASH) &&
3390 (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
3391 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
3392 cp->cache_hash_mask > KMEM_HASH_INITIAL)))
3393 need_hash_rescale = 1;
3394
3395 mutex_exit(&cp->cache_lock);
3396
3397 /*
3398 * Update the depot working set statistics.
3399 */
3400 kmem_depot_ws_update(cp);
3401
3402 /*
3403 * If there's a lot of contention in the depot,
3404 * increase the magazine size.
3405 */
3406 mutex_enter(&cp->cache_depot_lock);
3407
3408 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
3409 (int)(cp->cache_depot_contention -
3410 cp->cache_depot_contention_prev) > kmem_depot_contention)
3411 need_magazine_resize = 1;
3412
3413 cp->cache_depot_contention_prev = cp->cache_depot_contention;
3414
3415 mutex_exit(&cp->cache_depot_lock);
3416
3417 if (need_hash_rescale)
3418 (void) taskq_dispatch(kmem_taskq,
3419 (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
3420
3421 if (need_magazine_resize)
3422 (void) taskq_dispatch(kmem_taskq,
3423 (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
3424
3425 if (cp->cache_defrag != NULL)
3426 (void) taskq_dispatch(kmem_taskq,
3427 (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
3428 }
3429
3430 static void kmem_update(void *);
3431
3432 static void
3433 kmem_update_timeout(void *dummy)
3434 {
3435 (void) timeout(kmem_update, dummy, kmem_reap_interval);
3436 }
3437
3438 static void
3439 kmem_update(void *dummy)
3440 {
3441 kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP);
3442
3443 /*
3444 * We use taskq_dispatch() to reschedule the timeout so that
3445 * kmem_update() becomes self-throttling: it won't schedule
3446 * new tasks until all previous tasks have completed.
3447 */
3448 if (taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP)
3449 == TASKQID_INVALID)
3450 kmem_update_timeout(NULL);
3451 }
3452
3453 static int
3454 kmem_cache_kstat_update(kstat_t *ksp, int rw)
3455 {
3456 struct kmem_cache_kstat *kmcp = &kmem_cache_kstat;
3457 kmem_cache_t *cp = ksp->ks_private;
3458 uint64_t cpu_buf_avail;
3459 uint64_t buf_avail = 0;
3460 int cpu_seqid;
3461 long reap;
3462
3463 ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock));
3464
3465 if (rw == KSTAT_WRITE)
3466 return (EACCES);
3467
3468 mutex_enter(&cp->cache_lock);
3469
3470 kmcp->kmc_alloc_fail.value.ui64 = cp->cache_alloc_fail;
3471 kmcp->kmc_alloc.value.ui64 = cp->cache_slab_alloc;
3472 kmcp->kmc_free.value.ui64 = cp->cache_slab_free;
3473 kmcp->kmc_slab_alloc.value.ui64 = cp->cache_slab_alloc;
3474 kmcp->kmc_slab_free.value.ui64 = cp->cache_slab_free;
3475
3476 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3477 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3478
3479 mutex_enter(&ccp->cc_lock);
3480
3481 cpu_buf_avail = 0;
3482 if (ccp->cc_rounds > 0)
3483 cpu_buf_avail += ccp->cc_rounds;
3484 if (ccp->cc_prounds > 0)
3485 cpu_buf_avail += ccp->cc_prounds;
3486
3487 kmcp->kmc_alloc.value.ui64 += ccp->cc_alloc;
3488 kmcp->kmc_free.value.ui64 += ccp->cc_free;
3489 buf_avail += cpu_buf_avail;
3490
3491 mutex_exit(&ccp->cc_lock);
3492 }
3493
3494 mutex_enter(&cp->cache_depot_lock);
3495
3496 kmcp->kmc_depot_alloc.value.ui64 = cp->cache_full.ml_alloc;
3497 kmcp->kmc_depot_free.value.ui64 = cp->cache_empty.ml_alloc;
3498 kmcp->kmc_depot_contention.value.ui64 = cp->cache_depot_contention;
3499 kmcp->kmc_full_magazines.value.ui64 = cp->cache_full.ml_total;
3500 kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total;
3501 kmcp->kmc_magazine_size.value.ui64 =
3502 (cp->cache_flags & KMF_NOMAGAZINE) ?
3503 0 : cp->cache_magtype->mt_magsize;
3504
3505 kmcp->kmc_alloc.value.ui64 += cp->cache_full.ml_alloc;
3506 kmcp->kmc_free.value.ui64 += cp->cache_empty.ml_alloc;
3507 buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
3508
3509 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
3510 reap = MIN(reap, cp->cache_full.ml_total);
3511
3512 mutex_exit(&cp->cache_depot_lock);
3513
3514 kmcp->kmc_buf_size.value.ui64 = cp->cache_bufsize;
3515 kmcp->kmc_align.value.ui64 = cp->cache_align;
3516 kmcp->kmc_chunk_size.value.ui64 = cp->cache_chunksize;
3517 kmcp->kmc_slab_size.value.ui64 = cp->cache_slabsize;
3518 kmcp->kmc_buf_constructed.value.ui64 = buf_avail;
3519 buf_avail += cp->cache_bufslab;
3520 kmcp->kmc_buf_avail.value.ui64 = buf_avail;
3521 kmcp->kmc_buf_inuse.value.ui64 = cp->cache_buftotal - buf_avail;
3522 kmcp->kmc_buf_total.value.ui64 = cp->cache_buftotal;
3523 kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax;
3524 kmcp->kmc_slab_create.value.ui64 = cp->cache_slab_create;
3525 kmcp->kmc_slab_destroy.value.ui64 = cp->cache_slab_destroy;
3526 kmcp->kmc_hash_size.value.ui64 = (cp->cache_flags & KMF_HASH) ?
3527 cp->cache_hash_mask + 1 : 0;
3528 kmcp->kmc_hash_lookup_depth.value.ui64 = cp->cache_lookup_depth;
3529 kmcp->kmc_hash_rescale.value.ui64 = cp->cache_rescale;
3530 kmcp->kmc_vmem_source.value.ui64 = cp->cache_arena->vm_id;
3531 kmcp->kmc_reap.value.ui64 = cp->cache_reap;
3532
3533 if (cp->cache_defrag == NULL) {
3534 kmcp->kmc_move_callbacks.value.ui64 = 0;
3535 kmcp->kmc_move_yes.value.ui64 = 0;
3536 kmcp->kmc_move_no.value.ui64 = 0;
3537 kmcp->kmc_move_later.value.ui64 = 0;
3538 kmcp->kmc_move_dont_need.value.ui64 = 0;
3539 kmcp->kmc_move_dont_know.value.ui64 = 0;
3540 kmcp->kmc_move_hunt_found.value.ui64 = 0;
3541 kmcp->kmc_move_slabs_freed.value.ui64 = 0;
3542 kmcp->kmc_defrag.value.ui64 = 0;
3543 kmcp->kmc_scan.value.ui64 = 0;
3544 kmcp->kmc_move_reclaimable.value.ui64 = 0;
3545 } else {
3546 int64_t reclaimable;
3547
3548 kmem_defrag_t *kd = cp->cache_defrag;
3549 kmcp->kmc_move_callbacks.value.ui64 = kd->kmd_callbacks;
3550 kmcp->kmc_move_yes.value.ui64 = kd->kmd_yes;
3551 kmcp->kmc_move_no.value.ui64 = kd->kmd_no;
3552 kmcp->kmc_move_later.value.ui64 = kd->kmd_later;
3553 kmcp->kmc_move_dont_need.value.ui64 = kd->kmd_dont_need;
3554 kmcp->kmc_move_dont_know.value.ui64 = kd->kmd_dont_know;
3555 kmcp->kmc_move_hunt_found.value.ui64 = 0;
3556 kmcp->kmc_move_slabs_freed.value.ui64 = kd->kmd_slabs_freed;
3557 kmcp->kmc_defrag.value.ui64 = kd->kmd_defrags;
3558 kmcp->kmc_scan.value.ui64 = kd->kmd_scans;
3559
3560 reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1);
3561 reclaimable = MAX(reclaimable, 0);
3562 reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
3563 kmcp->kmc_move_reclaimable.value.ui64 = reclaimable;
3564 }
3565
3566 mutex_exit(&cp->cache_lock);
3567 return (0);
3568 }
3569
3570 /*
3571 * Return a named statistic about a particular cache.
3572 * This shouldn't be called very often, so it's currently designed for
3573 * simplicity (leverages existing kstat support) rather than efficiency.
3574 */
3575 uint64_t
3576 kmem_cache_stat(kmem_cache_t *cp, char *name)
3577 {
3578 int i;
3579 kstat_t *ksp = cp->cache_kstat;
3580 kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat;
3581 uint64_t value = 0;
3582
3583 if (ksp != NULL) {
3584 mutex_enter(&kmem_cache_kstat_lock);
3585 (void) kmem_cache_kstat_update(ksp, KSTAT_READ);
3586 for (i = 0; i < ksp->ks_ndata; i++) {
3587 if (strcmp(knp[i].name, name) == 0) {
3588 value = knp[i].value.ui64;
3589 break;
3590 }
3591 }
3592 mutex_exit(&kmem_cache_kstat_lock);
3593 }
3594 return (value);
3595 }
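
/*
 * For example, a client can query its own cache's occupancy using the
 * names installed by the kstat template above:
 *
 *	uint64_t inuse = kmem_cache_stat(cp, "buf_inuse");
 *	uint64_t total = kmem_cache_stat(cp, "buf_total");
 */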
3596
3597 /*
3598 * Return an estimate of currently available kernel heap memory.
3599  * On 32-bit systems, where physical memory may exceed virtual memory,
3600  * we simply truncate the result at 1GB.
3601 */
3602 size_t
3603 kmem_avail(void)
3604 {
3605 spgcnt_t rmem = availrmem - tune.t_minarmem;
3606 spgcnt_t fmem = freemem - minfree;
3607
3608 return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0),
3609 1 << (30 - PAGESHIFT))));
3610 }
3611
3612 /*
3613 * Return the maximum amount of memory that is (in theory) allocatable
3614  * from the heap. This is only an estimate, since there is no guarantee
3615  * that this space will still be available when an allocation request is
3616  * made, nor that the space can be allocated in one contiguous request,
3617  * due to kernel heap fragmentation.
3618 */
3619 size_t
3620 kmem_maxavail(void)
3621 {
3622 spgcnt_t pmem = availrmem - tune.t_minarmem;
3623 spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE));
3624
3625 return ((size_t)ptob(MAX(MIN(pmem, vmem), 0)));
3626 }
3627
3628 /*
3629 * Indicate whether memory-intensive kmem debugging is enabled.
3630 */
3631 int
3632 kmem_debugging(void)
3633 {
3634 return (kmem_flags & (KMF_AUDIT | KMF_REDZONE));
3635 }
3636
3637 /* binning function, sorts finely at the two extremes */
3638 #define KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift) \
3639 ((((sp)->slab_refcnt <= (binshift)) || \
3640 (((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift))) \
3641 ? -(sp)->slab_refcnt \
3642 : -((binshift) + ((sp)->slab_refcnt >> (binshift))))
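
/*
 * A worked example of the weight function, taking binshift = 4 and a slab
 * of 64 chunks: slab_refcnt 3 (nearly empty) weighs -3; slab_refcnt 62
 * (nearly full, since 64 - 62 <= 4) weighs -62; and a middling slab with
 * slab_refcnt 30 lands in a coarse bin with weight -(4 + (30 >> 4)) = -5.
 * Lower weights sort first, so nearly-full slabs lead the freelist.
 */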
3643
3644 /*
3645 * Minimizing the number of partial slabs on the freelist minimizes
3646 * fragmentation (the ratio of unused buffers held by the slab layer). There are
3647 * two ways to get a slab off of the freelist: 1) free all the buffers on the
3648 * slab, and 2) allocate all the buffers on the slab. It follows that we want
3649 * the most-used slabs at the front of the list where they have the best chance
3650 * of being completely allocated, and the least-used slabs at a safe distance
3651 * from the front to improve the odds that the few remaining buffers will all be
3652 * freed before another allocation can tie up the slab. For that reason a slab
3653  * with a higher slab_refcnt sorts less than a slab with a lower
3654 * slab_refcnt.
3655 *
3656 * However, if a slab has at least one buffer that is deemed unfreeable, we
3657 * would rather have that slab at the front of the list regardless of
3658 * slab_refcnt, since even one unfreeable buffer makes the entire slab
3659 * unfreeable. If the client returns KMEM_CBRC_NO in response to a cache_move()
3660 * callback, the slab is marked unfreeable for as long as it remains on the
3661 * freelist.
3662 */
3663 static int
3664 kmem_partial_slab_cmp(const void *p0, const void *p1)
3665 {
3666 const kmem_cache_t *cp;
3667 const kmem_slab_t *s0 = p0;
3668 const kmem_slab_t *s1 = p1;
3669 int w0, w1;
3670 size_t binshift;
3671
3672 ASSERT(KMEM_SLAB_IS_PARTIAL(s0));
3673 ASSERT(KMEM_SLAB_IS_PARTIAL(s1));
3674 ASSERT(s0->slab_cache == s1->slab_cache);
3675 cp = s1->slab_cache;
3676 ASSERT(MUTEX_HELD(&cp->cache_lock));
3677 binshift = cp->cache_partial_binshift;
3678
3679 /* weight of first slab */
3680 w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift);
3681 if (s0->slab_flags & KMEM_SLAB_NOMOVE) {
3682 w0 -= cp->cache_maxchunks;
3683 }
3684
3685 /* weight of second slab */
3686 w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift);
3687 if (s1->slab_flags & KMEM_SLAB_NOMOVE) {
3688 w1 -= cp->cache_maxchunks;
3689 }
3690
3691 if (w0 < w1)
3692 return (-1);
3693 if (w0 > w1)
3694 return (1);
3695
3696 /* compare pointer values */
3697 if ((uintptr_t)s0 < (uintptr_t)s1)
3698 return (-1);
3699 if ((uintptr_t)s0 > (uintptr_t)s1)
3700 return (1);
3701
3702 return (0);
3703 }
3704
3705 /*
3706 * It must be valid to call the destructor (if any) on a newly created object.
3707 * That is, the constructor (if any) must leave the object in a valid state for
3708 * the destructor.
3709 */
3710 kmem_cache_t *
3711 kmem_cache_create(
3712 char *name, /* descriptive name for this cache */
3713 size_t bufsize, /* size of the objects it manages */
3714 size_t align, /* required object alignment */
3715 int (*constructor)(void *, void *, int), /* object constructor */
3716 void (*destructor)(void *, void *), /* object destructor */
3717 void (*reclaim)(void *), /* memory reclaim callback */
3718 void *private, /* pass-thru arg for constr/destr/reclaim */
3719 vmem_t *vmp, /* vmem source for slab allocation */
3720 int cflags) /* cache creation flags */
3721 {
3722 int cpu_seqid;
3723 size_t chunksize;
3724 kmem_cache_t *cp;
3725 kmem_magtype_t *mtp;
3726 size_t csize = KMEM_CACHE_SIZE(max_ncpus);
3727
3728 #ifdef DEBUG
3729 /*
3730 * Cache names should conform to the rules for valid C identifiers
3731 */
3732 if (!strident_valid(name)) {
3733 cmn_err(CE_CONT,
3734 "kmem_cache_create: '%s' is an invalid cache name\n"
3735 "cache names must conform to the rules for "
3736 "C identifiers\n", name);
3737 }
3738 #endif /* DEBUG */
3739
3740 if (vmp == NULL)
3741 vmp = kmem_default_arena;
3742
3743 /*
3744 * If this kmem cache has an identifier vmem arena as its source, mark
3745 * it such to allow kmem_reap_idspace().
3746 */
3747 ASSERT(!(cflags & KMC_IDENTIFIER)); /* consumer should not set this */
3748 if (vmp->vm_cflags & VMC_IDENTIFIER)
3749 cflags |= KMC_IDENTIFIER;
3750
3751 /*
3752 * Get a kmem_cache structure. We arrange that cp->cache_cpu[]
3753 * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent
3754 * false sharing of per-CPU data.
3755 */
3756 cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
3757 P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP);
3758 bzero(cp, csize);
3759 list_link_init(&cp->cache_link);
3760
3761 if (align == 0)
3762 align = KMEM_ALIGN;
3763
3764 /*
3765 * If we're not at least KMEM_ALIGN aligned, we can't use free
3766 * memory to hold bufctl information (because we can't safely
3767 * perform word loads and stores on it).
3768 */
3769 if (align < KMEM_ALIGN)
3770 cflags |= KMC_NOTOUCH;
3771
3772 if (!ISP2(align) || align > vmp->vm_quantum)
3773 panic("kmem_cache_create: bad alignment %lu", align);
3774
3775 mutex_enter(&kmem_flags_lock);
3776 if (kmem_flags & KMF_RANDOMIZE)
3777 kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) |
3778 KMF_RANDOMIZE;
3779 cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
3780 mutex_exit(&kmem_flags_lock);
3781
3782 /*
3783 * Make sure all the various flags are reasonable.
3784 */
3785 ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH));
3786
3787 if (cp->cache_flags & KMF_LITE) {
3788 if (bufsize >= kmem_lite_minsize &&
3789 align <= kmem_lite_maxalign &&
3790 P2PHASE(bufsize, kmem_lite_maxalign) != 0) {
3791 cp->cache_flags |= KMF_BUFTAG;
3792 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3793 } else {
3794 cp->cache_flags &= ~KMF_DEBUG;
3795 }
3796 }
3797
3798 if (cp->cache_flags & KMF_DEADBEEF)
3799 cp->cache_flags |= KMF_REDZONE;
3800
3801 if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
3802 cp->cache_flags |= KMF_NOMAGAZINE;
3803
3804 if (cflags & KMC_NODEBUG)
3805 cp->cache_flags &= ~KMF_DEBUG;
3806
3807 if (cflags & KMC_NOTOUCH)
3808 cp->cache_flags &= ~KMF_TOUCH;
3809
3810 if (cflags & KMC_PREFILL)
3811 cp->cache_flags |= KMF_PREFILL;
3812
3813 if (cflags & KMC_NOHASH)
3814 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3815
3816 if (cflags & KMC_NOMAGAZINE)
3817 cp->cache_flags |= KMF_NOMAGAZINE;
3818
3819 if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
3820 cp->cache_flags |= KMF_REDZONE;
3821
3822 if (!(cp->cache_flags & KMF_AUDIT))
3823 cp->cache_flags &= ~KMF_CONTENTS;
3824
3825 if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
3826 !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
3827 cp->cache_flags |= KMF_FIREWALL;
3828
3829 if (vmp != kmem_default_arena || kmem_firewall_arena == NULL)
3830 cp->cache_flags &= ~KMF_FIREWALL;
3831
3832 if (cp->cache_flags & KMF_FIREWALL) {
3833 cp->cache_flags &= ~KMF_BUFTAG;
3834 cp->cache_flags |= KMF_NOMAGAZINE;
3835 ASSERT(vmp == kmem_default_arena);
3836 vmp = kmem_firewall_arena;
3837 }
3838
3839 /*
3840 * Set cache properties.
3841 */
3842 (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
3843 strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
3844 cp->cache_bufsize = bufsize;
3845 cp->cache_align = align;
3846 cp->cache_constructor = constructor;
3847 cp->cache_destructor = destructor;
3848 cp->cache_reclaim = reclaim;
3849 cp->cache_private = private;
3850 cp->cache_arena = vmp;
3851 cp->cache_cflags = cflags;
3852
3853 /*
3854 * Determine the chunk size.
3855 */
3856 chunksize = bufsize;
3857
3858 if (align >= KMEM_ALIGN) {
3859 chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN);
3860 cp->cache_bufctl = chunksize - KMEM_ALIGN;
3861 }
3862
3863 if (cp->cache_flags & KMF_BUFTAG) {
3864 cp->cache_bufctl = chunksize;
3865 cp->cache_buftag = chunksize;
3866 if (cp->cache_flags & KMF_LITE)
3867 chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count);
3868 else
3869 chunksize += sizeof (kmem_buftag_t);
3870 }
3871
3872 if (cp->cache_flags & KMF_DEADBEEF) {
3873 cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
3874 if (cp->cache_flags & KMF_LITE)
3875 cp->cache_verify = sizeof (uint64_t);
3876 }
3877
3878 cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
3879
3880 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
3881
3882 /*
3883 * Now that we know the chunk size, determine the optimal slab size.
3884 */
3885 if (vmp == kmem_firewall_arena) {
3886 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
3887 cp->cache_mincolor = cp->cache_slabsize - chunksize;
3888 cp->cache_maxcolor = cp->cache_mincolor;
3889 cp->cache_flags |= KMF_HASH;
3890 ASSERT(!(cp->cache_flags & KMF_BUFTAG));
3891 } else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) &&
3892 !(cp->cache_flags & KMF_AUDIT) &&
3893 chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) {
3894 cp->cache_slabsize = vmp->vm_quantum;
3895 cp->cache_mincolor = 0;
3896 cp->cache_maxcolor =
3897 (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
3898 ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
3899 ASSERT(!(cp->cache_flags & KMF_AUDIT));
3900 } else {
3901 size_t chunks, bestfit, waste, slabsize;
3902 size_t minwaste = LONG_MAX;
3903
3904 bestfit = 0;
3905 for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) {
3906 slabsize = P2ROUNDUP(chunksize * chunks,
3907 vmp->vm_quantum);
3908 chunks = slabsize / chunksize;
3909 waste = (slabsize % chunksize) / chunks;
3910 if (waste < minwaste) {
3911 minwaste = waste;
3912 bestfit = slabsize;
3913 }
3914 }
3915 if (cflags & KMC_QCACHE)
3916 bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max);
3917 cp->cache_slabsize = bestfit;
3918 cp->cache_mincolor = 0;
3919 cp->cache_maxcolor = bestfit % chunksize;
3920 cp->cache_flags |= KMF_HASH;
3921 }
3922
3923 cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
3924 cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
3925
3926 /*
3927 * Disallowing prefill when either the DEBUG or HASH flag is set or when
3928 * there is a constructor avoids some tricky issues with debug setup
3929 * that may be revisited later. We cannot allow prefill in a
3930 * metadata cache because of potential recursion.
3931 */
3932 if (vmp == kmem_msb_arena ||
3933 cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
3934 cp->cache_constructor != NULL)
3935 cp->cache_flags &= ~KMF_PREFILL;
3936
3937 if (cp->cache_flags & KMF_HASH) {
3938 ASSERT(!(cflags & KMC_NOHASH));
3939 cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
3940 kmem_bufctl_audit_cache : kmem_bufctl_cache;
3941 }
3942
3943 if (cp->cache_maxcolor >= vmp->vm_quantum)
3944 cp->cache_maxcolor = vmp->vm_quantum - 1;
3945
3946 cp->cache_color = cp->cache_mincolor;
3947
3948 /*
3949 * Initialize the rest of the slab layer.
3950 */
3951 mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
3952
3953 avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
3954 sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3955 /* LINTED: E_TRUE_LOGICAL_EXPR */
3956 ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
3957 /* reuse partial slab AVL linkage for complete slab list linkage */
3958 list_create(&cp->cache_complete_slabs,
3959 sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3960
3961 if (cp->cache_flags & KMF_HASH) {
3962 cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
3963 KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP);
3964 bzero(cp->cache_hash_table,
3965 KMEM_HASH_INITIAL * sizeof (void *));
3966 cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
3967 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
3968 }
3969
3970 /*
3971 * Initialize the depot.
3972 */
3973 mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
3974
3975 for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
3976 continue;
3977
3978 cp->cache_magtype = mtp;
3979
3980 /*
3981 * Initialize the CPU layer.
3982 */
3983 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3984 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3985 mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL);
3986 ccp->cc_flags = cp->cache_flags;
3987 ccp->cc_rounds = -1;
3988 ccp->cc_prounds = -1;
3989 }
3990
3991 /*
3992 * Create the cache's kstats.
3993 */
3994 if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
3995 "kmem_cache", KSTAT_TYPE_NAMED,
3996 sizeof (kmem_cache_kstat) / sizeof (kstat_named_t),
3997 KSTAT_FLAG_VIRTUAL)) != NULL) {
3998 cp->cache_kstat->ks_data = &kmem_cache_kstat;
3999 cp->cache_kstat->ks_update = kmem_cache_kstat_update;
4000 cp->cache_kstat->ks_private = cp;
4001 cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
4002 kstat_install(cp->cache_kstat);
4003 }
4004
4005 /*
4006 * Add the cache to the global list. This makes it visible
4007 * to kmem_update(), so the cache must be ready for business.
4008 */
4009 mutex_enter(&kmem_cache_lock);
4010 list_insert_tail(&kmem_caches, cp);
4011 mutex_exit(&kmem_cache_lock);
4012
4013 if (kmem_ready)
4014 kmem_cache_magazine_enable(cp);
4015
4016 return (cp);
4017 }
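
/*
 * A typical client creates its cache once at load time and allocates from
 * it thereafter; a minimal sketch ('foo_t', 'foo_construct', and
 * 'foo_destruct' are hypothetical):
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
 *	    foo_construct, foo_destruct, NULL, NULL, NULL, 0);
 *	...
 *	foo_t *fp = kmem_cache_alloc(foo_cache, KM_SLEEP);
 *
 * Passing 0 for align and NULL for the vmem source selects KMEM_ALIGN and
 * kmem_default_arena, respectively, as handled above.
 */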
4018
4019 static int
4020 kmem_move_cmp(const void *buf, const void *p)
4021 {
4022 const kmem_move_t *kmm = p;
4023 uintptr_t v1 = (uintptr_t)buf;
4024 uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf;
4025 return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0));
4026 }
4027
4028 static void
4029 kmem_reset_reclaim_threshold(kmem_defrag_t *kmd)
4030 {
4031 kmd->kmd_reclaim_numer = 1;
4032 }
4033
4034 /*
4035 * Initially, when choosing candidate slabs for buffers to move, we want to be
4036 * very selective and take only slabs that are less than
4037 * (1 / KMEM_VOID_FRACTION) allocated. If we have difficulty finding candidate
4038 * slabs, then we raise the allocation ceiling incrementally. The reclaim
4039 * threshold is reset to (1 / KMEM_VOID_FRACTION) as soon as the cache is no
4040 * longer fragmented.
4041 */
4042 static void
4043 kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction)
4044 {
4045 if (direction > 0) {
4046 /* make it easier to find a candidate slab */
4047 if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) {
4048 kmd->kmd_reclaim_numer++;
4049 }
4050 } else {
4051 /* be more selective */
4052 if (kmd->kmd_reclaim_numer > 1) {
4053 kmd->kmd_reclaim_numer--;
4054 }
4055 }
4056 }
4057
4058 void
4059 kmem_cache_set_move(kmem_cache_t *cp,
4060 kmem_cbrc_t (*move)(void *, void *, size_t, void *))
4061 {
4062 kmem_defrag_t *defrag;
4063
4064 ASSERT(move != NULL);
4065 /*
4066 * The consolidator does not support NOTOUCH caches because kmem cannot
4067 * initialize their slabs with the 0xbaddcafe memory pattern, which sets
4068 * a low order bit usable by clients to distinguish uninitialized memory
4069 * from known objects (see kmem_slab_create).
4070 */
4071 ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
4072 ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
4073
4074 /*
4075 * We should not be holding anyone's cache lock when calling
4076 * kmem_cache_alloc(), so allocate in all cases before acquiring the
4077 * lock.
4078 */
4079 defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP);
4080
4081 mutex_enter(&cp->cache_lock);
4082
4083 if (KMEM_IS_MOVABLE(cp)) {
4084 if (cp->cache_move == NULL) {
4085 ASSERT(cp->cache_slab_alloc == 0);
4086
4087 cp->cache_defrag = defrag;
4088 defrag = NULL; /* nothing to free */
4089 bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
4090 avl_create(&cp->cache_defrag->kmd_moves_pending,
4091 kmem_move_cmp, sizeof (kmem_move_t),
4092 offsetof(kmem_move_t, kmm_entry));
4093 /* LINTED: E_TRUE_LOGICAL_EXPR */
4094 ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
4095 /* reuse the slab's AVL linkage for deadlist linkage */
4096 list_create(&cp->cache_defrag->kmd_deadlist,
4097 sizeof (kmem_slab_t),
4098 offsetof(kmem_slab_t, slab_link));
4099 kmem_reset_reclaim_threshold(cp->cache_defrag);
4100 }
4101 cp->cache_move = move;
4102 }
4103
4104 mutex_exit(&cp->cache_lock);
4105
4106 if (defrag != NULL) {
4107 kmem_cache_free(kmem_defrag_cache, defrag); /* unused */
4108 }
4109 }
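
/*
 * A hedged sketch of the usual shape of a client's move callback
 * ('foo_t' and 'foo_held' are hypothetical):
 *
 *	static kmem_cbrc_t
 *	foo_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		foo_t *fp = old;
 *
 *		if (foo_held(fp))
 *			return (KMEM_CBRC_LATER);
 *		bcopy(old, new, size);
 *		return (KMEM_CBRC_YES);
 *	}
 *
 * Note that the callback frees neither buffer; kmem frees whichever one
 * the response designates, as described in the move callback comment
 * further below.
 */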
4110
4111 void
4112 kmem_cache_destroy(kmem_cache_t *cp)
4113 {
4114 int cpu_seqid;
4115
4116 /*
4117 * Remove the cache from the global cache list so that no one else
4118 * can schedule tasks on its behalf, wait for any pending tasks to
4119 * complete, purge the cache, and then destroy it.
4120 */
4121 mutex_enter(&kmem_cache_lock);
4122 list_remove(&kmem_caches, cp);
4123 mutex_exit(&kmem_cache_lock);
4124
4125 if (kmem_taskq != NULL)
4126 taskq_wait(kmem_taskq);
4127
4128 if (kmem_move_taskq != NULL && cp->cache_defrag != NULL)
4129 taskq_wait(kmem_move_taskq);
4130
4131 kmem_cache_magazine_purge(cp);
4132
4133 mutex_enter(&cp->cache_lock);
4134 if (cp->cache_buftotal != 0)
4135 cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty",
4136 cp->cache_name, (void *)cp);
4137 if (cp->cache_defrag != NULL) {
4138 avl_destroy(&cp->cache_defrag->kmd_moves_pending);
4139 list_destroy(&cp->cache_defrag->kmd_deadlist);
4140 kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
4141 cp->cache_defrag = NULL;
4142 }
4143 /*
4144 * The cache is now dead. There should be no further activity. We
4145 * enforce this by setting land mines in the constructor, destructor,
4146 * reclaim, and move routines that induce a kernel text fault if
4147 * invoked.
4148 */
4149 cp->cache_constructor = (int (*)(void *, void *, int))1;
4150 cp->cache_destructor = (void (*)(void *, void *))2;
4151 cp->cache_reclaim = (void (*)(void *))3;
4152 cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
4153 mutex_exit(&cp->cache_lock);
4154
4155 kstat_delete(cp->cache_kstat);
4156
4157 if (cp->cache_hash_table != NULL)
4158 vmem_free(kmem_hash_arena, cp->cache_hash_table,
4159 (cp->cache_hash_mask + 1) * sizeof (void *));
4160
4161 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++)
4162 mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
4163
4164 mutex_destroy(&cp->cache_depot_lock);
4165 mutex_destroy(&cp->cache_lock);
4166
4167 vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
4168 }
4169
4170 /*ARGSUSED*/
4171 static int
4172 kmem_cpu_setup(cpu_setup_t what, int id, void *arg)
4173 {
4174 ASSERT(MUTEX_HELD(&cpu_lock));
4175 if (what == CPU_UNCONFIG) {
4176 kmem_cache_applyall(kmem_cache_magazine_purge,
4177 kmem_taskq, TQ_SLEEP);
4178 kmem_cache_applyall(kmem_cache_magazine_enable,
4179 kmem_taskq, TQ_SLEEP);
4180 }
4181 return (0);
4182 }
4183
4184 static void
4185 kmem_alloc_caches_create(const int *array, size_t count,
4186 kmem_cache_t **alloc_table, size_t maxbuf, uint_t shift)
4187 {
4188 char name[KMEM_CACHE_NAMELEN + 1];
4189 size_t table_unit = (1 << shift); /* range of one alloc_table entry */
4190 size_t size = table_unit;
4191 int i;
4192
4193 for (i = 0; i < count; i++) {
4194 size_t cache_size = array[i];
4195 size_t align = KMEM_ALIGN;
4196 kmem_cache_t *cp;
4197
4198 /* if the table has an entry for maxbuf, we're done */
4199 if (size > maxbuf)
4200 break;
4201
4202 /* cache size must be a multiple of the table unit */
4203 ASSERT(P2PHASE(cache_size, table_unit) == 0);
4204
4205 /*
4206 * If they allocate a multiple of the coherency granularity,
4207 * they get a coherency-granularity-aligned address.
4208 */
4209 if (IS_P2ALIGNED(cache_size, 64))
4210 align = 64;
4211 if (IS_P2ALIGNED(cache_size, PAGESIZE))
4212 align = PAGESIZE;
4213 (void) snprintf(name, sizeof (name),
4214 "kmem_alloc_%lu", cache_size);
4215 cp = kmem_cache_create(name, cache_size, align,
4216 NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC);
4217
4218 while (size <= cache_size) {
4219 alloc_table[(size - 1) >> shift] = cp;
4220 size += table_unit;
4221 }
4222 }
4223
4224 ASSERT(size > maxbuf); /* i.e. maxbuf <= max(cache_size) */
4225 }
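
/*
 * To illustrate the table fill: with shift = KMEM_ALIGN_SHIFT (a table
 * unit of 8 bytes) and adjacent array entries 64 and 80, the 80-byte
 * cache claims slots 8 and 9 (sizes 65-72 and 73-80), so a 72-byte
 * request indexes slot (72 - 1) >> 3 = 8 and is served by kmem_alloc_80.
 */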
4226
4227 static void
4228 kmem_cache_init(int pass, int use_large_pages)
4229 {
4230 int i;
4231 size_t maxbuf;
4232 kmem_magtype_t *mtp;
4233
4234 for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) {
4235 char name[KMEM_CACHE_NAMELEN + 1];
4236
4237 mtp = &kmem_magtype[i];
4238 (void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize);
4239 mtp->mt_cache = kmem_cache_create(name,
4240 (mtp->mt_magsize + 1) * sizeof (void *),
4241 mtp->mt_align, NULL, NULL, NULL, NULL,
4242 kmem_msb_arena, KMC_NOHASH);
4243 }
4244
4245 kmem_slab_cache = kmem_cache_create("kmem_slab_cache",
4246 sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL,
4247 kmem_msb_arena, KMC_NOHASH);
4248
4249 kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache",
4250 sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL,
4251 kmem_msb_arena, KMC_NOHASH);
4252
4253 kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache",
4254 sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL,
4255 kmem_msb_arena, KMC_NOHASH);
4256
4257 if (pass == 2) {
4258 kmem_va_arena = vmem_create("kmem_va",
4259 NULL, 0, PAGESIZE,
4260 vmem_alloc, vmem_free, heap_arena,
4261 8 * PAGESIZE, VM_SLEEP);
4262
4263 if (use_large_pages) {
4264 kmem_default_arena = vmem_xcreate("kmem_default",
4265 NULL, 0, PAGESIZE,
4266 segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena,
4267 0, VMC_DUMPSAFE | VM_SLEEP);
4268 } else {
4269 kmem_default_arena = vmem_create("kmem_default",
4270 NULL, 0, PAGESIZE,
4271 segkmem_alloc, segkmem_free, kmem_va_arena,
4272 0, VMC_DUMPSAFE | VM_SLEEP);
4273 }
4274
4275 /* Figure out what our maximum cache size is */
4276 maxbuf = kmem_max_cached;
4277 if (maxbuf <= KMEM_MAXBUF) {
4278 maxbuf = 0;
4279 kmem_max_cached = KMEM_MAXBUF;
4280 } else {
4281 size_t size = 0;
4282 size_t max =
4283 sizeof (kmem_big_alloc_sizes) / sizeof (int);
4284 /*
4285 * Round maxbuf up to an existing cache size. If maxbuf
4286 * is larger than the largest cache, we truncate it to
4287 * the largest cache's size.
4288 */
4289 for (i = 0; i < max; i++) {
4290 size = kmem_big_alloc_sizes[i];
4291 if (maxbuf <= size)
4292 break;
4293 }
4294 kmem_max_cached = maxbuf = size;
4295 }
4296
4297 /*
4298 * The big alloc table may not be completely overwritten, so
4299 * we clear out any stale cache pointers from the first pass.
4300 */
4301 bzero(kmem_big_alloc_table, sizeof (kmem_big_alloc_table));
4302 } else {
4303 /*
4304 * During the first pass, the kmem_alloc_* caches
4305 * are treated as metadata.
4306 */
4307 kmem_default_arena = kmem_msb_arena;
4308 maxbuf = KMEM_BIG_MAXBUF_32BIT;
4309 }
4310
4311 /*
4312 * Set up the default caches to back kmem_alloc()
4313 */
4314 kmem_alloc_caches_create(
4315 kmem_alloc_sizes, sizeof (kmem_alloc_sizes) / sizeof (int),
4316 kmem_alloc_table, KMEM_MAXBUF, KMEM_ALIGN_SHIFT);
4317
4318 kmem_alloc_caches_create(
4319 kmem_big_alloc_sizes, sizeof (kmem_big_alloc_sizes) / sizeof (int),
4320 kmem_big_alloc_table, maxbuf, KMEM_BIG_SHIFT);
4321
4322 kmem_big_alloc_table_max = maxbuf >> KMEM_BIG_SHIFT;
4323 }
4324
4325 void
4326 kmem_init(void)
4327 {
4328 kmem_cache_t *cp;
4329 int old_kmem_flags = kmem_flags;
4330 int use_large_pages = 0;
4331 size_t maxverify, minfirewall;
4332
4333 kstat_init();
4334
4335 /*
4336 * Don't do firewalled allocations if the heap is less than 1TB
4337 	 * (i.e. on a 32-bit kernel).
4338 * The resulting VM_NEXTFIT allocations would create too much
4339 * fragmentation in a small heap.
4340 */
4341 #if defined(_LP64)
4342 maxverify = minfirewall = PAGESIZE / 2;
4343 #else
4344 maxverify = minfirewall = ULONG_MAX;
4345 #endif
4346
4347 /* LINTED */
4348 ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE);
4349
4350 list_create(&kmem_caches, sizeof (kmem_cache_t),
4351 offsetof(kmem_cache_t, cache_link));
4352
4353 kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE,
4354 vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE,
4355 VM_SLEEP | VMC_NO_QCACHE);
4356
4357 kmem_msb_arena = vmem_create("kmem_msb", NULL, 0,
4358 PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0,
4359 VMC_DUMPSAFE | VM_SLEEP);
4360
4361 kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN,
4362 segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4363
4364 kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN,
4365 segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4366
4367 kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN,
4368 segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4369
4370 kmem_firewall_va_arena = vmem_create("kmem_firewall_va",
4371 NULL, 0, PAGESIZE,
4372 kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena,
4373 0, VM_SLEEP);
4374
4375 kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE,
4376 segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0,
4377 VMC_DUMPSAFE | VM_SLEEP);
4378
4379 /* temporary oversize arena for mod_read_system_file */
4380 kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE,
4381 segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4382
4383 kmem_reap_interval = 15 * hz;
4384
4385 /*
4386 * Read /etc/system. This is a chicken-and-egg problem because
4387 * kmem_flags may be set in /etc/system, but mod_read_system_file()
4388 * needs to use the allocator. The simplest solution is to create
4389 * all the standard kmem caches, read /etc/system, destroy all the
4390 * caches we just created, and then create them all again in light
4391 * of the (possibly) new kmem_flags and other kmem tunables.
4392 */
4393 kmem_cache_init(1, 0);
4394
4395 mod_read_system_file(boothowto & RB_ASKNAME);
4396
4397 while ((cp = list_tail(&kmem_caches)) != NULL)
4398 kmem_cache_destroy(cp);
4399
4400 vmem_destroy(kmem_oversize_arena);
4401
4402 if (old_kmem_flags & KMF_STICKY)
4403 kmem_flags = old_kmem_flags;
4404
4405 if (!(kmem_flags & KMF_AUDIT))
4406 vmem_seg_size = offsetof(vmem_seg_t, vs_thread);
4407
4408 if (kmem_maxverify == 0)
4409 kmem_maxverify = maxverify;
4410
4411 if (kmem_minfirewall == 0)
4412 kmem_minfirewall = minfirewall;
4413
4414 /*
4415 	 * Give segkmem a chance to figure out whether we are using large
4416 	 * pages for the kernel heap.
4417 */
4418 use_large_pages = segkmem_lpsetup();
4419
4420 /*
4421 * To protect against corruption, we keep the actual number of callers
4422 	 * that KMF_LITE records separate from the tunable. We arbitrarily clamp
4423 * to 16, since the overhead for small buffers quickly gets out of
4424 * hand.
4425 *
4426 * The real limit would depend on the needs of the largest KMC_NOHASH
4427 * cache.
4428 */
4429 kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16);
4430 kmem_lite_pcs = kmem_lite_count;
4431
4432 /*
4433 * Normally, we firewall oversized allocations when possible, but
4434 * if we are using large pages for kernel memory, and we don't have
4435 * any non-LITE debugging flags set, we want to allocate oversized
4436 * buffers from large pages, and so skip the firewalling.
4437 */
4438 if (use_large_pages &&
4439 ((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) {
4440 kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0,
4441 PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena,
4442 0, VMC_DUMPSAFE | VM_SLEEP);
4443 } else {
4444 kmem_oversize_arena = vmem_create("kmem_oversize",
4445 NULL, 0, PAGESIZE,
4446 		    segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX ?
4447 kmem_firewall_va_arena : heap_arena, 0, VMC_DUMPSAFE |
4448 VM_SLEEP);
4449 }
4450
4451 kmem_cache_init(2, use_large_pages);
4452
4453 if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) {
4454 if (kmem_transaction_log_size == 0)
4455 kmem_transaction_log_size = kmem_maxavail() / 50;
4456 kmem_transaction_log = kmem_log_init(kmem_transaction_log_size);
4457 }
4458
4459 if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) {
4460 if (kmem_content_log_size == 0)
4461 kmem_content_log_size = kmem_maxavail() / 50;
4462 kmem_content_log = kmem_log_init(kmem_content_log_size);
4463 }
4464
4465 kmem_failure_log = kmem_log_init(kmem_failure_log_size);
4466 kmem_slab_log = kmem_log_init(kmem_slab_log_size);
4467 kmem_zerosized_log = kmem_log_init(kmem_zerosized_log_size);
4468
4469 /*
4470 * Initialize STREAMS message caches so allocb() is available.
4471 * This allows us to initialize the logging framework (cmn_err(9F),
4472 * strlog(9F), etc) so we can start recording messages.
4473 */
4474 streams_msg_init();
4475
4476 /*
4477 * Initialize the ZSD framework in Zones so modules loaded henceforth
4478 * can register their callbacks.
4479 */
4480 zone_zsd_init();
4481
4482 log_init();
4483 taskq_init();
4484
4485 /*
4486 * Warn about invalid or dangerous values of kmem_flags.
4487 * Always warn about unsupported values.
4488 */
4489 if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE |
4490 KMF_CONTENTS | KMF_LITE)) != 0) ||
4491 ((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE))
4492 cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x.",
4493 kmem_flags);
4494
4495 #ifdef DEBUG
4496 if ((kmem_flags & KMF_DEBUG) == 0)
4497 cmn_err(CE_NOTE, "kmem debugging disabled.");
4498 #else
4499 /*
4500 * For non-debug kernels, the only "normal" flags are 0, KMF_LITE,
4501 * KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled
4502 * if KMF_AUDIT is set). We should warn the user about the performance
4503 * penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE
4504 * isn't set (since that disables AUDIT).
4505 */
4506 if (!(kmem_flags & KMF_LITE) &&
4507 (kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0)
4508 cmn_err(CE_WARN, "High-overhead kmem debugging features "
4509 "enabled (kmem_flags = 0x%x). Performance degradation "
4510 "and large memory overhead possible.", kmem_flags);
4511 #endif /* not DEBUG */
4512
4513 kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP);
4514
4515 kmem_ready = 1;
4516
4517 /*
4518 * Initialize the platform-specific aligned/DMA memory allocator.
4519 */
4520 ka_init();
4521
4522 /*
4523 * Initialize 32-bit ID cache.
4524 */
4525 id32_init();
4526
4527 /*
4528 * Initialize the networking stack so modules loaded can
4529 * register their callbacks.
4530 */
4531 netstack_init();
4532 }
4533
4534 static void
4535 kmem_move_init(void)
4536 {
4537 kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache",
4538 sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL,
4539 kmem_msb_arena, KMC_NOHASH);
4540 kmem_move_cache = kmem_cache_create("kmem_move_cache",
4541 sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL,
4542 kmem_msb_arena, KMC_NOHASH);
4543
4544 /*
4545 * kmem guarantees that move callbacks are sequential and that even
4546 * across multiple caches no two moves ever execute simultaneously.
4547 * Move callbacks are processed on a separate taskq so that client code
4548 * does not interfere with internal maintenance tasks.
4549 */
4550 kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1,
4551 minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE);
4552 }
4553
4554 void
4555 kmem_thread_init(void)
4556 {
4557 kmem_move_init();
4558
4559 /*
4560 * This taskq is used for various kmem maintenance functions, including
4561 * kmem_reap(). When maintenance is required on every cache,
4562 * kmem_cache_applyall() dispatches one task per cache onto this queue.
4563 *
4564 * In the case of kmem_reap(), the system may be under increasingly
4565 * dire memory pressure and may not be able to allocate a new task
4566 * entry. The count of entries to prepopulate (below) should cover at
4567 * least as many caches as we generally expect to exist on the system
4568 * so that they may all be scheduled for reaping under those
4569 * conditions.
4570 */
4571 kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri,
4572 600, INT_MAX, TASKQ_PREPOPULATE);
4573 }
4574
4575 void
4576 kmem_mp_init(void)
4577 {
4578 mutex_enter(&cpu_lock);
4579 register_cpu_setup_func(kmem_cpu_setup, NULL);
4580 mutex_exit(&cpu_lock);
4581
4582 kmem_update_timeout(NULL);
4583
4584 taskq_mp_init();
4585 }
4586
4587 /*
4588 * Return the slab of the allocated buffer, or NULL if the buffer is not
4589 * allocated. This function may be called with a known slab address to determine
4590 * whether or not the buffer is allocated, or with a NULL slab address to obtain
4591 * an allocated buffer's slab.
4592 */
4593 static kmem_slab_t *
4594 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
4595 {
4596 kmem_bufctl_t *bcp, *bufbcp;
4597
4598 ASSERT(MUTEX_HELD(&cp->cache_lock));
4599 ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));
4600
4601 if (cp->cache_flags & KMF_HASH) {
4602 for (bcp = *KMEM_HASH(cp, buf);
4603 (bcp != NULL) && (bcp->bc_addr != buf);
4604 bcp = bcp->bc_next) {
4605 continue;
4606 }
4607 ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1);
4608 return (bcp == NULL ? NULL : bcp->bc_slab);
4609 }
4610
4611 if (sp == NULL) {
4612 sp = KMEM_SLAB(cp, buf);
4613 }
4614 bufbcp = KMEM_BUFCTL(cp, buf);
4615 for (bcp = sp->slab_head;
4616 (bcp != NULL) && (bcp != bufbcp);
4617 bcp = bcp->bc_next) {
4618 continue;
4619 }
4620 return (bcp == NULL ? sp : NULL);
4621 }
4622
4623 static boolean_t
4624 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
4625 {
4626 long refcnt = sp->slab_refcnt;
4627
4628 ASSERT(cp->cache_defrag != NULL);
4629
4630 /*
4631 * For code coverage we want to be able to move an object within the
4632 * same slab (the only partial slab) even if allocating the destination
4633 * buffer resulted in a completely allocated slab.
4634 */
4635 if (flags & KMM_DEBUG) {
4636 return ((flags & KMM_DESPERATE) ||
4637 ((sp->slab_flags & KMEM_SLAB_NOMOVE) == 0));
4638 }
4639
4640 /* If we're desperate, we don't care if the client said NO. */
4641 if (flags & KMM_DESPERATE) {
4642 return (refcnt < sp->slab_chunks); /* any partial */
4643 }
4644
4645 if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4646 return (B_FALSE);
4647 }
4648
4649 if ((refcnt == 1) || kmem_move_any_partial) {
4650 return (refcnt < sp->slab_chunks);
4651 }
4652
	/*
	 * The reclaim threshold is adjusted at each kmem_cache_scan() so that
	 * slabs with a progressively higher percentage of used buffers can be
	 * reclaimed until the cache as a whole is no longer fragmented.
	 *
	 *	sp->slab_refcnt	  kmd_reclaim_numer
	 *	--------------- < ------------------
	 *	sp->slab_chunks	  KMEM_VOID_FRACTION
	 *
	 * For example, with kmd_reclaim_numer at its initial value of 1 and
	 * KMEM_VOID_FRACTION of 8, only slabs that are less than one-eighth
	 * allocated are considered reclaimable.
	 */
	return ((refcnt * KMEM_VOID_FRACTION) <
	    (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
}

/*
 * May be called from the kmem_move_taskq, from kmem_cache_move_notify_task(),
 * or when the buffer is freed.
 */
static void
kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
{
	ASSERT(MUTEX_HELD(&cp->cache_lock));
	ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));

	if (!KMEM_SLAB_IS_PARTIAL(sp)) {
		return;
	}

	if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
		if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) {
			avl_remove(&cp->cache_partial_slabs, sp);
			sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
			sp->slab_stuck_offset = (uint32_t)-1;
			avl_add(&cp->cache_partial_slabs, sp);
		}
	} else {
		sp->slab_later_count = 0;
		sp->slab_stuck_offset = (uint32_t)-1;
	}
}

static void
kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
{
	ASSERT(taskq_member(kmem_move_taskq, curthread));
	ASSERT(MUTEX_HELD(&cp->cache_lock));
	ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));

	if (!KMEM_SLAB_IS_PARTIAL(sp)) {
		return;
	}

	avl_remove(&cp->cache_partial_slabs, sp);
	sp->slab_later_count = 0;
	sp->slab_flags |= KMEM_SLAB_NOMOVE;
	sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf);
	avl_add(&cp->cache_partial_slabs, sp);
}

static void kmem_move_end(kmem_cache_t *, kmem_move_t *);

/*
 * The move callback takes two buffer addresses, the buffer to be moved, and a
 * newly allocated and constructed buffer selected by kmem as the destination.
 * It also takes the size of the buffer and an optional user argument specified
 * at cache creation time. kmem guarantees that the buffer to be moved has not
 * been unmapped by the virtual memory subsystem. Beyond that, it cannot
 * guarantee the present whereabouts of the buffer to be moved, so it is up to
 * the client to safely determine whether or not it is still using the buffer.
 * The client must not free either of the buffers passed to the move callback,
 * since kmem wants to free them directly to the slab layer. The client response
 * tells kmem which of the two buffers to free:
 *
 * YES		kmem frees the old buffer (the move was successful)
 * NO		kmem frees the new buffer, marks the slab of the old buffer
 *		non-reclaimable to avoid bothering the client again
 * LATER	kmem frees the new buffer, increments slab_later_count
 * DONT_KNOW	kmem frees the new buffer
 * DONT_NEED	kmem frees both the old buffer and the new buffer
 *
 * The pending callback argument now being processed contains both of the
 * buffers (old and new) passed to the move callback function, the slab of the
 * old buffer, and flags related to the move request, such as whether or not the
 * system was desperate for memory.
 *
 * Slabs are not freed while there is a pending callback, but instead are kept
 * on a deadlist, which is drained after the last callback completes. This means
 * that slabs are safe to access until kmem_move_end(), no matter how many of
 * their buffers have been freed. Once slab_refcnt reaches zero, it stays at
 * zero for as long as the slab remains on the deadlist and until the slab is
 * freed.
 */
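
/*
 * For illustration, a minimal sketch of a client move callback honoring the
 * contract above. The object_t type, its o_lock and o_self fields, the
 * constructor/destructor, and object_cache are hypothetical, not part of this
 * file. Here o_self is a self-referential pointer that lets the client decide
 * whether kmem handed it a live, known object. The callback is registered
 * with kmem_cache_set_move() after the cache is created:
 *
 *	static kmem_cbrc_t
 *	object_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		object_t *op = old, *np = new;
 *
 *		if (op->o_self != op)
 *			return (KMEM_CBRC_DONT_KNOW);
 *		if (!mutex_tryenter(&op->o_lock))
 *			return (KMEM_CBRC_LATER);
 *		bcopy(old, new, size);
 *		np->o_self = np;
 *		op->o_self = NULL;
 *		mutex_exit(&op->o_lock);
 *		return (KMEM_CBRC_YES);
 *	}
 *
 *	object_cache = kmem_cache_create("object_cache", sizeof (object_t),
 *	    0, object_constructor, object_destructor, NULL, NULL, NULL, 0);
 *	kmem_cache_set_move(object_cache, object_move);
 *
 * Responding YES tells kmem to free the old buffer; per the contract above,
 * the callback itself must not free either buffer.
 */
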
static void
kmem_move_buffer(kmem_move_t *callback)
{
	kmem_cbrc_t response;
	kmem_slab_t *sp = callback->kmm_from_slab;
	kmem_cache_t *cp = sp->slab_cache;
	boolean_t free_on_slab;

	ASSERT(taskq_member(kmem_move_taskq, curthread));
	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
	ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf));

	/*
	 * The number of allocated buffers on the slab may have changed since we
	 * last checked the slab's reclaimability (when the pending move was
	 * enqueued), or the client may have responded NO when asked to move
	 * another buffer on the same slab.
	 */
	if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
		kmem_slab_free(cp, callback->kmm_to_buf);
		kmem_move_end(cp, callback);
		return;
	}

	/*
	 * Checking the slab layer is easy, so we might as well do that here
	 * in case we can avoid bothering the client.
	 */
	mutex_enter(&cp->cache_lock);
	free_on_slab = (kmem_slab_allocated(cp, sp,
	    callback->kmm_from_buf) == NULL);
	mutex_exit(&cp->cache_lock);

	if (free_on_slab) {
		kmem_slab_free(cp, callback->kmm_to_buf);
		kmem_move_end(cp, callback);
		return;
	}

	if (cp->cache_flags & KMF_BUFTAG) {
		/*
		 * Make kmem_cache_alloc_debug() apply the constructor for us.
		 */
		if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
		    KM_NOSLEEP, 1, caller()) != 0) {
			kmem_move_end(cp, callback);
			return;
		}
	} else if (cp->cache_constructor != NULL &&
	    cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
	    KM_NOSLEEP) != 0) {
		atomic_inc_64(&cp->cache_alloc_fail);
		kmem_slab_free(cp, callback->kmm_to_buf);
		kmem_move_end(cp, callback);
		return;
	}

	cp->cache_defrag->kmd_callbacks++;
	cp->cache_defrag->kmd_thread = curthread;
	cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
	cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
	DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
	    callback);

	response = cp->cache_move(callback->kmm_from_buf,
	    callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);

	DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
	    callback, kmem_cbrc_t, response);
	cp->cache_defrag->kmd_thread = NULL;
	cp->cache_defrag->kmd_from_buf = NULL;
	cp->cache_defrag->kmd_to_buf = NULL;

	if (response == KMEM_CBRC_YES) {
		cp->cache_defrag->kmd_yes++;
		kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
		/* slab safe to access until kmem_move_end() */
		if (sp->slab_refcnt == 0)
			cp->cache_defrag->kmd_slabs_freed++;
		mutex_enter(&cp->cache_lock);
		kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
		mutex_exit(&cp->cache_lock);
		kmem_move_end(cp, callback);
		return;
	}

	switch (response) {
	case KMEM_CBRC_NO:
		cp->cache_defrag->kmd_no++;
		mutex_enter(&cp->cache_lock);
		kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
		mutex_exit(&cp->cache_lock);
		break;
	case KMEM_CBRC_LATER:
		cp->cache_defrag->kmd_later++;
		mutex_enter(&cp->cache_lock);
		if (!KMEM_SLAB_IS_PARTIAL(sp)) {
			mutex_exit(&cp->cache_lock);
			break;
		}

		if (++sp->slab_later_count >= KMEM_DISBELIEF) {
			kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
		} else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) {
			sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp,
			    callback->kmm_from_buf);
		}
		mutex_exit(&cp->cache_lock);
		break;
	case KMEM_CBRC_DONT_NEED:
		cp->cache_defrag->kmd_dont_need++;
		kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
		if (sp->slab_refcnt == 0)
			cp->cache_defrag->kmd_slabs_freed++;
		mutex_enter(&cp->cache_lock);
		kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
		mutex_exit(&cp->cache_lock);
		break;
	case KMEM_CBRC_DONT_KNOW:
		/*
		 * If we don't know if we can move this buffer or not, we'll
		 * just assume that we can't: if the buffer is in fact free,
		 * then it is sitting in one of the per-CPU magazines or in
		 * a full magazine in the depot layer. Either way, because
		 * defrag is induced in the same logic that reaps a cache,
		 * it's likely that full magazines will be returned to the
		 * system soon (thereby accomplishing what we're trying to
		 * accomplish here: return those magazines to their slabs).
		 * Given this, any work that we might do now to locate a buffer
		 * in a magazine is wasted (and expensive!) work; we bump
		 * a counter in this case and otherwise assume that we can't
		 * move it.
		 */
		cp->cache_defrag->kmd_dont_know++;
		break;
	default:
		panic("'%s' (%p) unexpected move callback response %d\n",
		    cp->cache_name, (void *)cp, response);
	}

	kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
	kmem_move_end(cp, callback);
}

/* Return B_FALSE if there is insufficient memory for the move request. */
static boolean_t
kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
{
	void *to_buf;
	avl_index_t index;
	kmem_move_t *callback, *pending;
	ulong_t n;

	ASSERT(taskq_member(kmem_taskq, curthread));
	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
	ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);

	callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP);

	if (callback == NULL)
		return (B_FALSE);

	callback->kmm_from_slab = sp;
	callback->kmm_from_buf = buf;
	callback->kmm_flags = flags;

	mutex_enter(&cp->cache_lock);

	n = avl_numnodes(&cp->cache_partial_slabs);
	if ((n == 0) || ((n == 1) && !(flags & KMM_DEBUG))) {
		mutex_exit(&cp->cache_lock);
		kmem_cache_free(kmem_move_cache, callback);
		return (B_TRUE); /* there is no need for the move request */
	}

	pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
	if (pending != NULL) {
		/*
		 * If the move is already pending and we're desperate now,
		 * update the move flags.
		 */
		if (flags & KMM_DESPERATE) {
			pending->kmm_flags |= KMM_DESPERATE;
		}
		mutex_exit(&cp->cache_lock);
		kmem_cache_free(kmem_move_cache, callback);
		return (B_TRUE);
	}

	to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
	    B_FALSE);
	callback->kmm_to_buf = to_buf;
	avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);

	mutex_exit(&cp->cache_lock);

	if (taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer,
	    callback, TQ_NOSLEEP) == TASKQID_INVALID) {
		mutex_enter(&cp->cache_lock);
		avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
		mutex_exit(&cp->cache_lock);
		kmem_slab_free(cp, to_buf);
		kmem_cache_free(kmem_move_cache, callback);
		return (B_FALSE);
	}

	return (B_TRUE);
}

static void
kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
{
	avl_index_t index;

	ASSERT(cp->cache_defrag != NULL);
	ASSERT(taskq_member(kmem_move_taskq, curthread));
	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));

	mutex_enter(&cp->cache_lock);
	VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
	    callback->kmm_from_buf, &index) != NULL);
	avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
	if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
		list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
		kmem_slab_t *sp;

		/*
		 * The last pending move completed. Release all slabs from the
		 * front of the dead list except for any slab at the tail that
		 * needs to be released from the context of kmem_move_buffers().
		 * kmem deferred unmapping the buffers on these slabs in order
		 * to guarantee that buffers passed to the move callback have
		 * been touched only by kmem or by the client itself.
		 */
		while ((sp = list_remove_head(deadlist)) != NULL) {
			if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
				list_insert_tail(deadlist, sp);
				break;
			}
			cp->cache_defrag->kmd_deadcount--;
			cp->cache_slab_destroy++;
			mutex_exit(&cp->cache_lock);
			kmem_slab_destroy(cp, sp);
			mutex_enter(&cp->cache_lock);
		}
	}
	mutex_exit(&cp->cache_lock);
	kmem_cache_free(kmem_move_cache, callback);
}

/*
 * Move buffers from least used slabs first by scanning backwards from the end
 * of the partial slab list. Scan at most max_scan candidate slabs and move
 * buffers from at most max_slabs slabs (0 for all partial slabs in both cases).
 * If desperate to reclaim memory, move buffers from any partial slab, otherwise
 * skip slabs with a ratio of allocated buffers at or above the current
 * threshold. Return the number of unskipped slabs (at most max_slabs, -1 if the
 * scan is aborted) so that the caller can adjust the reclaimability threshold
 * depending on how many reclaimable slabs it finds.
 *
 * kmem_move_buffers() drops and reacquires cache_lock every time it issues a
 * move request, since it is not valid for kmem_move_begin() to call
 * kmem_cache_alloc() or taskq_dispatch() with cache_lock held.
 */
static int
kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
    int flags)
{
	kmem_slab_t *sp;
	void *buf;
	int i, j;	/* slab index, buffer index */
	int s;		/* reclaimable slabs */
	int b;		/* allocated (movable) buffers on reclaimable slab */
	boolean_t success;
	int refcnt;
	int nomove;

	ASSERT(taskq_member(kmem_taskq, curthread));
	ASSERT(MUTEX_HELD(&cp->cache_lock));
	ASSERT(kmem_move_cache != NULL);
	ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
	ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) :
	    avl_numnodes(&cp->cache_partial_slabs) > 1);

	if (kmem_move_blocked) {
		return (0);
	}

	if (kmem_move_fulltilt) {
		flags |= KMM_DESPERATE;
	}

	if (max_scan == 0 || (flags & KMM_DESPERATE)) {
		/*
		 * Scan as many slabs as needed to find the desired number of
		 * candidate slabs.
		 */
		max_scan = (size_t)-1;
	}

	if (max_slabs == 0 || (flags & KMM_DESPERATE)) {
		/* Find as many candidate slabs as possible. */
		max_slabs = (size_t)-1;
	}

	sp = avl_last(&cp->cache_partial_slabs);
	ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
	for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) && (sp != NULL) &&
	    ((sp != avl_first(&cp->cache_partial_slabs)) ||
	    (flags & KMM_DEBUG));
	    sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {

		if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
			continue;
		}
		s++;

		/* Look for allocated buffers to move. */
		for (j = 0, b = 0, buf = sp->slab_base;
		    (j < sp->slab_chunks) && (b < sp->slab_refcnt);
		    buf = (((char *)buf) + cp->cache_chunksize), j++) {

			if (kmem_slab_allocated(cp, sp, buf) == NULL) {
				continue;
			}

			b++;

			/*
			 * Prevent the slab from being destroyed while we drop
			 * cache_lock and while the pending move is not yet
			 * registered. Flag the pending move while
			 * kmd_moves_pending may still be empty, since we can't
			 * yet rely on a non-zero pending move count to prevent
			 * the slab from being destroyed.
			 */
			ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
			sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
			/*
			 * Recheck refcnt and nomove after reacquiring the lock,
			 * since these control the order of partial slabs, and
			 * we want to know if we can pick up the scan where we
			 * left off.
			 */
			refcnt = sp->slab_refcnt;
			nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
			mutex_exit(&cp->cache_lock);

			success = kmem_move_begin(cp, sp, buf, flags);

			/*
			 * Now, before the lock is reacquired, kmem could
			 * process all pending move requests and purge the
			 * deadlist, so that upon reacquiring the lock, sp has
			 * been remapped. Or, the client may free all the
			 * objects on the slab while the pending moves are still
			 * on the taskq. Therefore, the KMEM_SLAB_MOVE_PENDING
			 * flag causes the slab to be put at the end of the
			 * deadlist and prevents it from being destroyed, since
			 * we plan to destroy it here after reacquiring the
			 * lock.
			 */
			mutex_enter(&cp->cache_lock);
			ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
			sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;

			if (sp->slab_refcnt == 0) {
				list_t *deadlist =
				    &cp->cache_defrag->kmd_deadlist;
				list_remove(deadlist, sp);

				if (!avl_is_empty(
				    &cp->cache_defrag->kmd_moves_pending)) {
					/*
					 * A pending move makes it unsafe to
					 * destroy the slab, because even though
					 * the move is no longer needed, the
					 * context where that is determined
					 * requires the slab to exist.
					 * Fortunately, a pending move also
					 * means we don't need to destroy the
					 * slab here, since it will get
					 * destroyed along with any other slabs
					 * on the deadlist after the last
					 * pending move completes.
					 */
					list_insert_head(deadlist, sp);
					return (-1);
				}

				/*
				 * Destroy the slab now if it was completely
				 * freed while we dropped cache_lock and there
				 * are no pending moves. Since slab_refcnt
				 * cannot change once it reaches zero, no new
				 * pending moves from that slab are possible.
				 */
				cp->cache_defrag->kmd_deadcount--;
				cp->cache_slab_destroy++;
				mutex_exit(&cp->cache_lock);
				kmem_slab_destroy(cp, sp);
				mutex_enter(&cp->cache_lock);
				/*
				 * Since we can't pick up the scan where we left
				 * off, abort the scan and say nothing about the
				 * number of reclaimable slabs.
				 */
				return (-1);
			}

			if (!success) {
				/*
				 * Abort the scan if there is not enough memory
				 * for the request and say nothing about the
				 * number of reclaimable slabs.
				 */
				return (-1);
			}

			/*
			 * The slab's position changed while the lock was
			 * dropped, so we don't know where we are in the
			 * sequence any more.
			 */
			if (sp->slab_refcnt != refcnt) {
				/*
				 * If this is a KMM_DEBUG move, the slab_refcnt
				 * may have changed because we allocated a
				 * destination buffer on the same slab. In that
				 * case, we're not interested in counting it.
				 */
				return (-1);
			}
			if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove)
				return (-1);

			/*
			 * Generating a move request allocates a destination
			 * buffer from the slab layer, bumping the first partial
			 * slab if it is completely allocated. If the current
			 * slab becomes the first partial slab as a result, we
			 * can't continue to scan backwards.
			 *
			 * If this is a KMM_DEBUG move and we allocated the
			 * destination buffer from the last partial slab, then
			 * the buffer we're moving is on the same slab and our
			 * slab_refcnt has changed, causing us to return before
			 * reaching here if there are no partial slabs left.
			 */
			ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
			if (sp == avl_first(&cp->cache_partial_slabs)) {
				/*
				 * We're not interested in a second KMM_DEBUG
				 * move.
				 */
				goto end_scan;
			}
		}
	}
end_scan:

	return (s);
}

typedef struct kmem_move_notify_args {
	kmem_cache_t *kmna_cache;
	void *kmna_buf;
} kmem_move_notify_args_t;

static void
kmem_cache_move_notify_task(void *arg)
{
	kmem_move_notify_args_t *args = arg;
	kmem_cache_t *cp = args->kmna_cache;
	void *buf = args->kmna_buf;
	kmem_slab_t *sp;

	ASSERT(taskq_member(kmem_taskq, curthread));
	ASSERT(list_link_active(&cp->cache_link));

	kmem_free(args, sizeof (kmem_move_notify_args_t));
	mutex_enter(&cp->cache_lock);
	sp = kmem_slab_allocated(cp, NULL, buf);

	/* Ignore the notification if the buffer is no longer allocated. */
	if (sp == NULL) {
		mutex_exit(&cp->cache_lock);
		return;
	}

	/* Ignore the notification if there's no reason to move the buffer. */
	if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
		/*
		 * The notification is still relevant at this point. Ignore it,
		 * however, unless the slab was marked by an earlier refusal to
		 * move a buffer.
		 */
		if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) &&
		    (sp->slab_later_count == 0)) {
			mutex_exit(&cp->cache_lock);
			return;
		}

		kmem_slab_move_yes(cp, sp, buf);
		ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
		sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
		mutex_exit(&cp->cache_lock);
		/* see kmem_move_buffers() about dropping the lock */
		(void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
		mutex_enter(&cp->cache_lock);
		ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
		sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
		if (sp->slab_refcnt == 0) {
			list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
			list_remove(deadlist, sp);

			if (!avl_is_empty(
			    &cp->cache_defrag->kmd_moves_pending)) {
				list_insert_head(deadlist, sp);
				mutex_exit(&cp->cache_lock);
				return;
			}

			cp->cache_defrag->kmd_deadcount--;
			cp->cache_slab_destroy++;
			mutex_exit(&cp->cache_lock);
			kmem_slab_destroy(cp, sp);
			return;
		}
	} else {
		kmem_slab_move_yes(cp, sp, buf);
	}
	mutex_exit(&cp->cache_lock);
}

void
kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
{
	kmem_move_notify_args_t *args;

	args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP);
	if (args != NULL) {
		args->kmna_cache = cp;
		args->kmna_buf = buf;
		if (taskq_dispatch(kmem_taskq,
		    (task_func_t *)kmem_cache_move_notify_task, args,
		    TQ_NOSLEEP) == TASKQID_INVALID)
			kmem_free(args, sizeof (kmem_move_notify_args_t));
	}
}
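
/*
 * A hedged usage sketch: a client that earlier answered LATER (or NO) to a
 * move callback can call kmem_cache_move_notify() once the object becomes
 * movable again. The object_t type, its o_busy field, and object_cache are
 * hypothetical, for illustration only:
 *
 *	void
 *	object_unbusy(object_t *op)
 *	{
 *		op->o_busy = B_FALSE;
 *		kmem_cache_move_notify(object_cache, op);
 *	}
 *
 * The notification is dispatched asynchronously, so by the time
 * kmem_cache_move_notify_task() runs, the buffer may already have been freed;
 * that is why the task rechecks kmem_slab_allocated() before acting.
 */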

static void
kmem_cache_defrag(kmem_cache_t *cp)
{
	size_t n;

	ASSERT(cp->cache_defrag != NULL);

	mutex_enter(&cp->cache_lock);
	n = avl_numnodes(&cp->cache_partial_slabs);
	if (n > 1) {
		/* kmem_move_buffers() drops and reacquires cache_lock */
		cp->cache_defrag->kmd_defrags++;
		(void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
	}
	mutex_exit(&cp->cache_lock);
}

/* Is this cache above the fragmentation threshold? */
static boolean_t
kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
{
	/*
	 *	nfree		     kmem_frag_numer
	 *	------------------ > ---------------
	 *	cp->cache_buftotal   kmem_frag_denom
	 *
	 * For example, with kmem_frag_numer at 1 and kmem_frag_denom equal to
	 * KMEM_VOID_FRACTION (8), a cache is considered fragmented once more
	 * than one-eighth of its buffers are free.
	 */
	return ((nfree * kmem_frag_denom) >
	    (cp->cache_buftotal * kmem_frag_numer));
}

static boolean_t
kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
{
	boolean_t fragmented;
	uint64_t nfree;

	ASSERT(MUTEX_HELD(&cp->cache_lock));
	*doreap = B_FALSE;

	if (kmem_move_fulltilt) {
		if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
			return (B_TRUE);
		}
	} else {
		if ((cp->cache_complete_slab_count + avl_numnodes(
		    &cp->cache_partial_slabs)) < kmem_frag_minslabs) {
			return (B_FALSE);
		}
	}

	nfree = cp->cache_bufslab;
	fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) &&
	    kmem_cache_frag_threshold(cp, nfree));

	/*
	 * Free buffers in the magazine layer appear allocated from the point of
	 * view of the slab layer. We want to know if the slab layer would
	 * appear fragmented if we included free buffers from magazines that
	 * have fallen out of the working set.
	 */
	if (!fragmented) {
		long reap;

		mutex_enter(&cp->cache_depot_lock);
		reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
		reap = MIN(reap, cp->cache_full.ml_total);
		mutex_exit(&cp->cache_depot_lock);

		nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
		if (kmem_cache_frag_threshold(cp, nfree)) {
			*doreap = B_TRUE;
		}
	}

	return (fragmented);
}

/* Called periodically from kmem_taskq */
static void
kmem_cache_scan(kmem_cache_t *cp)
{
	boolean_t reap = B_FALSE;
	kmem_defrag_t *kmd;

	ASSERT(taskq_member(kmem_taskq, curthread));

	mutex_enter(&cp->cache_lock);

	kmd = cp->cache_defrag;
	if (kmd->kmd_consolidate > 0) {
		kmd->kmd_consolidate--;
		mutex_exit(&cp->cache_lock);
		kmem_cache_reap(cp);
		return;
	}

	if (kmem_cache_is_fragmented(cp, &reap)) {
		int slabs_found;

		/*
		 * Consolidate reclaimable slabs from the end of the partial
		 * slab list (scan at most kmem_reclaim_scan_range slabs to find
		 * reclaimable slabs). Keep track of how many candidate slabs we
		 * looked for and how many we actually found so we can adjust
		 * the definition of a candidate slab if we're having trouble
		 * finding them.
		 *
		 * kmem_move_buffers() drops and reacquires cache_lock.
		 */
		kmd->kmd_scans++;
		slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
		    kmem_reclaim_max_slabs, 0);
		if (slabs_found >= 0) {
			kmd->kmd_slabs_sought += kmem_reclaim_max_slabs;
			kmd->kmd_slabs_found += slabs_found;
		}

		if (++kmd->kmd_tries >= kmem_reclaim_scan_range) {
			kmd->kmd_tries = 0;

			/*
			 * If we had difficulty finding candidate slabs in
			 * previous scans, adjust the threshold so that
			 * candidates are easier to find.
			 */
			if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) {
				kmem_adjust_reclaim_threshold(kmd, -1);
			} else if ((kmd->kmd_slabs_found * 2) <
			    kmd->kmd_slabs_sought) {
				kmem_adjust_reclaim_threshold(kmd, 1);
			}
			kmd->kmd_slabs_sought = 0;
			kmd->kmd_slabs_found = 0;
		}
	} else {
		kmem_reset_reclaim_threshold(cp->cache_defrag);
#ifdef	DEBUG
		if (!avl_is_empty(&cp->cache_partial_slabs)) {
			/*
			 * In a debug kernel we want the consolidator to
			 * run occasionally even when there is plenty of
			 * memory.
			 */
			uint16_t debug_rand;

			(void) random_get_bytes((uint8_t *)&debug_rand, 2);
			if (!kmem_move_noreap &&
			    ((debug_rand % kmem_mtb_reap) == 0)) {
				mutex_exit(&cp->cache_lock);
				kmem_cache_reap(cp);
				return;
			} else if ((debug_rand % kmem_mtb_move) == 0) {
				kmd->kmd_scans++;
				(void) kmem_move_buffers(cp,
				    kmem_reclaim_scan_range, 1, KMM_DEBUG);
			}
		}
#endif	/* DEBUG */
	}

	mutex_exit(&cp->cache_lock);

	if (reap)
		kmem_depot_ws_reap(cp);
}
