1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
24  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
25  */
26 
27 /*
28  * Kernel memory allocator, as described in the following two papers and a
29  * statement about the consolidator:
30  *
31  * Jeff Bonwick,
32  * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
33  * Proceedings of the Summer 1994 Usenix Conference.
34  * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
35  *
36  * Jeff Bonwick and Jonathan Adams,
37  * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
38  * Arbitrary Resources.
39  * Proceedings of the 2001 Usenix Conference.
40  * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
41  *
42  * kmem Slab Consolidator Big Theory Statement:
43  *
44  * 1. Motivation
45  *
46  * As stated in Bonwick94, slabs provide the following advantages over other
47  * allocation structures in terms of memory fragmentation:
48  *
49  *  - Internal fragmentation (per-buffer wasted space) is minimal.
50  *  - Severe external fragmentation (unused buffers on the free list) is
51  *    unlikely.
52  *
53  * Segregating objects by size eliminates one source of external fragmentation,
54  * and according to Bonwick:
55  *
56  *   The other reason that slabs reduce external fragmentation is that all
57  *   objects in a slab are of the same type, so they have the same lifetime
58  *   distribution. The resulting segregation of short-lived and long-lived
59  *   objects at slab granularity reduces the likelihood of an entire page being
60  *   held hostage due to a single long-lived allocation [Barrett93, Hanson90].
61  *
62  * While unlikely, severe external fragmentation remains possible. Clients that
63  * allocate both short- and long-lived objects from the same cache cannot
64  * anticipate the distribution of long-lived objects within the allocator's slab
65  * implementation. Even a small percentage of long-lived objects distributed
66  * randomly across many slabs can lead to a worst case scenario where the client
67  * frees the majority of its objects and the system gets back almost none of the
68  * slabs. Despite the client doing what it reasonably can to help the system
69  * reclaim memory, the allocator cannot shake free enough slabs because of
70  * lonely allocations stubbornly hanging on. Although the allocator is in a
71  * position to diagnose the fragmentation, there is nothing that the allocator
72  * by itself can do about it. It only takes a single allocated object to prevent
73  * an entire slab from being reclaimed, and any object handed out by
74  * kmem_cache_alloc() is by definition in the client's control. Conversely,
75  * although the client is in a position to move a long-lived object, it has no
76  * way of knowing if the object is causing fragmentation, and if so, where to
77  * move it. A solution necessarily requires further cooperation between the
78  * allocator and the client.
79  *
80  * 2. Move Callback
81  *
82  * The kmem slab consolidator therefore adds a move callback to the
83  * allocator/client interface, improving worst-case external fragmentation in
84  * kmem caches that supply a function to move objects from one memory location
85  * to another. In a situation of low memory, kmem attempts to consolidate all of
86  * a cache's slabs at once; otherwise it works slowly to bring external
87  * fragmentation within the 1/8 limit guaranteed for internal fragmentation,
88  * thereby helping to avoid a low memory situation in the future.
89  *
90  * The callback has the following signature:
91  *
92  *   kmem_cbrc_t move(void *old, void *new, size_t size, void *user_arg)
93  *
94  * It supplies the kmem client with two addresses: the allocated object that
95  * kmem wants to move and a buffer selected by kmem for the client to use as the
96  * copy destination. The callback is kmem's way of saying "Please get off of
97  * this buffer and use this one instead." kmem knows where it wants to move the
98  * object in order to best reduce fragmentation. All the client needs to know
99  * about the second argument (void *new) is that it is an allocated, constructed
100  * object ready to take the contents of the old object. When the move function
101  * is called, the system is likely to be low on memory, and the new object
102  * spares the client from having to worry about allocating memory for the
103  * requested move. The third argument supplies the size of the object, in case a
104  * single move function handles multiple caches whose objects differ only in
105  * size (such as zio_buf_512, zio_buf_1024, etc). Finally, the same optional
106  * user argument passed to the constructor, destructor, and reclaim functions is
107  * also passed to the move callback.
108  *
109  * 2.1 Setting the Move Callback
110  *
111  * The client sets the move callback after creating the cache and before
112  * allocating from it:
113  *
114  *	object_cache = kmem_cache_create(...);
115  *      kmem_cache_set_move(object_cache, object_move);
116  *
117  * 2.2 Move Callback Return Values
118  *
119  * Only the client knows its own data and when it is a good time to move it.
120  * The client is cooperating with kmem to return unused memory to the system,
121  * and kmem respectfully accepts this help at the client's convenience. When
122  * asked to move an object, the client can respond with any of the following:
123  *
124  *   typedef enum kmem_cbrc {
125  *           KMEM_CBRC_YES,
126  *           KMEM_CBRC_NO,
127  *           KMEM_CBRC_LATER,
128  *           KMEM_CBRC_DONT_NEED,
129  *           KMEM_CBRC_DONT_KNOW
130  *   } kmem_cbrc_t;
131  *
132  * The client must not explicitly kmem_cache_free() either of the objects passed
133  * to the callback, since kmem wants to free them directly to the slab layer
134  * (bypassing the per-CPU magazine layer). The response tells kmem which of the
135  * objects to free:
136  *
137  *       YES: (Did it) The client moved the object, so kmem frees the old one.
138  *        NO: (Never) The client refused, so kmem frees the new object (the
139  *            unused copy destination). kmem also marks the slab of the old
140  *            object so as not to bother the client with further callbacks for
141  *            that object as long as the slab remains on the partial slab list.
142  *            (The system won't be getting the slab back as long as the
143  *            immovable object holds it hostage, so there's no point in moving
144  *            any of its objects.)
145  *     LATER: The client is using the object and cannot move it now, so kmem
146  *            frees the new object (the unused copy destination). kmem still
147  *            attempts to move other objects off the slab, since it expects to
148  *            succeed in clearing the slab in a later callback. The client
149  *            should use LATER instead of NO if the object is likely to become
150  *            movable very soon.
151  * DONT_NEED: The client no longer needs the object, so kmem frees the old along
152  *            with the new object (the unused copy destination). This response
153  *            is the client's opportunity to be a model citizen and give back as
154  *            much as it can.
155  * DONT_KNOW: The client does not know about the object because
156  *            a) the client has just allocated the object and not yet put it
157  *               wherever it expects to find known objects,
158  *            b) the client has removed the object from wherever it expects to
159  *               find known objects and is about to free it, or
160  *            c) the client has freed the object.
161  *            In all these cases (a, b, and c) kmem frees the new object (the
162  *            unused copy destination) and searches for the old object in the
163  *            magazine layer. If found, the object is removed from the magazine
164  *            layer and freed to the slab layer so it will no longer hold the
165  *            slab hostage.
166  *
167  * 2.3 Object States
168  *
169  * Neither kmem nor the client can be assumed to know the object's whereabouts
170  * at the time of the callback. An object belonging to a kmem cache may be in
171  * any of the following states:
172  *
173  * 1. Uninitialized on the slab
174  * 2. Allocated from the slab but not constructed (still uninitialized)
175  * 3. Allocated from the slab, constructed, but not yet ready for business
176  *    (not in a valid state for the move callback)
177  * 4. In use (valid and known to the client)
178  * 5. About to be freed (no longer in a valid state for the move callback)
179  * 6. Freed to a magazine (still constructed)
180  * 7. Allocated from a magazine, not yet ready for business (not in a valid
181  *    state for the move callback), and about to return to state #4
182  * 8. Deconstructed on a magazine that is about to be freed
183  * 9. Freed to the slab
184  *
185  * Since the move callback may be called at any time while the object is in any
186  * of the above states (except state #1), the client needs a safe way to
187  * determine whether or not it knows about the object. Specifically, the client
188  * needs to know whether or not the object is in state #4, the only state in
189  * which a move is valid. If the object is in any other state, the client should
190  * immediately return KMEM_CBRC_DONT_KNOW, since it is unsafe to access any of
191  * the object's fields.
192  *
193  * Note that although an object may be in state #4 when kmem initiates the move
194  * request, the object may no longer be in that state by the time kmem actually
195  * calls the move function. Not only does the client free objects
196  * asynchronously, but kmem itself puts move requests on a queue where they are
197  * pending until kmem processes them from another context. Also, objects freed
198  * to a magazine appear allocated from the point of view of the slab layer, so
199  * kmem may even initiate requests for objects in a state other than state #4.
200  *
201  * 2.3.1 Magazine Layer
202  *
203  * An important insight revealed by the states listed above is that the magazine
204  * layer is populated only by kmem_cache_free(). Magazines of constructed
205  * objects are never populated directly from the slab layer (which contains raw,
206  * unconstructed objects). Whenever an allocation request cannot be satisfied
207  * from the magazine layer, the magazines are bypassed and the request is
208  * satisfied from the slab layer (creating a new slab if necessary). kmem calls
209  * the object constructor only when allocating from the slab layer, and only in
210  * response to kmem_cache_alloc() or to prepare the destination buffer passed in
211  * the move callback. kmem does not preconstruct objects in anticipation of
212  * kmem_cache_alloc().
213  *
214  * 2.3.2 Object Constructor and Destructor
215  *
216  * If the client supplies a destructor, it must be valid to call the destructor
217  * on a newly created object (immediately after the constructor).
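 *
 * A minimal sketch of a pair that satisfies this requirement (object_t and
 * its o_mutex field are illustrative names, not part of the kmem interface):
 *
 *      static int
 *      object_construct(void *buf, void *arg, int kmflags)
 *      {
 *              object_t *op = buf;
 *
 *              mutex_init(&op->o_mutex, NULL, MUTEX_DEFAULT, NULL);
 *              return (0);
 *      }
 *
 *      static void
 *      object_destruct(void *buf, void *arg)
 *      {
 *              object_t *op = buf;
 *
 *              // Safe to call even if the object was never handed out.
 *              mutex_destroy(&op->o_mutex);
 *      }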
218  *
219  * 2.4 Recognizing Known Objects
220  *
221  * There is a simple test to determine safely whether or not the client knows
222  * about a given object in the move callback. It relies on the fact that kmem
223  * guarantees that the object of the move callback has only been touched by the
224  * client itself or else by kmem. kmem does this by ensuring that none of the
225  * cache's slabs are freed to the virtual memory (VM) subsystem while a move
226  * callback is pending. When the last object on a slab is freed, if there is a
227  * pending move, kmem puts the slab on a per-cache dead list and defers freeing
228  * slabs on that list until all pending callbacks are completed. That way,
229  * clients can be certain that the object of a move callback is in one of the
230  * states listed above, making it possible to distinguish known objects (in
231  * state #4) using the two low order bits of any pointer member (with the
232  * exception of 'char *' or 'short *' which may not be 4-byte aligned on some
233  * platforms).
234  *
235  * The test works as long as the client always transitions objects from state #4
236  * (known, in use) to state #5 (about to be freed, invalid) by setting the low
237  * order bit of the client-designated pointer member. Since kmem only writes
238  * invalid memory patterns, such as 0xbaddcafe to uninitialized memory and
239  * 0xdeadbeef to freed memory, any scribbling on the object done by kmem is
240  * guaranteed to set at least one of the two low order bits. Therefore, given an
241  * object with a back pointer to a 'container_t *o_container', the client can
242  * test
243  *
244  *      container_t *container = object->o_container;
245  *      if ((uintptr_t)container & 0x3) {
246  *              return (KMEM_CBRC_DONT_KNOW);
247  *      }
248  *
249  * Typically, an object will have a pointer to some structure with a list or
250  * hash where objects from the cache are kept while in use. Assuming that the
251  * client has some way of knowing that the container structure is valid and will
252  * not go away during the move, and assuming that the structure includes a lock
253  * to protect whatever collection is used, then the client would continue as
254  * follows:
255  *
256  *	// Ensure that the container structure does not go away.
257  *      if (container_hold(container) == 0) {
258  *              return (KMEM_CBRC_DONT_KNOW);
259  *      }
260  *      mutex_enter(&container->c_objects_lock);
261  *      if (container != object->o_container) {
262  *              mutex_exit(&container->c_objects_lock);
263  *              container_rele(container);
264  *              return (KMEM_CBRC_DONT_KNOW);
265  *      }
266  *
267  * At this point the client knows that the object cannot be freed as long as
268  * c_objects_lock is held. Note that after acquiring the lock, the client must
269  * recheck the o_container pointer in case the object was removed just before
270  * acquiring the lock.
271  *
272  * When the client is about to free an object, it must first remove that object
273  * from the list, hash, or other structure where it is kept. At that time, to
274  * mark the object so it can be distinguished from the remaining, known objects,
275  * the client sets the designated low order bit:
276  *
277  *      mutex_enter(&container->c_objects_lock);
278  *      object->o_container = (void *)((uintptr_t)object->o_container | 0x1);
279  *      list_remove(&container->c_objects, object);
280  *      mutex_exit(&container->c_objects_lock);
281  *
282  * In the common case, the object is freed to the magazine layer, where it may
283  * be reused on a subsequent allocation without the overhead of calling the
284  * constructor. While in the magazine it appears allocated from the point of
285  * view of the slab layer, making it a candidate for the move callback. Most
286  * objects unrecognized by the client in the move callback fall into this
287  * category and are cheaply distinguished from known objects by the test
288  * described earlier. Since recognition is cheap for the client, and searching
289  * magazines is expensive for kmem, kmem defers searching until the client first
290  * returns KMEM_CBRC_DONT_KNOW. As long as the needed effort is reasonable, kmem
291  * elsewhere does what it can to avoid bothering the client unnecessarily.
292  *
293  * Invalidating the designated pointer member before freeing the object marks
294  * the object to be avoided in the callback, and conversely, assigning a valid
295  * value to the designated pointer member after allocating the object makes the
296  * object fair game for the callback:
297  *
298  *      ... allocate object ...
299  *      ... set any initial state not set by the constructor ...
300  *
301  *      mutex_enter(&container->c_objects_lock);
302  *      list_insert_tail(&container->c_objects, object);
303  *      membar_producer();
304  *      object->o_container = container;
305  *      mutex_exit(&container->c_objects_lock);
306  *
307  * Note that everything else must be valid before setting o_container makes the
308  * object fair game for the move callback. The membar_producer() call ensures
309  * that all the object's state is written to memory before setting the pointer
310  * that transitions the object from state #3 or #7 (allocated, constructed, not
311  * yet in use) to state #4 (in use, valid). That's important because the move
312  * function has to check the validity of the pointer before it can safely
313  * acquire the lock protecting the collection where it expects to find known
314  * objects.
315  *
316  * This method of distinguishing known objects observes the usual symmetry:
317  * invalidating the designated pointer is the first thing the client does before
318  * freeing the object, and setting the designated pointer is the last thing the
319  * client does after allocating the object. Of course, the client is not
320  * required to use this method. Fundamentally, how the client recognizes known
321  * objects is completely up to the client, but this method is recommended as an
322  * efficient and safe way to take advantage of the guarantees made by kmem. If
323  * the entire object is arbitrary data without any markable bits from a suitable
324  * pointer member, then the client must find some other method, such as
325  * searching a hash table of known objects.
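 *
 * A minimal sketch of that alternative, assuming the client keeps its own
 * table of known objects (known_lock, known_objects, KNOWN_HASH(), and the
 * o_link field are illustrative names, not part of the kmem interface):
 *
 *      static kmutex_t known_lock;
 *      static object_t *known_objects[KNOWN_BUCKETS];	// hash chains
 *
 *      static boolean_t
 *      object_is_known(void *buf)
 *      {
 *              object_t *op;
 *
 *              mutex_enter(&known_lock);
 *              for (op = known_objects[KNOWN_HASH(buf)]; op != NULL;
 *                  op = op->o_link) {
 *                      if (op == buf) {
 *                              mutex_exit(&known_lock);
 *                              return (B_TRUE);
 *                      }
 *              }
 *              mutex_exit(&known_lock);
 *              return (B_FALSE);
 *      }
 *
 * The client inserts an object into the table when the object becomes valid
 * and removes it before freeing, just as the pointer-marking method sets and
 * clears o_container.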
326  *
327  * 2.5 Preventing Objects From Moving
328  *
329  * Besides a way to distinguish known objects, the other thing that the client
330  * needs is a strategy to ensure that an object will not move while the client
331  * is actively using it. The details of satisfying this requirement tend to be
332  * highly cache-specific. It might seem that the same rules that let a client
333  * remove an object safely should also decide when an object can be moved
334  * safely. However, any object state that makes a removal attempt invalid is
335  * likely to be long-lasting for objects that the client does not expect to
336  * remove. kmem knows nothing about the object state and is equally likely (from
337  * the client's point of view) to request a move for any object in the cache,
338  * whether prepared for removal or not. Even a low percentage of objects stuck
339  * in place by unremovability will defeat the consolidator if the stuck objects
340  * are the same long-lived allocations likely to hold slabs hostage.
341  * Fundamentally, the consolidator is not aimed at common cases. Severe external
342  * fragmentation is a worst case scenario manifested as sparsely allocated
343  * slabs, by definition a low percentage of the cache's objects. When deciding
344  * what makes an object movable, keep in mind the goal of the consolidator: to
345  * bring worst-case external fragmentation within the limits guaranteed for
346  * internal fragmentation. Removability is a poor criterion if it is likely to
347  * exclude more than an insignificant percentage of objects for long periods of
348  * time.
349  *
350  * A tricky general solution exists, and it has the advantage of letting you
351  * move any object at almost any moment, practically eliminating the likelihood
352  * that an object can hold a slab hostage. However, if there is a cache-specific
353  * way to ensure that an object is not actively in use in the vast majority of
354  * cases, a simpler solution that leverages this cache-specific knowledge is
355  * preferred.
356  *
357  * 2.5.1 Cache-Specific Solution
358  *
359  * As an example of a cache-specific solution, the ZFS znode cache takes
360  * advantage of the fact that the vast majority of znodes are only being
361  * referenced from the DNLC. (A typical case might be a few hundred in active
362  * use and a hundred thousand in the DNLC.) In the move callback, after the ZFS
363  * client has established that it recognizes the znode and can access its fields
364  * safely (using the method described earlier), it then tests whether the znode
365  * is referenced by anything other than the DNLC. If so, it assumes that the
366  * znode may be in active use and is unsafe to move, so it drops its locks and
367  * returns KMEM_CBRC_LATER. The advantage of this strategy is that everywhere
368  * else znodes are used, no change is needed to protect against the possibility
369  * of the znode moving. The disadvantage is that it remains possible for an
370  * application to hold a znode slab hostage with an open file descriptor.
371  * However, this case ought to be rare and the consolidator has a way to deal
372  * with it: If the client responds KMEM_CBRC_LATER repeatedly for the same
373  * object, kmem eventually stops believing it and treats the slab as if the
374  * client had responded KMEM_CBRC_NO. Having marked the hostage slab, kmem can
375  * then focus on getting it off of the partial slab list by allocating rather
376  * than freeing all of its objects. (Either way of getting a slab off the
377  * free list reduces fragmentation.)
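 *
 * In outline, the heart of such a cache-specific test might look like the
 * following (hypothetical helper names; this is not the actual ZFS code):
 *
 *      // The object has already been recognized and its locks are held.
 *      if (object_refcount(op) > object_dnlc_holds(op)) {
 *              // Possibly in active use; too risky to move right now.
 *              object_drop_locks(op);
 *              return (KMEM_CBRC_LATER);
 *      }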
378  *
379  * 2.5.2 General Solution
380  *
381  * The general solution, on the other hand, requires an explicit hold everywhere
382  * the object is used to prevent it from moving. To keep the client locking
383  * strategy as uncomplicated as possible, kmem guarantees the simplifying
384  * assumption that move callbacks are sequential, even across multiple caches.
385  * Internally, a global queue processed by a single thread supports all caches
386  * implementing the callback function. No matter how many caches supply a move
387  * function, the consolidator never moves more than one object at a time, so the
388  * client does not have to worry about tricky lock ordering involving several
389  * related objects from different kmem caches.
390  *
391  * The general solution implements the explicit hold as a read-write lock, which
392  * allows multiple readers to access an object from the cache simultaneously
393  * while a single writer is excluded from moving it. A single rwlock for the
394  * entire cache would lock out all threads from using any of the cache's objects
395  * even though only a single object is being moved, so to reduce contention,
396  * the client can fan out the single rwlock into an array of rwlocks hashed by
397  * the object address, making it probable that moving one object will not
398  * prevent other threads from using a different object. The rwlock cannot be a
399  * member of the object itself, because the possibility of the object moving
400  * makes it unsafe to access any of the object's fields until the lock is
401  * acquired.
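 *
 * A minimal sketch of that fan-out, using the OBJECT_RWLOCK macro that
 * appears in the examples below (the lock count and hash are illustrative
 * choices; each rwlock must be rw_init()ed at setup):
 *
 *      #define	OBJECT_LOCKS	64	// small, fixed number of locks
 *      static krwlock_t object_rwlock[OBJECT_LOCKS];
 *
 *      // Hash the object address to one of the rwlocks; the shift discards
 *      // the buffer's alignment bits so nearby objects spread across locks.
 *      #define	OBJECT_RWLOCK(op)	\
 *              (&object_rwlock[((uintptr_t)(op) >> 6) & (OBJECT_LOCKS - 1)])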
402  *
403  * Assuming a small, fixed number of locks, it's possible that multiple objects
404  * will hash to the same lock. A thread that needs to use multiple objects in
405  * the same function may acquire the same lock multiple times. Since rwlocks are
406  * reentrant for readers, and since there is never more than a single writer at
407  * a time (assuming that the client acquires the lock as a writer only when
408  * moving an object inside the callback), there would seem to be no problem.
409  * However, a client locking multiple objects in the same function must handle
410  * one case of potential deadlock: Assume that thread A needs to prevent both
411  * object 1 and object 2 from moving, and thread B, the callback, meanwhile
412  * tries to move object 3. It's possible, if objects 1, 2, and 3 all hash to the
413  * same lock, that thread A will acquire the lock for object 1 as a reader
414  * before thread B sets the lock's write-wanted bit, preventing thread A from
415  * reacquiring the lock for object 2 as a reader. Unable to make forward
416  * progress, thread A will never release the lock for object 1, resulting in
417  * deadlock.
418  *
419  * There are two ways of avoiding the deadlock just described. The first is to
420  * use rw_tryenter() rather than rw_enter() in the callback function when
421  * attempting to acquire the lock as a writer. If tryenter discovers that the
422  * same object (or another object hashed to the same lock) is already in use, it
423  * aborts the callback and returns KMEM_CBRC_LATER. The second way is to use
424  * rprwlock_t (declared in common/fs/zfs/sys/rprwlock.h) instead of rwlock_t,
425  * since it allows a thread to acquire the lock as a reader in spite of a
426  * waiting writer. This second approach insists on moving the object now, no
427  * matter how many readers the move function must wait for in order to do so,
428  * and could delay the completion of the callback indefinitely (blocking
429  * callbacks to other clients). In practice, a less insistent callback using
430  * rw_tryenter() returns KMEM_CBRC_LATER infrequently enough that there seems
431  * little reason to use anything else.
432  *
433  * Avoiding deadlock is not the only problem that an implementation using an
434  * explicit hold needs to solve. Locking the object in the first place (to
435  * prevent it from moving) remains a problem, since the object could move
436  * between the time you obtain a pointer to the object and the time you acquire
437  * the rwlock hashed to that pointer value. Therefore the client needs to
438  * recheck the value of the pointer after acquiring the lock, drop the lock if
439  * the value has changed, and try again. This requires a level of indirection:
440  * something that points to the object rather than the object itself, that the
441  * client can access safely while attempting to acquire the lock. (The object
442  * itself cannot be referenced safely because it can move at any time.)
443  * The following lock-acquisition function takes whatever is safe to reference
444  * (arg), follows its pointer to the object (using function f), and tries as
445  * often as necessary to acquire the hashed lock and verify that the object
446  * still has not moved:
447  *
448  *      object_t *
449  *      object_hold(object_f f, void *arg)
450  *      {
451  *              object_t *op;
452  *
453  *              op = f(arg);
454  *              if (op == NULL) {
455  *                      return (NULL);
456  *              }
457  *
458  *              rw_enter(OBJECT_RWLOCK(op), RW_READER);
459  *              while (op != f(arg)) {
460  *                      rw_exit(OBJECT_RWLOCK(op));
461  *                      op = f(arg);
462  *                      if (op == NULL) {
463  *                              break;
464  *                      }
465  *                      rw_enter(OBJECT_RWLOCK(op), RW_READER);
466  *              }
467  *
468  *              return (op);
469  *      }
470  *
471  * The OBJECT_RWLOCK macro hashes the object address to obtain the rwlock. The
472  * lock reacquisition loop, while necessary, almost never executes. The function
473  * pointer f (used to obtain the object pointer from arg) has the following type
474  * definition:
475  *
476  *      typedef object_t *(*object_f)(void *arg);
477  *
478  * An object_f implementation is likely to be as simple as accessing a structure
479  * member:
480  *
481  *      object_t *
482  *      s_object(void *arg)
483  *      {
484  *              something_t *sp = arg;
485  *              return (sp->s_object);
486  *      }
487  *
488  * The flexibility of a function pointer allows the path to the object to be
489  * arbitrarily complex and also supports the notion that depending on where you
490  * are using the object, you may need to get it from someplace different.
491  *
492  * The function that releases the explicit hold is simpler because it does not
493  * have to worry about the object moving:
494  *
495  *      void
496  *      object_rele(object_t *op)
497  *      {
498  *              rw_exit(OBJECT_RWLOCK(op));
499  *      }
500  *
501  * The caller is spared these details so that obtaining and releasing an
502  * explicit hold feels like a simple mutex_enter()/mutex_exit() pair. The caller
503  * of object_hold() only needs to know that the returned object pointer is valid
504  * if not NULL and that the object will not move until released.
505  *
506  * Although object_hold() prevents an object from moving, it does not prevent it
507  * from being freed. The caller must take measures before calling object_hold()
508  * (afterwards is too late) to ensure that the held object cannot be freed. The
509  * caller must do so without accessing the unsafe object reference, so any lock
510  * or reference count used to ensure the continued existence of the object must
511  * live outside the object itself.
512  *
513  * Obtaining a new object is a special case where an explicit hold is impossible
514  * for the caller. Any function that returns a newly allocated object (either as
515  * a return value, or as an in-out parameter) must return it already held; after
516  * the caller gets it is too late, since the object cannot be safely accessed
517  * without the level of indirection described earlier. The following
518  * object_alloc() example uses the same code shown earlier to transition a new
519  * object into the state of being recognized (by the client) as a known object.
520  * The function must acquire the hold (rw_enter) before that state transition
521  * makes the object movable:
522  *
523  *      static object_t *
524  *      object_alloc(container_t *container)
525  *      {
526  *              object_t *object = kmem_cache_alloc(object_cache, 0);
527  *              ... set any initial state not set by the constructor ...
528  *              rw_enter(OBJECT_RWLOCK(object), RW_READER);
529  *              mutex_enter(&container->c_objects_lock);
530  *              list_insert_tail(&container->c_objects, object);
531  *              membar_producer();
532  *              object->o_container = container;
533  *              mutex_exit(&container->c_objects_lock);
534  *              return (object);
535  *      }
536  *
537  * Functions that implicitly acquire an object hold (any function that calls
538  * object_alloc() to supply an object for the caller) need to be carefully noted
539  * so that the matching object_rele() is not neglected. Otherwise, leaked holds
540  * prevent all objects hashed to the affected rwlocks from ever being moved.
541  *
542  * The pointer to a held object can be hashed to the holding rwlock even after
543  * the object has been freed. Although it is possible to release the hold
544  * after freeing the object, you may decide to release the hold implicitly in
545  * whatever function frees the object, so as to release the hold as soon as
546  * possible, and for the sake of symmetry with the function that implicitly
547  * acquires the hold when it allocates the object. Here, object_free() releases
548  * the hold acquired by object_alloc(). Its implicit object_rele() forms a
549  * matching pair with object_hold():
550  *
551  *      void
552  *      object_free(object_t *object)
553  *      {
554  *              container_t *container;
555  *
556  *              ASSERT(object_held(object));
557  *              container = object->o_container;
558  *              mutex_enter(&container->c_objects_lock);
559  *              object->o_container =
560  *                  (void *)((uintptr_t)object->o_container | 0x1);
561  *              list_remove(&container->c_objects, object);
562  *              mutex_exit(&container->c_objects_lock);
563  *              object_rele(object);
564  *              kmem_cache_free(object_cache, object);
565  *      }
566  *
567  * Note that object_free() cannot safely accept an object pointer as an argument
568  * unless the object is already held. Any function that calls object_free()
569  * needs to be carefully noted since it similarly forms a matching pair with
570  * object_hold().
571  *
572  * To complete the picture, the following callback function implements the
573  * general solution by moving objects only if they are currently unheld:
574  *
575  *      static kmem_cbrc_t
576  *      object_move(void *buf, void *newbuf, size_t size, void *arg)
577  *      {
578  *              object_t *op = buf, *np = newbuf;
579  *              container_t *container;
580  *
581  *              container = op->o_container;
582  *              if ((uintptr_t)container & 0x3) {
583  *                      return (KMEM_CBRC_DONT_KNOW);
584  *              }
585  *
586  *	        // Ensure that the container structure does not go away.
587  *              if (container_hold(container) == 0) {
588  *                      return (KMEM_CBRC_DONT_KNOW);
589  *              }
590  *
591  *              mutex_enter(&container->c_objects_lock);
592  *              if (container != op->o_container) {
593  *                      mutex_exit(&container->c_objects_lock);
594  *                      container_rele(container);
595  *                      return (KMEM_CBRC_DONT_KNOW);
596  *              }
597  *
598  *              if (rw_tryenter(OBJECT_RWLOCK(op), RW_WRITER) == 0) {
599  *                      mutex_exit(&container->c_objects_lock);
600  *                      container_rele(container);
601  *                      return (KMEM_CBRC_LATER);
602  *              }
603  *
604  *              object_move_impl(op, np); // critical section
605  *              rw_exit(OBJECT_RWLOCK(op));
606  *
607  *              op->o_container = (void *)((uintptr_t)op->o_container | 0x1);
608  *              list_link_replace(&op->o_link_node, &np->o_link_node);
609  *              mutex_exit(&container->c_objects_lock);
610  *              container_rele(container);
611  *              return (KMEM_CBRC_YES);
612  *      }
613  *
614  * Note that object_move() must invalidate the designated o_container pointer of
615  * the old object in the same way that object_free() does, since kmem will free
616  * the object in response to the KMEM_CBRC_YES return value.
617  *
618  * The lock order in object_move() differs from object_alloc(), which locks
619  * OBJECT_RWLOCK first and &container->c_objects_lock second, but as long as the
620  * callback uses rw_tryenter() (preventing the deadlock described earlier), it's
621  * not a problem. Holding the lock on the object list in the example above
622  * through the entire callback not only prevents the object from going away, it
623  * also allows you to lock the list elsewhere and know that none of its elements
624  * will move during iteration.
625  *
626  * Adding an explicit hold everywhere an object from the cache is used is tricky
627  * and involves much more change to client code than a cache-specific solution
628  * that leverages existing state to decide whether or not an object is
629  * movable. However, this approach has the advantage that no object remains
630  * immovable for any significant length of time, making it extremely unlikely
631  * that long-lived allocations can continue holding slabs hostage; and it works
632  * for any cache.
633  *
634  * 3. Consolidator Implementation
635  *
636  * Once the client supplies a move function that a) recognizes known objects and
637  * b) avoids moving objects that are actively in use, the remaining work is up
638  * to the consolidator to decide which objects to move and when to issue
639  * callbacks.
640  *
641  * The consolidator relies on the fact that a cache's slabs are ordered by
642  * usage. Each slab has a fixed number of objects. Depending on the slab's
643  * "color" (the offset of the first object from the beginning of the slab;
644  * offsets are staggered to mitigate false sharing of cache lines) that count is
645  * the maximum number of objects per slab determined at cache creation time or
646  * else the number closest to the maximum that fits within the space remaining
647  * after the initial offset. A completely allocated slab may contribute some
648  * internal fragmentation (per-slab overhead) but no external fragmentation, so
649  * it is of no interest to the consolidator. At the other extreme, slabs whose
650  * objects have all been freed to the slab are released to the virtual memory
651  * (VM) subsystem (objects freed to magazines are still allocated as far as the
652  * slab is concerned). External fragmentation exists when there are slabs
653  * somewhere between these extremes. A partial slab has at least one but not all
654  * of its objects allocated. The more partial slabs, and the fewer allocated
655  * objects on each of them, the higher the fragmentation. Hence the
656  * consolidator's overall strategy is to reduce the number of partial slabs by
657  * moving allocated objects from the least allocated slabs to the most allocated
658  * slabs.
659  *
660  * Partial slabs are kept in an AVL tree ordered by usage. Completely allocated
661  * slabs are kept separately in an unordered list. Since the majority of slabs
662  * tend to be completely allocated (a typical unfragmented cache may have
663  * thousands of complete slabs and only a single partial slab), separating
664  * complete slabs improves the efficiency of partial slab ordering, since the
665  * complete slabs do not affect the depth or balance of the AVL tree. This
666  * ordered sequence of partial slabs acts as a "free list" supplying objects for
667  * allocation requests.
668  *
669  * Objects are always allocated from the first partial slab in the free list,
670  * where the allocation is most likely to eliminate a partial slab (by
671  * completely allocating it). Conversely, when a single object from a completely
672  * allocated slab is freed to the slab, that slab is added to the front of the
673  * free list. Since most free list activity involves highly allocated slabs
674  * coming and going at the front of the list, slabs tend naturally toward the
675  * ideal order: highly allocated at the front, sparsely allocated at the back.
676  * Slabs with few allocated objects are likely to become completely free if they
677  * keep a safe distance away from the front of the free list. Slab misorders
678  * interfere with the natural tendency of slabs to become completely free or
679  * completely allocated. For example, a slab with a single allocated object
680  * needs only a single free to escape the cache; its natural desire is
681  * frustrated when it finds itself at the front of the list where a second
682  * allocation happens just before the free could have released it. Another slab
683  * with all but one object allocated might have supplied the buffer instead, so
684  * that both (as opposed to neither) of the slabs would have been taken off the
685  * free list.
686  *
687  * Although slabs tend naturally toward the ideal order, misorders allowed by a
688  * simple list implementation defeat the consolidator's strategy of merging
689  * least- and most-allocated slabs. Without an AVL tree to guarantee order, kmem
690  * needs another way to fix misorders to optimize its callback strategy. One
691  * approach is to periodically scan a limited number of slabs, advancing a
692  * marker to hold the current scan position, and to move extreme misorders to
693  * the front or back of the free list and to the front or back of the current
694  * scan range. By making consecutive scan ranges overlap by one slab, the least
695  * allocated slab in the current range can be carried along from the end of one
696  * scan to the start of the next.
697  *
698  * Maintaining partial slabs in an AVL tree relieves kmem of this additional
699  * task, however. Since most of the cache's activity is in the magazine layer,
700  * and allocations from the slab layer represent only a startup cost, the
701  * overhead of maintaining a balanced tree is not a significant concern compared
702  * to the opportunity of reducing complexity by eliminating the partial slab
703  * scanner just described. The overhead of an AVL tree is minimized by
704  * maintaining only partial slabs in the tree and keeping completely allocated
705  * slabs separately in a list. To avoid increasing the size of the slab
706  * structure the AVL linkage pointers are reused for the slab's list linkage,
707  * since the slab will always be either partial or complete, never stored both
708  * ways at the same time. To further minimize the overhead of the AVL tree the
709  * compare function that orders partial slabs by usage divides the range of
710  * allocated object counts into bins such that counts within the same bin are
711  * considered equal. Binning partial slabs makes it less likely that allocating
712  * or freeing a single object will change the slab's order, requiring a tree
713  * reinsertion (an avl_remove() followed by an avl_add(), both potentially
714  * requiring some rebalancing of the tree). Allocation counts closest to
715  * completely free and completely allocated are left unbinned (finely sorted) to
716  * better support the consolidator's strategy of merging slabs at either
717  * extreme.
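 *
 * A minimal sketch of such a binned comparison (the bin width, type, and
 * field names are illustrative, and the finer sorting near the extremes is
 * omitted):
 *
 *      static int
 *      slab_usage_compare(const void *a, const void *b)
 *      {
 *              const slab_t *sa = a;
 *              const slab_t *sb = b;
 *              // Coarsen the allocated-object counts into bins so that small
 *              // changes in usage do not force a tree reinsertion.
 *              size_t ba = sa->s_inuse / SLAB_BIN_WIDTH;
 *              size_t bb = sb->s_inuse / SLAB_BIN_WIDTH;
 *
 *              if (ba > bb)
 *                      return (-1);	// more allocated sorts toward the front
 *              if (ba < bb)
 *                      return (1);
 *              // Same bin: fall back to the address to keep the order total.
 *              return (sa > sb ? 1 : (sa < sb ? -1 : 0));
 *      }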
718  *
719  * 3.1 Assessing Fragmentation and Selecting Candidate Slabs
720  *
721  * The consolidator piggybacks on the kmem maintenance thread and is called on
722  * the same interval as kmem_cache_update(), once per cache every fifteen
723  * seconds. kmem maintains a running count of unallocated objects in the slab
724  * layer (cache_bufslab). The consolidator checks whether that number exceeds
725  * 12.5% (1/8) of the total objects in the cache (cache_buftotal), and whether
726  * there is a significant number of slabs in the cache (arbitrarily a minimum of
727  * 101 total slabs). Unused objects that have fallen out of the magazine layer's
728  * working set are included in the assessment, and magazines in the depot are
729  * reaped if those objects would lift cache_bufslab above the fragmentation
730  * threshold. Once the consolidator decides that a cache is fragmented, it looks
731  * for a candidate slab to reclaim, starting at the end of the partial slab free
732  * list and scanning backwards. At first the consolidator is choosy: only a slab
733  * with fewer than 12.5% (1/8) of its objects allocated qualifies (or else a
734  * single allocated object, regardless of percentage). If there is difficulty
735  * finding a candidate slab, kmem raises the allocation threshold incrementally,
736  * up to a maximum of 87.5% (7/8), so that eventually the consolidator will reduce
737  * external fragmentation (unused objects on the free list) below 12.5% (1/8),
738  * even in the worst case of every slab in the cache being almost 7/8 allocated.
739  * The threshold can also be lowered incrementally when candidate slabs are easy
740  * to find, and the threshold is reset to the minimum 1/8 as soon as the cache
741  * is no longer fragmented.
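 *
 * In outline, the initial fragmentation test reduces to the following check
 * for a kmem_cache_t *cp (a sketch built from the counters and tunables named
 * in this file, not the literal implementation; total_slabs stands in for
 * however the slab count is obtained):
 *
 *      // Fragmented if unused slab-layer buffers exceed 1/8 of all buffers
 *      // and the cache has enough slabs for consolidation to be worthwhile.
 *      if ((cp->cache_bufslab * kmem_frag_denom) >
 *          (cp->cache_buftotal * kmem_frag_numer) &&
 *          total_slabs >= kmem_frag_minslabs) {
 *              ... look for a candidate slab to reclaim ...
 *      }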
742  *
743  * 3.2 Generating Callbacks
744  *
745  * Once an eligible slab is chosen, a callback is generated for every allocated
746  * object on the slab, in the hope that the client will move everything off the
747  * slab and make it reclaimable. Objects selected as move destinations are
748  * chosen from slabs at the front of the free list. Assuming slabs in the ideal
749  * order (most allocated at the front, least allocated at the back) and a
750  * cooperative client, the consolidator will succeed in removing slabs from both
751  * ends of the free list, completely allocating on the one hand and completely
752  * freeing on the other. Objects selected as move destinations are allocated in
753  * the kmem maintenance thread where move requests are enqueued. A separate
754  * callback thread removes pending callbacks from the queue and calls the
755  * client. The separate thread ensures that client code (the move function) does
756  * not interfere with internal kmem maintenance tasks. A map of pending
757  * callbacks keyed by object address (the object to be moved) is checked to
758  * ensure that duplicate callbacks are not generated for the same object.
759  * Allocating the move destination (the object to move to) prevents subsequent
760  * callbacks from selecting the same destination as an earlier pending callback.
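 *
 * A minimal sketch of that duplicate check, assuming an AVL tree of pending
 * moves keyed by the old object's address (pending_moves, move_t, new_move,
 * and the m_from field are illustrative names):
 *
 *      avl_index_t where;
 *      move_t search;
 *
 *      search.m_from = buf;		// the object to be moved
 *      if (avl_find(&pending_moves, &search, &where) != NULL)
 *              return;			// a callback is already pending
 *      new_move->m_from = buf;
 *      avl_insert(&pending_moves, new_move, where);
 *      ... allocate the destination and enqueue the callback ...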
761  *
762  * Move requests can also be generated by kmem_cache_reap() when the system is
763  * desperate for memory and by kmem_cache_move_notify(), called by the client to
764  * notify kmem that a move refused earlier with KMEM_CBRC_LATER is now possible.
765  * The map of pending callbacks is protected by the same lock that protects the
766  * slab layer.
767  *
768  * When the system is desperate for memory, kmem does not bother to determine
769  * whether or not the cache exceeds the fragmentation threshold, but tries to
770  * consolidate as many slabs as possible. Normally, the consolidator chews
771  * slowly, one sparsely allocated slab at a time during each maintenance
772  * interval that the cache is fragmented. When desperate, the consolidator
773  * starts at the last partial slab and enqueues callbacks for every allocated
774  * object on every partial slab, working backwards until it reaches the first
775  * partial slab. The first partial slab, meanwhile, advances in pace with the
776  * consolidator as allocations to supply move destinations for the enqueued
777  * callbacks use up the highly allocated slabs at the front of the free list.
778  * Ideally, the overgrown free list collapses like an accordion, starting at
779  * both ends and ending at the center with a single partial slab.
780  *
781  * 3.3 Client Responses
782  *
783  * When the client returns KMEM_CBRC_NO in response to the move callback, kmem
784  * marks the slab that supplied the stuck object non-reclaimable and moves it to
785  * the front of the free list. The slab remains marked as long as it remains on the
786  * free list, and it appears more allocated to the partial slab compare function
787  * than any unmarked slab, no matter how many of its objects are allocated.
788  * Since even one immovable object ties up the entire slab, the goal is to
789  * completely allocate any slab that cannot be completely freed. kmem does not
790  * bother generating callbacks to move objects from a marked slab unless the
791  * system is desperate.
792  *
793  * When the client responds KMEM_CBRC_LATER, kmem increments a count for the
794  * slab. If the client responds LATER too many times, kmem disbelieves and
795  * treats the response as a NO. The count is cleared when the slab is taken off
796  * the partial slab list or when the client moves one of the slab's objects.
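 *
 * In outline, the disbelief heuristic amounts to the following (illustrative
 * field and constant names, not the literal implementation):
 *
 *      case KMEM_CBRC_LATER:
 *              if (++sp->slab_later_count >= KMEM_DISBELIEF)
 *                      response = KMEM_CBRC_NO;	// treat the slab as stuck
 *              break;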
797  *
798  * 4. Observability
799  *
800  * A kmem cache's external fragmentation is best observed with 'mdb -k' using
801  * the ::kmem_slabs dcmd. For a complete description of the command, enter
802  * '::help kmem_slabs' at the mdb prompt.
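 *
 * For example (output elided):
 *
 *      # mdb -k
 *      > ::kmem_slabs
 *      > ::help kmem_slabs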
803  */
804 
805 #include <sys/kmem_impl.h>
806 #include <sys/vmem_impl.h>
807 #include <sys/param.h>
808 #include <sys/sysmacros.h>
809 #include <sys/vm.h>
810 #include <sys/proc.h>
811 #include <sys/tuneable.h>
812 #include <sys/systm.h>
813 #include <sys/cmn_err.h>
814 #include <sys/debug.h>
815 #include <sys/sdt.h>
816 #include <sys/mutex.h>
817 #include <sys/bitmap.h>
818 #include <sys/atomic.h>
819 #include <sys/kobj.h>
820 #include <sys/disp.h>
821 #include <vm/seg_kmem.h>
822 #include <sys/log.h>
823 #include <sys/callb.h>
824 #include <sys/taskq.h>
825 #include <sys/modctl.h>
826 #include <sys/reboot.h>
827 #include <sys/id32.h>
828 #include <sys/zone.h>
829 #include <sys/netstack.h>
830 #ifdef	DEBUG
831 #include <sys/random.h>
832 #endif
833 
834 extern void streams_msg_init(void);
835 extern int segkp_fromheap;
836 extern void segkp_cache_free(void);
837 extern int callout_init_done;
838 
839 struct kmem_cache_kstat {
840 	kstat_named_t	kmc_buf_size;
841 	kstat_named_t	kmc_align;
842 	kstat_named_t	kmc_chunk_size;
843 	kstat_named_t	kmc_slab_size;
844 	kstat_named_t	kmc_alloc;
845 	kstat_named_t	kmc_alloc_fail;
846 	kstat_named_t	kmc_free;
847 	kstat_named_t	kmc_depot_alloc;
848 	kstat_named_t	kmc_depot_free;
849 	kstat_named_t	kmc_depot_contention;
850 	kstat_named_t	kmc_slab_alloc;
851 	kstat_named_t	kmc_slab_free;
852 	kstat_named_t	kmc_buf_constructed;
853 	kstat_named_t	kmc_buf_avail;
854 	kstat_named_t	kmc_buf_inuse;
855 	kstat_named_t	kmc_buf_total;
856 	kstat_named_t	kmc_buf_max;
857 	kstat_named_t	kmc_slab_create;
858 	kstat_named_t	kmc_slab_destroy;
859 	kstat_named_t	kmc_vmem_source;
860 	kstat_named_t	kmc_hash_size;
861 	kstat_named_t	kmc_hash_lookup_depth;
862 	kstat_named_t	kmc_hash_rescale;
863 	kstat_named_t	kmc_full_magazines;
864 	kstat_named_t	kmc_empty_magazines;
865 	kstat_named_t	kmc_magazine_size;
866 	kstat_named_t	kmc_reap; /* number of kmem_cache_reap() calls */
867 	kstat_named_t	kmc_defrag; /* attempts to defrag all partial slabs */
868 	kstat_named_t	kmc_scan; /* attempts to defrag one partial slab */
869 	kstat_named_t	kmc_move_callbacks; /* sum of yes, no, later, dn, dk */
870 	kstat_named_t	kmc_move_yes;
871 	kstat_named_t	kmc_move_no;
872 	kstat_named_t	kmc_move_later;
873 	kstat_named_t	kmc_move_dont_need;
874 	kstat_named_t	kmc_move_dont_know; /* obj unrecognized by client ... */
875 	kstat_named_t	kmc_move_hunt_found; /* ... but found in mag layer */
876 	kstat_named_t	kmc_move_slabs_freed; /* slabs freed by consolidator */
877 	kstat_named_t	kmc_move_reclaimable; /* buffers, if consolidator ran */
878 } kmem_cache_kstat = {
879 	{ "buf_size",		KSTAT_DATA_UINT64 },
880 	{ "align",		KSTAT_DATA_UINT64 },
881 	{ "chunk_size",		KSTAT_DATA_UINT64 },
882 	{ "slab_size",		KSTAT_DATA_UINT64 },
883 	{ "alloc",		KSTAT_DATA_UINT64 },
884 	{ "alloc_fail",		KSTAT_DATA_UINT64 },
885 	{ "free",		KSTAT_DATA_UINT64 },
886 	{ "depot_alloc",	KSTAT_DATA_UINT64 },
887 	{ "depot_free",		KSTAT_DATA_UINT64 },
888 	{ "depot_contention",	KSTAT_DATA_UINT64 },
889 	{ "slab_alloc",		KSTAT_DATA_UINT64 },
890 	{ "slab_free",		KSTAT_DATA_UINT64 },
891 	{ "buf_constructed",	KSTAT_DATA_UINT64 },
892 	{ "buf_avail",		KSTAT_DATA_UINT64 },
893 	{ "buf_inuse",		KSTAT_DATA_UINT64 },
894 	{ "buf_total",		KSTAT_DATA_UINT64 },
895 	{ "buf_max",		KSTAT_DATA_UINT64 },
896 	{ "slab_create",	KSTAT_DATA_UINT64 },
897 	{ "slab_destroy",	KSTAT_DATA_UINT64 },
898 	{ "vmem_source",	KSTAT_DATA_UINT64 },
899 	{ "hash_size",		KSTAT_DATA_UINT64 },
900 	{ "hash_lookup_depth",	KSTAT_DATA_UINT64 },
901 	{ "hash_rescale",	KSTAT_DATA_UINT64 },
902 	{ "full_magazines",	KSTAT_DATA_UINT64 },
903 	{ "empty_magazines",	KSTAT_DATA_UINT64 },
904 	{ "magazine_size",	KSTAT_DATA_UINT64 },
905 	{ "reap",		KSTAT_DATA_UINT64 },
906 	{ "defrag",		KSTAT_DATA_UINT64 },
907 	{ "scan",		KSTAT_DATA_UINT64 },
908 	{ "move_callbacks",	KSTAT_DATA_UINT64 },
909 	{ "move_yes",		KSTAT_DATA_UINT64 },
910 	{ "move_no",		KSTAT_DATA_UINT64 },
911 	{ "move_later",		KSTAT_DATA_UINT64 },
912 	{ "move_dont_need",	KSTAT_DATA_UINT64 },
913 	{ "move_dont_know",	KSTAT_DATA_UINT64 },
914 	{ "move_hunt_found",	KSTAT_DATA_UINT64 },
915 	{ "move_slabs_freed",	KSTAT_DATA_UINT64 },
916 	{ "move_reclaimable",	KSTAT_DATA_UINT64 },
917 };
918 
919 static kmutex_t kmem_cache_kstat_lock;
920 
921 /*
922  * The default set of caches to back kmem_alloc().
923  * These sizes should be reevaluated periodically.
924  *
925  * We want allocations that are multiples of the coherency granularity
926  * (64 bytes) to be satisfied from a cache which is a multiple of 64
927  * bytes, so that it will be 64-byte aligned.  For all multiples of 64,
928  * the next kmem_cache_size greater than or equal to it must be a
929  * multiple of 64.
930  *
931  * We split the table into two sections:  size <= 4k and size > 4k.  This
932  * saves a lot of space and cache footprint in our cache tables.
933  */
934 static const int kmem_alloc_sizes[] = {
935 	1 * 8,
936 	2 * 8,
937 	3 * 8,
938 	4 * 8,		5 * 8,		6 * 8,		7 * 8,
939 	4 * 16,		5 * 16,		6 * 16,		7 * 16,
940 	4 * 32,		5 * 32,		6 * 32,		7 * 32,
941 	4 * 64,		5 * 64,		6 * 64,		7 * 64,
942 	4 * 128,	5 * 128,	6 * 128,	7 * 128,
943 	P2ALIGN(8192 / 7, 64),
944 	P2ALIGN(8192 / 6, 64),
945 	P2ALIGN(8192 / 5, 64),
946 	P2ALIGN(8192 / 4, 64),
947 	P2ALIGN(8192 / 3, 64),
948 	P2ALIGN(8192 / 2, 64),
949 };
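
/*
 * For example, P2ALIGN(8192 / 7, 64) above evaluates to 1152 (18 * 64): the
 * odd page fractions are rounded down to a multiple of 64 so that the 64-byte
 * alignment rule described above still holds for those sizes.
 */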
950 
951 static const int kmem_big_alloc_sizes[] = {
952 	2 * 4096,	3 * 4096,
953 	2 * 8192,	3 * 8192,
954 	4 * 8192,	5 * 8192,	6 * 8192,	7 * 8192,
955 	8 * 8192,	9 * 8192,	10 * 8192,	11 * 8192,
956 	12 * 8192,	13 * 8192,	14 * 8192,	15 * 8192,
957 	16 * 8192
958 };
959 
960 #define	KMEM_MAXBUF		4096
961 #define	KMEM_BIG_MAXBUF_32BIT	32768
962 #define	KMEM_BIG_MAXBUF		131072
963 
964 #define	KMEM_BIG_MULTIPLE	4096	/* big_alloc_sizes must be a multiple */
965 #define	KMEM_BIG_SHIFT		12	/* lg(KMEM_BIG_MULTIPLE) */
966 
967 static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT];
968 static kmem_cache_t *kmem_big_alloc_table[KMEM_BIG_MAXBUF >> KMEM_BIG_SHIFT];
969 
970 #define	KMEM_ALLOC_TABLE_MAX	(KMEM_MAXBUF >> KMEM_ALIGN_SHIFT)
971 static size_t kmem_big_alloc_table_max = 0;	/* # of filled elements */
972 
973 static kmem_magtype_t kmem_magtype[] = {
974 	{ 1,	8,	3200,	65536	},
975 	{ 3,	16,	256,	32768	},
976 	{ 7,	32,	64,	16384	},
977 	{ 15,	64,	0,	8192	},
978 	{ 31,	64,	0,	4096	},
979 	{ 47,	64,	0,	2048	},
980 	{ 63,	64,	0,	1024	},
981 	{ 95,	64,	0,	512	},
982 	{ 143,	64,	0,	0	},
983 };
984 
985 static uint32_t kmem_reaping;
986 static uint32_t kmem_reaping_idspace;
987 
988 /*
989  * kmem tunables
990  */
991 clock_t kmem_reap_interval;	/* cache reaping rate [15 * HZ ticks] */
992 int kmem_depot_contention = 3;	/* max failed tryenters per real interval */
993 pgcnt_t kmem_reapahead = 0;	/* start reaping N pages before pageout */
994 int kmem_panic = 1;		/* whether to panic on error */
995 int kmem_logging = 1;		/* kmem_log_enter() override */
996 uint32_t kmem_mtbf = 0;		/* mean time between failures [default: off] */
997 size_t kmem_transaction_log_size; /* transaction log size [2% of memory] */
998 size_t kmem_content_log_size;	/* content log size [2% of memory] */
999 size_t kmem_failure_log_size;	/* failure log [4 pages per CPU] */
1000 size_t kmem_slab_log_size;	/* slab create log [4 pages per CPU] */
1001 size_t kmem_content_maxsave = 256; /* KMF_CONTENTS max bytes to log */
1002 size_t kmem_lite_minsize = 0;	/* minimum buffer size for KMF_LITE */
1003 size_t kmem_lite_maxalign = 1024; /* maximum buffer alignment for KMF_LITE */
1004 int kmem_lite_pcs = 4;		/* number of PCs to store in KMF_LITE mode */
1005 size_t kmem_maxverify;		/* maximum bytes to inspect in debug routines */
1006 size_t kmem_minfirewall;	/* hardware-enforced redzone threshold */
1007 
1008 #ifdef _LP64
1009 size_t	kmem_max_cached = KMEM_BIG_MAXBUF;	/* maximum kmem_alloc cache */
1010 #else
1011 size_t	kmem_max_cached = KMEM_BIG_MAXBUF_32BIT; /* maximum kmem_alloc cache */
1012 #endif
1013 
1014 #ifdef DEBUG
1015 int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS;
1016 #else
1017 int kmem_flags = 0;
1018 #endif
1019 int kmem_ready;
1020 
1021 static kmem_cache_t	*kmem_slab_cache;
1022 static kmem_cache_t	*kmem_bufctl_cache;
1023 static kmem_cache_t	*kmem_bufctl_audit_cache;
1024 
1025 static kmutex_t		kmem_cache_lock;	/* inter-cache linkage only */
1026 static list_t		kmem_caches;
1027 
1028 static taskq_t		*kmem_taskq;
1029 static kmutex_t		kmem_flags_lock;
1030 static vmem_t		*kmem_metadata_arena;
1031 static vmem_t		*kmem_msb_arena;	/* arena for metadata caches */
1032 static vmem_t		*kmem_cache_arena;
1033 static vmem_t		*kmem_hash_arena;
1034 static vmem_t		*kmem_log_arena;
1035 static vmem_t		*kmem_oversize_arena;
1036 static vmem_t		*kmem_va_arena;
1037 static vmem_t		*kmem_default_arena;
1038 static vmem_t		*kmem_firewall_va_arena;
1039 static vmem_t		*kmem_firewall_arena;
1040 
1041 /*
1042  * Define KMEM_STATS to turn on statistic gathering. By default, it is only
1043  * turned on when DEBUG is also defined.
1044  */
1045 #ifdef	DEBUG
1046 #define	KMEM_STATS
1047 #endif	/* DEBUG */
1048 
1049 #ifdef	KMEM_STATS
1050 #define	KMEM_STAT_ADD(stat)			((stat)++)
1051 #define	KMEM_STAT_COND_ADD(cond, stat)		((void) (!(cond) || (stat)++))
1052 #else
1053 #define	KMEM_STAT_ADD(stat)			/* nothing */
1054 #define	KMEM_STAT_COND_ADD(cond, stat)		/* nothing */
1055 #endif	/* KMEM_STATS */
1056 
1057 /*
1058  * kmem slab consolidator thresholds (tunables)
1059  */
1060 size_t kmem_frag_minslabs = 101;	/* minimum total slabs */
1061 size_t kmem_frag_numer = 1;		/* free buffers (numerator) */
1062 size_t kmem_frag_denom = KMEM_VOID_FRACTION; /* buffers (denominator) */
1063 /*
1064  * Maximum number of slabs from which to move buffers during a single
1065  * maintenance interval while the system is not low on memory.
1066  */
1067 size_t kmem_reclaim_max_slabs = 1;
1068 /*
1069  * Number of slabs to scan backwards from the end of the partial slab list
1070  * when searching for buffers to relocate.
1071  */
1072 size_t kmem_reclaim_scan_range = 12;
1073 
1074 #ifdef	KMEM_STATS
1075 static struct {
1076 	uint64_t kms_callbacks;
1077 	uint64_t kms_yes;
1078 	uint64_t kms_no;
1079 	uint64_t kms_later;
1080 	uint64_t kms_dont_need;
1081 	uint64_t kms_dont_know;
1082 	uint64_t kms_hunt_found_mag;
1083 	uint64_t kms_hunt_found_slab;
1084 	uint64_t kms_hunt_alloc_fail;
1085 	uint64_t kms_hunt_lucky;
1086 	uint64_t kms_notify;
1087 	uint64_t kms_notify_callbacks;
1088 	uint64_t kms_disbelief;
1089 	uint64_t kms_already_pending;
1090 	uint64_t kms_callback_alloc_fail;
1091 	uint64_t kms_callback_taskq_fail;
1092 	uint64_t kms_endscan_slab_dead;
1093 	uint64_t kms_endscan_slab_destroyed;
1094 	uint64_t kms_endscan_nomem;
1095 	uint64_t kms_endscan_refcnt_changed;
1096 	uint64_t kms_endscan_nomove_changed;
1097 	uint64_t kms_endscan_freelist;
1098 	uint64_t kms_avl_update;
1099 	uint64_t kms_avl_noupdate;
1100 	uint64_t kms_no_longer_reclaimable;
1101 	uint64_t kms_notify_no_longer_reclaimable;
1102 	uint64_t kms_notify_slab_dead;
1103 	uint64_t kms_notify_slab_destroyed;
1104 	uint64_t kms_alloc_fail;
1105 	uint64_t kms_constructor_fail;
1106 	uint64_t kms_dead_slabs_freed;
1107 	uint64_t kms_defrags;
1108 	uint64_t kms_scans;
1109 	uint64_t kms_scan_depot_ws_reaps;
1110 	uint64_t kms_debug_reaps;
1111 	uint64_t kms_debug_scans;
1112 } kmem_move_stats;
1113 #endif	/* KMEM_STATS */
1114 
1115 /* consolidator knobs */
1116 static boolean_t kmem_move_noreap;
1117 static boolean_t kmem_move_blocked;
1118 static boolean_t kmem_move_fulltilt;
1119 static boolean_t kmem_move_any_partial;
1120 
1121 #ifdef	DEBUG
1122 /*
1123  * kmem consolidator debug tunables:
1124  * Ensure code coverage by occasionally running the consolidator even when the
1125  * caches are not fragmented (they may never be). These tunables are mean times
1126  * expressed in cache maintenance intervals (kmem_cache_update).
1127  */
1128 uint32_t kmem_mtb_move = 60;	/* defrag 1 slab (~15min) */
1129 uint32_t kmem_mtb_reap = 1800;	/* defrag all slabs (~7.5hrs) */
1130 #endif	/* DEBUG */
1131 
1132 static kmem_cache_t	*kmem_defrag_cache;
1133 static kmem_cache_t	*kmem_move_cache;
1134 static taskq_t		*kmem_move_taskq;
1135 
1136 static void kmem_cache_scan(kmem_cache_t *);
1137 static void kmem_cache_defrag(kmem_cache_t *);
1138 static void kmem_slab_prefill(kmem_cache_t *, kmem_slab_t *);
1139 
1140 
1141 kmem_log_header_t	*kmem_transaction_log;
1142 kmem_log_header_t	*kmem_content_log;
1143 kmem_log_header_t	*kmem_failure_log;
1144 kmem_log_header_t	*kmem_slab_log;
1145 
1146 static int		kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */
1147 
1148 #define	KMEM_BUFTAG_LITE_ENTER(bt, count, caller)			\
1149 	if ((count) > 0) {						\
1150 		pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history;	\
1151 		pc_t *_e;						\
1152 		/* memmove() the old entries down one notch */		\
1153 		for (_e = &_s[(count) - 1]; _e > _s; _e--)		\
1154 			*_e = *(_e - 1);				\
1155 		*_s = (uintptr_t)(caller);				\
1156 	}
1157 
1158 #define	KMERR_MODIFIED	0	/* buffer modified while on freelist */
1159 #define	KMERR_REDZONE	1	/* redzone violation (write past end of buf) */
1160 #define	KMERR_DUPFREE	2	/* freed a buffer twice */
1161 #define	KMERR_BADADDR	3	/* freed a bad (unallocated) address */
1162 #define	KMERR_BADBUFTAG	4	/* buftag corrupted */
1163 #define	KMERR_BADBUFCTL	5	/* bufctl corrupted */
1164 #define	KMERR_BADCACHE	6	/* freed a buffer to the wrong cache */
1165 #define	KMERR_BADSIZE	7	/* alloc size != free size */
1166 #define	KMERR_BADBASE	8	/* buffer base address wrong */
1167 
1168 struct {
1169 	hrtime_t	kmp_timestamp;	/* timestamp of panic */
1170 	int		kmp_error;	/* type of kmem error */
1171 	void		*kmp_buffer;	/* buffer that induced panic */
1172 	void		*kmp_realbuf;	/* real start address for buffer */
1173 	kmem_cache_t	*kmp_cache;	/* buffer's cache according to client */
1174 	kmem_cache_t	*kmp_realcache;	/* actual cache containing buffer */
1175 	kmem_slab_t	*kmp_slab;	/* slab according to kmem_findslab() */
1176 	kmem_bufctl_t	*kmp_bufctl;	/* bufctl */
1177 } kmem_panic_info;
1178 
1179 
1180 static void
1181 copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
1182 {
1183 	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1184 	uint64_t *buf = buf_arg;
1185 
1186 	while (buf < bufend)
1187 		*buf++ = pattern;
1188 }
1189 
1190 static void *
1191 verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
1192 {
1193 	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1194 	uint64_t *buf;
1195 
1196 	for (buf = buf_arg; buf < bufend; buf++)
1197 		if (*buf != pattern)
1198 			return (buf);
1199 	return (NULL);
1200 }
1201 
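/*
 * Verify that buf contains the `old' pattern and replace it with `new'.
 * On a mismatch, restore `old' over the words already rewritten and return
 * the address of the offending word; return NULL on success.
 */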
1202 static void *
1203 verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
1204 {
1205 	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1206 	uint64_t *buf;
1207 
1208 	for (buf = buf_arg; buf < bufend; buf++) {
1209 		if (*buf != old) {
1210 			copy_pattern(old, buf_arg,
1211 			    (char *)buf - (char *)buf_arg);
1212 			return (buf);
1213 		}
1214 		*buf = new;
1215 	}
1216 
1217 	return (NULL);
1218 }
1219 
1220 static void
1221 kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1222 {
1223 	kmem_cache_t *cp;
1224 
1225 	mutex_enter(&kmem_cache_lock);
1226 	for (cp = list_head(&kmem_caches); cp != NULL;
1227 	    cp = list_next(&kmem_caches, cp))
1228 		if (tq != NULL)
1229 			(void) taskq_dispatch(tq, (task_func_t *)func, cp,
1230 			    tqflag);
1231 		else
1232 			func(cp);
1233 	mutex_exit(&kmem_cache_lock);
1234 }
1235 
1236 static void
1237 kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1238 {
1239 	kmem_cache_t *cp;
1240 
1241 	mutex_enter(&kmem_cache_lock);
1242 	for (cp = list_head(&kmem_caches); cp != NULL;
1243 	    cp = list_next(&kmem_caches, cp)) {
1244 		if (!(cp->cache_cflags & KMC_IDENTIFIER))
1245 			continue;
1246 		if (tq != NULL)
1247 			(void) taskq_dispatch(tq, (task_func_t *)func, cp,
1248 			    tqflag);
1249 		else
1250 			func(cp);
1251 	}
1252 	mutex_exit(&kmem_cache_lock);
1253 }
1254 
1255 /*
1256  * Debugging support.  Given a buffer address, find its slab.
1257  */
1258 static kmem_slab_t *
1259 kmem_findslab(kmem_cache_t *cp, void *buf)
1260 {
1261 	kmem_slab_t *sp;
1262 
1263 	mutex_enter(&cp->cache_lock);
1264 	for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
1265 	    sp = list_next(&cp->cache_complete_slabs, sp)) {
1266 		if (KMEM_SLAB_MEMBER(sp, buf)) {
1267 			mutex_exit(&cp->cache_lock);
1268 			return (sp);
1269 		}
1270 	}
1271 	for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
1272 	    sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
1273 		if (KMEM_SLAB_MEMBER(sp, buf)) {
1274 			mutex_exit(&cp->cache_lock);
1275 			return (sp);
1276 		}
1277 	}
1278 	mutex_exit(&cp->cache_lock);
1279 
1280 	return (NULL);
1281 }
1282 
1283 static void
1284 kmem_error(int error, kmem_cache_t *cparg, void *bufarg)
1285 {
1286 	kmem_buftag_t *btp = NULL;
1287 	kmem_bufctl_t *bcp = NULL;
1288 	kmem_cache_t *cp = cparg;
1289 	kmem_slab_t *sp;
1290 	uint64_t *off;
1291 	void *buf = bufarg;
1292 
1293 	kmem_logging = 0;	/* stop logging when a bad thing happens */
1294 
1295 	kmem_panic_info.kmp_timestamp = gethrtime();
1296 
1297 	sp = kmem_findslab(cp, buf);
1298 	if (sp == NULL) {
1299 		for (cp = list_tail(&kmem_caches); cp != NULL;
1300 		    cp = list_prev(&kmem_caches, cp)) {
1301 			if ((sp = kmem_findslab(cp, buf)) != NULL)
1302 				break;
1303 		}
1304 	}
1305 
1306 	if (sp == NULL) {
1307 		cp = NULL;
1308 		error = KMERR_BADADDR;
1309 	} else {
1310 		if (cp != cparg)
1311 			error = KMERR_BADCACHE;
1312 		else
1313 			buf = (char *)bufarg - ((uintptr_t)bufarg -
1314 			    (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1315 		if (buf != bufarg)
1316 			error = KMERR_BADBASE;
1317 		if (cp->cache_flags & KMF_BUFTAG)
1318 			btp = KMEM_BUFTAG(cp, buf);
1319 		if (cp->cache_flags & KMF_HASH) {
1320 			mutex_enter(&cp->cache_lock);
1321 			for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1322 				if (bcp->bc_addr == buf)
1323 					break;
1324 			mutex_exit(&cp->cache_lock);
1325 			if (bcp == NULL && btp != NULL)
1326 				bcp = btp->bt_bufctl;
1327 			if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
1328 			    NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) ||
1329 			    bcp->bc_addr != buf) {
1330 				error = KMERR_BADBUFCTL;
1331 				bcp = NULL;
1332 			}
1333 		}
1334 	}
1335 
1336 	kmem_panic_info.kmp_error = error;
1337 	kmem_panic_info.kmp_buffer = bufarg;
1338 	kmem_panic_info.kmp_realbuf = buf;
1339 	kmem_panic_info.kmp_cache = cparg;
1340 	kmem_panic_info.kmp_realcache = cp;
1341 	kmem_panic_info.kmp_slab = sp;
1342 	kmem_panic_info.kmp_bufctl = bcp;
1343 
1344 	printf("kernel memory allocator: ");
1345 
1346 	switch (error) {
1347 
1348 	case KMERR_MODIFIED:
1349 		printf("buffer modified after being freed\n");
1350 		off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1351 		if (off == NULL)	/* shouldn't happen */
1352 			off = buf;
1353 		printf("modification occurred at offset 0x%lx "
1354 		    "(0x%llx replaced by 0x%llx)\n",
1355 		    (uintptr_t)off - (uintptr_t)buf,
1356 		    (longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off);
1357 		break;
1358 
1359 	case KMERR_REDZONE:
1360 		printf("redzone violation: write past end of buffer\n");
1361 		break;
1362 
1363 	case KMERR_BADADDR:
1364 		printf("invalid free: buffer not in cache\n");
1365 		break;
1366 
1367 	case KMERR_DUPFREE:
1368 		printf("duplicate free: buffer freed twice\n");
1369 		break;
1370 
1371 	case KMERR_BADBUFTAG:
1372 		printf("boundary tag corrupted\n");
1373 		printf("bcp ^ bxstat = %lx, should be %lx\n",
1374 		    (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
1375 		    KMEM_BUFTAG_FREE);
1376 		break;
1377 
1378 	case KMERR_BADBUFCTL:
1379 		printf("bufctl corrupted\n");
1380 		break;
1381 
1382 	case KMERR_BADCACHE:
1383 		printf("buffer freed to wrong cache\n");
1384 		printf("buffer was allocated from %s,\n", cp->cache_name);
1385 		printf("caller attempting free to %s.\n", cparg->cache_name);
1386 		break;
1387 
1388 	case KMERR_BADSIZE:
1389 		printf("bad free: free size (%u) != alloc size (%u)\n",
1390 		    KMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
1391 		    KMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
1392 		break;
1393 
1394 	case KMERR_BADBASE:
1395 		printf("bad free: free address (%p) != alloc address (%p)\n",
1396 		    bufarg, buf);
1397 		break;
1398 	}
1399 
1400 	printf("buffer=%p  bufctl=%p  cache: %s\n",
1401 	    bufarg, (void *)bcp, cparg->cache_name);
1402 
1403 	if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
1404 	    error != KMERR_BADBUFCTL) {
1405 		int d;
1406 		timestruc_t ts;
1407 		kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp;
1408 
1409 		hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts);
1410 		printf("previous transaction on buffer %p:\n", buf);
1411 		printf("thread=%p  time=T-%ld.%09ld  slab=%p  cache: %s\n",
1412 		    (void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
1413 		    (void *)sp, cp->cache_name);
1414 		for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) {
1415 			ulong_t off;
1416 			char *sym = kobj_getsymname(bcap->bc_stack[d], &off);
1417 			printf("%s+%lx\n", sym ? sym : "?", off);
1418 		}
1419 	}
1420 	if (kmem_panic > 0)
1421 		panic("kernel heap corruption detected");
1422 	if (kmem_panic == 0)
1423 		debug_enter(NULL);
1424 	kmem_logging = 1;	/* resume logging */
1425 }
1426 
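/*
 * Create a kmem log (transaction, content, failure, or slab) of roughly
 * `logsize' bytes.  The log is divided into 4 * max_ncpus chunks; each CPU
 * owns one chunk at a time and trades it for a fresh one from the free
 * list when it fills (see kmem_log_enter()).
 */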
1427 static kmem_log_header_t *
1428 kmem_log_init(size_t logsize)
1429 {
1430 	kmem_log_header_t *lhp;
1431 	int nchunks = 4 * max_ncpus;
1432 	size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus];
1433 	int i;
1434 
1435 	/*
1436 	 * Make sure that lhp->lh_cpu[] is nicely aligned
1437 	 * to prevent false sharing of cache lines.
1438 	 */
1439 	lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN);
1440 	lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
1441 	    NULL, NULL, VM_SLEEP);
1442 	bzero(lhp, lhsize);
1443 
1444 	mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
1445 	lhp->lh_nchunks = nchunks;
1446 	lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE);
1447 	lhp->lh_base = vmem_alloc(kmem_log_arena,
1448 	    lhp->lh_chunksize * nchunks, VM_SLEEP);
1449 	lhp->lh_free = vmem_alloc(kmem_log_arena,
1450 	    nchunks * sizeof (int), VM_SLEEP);
1451 	bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
1452 
1453 	for (i = 0; i < max_ncpus; i++) {
1454 		kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
1455 		mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
1456 		clhp->clh_chunk = i;
1457 	}
1458 
1459 	for (i = max_ncpus; i < nchunks; i++)
1460 		lhp->lh_free[i] = i;
1461 
1462 	lhp->lh_head = max_ncpus;
1463 	lhp->lh_tail = 0;
1464 
1465 	return (lhp);
1466 }
1467 
1468 static void *
1469 kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
1470 {
1471 	void *logspace;
1472 	kmem_cpu_log_header_t *clhp;
1473 
1474 	if (lhp == NULL || kmem_logging == 0 || panicstr)
1475 		return (NULL);
1476 
	clhp = &lhp->lh_cpu[CPU->cpu_seqid];

1477 	mutex_enter(&clhp->clh_lock);
1478 	clhp->clh_hits++;
1479 	if (size > clhp->clh_avail) {
1480 		mutex_enter(&lhp->lh_lock);
1481 		lhp->lh_hits++;
1482 		lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
1483 		lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
1484 		clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
1485 		lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
1486 		clhp->clh_current = lhp->lh_base +
1487 		    clhp->clh_chunk * lhp->lh_chunksize;
1488 		clhp->clh_avail = lhp->lh_chunksize;
1489 		if (size > lhp->lh_chunksize)
1490 			size = lhp->lh_chunksize;
1491 		mutex_exit(&lhp->lh_lock);
1492 	}
1493 	logspace = clhp->clh_current;
1494 	clhp->clh_current += size;
1495 	clhp->clh_avail -= size;
1496 	bcopy(data, logspace, size);
1497 	mutex_exit(&clhp->clh_lock);
1498 	return (logspace);
1499 }
1500 
1501 #define	KMEM_AUDIT(lp, cp, bcp)						\
1502 {									\
1503 	kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp);	\
1504 	_bcp->bc_timestamp = gethrtime();				\
1505 	_bcp->bc_thread = curthread;					\
1506 	_bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH);	\
1507 	_bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp));	\
1508 }
1509 
1510 static void
1511 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
1512     kmem_slab_t *sp, void *addr)
1513 {
1514 	kmem_bufctl_audit_t bca;
1515 
1516 	bzero(&bca, sizeof (kmem_bufctl_audit_t));
1517 	bca.bc_addr = addr;
1518 	bca.bc_slab = sp;
1519 	bca.bc_cache = cp;
1520 	KMEM_AUDIT(lp, cp, &bca);
1521 }
1522 
1523 /*
1524  * Create a new slab for cache cp.
1525  */
1526 static kmem_slab_t *
1527 kmem_slab_create(kmem_cache_t *cp, int kmflag)
1528 {
1529 	size_t slabsize = cp->cache_slabsize;
1530 	size_t chunksize = cp->cache_chunksize;
1531 	int cache_flags = cp->cache_flags;
1532 	size_t color, chunks;
1533 	char *buf, *slab;
1534 	kmem_slab_t *sp;
1535 	kmem_bufctl_t *bcp;
1536 	vmem_t *vmp = cp->cache_arena;
1537 
1538 	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1539 
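	/*
	 * Advance the slab color so that buffers in successive slabs start
	 * at different offsets, spreading them across hardware cache lines
	 * (cache coloring).
	 */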
1540 	color = cp->cache_color + cp->cache_align;
1541 	if (color > cp->cache_maxcolor)
1542 		color = cp->cache_mincolor;
1543 	cp->cache_color = color;
1544 
1545 	slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS);
1546 
1547 	if (slab == NULL)
1548 		goto vmem_alloc_failure;
1549 
1550 	ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
1551 
1552 	/*
1553 	 * Reverify what was already checked in kmem_cache_set_move(), since the
1554 	 * consolidator depends (for correctness) on slabs being initialized
1555 	 * with the 0xbaddcafe memory pattern (setting a low order bit usable by
1556 	 * clients to distinguish uninitialized memory from known objects).
1557 	 */
1558 	ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
1559 	if (!(cp->cache_cflags & KMC_NOTOUCH))
1560 		copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize);
1561 
1562 	if (cache_flags & KMF_HASH) {
1563 		if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL)
1564 			goto slab_alloc_failure;
1565 		chunks = (slabsize - color) / chunksize;
1566 	} else {
1567 		sp = KMEM_SLAB(cp, slab);
1568 		chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize;
1569 	}
1570 
1571 	sp->slab_cache	= cp;
1572 	sp->slab_head	= NULL;
1573 	sp->slab_refcnt	= 0;
1574 	sp->slab_base	= buf = slab + color;
1575 	sp->slab_chunks	= chunks;
1576 	sp->slab_stuck_offset = (uint32_t)-1;
1577 	sp->slab_later_count = 0;
1578 	sp->slab_flags = 0;
1579 
1580 	ASSERT(chunks > 0);
1581 	while (chunks-- != 0) {
1582 		if (cache_flags & KMF_HASH) {
1583 			bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
1584 			if (bcp == NULL)
1585 				goto bufctl_alloc_failure;
1586 			if (cache_flags & KMF_AUDIT) {
1587 				kmem_bufctl_audit_t *bcap =
1588 				    (kmem_bufctl_audit_t *)bcp;
1589 				bzero(bcap, sizeof (kmem_bufctl_audit_t));
1590 				bcap->bc_cache = cp;
1591 			}
1592 			bcp->bc_addr = buf;
1593 			bcp->bc_slab = sp;
1594 		} else {
1595 			bcp = KMEM_BUFCTL(cp, buf);
1596 		}
1597 		if (cache_flags & KMF_BUFTAG) {
1598 			kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1599 			btp->bt_redzone = KMEM_REDZONE_PATTERN;
1600 			btp->bt_bufctl = bcp;
1601 			btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1602 			if (cache_flags & KMF_DEADBEEF) {
1603 				copy_pattern(KMEM_FREE_PATTERN, buf,
1604 				    cp->cache_verify);
1605 			}
1606 		}
1607 		bcp->bc_next = sp->slab_head;
1608 		sp->slab_head = bcp;
1609 		buf += chunksize;
1610 	}
1611 
1612 	kmem_log_event(kmem_slab_log, cp, sp, slab);
1613 
1614 	return (sp);
1615 
1616 bufctl_alloc_failure:
1617 
1618 	while ((bcp = sp->slab_head) != NULL) {
1619 		sp->slab_head = bcp->bc_next;
1620 		kmem_cache_free(cp->cache_bufctl_cache, bcp);
1621 	}
1622 	kmem_cache_free(kmem_slab_cache, sp);
1623 
1624 slab_alloc_failure:
1625 
1626 	vmem_free(vmp, slab, slabsize);
1627 
1628 vmem_alloc_failure:
1629 
1630 	kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1631 	atomic_inc_64(&cp->cache_alloc_fail);
1632 
1633 	return (NULL);
1634 }
1635 
1636 /*
1637  * Destroy a slab.
1638  */
1639 static void
1640 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1641 {
1642 	vmem_t *vmp = cp->cache_arena;
1643 	void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1644 
1645 	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1646 	ASSERT(sp->slab_refcnt == 0);
1647 
1648 	if (cp->cache_flags & KMF_HASH) {
1649 		kmem_bufctl_t *bcp;
1650 		while ((bcp = sp->slab_head) != NULL) {
1651 			sp->slab_head = bcp->bc_next;
1652 			kmem_cache_free(cp->cache_bufctl_cache, bcp);
1653 		}
1654 		kmem_cache_free(kmem_slab_cache, sp);
1655 	}
1656 	vmem_free(vmp, slab, cp->cache_slabsize);
1657 }
1658 
1659 static void *
1660 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
1661 {
1662 	kmem_bufctl_t *bcp, **hash_bucket;
1663 	void *buf;
1664 	boolean_t new_slab = (sp->slab_refcnt == 0);
1665 
1666 	ASSERT(MUTEX_HELD(&cp->cache_lock));
1667 	/*
1668 	 * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we
1669 	 * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
1670 	 * slab is newly created.
1671 	 */
1672 	ASSERT(new_slab || (KMEM_SLAB_IS_PARTIAL(sp) &&
1673 	    (sp == avl_first(&cp->cache_partial_slabs))));
1674 	ASSERT(sp->slab_cache == cp);
1675 
1676 	cp->cache_slab_alloc++;
1677 	cp->cache_bufslab--;
1678 	sp->slab_refcnt++;
1679 
1680 	bcp = sp->slab_head;
1681 	sp->slab_head = bcp->bc_next;
1682 
1683 	if (cp->cache_flags & KMF_HASH) {
1684 		/*
1685 		 * Add buffer to allocated-address hash table.
1686 		 */
1687 		buf = bcp->bc_addr;
1688 		hash_bucket = KMEM_HASH(cp, buf);
1689 		bcp->bc_next = *hash_bucket;
1690 		*hash_bucket = bcp;
1691 		if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1692 			KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1693 		}
1694 	} else {
1695 		buf = KMEM_BUF(cp, bcp);
1696 	}
1697 
1698 	ASSERT(KMEM_SLAB_MEMBER(sp, buf));
1699 
1700 	if (sp->slab_head == NULL) {
1701 		ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
1702 		if (new_slab) {
1703 			ASSERT(sp->slab_chunks == 1);
1704 		} else {
1705 			ASSERT(sp->slab_chunks > 1); /* the slab was partial */
1706 			avl_remove(&cp->cache_partial_slabs, sp);
1707 			sp->slab_later_count = 0; /* clear history */
1708 			sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
1709 			sp->slab_stuck_offset = (uint32_t)-1;
1710 		}
1711 		list_insert_head(&cp->cache_complete_slabs, sp);
1712 		cp->cache_complete_slab_count++;
1713 		return (buf);
1714 	}
1715 
1716 	ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
1717 	/*
1718 	 * Peek to see if the magazine layer is enabled before
1719 	 * we prefill.  We're not holding the cpu cache lock,
1720 	 * so the peek could be wrong, but there's no harm in it.
1721 	 */
1722 	if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
1723 	    (KMEM_CPU_CACHE(cp)->cc_magsize != 0))  {
1724 		kmem_slab_prefill(cp, sp);
1725 		return (buf);
1726 	}
1727 
1728 	if (new_slab) {
1729 		avl_add(&cp->cache_partial_slabs, sp);
1730 		return (buf);
1731 	}
1732 
1733 	/*
1734 	 * The slab is now more allocated than it was, so the
1735 	 * order remains unchanged.
1736 	 */
1737 	ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
1738 	return (buf);
1739 }
1740 
1741 /*
1742  * Allocate a raw (unconstructed) buffer from cp's slab layer.
1743  */
1744 static void *
1745 kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
1746 {
1747 	kmem_slab_t *sp;
1748 	void *buf;
1749 	boolean_t test_destructor;
1750 
1751 	mutex_enter(&cp->cache_lock);
1752 	test_destructor = (cp->cache_slab_alloc == 0);
1753 	sp = avl_first(&cp->cache_partial_slabs);
1754 	if (sp == NULL) {
1755 		ASSERT(cp->cache_bufslab == 0);
1756 
1757 		/*
1758 		 * The freelist is empty.  Create a new slab.
1759 		 */
1760 		mutex_exit(&cp->cache_lock);
1761 		if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
1762 			return (NULL);
1763 		}
1764 		mutex_enter(&cp->cache_lock);
1765 		cp->cache_slab_create++;
1766 		if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1767 			cp->cache_bufmax = cp->cache_buftotal;
1768 		cp->cache_bufslab += sp->slab_chunks;
1769 	}
1770 
1771 	buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
1772 	ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1773 	    (cp->cache_complete_slab_count +
1774 	    avl_numnodes(&cp->cache_partial_slabs) +
1775 	    (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1776 	mutex_exit(&cp->cache_lock);
1777 
1778 	if (test_destructor && cp->cache_destructor != NULL) {
1779 		/*
1780 		 * On the first kmem_slab_alloc(), assert that it is valid to
1781 		 * call the destructor on a newly constructed object without any
1782 		 * client involvement.
1783 		 */
1784 		if ((cp->cache_constructor == NULL) ||
1785 		    cp->cache_constructor(buf, cp->cache_private,
1786 		    kmflag) == 0) {
1787 			cp->cache_destructor(buf, cp->cache_private);
1788 		}
1789 		copy_pattern(KMEM_UNINITIALIZED_PATTERN, buf,
1790 		    cp->cache_bufsize);
1791 		if (cp->cache_flags & KMF_DEADBEEF) {
1792 			copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1793 		}
1794 	}
1795 
1796 	return (buf);
1797 }
1798 
1799 static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *);
1800 
1801 /*
1802  * Free a raw (unconstructed) buffer to cp's slab layer.
1803  */
1804 static void
1805 kmem_slab_free(kmem_cache_t *cp, void *buf)
1806 {
1807 	kmem_slab_t *sp;
1808 	kmem_bufctl_t *bcp, **prev_bcpp;
1809 
1810 	ASSERT(buf != NULL);
1811 
1812 	mutex_enter(&cp->cache_lock);
1813 	cp->cache_slab_free++;
1814 
1815 	if (cp->cache_flags & KMF_HASH) {
1816 		/*
1817 		 * Look up buffer in allocated-address hash table.
1818 		 */
1819 		prev_bcpp = KMEM_HASH(cp, buf);
1820 		while ((bcp = *prev_bcpp) != NULL) {
1821 			if (bcp->bc_addr == buf) {
1822 				*prev_bcpp = bcp->bc_next;
1823 				sp = bcp->bc_slab;
1824 				break;
1825 			}
1826 			cp->cache_lookup_depth++;
1827 			prev_bcpp = &bcp->bc_next;
1828 		}
1829 	} else {
1830 		bcp = KMEM_BUFCTL(cp, buf);
1831 		sp = KMEM_SLAB(cp, buf);
1832 	}
1833 
1834 	if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
1835 		mutex_exit(&cp->cache_lock);
1836 		kmem_error(KMERR_BADADDR, cp, buf);
1837 		return;
1838 	}
1839 
1840 	if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) {
1841 		/*
1842 		 * If this is the buffer that prevented the consolidator from
1843 		 * clearing the slab, we can reset the slab flags now that the
1844 		 * buffer is freed. (It makes sense to do this in
1845 		 * kmem_cache_free(), where the client gives up ownership of the
1846 		 * buffer, but on the hot path the test is too expensive.)
1847 		 */
1848 		kmem_slab_move_yes(cp, sp, buf);
1849 	}
1850 
1851 	if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1852 		if (cp->cache_flags & KMF_CONTENTS)
1853 			((kmem_bufctl_audit_t *)bcp)->bc_contents =
1854 			    kmem_log_enter(kmem_content_log, buf,
1855 			    cp->cache_contents);
1856 		KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1857 	}
1858 
1859 	bcp->bc_next = sp->slab_head;
1860 	sp->slab_head = bcp;
1861 
1862 	cp->cache_bufslab++;
1863 	ASSERT(sp->slab_refcnt >= 1);
1864 
1865 	if (--sp->slab_refcnt == 0) {
1866 		/*
1867 		 * There are no outstanding allocations from this slab,
1868 		 * so we can reclaim the memory.
1869 		 */
1870 		if (sp->slab_chunks == 1) {
1871 			list_remove(&cp->cache_complete_slabs, sp);
1872 			cp->cache_complete_slab_count--;
1873 		} else {
1874 			avl_remove(&cp->cache_partial_slabs, sp);
1875 		}
1876 
1877 		cp->cache_buftotal -= sp->slab_chunks;
1878 		cp->cache_bufslab -= sp->slab_chunks;
1879 		/*
1880 		 * Defer releasing the slab to the virtual memory subsystem
1881 		 * while there is a pending move callback, since we guarantee
1882 		 * that buffers passed to the move callback have only been
1883 		 * touched by kmem or by the client itself. Since the memory
1884 		 * patterns baddcafe (uninitialized) and deadbeef (freed) both
1885 		 * set at least one of the two lowest order bits, the client can
1886 		 * test those bits in the move callback to determine whether or
1887 		 * not it knows about the buffer (assuming that the client also
1888 		 * sets one of those low order bits whenever it frees a buffer).
1889 		 */
1890 		if (cp->cache_defrag == NULL ||
1891 		    (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
1892 		    !(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) {
1893 			cp->cache_slab_destroy++;
1894 			mutex_exit(&cp->cache_lock);
1895 			kmem_slab_destroy(cp, sp);
1896 		} else {
1897 			list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
1898 			/*
1899 			 * Slabs are inserted at both ends of the deadlist to
1900 			 * distinguish between slabs freed while move callbacks
1901 			 * are pending (list head) and a slab freed while the
1902 			 * lock is dropped in kmem_move_buffers() (list tail) so
1903 			 * that in both cases slab_destroy() is called from the
1904 			 * right context.
1905 			 */
1906 			if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
1907 				list_insert_tail(deadlist, sp);
1908 			} else {
1909 				list_insert_head(deadlist, sp);
1910 			}
1911 			cp->cache_defrag->kmd_deadcount++;
1912 			mutex_exit(&cp->cache_lock);
1913 		}
1914 		return;
1915 	}
1916 
1917 	if (bcp->bc_next == NULL) {
1918 		/* Transition the slab from completely allocated to partial. */
1919 		ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1));
1920 		ASSERT(sp->slab_chunks > 1);
1921 		list_remove(&cp->cache_complete_slabs, sp);
1922 		cp->cache_complete_slab_count--;
1923 		avl_add(&cp->cache_partial_slabs, sp);
1924 	} else {
1925 #ifdef	DEBUG
1926 		if (avl_update_gt(&cp->cache_partial_slabs, sp)) {
1927 			KMEM_STAT_ADD(kmem_move_stats.kms_avl_update);
1928 		} else {
1929 			KMEM_STAT_ADD(kmem_move_stats.kms_avl_noupdate);
1930 		}
1931 #else
1932 		(void) avl_update_gt(&cp->cache_partial_slabs, sp);
1933 #endif
1934 	}
1935 
1936 	ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1937 	    (cp->cache_complete_slab_count +
1938 	    avl_numnodes(&cp->cache_partial_slabs) +
1939 	    (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1940 	mutex_exit(&cp->cache_lock);
1941 }
1942 
1943 /*
1944  * Return -1 if kmem_error() was called, 1 if the constructor fails, 0 on success.
1945  */
1946 static int
1947 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
1948     caddr_t caller)
1949 {
1950 	kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1951 	kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1952 	uint32_t mtbf;
1953 
1954 	if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1955 		kmem_error(KMERR_BADBUFTAG, cp, buf);
1956 		return (-1);
1957 	}
1958 
1959 	btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC;
1960 
1961 	if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1962 		kmem_error(KMERR_BADBUFCTL, cp, buf);
1963 		return (-1);
1964 	}
1965 
1966 	if (cp->cache_flags & KMF_DEADBEEF) {
1967 		if (!construct && (cp->cache_flags & KMF_LITE)) {
1968 			if (*(uint64_t *)buf != KMEM_FREE_PATTERN) {
1969 				kmem_error(KMERR_MODIFIED, cp, buf);
1970 				return (-1);
1971 			}
1972 			if (cp->cache_constructor != NULL)
1973 				*(uint64_t *)buf = btp->bt_redzone;
1974 			else
1975 				*(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN;
1976 		} else {
1977 			construct = 1;
1978 			if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
1979 			    KMEM_UNINITIALIZED_PATTERN, buf,
1980 			    cp->cache_verify)) {
1981 				kmem_error(KMERR_MODIFIED, cp, buf);
1982 				return (-1);
1983 			}
1984 		}
1985 	}
1986 	btp->bt_redzone = KMEM_REDZONE_PATTERN;
1987 
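	/*
	 * Fault injection: when kmem_mtbf or the cache's mtbf is nonzero,
	 * periodically fail KM_NOSLEEP (but not KM_PANIC) allocations so
	 * that callers' failure paths get exercised.
	 */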
1988 	if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1989 	    gethrtime() % mtbf == 0 &&
1990 	    (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
1991 		kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1992 		if (!construct && cp->cache_destructor != NULL)
1993 			cp->cache_destructor(buf, cp->cache_private);
1994 	} else {
1995 		mtbf = 0;
1996 	}
1997 
1998 	if (mtbf || (construct && cp->cache_constructor != NULL &&
1999 	    cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
2000 		atomic_inc_64(&cp->cache_alloc_fail);
2001 		btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
2002 		if (cp->cache_flags & KMF_DEADBEEF)
2003 			copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2004 		kmem_slab_free(cp, buf);
2005 		return (1);
2006 	}
2007 
2008 	if (cp->cache_flags & KMF_AUDIT) {
2009 		KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2010 	}
2011 
2012 	if ((cp->cache_flags & KMF_LITE) &&
2013 	    !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2014 		KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2015 	}
2016 
2017 	return (0);
2018 }
2019 
2020 static int
2021 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
2022 {
2023 	kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2024 	kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
2025 	kmem_slab_t *sp;
2026 
2027 	if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) {
2028 		if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
2029 			kmem_error(KMERR_DUPFREE, cp, buf);
2030 			return (-1);
2031 		}
2032 		sp = kmem_findslab(cp, buf);
2033 		if (sp == NULL || sp->slab_cache != cp)
2034 			kmem_error(KMERR_BADADDR, cp, buf);
2035 		else
2036 			kmem_error(KMERR_REDZONE, cp, buf);
2037 		return (-1);
2038 	}
2039 
2040 	btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
2041 
2042 	if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
2043 		kmem_error(KMERR_BADBUFCTL, cp, buf);
2044 		return (-1);
2045 	}
2046 
2047 	if (btp->bt_redzone != KMEM_REDZONE_PATTERN) {
2048 		kmem_error(KMERR_REDZONE, cp, buf);
2049 		return (-1);
2050 	}
2051 
2052 	if (cp->cache_flags & KMF_AUDIT) {
2053 		if (cp->cache_flags & KMF_CONTENTS)
2054 			bcp->bc_contents = kmem_log_enter(kmem_content_log,
2055 			    buf, cp->cache_contents);
2056 		KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2057 	}
2058 
2059 	if ((cp->cache_flags & KMF_LITE) &&
2060 	    !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2061 		KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2062 	}
2063 
2064 	if (cp->cache_flags & KMF_DEADBEEF) {
2065 		if (cp->cache_flags & KMF_LITE)
2066 			btp->bt_redzone = *(uint64_t *)buf;
2067 		else if (cp->cache_destructor != NULL)
2068 			cp->cache_destructor(buf, cp->cache_private);
2069 
2070 		copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2071 	}
2072 
2073 	return (0);
2074 }
2075 
2076 /*
2077  * Free each object in magazine mp to cp's slab layer, and free mp itself.
2078  */
2079 static void
2080 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
2081 {
2082 	int round;
2083 
2084 	ASSERT(!list_link_active(&cp->cache_link) ||
2085 	    taskq_member(kmem_taskq, curthread));
2086 
2087 	for (round = 0; round < nrounds; round++) {
2088 		void *buf = mp->mag_round[round];
2089 
2090 		if (cp->cache_flags & KMF_DEADBEEF) {
2091 			if (verify_pattern(KMEM_FREE_PATTERN, buf,
2092 			    cp->cache_verify) != NULL) {
2093 				kmem_error(KMERR_MODIFIED, cp, buf);
2094 				continue;
2095 			}
2096 			if ((cp->cache_flags & KMF_LITE) &&
2097 			    cp->cache_destructor != NULL) {
2098 				kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2099 				*(uint64_t *)buf = btp->bt_redzone;
2100 				cp->cache_destructor(buf, cp->cache_private);
2101 				*(uint64_t *)buf = KMEM_FREE_PATTERN;
2102 			}
2103 		} else if (cp->cache_destructor != NULL) {
2104 			cp->cache_destructor(buf, cp->cache_private);
2105 		}
2106 
2107 		kmem_slab_free(cp, buf);
2108 	}
2109 	ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2110 	kmem_cache_free(cp->cache_magtype->mt_cache, mp);
2111 }
2112 
2113 /*
2114  * Allocate a magazine from the depot.
2115  */
2116 static kmem_magazine_t *
2117 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
2118 {
2119 	kmem_magazine_t *mp;
2120 
2121 	/*
2122 	 * If we can't get the depot lock without contention,
2123 	 * update our contention count.  We use the depot
2124 	 * contention rate to determine whether we need to
2125 	 * increase the magazine size for better scalability.
2126 	 */
2127 	if (!mutex_tryenter(&cp->cache_depot_lock)) {
2128 		mutex_enter(&cp->cache_depot_lock);
2129 		cp->cache_depot_contention++;
2130 	}
2131 
2132 	if ((mp = mlp->ml_list) != NULL) {
2133 		ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2134 		mlp->ml_list = mp->mag_next;
2135 		if (--mlp->ml_total < mlp->ml_min)
2136 			mlp->ml_min = mlp->ml_total;
2137 		mlp->ml_alloc++;
2138 	}
2139 
2140 	mutex_exit(&cp->cache_depot_lock);
2141 
2142 	return (mp);
2143 }
2144 
2145 /*
2146  * Free a magazine to the depot.
2147  */
2148 static void
2149 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
2150 {
2151 	mutex_enter(&cp->cache_depot_lock);
2152 	ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2153 	mp->mag_next = mlp->ml_list;
2154 	mlp->ml_list = mp;
2155 	mlp->ml_total++;
2156 	mutex_exit(&cp->cache_depot_lock);
2157 }
2158 
2159 /*
2160  * Update the working set statistics for cp's depot.
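 * ml_min tracks the lowest magazine count observed since the last update;
 * promoting it to ml_reaplimit means that only magazines which sat unused
 * for a full interval are eligible to be reaped by kmem_depot_ws_reap().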
2161  */
2162 static void
2163 kmem_depot_ws_update(kmem_cache_t *cp)
2164 {
2165 	mutex_enter(&cp->cache_depot_lock);
2166 	cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
2167 	cp->cache_full.ml_min = cp->cache_full.ml_total;
2168 	cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
2169 	cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2170 	mutex_exit(&cp->cache_depot_lock);
2171 }
2172 
2173 /*
2174  * Set the working set statistics for cp's depot to zero.  (Everything is
2175  * eligible for reaping.)
2176  */
2177 static void
2178 kmem_depot_ws_zero(kmem_cache_t *cp)
2179 {
2180 	mutex_enter(&cp->cache_depot_lock);
2181 	cp->cache_full.ml_reaplimit = cp->cache_full.ml_total;
2182 	cp->cache_full.ml_min = cp->cache_full.ml_total;
2183 	cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total;
2184 	cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2185 	mutex_exit(&cp->cache_depot_lock);
2186 }
2187 
2188 /*
2189  * The number of bytes to reap before we call kpreempt(). The default (1MB)
2190  * causes us to preempt reaping up to hundreds of times per second. Using a
2191  * larger value (1GB) causes this to have virtually no effect.
2192  */
2193 size_t kmem_reap_preempt_bytes = 1024 * 1024;
2194 
2195 /*
2196  * Reap all magazines that have fallen out of the depot's working set.
2197  */
2198 static void
2199 kmem_depot_ws_reap(kmem_cache_t *cp)
2200 {
2201 	size_t bytes = 0;
2202 	long reap;
2203 	kmem_magazine_t *mp;
2204 
2205 	ASSERT(!list_link_active(&cp->cache_link) ||
2206 	    taskq_member(kmem_taskq, curthread));
2207 
2208 	reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
2209 	while (reap-- &&
2210 	    (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) {
2211 		kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
2212 		bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2213 		if (bytes > kmem_reap_preempt_bytes) {
2214 			kpreempt(KPREEMPT_SYNC);
2215 			bytes = 0;
2216 		}
2217 	}
2218 
2219 	reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
2220 	while (reap-- &&
2221 	    (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) {
2222 		kmem_magazine_destroy(cp, mp, 0);
2223 		bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2224 		if (bytes > kmem_reap_preempt_bytes) {
2225 			kpreempt(KPREEMPT_SYNC);
2226 			bytes = 0;
2227 		}
2228 	}
2229 }
2230 
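/*
 * Make magazine mp (holding `rounds' rounds) the CPU's loaded magazine,
 * remembering the old loaded magazine as the previously loaded one.
 */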
2231 static void
2232 kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds)
2233 {
2234 	ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
2235 	    (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
2236 	ASSERT(ccp->cc_magsize > 0);
2237 
2238 	ccp->cc_ploaded = ccp->cc_loaded;
2239 	ccp->cc_prounds = ccp->cc_rounds;
2240 	ccp->cc_loaded = mp;
2241 	ccp->cc_rounds = rounds;
2242 }
2243 
2244 /*
2245  * Intercept kmem alloc/free calls during crash dump in order to avoid
2246  * changing kmem state while memory is being saved to the dump device.
2247  * Otherwise, ::kmem_verify will report "corrupt buffers".  Note that
2248  * there are no locks because only one CPU calls kmem during a crash
2249  * dump. To enable this feature, first create the associated vmem
2250  * arena with VMC_DUMPSAFE.
2251  */
2252 static void *kmem_dump_start;	/* start of pre-reserved heap */
2253 static void *kmem_dump_end;	/* end of heap area */
2254 static void *kmem_dump_curr;	/* current free heap pointer */
2255 static size_t kmem_dump_size;	/* size of heap area */
2256 
2257 /* append to each buf created in the pre-reserved heap */
2258 typedef struct kmem_dumpctl {
2259 	void	*kdc_next;	/* cache dump free list linkage */
2260 } kmem_dumpctl_t;
2261 
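/* locate the dumpctl appended just past the buffer, pointer-aligned */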
2262 #define	KMEM_DUMPCTL(cp, buf)	\
2263 	((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2264 	    sizeof (void *)))
2265 
2266 /* set nonzero for a full report */
2267 uint_t kmem_dump_verbose = 0;
2268 
2269 /* stats for oversize heap */
2270 uint_t kmem_dump_oversize_allocs = 0;
2271 uint_t kmem_dump_oversize_max = 0;
2272 
2273 static void
2274 kmem_dumppr(char **pp, char *e, const char *format, ...)
2275 {
2276 	char *p = *pp;
2277 
2278 	if (p < e) {
2279 		int n;
2280 		va_list ap;
2281 
2282 		va_start(ap, format);
2283 		n = vsnprintf(p, e - p, format, ap);
2284 		va_end(ap);
2285 		*pp = p + n;
2286 	}
2287 }
2288 
2289 /*
2290  * Called when dumpadm(1M) configures dump parameters.
2291  */
2292 void
2293 kmem_dump_init(size_t size)
2294 {
2295 	/* Our caller ensures size is always set. */
2296 	ASSERT3U(size, >, 0);
2297 
2298 	if (kmem_dump_start != NULL)
2299 		kmem_free(kmem_dump_start, kmem_dump_size);
2300 
2301 	kmem_dump_start = kmem_alloc(size, KM_SLEEP);
2302 	kmem_dump_size = size;
2303 	kmem_dump_curr = kmem_dump_start;
2304 	kmem_dump_end = (void *)((char *)kmem_dump_start + size);
2305 	copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
2306 }
2307 
2308 /*
2309  * Set flag for each kmem_cache_t if it is safe to use alternate dump
2310  * memory. Called just before panic crash dump starts. Set the flag
2311  * for the calling CPU.
2312  */
2313 void
2314 kmem_dump_begin(void)
2315 {
2316 	kmem_cache_t *cp;
2317 
2318 	ASSERT(panicstr != NULL);
2319 
2320 	for (cp = list_head(&kmem_caches); cp != NULL;
2321 	    cp = list_next(&kmem_caches, cp)) {
2322 		kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2323 
2324 		if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
2325 			cp->cache_flags |= KMF_DUMPDIVERT;
2326 			ccp->cc_flags |= KMF_DUMPDIVERT;
2327 			ccp->cc_dump_rounds = ccp->cc_rounds;
2328 			ccp->cc_dump_prounds = ccp->cc_prounds;
2329 			ccp->cc_rounds = ccp->cc_prounds = -1;
2330 		} else {
2331 			cp->cache_flags |= KMF_DUMPUNSAFE;
2332 			ccp->cc_flags |= KMF_DUMPUNSAFE;
2333 		}
2334 	}
2335 }
2336 
2337 /*
2338  * Finish the dump intercept:
2339  * print any warnings on the console, and
2340  * return verbose information to dumpsys() in the given buffer.
2341  */
2342 size_t
2343 kmem_dump_finish(char *buf, size_t size)
2344 {
2345 	int percent = 0;
2346 	size_t used;
2347 	char *e = buf + size;
2348 	char *p = buf;
2349 
2350 	if (kmem_dump_curr == kmem_dump_end) {
2351 		cmn_err(CE_WARN, "exceeded kmem_dump space of %lu "
2352 		    "bytes: kmem state in dump may be inconsistent",
2353 		    kmem_dump_size);
2354 	}
2355 
2356 	if (kmem_dump_verbose == 0)
2357 		return (0);
2358 
2359 	used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
2360 	percent = (used * 100) / kmem_dump_size;
2361 
2362 	kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
2363 	kmem_dumppr(&p, e, "used bytes,%ld\n", used);
2364 	kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
2365 	kmem_dumppr(&p, e, "Oversize allocs,%d\n",
2366 	    kmem_dump_oversize_allocs);
2367 	kmem_dumppr(&p, e, "Oversize max size,%ld\n",
2368 	    kmem_dump_oversize_max);
2369 
2370 	/* return buffer size used */
2371 	if (p < e)
2372 		bzero(p, e - p);
2373 	return (p - buf);
2374 }
2375 
2376 /*
2377  * Allocate a constructed object from alternate dump memory.
2378  */
2379 void *
2380 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
2381 {
2382 	void *buf;
2383 	void *curr;
2384 	char *bufend;
2385 
2386 	/* return a constructed object */
2387 	if ((buf = cp->cache_dump.kd_freelist) != NULL) {
2388 		cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2389 		return (buf);
2390 	}
2391 
2392 	/* create a new constructed object */
2393 	curr = kmem_dump_curr;
2394 	buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
2395 	bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
2396 
2397 	/* hat layer objects cannot cross a page boundary */
2398 	if (cp->cache_align < PAGESIZE) {
2399 		char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
2400 		if (bufend > page) {
2401 			bufend += page - (char *)buf;
2402 			buf = (void *)page;
2403 		}
2404 	}
2405 
2406 	/* fall back to normal alloc if reserved area is used up */
2407 	if (bufend > (char *)kmem_dump_end) {
2408 		kmem_dump_curr = kmem_dump_end;
2409 		cp->cache_dump.kd_alloc_fails++;
2410 		return (NULL);
2411 	}
2412 
2413 	/*
2414 	 * Must advance curr pointer before calling a constructor that
2415 	 * may also allocate memory.
2416 	 */
2417 	kmem_dump_curr = bufend;
2418 
2419 	/* run constructor */
2420 	if (cp->cache_constructor != NULL &&
2421 	    cp->cache_constructor(buf, cp->cache_private, kmflag)
2422 	    != 0) {
2423 #ifdef DEBUG
2424 		printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
2425 		    cp->cache_name, (void *)cp);
2426 #endif
2427 		/* reset curr pointer iff no allocs were done */
2428 		if (kmem_dump_curr == bufend)
2429 			kmem_dump_curr = curr;
2430 
2431 		cp->cache_dump.kd_alloc_fails++;
2432 		/* fall back to normal alloc if the constructor fails */
2433 		return (NULL);
2434 	}
2435 
2436 	return (buf);
2437 }
2438 
2439 /*
2440  * Free a constructed object in alternate dump memory.
2441  */
2442 int
2443 kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2444 {
2445 	/* save constructed buffers for next time */
2446 	if ((char *)buf >= (char *)kmem_dump_start &&
2447 	    (char *)buf < (char *)kmem_dump_end) {
2448 		KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist;
2449 		cp->cache_dump.kd_freelist = buf;
2450 		return (0);
2451 	}
2452 
2453 	/* just drop buffers that were allocated before dump started */
2454 	if (kmem_dump_curr < kmem_dump_end)
2455 		return (0);
2456 
2457 	/* fall back to normal free if reserved area is used up */
2458 	return (1);
2459 }
2460 
2461 /*
2462  * Allocate a constructed object from cache cp.
2463  */
2464 void *
2465 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2466 {
2467 	kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2468 	kmem_magazine_t *fmp;
2469 	void *buf;
2470 
2471 	mutex_enter(&ccp->cc_lock);
2472 	for (;;) {
2473 		/*
2474 		 * If there's an object available in the current CPU's
2475 		 * loaded magazine, just take it and return.
2476 		 */
2477 		if (ccp->cc_rounds > 0) {
2478 			buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2479 			ccp->cc_alloc++;
2480 			mutex_exit(&ccp->cc_lock);
2481 			if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
2482 				if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2483 					ASSERT(!(ccp->cc_flags &
2484 					    KMF_DUMPDIVERT));
2485 					cp->cache_dump.kd_unsafe++;
2486 				}
2487 				if ((ccp->cc_flags & KMF_BUFTAG) &&
2488 				    kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2489 				    caller()) != 0) {
2490 					if (kmflag & KM_NOSLEEP)
2491 						return (NULL);
2492 					mutex_enter(&ccp->cc_lock);
2493 					continue;
2494 				}
2495 			}
2496 			return (buf);
2497 		}
2498 
2499 		/*
2500 		 * The loaded magazine is empty.  If the previously loaded
2501 		 * magazine was full, exchange them and try again.
2502 		 */
2503 		if (ccp->cc_prounds > 0) {
2504 			kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2505 			continue;
2506 		}
2507 
2508 		/*
2509 		 * Return an alternate buffer at dump time to preserve
2510 		 * the heap.
2511 		 */
2512 		if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2513 			if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2514 				ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2515 				/* log it so that we can warn about it */
2516 				cp->cache_dump.kd_unsafe++;
2517 			} else {
2518 				if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2519 				    NULL) {
2520 					mutex_exit(&ccp->cc_lock);
2521 					return (buf);
2522 				}
2523 				break;		/* fall back to slab layer */
2524 			}
2525 		}
2526 
2527 		/*
2528 		 * If the magazine layer is disabled, break out now.
2529 		 */
2530 		if (ccp->cc_magsize == 0)
2531 			break;
2532 
2533 		/*
2534 		 * Try to get a full magazine from the depot.
2535 		 */
2536 		fmp = kmem_depot_alloc(cp, &cp->cache_full);
2537 		if (fmp != NULL) {
2538 			if (ccp->cc_ploaded != NULL)
2539 				kmem_depot_free(cp, &cp->cache_empty,
2540 				    ccp->cc_ploaded);
2541 			kmem_cpu_reload(ccp, fmp, ccp->cc_magsize);
2542 			continue;
2543 		}
2544 
2545 		/*
2546 		 * There are no full magazines in the depot,
2547 		 * so fall through to the slab layer.
2548 		 */
2549 		break;
2550 	}
2551 	mutex_exit(&ccp->cc_lock);
2552 
2553 	/*
2554 	 * We couldn't allocate a constructed object from the magazine layer,
2555 	 * so get a raw buffer from the slab layer and apply its constructor.
2556 	 */
2557 	buf = kmem_slab_alloc(cp, kmflag);
2558 
2559 	if (buf == NULL)
2560 		return (NULL);
2561 
2562 	if (cp->cache_flags & KMF_BUFTAG) {
2563 		/*
2564 		 * Make kmem_cache_alloc_debug() apply the constructor for us.
2565 		 */
2566 		int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2567 		if (rc != 0) {
2568 			if (kmflag & KM_NOSLEEP)
2569 				return (NULL);
2570 			/*
2571 			 * kmem_cache_alloc_debug() detected corruption
2572 			 * but didn't panic (kmem_panic <= 0). We should not be
2573 			 * here because the constructor failed (indicated by a
2574 			 * return code of 1). Try again.
2575 			 */
2576 			ASSERT(rc == -1);
2577 			return (kmem_cache_alloc(cp, kmflag));
2578 		}
2579 		return (buf);
2580 	}
2581 
2582 	if (cp->cache_constructor != NULL &&
2583 	    cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2584 		atomic_inc_64(&cp->cache_alloc_fail);
2585 		kmem_slab_free(cp, buf);
2586 		return (NULL);
2587 	}
2588 
2589 	return (buf);
2590 }
2591 
2592 /*
2593  * The freed argument tells whether or not kmem_cache_free_debug() has already
2594  * been called so that we can avoid the duplicate free error. For example, a
2595  * buffer on a magazine has already been freed by the client but is still
2596  * constructed.
2597  */
2598 static void
2599 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2600 {
2601 	if (!freed && (cp->cache_flags & KMF_BUFTAG))
2602 		if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2603 			return;
2604 
2605 	/*
2606 	 * Note that if KMF_DEADBEEF is in effect and KMF_LITE is not,
2607 	 * kmem_cache_free_debug() will have already applied the destructor.
2608 	 */
2609 	if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
2610 	    cp->cache_destructor != NULL) {
2611 		if (cp->cache_flags & KMF_DEADBEEF) {	/* KMF_LITE implied */
2612 			kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2613 			*(uint64_t *)buf = btp->bt_redzone;
2614 			cp->cache_destructor(buf, cp->cache_private);
2615 			*(uint64_t *)buf = KMEM_FREE_PATTERN;
2616 		} else {
2617 			cp->cache_destructor(buf, cp->cache_private);
2618 		}
2619 	}
2620 
2621 	kmem_slab_free(cp, buf);
2622 }
2623 
2624 /*
2625  * Used when there's no room to free a buffer to the per-CPU cache.
2626  * Drops and re-acquires &ccp->cc_lock, and returns non-zero if the
2627  * caller should try freeing to the per-CPU cache again.
2628  * Note that we don't directly install the magazine in the cpu cache,
2629  * since its state may have changed wildly while the lock was dropped.
2630  */
2631 static int
2632 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
2633 {
2634 	kmem_magazine_t *emp;
2635 	kmem_magtype_t *mtp;
2636 
2637 	ASSERT(MUTEX_HELD(&ccp->cc_lock));
2638 	ASSERT(((uint_t)ccp->cc_rounds == ccp->cc_magsize ||
2639 	    ((uint_t)ccp->cc_rounds == -1)) &&
2640 	    ((uint_t)ccp->cc_prounds == ccp->cc_magsize ||
2641 	    ((uint_t)ccp->cc_prounds == -1)));
2642 
2643 	emp = kmem_depot_alloc(cp, &cp->cache_empty);
2644 	if (emp != NULL) {
2645 		if (ccp->cc_ploaded != NULL)
2646 			kmem_depot_free(cp, &cp->cache_full,
2647 			    ccp->cc_ploaded);
2648 		kmem_cpu_reload(ccp, emp, 0);
2649 		return (1);
2650 	}
2651 	/*
2652 	 * There are no empty magazines in the depot,
2653 	 * so try to allocate a new one.  We must drop all locks
2654 	 * across kmem_cache_alloc() because lower layers may
2655 	 * attempt to allocate from this cache.
2656 	 */
2657 	mtp = cp->cache_magtype;
2658 	mutex_exit(&ccp->cc_lock);
2659 	emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
2660 	mutex_enter(&ccp->cc_lock);
2661 
2662 	if (emp != NULL) {
2663 		/*
2664 		 * We successfully allocated an empty magazine.
2665 		 * However, we had to drop ccp->cc_lock to do it,
2666 		 * so the cache's magazine size may have changed.
2667 		 * If so, free the magazine and try again.
2668 		 */
2669 		if (ccp->cc_magsize != mtp->mt_magsize) {
2670 			mutex_exit(&ccp->cc_lock);
2671 			kmem_cache_free(mtp->mt_cache, emp);
2672 			mutex_enter(&ccp->cc_lock);
2673 			return (1);
2674 		}
2675 
2676 		/*
2677 		 * We got a magazine of the right size.  Add it to
2678 		 * the depot and try the whole dance again.
2679 		 */
2680 		kmem_depot_free(cp, &cp->cache_empty, emp);
2681 		return (1);
2682 	}
2683 
2684 	/*
2685 	 * We couldn't allocate an empty magazine,
2686 	 * so fall through to the slab layer.
2687 	 */
2688 	return (0);
2689 }
2690 
2691 /*
2692  * Free a constructed object to cache cp.
2693  */
2694 void
2695 kmem_cache_free(kmem_cache_t *cp, void *buf)
2696 {
2697 	kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2698 
2699 	/*
2700 	 * The client must not free either of the buffers passed to the move
2701 	 * callback function.
2702 	 */
2703 	ASSERT(cp->cache_defrag == NULL ||
2704 	    cp->cache_defrag->kmd_thread != curthread ||
2705 	    (buf != cp->cache_defrag->kmd_from_buf &&
2706 	    buf != cp->cache_defrag->kmd_to_buf));
2707 
2708 	if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2709 		if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2710 			ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2711 			/* log it so that we can warn about it */
2712 			cp->cache_dump.kd_unsafe++;
2713 		} else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2714 			return;
2715 		}
2716 		if (ccp->cc_flags & KMF_BUFTAG) {
2717 			if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2718 				return;
2719 		}
2720 	}
2721 
2722 	mutex_enter(&ccp->cc_lock);
2723 	/*
2724 	 * Any changes to this logic should be reflected in kmem_slab_prefill()
2725 	 */
2726 	for (;;) {
2727 		/*
2728 		 * If there's a slot available in the current CPU's
2729 		 * loaded magazine, just put the object there and return.
2730 		 */
2731 		if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2732 			ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
2733 			ccp->cc_free++;
2734 			mutex_exit(&ccp->cc_lock);
2735 			return;
2736 		}
2737 
2738 		/*
2739 		 * The loaded magazine is full.  If the previously loaded
2740 		 * magazine was empty, exchange them and try again.
2741 		 */
2742 		if (ccp->cc_prounds == 0) {
2743 			kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2744 			continue;
2745 		}
2746 
2747 		/*
2748 		 * If the magazine layer is disabled, break out now.
2749 		 */
2750 		if (ccp->cc_magsize == 0)
2751 			break;
2752 
2753 		if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
2754 			/*
2755 			 * We couldn't free our constructed object to the
2756 			 * magazine layer, so apply its destructor and free it
2757 			 * to the slab layer.
2758 			 */
2759 			break;
2760 		}
2761 	}
2762 	mutex_exit(&ccp->cc_lock);
2763 	kmem_slab_free_constructed(cp, buf, B_TRUE);
2764 }
2765 
2766 static void
2767 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
2768 {
2769 	kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2770 	int cache_flags = cp->cache_flags;
2771 
2772 	kmem_bufctl_t *next, *head;
2773 	size_t nbufs;
2774 
2775 	/*
2776 	 * Completely allocate the newly created slab and put the pre-allocated
2777 	 * buffers in magazines. Any of the buffers that cannot be put in
2778 	 * magazines must be returned to the slab.
2779 	 */
2780 	ASSERT(MUTEX_HELD(&cp->cache_lock));
2781 	ASSERT((cache_flags & (KMF_PREFILL|KMF_BUFTAG)) == KMF_PREFILL);
2782 	ASSERT(cp->cache_constructor == NULL);
2783 	ASSERT(sp->slab_cache == cp);
2784 	ASSERT(sp->slab_refcnt == 1);
2785 	ASSERT(sp->slab_head != NULL && sp->slab_chunks > sp->slab_refcnt);
2786 	ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
2787 
2788 	head = sp->slab_head;
2789 	nbufs = (sp->slab_chunks - sp->slab_refcnt);
2790 	sp->slab_head = NULL;
2791 	sp->slab_refcnt += nbufs;
2792 	cp->cache_bufslab -= nbufs;
2793 	cp->cache_slab_alloc += nbufs;
2794 	list_insert_head(&cp->cache_complete_slabs, sp);
2795 	cp->cache_complete_slab_count++;
2796 	mutex_exit(&cp->cache_lock);
2797 	mutex_enter(&ccp->cc_lock);
2798 
2799 	while (head != NULL) {
2800 		void *buf = KMEM_BUF(cp, head);
2801 		/*
2802 		 * If there's a slot available in the current CPU's
2803 		 * loaded magazine, just put the object there and
2804 		 * continue.
2805 		 */
2806 		if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2807 			ccp->cc_loaded->mag_round[ccp->cc_rounds++] =
2808 			    buf;
2809 			ccp->cc_free++;
2810 			nbufs--;
2811 			head = head->bc_next;
2812 			continue;
2813 		}
2814 
2815 		/*
2816 		 * The loaded magazine is full.  If the previously
2817 		 * loaded magazine was empty, exchange them and try
2818 		 * again.
2819 		 */
2820 		if (ccp->cc_prounds == 0) {
2821 			kmem_cpu_reload(ccp, ccp->cc_ploaded,
2822 			    ccp->cc_prounds);
2823 			continue;
2824 		}
2825 
2826 		/*
2827 		 * If the magazine layer is disabled, break out now.
2828 		 */
2829 
2830 		if (ccp->cc_magsize == 0) {
2831 			break;
2832 		}
2833 
2834 		if (!kmem_cpucache_magazine_alloc(ccp, cp))
2835 			break;
2836 	}
2837 	mutex_exit(&ccp->cc_lock);
2838 	if (nbufs != 0) {
2839 		ASSERT(head != NULL);
2840 
2841 		/*
2842 		 * If there was a failure, return remaining objects to
		 * the slab.
2844 		 */
2845 		while (head != NULL) {
2846 			ASSERT(nbufs != 0);
2847 			next = head->bc_next;
2848 			head->bc_next = NULL;
2849 			kmem_slab_free(cp, KMEM_BUF(cp, head));
2850 			head = next;
2851 			nbufs--;
2852 		}
2853 	}
2854 	ASSERT(head == NULL);
2855 	ASSERT(nbufs == 0);
2856 	mutex_enter(&cp->cache_lock);
2857 }
2858 
2859 void *
2860 kmem_zalloc(size_t size, int kmflag)
2861 {
2862 	size_t index;
2863 	void *buf;
2864 
2865 	if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2866 		kmem_cache_t *cp = kmem_alloc_table[index];
2867 		buf = kmem_cache_alloc(cp, kmflag);
2868 		if (buf != NULL) {
2869 			if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
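				/*
				 * Mark the byte just past the requested
				 * size with the redzone byte and record
				 * the size in the buftag so kmem_free()
				 * can verify both.
				 */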
2870 				kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2871 				((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2872 				((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2873 
2874 				if (cp->cache_flags & KMF_LITE) {
2875 					KMEM_BUFTAG_LITE_ENTER(btp,
2876 					    kmem_lite_count, caller());
2877 				}
2878 			}
2879 			bzero(buf, size);
2880 		}
2881 	} else {
2882 		buf = kmem_alloc(size, kmflag);
2883 		if (buf != NULL)
2884 			bzero(buf, size);
2885 	}
2886 	return (buf);
2887 }
2888 
2889 void *
2890 kmem_alloc(size_t size, int kmflag)
2891 {
2892 	size_t index;
2893 	kmem_cache_t *cp;
2894 	void *buf;
2895 
2896 	if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2897 		cp = kmem_alloc_table[index];
2898 		/* fall through to kmem_cache_alloc() */
2899 
2900 	} else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2901 	    kmem_big_alloc_table_max) {
2902 		cp = kmem_big_alloc_table[index];
2903 		/* fall through to kmem_cache_alloc() */
2904 
2905 	} else {
2906 		if (size == 0)
2907 			return (NULL);
2908 
2909 		buf = vmem_alloc(kmem_oversize_arena, size,
2910 		    kmflag & KM_VMFLAGS);
2911 		if (buf == NULL)
2912 			kmem_log_event(kmem_failure_log, NULL, NULL,
2913 			    (void *)size);
2914 		else if (KMEM_DUMP(kmem_slab_cache)) {
2915 			/* stats for dump intercept */
2916 			kmem_dump_oversize_allocs++;
2917 			if (size > kmem_dump_oversize_max)
2918 				kmem_dump_oversize_max = size;
2919 		}
2920 		return (buf);
2921 	}
2922 
2923 	buf = kmem_cache_alloc(cp, kmflag);
2924 	if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
2925 		kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2926 		((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2927 		((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2928 
2929 		if (cp->cache_flags & KMF_LITE) {
2930 			KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller());
2931 		}
2932 	}
2933 	return (buf);
2934 }
2935 
2936 void
2937 kmem_free(void *buf, size_t size)
2938 {
2939 	size_t index;
2940 	kmem_cache_t *cp;
2941 
2942 	if ((index = (size - 1) >> KMEM_ALIGN_SHIFT) < KMEM_ALLOC_TABLE_MAX) {
2943 		cp = kmem_alloc_table[index];
2944 		/* fall through to kmem_cache_free() */
2945 
2946 	} else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2947 	    kmem_big_alloc_table_max) {
2948 		cp = kmem_big_alloc_table[index];
2949 		/* fall through to kmem_cache_free() */
2950 
2951 	} else {
2952 		EQUIV(buf == NULL, size == 0);
2953 		if (buf == NULL && size == 0)
2954 			return;
2955 		vmem_free(kmem_oversize_arena, buf, size);
2956 		return;
2957 	}
2958 
2959 	if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2960 		kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2961 		uint32_t *ip = (uint32_t *)btp;
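		/*
		 * The size was encoded in the buftag at allocation time.  A
		 * mismatch indicates a duplicate free (the buffer already
		 * holds the free pattern), a free with the wrong size, or a
		 * redzone violation.
		 */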
2962 		if (ip[1] != KMEM_SIZE_ENCODE(size)) {
2963 			if (*(uint64_t *)buf == KMEM_FREE_PATTERN) {
2964 				kmem_error(KMERR_DUPFREE, cp, buf);
2965 				return;
2966 			}
2967 			if (KMEM_SIZE_VALID(ip[1])) {
2968 				ip[0] = KMEM_SIZE_ENCODE(size);
2969 				kmem_error(KMERR_BADSIZE, cp, buf);
2970 			} else {
2971 				kmem_error(KMERR_REDZONE, cp, buf);
2972 			}
2973 			return;
2974 		}
2975 		if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) {
2976 			kmem_error(KMERR_REDZONE, cp, buf);
2977 			return;
2978 		}
2979 		btp->bt_redzone = KMEM_REDZONE_PATTERN;
2980 		if (cp->cache_flags & KMF_LITE) {
2981 			KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count,
2982 			    caller());
2983 		}
2984 	}
2985 	kmem_cache_free(cp, buf);
2986 }
2987 
2988 void *
2989 kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
2990 {
2991 	size_t realsize = size + vmp->vm_quantum;
2992 	void *addr;
2993 
2994 	/*
2995 	 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
2996 	 * vm_quantum will cause integer wraparound.  Check for this, and
2997 	 * blow off the firewall page in this case.  Note that such a
2998 	 * giant allocation (the entire kernel address space) can never
2999 	 * be satisfied, so it will either fail immediately (VM_NOSLEEP)
3000 	 * or sleep forever (VM_SLEEP).  Thus, there is no need for a
3001 	 * corresponding check in kmem_firewall_va_free().
3002 	 */
3003 	if (realsize < size)
3004 		realsize = size;
3005 
3006 	/*
3007 	 * While boot still owns resource management, make sure that this
3008 	 * redzone virtual address allocation is properly accounted for in
3009 	 * OBPs "virtual-memory" "available" lists because we're
3010 	 * effectively claiming them for a red zone.  If we don't do this,
3011 	 * the available lists become too fragmented and too large for the
3012 	 * current boot/kernel memory list interface.
3013 	 */
3014 	addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT);
3015 
3016 	if (addr != NULL && kvseg.s_base == NULL && realsize != size)
3017 		(void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum);
3018 
3019 	return (addr);
3020 }
3021 
3022 void
3023 kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
3024 {
3025 	ASSERT((kvseg.s_base == NULL ?
3026 	    va_to_pfn((char *)addr + size) :
3027 	    hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID);
3028 
3029 	vmem_free(vmp, addr, size + vmp->vm_quantum);
3030 }
3031 
3032 /*
3033  * Try to allocate at least `size' bytes of memory without sleeping or
 * panicking. Return the actual allocated size in `asize'. If all attempts
 * fail, make a final attempt with sleeping or panicking allowed.
3036  */
3037 void *
3038 kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag)
3039 {
3040 	void *p;
3041 
3042 	*asize = P2ROUNDUP(size, KMEM_ALIGN);
3043 	do {
3044 		p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC);
3045 		if (p != NULL)
3046 			return (p);
3047 		*asize += KMEM_ALIGN;
3048 	} while (*asize <= PAGESIZE);
3049 
3050 	*asize = P2ROUNDUP(size, KMEM_ALIGN);
3051 	return (kmem_alloc(*asize, kmflag));
3052 }
3053 
3054 /*
3055  * Reclaim all unused memory from a cache.
3056  */
3057 static void
3058 kmem_cache_reap(kmem_cache_t *cp)
3059 {
3060 	ASSERT(taskq_member(kmem_taskq, curthread));
3061 	cp->cache_reap++;
3062 
3063 	/*
3064 	 * Ask the cache's owner to free some memory if possible.
3065 	 * The idea is to handle things like the inode cache, which
3066 	 * typically sits on a bunch of memory that it doesn't truly
3067 	 * *need*.  Reclaim policy is entirely up to the owner; this
3068 	 * callback is just an advisory plea for help.
3069 	 */
3070 	if (cp->cache_reclaim != NULL) {
3071 		long delta;
3072 
3073 		/*
3074 		 * Reclaimed memory should be reapable (not included in the
3075 		 * depot's working set).
3076 		 */
3077 		delta = cp->cache_full.ml_total;
3078 		cp->cache_reclaim(cp->cache_private);
3079 		delta = cp->cache_full.ml_total - delta;
3080 		if (delta > 0) {
3081 			mutex_enter(&cp->cache_depot_lock);
3082 			cp->cache_full.ml_reaplimit += delta;
3083 			cp->cache_full.ml_min += delta;
3084 			mutex_exit(&cp->cache_depot_lock);
3085 		}
3086 	}
3087 
3088 	kmem_depot_ws_reap(cp);
3089 
3090 	if (cp->cache_defrag != NULL && !kmem_move_noreap) {
3091 		kmem_cache_defrag(cp);
3092 	}
3093 }
3094 
3095 static void
3096 kmem_reap_timeout(void *flag_arg)
3097 {
3098 	uint32_t *flag = (uint32_t *)flag_arg;
3099 
3100 	ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3101 	*flag = 0;
3102 }
3103 
3104 static void
3105 kmem_reap_done(void *flag)
3106 {
3107 	if (!callout_init_done) {
3108 		/* can't schedule a timeout at this point */
3109 		kmem_reap_timeout(flag);
3110 	} else {
3111 		(void) timeout(kmem_reap_timeout, flag, kmem_reap_interval);
3112 	}
3113 }
3114 
3115 static void
3116 kmem_reap_start(void *flag)
3117 {
3118 	ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3119 
3120 	if (flag == &kmem_reaping) {
3121 		kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3122 		/*
3123 		 * if we have segkp under heap, reap segkp cache.
3124 		 */
3125 		if (segkp_fromheap)
3126 			segkp_cache_free();
3127 	}
3128 	else
3129 		kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3130 
3131 	/*
3132 	 * We use taskq_dispatch() to schedule a timeout to clear
3133 	 * the flag so that kmem_reap() becomes self-throttling:
3134 	 * we won't reap again until the current reap completes *and*
3135 	 * at least kmem_reap_interval ticks have elapsed.
3136 	 */
3137 	if (!taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP))
3138 		kmem_reap_done(flag);
3139 }
3140 
3141 static void
3142 kmem_reap_common(void *flag_arg)
3143 {
3144 	uint32_t *flag = (uint32_t *)flag_arg;
3145 
3146 	if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
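	/*
	 * Only one reap of each kind runs at a time: atomically claim the
	 * flag, and bail out if a reap is already pending, the kmem taskq
	 * does not exist yet, or the cache list lock is held.
	 */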
3147 	    atomic_cas_32(flag, 0, 1) != 0)
3148 		return;
3149 
3150 	/*
3151 	 * It may not be kosher to do memory allocation when a reap is called
3152 	 * (for example, if vmem_populate() is in the call chain).  So we
3153 	 * start the reap going with a TQ_NOALLOC dispatch.  If the dispatch
3154 	 * fails, we reset the flag, and the next reap will try again.
3155 	 */
3156 	if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC))
3157 		*flag = 0;
3158 }
3159 
3160 /*
3161  * Reclaim all unused memory from all caches.  Called from the VM system
3162  * when memory gets tight.
3163  */
3164 void
3165 kmem_reap(void)
3166 {
3167 	kmem_reap_common(&kmem_reaping);
3168 }
3169 
3170 /*
 * Reclaim all unused memory from identifier arenas, called when a vmem
 * arena not backed by memory is exhausted.  Since reaping memory-backed
 * caches cannot help with identifier exhaustion, we avoid both a large
 * amount of work and unwanted side-effects from reclaim callbacks.
3175  */
3176 void
3177 kmem_reap_idspace(void)
3178 {
3179 	kmem_reap_common(&kmem_reaping_idspace);
3180 }
3181 
3182 /*
3183  * Purge all magazines from a cache and set its magazine limit to zero.
3184  * All calls are serialized by the kmem_taskq lock, except for the final
3185  * call from kmem_cache_destroy().
3186  */
3187 static void
3188 kmem_cache_magazine_purge(kmem_cache_t *cp)
3189 {
3190 	kmem_cpu_cache_t *ccp;
3191 	kmem_magazine_t *mp, *pmp;
3192 	int rounds, prounds, cpu_seqid;
3193 
3194 	ASSERT(!list_link_active(&cp->cache_link) ||
3195 	    taskq_member(kmem_taskq, curthread));
3196 	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
3197 
3198 	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3199 		ccp = &cp->cache_cpu[cpu_seqid];
3200 
3201 		mutex_enter(&ccp->cc_lock);
3202 		mp = ccp->cc_loaded;
3203 		pmp = ccp->cc_ploaded;
3204 		rounds = ccp->cc_rounds;
3205 		prounds = ccp->cc_prounds;
3206 		ccp->cc_loaded = NULL;
3207 		ccp->cc_ploaded = NULL;
3208 		ccp->cc_rounds = -1;
3209 		ccp->cc_prounds = -1;
3210 		ccp->cc_magsize = 0;
3211 		mutex_exit(&ccp->cc_lock);
3212 
3213 		if (mp)
3214 			kmem_magazine_destroy(cp, mp, rounds);
3215 		if (pmp)
3216 			kmem_magazine_destroy(cp, pmp, prounds);
3217 	}
3218 
3219 	kmem_depot_ws_zero(cp);
3220 	kmem_depot_ws_reap(cp);
3221 }
3222 
3223 /*
3224  * Enable per-cpu magazines on a cache.
3225  */
3226 static void
3227 kmem_cache_magazine_enable(kmem_cache_t *cp)
3228 {
3229 	int cpu_seqid;
3230 
3231 	if (cp->cache_flags & KMF_NOMAGAZINE)
3232 		return;
3233 
3234 	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3235 		kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3236 		mutex_enter(&ccp->cc_lock);
3237 		ccp->cc_magsize = cp->cache_magtype->mt_magsize;
3238 		mutex_exit(&ccp->cc_lock);
3239 	}
3240 
3241 }
3242 
3243 /*
3244  * Reap (almost) everything right now.
3245  */
3246 void
3247 kmem_cache_reap_now(kmem_cache_t *cp)
3248 {
3249 	ASSERT(list_link_active(&cp->cache_link));
3250 
3251 	kmem_depot_ws_zero(cp);
3252 
3253 	(void) taskq_dispatch(kmem_taskq,
3254 	    (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
3255 	taskq_wait(kmem_taskq);
3256 }
3257 
3258 /*
3259  * Recompute a cache's magazine size.  The trade-off is that larger magazines
3260  * provide a higher transfer rate with the depot, while smaller magazines
3261  * reduce memory consumption.  Magazine resizing is an expensive operation;
3262  * it should not be done frequently.
3263  *
3264  * Changes to the magazine size are serialized by the kmem_taskq lock.
3265  *
3266  * Note: at present this only grows the magazine size.  It might be useful
3267  * to allow shrinkage too.
3268  */
3269 static void
3270 kmem_cache_magazine_resize(kmem_cache_t *cp)
3271 {
3272 	kmem_magtype_t *mtp = cp->cache_magtype;
3273 
3274 	ASSERT(taskq_member(kmem_taskq, curthread));
3275 
3276 	if (cp->cache_chunksize < mtp->mt_maxbuf) {
3277 		kmem_cache_magazine_purge(cp);
3278 		mutex_enter(&cp->cache_depot_lock);
3279 		cp->cache_magtype = ++mtp;
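		/*
		 * Bias the contention baseline so that the delta computed by
		 * kmem_cache_update() stays negative until the next update
		 * resets it; this avoids back-to-back resize requests.
		 */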
3280 		cp->cache_depot_contention_prev =
3281 		    cp->cache_depot_contention + INT_MAX;
3282 		mutex_exit(&cp->cache_depot_lock);
3283 		kmem_cache_magazine_enable(cp);
3284 	}
3285 }
3286 
3287 /*
 * Rescale a cache's hash table, so that the table size is roughly
 * proportional to the number of buffers in the cache.  We want the average
 * lookup time to be extremely small.
3290  */
3291 static void
3292 kmem_hash_rescale(kmem_cache_t *cp)
3293 {
3294 	kmem_bufctl_t **old_table, **new_table, *bcp;
3295 	size_t old_size, new_size, h;
3296 
3297 	ASSERT(taskq_member(kmem_taskq, curthread));
3298 
3299 	new_size = MAX(KMEM_HASH_INITIAL,
3300 	    1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
3301 	old_size = cp->cache_hash_mask + 1;
3302 
3303 	if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
3304 		return;
3305 
3306 	new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *),
3307 	    VM_NOSLEEP);
3308 	if (new_table == NULL)
3309 		return;
3310 	bzero(new_table, new_size * sizeof (void *));
3311 
3312 	mutex_enter(&cp->cache_lock);
3313 
3314 	old_size = cp->cache_hash_mask + 1;
3315 	old_table = cp->cache_hash_table;
3316 
3317 	cp->cache_hash_mask = new_size - 1;
3318 	cp->cache_hash_table = new_table;
3319 	cp->cache_rescale++;
3320 
3321 	for (h = 0; h < old_size; h++) {
3322 		bcp = old_table[h];
3323 		while (bcp != NULL) {
3324 			void *addr = bcp->bc_addr;
3325 			kmem_bufctl_t *next_bcp = bcp->bc_next;
3326 			kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
3327 			bcp->bc_next = *hash_bucket;
3328 			*hash_bucket = bcp;
3329 			bcp = next_bcp;
3330 		}
3331 	}
3332 
3333 	mutex_exit(&cp->cache_lock);
3334 
3335 	vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *));
3336 }
3337 
3338 /*
3339  * Perform periodic maintenance on a cache: hash rescaling, depot working-set
3340  * update, magazine resizing, and slab consolidation.
3341  */
3342 static void
3343 kmem_cache_update(kmem_cache_t *cp)
3344 {
3345 	int need_hash_rescale = 0;
3346 	int need_magazine_resize = 0;
3347 
3348 	ASSERT(MUTEX_HELD(&kmem_cache_lock));
3349 
3350 	/*
3351 	 * If the cache has become much larger or smaller than its hash table,
3352 	 * fire off a request to rescale the hash table.
3353 	 */
3354 	mutex_enter(&cp->cache_lock);
3355 
3356 	if ((cp->cache_flags & KMF_HASH) &&
3357 	    (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
3358 	    (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
3359 	    cp->cache_hash_mask > KMEM_HASH_INITIAL)))
3360 		need_hash_rescale = 1;
3361 
3362 	mutex_exit(&cp->cache_lock);
3363 
3364 	/*
3365 	 * Update the depot working set statistics.
3366 	 */
3367 	kmem_depot_ws_update(cp);
3368 
3369 	/*
3370 	 * If there's a lot of contention in the depot,
3371 	 * increase the magazine size.
3372 	 */
3373 	mutex_enter(&cp->cache_depot_lock);
3374 
3375 	if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
3376 	    (int)(cp->cache_depot_contention -
3377 	    cp->cache_depot_contention_prev) > kmem_depot_contention)
3378 		need_magazine_resize = 1;
3379 
3380 	cp->cache_depot_contention_prev = cp->cache_depot_contention;
3381 
3382 	mutex_exit(&cp->cache_depot_lock);
3383 
3384 	if (need_hash_rescale)
3385 		(void) taskq_dispatch(kmem_taskq,
3386 		    (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
3387 
3388 	if (need_magazine_resize)
3389 		(void) taskq_dispatch(kmem_taskq,
3390 		    (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
3391 
3392 	if (cp->cache_defrag != NULL)
3393 		(void) taskq_dispatch(kmem_taskq,
3394 		    (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
3395 }
3396 
3397 static void kmem_update(void *);
3398 
3399 static void
3400 kmem_update_timeout(void *dummy)
3401 {
3402 	(void) timeout(kmem_update, dummy, kmem_reap_interval);
3403 }
3404 
3405 static void
3406 kmem_update(void *dummy)
3407 {
3408 	kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP);
3409 
3410 	/*
3411 	 * We use taskq_dispatch() to reschedule the timeout so that
3412 	 * kmem_update() becomes self-throttling: it won't schedule
3413 	 * new tasks until all previous tasks have completed.
3414 	 */
3415 	if (!taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP))
3416 		kmem_update_timeout(NULL);
3417 }
3418 
3419 static int
3420 kmem_cache_kstat_update(kstat_t *ksp, int rw)
3421 {
3422 	struct kmem_cache_kstat *kmcp = &kmem_cache_kstat;
3423 	kmem_cache_t *cp = ksp->ks_private;
3424 	uint64_t cpu_buf_avail;
3425 	uint64_t buf_avail = 0;
3426 	int cpu_seqid;
3427 	long reap;
3428 
3429 	ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock));
3430 
3431 	if (rw == KSTAT_WRITE)
3432 		return (EACCES);
3433 
3434 	mutex_enter(&cp->cache_lock);
3435 
3436 	kmcp->kmc_alloc_fail.value.ui64		= cp->cache_alloc_fail;
3437 	kmcp->kmc_alloc.value.ui64		= cp->cache_slab_alloc;
3438 	kmcp->kmc_free.value.ui64		= cp->cache_slab_free;
3439 	kmcp->kmc_slab_alloc.value.ui64		= cp->cache_slab_alloc;
3440 	kmcp->kmc_slab_free.value.ui64		= cp->cache_slab_free;
3441 
3442 	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3443 		kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3444 
3445 		mutex_enter(&ccp->cc_lock);
3446 
3447 		cpu_buf_avail = 0;
3448 		if (ccp->cc_rounds > 0)
3449 			cpu_buf_avail += ccp->cc_rounds;
3450 		if (ccp->cc_prounds > 0)
3451 			cpu_buf_avail += ccp->cc_prounds;
3452 
3453 		kmcp->kmc_alloc.value.ui64	+= ccp->cc_alloc;
3454 		kmcp->kmc_free.value.ui64	+= ccp->cc_free;
3455 		buf_avail			+= cpu_buf_avail;
3456 
3457 		mutex_exit(&ccp->cc_lock);
3458 	}
3459 
3460 	mutex_enter(&cp->cache_depot_lock);
3461 
3462 	kmcp->kmc_depot_alloc.value.ui64	= cp->cache_full.ml_alloc;
3463 	kmcp->kmc_depot_free.value.ui64		= cp->cache_empty.ml_alloc;
3464 	kmcp->kmc_depot_contention.value.ui64	= cp->cache_depot_contention;
3465 	kmcp->kmc_full_magazines.value.ui64	= cp->cache_full.ml_total;
3466 	kmcp->kmc_empty_magazines.value.ui64	= cp->cache_empty.ml_total;
3467 	kmcp->kmc_magazine_size.value.ui64	=
3468 	    (cp->cache_flags & KMF_NOMAGAZINE) ?
3469 	    0 : cp->cache_magtype->mt_magsize;
3470 
3471 	kmcp->kmc_alloc.value.ui64		+= cp->cache_full.ml_alloc;
3472 	kmcp->kmc_free.value.ui64		+= cp->cache_empty.ml_alloc;
3473 	buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
3474 
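	/*
	 * Estimate how many full magazines a reap could free: the depot
	 * working-set minimum, capped by the reap limit and by the number
	 * of full magazines actually present.
	 */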
3475 	reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
3476 	reap = MIN(reap, cp->cache_full.ml_total);
3477 
3478 	mutex_exit(&cp->cache_depot_lock);
3479 
3480 	kmcp->kmc_buf_size.value.ui64	= cp->cache_bufsize;
3481 	kmcp->kmc_align.value.ui64	= cp->cache_align;
3482 	kmcp->kmc_chunk_size.value.ui64	= cp->cache_chunksize;
3483 	kmcp->kmc_slab_size.value.ui64	= cp->cache_slabsize;
3484 	kmcp->kmc_buf_constructed.value.ui64 = buf_avail;
3485 	buf_avail += cp->cache_bufslab;
3486 	kmcp->kmc_buf_avail.value.ui64	= buf_avail;
3487 	kmcp->kmc_buf_inuse.value.ui64	= cp->cache_buftotal - buf_avail;
3488 	kmcp->kmc_buf_total.value.ui64	= cp->cache_buftotal;
3489 	kmcp->kmc_buf_max.value.ui64	= cp->cache_bufmax;
3490 	kmcp->kmc_slab_create.value.ui64	= cp->cache_slab_create;
3491 	kmcp->kmc_slab_destroy.value.ui64	= cp->cache_slab_destroy;
3492 	kmcp->kmc_hash_size.value.ui64	= (cp->cache_flags & KMF_HASH) ?
3493 	    cp->cache_hash_mask + 1 : 0;
3494 	kmcp->kmc_hash_lookup_depth.value.ui64	= cp->cache_lookup_depth;
3495 	kmcp->kmc_hash_rescale.value.ui64	= cp->cache_rescale;
3496 	kmcp->kmc_vmem_source.value.ui64	= cp->cache_arena->vm_id;
3497 	kmcp->kmc_reap.value.ui64	= cp->cache_reap;
3498 
3499 	if (cp->cache_defrag == NULL) {
3500 		kmcp->kmc_move_callbacks.value.ui64	= 0;
3501 		kmcp->kmc_move_yes.value.ui64		= 0;
3502 		kmcp->kmc_move_no.value.ui64		= 0;
3503 		kmcp->kmc_move_later.value.ui64		= 0;
3504 		kmcp->kmc_move_dont_need.value.ui64	= 0;
3505 		kmcp->kmc_move_dont_know.value.ui64	= 0;
3506 		kmcp->kmc_move_hunt_found.value.ui64	= 0;
3507 		kmcp->kmc_move_slabs_freed.value.ui64	= 0;
3508 		kmcp->kmc_defrag.value.ui64		= 0;
3509 		kmcp->kmc_scan.value.ui64		= 0;
3510 		kmcp->kmc_move_reclaimable.value.ui64	= 0;
3511 	} else {
3512 		int64_t reclaimable;
3513 
3514 		kmem_defrag_t *kd = cp->cache_defrag;
3515 		kmcp->kmc_move_callbacks.value.ui64	= kd->kmd_callbacks;
3516 		kmcp->kmc_move_yes.value.ui64		= kd->kmd_yes;
3517 		kmcp->kmc_move_no.value.ui64		= kd->kmd_no;
3518 		kmcp->kmc_move_later.value.ui64		= kd->kmd_later;
3519 		kmcp->kmc_move_dont_need.value.ui64	= kd->kmd_dont_need;
3520 		kmcp->kmc_move_dont_know.value.ui64	= kd->kmd_dont_know;
3521 		kmcp->kmc_move_hunt_found.value.ui64	= kd->kmd_hunt_found;
3522 		kmcp->kmc_move_slabs_freed.value.ui64	= kd->kmd_slabs_freed;
3523 		kmcp->kmc_defrag.value.ui64		= kd->kmd_defrags;
3524 		kmcp->kmc_scan.value.ui64		= kd->kmd_scans;
3525 
3526 		reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1);
3527 		reclaimable = MAX(reclaimable, 0);
3528 		reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
3529 		kmcp->kmc_move_reclaimable.value.ui64	= reclaimable;
3530 	}
3531 
3532 	mutex_exit(&cp->cache_lock);
3533 	return (0);
3534 }
3535 
3536 /*
3537  * Return a named statistic about a particular cache.
3538  * This shouldn't be called very often, so it's currently designed for
3539  * simplicity (leverages existing kstat support) rather than efficiency.
3540  */
3541 uint64_t
3542 kmem_cache_stat(kmem_cache_t *cp, char *name)
3543 {
3544 	int i;
3545 	kstat_t *ksp = cp->cache_kstat;
3546 	kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat;
3547 	uint64_t value = 0;
3548 
3549 	if (ksp != NULL) {
3550 		mutex_enter(&kmem_cache_kstat_lock);
3551 		(void) kmem_cache_kstat_update(ksp, KSTAT_READ);
3552 		for (i = 0; i < ksp->ks_ndata; i++) {
3553 			if (strcmp(knp[i].name, name) == 0) {
3554 				value = knp[i].value.ui64;
3555 				break;
3556 			}
3557 		}
3558 		mutex_exit(&kmem_cache_kstat_lock);
3559 	}
3560 	return (value);
3561 }
3562 
3563 /*
3564  * Return an estimate of currently available kernel heap memory.
 * On 32-bit systems, physical memory may exceed virtual memory, so
 * we just truncate the result at 1GB.
3567  */
3568 size_t
3569 kmem_avail(void)
3570 {
3571 	spgcnt_t rmem = availrmem - tune.t_minarmem;
3572 	spgcnt_t fmem = freemem - minfree;
3573 
3574 	return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0),
3575 	    1 << (30 - PAGESHIFT))));
3576 }
3577 
3578 /*
3579  * Return the maximum amount of memory that is (in theory) allocatable
 * from the heap. This may be used as an estimate only, since there
 * is no guarantee that this space will still be available when an
 * allocation request is made, nor that it can be allocated in one
 * contiguous request, due to kernel heap fragmentation.
3584  */
3585 size_t
3586 kmem_maxavail(void)
3587 {
3588 	spgcnt_t pmem = availrmem - tune.t_minarmem;
3589 	spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE));
3590 
3591 	return ((size_t)ptob(MAX(MIN(pmem, vmem), 0)));
3592 }
3593 
3594 /*
3595  * Indicate whether memory-intensive kmem debugging is enabled.
3596  */
3597 int
3598 kmem_debugging(void)
3599 {
3600 	return (kmem_flags & (KMF_AUDIT | KMF_REDZONE));
3601 }
3602 
3603 /* binning function, sorts finely at the two extremes */
3604 #define	KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift)				\
3605 	((((sp)->slab_refcnt <= (binshift)) ||				\
3606 	    (((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift)))	\
3607 	    ? -(sp)->slab_refcnt					\
3608 	    : -((binshift) + ((sp)->slab_refcnt >> (binshift))))
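
/*
 * For example, with binshift = 3, a slab with only a few allocated buffers
 * (slab_refcnt <= 3) or only a few free buffers sorts finely by its exact
 * refcnt.  A slab in between, say slab_refcnt = 20, falls into the coarse
 * bin -(3 + (20 >> 3)) = -5, shared with any other such slab whose refcnt
 * is 16..23.
 */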
3609 
3610 /*
 * Minimizing the number of partial slabs on the freelist minimizes
 * fragmentation (the ratio of unused buffers held by the slab layer). There
 * are two ways to get a slab off the freelist: 1) free all the buffers on
 * the slab, and 2) allocate all the buffers on the slab. It follows that we
 * want the most-used slabs at the front of the list, where they have the
 * best chance of being completely allocated, and the least-used slabs at a
 * safe distance from the front, to improve the odds that the few remaining
 * buffers will all be freed before another allocation can tie up the slab.
 * For that reason a slab with a higher slab_refcnt sorts less than a slab
 * with a lower slab_refcnt.
3621  *
3622  * However, if a slab has at least one buffer that is deemed unfreeable, we
3623  * would rather have that slab at the front of the list regardless of
3624  * slab_refcnt, since even one unfreeable buffer makes the entire slab
3625  * unfreeable. If the client returns KMEM_CBRC_NO in response to a cache_move()
3626  * callback, the slab is marked unfreeable for as long as it remains on the
3627  * freelist.
3628  */
3629 static int
3630 kmem_partial_slab_cmp(const void *p0, const void *p1)
3631 {
3632 	const kmem_cache_t *cp;
3633 	const kmem_slab_t *s0 = p0;
3634 	const kmem_slab_t *s1 = p1;
3635 	int w0, w1;
3636 	size_t binshift;
3637 
3638 	ASSERT(KMEM_SLAB_IS_PARTIAL(s0));
3639 	ASSERT(KMEM_SLAB_IS_PARTIAL(s1));
3640 	ASSERT(s0->slab_cache == s1->slab_cache);
3641 	cp = s1->slab_cache;
3642 	ASSERT(MUTEX_HELD(&cp->cache_lock));
3643 	binshift = cp->cache_partial_binshift;
3644 
3645 	/* weight of first slab */
3646 	w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift);
3647 	if (s0->slab_flags & KMEM_SLAB_NOMOVE) {
3648 		w0 -= cp->cache_maxchunks;
3649 	}
3650 
3651 	/* weight of second slab */
3652 	w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift);
3653 	if (s1->slab_flags & KMEM_SLAB_NOMOVE) {
3654 		w1 -= cp->cache_maxchunks;
3655 	}
3656 
3657 	if (w0 < w1)
3658 		return (-1);
3659 	if (w0 > w1)
3660 		return (1);
3661 
3662 	/* compare pointer values */
3663 	if ((uintptr_t)s0 < (uintptr_t)s1)
3664 		return (-1);
3665 	if ((uintptr_t)s0 > (uintptr_t)s1)
3666 		return (1);
3667 
3668 	return (0);
3669 }
3670 
3671 /*
3672  * It must be valid to call the destructor (if any) on a newly created object.
3673  * That is, the constructor (if any) must leave the object in a valid state for
3674  * the destructor.
3675  */
3676 kmem_cache_t *
3677 kmem_cache_create(
3678 	char *name,		/* descriptive name for this cache */
3679 	size_t bufsize,		/* size of the objects it manages */
3680 	size_t align,		/* required object alignment */
3681 	int (*constructor)(void *, void *, int), /* object constructor */
3682 	void (*destructor)(void *, void *),	/* object destructor */
3683 	void (*reclaim)(void *), /* memory reclaim callback */
3684 	void *private,		/* pass-thru arg for constr/destr/reclaim */
3685 	vmem_t *vmp,		/* vmem source for slab allocation */
3686 	int cflags)		/* cache creation flags */
3687 {
3688 	int cpu_seqid;
3689 	size_t chunksize;
3690 	kmem_cache_t *cp;
3691 	kmem_magtype_t *mtp;
3692 	size_t csize = KMEM_CACHE_SIZE(max_ncpus);
3693 
3694 #ifdef	DEBUG
3695 	/*
3696 	 * Cache names should conform to the rules for valid C identifiers
3697 	 */
3698 	if (!strident_valid(name)) {
3699 		cmn_err(CE_CONT,
3700 		    "kmem_cache_create: '%s' is an invalid cache name\n"
3701 		    "cache names must conform to the rules for "
3702 		    "C identifiers\n", name);
3703 	}
3704 #endif	/* DEBUG */
3705 
3706 	if (vmp == NULL)
3707 		vmp = kmem_default_arena;
3708 
3709 	/*
3710 	 * If this kmem cache has an identifier vmem arena as its source, mark
3711 	 * it such to allow kmem_reap_idspace().
3712 	 */
3713 	ASSERT(!(cflags & KMC_IDENTIFIER));   /* consumer should not set this */
3714 	if (vmp->vm_cflags & VMC_IDENTIFIER)
3715 		cflags |= KMC_IDENTIFIER;
3716 
3717 	/*
3718 	 * Get a kmem_cache structure.  We arrange that cp->cache_cpu[]
3719 	 * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent
3720 	 * false sharing of per-CPU data.
3721 	 */
3722 	cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
3723 	    P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP);
3724 	bzero(cp, csize);
3725 	list_link_init(&cp->cache_link);
3726 
3727 	if (align == 0)
3728 		align = KMEM_ALIGN;
3729 
3730 	/*
3731 	 * If we're not at least KMEM_ALIGN aligned, we can't use free
3732 	 * memory to hold bufctl information (because we can't safely
3733 	 * perform word loads and stores on it).
3734 	 */
3735 	if (align < KMEM_ALIGN)
3736 		cflags |= KMC_NOTOUCH;
3737 
3738 	if (!ISP2(align) || align > vmp->vm_quantum)
3739 		panic("kmem_cache_create: bad alignment %lu", align);
3740 
3741 	mutex_enter(&kmem_flags_lock);
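	/*
	 * KMF_RANDOMIZE: step through the KMF_RANDOM debug-flag
	 * combinations, advancing by one on each cache creation, so that
	 * successive caches exercise different debug settings.
	 */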
3742 	if (kmem_flags & KMF_RANDOMIZE)
3743 		kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) |
3744 		    KMF_RANDOMIZE;
3745 	cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
3746 	mutex_exit(&kmem_flags_lock);
3747 
3748 	/*
3749 	 * Make sure all the various flags are reasonable.
3750 	 */
3751 	ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH));
3752 
3753 	if (cp->cache_flags & KMF_LITE) {
3754 		if (bufsize >= kmem_lite_minsize &&
3755 		    align <= kmem_lite_maxalign &&
3756 		    P2PHASE(bufsize, kmem_lite_maxalign) != 0) {
3757 			cp->cache_flags |= KMF_BUFTAG;
3758 			cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3759 		} else {
3760 			cp->cache_flags &= ~KMF_DEBUG;
3761 		}
3762 	}
3763 
3764 	if (cp->cache_flags & KMF_DEADBEEF)
3765 		cp->cache_flags |= KMF_REDZONE;
3766 
3767 	if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
3768 		cp->cache_flags |= KMF_NOMAGAZINE;
3769 
3770 	if (cflags & KMC_NODEBUG)
3771 		cp->cache_flags &= ~KMF_DEBUG;
3772 
3773 	if (cflags & KMC_NOTOUCH)
3774 		cp->cache_flags &= ~KMF_TOUCH;
3775 
3776 	if (cflags & KMC_PREFILL)
3777 		cp->cache_flags |= KMF_PREFILL;
3778 
3779 	if (cflags & KMC_NOHASH)
3780 		cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3781 
3782 	if (cflags & KMC_NOMAGAZINE)
3783 		cp->cache_flags |= KMF_NOMAGAZINE;
3784 
3785 	if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
3786 		cp->cache_flags |= KMF_REDZONE;
3787 
3788 	if (!(cp->cache_flags & KMF_AUDIT))
3789 		cp->cache_flags &= ~KMF_CONTENTS;
3790 
3791 	if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
3792 	    !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
3793 		cp->cache_flags |= KMF_FIREWALL;
3794 
3795 	if (vmp != kmem_default_arena || kmem_firewall_arena == NULL)
3796 		cp->cache_flags &= ~KMF_FIREWALL;
3797 
3798 	if (cp->cache_flags & KMF_FIREWALL) {
3799 		cp->cache_flags &= ~KMF_BUFTAG;
3800 		cp->cache_flags |= KMF_NOMAGAZINE;
3801 		ASSERT(vmp == kmem_default_arena);
3802 		vmp = kmem_firewall_arena;
3803 	}
3804 
3805 	/*
3806 	 * Set cache properties.
3807 	 */
3808 	(void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
3809 	strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
3810 	cp->cache_bufsize = bufsize;
3811 	cp->cache_align = align;
3812 	cp->cache_constructor = constructor;
3813 	cp->cache_destructor = destructor;
3814 	cp->cache_reclaim = reclaim;
3815 	cp->cache_private = private;
3816 	cp->cache_arena = vmp;
3817 	cp->cache_cflags = cflags;
3818 
3819 	/*
3820 	 * Determine the chunk size.
3821 	 */
3822 	chunksize = bufsize;
3823 
3824 	if (align >= KMEM_ALIGN) {
3825 		chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN);
3826 		cp->cache_bufctl = chunksize - KMEM_ALIGN;
3827 	}
3828 
3829 	if (cp->cache_flags & KMF_BUFTAG) {
3830 		cp->cache_bufctl = chunksize;
3831 		cp->cache_buftag = chunksize;
3832 		if (cp->cache_flags & KMF_LITE)
3833 			chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count);
3834 		else
3835 			chunksize += sizeof (kmem_buftag_t);
3836 	}
3837 
3838 	if (cp->cache_flags & KMF_DEADBEEF) {
3839 		cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
3840 		if (cp->cache_flags & KMF_LITE)
3841 			cp->cache_verify = sizeof (uint64_t);
3842 	}
3843 
3844 	cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
3845 
3846 	cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
3847 
3848 	/*
3849 	 * Now that we know the chunk size, determine the optimal slab size.
3850 	 */
3851 	if (vmp == kmem_firewall_arena) {
3852 		cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
3853 		cp->cache_mincolor = cp->cache_slabsize - chunksize;
3854 		cp->cache_maxcolor = cp->cache_mincolor;
3855 		cp->cache_flags |= KMF_HASH;
3856 		ASSERT(!(cp->cache_flags & KMF_BUFTAG));
3857 	} else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) &&
3858 	    !(cp->cache_flags & KMF_AUDIT) &&
3859 	    chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) {
3860 		cp->cache_slabsize = vmp->vm_quantum;
3861 		cp->cache_mincolor = 0;
3862 		cp->cache_maxcolor =
3863 		    (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
3864 		ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
3865 		ASSERT(!(cp->cache_flags & KMF_AUDIT));
3866 	} else {
3867 		size_t chunks, bestfit, waste, slabsize;
3868 		size_t minwaste = LONG_MAX;
3869 
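		/*
		 * Try slab sizes holding from 1 to KMEM_VOID_FRACTION
		 * chunks, each rounded up to the arena quantum, and pick
		 * the size that wastes the least space per chunk.
		 */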
3870 		for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) {
3871 			slabsize = P2ROUNDUP(chunksize * chunks,
3872 			    vmp->vm_quantum);
3873 			chunks = slabsize / chunksize;
3874 			waste = (slabsize % chunksize) / chunks;
3875 			if (waste < minwaste) {
3876 				minwaste = waste;
3877 				bestfit = slabsize;
3878 			}
3879 		}
3880 		if (cflags & KMC_QCACHE)
3881 			bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max);
3882 		cp->cache_slabsize = bestfit;
3883 		cp->cache_mincolor = 0;
3884 		cp->cache_maxcolor = bestfit % chunksize;
3885 		cp->cache_flags |= KMF_HASH;
3886 	}
3887 
3888 	cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
3889 	cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
3890 
3891 	/*
3892 	 * Disallowing prefill when either the DEBUG or HASH flag is set or when
3893 	 * there is a constructor avoids some tricky issues with debug setup
3894 	 * that may be revisited later. We cannot allow prefill in a
3895 	 * metadata cache because of potential recursion.
3896 	 */
3897 	if (vmp == kmem_msb_arena ||
3898 	    cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
3899 	    cp->cache_constructor != NULL)
3900 		cp->cache_flags &= ~KMF_PREFILL;
3901 
3902 	if (cp->cache_flags & KMF_HASH) {
3903 		ASSERT(!(cflags & KMC_NOHASH));
3904 		cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
3905 		    kmem_bufctl_audit_cache : kmem_bufctl_cache;
3906 	}
3907 
3908 	if (cp->cache_maxcolor >= vmp->vm_quantum)
3909 		cp->cache_maxcolor = vmp->vm_quantum - 1;
3910 
3911 	cp->cache_color = cp->cache_mincolor;
3912 
3913 	/*
3914 	 * Initialize the rest of the slab layer.
3915 	 */
3916 	mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
3917 
3918 	avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
3919 	    sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3920 	/* LINTED: E_TRUE_LOGICAL_EXPR */
3921 	ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
3922 	/* reuse partial slab AVL linkage for complete slab list linkage */
3923 	list_create(&cp->cache_complete_slabs,
3924 	    sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3925 
3926 	if (cp->cache_flags & KMF_HASH) {
3927 		cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
3928 		    KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP);
3929 		bzero(cp->cache_hash_table,
3930 		    KMEM_HASH_INITIAL * sizeof (void *));
3931 		cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
3932 		cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
3933 	}
3934 
3935 	/*
3936 	 * Initialize the depot.
3937 	 */
3938 	mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
3939 
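	/*
	 * Select the magazine type for this chunk size: skip magazine types
	 * whose minimum buffer size is at least chunksize and use the first
	 * one below it.
	 */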
3940 	for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
3941 		continue;
3942 
3943 	cp->cache_magtype = mtp;
3944 
3945 	/*
3946 	 * Initialize the CPU layer.
3947 	 */
3948 	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3949 		kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3950 		mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL);
3951 		ccp->cc_flags = cp->cache_flags;
3952 		ccp->cc_rounds = -1;
3953 		ccp->cc_prounds = -1;
3954 	}
3955 
3956 	/*
3957 	 * Create the cache's kstats.
3958 	 */
3959 	if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
3960 	    "kmem_cache", KSTAT_TYPE_NAMED,
3961 	    sizeof (kmem_cache_kstat) / sizeof (kstat_named_t),
3962 	    KSTAT_FLAG_VIRTUAL)) != NULL) {
3963 		cp->cache_kstat->ks_data = &kmem_cache_kstat;
3964 		cp->cache_kstat->ks_update = kmem_cache_kstat_update;
3965 		cp->cache_kstat->ks_private = cp;
3966 		cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
3967 		kstat_install(cp->cache_kstat);
3968 	}
3969 
3970 	/*
3971 	 * Add the cache to the global list.  This makes it visible
3972 	 * to kmem_update(), so the cache must be ready for business.
3973 	 */
3974 	mutex_enter(&kmem_cache_lock);
3975 	list_insert_tail(&kmem_caches, cp);
3976 	mutex_exit(&kmem_cache_lock);
3977 
3978 	if (kmem_ready)
3979 		kmem_cache_magazine_enable(cp);
3980 
3981 	return (cp);
3982 }
3983 
3984 static int
3985 kmem_move_cmp(const void *buf, const void *p)
3986 {
3987 	const kmem_move_t *kmm = p;
3988 	uintptr_t v1 = (uintptr_t)buf;
3989 	uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf;
3990 	return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0));
3991 }
3992 
3993 static void
3994 kmem_reset_reclaim_threshold(kmem_defrag_t *kmd)
3995 {
3996 	kmd->kmd_reclaim_numer = 1;
3997 }
3998 
3999 /*
4000  * Initially, when choosing candidate slabs for buffers to move, we want to be
4001  * very selective and take only slabs that are less than
4002  * (1 / KMEM_VOID_FRACTION) allocated. If we have difficulty finding candidate
4003  * slabs, then we raise the allocation ceiling incrementally. The reclaim
4004  * threshold is reset to (1 / KMEM_VOID_FRACTION) as soon as the cache is no
4005  * longer fragmented.
4006  */
4007 static void
4008 kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction)
4009 {
4010 	if (direction > 0) {
4011 		/* make it easier to find a candidate slab */
4012 		if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) {
4013 			kmd->kmd_reclaim_numer++;
4014 		}
4015 	} else {
4016 		/* be more selective */
4017 		if (kmd->kmd_reclaim_numer > 1) {
4018 			kmd->kmd_reclaim_numer--;
4019 		}
4020 	}
4021 }
4022 
4023 void
4024 kmem_cache_set_move(kmem_cache_t *cp,
4025     kmem_cbrc_t (*move)(void *, void *, size_t, void *))
4026 {
4027 	kmem_defrag_t *defrag;
4028 
4029 	ASSERT(move != NULL);
4030 	/*
4031 	 * The consolidator does not support NOTOUCH caches because kmem cannot
4032 	 * initialize their slabs with the 0xbaddcafe memory pattern, which sets
4033 	 * a low order bit usable by clients to distinguish uninitialized memory
4034 	 * from known objects (see kmem_slab_create).
4035 	 */
4036 	ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
4037 	ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
4038 
4039 	/*
4040 	 * We should not be holding anyone's cache lock when calling
4041 	 * kmem_cache_alloc(), so allocate in all cases before acquiring the
4042 	 * lock.
4043 	 */
4044 	defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP);
4045 
4046 	mutex_enter(&cp->cache_lock);
4047 
4048 	if (KMEM_IS_MOVABLE(cp)) {
4049 		if (cp->cache_move == NULL) {
4050 			ASSERT(cp->cache_slab_alloc == 0);
4051 
4052 			cp->cache_defrag = defrag;
4053 			defrag = NULL; /* nothing to free */
4054 			bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
4055 			avl_create(&cp->cache_defrag->kmd_moves_pending,
4056 			    kmem_move_cmp, sizeof (kmem_move_t),
4057 			    offsetof(kmem_move_t, kmm_entry));
4058 			/* LINTED: E_TRUE_LOGICAL_EXPR */
4059 			ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
4060 			/* reuse the slab's AVL linkage for deadlist linkage */
4061 			list_create(&cp->cache_defrag->kmd_deadlist,
4062 			    sizeof (kmem_slab_t),
4063 			    offsetof(kmem_slab_t, slab_link));
4064 			kmem_reset_reclaim_threshold(cp->cache_defrag);
4065 		}
4066 		cp->cache_move = move;
4067 	}
4068 
4069 	mutex_exit(&cp->cache_lock);
4070 
4071 	if (defrag != NULL) {
4072 		kmem_cache_free(kmem_defrag_cache, defrag); /* unused */
4073 	}
4074 }
4075 
4076 void
4077 kmem_cache_destroy(kmem_cache_t *cp)
4078 {
4079 	int cpu_seqid;
4080 
4081 	/*
4082 	 * Remove the cache from the global cache list so that no one else
4083 	 * can schedule tasks on its behalf, wait for any pending tasks to
4084 	 * complete, purge the cache, and then destroy it.
4085 	 */
4086 	mutex_enter(&kmem_cache_lock);
4087 	list_remove(&kmem_caches, cp);
4088 	mutex_exit(&kmem_cache_lock);
4089 
4090 	if (kmem_taskq != NULL)
4091 		taskq_wait(kmem_taskq);
4092 	if (kmem_move_taskq != NULL)
4093 		taskq_wait(kmem_move_taskq);
4094 
4095 	kmem_cache_magazine_purge(cp);
4096 
4097 	mutex_enter(&cp->cache_lock);
4098 	if (cp->cache_buftotal != 0)
4099 		cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty",
4100 		    cp->cache_name, (void *)cp);
4101 	if (cp->cache_defrag != NULL) {
4102 		avl_destroy(&cp->cache_defrag->kmd_moves_pending);
4103 		list_destroy(&cp->cache_defrag->kmd_deadlist);
4104 		kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
4105 		cp->cache_defrag = NULL;
4106 	}
4107 	/*
4108 	 * The cache is now dead.  There should be no further activity.  We
4109 	 * enforce this by setting land mines in the constructor, destructor,
4110 	 * reclaim, and move routines that induce a kernel text fault if
4111 	 * invoked.
4112 	 */
4113 	cp->cache_constructor = (int (*)(void *, void *, int))1;
4114 	cp->cache_destructor = (void (*)(void *, void *))2;
4115 	cp->cache_reclaim = (void (*)(void *))3;
4116 	cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
4117 	mutex_exit(&cp->cache_lock);
4118 
4119 	kstat_delete(cp->cache_kstat);
4120 
4121 	if (cp->cache_hash_table != NULL)
4122 		vmem_free(kmem_hash_arena, cp->cache_hash_table,
4123 		    (cp->cache_hash_mask + 1) * sizeof (void *));
4124 
4125 	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++)
4126 		mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
4127 
4128 	mutex_destroy(&cp->cache_depot_lock);
4129 	mutex_destroy(&cp->cache_lock);
4130 
4131 	vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
4132 }
4133 
4134 /*ARGSUSED*/
4135 static int
4136 kmem_cpu_setup(cpu_setup_t what, int id, void *arg)
4137 {
4138 	ASSERT(MUTEX_HELD(&cpu_lock));
4139 	if (what == CPU_UNCONFIG) {
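		/*
		 * A CPU is being unconfigured: flush every cache's per-CPU
		 * magazines back to the depot, then re-enable magazines for
		 * the remaining CPUs.
		 */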
4140 		kmem_cache_applyall(kmem_cache_magazine_purge,
4141 		    kmem_taskq, TQ_SLEEP);
4142 		kmem_cache_applyall(kmem_cache_magazine_enable,
4143 		    kmem_taskq, TQ_SLEEP);
4144 	}
4145 	return (0);
4146 }
4147 
4148 static void
4149 kmem_alloc_caches_create(const int *array, size_t count,
4150     kmem_cache_t **alloc_table, size_t maxbuf, uint_t shift)
4151 {
4152 	char name[KMEM_CACHE_NAMELEN + 1];
4153 	size_t table_unit = (1 << shift); /* range of one alloc_table entry */
4154 	size_t size = table_unit;
4155 	int i;
4156 
4157 	for (i = 0; i < count; i++) {
4158 		size_t cache_size = array[i];
4159 		size_t align = KMEM_ALIGN;
4160 		kmem_cache_t *cp;
4161 
4162 		/* if the table has an entry for maxbuf, we're done */
4163 		if (size > maxbuf)
4164 			break;
4165 
4166 		/* cache size must be a multiple of the table unit */
4167 		ASSERT(P2PHASE(cache_size, table_unit) == 0);
4168 
4169 		/*
4170 		 * If they allocate a multiple of the coherency granularity,
4171 		 * they get a coherency-granularity-aligned address.
4172 		 */
4173 		if (IS_P2ALIGNED(cache_size, 64))
4174 			align = 64;
4175 		if (IS_P2ALIGNED(cache_size, PAGESIZE))
4176 			align = PAGESIZE;
4177 		(void) snprintf(name, sizeof (name),
4178 		    "kmem_alloc_%lu", cache_size);
4179 		cp = kmem_cache_create(name, cache_size, align,
4180 		    NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC);
4181 
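		/*
		 * Point every alloc_table slot from the previous cache size
		 * up to this cache size at the new cache, so that requests
		 * in that range round up to cache_size.
		 */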
4182 		while (size <= cache_size) {
4183 			alloc_table[(size - 1) >> shift] = cp;
4184 			size += table_unit;
4185 		}
4186 	}
4187 
4188 	ASSERT(size > maxbuf);		/* i.e. maxbuf <= max(cache_size) */
4189 }
4190 
4191 static void
4192 kmem_cache_init(int pass, int use_large_pages)
4193 {
4194 	int i;
4195 	size_t maxbuf;
4196 	kmem_magtype_t *mtp;
4197 
4198 	for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) {
4199 		char name[KMEM_CACHE_NAMELEN + 1];
4200 
4201 		mtp = &kmem_magtype[i];
4202 		(void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize);
4203 		mtp->mt_cache = kmem_cache_create(name,
4204 		    (mtp->mt_magsize + 1) * sizeof (void *),
4205 		    mtp->mt_align, NULL, NULL, NULL, NULL,
4206 		    kmem_msb_arena, KMC_NOHASH);
4207 	}
4208 
4209 	kmem_slab_cache = kmem_cache_create("kmem_slab_cache",
4210 	    sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL,
4211 	    kmem_msb_arena, KMC_NOHASH);
4212 
4213 	kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache",
4214 	    sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL,
4215 	    kmem_msb_arena, KMC_NOHASH);
4216 
4217 	kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache",
4218 	    sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL,
4219 	    kmem_msb_arena, KMC_NOHASH);
4220 
4221 	if (pass == 2) {
4222 		kmem_va_arena = vmem_create("kmem_va",
4223 		    NULL, 0, PAGESIZE,
4224 		    vmem_alloc, vmem_free, heap_arena,
4225 		    8 * PAGESIZE, VM_SLEEP);
4226 
4227 		if (use_large_pages) {
4228 			kmem_default_arena = vmem_xcreate("kmem_default",
4229 			    NULL, 0, PAGESIZE,
4230 			    segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena,
4231 			    0, VMC_DUMPSAFE | VM_SLEEP);
4232 		} else {
4233 			kmem_default_arena = vmem_create("kmem_default",
4234 			    NULL, 0, PAGESIZE,
4235 			    segkmem_alloc, segkmem_free, kmem_va_arena,
4236 			    0, VMC_DUMPSAFE | VM_SLEEP);
4237 		}
4238 
4239 		/* Figure out what our maximum cache size is */
4240 		maxbuf = kmem_max_cached;
4241 		if (maxbuf <= KMEM_MAXBUF) {
4242 			maxbuf = 0;
4243 			kmem_max_cached = KMEM_MAXBUF;
4244 		} else {
4245 			size_t size = 0;
4246 			size_t max =
4247 			    sizeof (kmem_big_alloc_sizes) / sizeof (int);
4248 			/*
4249 			 * Round maxbuf up to an existing cache size.  If maxbuf
4250 			 * is larger than the largest cache, we truncate it to
4251 			 * the largest cache's size.
4252 			 */
4253 			for (i = 0; i < max; i++) {
4254 				size = kmem_big_alloc_sizes[i];
4255 				if (maxbuf <= size)
4256 					break;
4257 			}
4258 			kmem_max_cached = maxbuf = size;
4259 		}
4260 
4261 		/*
4262 		 * The big alloc table may not be completely overwritten, so
4263 		 * we clear out any stale cache pointers from the first pass.
4264 		 */
4265 		bzero(kmem_big_alloc_table, sizeof (kmem_big_alloc_table));
4266 	} else {
4267 		/*
4268 		 * During the first pass, the kmem_alloc_* caches
4269 		 * are treated as metadata.
4270 		 */
4271 		kmem_default_arena = kmem_msb_arena;
4272 		maxbuf = KMEM_BIG_MAXBUF_32BIT;
4273 	}
4274 
4275 	/*
4276 	 * Set up the default caches to back kmem_alloc()
4277 	 */
4278 	kmem_alloc_caches_create(
4279 	    kmem_alloc_sizes, sizeof (kmem_alloc_sizes) / sizeof (int),
4280 	    kmem_alloc_table, KMEM_MAXBUF, KMEM_ALIGN_SHIFT);
4281 
4282 	kmem_alloc_caches_create(
4283 	    kmem_big_alloc_sizes, sizeof (kmem_big_alloc_sizes) / sizeof (int),
4284 	    kmem_big_alloc_table, maxbuf, KMEM_BIG_SHIFT);
4285 
4286 	kmem_big_alloc_table_max = maxbuf >> KMEM_BIG_SHIFT;
4287 }
4288 
4289 void
4290 kmem_init(void)
4291 {
4292 	kmem_cache_t *cp;
4293 	int old_kmem_flags = kmem_flags;
4294 	int use_large_pages = 0;
4295 	size_t maxverify, minfirewall;
4296 
4297 	kstat_init();
4298 
4299 	/*
4300 	 * Don't do firewalled allocations if the heap is less than 1TB
	 * (i.e. on a 32-bit kernel).
4302 	 * The resulting VM_NEXTFIT allocations would create too much
4303 	 * fragmentation in a small heap.
4304 	 */
4305 #if defined(_LP64)
4306 	maxverify = minfirewall = PAGESIZE / 2;
4307 #else
4308 	maxverify = minfirewall = ULONG_MAX;
4309 #endif
4310 
4311 	/* LINTED */
4312 	ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE);
4313 
4314 	list_create(&kmem_caches, sizeof (kmem_cache_t),
4315 	    offsetof(kmem_cache_t, cache_link));
4316 
4317 	kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE,
4318 	    vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE,
4319 	    VM_SLEEP | VMC_NO_QCACHE);
4320 
4321 	kmem_msb_arena = vmem_create("kmem_msb", NULL, 0,
4322 	    PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0,
4323 	    VMC_DUMPSAFE | VM_SLEEP);
4324 
4325 	kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN,
4326 	    segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4327 
4328 	kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN,
4329 	    segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4330 
4331 	kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN,
4332 	    segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4333 
4334 	kmem_firewall_va_arena = vmem_create("kmem_firewall_va",
4335 	    NULL, 0, PAGESIZE,
4336 	    kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena,
4337 	    0, VM_SLEEP);
4338 
4339 	kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE,
4340 	    segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0,
4341 	    VMC_DUMPSAFE | VM_SLEEP);
4342 
4343 	/* temporary oversize arena for mod_read_system_file */
4344 	kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE,
4345 	    segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4346 
4347 	kmem_reap_interval = 15 * hz;
4348 
4349 	/*
4350 	 * Read /etc/system.  This is a chicken-and-egg problem because
4351 	 * kmem_flags may be set in /etc/system, but mod_read_system_file()
4352 	 * needs to use the allocator.  The simplest solution is to create
4353 	 * all the standard kmem caches, read /etc/system, destroy all the
4354 	 * caches we just created, and then create them all again in light
4355 	 * of the (possibly) new kmem_flags and other kmem tunables.
4356 	 */
4357 	kmem_cache_init(1, 0);
4358 
4359 	mod_read_system_file(boothowto & RB_ASKNAME);
4360 
4361 	while ((cp = list_tail(&kmem_caches)) != NULL)
4362 		kmem_cache_destroy(cp);
4363 
4364 	vmem_destroy(kmem_oversize_arena);
4365 
4366 	if (old_kmem_flags & KMF_STICKY)
4367 		kmem_flags = old_kmem_flags;
4368 
4369 	if (!(kmem_flags & KMF_AUDIT))
4370 		vmem_seg_size = offsetof(vmem_seg_t, vs_thread);
4371 
4372 	if (kmem_maxverify == 0)
4373 		kmem_maxverify = maxverify;
4374 
4375 	if (kmem_minfirewall == 0)
4376 		kmem_minfirewall = minfirewall;
4377 
4378 	/*
	 * Give segkmem a chance to figure out whether we are using large
	 * pages for the kernel heap.
4381 	 */
4382 	use_large_pages = segkmem_lpsetup();
4383 
4384 	/*
4385 	 * To protect against corruption, we keep the actual number of callers
	 * KMF_LITE records separate from the tunable.  We arbitrarily clamp
4387 	 * to 16, since the overhead for small buffers quickly gets out of
4388 	 * hand.
4389 	 *
4390 	 * The real limit would depend on the needs of the largest KMC_NOHASH
4391 	 * cache.
4392 	 */
4393 	kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16);
4394 	kmem_lite_pcs = kmem_lite_count;
4395 
4396 	/*
4397 	 * Normally, we firewall oversized allocations when possible, but
4398 	 * if we are using large pages for kernel memory, and we don't have
4399 	 * any non-LITE debugging flags set, we want to allocate oversized
4400 	 * buffers from large pages, and so skip the firewalling.
4401 	 */
4402 	if (use_large_pages &&
4403 	    ((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) {
4404 		kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0,
4405 		    PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena,
4406 		    0, VMC_DUMPSAFE | VM_SLEEP);
4407 	} else {
4408 		kmem_oversize_arena = vmem_create("kmem_oversize",
4409 		    NULL, 0, PAGESIZE,
4410 		    segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX?
4411 		    kmem_firewall_va_arena : heap_arena, 0, VMC_DUMPSAFE |
4412 		    VM_SLEEP);
4413 	}
4414 
4415 	kmem_cache_init(2, use_large_pages);
4416 
4417 	if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) {
4418 		if (kmem_transaction_log_size == 0)
4419 			kmem_transaction_log_size = kmem_maxavail() / 50;
4420 		kmem_transaction_log = kmem_log_init(kmem_transaction_log_size);
4421 	}
4422 
4423 	if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) {
4424 		if (kmem_content_log_size == 0)
4425 			kmem_content_log_size = kmem_maxavail() / 50;
4426 		kmem_content_log = kmem_log_init(kmem_content_log_size);
4427 	}
4428 
4429 	kmem_failure_log = kmem_log_init(kmem_failure_log_size);
4430 
4431 	kmem_slab_log = kmem_log_init(kmem_slab_log_size);
4432 
4433 	/*
4434 	 * Initialize STREAMS message caches so allocb() is available.
4435 	 * This allows us to initialize the logging framework (cmn_err(9F),
4436 	 * strlog(9F), etc) so we can start recording messages.
4437 	 */
4438 	streams_msg_init();
4439 
4440 	/*
4441 	 * Initialize the ZSD framework in Zones so modules loaded henceforth
4442 	 * can register their callbacks.
4443 	 */
4444 	zone_zsd_init();
4445 
4446 	log_init();
4447 	taskq_init();
4448 
4449 	/*
4450 	 * Warn about invalid or dangerous values of kmem_flags.
4451 	 * Always warn about unsupported values.
4452 	 */
4453 	if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE |
4454 	    KMF_CONTENTS | KMF_LITE)) != 0) ||
4455 	    ((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE))
4456 		cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x. "
4457 		    "See the Solaris Tunable Parameters Reference Manual.",
4458 		    kmem_flags);
4459 
4460 #ifdef DEBUG
4461 	if ((kmem_flags & KMF_DEBUG) == 0)
4462 		cmn_err(CE_NOTE, "kmem debugging disabled.");
4463 #else
4464 	/*
4465 	 * For non-debug kernels, the only "normal" flags are 0, KMF_LITE,
4466 	 * KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled
4467 	 * if KMF_AUDIT is set). We should warn the user about the performance
4468 	 * penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE
4469 	 * isn't set (since that disables AUDIT).
4470 	 */
4471 	if (!(kmem_flags & KMF_LITE) &&
4472 	    (kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0)
4473 		cmn_err(CE_WARN, "High-overhead kmem debugging features "
4474 		    "enabled (kmem_flags = 0x%x).  Performance degradation "
4475 		    "and large memory overhead possible. See the Solaris "
4476 		    "Tunable Parameters Reference Manual.", kmem_flags);
4477 #endif /* not DEBUG */
4478 
4479 	kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP);
4480 
4481 	kmem_ready = 1;
4482 
4483 	/*
4484 	 * Initialize the platform-specific aligned/DMA memory allocator.
4485 	 */
4486 	ka_init();
4487 
4488 	/*
4489 	 * Initialize 32-bit ID cache.
4490 	 */
4491 	id32_init();
4492 
4493 	/*
4494 	 * Initialize the networking stack so modules loaded can
4495 	 * register their callbacks.
4496 	 */
4497 	netstack_init();
4498 }
4499 
4500 static void
4501 kmem_move_init(void)
4502 {
4503 	kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache",
4504 	    sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL,
4505 	    kmem_msb_arena, KMC_NOHASH);
4506 	kmem_move_cache = kmem_cache_create("kmem_move_cache",
4507 	    sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL,
4508 	    kmem_msb_arena, KMC_NOHASH);
4509 
4510 	/*
4511 	 * kmem guarantees that move callbacks are sequential and that even
4512 	 * across multiple caches no two moves ever execute simultaneously.
4513 	 * Move callbacks are processed on a separate taskq so that client code
4514 	 * does not interfere with internal maintenance tasks.
4515 	 */
4516 	kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1,
4517 	    minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE);
4518 }
4519 
4520 void
4521 kmem_thread_init(void)
4522 {
4523 	kmem_move_init();
4524 	kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri,
4525 	    300, INT_MAX, TASKQ_PREPOPULATE);
4526 }
4527 
4528 void
4529 kmem_mp_init(void)
4530 {
4531 	mutex_enter(&cpu_lock);
4532 	register_cpu_setup_func(kmem_cpu_setup, NULL);
4533 	mutex_exit(&cpu_lock);
4534 
4535 	kmem_update_timeout(NULL);
4536 
4537 	taskq_mp_init();
4538 }
4539 
4540 /*
4541  * Return the slab of the allocated buffer, or NULL if the buffer is not
4542  * allocated. This function may be called with a known slab address to determine
4543  * whether or not the buffer is allocated, or with a NULL slab address to obtain
4544  * an allocated buffer's slab.
4545  */
4546 static kmem_slab_t *
4547 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
4548 {
4549 	kmem_bufctl_t *bcp, *bufbcp;
4550 
4551 	ASSERT(MUTEX_HELD(&cp->cache_lock));
4552 	ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));
4553 
4554 	if (cp->cache_flags & KMF_HASH) {
4555 		for (bcp = *KMEM_HASH(cp, buf);
4556 		    (bcp != NULL) && (bcp->bc_addr != buf);
4557 		    bcp = bcp->bc_next) {
4558 			continue;
4559 		}
4560 		ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1);
4561 		return (bcp == NULL ? NULL : bcp->bc_slab);
4562 	}
4563 
4564 	if (sp == NULL) {
4565 		sp = KMEM_SLAB(cp, buf);
4566 	}
4567 	bufbcp = KMEM_BUFCTL(cp, buf);
4568 	for (bcp = sp->slab_head;
4569 	    (bcp != NULL) && (bcp != bufbcp);
4570 	    bcp = bcp->bc_next) {
4571 		continue;
4572 	}
4573 	return (bcp == NULL ? sp : NULL);
4574 }
4575 
4576 static boolean_t
4577 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
4578 {
4579 	long refcnt = sp->slab_refcnt;
4580 
4581 	ASSERT(cp->cache_defrag != NULL);
4582 
4583 	/*
4584 	 * For code coverage we want to be able to move an object within the
4585 	 * same slab (the only partial slab) even if allocating the destination
4586 	 * buffer resulted in a completely allocated slab.
4587 	 */
4588 	if (flags & KMM_DEBUG) {
4589 		return ((flags & KMM_DESPERATE) ||
4590 		    ((sp->slab_flags & KMEM_SLAB_NOMOVE) == 0));
4591 	}
4592 
4593 	/* If we're desperate, we don't care if the client said NO. */
4594 	if (flags & KMM_DESPERATE) {
4595 		return (refcnt < sp->slab_chunks); /* any partial */
4596 	}
4597 
4598 	if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4599 		return (B_FALSE);
4600 	}
4601 
4602 	if ((refcnt == 1) || kmem_move_any_partial) {
4603 		return (refcnt < sp->slab_chunks);
4604 	}
4605 
4606 	/*
4607 	 * The reclaim threshold is adjusted at each kmem_cache_scan() so that
4608 	 * slabs with a progressively higher percentage of used buffers can be
4609 	 * reclaimed until the cache as a whole is no longer fragmented.
4610 	 *
4611 	 *	sp->slab_refcnt   kmd_reclaim_numer
4612 	 *	--------------- < ------------------
4613 	 *	sp->slab_chunks   KMEM_VOID_FRACTION
4614 	 */
4615 	return ((refcnt * KMEM_VOID_FRACTION) <
4616 	    (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
4617 }
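
/*
 * Worked example for the threshold above (editor's illustration; the numbers
 * are chosen for clarity rather than quoted from the source): with a
 * KMEM_VOID_FRACTION of 8 and kmd_reclaim_numer of 1, a slab with
 * slab_chunks == 32 is reclaimable only while slab_refcnt * 8 < 32 * 1,
 * i.e. while fewer than 4 of its 32 buffers remain allocated. When the cache
 * stays fragmented, kmem_cache_scan() bumps kmd_reclaim_numer via
 * kmem_adjust_reclaim_threshold(), so progressively fuller slabs qualify.
 */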
4618 
4619 static void *
4620 kmem_hunt_mag(kmem_cache_t *cp, kmem_magazine_t *m, int n, void *buf,
4621     void *tbuf)
4622 {
4623 	int i;		/* magazine round index */
4624 
4625 	for (i = 0; i < n; i++) {
4626 		if (buf == m->mag_round[i]) {
4627 			if (cp->cache_flags & KMF_BUFTAG) {
4628 				(void) kmem_cache_free_debug(cp, tbuf,
4629 				    caller());
4630 			}
4631 			m->mag_round[i] = tbuf;
4632 			return (buf);
4633 		}
4634 	}
4635 
4636 	return (NULL);
4637 }
4638 
4639 /*
4640  * Hunt the magazine layer for the given buffer. If found, the buffer is
4641  * removed from the magazine layer and returned; otherwise NULL is returned.
4642  * The returned buffer is in the freed, constructed state.
4643  */
4644 static void *
4645 kmem_hunt_mags(kmem_cache_t *cp, void *buf)
4646 {
4647 	kmem_cpu_cache_t *ccp;
4648 	kmem_magazine_t	*m;
4649 	int cpu_seqid;
4650 	int n;		/* magazine rounds */
4651 	void *tbuf;	/* temporary swap buffer */
4652 
4653 	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4654 
4655 	/*
4656 	 * Allocate a buffer to swap with the one we hope to pull out of a
4657 	 * magazine when found.
4658 	 */
4659 	tbuf = kmem_cache_alloc(cp, KM_NOSLEEP);
4660 	if (tbuf == NULL) {
4661 		KMEM_STAT_ADD(kmem_move_stats.kms_hunt_alloc_fail);
4662 		return (NULL);
4663 	}
4664 	if (tbuf == buf) {
4665 		KMEM_STAT_ADD(kmem_move_stats.kms_hunt_lucky);
4666 		if (cp->cache_flags & KMF_BUFTAG) {
4667 			(void) kmem_cache_free_debug(cp, buf, caller());
4668 		}
4669 		return (buf);
4670 	}
4671 
4672 	/* Hunt the depot. */
4673 	mutex_enter(&cp->cache_depot_lock);
4674 	n = cp->cache_magtype->mt_magsize;
4675 	for (m = cp->cache_full.ml_list; m != NULL; m = m->mag_next) {
4676 		if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4677 			mutex_exit(&cp->cache_depot_lock);
4678 			return (buf);
4679 		}
4680 	}
4681 	mutex_exit(&cp->cache_depot_lock);
4682 
4683 	/* Hunt the per-CPU magazines. */
4684 	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
4685 		ccp = &cp->cache_cpu[cpu_seqid];
4686 
4687 		mutex_enter(&ccp->cc_lock);
4688 		m = ccp->cc_loaded;
4689 		n = ccp->cc_rounds;
4690 		if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4691 			mutex_exit(&ccp->cc_lock);
4692 			return (buf);
4693 		}
4694 		m = ccp->cc_ploaded;
4695 		n = ccp->cc_prounds;
4696 		if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4697 			mutex_exit(&ccp->cc_lock);
4698 			return (buf);
4699 		}
4700 		mutex_exit(&ccp->cc_lock);
4701 	}
4702 
4703 	kmem_cache_free(cp, tbuf);
4704 	return (NULL);
4705 }
4706 
4707 /*
4708  * May be called from the kmem_move_taskq, from kmem_cache_move_notify_task(),
4709  * or when the buffer is freed.
4710  */
4711 static void
4712 kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4713 {
4714 	ASSERT(MUTEX_HELD(&cp->cache_lock));
4715 	ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4716 
4717 	if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4718 		return;
4719 	}
4720 
4721 	if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4722 		if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) {
4723 			avl_remove(&cp->cache_partial_slabs, sp);
4724 			sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
4725 			sp->slab_stuck_offset = (uint32_t)-1;
4726 			avl_add(&cp->cache_partial_slabs, sp);
4727 		}
4728 	} else {
4729 		sp->slab_later_count = 0;
4730 		sp->slab_stuck_offset = (uint32_t)-1;
4731 	}
4732 }
4733 
4734 static void
4735 kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4736 {
4737 	ASSERT(taskq_member(kmem_move_taskq, curthread));
4738 	ASSERT(MUTEX_HELD(&cp->cache_lock));
4739 	ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4740 
4741 	if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4742 		return;
4743 	}
4744 
4745 	avl_remove(&cp->cache_partial_slabs, sp);
4746 	sp->slab_later_count = 0;
4747 	sp->slab_flags |= KMEM_SLAB_NOMOVE;
4748 	sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf);
4749 	avl_add(&cp->cache_partial_slabs, sp);
4750 }
4751 
4752 static void kmem_move_end(kmem_cache_t *, kmem_move_t *);
4753 
4754 /*
4755  * The move callback takes two buffer addresses, the buffer to be moved, and a
4756  * newly allocated and constructed buffer selected by kmem as the destination.
4757  * It also takes the size of the buffer and an optional user argument specified
4758  * at cache creation time. kmem guarantees that the buffer to be moved has not
4759  * been unmapped by the virtual memory subsystem. Beyond that, it cannot
4760  * guarantee the present whereabouts of the buffer to be moved, so it is up to
4761  * the client to safely determine whether or not it is still using the buffer.
4762  * The client must not free either of the buffers passed to the move callback,
4763  * since kmem wants to free them directly to the slab layer. The client response
4764  * tells kmem which of the two buffers to free:
4765  *
4766  * YES		kmem frees the old buffer (the move was successful)
4767  * NO		kmem frees the new buffer, marks the slab of the old buffer
4768  *              non-reclaimable to avoid bothering the client again
4769  * LATER	kmem frees the new buffer, increments slab_later_count
4770  * DONT_KNOW	kmem frees the new buffer, searches mags for the old buffer
4771  * DONT_NEED	kmem frees both the old buffer and the new buffer
4772  *
4773  * The pending callback argument now being processed contains both of the
4774  * buffers (old and new) passed to the move callback function, the slab of the
4775  * old buffer, and flags related to the move request, such as whether or not the
4776  * system was desperate for memory.
4777  *
4778  * Slabs are not freed while there is a pending callback, but instead are kept
4779  * on a deadlist, which is drained after the last callback completes. This means
4780  * that slabs are safe to access until kmem_move_end(), no matter how many of
4781  * their buffers have been freed. Once slab_refcnt reaches zero, it stays at
4782  * zero for as long as the slab remains on the deadlist and until the slab is
4783  * freed.
4784  */
4785 static void
4786 kmem_move_buffer(kmem_move_t *callback)
4787 {
4788 	kmem_cbrc_t response;
4789 	kmem_slab_t *sp = callback->kmm_from_slab;
4790 	kmem_cache_t *cp = sp->slab_cache;
4791 	boolean_t free_on_slab;
4792 
4793 	ASSERT(taskq_member(kmem_move_taskq, curthread));
4794 	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4795 	ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf));
4796 
4797 	/*
4798 	 * The number of allocated buffers on the slab may have changed since we
4799 	 * last checked the slab's reclaimability (when the pending move was
4800 	 * enqueued), or the client may have responded NO when asked to move
4801 	 * another buffer on the same slab.
4802 	 */
4803 	if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
4804 		KMEM_STAT_ADD(kmem_move_stats.kms_no_longer_reclaimable);
4805 		KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY),
4806 		    kmem_move_stats.kms_notify_no_longer_reclaimable);
4807 		kmem_slab_free(cp, callback->kmm_to_buf);
4808 		kmem_move_end(cp, callback);
4809 		return;
4810 	}
4811 
4812 	/*
4813 	 * Hunting magazines is expensive, so we'll wait to do that until the
4814 	 * client responds KMEM_CBRC_DONT_KNOW. However, checking the slab layer
4815 	 * is cheap, so we might as well do that here in case we can avoid
4816 	 * bothering the client.
4817 	 */
4818 	mutex_enter(&cp->cache_lock);
4819 	free_on_slab = (kmem_slab_allocated(cp, sp,
4820 	    callback->kmm_from_buf) == NULL);
4821 	mutex_exit(&cp->cache_lock);
4822 
4823 	if (free_on_slab) {
4824 		KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_slab);
4825 		kmem_slab_free(cp, callback->kmm_to_buf);
4826 		kmem_move_end(cp, callback);
4827 		return;
4828 	}
4829 
4830 	if (cp->cache_flags & KMF_BUFTAG) {
4831 		/*
4832 		 * Make kmem_cache_alloc_debug() apply the constructor for us.
4833 		 */
4834 		if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4835 		    KM_NOSLEEP, 1, caller()) != 0) {
4836 			KMEM_STAT_ADD(kmem_move_stats.kms_alloc_fail);
4837 			kmem_move_end(cp, callback);
4838 			return;
4839 		}
4840 	} else if (cp->cache_constructor != NULL &&
4841 	    cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4842 	    KM_NOSLEEP) != 0) {
4843 		atomic_inc_64(&cp->cache_alloc_fail);
4844 		KMEM_STAT_ADD(kmem_move_stats.kms_constructor_fail);
4845 		kmem_slab_free(cp, callback->kmm_to_buf);
4846 		kmem_move_end(cp, callback);
4847 		return;
4848 	}
4849 
4850 	KMEM_STAT_ADD(kmem_move_stats.kms_callbacks);
4851 	KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY),
4852 	    kmem_move_stats.kms_notify_callbacks);
4853 	cp->cache_defrag->kmd_callbacks++;
4854 	cp->cache_defrag->kmd_thread = curthread;
4855 	cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4856 	cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4857 	DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4858 	    callback);
4859 
4860 	response = cp->cache_move(callback->kmm_from_buf,
4861 	    callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4862 
4863 	DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
4864 	    callback, kmem_cbrc_t, response);
4865 	cp->cache_defrag->kmd_thread = NULL;
4866 	cp->cache_defrag->kmd_from_buf = NULL;
4867 	cp->cache_defrag->kmd_to_buf = NULL;
4868 
4869 	if (response == KMEM_CBRC_YES) {
4870 		KMEM_STAT_ADD(kmem_move_stats.kms_yes);
4871 		cp->cache_defrag->kmd_yes++;
4872 		kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4873 		/* slab safe to access until kmem_move_end() */
4874 		if (sp->slab_refcnt == 0)
4875 			cp->cache_defrag->kmd_slabs_freed++;
4876 		mutex_enter(&cp->cache_lock);
4877 		kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4878 		mutex_exit(&cp->cache_lock);
4879 		kmem_move_end(cp, callback);
4880 		return;
4881 	}
4882 
4883 	switch (response) {
4884 	case KMEM_CBRC_NO:
4885 		KMEM_STAT_ADD(kmem_move_stats.kms_no);
4886 		cp->cache_defrag->kmd_no++;
4887 		mutex_enter(&cp->cache_lock);
4888 		kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4889 		mutex_exit(&cp->cache_lock);
4890 		break;
4891 	case KMEM_CBRC_LATER:
4892 		KMEM_STAT_ADD(kmem_move_stats.kms_later);
4893 		cp->cache_defrag->kmd_later++;
4894 		mutex_enter(&cp->cache_lock);
4895 		if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4896 			mutex_exit(&cp->cache_lock);
4897 			break;
4898 		}
4899 
4900 		if (++sp->slab_later_count >= KMEM_DISBELIEF) {
4901 			KMEM_STAT_ADD(kmem_move_stats.kms_disbelief);
4902 			kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4903 		} else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) {
4904 			sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp,
4905 			    callback->kmm_from_buf);
4906 		}
4907 		mutex_exit(&cp->cache_lock);
4908 		break;
4909 	case KMEM_CBRC_DONT_NEED:
4910 		KMEM_STAT_ADD(kmem_move_stats.kms_dont_need);
4911 		cp->cache_defrag->kmd_dont_need++;
4912 		kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4913 		if (sp->slab_refcnt == 0)
4914 			cp->cache_defrag->kmd_slabs_freed++;
4915 		mutex_enter(&cp->cache_lock);
4916 		kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4917 		mutex_exit(&cp->cache_lock);
4918 		break;
4919 	case KMEM_CBRC_DONT_KNOW:
4920 		KMEM_STAT_ADD(kmem_move_stats.kms_dont_know);
4921 		cp->cache_defrag->kmd_dont_know++;
4922 		if (kmem_hunt_mags(cp, callback->kmm_from_buf) != NULL) {
4923 			KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_mag);
4924 			cp->cache_defrag->kmd_hunt_found++;
4925 			kmem_slab_free_constructed(cp, callback->kmm_from_buf,
4926 			    B_TRUE);
4927 			if (sp->slab_refcnt == 0)
4928 				cp->cache_defrag->kmd_slabs_freed++;
4929 			mutex_enter(&cp->cache_lock);
4930 			kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4931 			mutex_exit(&cp->cache_lock);
4932 		}
4933 		break;
4934 	default:
4935 		panic("'%s' (%p) unexpected move callback response %d\n",
4936 		    cp->cache_name, (void *)cp, response);
4937 	}
4938 
4939 	kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
4940 	kmem_move_end(cp, callback);
4941 }
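
/*
 * Editor's sketch (hypothetical, not from the original source): the shape of
 * a client move callback producing the responses handled above. The foo_t
 * type, its helper routines, and foo_cache are assumptions for illustration;
 * the callback itself would be registered with kmem_cache_set_move().
 *
 *	static kmem_cbrc_t
 *	foo_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		foo_t *fp = old;
 *
 *		if (!foo_is_valid(fp))		(buffer may already be freed)
 *			return (KMEM_CBRC_DONT_KNOW);
 *		if (foo_is_condemned(fp))	(about to be freed anyway)
 *			return (KMEM_CBRC_DONT_NEED);
 *		if (!foo_try_lock(fp))		(busy right now, retry later)
 *			return (KMEM_CBRC_LATER);
 *		if (foo_is_pinned(fp)) {	(can never be moved)
 *			foo_unlock(fp);
 *			return (KMEM_CBRC_NO);
 *		}
 *
 *		bcopy(old, new, size);		(relocate the contents)
 *		foo_fixup_pointers(new);	(repoint external references)
 *		foo_unlock(new);
 *		return (KMEM_CBRC_YES);		(kmem frees the old buffer)
 *	}
 */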
4942 
4943 /* Return B_FALSE if there is insufficient memory for the move request. */
4944 static boolean_t
4945 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
4946 {
4947 	void *to_buf;
4948 	avl_index_t index;
4949 	kmem_move_t *callback, *pending;
4950 	ulong_t n;
4951 
4952 	ASSERT(taskq_member(kmem_taskq, curthread));
4953 	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4954 	ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
4955 
4956 	callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP);
4957 	if (callback == NULL) {
4958 		KMEM_STAT_ADD(kmem_move_stats.kms_callback_alloc_fail);
4959 		return (B_FALSE);
4960 	}
4961 
4962 	callback->kmm_from_slab = sp;
4963 	callback->kmm_from_buf = buf;
4964 	callback->kmm_flags = flags;
4965 
4966 	mutex_enter(&cp->cache_lock);
4967 
4968 	n = avl_numnodes(&cp->cache_partial_slabs);
4969 	if ((n == 0) || ((n == 1) && !(flags & KMM_DEBUG))) {
4970 		mutex_exit(&cp->cache_lock);
4971 		kmem_cache_free(kmem_move_cache, callback);
4972 		return (B_TRUE); /* there is no need for the move request */
4973 	}
4974 
4975 	pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
4976 	if (pending != NULL) {
4977 		/*
4978 		 * If the move is already pending and we're desperate now,
4979 		 * update the move flags.
4980 		 */
4981 		if (flags & KMM_DESPERATE) {
4982 			pending->kmm_flags |= KMM_DESPERATE;
4983 		}
4984 		mutex_exit(&cp->cache_lock);
4985 		KMEM_STAT_ADD(kmem_move_stats.kms_already_pending);
4986 		kmem_cache_free(kmem_move_cache, callback);
4987 		return (B_TRUE);
4988 	}
4989 
4990 	to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
4991 	    B_FALSE);
4992 	callback->kmm_to_buf = to_buf;
4993 	avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
4994 
4995 	mutex_exit(&cp->cache_lock);
4996 
4997 	if (!taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer,
4998 	    callback, TQ_NOSLEEP)) {
4999 		KMEM_STAT_ADD(kmem_move_stats.kms_callback_taskq_fail);
5000 		mutex_enter(&cp->cache_lock);
5001 		avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
5002 		mutex_exit(&cp->cache_lock);
5003 		kmem_slab_free(cp, to_buf);
5004 		kmem_cache_free(kmem_move_cache, callback);
5005 		return (B_FALSE);
5006 	}
5007 
5008 	return (B_TRUE);
5009 }
5010 
5011 static void
5012 kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
5013 {
5014 	avl_index_t index;
5015 
5016 	ASSERT(cp->cache_defrag != NULL);
5017 	ASSERT(taskq_member(kmem_move_taskq, curthread));
5018 	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
5019 
5020 	mutex_enter(&cp->cache_lock);
5021 	VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
5022 	    callback->kmm_from_buf, &index) != NULL);
5023 	avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
5024 	if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
5025 		list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
5026 		kmem_slab_t *sp;
5027 
5028 		/*
5029 		 * The last pending move completed. Release all slabs from the
5030 		 * front of the dead list except for any slab at the tail that
5031 		 * needs to be released from the context of kmem_move_buffers().
5032 		 * kmem deferred unmapping the buffers on these slabs in order
5033 		 * to guarantee that buffers passed to the move callback have
5034 		 * been touched only by kmem or by the client itself.
5035 		 */
5036 		while ((sp = list_remove_head(deadlist)) != NULL) {
5037 			if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
5038 				list_insert_tail(deadlist, sp);
5039 				break;
5040 			}
5041 			cp->cache_defrag->kmd_deadcount--;
5042 			cp->cache_slab_destroy++;
5043 			mutex_exit(&cp->cache_lock);
5044 			kmem_slab_destroy(cp, sp);
5045 			KMEM_STAT_ADD(kmem_move_stats.kms_dead_slabs_freed);
5046 			mutex_enter(&cp->cache_lock);
5047 		}
5048 	}
5049 	mutex_exit(&cp->cache_lock);
5050 	kmem_cache_free(kmem_move_cache, callback);
5051 }
5052 
5053 /*
5054  * Move buffers from least used slabs first by scanning backwards from the end
5055  * of the partial slab list. Scan at most max_scan candidate slabs and move
5056  * buffers from at most max_slabs slabs (0 for all partial slabs in both cases).
5057  * If desperate to reclaim memory, move buffers from any partial slab, otherwise
5058  * skip slabs with a ratio of allocated buffers at or above the current
5059  * threshold. Return the number of unskipped slabs (at most max_slabs, -1 if the
5060  * scan is aborted) so that the caller can adjust the reclaimability threshold
5061  * depending on how many reclaimable slabs it finds.
5062  *
5063  * kmem_move_buffers() drops and reacquires cache_lock every time it issues a
5064  * move request, since it is not valid for kmem_move_begin() to call
5065  * kmem_cache_alloc() or taskq_dispatch() with cache_lock held.
5066  */
5067 static int
5068 kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
5069     int flags)
5070 {
5071 	kmem_slab_t *sp;
5072 	void *buf;
5073 	int i, j; /* slab index, buffer index */
5074 	int s; /* reclaimable slabs */
5075 	int b; /* allocated (movable) buffers on reclaimable slab */
5076 	boolean_t success;
5077 	int refcnt;
5078 	int nomove;
5079 
5080 	ASSERT(taskq_member(kmem_taskq, curthread));
5081 	ASSERT(MUTEX_HELD(&cp->cache_lock));
5082 	ASSERT(kmem_move_cache != NULL);
5083 	ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
5084 	ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) :
5085 	    avl_numnodes(&cp->cache_partial_slabs) > 1);
5086 
5087 	if (kmem_move_blocked) {
5088 		return (0);
5089 	}
5090 
5091 	if (kmem_move_fulltilt) {
5092 		flags |= KMM_DESPERATE;
5093 	}
5094 
5095 	if (max_scan == 0 || (flags & KMM_DESPERATE)) {
5096 		/*
5097 		 * Scan as many slabs as needed to find the desired number of
5098 		 * candidate slabs.
5099 		 */
5100 		max_scan = (size_t)-1;
5101 	}
5102 
5103 	if (max_slabs == 0 || (flags & KMM_DESPERATE)) {
5104 		/* Find as many candidate slabs as possible. */
5105 		max_slabs = (size_t)-1;
5106 	}
5107 
5108 	sp = avl_last(&cp->cache_partial_slabs);
5109 	ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
5110 	for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) && (sp != NULL) &&
5111 	    ((sp != avl_first(&cp->cache_partial_slabs)) ||
5112 	    (flags & KMM_DEBUG));
5113 	    sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {
5114 
5115 		if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
5116 			continue;
5117 		}
5118 		s++;
5119 
5120 		/* Look for allocated buffers to move. */
5121 		for (j = 0, b = 0, buf = sp->slab_base;
5122 		    (j < sp->slab_chunks) && (b < sp->slab_refcnt);
5123 		    buf = (((char *)buf) + cp->cache_chunksize), j++) {
5124 
5125 			if (kmem_slab_allocated(cp, sp, buf) == NULL) {
5126 				continue;
5127 			}
5128 
5129 			b++;
5130 
5131 			/*
5132 			 * Prevent the slab from being destroyed while we drop
5133 			 * cache_lock and while the pending move is not yet
5134 			 * registered. Flag the pending move while
5135 			 * kmd_moves_pending may still be empty, since we can't
5136 			 * yet rely on a non-zero pending move count to prevent
5137 			 * the slab from being destroyed.
5138 			 */
5139 			ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
5140 			sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5141 			/*
5142 			 * Recheck refcnt and nomove after reacquiring the lock,
5143 			 * since these control the order of partial slabs, and
5144 			 * we want to know if we can pick up the scan where we
5145 			 * left off.
5146 			 */
5147 			refcnt = sp->slab_refcnt;
5148 			nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
5149 			mutex_exit(&cp->cache_lock);
5150 
5151 			success = kmem_move_begin(cp, sp, buf, flags);
5152 
5153 			/*
5154 			 * Now, before the lock is reacquired, kmem could
5155 			 * process all pending move requests and purge the
5156 			 * deadlist, so that upon reacquiring the lock, sp has
5157 			 * been remapped. Or, the client may free all the
5158 			 * objects on the slab while the pending moves are still
5159 			 * on the taskq. Therefore, the KMEM_SLAB_MOVE_PENDING
5160 			 * flag causes the slab to be put at the end of the
5161 			 * deadlist and prevents it from being destroyed, since
5162 			 * we plan to destroy it here after reacquiring the
5163 			 * lock.
5164 			 */
5165 			mutex_enter(&cp->cache_lock);
5166 			ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5167 			sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
5168 
5169 			if (sp->slab_refcnt == 0) {
5170 				list_t *deadlist =
5171 				    &cp->cache_defrag->kmd_deadlist;
5172 				list_remove(deadlist, sp);
5173 
5174 				if (!avl_is_empty(
5175 				    &cp->cache_defrag->kmd_moves_pending)) {
5176 					/*
5177 					 * A pending move makes it unsafe to
5178 					 * destroy the slab, because even though
5179 					 * the move is no longer needed, the
5180 					 * context where that is determined
5181 					 * requires the slab to exist.
5182 					 * Fortunately, a pending move also
5183 					 * means we don't need to destroy the
5184 					 * slab here, since it will get
5185 					 * destroyed along with any other slabs
5186 					 * on the deadlist after the last
5187 					 * pending move completes.
5188 					 */
5189 					list_insert_head(deadlist, sp);
5190 					KMEM_STAT_ADD(kmem_move_stats.
5191 					    kms_endscan_slab_dead);
5192 					return (-1);
5193 				}
5194 
5195 				/*
5196 				 * Destroy the slab now if it was completely
5197 				 * freed while we dropped cache_lock and there
5198 				 * are no pending moves. Since slab_refcnt
5199 				 * cannot change once it reaches zero, no new
5200 				 * pending moves from that slab are possible.
5201 				 */
5202 				cp->cache_defrag->kmd_deadcount--;
5203 				cp->cache_slab_destroy++;
5204 				mutex_exit(&cp->cache_lock);
5205 				kmem_slab_destroy(cp, sp);
5206 				KMEM_STAT_ADD(kmem_move_stats.
5207 				    kms_dead_slabs_freed);
5208 				KMEM_STAT_ADD(kmem_move_stats.
5209 				    kms_endscan_slab_destroyed);
5210 				mutex_enter(&cp->cache_lock);
5211 				/*
5212 				 * Since we can't pick up the scan where we left
5213 				 * off, abort the scan and say nothing about the
5214 				 * number of reclaimable slabs.
5215 				 */
5216 				return (-1);
5217 			}
5218 
5219 			if (!success) {
5220 				/*
5221 				 * Abort the scan if there is not enough memory
5222 				 * for the request and say nothing about the
5223 				 * number of reclaimable slabs.
5224 				 */
5225 				KMEM_STAT_COND_ADD(s < max_slabs,
5226 				    kmem_move_stats.kms_endscan_nomem);
5227 				return (-1);
5228 			}
5229 
5230 			/*
5231 			 * The slab's position changed while the lock was
5232 			 * dropped, so we don't know where we are in the
5233 			 * sequence any more.
5234 			 */
5235 			if (sp->slab_refcnt != refcnt) {
5236 				/*
5237 				 * If this is a KMM_DEBUG move, the slab_refcnt
5238 				 * may have changed because we allocated a
5239 				 * destination buffer on the same slab. In that
5240 				 * case, we're not interested in counting it.
5241 				 */
5242 				KMEM_STAT_COND_ADD(!(flags & KMM_DEBUG) &&
5243 				    (s < max_slabs),
5244 				    kmem_move_stats.kms_endscan_refcnt_changed);
5245 				return (-1);
5246 			}
5247 			if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove) {
5248 				KMEM_STAT_COND_ADD(s < max_slabs,
5249 				    kmem_move_stats.kms_endscan_nomove_changed);
5250 				return (-1);
5251 			}
5252 
5253 			/*
5254 			 * Generating a move request allocates a destination
5255 			 * buffer from the slab layer, bumping the first partial
5256 			 * slab if it is completely allocated. If the current
5257 			 * slab becomes the first partial slab as a result, we
5258 			 * can't continue to scan backwards.
5259 			 *
5260 			 * If this is a KMM_DEBUG move and we allocated the
5261 			 * destination buffer from the last partial slab, then
5262 			 * the buffer we're moving is on the same slab and our
5263 			 * slab_refcnt has changed, causing us to return before
5264 			 * reaching here if there are no partial slabs left.
5265 			 */
5266 			ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
5267 			if (sp == avl_first(&cp->cache_partial_slabs)) {
5268 				/*
5269 				 * We're not interested in a second KMM_DEBUG
5270 				 * move.
5271 				 */
5272 				goto end_scan;
5273 			}
5274 		}
5275 	}
5276 end_scan:
5277 
5278 	KMEM_STAT_COND_ADD(!(flags & KMM_DEBUG) &&
5279 	    (s < max_slabs) &&
5280 	    (sp == avl_first(&cp->cache_partial_slabs)),
5281 	    kmem_move_stats.kms_endscan_freelist);
5282 
5283 	return (s);
5284 }
5285 
5286 typedef struct kmem_move_notify_args {
5287 	kmem_cache_t *kmna_cache;
5288 	void *kmna_buf;
5289 } kmem_move_notify_args_t;
5290 
5291 static void
5292 kmem_cache_move_notify_task(void *arg)
5293 {
5294 	kmem_move_notify_args_t *args = arg;
5295 	kmem_cache_t *cp = args->kmna_cache;
5296 	void *buf = args->kmna_buf;
5297 	kmem_slab_t *sp;
5298 
5299 	ASSERT(taskq_member(kmem_taskq, curthread));
5300 	ASSERT(list_link_active(&cp->cache_link));
5301 
5302 	kmem_free(args, sizeof (kmem_move_notify_args_t));
5303 	mutex_enter(&cp->cache_lock);
5304 	sp = kmem_slab_allocated(cp, NULL, buf);
5305 
5306 	/* Ignore the notification if the buffer is no longer allocated. */
5307 	if (sp == NULL) {
5308 		mutex_exit(&cp->cache_lock);
5309 		return;
5310 	}
5311 
5312 	/* Ignore the notification if there's no reason to move the buffer. */
5313 	if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5314 		/*
5315 		 * So far the notification is not ignored. Ignore the
5316 		 * notification if the slab is not marked by an earlier refusal
5317 		 * to move a buffer.
5318 		 */
5319 		if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) &&
5320 		    (sp->slab_later_count == 0)) {
5321 			mutex_exit(&cp->cache_lock);
5322 			return;
5323 		}
5324 
5325 		kmem_slab_move_yes(cp, sp, buf);
5326 		ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
5327 		sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5328 		mutex_exit(&cp->cache_lock);
5329 		/* see kmem_move_buffers() about dropping the lock */
5330 		(void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
5331 		mutex_enter(&cp->cache_lock);
5332 		ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5333 		sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
5334 		if (sp->slab_refcnt == 0) {
5335 			list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
5336 			list_remove(deadlist, sp);
5337 
5338 			if (!avl_is_empty(
5339 			    &cp->cache_defrag->kmd_moves_pending)) {
5340 				list_insert_head(deadlist, sp);
5341 				mutex_exit(&cp->cache_lock);
5342 				KMEM_STAT_ADD(kmem_move_stats.
5343 				    kms_notify_slab_dead);
5344 				return;
5345 			}
5346 
5347 			cp->cache_defrag->kmd_deadcount--;
5348 			cp->cache_slab_destroy++;
5349 			mutex_exit(&cp->cache_lock);
5350 			kmem_slab_destroy(cp, sp);
5351 			KMEM_STAT_ADD(kmem_move_stats.kms_dead_slabs_freed);
5352 			KMEM_STAT_ADD(kmem_move_stats.
5353 			    kms_notify_slab_destroyed);
5354 			return;
5355 		}
5356 	} else {
5357 		kmem_slab_move_yes(cp, sp, buf);
5358 	}
5359 	mutex_exit(&cp->cache_lock);
5360 }
5361 
5362 void
5363 kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
5364 {
5365 	kmem_move_notify_args_t *args;
5366 
5367 	KMEM_STAT_ADD(kmem_move_stats.kms_notify);
5368 	args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP);
5369 	if (args != NULL) {
5370 		args->kmna_cache = cp;
5371 		args->kmna_buf = buf;
5372 		if (!taskq_dispatch(kmem_taskq,
5373 		    (task_func_t *)kmem_cache_move_notify_task, args,
5374 		    TQ_NOSLEEP))
5375 			kmem_free(args, sizeof (kmem_move_notify_args_t));
5376 	}
5377 }
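
/*
 * Editor's sketch (hypothetical client code, not from the original source):
 * a client that previously answered KMEM_CBRC_LATER is expected to call
 * kmem_cache_move_notify() once the object becomes movable, e.g. when its
 * last hold is released. foo_t, foo_holds, and foo_cache are illustrative
 * names only.
 *
 *	void
 *	foo_rele(foo_t *fp)
 *	{
 *		if (atomic_dec_32_nv(&fp->foo_holds) == 0)
 *			kmem_cache_move_notify(foo_cache, fp);
 *	}
 *
 * The notification is advisory: kmem_cache_move_notify_task() above simply
 * ignores it if the buffer has since been freed or if there is no longer any
 * reason to move it.
 */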
5378 
5379 static void
5380 kmem_cache_defrag(kmem_cache_t *cp)
5381 {
5382 	size_t n;
5383 
5384 	ASSERT(cp->cache_defrag != NULL);
5385 
5386 	mutex_enter(&cp->cache_lock);
5387 	n = avl_numnodes(&cp->cache_partial_slabs);
5388 	if (n > 1) {
5389 		/* kmem_move_buffers() drops and reacquires cache_lock */
5390 		KMEM_STAT_ADD(kmem_move_stats.kms_defrags);
5391 		cp->cache_defrag->kmd_defrags++;
5392 		(void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
5393 	}
5394 	mutex_exit(&cp->cache_lock);
5395 }
5396 
5397 /* Is this cache above the fragmentation threshold? */
5398 static boolean_t
5399 kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
5400 {
5401 	/*
5402 	 *	nfree		kmem_frag_numer
5403 	 * ------------------ > ---------------
5404 	 * cp->cache_buftotal	kmem_frag_denom
5405 	 */
5406 	return ((nfree * kmem_frag_denom) >
5407 	    (cp->cache_buftotal * kmem_frag_numer));
5408 }
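
/*
 * Worked example (editor's illustration; the 1/8 ratio below is assumed, the
 * actual defaults accompany the kmem_frag_numer/kmem_frag_denom definitions):
 * with a threshold of 1/8, a cache with cache_buftotal == 1000 crosses the
 * threshold once more than 125 buffers are free in the slab layer, since
 * 126 * 8 > 1000 * 1. kmem_cache_is_fragmented() below also counts the free
 * buffers that a depot reap could recover from magazines outside the working
 * set, to decide whether reaping the working set is worthwhile (*doreap).
 */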
5409 
5410 static boolean_t
5411 kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
5412 {
5413 	boolean_t fragmented;
5414 	uint64_t nfree;
5415 
5416 	ASSERT(MUTEX_HELD(&cp->cache_lock));
5417 	*doreap = B_FALSE;
5418 
5419 	if (kmem_move_fulltilt) {
5420 		if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5421 			return (B_TRUE);
5422 		}
5423 	} else {
5424 		if ((cp->cache_complete_slab_count + avl_numnodes(
5425 		    &cp->cache_partial_slabs)) < kmem_frag_minslabs) {
5426 			return (B_FALSE);
5427 		}
5428 	}
5429 
5430 	nfree = cp->cache_bufslab;
5431 	fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) &&
5432 	    kmem_cache_frag_threshold(cp, nfree));
5433 
5434 	/*
5435 	 * Free buffers in the magazine layer appear allocated from the point of
5436 	 * view of the slab layer. We want to know if the slab layer would
5437 	 * appear fragmented if we included free buffers from magazines that
5438 	 * have fallen out of the working set.
5439 	 */
5440 	if (!fragmented) {
5441 		long reap;
5442 
5443 		mutex_enter(&cp->cache_depot_lock);
5444 		reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
5445 		reap = MIN(reap, cp->cache_full.ml_total);
5446 		mutex_exit(&cp->cache_depot_lock);
5447 
5448 		nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
5449 		if (kmem_cache_frag_threshold(cp, nfree)) {
5450 			*doreap = B_TRUE;
5451 		}
5452 	}
5453 
5454 	return (fragmented);
5455 }
5456 
5457 /* Called periodically from kmem_taskq */
5458 static void
5459 kmem_cache_scan(kmem_cache_t *cp)
5460 {
5461 	boolean_t reap = B_FALSE;
5462 	kmem_defrag_t *kmd;
5463 
5464 	ASSERT(taskq_member(kmem_taskq, curthread));
5465 
5466 	mutex_enter(&cp->cache_lock);
5467 
5468 	kmd = cp->cache_defrag;
5469 	if (kmd->kmd_consolidate > 0) {
5470 		kmd->kmd_consolidate--;
5471 		mutex_exit(&cp->cache_lock);
5472 		kmem_cache_reap(cp);
5473 		return;
5474 	}
5475 
5476 	if (kmem_cache_is_fragmented(cp, &reap)) {
5477 		int slabs_found;	/* kmem_move_buffers() returns -1 on abort */
5478 
5479 		/*
5480 		 * Consolidate reclaimable slabs from the end of the partial
5481 		 * slab list (scan at most kmem_reclaim_scan_range slabs to find
5482 		 * reclaimable slabs). Keep track of how many candidate slabs we
5483 		 * looked for and how many we actually found so we can adjust
5484 		 * the definition of a candidate slab if we're having trouble
5485 		 * finding them.
5486 		 *
5487 		 * kmem_move_buffers() drops and reacquires cache_lock.
5488 		 */
5489 		KMEM_STAT_ADD(kmem_move_stats.kms_scans);
5490 		kmd->kmd_scans++;
5491 		slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
5492 		    kmem_reclaim_max_slabs, 0);
5493 		if (slabs_found >= 0) {
5494 			kmd->kmd_slabs_sought += kmem_reclaim_max_slabs;
5495 			kmd->kmd_slabs_found += slabs_found;
5496 		}
5497 
5498 		if (++kmd->kmd_tries >= kmem_reclaim_scan_range) {
5499 			kmd->kmd_tries = 0;
5500 
5501 			/*
5502 			 * If we had difficulty finding candidate slabs in
5503 			 * previous scans, adjust the threshold so that
5504 			 * candidates are easier to find.
5505 			 */
5506 			if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) {
5507 				kmem_adjust_reclaim_threshold(kmd, -1);
5508 			} else if ((kmd->kmd_slabs_found * 2) <
5509 			    kmd->kmd_slabs_sought) {
5510 				kmem_adjust_reclaim_threshold(kmd, 1);
5511 			}
5512 			kmd->kmd_slabs_sought = 0;
5513 			kmd->kmd_slabs_found = 0;
5514 		}
5515 	} else {
5516 		kmem_reset_reclaim_threshold(cp->cache_defrag);
5517 #ifdef	DEBUG
5518 		if (!avl_is_empty(&cp->cache_partial_slabs)) {
5519 			/*
5520 			 * In a debug kernel we want the consolidator to
5521 			 * run occasionally even when there is plenty of
5522 			 * memory.
5523 			 */
5524 			uint16_t debug_rand;
5525 
5526 			(void) random_get_bytes((uint8_t *)&debug_rand, 2);
5527 			if (!kmem_move_noreap &&
5528 			    ((debug_rand % kmem_mtb_reap) == 0)) {
5529 				mutex_exit(&cp->cache_lock);
5530 				KMEM_STAT_ADD(kmem_move_stats.kms_debug_reaps);
5531 				kmem_cache_reap(cp);
5532 				return;
5533 			} else if ((debug_rand % kmem_mtb_move) == 0) {
5534 				KMEM_STAT_ADD(kmem_move_stats.kms_scans);
5535 				KMEM_STAT_ADD(kmem_move_stats.kms_debug_scans);
5536 				kmd->kmd_scans++;
5537 				(void) kmem_move_buffers(cp,
5538 				    kmem_reclaim_scan_range, 1, KMM_DEBUG);
5539 			}
5540 		}
5541 #endif	/* DEBUG */
5542 	}
5543 
5544 	mutex_exit(&cp->cache_lock);
5545 
5546 	if (reap) {
5547 		KMEM_STAT_ADD(kmem_move_stats.kms_scan_depot_ws_reaps);
5548 		kmem_depot_ws_reap(cp);
5549 	}
5550 }
5551