1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 24 */ 25 26 /* 27 * Kernel memory allocator, as described in the following two papers and a 28 * statement about the consolidator: 29 * 30 * Jeff Bonwick, 31 * The Slab Allocator: An Object-Caching Kernel Memory Allocator. 32 * Proceedings of the Summer 1994 Usenix Conference. 33 * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf. 34 * 35 * Jeff Bonwick and Jonathan Adams, 36 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and 37 * Arbitrary Resources. 38 * Proceedings of the 2001 Usenix Conference. 39 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf. 40 * 41 * kmem Slab Consolidator Big Theory Statement: 42 * 43 * 1. Motivation 44 * 45 * As stated in Bonwick94, slabs provide the following advantages over other 46 * allocation structures in terms of memory fragmentation: 47 * 48 * - Internal fragmentation (per-buffer wasted space) is minimal. 49 * - Severe external fragmentation (unused buffers on the free list) is 50 * unlikely. 51 * 52 * Segregating objects by size eliminates one source of external fragmentation, 53 * and according to Bonwick: 54 * 55 * The other reason that slabs reduce external fragmentation is that all 56 * objects in a slab are of the same type, so they have the same lifetime 57 * distribution. The resulting segregation of short-lived and long-lived 58 * objects at slab granularity reduces the likelihood of an entire page being 59 * held hostage due to a single long-lived allocation [Barrett93, Hanson90]. 60 * 61 * While unlikely, severe external fragmentation remains possible. Clients that 62 * allocate both short- and long-lived objects from the same cache cannot 63 * anticipate the distribution of long-lived objects within the allocator's slab 64 * implementation. Even a small percentage of long-lived objects distributed 65 * randomly across many slabs can lead to a worst case scenario where the client 66 * frees the majority of its objects and the system gets back almost none of the 67 * slabs. Despite the client doing what it reasonably can to help the system 68 * reclaim memory, the allocator cannot shake free enough slabs because of 69 * lonely allocations stubbornly hanging on. Although the allocator is in a 70 * position to diagnose the fragmentation, there is nothing that the allocator 71 * by itself can do about it. It only takes a single allocated object to prevent 72 * an entire slab from being reclaimed, and any object handed out by 73 * kmem_cache_alloc() is by definition in the client's control. 
Conversely, 74 * although the client is in a position to move a long-lived object, it has no 75 * way of knowing if the object is causing fragmentation, and if so, where to 76 * move it. A solution necessarily requires further cooperation between the 77 * allocator and the client. 78 * 79 * 2. Move Callback 80 * 81 * The kmem slab consolidator therefore adds a move callback to the 82 * allocator/client interface, improving worst-case external fragmentation in 83 * kmem caches that supply a function to move objects from one memory location 84 * to another. In a situation of low memory kmem attempts to consolidate all of 85 * a cache's slabs at once; otherwise it works slowly to bring external 86 * fragmentation within the 1/8 limit guaranteed for internal fragmentation, 87 * thereby helping to avoid a low memory situation in the future. 88 * 89 * The callback has the following signature: 90 * 91 * kmem_cbrc_t move(void *old, void *new, size_t size, void *user_arg) 92 * 93 * It supplies the kmem client with two addresses: the allocated object that 94 * kmem wants to move and a buffer selected by kmem for the client to use as the 95 * copy destination. The callback is kmem's way of saying "Please get off of 96 * this buffer and use this one instead." kmem knows where it wants to move the 97 * object in order to best reduce fragmentation. All the client needs to know 98 * about the second argument (void *new) is that it is an allocated, constructed 99 * object ready to take the contents of the old object. When the move function 100 * is called, the system is likely to be low on memory, and the new object 101 * spares the client from having to worry about allocating memory for the 102 * requested move. The third argument supplies the size of the object, in case a 103 * single move function handles multiple caches whose objects differ only in 104 * size (such as zio_buf_512, zio_buf_1024, etc). Finally, the same optional 105 * user argument passed to the constructor, destructor, and reclaim functions is 106 * also passed to the move callback. 107 * 108 * 2.1 Setting the Move Callback 109 * 110 * The client sets the move callback after creating the cache and before 111 * allocating from it: 112 * 113 * object_cache = kmem_cache_create(...); 114 * kmem_cache_set_move(object_cache, object_move); 115 * 116 * 2.2 Move Callback Return Values 117 * 118 * Only the client knows about its own data and when is a good time to move it. 119 * The client is cooperating with kmem to return unused memory to the system, 120 * and kmem respectfully accepts this help at the client's convenience. When 121 * asked to move an object, the client can respond with any of the following: 122 * 123 * typedef enum kmem_cbrc { 124 * KMEM_CBRC_YES, 125 * KMEM_CBRC_NO, 126 * KMEM_CBRC_LATER, 127 * KMEM_CBRC_DONT_NEED, 128 * KMEM_CBRC_DONT_KNOW 129 * } kmem_cbrc_t; 130 * 131 * The client must not explicitly kmem_cache_free() either of the objects passed 132 * to the callback, since kmem wants to free them directly to the slab layer 133 * (bypassing the per-CPU magazine layer). The response tells kmem which of the 134 * objects to free: 135 * 136 * YES: (Did it) The client moved the object, so kmem frees the old one. 137 * NO: (Never) The client refused, so kmem frees the new object (the 138 * unused copy destination). kmem also marks the slab of the old 139 * object so as not to bother the client with further callbacks for 140 * that object as long as the slab remains on the partial slab list. 
141 * (The system won't be getting the slab back as long as the
142 * immovable object holds it hostage, so there's no point in moving
143 * any of its objects.)
144 * LATER: The client is using the object and cannot move it now, so kmem
145 * frees the new object (the unused copy destination). kmem still
146 * attempts to move other objects off the slab, since it expects to
147 * succeed in clearing the slab in a later callback. The client
148 * should use LATER instead of NO if the object is likely to become
149 * movable very soon.
150 * DONT_NEED: The client no longer needs the object, so kmem frees the old along
151 * with the new object (the unused copy destination). This response
152 * is the client's opportunity to be a model citizen and give back as
153 * much as it can.
154 * DONT_KNOW: The client does not know about the object because
155 * a) the client has just allocated the object and not yet put it
156 * wherever it expects to find known objects
157 * b) the client has removed the object from wherever it expects to
158 * find known objects and is about to free it, or
159 * c) the client has freed the object.
160 * In all these cases (a, b, and c) kmem frees the new object (the
161 * unused copy destination) and searches for the old object in the
162 * magazine layer. If found, the object is removed from the magazine
163 * layer and freed to the slab layer so it will no longer hold the
164 * slab hostage.
165 *
166 * 2.3 Object States
167 *
168 * Neither kmem nor the client can be assumed to know the object's whereabouts
169 * at the time of the callback. An object belonging to a kmem cache may be in
170 * any of the following states:
171 *
172 * 1. Uninitialized on the slab
173 * 2. Allocated from the slab but not constructed (still uninitialized)
174 * 3. Allocated from the slab, constructed, but not yet ready for business
175 * (not in a valid state for the move callback)
176 * 4. In use (valid and known to the client)
177 * 5. About to be freed (no longer in a valid state for the move callback)
178 * 6. Freed to a magazine (still constructed)
179 * 7. Allocated from a magazine, not yet ready for business (not in a valid
180 * state for the move callback), and about to return to state #4
181 * 8. Deconstructed on a magazine that is about to be freed
182 * 9. Freed to the slab
183 *
184 * Since the move callback may be called at any time while the object is in any
185 * of the above states (except state #1), the client needs a safe way to
186 * determine whether or not it knows about the object. Specifically, the client
187 * needs to know whether or not the object is in state #4, the only state in
188 * which a move is valid. If the object is in any other state, the client should
189 * immediately return KMEM_CBRC_DONT_KNOW, since it is unsafe to access any of
190 * the object's fields.
191 *
192 * Note that although an object may be in state #4 when kmem initiates the move
193 * request, the object may no longer be in that state by the time kmem actually
194 * calls the move function. Not only does the client free objects
195 * asynchronously, kmem itself puts move requests on a queue where they are
196 * pending until kmem processes them from another context. Also, objects freed
197 * to a magazine appear allocated from the point of view of the slab layer, so
198 * kmem may even initiate requests for objects in a state other than state #4.
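 *
 * Tying sections 2.2 and 2.3 together: a move callback typically begins by
 * classifying the object and mapping that classification onto a return value.
 * The skeleton below is only an illustrative sketch; object_is_known(),
 * object_is_busy(), and object_forget() are hypothetical helpers standing in
 * for the cache-specific tests developed in sections 2.4 and 2.5:
 *
 *	static kmem_cbrc_t
 *	object_move(void *old, void *new, size_t size, void *user_arg)
 *	{
 *		object_t *op = old;
 *
 *		if (!object_is_known(op))	// not in state #4
 *			return (KMEM_CBRC_DONT_KNOW);
 *		if (object_is_busy(op))		// movable, just not right now
 *			return (KMEM_CBRC_LATER);
 *		bcopy(old, new, size);		// migrate the contents
 *		object_forget(op);		// invalidate the old copy
 *		return (KMEM_CBRC_YES);		// kmem frees the old object
 *	}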
199 * 200 * 2.3.1 Magazine Layer 201 * 202 * An important insight revealed by the states listed above is that the magazine 203 * layer is populated only by kmem_cache_free(). Magazines of constructed 204 * objects are never populated directly from the slab layer (which contains raw, 205 * unconstructed objects). Whenever an allocation request cannot be satisfied 206 * from the magazine layer, the magazines are bypassed and the request is 207 * satisfied from the slab layer (creating a new slab if necessary). kmem calls 208 * the object constructor only when allocating from the slab layer, and only in 209 * response to kmem_cache_alloc() or to prepare the destination buffer passed in 210 * the move callback. kmem does not preconstruct objects in anticipation of 211 * kmem_cache_alloc(). 212 * 213 * 2.3.2 Object Constructor and Destructor 214 * 215 * If the client supplies a destructor, it must be valid to call the destructor 216 * on a newly created object (immediately after the constructor). 217 * 218 * 2.4 Recognizing Known Objects 219 * 220 * There is a simple test to determine safely whether or not the client knows 221 * about a given object in the move callback. It relies on the fact that kmem 222 * guarantees that the object of the move callback has only been touched by the 223 * client itself or else by kmem. kmem does this by ensuring that none of the 224 * cache's slabs are freed to the virtual memory (VM) subsystem while a move 225 * callback is pending. When the last object on a slab is freed, if there is a 226 * pending move, kmem puts the slab on a per-cache dead list and defers freeing 227 * slabs on that list until all pending callbacks are completed. That way, 228 * clients can be certain that the object of a move callback is in one of the 229 * states listed above, making it possible to distinguish known objects (in 230 * state #4) using the two low order bits of any pointer member (with the 231 * exception of 'char *' or 'short *' which may not be 4-byte aligned on some 232 * platforms). 233 * 234 * The test works as long as the client always transitions objects from state #4 235 * (known, in use) to state #5 (about to be freed, invalid) by setting the low 236 * order bit of the client-designated pointer member. Since kmem only writes 237 * invalid memory patterns, such as 0xbaddcafe to uninitialized memory and 238 * 0xdeadbeef to freed memory, any scribbling on the object done by kmem is 239 * guaranteed to set at least one of the two low order bits. Therefore, given an 240 * object with a back pointer to a 'container_t *o_container', the client can 241 * test 242 * 243 * container_t *container = object->o_container; 244 * if ((uintptr_t)container & 0x3) { 245 * return (KMEM_CBRC_DONT_KNOW); 246 * } 247 * 248 * Typically, an object will have a pointer to some structure with a list or 249 * hash where objects from the cache are kept while in use. Assuming that the 250 * client has some way of knowing that the container structure is valid and will 251 * not go away during the move, and assuming that the structure includes a lock 252 * to protect whatever collection is used, then the client would continue as 253 * follows: 254 * 255 * // Ensure that the container structure does not go away. 
256 * if (container_hold(container) == 0) { 257 * return (KMEM_CBRC_DONT_KNOW); 258 * } 259 * mutex_enter(&container->c_objects_lock); 260 * if (container != object->o_container) { 261 * mutex_exit(&container->c_objects_lock); 262 * container_rele(container); 263 * return (KMEM_CBRC_DONT_KNOW); 264 * } 265 * 266 * At this point the client knows that the object cannot be freed as long as 267 * c_objects_lock is held. Note that after acquiring the lock, the client must 268 * recheck the o_container pointer in case the object was removed just before 269 * acquiring the lock. 270 * 271 * When the client is about to free an object, it must first remove that object 272 * from the list, hash, or other structure where it is kept. At that time, to 273 * mark the object so it can be distinguished from the remaining, known objects, 274 * the client sets the designated low order bit: 275 * 276 * mutex_enter(&container->c_objects_lock); 277 * object->o_container = (void *)((uintptr_t)object->o_container | 0x1); 278 * list_remove(&container->c_objects, object); 279 * mutex_exit(&container->c_objects_lock); 280 * 281 * In the common case, the object is freed to the magazine layer, where it may 282 * be reused on a subsequent allocation without the overhead of calling the 283 * constructor. While in the magazine it appears allocated from the point of 284 * view of the slab layer, making it a candidate for the move callback. Most 285 * objects unrecognized by the client in the move callback fall into this 286 * category and are cheaply distinguished from known objects by the test 287 * described earlier. Since recognition is cheap for the client, and searching 288 * magazines is expensive for kmem, kmem defers searching until the client first 289 * returns KMEM_CBRC_DONT_KNOW. As long as the needed effort is reasonable, kmem 290 * elsewhere does what it can to avoid bothering the client unnecessarily. 291 * 292 * Invalidating the designated pointer member before freeing the object marks 293 * the object to be avoided in the callback, and conversely, assigning a valid 294 * value to the designated pointer member after allocating the object makes the 295 * object fair game for the callback: 296 * 297 * ... allocate object ... 298 * ... set any initial state not set by the constructor ... 299 * 300 * mutex_enter(&container->c_objects_lock); 301 * list_insert_tail(&container->c_objects, object); 302 * membar_producer(); 303 * object->o_container = container; 304 * mutex_exit(&container->c_objects_lock); 305 * 306 * Note that everything else must be valid before setting o_container makes the 307 * object fair game for the move callback. The membar_producer() call ensures 308 * that all the object's state is written to memory before setting the pointer 309 * that transitions the object from state #3 or #7 (allocated, constructed, not 310 * yet in use) to state #4 (in use, valid). That's important because the move 311 * function has to check the validity of the pointer before it can safely 312 * acquire the lock protecting the collection where it expects to find known 313 * objects. 314 * 315 * This method of distinguishing known objects observes the usual symmetry: 316 * invalidating the designated pointer is the first thing the client does before 317 * freeing the object, and setting the designated pointer is the last thing the 318 * client does after allocating the object. Of course, the client is not 319 * required to use this method. 
Fundamentally, how the client recognizes known 320 * objects is completely up to the client, but this method is recommended as an 321 * efficient and safe way to take advantage of the guarantees made by kmem. If 322 * the entire object is arbitrary data without any markable bits from a suitable 323 * pointer member, then the client must find some other method, such as 324 * searching a hash table of known objects. 325 * 326 * 2.5 Preventing Objects From Moving 327 * 328 * Besides a way to distinguish known objects, the other thing that the client 329 * needs is a strategy to ensure that an object will not move while the client 330 * is actively using it. The details of satisfying this requirement tend to be 331 * highly cache-specific. It might seem that the same rules that let a client 332 * remove an object safely should also decide when an object can be moved 333 * safely. However, any object state that makes a removal attempt invalid is 334 * likely to be long-lasting for objects that the client does not expect to 335 * remove. kmem knows nothing about the object state and is equally likely (from 336 * the client's point of view) to request a move for any object in the cache, 337 * whether prepared for removal or not. Even a low percentage of objects stuck 338 * in place by unremovability will defeat the consolidator if the stuck objects 339 * are the same long-lived allocations likely to hold slabs hostage. 340 * Fundamentally, the consolidator is not aimed at common cases. Severe external 341 * fragmentation is a worst case scenario manifested as sparsely allocated 342 * slabs, by definition a low percentage of the cache's objects. When deciding 343 * what makes an object movable, keep in mind the goal of the consolidator: to 344 * bring worst-case external fragmentation within the limits guaranteed for 345 * internal fragmentation. Removability is a poor criterion if it is likely to 346 * exclude more than an insignificant percentage of objects for long periods of 347 * time. 348 * 349 * A tricky general solution exists, and it has the advantage of letting you 350 * move any object at almost any moment, practically eliminating the likelihood 351 * that an object can hold a slab hostage. However, if there is a cache-specific 352 * way to ensure that an object is not actively in use in the vast majority of 353 * cases, a simpler solution that leverages this cache-specific knowledge is 354 * preferred. 355 * 356 * 2.5.1 Cache-Specific Solution 357 * 358 * As an example of a cache-specific solution, the ZFS znode cache takes 359 * advantage of the fact that the vast majority of znodes are only being 360 * referenced from the DNLC. (A typical case might be a few hundred in active 361 * use and a hundred thousand in the DNLC.) In the move callback, after the ZFS 362 * client has established that it recognizes the znode and can access its fields 363 * safely (using the method described earlier), it then tests whether the znode 364 * is referenced by anything other than the DNLC. If so, it assumes that the 365 * znode may be in active use and is unsafe to move, so it drops its locks and 366 * returns KMEM_CBRC_LATER. The advantage of this strategy is that everywhere 367 * else znodes are used, no change is needed to protect against the possibility 368 * of the znode moving. The disadvantage is that it remains possible for an 369 * application to hold a znode slab hostage with an open file descriptor. 
370 * However, this case ought to be rare and the consolidator has a way to deal 371 * with it: If the client responds KMEM_CBRC_LATER repeatedly for the same 372 * object, kmem eventually stops believing it and treats the slab as if the 373 * client had responded KMEM_CBRC_NO. Having marked the hostage slab, kmem can 374 * then focus on getting it off of the partial slab list by allocating rather 375 * than freeing all of its objects. (Either way of getting a slab off the 376 * free list reduces fragmentation.) 377 * 378 * 2.5.2 General Solution 379 * 380 * The general solution, on the other hand, requires an explicit hold everywhere 381 * the object is used to prevent it from moving. To keep the client locking 382 * strategy as uncomplicated as possible, kmem guarantees the simplifying 383 * assumption that move callbacks are sequential, even across multiple caches. 384 * Internally, a global queue processed by a single thread supports all caches 385 * implementing the callback function. No matter how many caches supply a move 386 * function, the consolidator never moves more than one object at a time, so the 387 * client does not have to worry about tricky lock ordering involving several 388 * related objects from different kmem caches. 389 * 390 * The general solution implements the explicit hold as a read-write lock, which 391 * allows multiple readers to access an object from the cache simultaneously 392 * while a single writer is excluded from moving it. A single rwlock for the 393 * entire cache would lock out all threads from using any of the cache's objects 394 * even though only a single object is being moved, so to reduce contention, 395 * the client can fan out the single rwlock into an array of rwlocks hashed by 396 * the object address, making it probable that moving one object will not 397 * prevent other threads from using a different object. The rwlock cannot be a 398 * member of the object itself, because the possibility of the object moving 399 * makes it unsafe to access any of the object's fields until the lock is 400 * acquired. 401 * 402 * Assuming a small, fixed number of locks, it's possible that multiple objects 403 * will hash to the same lock. A thread that needs to use multiple objects in 404 * the same function may acquire the same lock multiple times. Since rwlocks are 405 * reentrant for readers, and since there is never more than a single writer at 406 * a time (assuming that the client acquires the lock as a writer only when 407 * moving an object inside the callback), there would seem to be no problem. 408 * However, a client locking multiple objects in the same function must handle 409 * one case of potential deadlock: Assume that thread A needs to prevent both 410 * object 1 and object 2 from moving, and thread B, the callback, meanwhile 411 * tries to move object 3. It's possible, if objects 1, 2, and 3 all hash to the 412 * same lock, that thread A will acquire the lock for object 1 as a reader 413 * before thread B sets the lock's write-wanted bit, preventing thread A from 414 * reacquiring the lock for object 2 as a reader. Unable to make forward 415 * progress, thread A will never release the lock for object 1, resulting in 416 * deadlock. 417 * 418 * There are two ways of avoiding the deadlock just described. The first is to 419 * use rw_tryenter() rather than rw_enter() in the callback function when 420 * attempting to acquire the lock as a writer. 
If tryenter discovers that the 421 * same object (or another object hashed to the same lock) is already in use, it 422 * aborts the callback and returns KMEM_CBRC_LATER. The second way is to use 423 * rprwlock_t (declared in common/fs/zfs/sys/rprwlock.h) instead of rwlock_t, 424 * since it allows a thread to acquire the lock as a reader in spite of a 425 * waiting writer. This second approach insists on moving the object now, no 426 * matter how many readers the move function must wait for in order to do so, 427 * and could delay the completion of the callback indefinitely (blocking 428 * callbacks to other clients). In practice, a less insistent callback using 429 * rw_tryenter() returns KMEM_CBRC_LATER infrequently enough that there seems 430 * little reason to use anything else. 431 * 432 * Avoiding deadlock is not the only problem that an implementation using an 433 * explicit hold needs to solve. Locking the object in the first place (to 434 * prevent it from moving) remains a problem, since the object could move 435 * between the time you obtain a pointer to the object and the time you acquire 436 * the rwlock hashed to that pointer value. Therefore the client needs to 437 * recheck the value of the pointer after acquiring the lock, drop the lock if 438 * the value has changed, and try again. This requires a level of indirection: 439 * something that points to the object rather than the object itself, that the 440 * client can access safely while attempting to acquire the lock. (The object 441 * itself cannot be referenced safely because it can move at any time.) 442 * The following lock-acquisition function takes whatever is safe to reference 443 * (arg), follows its pointer to the object (using function f), and tries as 444 * often as necessary to acquire the hashed lock and verify that the object 445 * still has not moved: 446 * 447 * object_t * 448 * object_hold(object_f f, void *arg) 449 * { 450 * object_t *op; 451 * 452 * op = f(arg); 453 * if (op == NULL) { 454 * return (NULL); 455 * } 456 * 457 * rw_enter(OBJECT_RWLOCK(op), RW_READER); 458 * while (op != f(arg)) { 459 * rw_exit(OBJECT_RWLOCK(op)); 460 * op = f(arg); 461 * if (op == NULL) { 462 * break; 463 * } 464 * rw_enter(OBJECT_RWLOCK(op), RW_READER); 465 * } 466 * 467 * return (op); 468 * } 469 * 470 * The OBJECT_RWLOCK macro hashes the object address to obtain the rwlock. The 471 * lock reacquisition loop, while necessary, almost never executes. The function 472 * pointer f (used to obtain the object pointer from arg) has the following type 473 * definition: 474 * 475 * typedef object_t *(*object_f)(void *arg); 476 * 477 * An object_f implementation is likely to be as simple as accessing a structure 478 * member: 479 * 480 * object_t * 481 * s_object(void *arg) 482 * { 483 * something_t *sp = arg; 484 * return (sp->s_object); 485 * } 486 * 487 * The flexibility of a function pointer allows the path to the object to be 488 * arbitrarily complex and also supports the notion that depending on where you 489 * are using the object, you may need to get it from someplace different. 490 * 491 * The function that releases the explicit hold is simpler because it does not 492 * have to worry about the object moving: 493 * 494 * void 495 * object_rele(object_t *op) 496 * { 497 * rw_exit(OBJECT_RWLOCK(op)); 498 * } 499 * 500 * The caller is spared these details so that obtaining and releasing an 501 * explicit hold feels like a simple mutex_enter()/mutex_exit() pair. 
The caller
502 * of object_hold() only needs to know that the returned object pointer is valid
503 * if not NULL and that the object will not move until released.
504 *
505 * Although object_hold() prevents an object from moving, it does not prevent it
506 * from being freed. The caller must take measures before calling object_hold()
507 * (afterwards is too late) to ensure that the held object cannot be freed. The
508 * caller must do so without accessing the unsafe object reference, so any lock
509 * or reference count used to ensure the continued existence of the object must
510 * live outside the object itself.
511 *
512 * Obtaining a new object is a special case where an explicit hold is impossible
513 * for the caller. Any function that returns a newly allocated object (either as
514 * a return value, or as an in-out parameter) must return it already held; once
515 * the caller has it, it is too late, since the object cannot be safely accessed
516 * without the level of indirection described earlier. The following
517 * object_alloc() example uses the same code shown earlier to transition a new
518 * object into the state of being recognized (by the client) as a known object.
519 * The function must acquire the hold (rw_enter) before that state transition
520 * makes the object movable:
521 *
522 * static object_t *
523 * object_alloc(container_t *container)
524 * {
525 * object_t *object = kmem_cache_alloc(object_cache, 0);
526 * ... set any initial state not set by the constructor ...
527 * rw_enter(OBJECT_RWLOCK(object), RW_READER);
528 * mutex_enter(&container->c_objects_lock);
529 * list_insert_tail(&container->c_objects, object);
530 * membar_producer();
531 * object->o_container = container;
532 * mutex_exit(&container->c_objects_lock);
533 * return (object);
534 * }
535 *
536 * Functions that implicitly acquire an object hold (any function that calls
537 * object_alloc() to supply an object for the caller) need to be carefully noted
538 * so that the matching object_rele() is not neglected. Otherwise, leaked holds
539 * prevent all objects hashed to the affected rwlocks from ever being moved.
540 *
541 * The pointer to a held object can be hashed to the holding rwlock even after
542 * the object has been freed. Although it is possible to release the hold
543 * after freeing the object, you may decide to release the hold implicitly in
544 * whatever function frees the object, so as to release the hold as soon as
545 * possible, and for the sake of symmetry with the function that implicitly
546 * acquires the hold when it allocates the object. Here, object_free() releases
547 * the hold acquired by object_alloc(). Its implicit object_rele() forms a
548 * matching pair with object_hold():
549 *
550 * void
551 * object_free(object_t *object)
552 * {
553 * container_t *container;
554 *
555 * ASSERT(object_held(object));
556 * container = object->o_container;
557 * mutex_enter(&container->c_objects_lock);
558 * object->o_container =
559 * (void *)((uintptr_t)object->o_container | 0x1);
560 * list_remove(&container->c_objects, object);
561 * mutex_exit(&container->c_objects_lock);
562 * object_rele(object);
563 * kmem_cache_free(object_cache, object);
564 * }
565 *
566 * Note that object_free() cannot safely accept an object pointer as an argument
567 * unless the object is already held. Any function that calls object_free()
568 * needs to be carefully noted since it similarly forms a matching pair with
569 * object_hold().
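 *
 * The examples above leave OBJECT_RWLOCK() and object_held() undefined. A
 * minimal sketch, assuming the client keeps a small, fixed, power-of-two sized
 * array of rwlocks of its own (the array size and hash below are arbitrary
 * illustrations, not part of the kmem interface), might look like:
 *
 *	#define	OBJECT_LOCK_COUNT	64	// must be a power of two
 *	static krwlock_t object_rwlock[OBJECT_LOCK_COUNT];
 *
 *	#define	OBJECT_RWLOCK(op)	(&object_rwlock[ \
 *	    ((uintptr_t)(op) >> 6) & (OBJECT_LOCK_COUNT - 1)])
 *
 *	static int
 *	object_held(object_t *op)
 *	{
 *		// Suitable only for ASSERT()s: RW_READ_HELD() reports that
 *		// some reader holds the lock, not necessarily this thread.
 *		return (RW_READ_HELD(OBJECT_RWLOCK(op)));
 *	}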
570 * 571 * To complete the picture, the following callback function implements the 572 * general solution by moving objects only if they are currently unheld: 573 * 574 * static kmem_cbrc_t 575 * object_move(void *buf, void *newbuf, size_t size, void *arg) 576 * { 577 * object_t *op = buf, *np = newbuf; 578 * container_t *container; 579 * 580 * container = op->o_container; 581 * if ((uintptr_t)container & 0x3) { 582 * return (KMEM_CBRC_DONT_KNOW); 583 * } 584 * 585 * // Ensure that the container structure does not go away. 586 * if (container_hold(container) == 0) { 587 * return (KMEM_CBRC_DONT_KNOW); 588 * } 589 * 590 * mutex_enter(&container->c_objects_lock); 591 * if (container != op->o_container) { 592 * mutex_exit(&container->c_objects_lock); 593 * container_rele(container); 594 * return (KMEM_CBRC_DONT_KNOW); 595 * } 596 * 597 * if (rw_tryenter(OBJECT_RWLOCK(op), RW_WRITER) == 0) { 598 * mutex_exit(&container->c_objects_lock); 599 * container_rele(container); 600 * return (KMEM_CBRC_LATER); 601 * } 602 * 603 * object_move_impl(op, np); // critical section 604 * rw_exit(OBJECT_RWLOCK(op)); 605 * 606 * op->o_container = (void *)((uintptr_t)op->o_container | 0x1); 607 * list_link_replace(&op->o_link_node, &np->o_link_node); 608 * mutex_exit(&container->c_objects_lock); 609 * container_rele(container); 610 * return (KMEM_CBRC_YES); 611 * } 612 * 613 * Note that object_move() must invalidate the designated o_container pointer of 614 * the old object in the same way that object_free() does, since kmem will free 615 * the object in response to the KMEM_CBRC_YES return value. 616 * 617 * The lock order in object_move() differs from object_alloc(), which locks 618 * OBJECT_RWLOCK first and &container->c_objects_lock second, but as long as the 619 * callback uses rw_tryenter() (preventing the deadlock described earlier), it's 620 * not a problem. Holding the lock on the object list in the example above 621 * through the entire callback not only prevents the object from going away, it 622 * also allows you to lock the list elsewhere and know that none of its elements 623 * will move during iteration. 624 * 625 * Adding an explicit hold everywhere an object from the cache is used is tricky 626 * and involves much more change to client code than a cache-specific solution 627 * that leverages existing state to decide whether or not an object is 628 * movable. However, this approach has the advantage that no object remains 629 * immovable for any significant length of time, making it extremely unlikely 630 * that long-lived allocations can continue holding slabs hostage; and it works 631 * for any cache. 632 * 633 * 3. Consolidator Implementation 634 * 635 * Once the client supplies a move function that a) recognizes known objects and 636 * b) avoids moving objects that are actively in use, the remaining work is up 637 * to the consolidator to decide which objects to move and when to issue 638 * callbacks. 639 * 640 * The consolidator relies on the fact that a cache's slabs are ordered by 641 * usage. Each slab has a fixed number of objects. Depending on the slab's 642 * "color" (the offset of the first object from the beginning of the slab; 643 * offsets are staggered to mitigate false sharing of cache lines) it is either 644 * the maximum number of objects per slab determined at cache creation time or 645 * else the number closest to the maximum that fits within the space remaining 646 * after the initial offset. 
A completely allocated slab may contribute some 647 * internal fragmentation (per-slab overhead) but no external fragmentation, so 648 * it is of no interest to the consolidator. At the other extreme, slabs whose 649 * objects have all been freed to the slab are released to the virtual memory 650 * (VM) subsystem (objects freed to magazines are still allocated as far as the 651 * slab is concerned). External fragmentation exists when there are slabs 652 * somewhere between these extremes. A partial slab has at least one but not all 653 * of its objects allocated. The more partial slabs, and the fewer allocated 654 * objects on each of them, the higher the fragmentation. Hence the 655 * consolidator's overall strategy is to reduce the number of partial slabs by 656 * moving allocated objects from the least allocated slabs to the most allocated 657 * slabs. 658 * 659 * Partial slabs are kept in an AVL tree ordered by usage. Completely allocated 660 * slabs are kept separately in an unordered list. Since the majority of slabs 661 * tend to be completely allocated (a typical unfragmented cache may have 662 * thousands of complete slabs and only a single partial slab), separating 663 * complete slabs improves the efficiency of partial slab ordering, since the 664 * complete slabs do not affect the depth or balance of the AVL tree. This 665 * ordered sequence of partial slabs acts as a "free list" supplying objects for 666 * allocation requests. 667 * 668 * Objects are always allocated from the first partial slab in the free list, 669 * where the allocation is most likely to eliminate a partial slab (by 670 * completely allocating it). Conversely, when a single object from a completely 671 * allocated slab is freed to the slab, that slab is added to the front of the 672 * free list. Since most free list activity involves highly allocated slabs 673 * coming and going at the front of the list, slabs tend naturally toward the 674 * ideal order: highly allocated at the front, sparsely allocated at the back. 675 * Slabs with few allocated objects are likely to become completely free if they 676 * keep a safe distance away from the front of the free list. Slab misorders 677 * interfere with the natural tendency of slabs to become completely free or 678 * completely allocated. For example, a slab with a single allocated object 679 * needs only a single free to escape the cache; its natural desire is 680 * frustrated when it finds itself at the front of the list where a second 681 * allocation happens just before the free could have released it. Another slab 682 * with all but one object allocated might have supplied the buffer instead, so 683 * that both (as opposed to neither) of the slabs would have been taken off the 684 * free list. 685 * 686 * Although slabs tend naturally toward the ideal order, misorders allowed by a 687 * simple list implementation defeat the consolidator's strategy of merging 688 * least- and most-allocated slabs. Without an AVL tree to guarantee order, kmem 689 * needs another way to fix misorders to optimize its callback strategy. One 690 * approach is to periodically scan a limited number of slabs, advancing a 691 * marker to hold the current scan position, and to move extreme misorders to 692 * the front or back of the free list and to the front or back of the current 693 * scan range. By making consecutive scan ranges overlap by one slab, the least 694 * allocated slab in the current range can be carried along from the end of one 695 * scan to the start of the next. 
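 *
 * The AVL alternative described next orders partial slabs by usage. Stripped
 * of the binning refinement discussed below, the ordering amounts to "more
 * allocated sorts toward the front", with ties broken by address so that no
 * two slabs ever compare equal. A simplified sketch (not the actual compare
 * function, which also bins counts and makes slabs marked non-reclaimable
 * appear more allocated; slab_refcnt is the number of allocated buffers on
 * the slab):
 *
 *	static int
 *	partial_slab_usage_cmp(const void *l, const void *r)
 *	{
 *		const kmem_slab_t *s0 = l;
 *		const kmem_slab_t *s1 = r;
 *
 *		if (s0->slab_refcnt > s1->slab_refcnt)
 *			return (-1);		// more allocated: earlier
 *		if (s0->slab_refcnt < s1->slab_refcnt)
 *			return (1);
 *		if (s0 == s1)
 *			return (0);
 *		return ((uintptr_t)s0 < (uintptr_t)s1 ? -1 : 1);
 *	}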
696 * 697 * Maintaining partial slabs in an AVL tree relieves kmem of this additional 698 * task, however. Since most of the cache's activity is in the magazine layer, 699 * and allocations from the slab layer represent only a startup cost, the 700 * overhead of maintaining a balanced tree is not a significant concern compared 701 * to the opportunity of reducing complexity by eliminating the partial slab 702 * scanner just described. The overhead of an AVL tree is minimized by 703 * maintaining only partial slabs in the tree and keeping completely allocated 704 * slabs separately in a list. To avoid increasing the size of the slab 705 * structure the AVL linkage pointers are reused for the slab's list linkage, 706 * since the slab will always be either partial or complete, never stored both 707 * ways at the same time. To further minimize the overhead of the AVL tree the 708 * compare function that orders partial slabs by usage divides the range of 709 * allocated object counts into bins such that counts within the same bin are 710 * considered equal. Binning partial slabs makes it less likely that allocating 711 * or freeing a single object will change the slab's order, requiring a tree 712 * reinsertion (an avl_remove() followed by an avl_add(), both potentially 713 * requiring some rebalancing of the tree). Allocation counts closest to 714 * completely free and completely allocated are left unbinned (finely sorted) to 715 * better support the consolidator's strategy of merging slabs at either 716 * extreme. 717 * 718 * 3.1 Assessing Fragmentation and Selecting Candidate Slabs 719 * 720 * The consolidator piggybacks on the kmem maintenance thread and is called on 721 * the same interval as kmem_cache_update(), once per cache every fifteen 722 * seconds. kmem maintains a running count of unallocated objects in the slab 723 * layer (cache_bufslab). The consolidator checks whether that number exceeds 724 * 12.5% (1/8) of the total objects in the cache (cache_buftotal), and whether 725 * there is a significant number of slabs in the cache (arbitrarily a minimum 726 * 101 total slabs). Unused objects that have fallen out of the magazine layer's 727 * working set are included in the assessment, and magazines in the depot are 728 * reaped if those objects would lift cache_bufslab above the fragmentation 729 * threshold. Once the consolidator decides that a cache is fragmented, it looks 730 * for a candidate slab to reclaim, starting at the end of the partial slab free 731 * list and scanning backwards. At first the consolidator is choosy: only a slab 732 * with fewer than 12.5% (1/8) of its objects allocated qualifies (or else a 733 * single allocated object, regardless of percentage). If there is difficulty 734 * finding a candidate slab, kmem raises the allocation threshold incrementally, 735 * up to a maximum 87.5% (7/8), so that eventually the consolidator will reduce 736 * external fragmentation (unused objects on the free list) below 12.5% (1/8), 737 * even in the worst case of every slab in the cache being almost 7/8 allocated. 738 * The threshold can also be lowered incrementally when candidate slabs are easy 739 * to find, and the threshold is reset to the minimum 1/8 as soon as the cache 740 * is no longer fragmented. 741 * 742 * 3.2 Generating Callbacks 743 * 744 * Once an eligible slab is chosen, a callback is generated for every allocated 745 * object on the slab, in the hope that the client will move everything off the 746 * slab and make it reclaimable. 
Objects selected as move destinations are
747 * chosen from slabs at the front of the free list. Assuming slabs in the ideal
748 * order (most allocated at the front, least allocated at the back) and a
749 * cooperative client, the consolidator will succeed in removing slabs from both
750 * ends of the free list, completely allocating on the one hand and completely
751 * freeing on the other. Objects selected as move destinations are allocated in
752 * the kmem maintenance thread where move requests are enqueued. A separate
753 * callback thread removes pending callbacks from the queue and calls the
754 * client. The separate thread ensures that client code (the move function) does
755 * not interfere with internal kmem maintenance tasks. A map of pending
756 * callbacks keyed by object address (the object to be moved) is checked to
757 * ensure that duplicate callbacks are not generated for the same object.
758 * Allocating the move destination (the object to move to) prevents subsequent
759 * callbacks from selecting the same destination as an earlier pending callback.
760 *
761 * Move requests can also be generated by kmem_cache_reap() when the system is
762 * desperate for memory and by kmem_cache_move_notify(), called by the client to
763 * notify kmem that a move refused earlier with KMEM_CBRC_LATER is now possible.
764 * The map of pending callbacks is protected by the same lock that protects the
765 * slab layer.
766 *
767 * When the system is desperate for memory, kmem does not bother to determine
768 * whether or not the cache exceeds the fragmentation threshold, but tries to
769 * consolidate as many slabs as possible. Normally, the consolidator chews
770 * slowly, one sparsely allocated slab at a time during each maintenance
771 * interval that the cache is fragmented. When desperate, the consolidator
772 * starts at the last partial slab and enqueues callbacks for every allocated
773 * object on every partial slab, working backwards until it reaches the first
774 * partial slab. The first partial slab, meanwhile, advances in pace with the
775 * consolidator as allocations to supply move destinations for the enqueued
776 * callbacks use up the highly allocated slabs at the front of the free list.
777 * Ideally, the overgrown free list collapses like an accordion, starting at
778 * both ends and ending at the center with a single partial slab.
779 *
780 * 3.3 Client Responses
781 *
782 * When the client returns KMEM_CBRC_NO in response to the move callback, kmem
783 * marks the slab that supplied the stuck object non-reclaimable and moves it to
784 * the front of the free list. The slab remains marked as long as it remains on the
785 * free list, and it appears more allocated to the partial slab compare function
786 * than any unmarked slab, no matter how many of its objects are allocated.
787 * Since even one immovable object ties up the entire slab, the goal is to
788 * completely allocate any slab that cannot be completely freed. kmem does not
789 * bother generating callbacks to move objects from a marked slab unless the
790 * system is desperate.
791 *
792 * When the client responds KMEM_CBRC_LATER, kmem increments a count for the
793 * slab. If the client responds LATER too many times, kmem disbelieves and
794 * treats the response as a NO. The count is cleared when the slab is taken off
795 * the partial slab list or when the client moves one of the slab's objects.
796 *
797 * 4.
Observability 798 * 799 * A kmem cache's external fragmentation is best observed with 'mdb -k' using 800 * the ::kmem_slabs dcmd. For a complete description of the command, enter 801 * '::help kmem_slabs' at the mdb prompt. 802 */ 803 804 #include <sys/kmem_impl.h> 805 #include <sys/vmem_impl.h> 806 #include <sys/param.h> 807 #include <sys/sysmacros.h> 808 #include <sys/vm.h> 809 #include <sys/proc.h> 810 #include <sys/tuneable.h> 811 #include <sys/systm.h> 812 #include <sys/cmn_err.h> 813 #include <sys/debug.h> 814 #include <sys/sdt.h> 815 #include <sys/mutex.h> 816 #include <sys/bitmap.h> 817 #include <sys/atomic.h> 818 #include <sys/kobj.h> 819 #include <sys/disp.h> 820 #include <vm/seg_kmem.h> 821 #include <sys/log.h> 822 #include <sys/callb.h> 823 #include <sys/taskq.h> 824 #include <sys/modctl.h> 825 #include <sys/reboot.h> 826 #include <sys/id32.h> 827 #include <sys/zone.h> 828 #include <sys/netstack.h> 829 #ifdef DEBUG 830 #include <sys/random.h> 831 #endif 832 833 extern void streams_msg_init(void); 834 extern int segkp_fromheap; 835 extern void segkp_cache_free(void); 836 extern int callout_init_done; 837 838 struct kmem_cache_kstat { 839 kstat_named_t kmc_buf_size; 840 kstat_named_t kmc_align; 841 kstat_named_t kmc_chunk_size; 842 kstat_named_t kmc_slab_size; 843 kstat_named_t kmc_alloc; 844 kstat_named_t kmc_alloc_fail; 845 kstat_named_t kmc_free; 846 kstat_named_t kmc_depot_alloc; 847 kstat_named_t kmc_depot_free; 848 kstat_named_t kmc_depot_contention; 849 kstat_named_t kmc_slab_alloc; 850 kstat_named_t kmc_slab_free; 851 kstat_named_t kmc_buf_constructed; 852 kstat_named_t kmc_buf_avail; 853 kstat_named_t kmc_buf_inuse; 854 kstat_named_t kmc_buf_total; 855 kstat_named_t kmc_buf_max; 856 kstat_named_t kmc_slab_create; 857 kstat_named_t kmc_slab_destroy; 858 kstat_named_t kmc_vmem_source; 859 kstat_named_t kmc_hash_size; 860 kstat_named_t kmc_hash_lookup_depth; 861 kstat_named_t kmc_hash_rescale; 862 kstat_named_t kmc_full_magazines; 863 kstat_named_t kmc_empty_magazines; 864 kstat_named_t kmc_magazine_size; 865 kstat_named_t kmc_reap; /* number of kmem_cache_reap() calls */ 866 kstat_named_t kmc_defrag; /* attempts to defrag all partial slabs */ 867 kstat_named_t kmc_scan; /* attempts to defrag one partial slab */ 868 kstat_named_t kmc_move_callbacks; /* sum of yes, no, later, dn, dk */ 869 kstat_named_t kmc_move_yes; 870 kstat_named_t kmc_move_no; 871 kstat_named_t kmc_move_later; 872 kstat_named_t kmc_move_dont_need; 873 kstat_named_t kmc_move_dont_know; /* obj unrecognized by client ... */ 874 kstat_named_t kmc_move_hunt_found; /* ... 
but found in mag layer */ 875 kstat_named_t kmc_move_slabs_freed; /* slabs freed by consolidator */ 876 kstat_named_t kmc_move_reclaimable; /* buffers, if consolidator ran */ 877 } kmem_cache_kstat = { 878 { "buf_size", KSTAT_DATA_UINT64 }, 879 { "align", KSTAT_DATA_UINT64 }, 880 { "chunk_size", KSTAT_DATA_UINT64 }, 881 { "slab_size", KSTAT_DATA_UINT64 }, 882 { "alloc", KSTAT_DATA_UINT64 }, 883 { "alloc_fail", KSTAT_DATA_UINT64 }, 884 { "free", KSTAT_DATA_UINT64 }, 885 { "depot_alloc", KSTAT_DATA_UINT64 }, 886 { "depot_free", KSTAT_DATA_UINT64 }, 887 { "depot_contention", KSTAT_DATA_UINT64 }, 888 { "slab_alloc", KSTAT_DATA_UINT64 }, 889 { "slab_free", KSTAT_DATA_UINT64 }, 890 { "buf_constructed", KSTAT_DATA_UINT64 }, 891 { "buf_avail", KSTAT_DATA_UINT64 }, 892 { "buf_inuse", KSTAT_DATA_UINT64 }, 893 { "buf_total", KSTAT_DATA_UINT64 }, 894 { "buf_max", KSTAT_DATA_UINT64 }, 895 { "slab_create", KSTAT_DATA_UINT64 }, 896 { "slab_destroy", KSTAT_DATA_UINT64 }, 897 { "vmem_source", KSTAT_DATA_UINT64 }, 898 { "hash_size", KSTAT_DATA_UINT64 }, 899 { "hash_lookup_depth", KSTAT_DATA_UINT64 }, 900 { "hash_rescale", KSTAT_DATA_UINT64 }, 901 { "full_magazines", KSTAT_DATA_UINT64 }, 902 { "empty_magazines", KSTAT_DATA_UINT64 }, 903 { "magazine_size", KSTAT_DATA_UINT64 }, 904 { "reap", KSTAT_DATA_UINT64 }, 905 { "defrag", KSTAT_DATA_UINT64 }, 906 { "scan", KSTAT_DATA_UINT64 }, 907 { "move_callbacks", KSTAT_DATA_UINT64 }, 908 { "move_yes", KSTAT_DATA_UINT64 }, 909 { "move_no", KSTAT_DATA_UINT64 }, 910 { "move_later", KSTAT_DATA_UINT64 }, 911 { "move_dont_need", KSTAT_DATA_UINT64 }, 912 { "move_dont_know", KSTAT_DATA_UINT64 }, 913 { "move_hunt_found", KSTAT_DATA_UINT64 }, 914 { "move_slabs_freed", KSTAT_DATA_UINT64 }, 915 { "move_reclaimable", KSTAT_DATA_UINT64 }, 916 }; 917 918 static kmutex_t kmem_cache_kstat_lock; 919 920 /* 921 * The default set of caches to back kmem_alloc(). 922 * These sizes should be reevaluated periodically. 923 * 924 * We want allocations that are multiples of the coherency granularity 925 * (64 bytes) to be satisfied from a cache which is a multiple of 64 926 * bytes, so that it will be 64-byte aligned. For all multiples of 64, 927 * the next kmem_cache_size greater than or equal to it must be a 928 * multiple of 64. 929 * 930 * We split the table into two sections: size <= 4k and size > 4k. This 931 * saves a lot of space and cache footprint in our cache tables. 
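 *
 * For example, the small-size table below includes P2ALIGN(8192 / 7, 64) = 1152
 * and P2ALIGN(8192 / 6, 64) = 1344, both multiples of 64; a 1280-byte request
 * (20 * 64) is therefore satisfied from the 1344-byte cache (21 * 64), and the
 * buffer it returns remains 64-byte aligned.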
932 */ 933 static const int kmem_alloc_sizes[] = { 934 1 * 8, 935 2 * 8, 936 3 * 8, 937 4 * 8, 5 * 8, 6 * 8, 7 * 8, 938 4 * 16, 5 * 16, 6 * 16, 7 * 16, 939 4 * 32, 5 * 32, 6 * 32, 7 * 32, 940 4 * 64, 5 * 64, 6 * 64, 7 * 64, 941 4 * 128, 5 * 128, 6 * 128, 7 * 128, 942 P2ALIGN(8192 / 7, 64), 943 P2ALIGN(8192 / 6, 64), 944 P2ALIGN(8192 / 5, 64), 945 P2ALIGN(8192 / 4, 64), 946 P2ALIGN(8192 / 3, 64), 947 P2ALIGN(8192 / 2, 64), 948 }; 949 950 static const int kmem_big_alloc_sizes[] = { 951 2 * 4096, 3 * 4096, 952 2 * 8192, 3 * 8192, 953 4 * 8192, 5 * 8192, 6 * 8192, 7 * 8192, 954 8 * 8192, 9 * 8192, 10 * 8192, 11 * 8192, 955 12 * 8192, 13 * 8192, 14 * 8192, 15 * 8192, 956 16 * 8192 957 }; 958 959 #define KMEM_MAXBUF 4096 960 #define KMEM_BIG_MAXBUF_32BIT 32768 961 #define KMEM_BIG_MAXBUF 131072 962 963 #define KMEM_BIG_MULTIPLE 4096 /* big_alloc_sizes must be a multiple */ 964 #define KMEM_BIG_SHIFT 12 /* lg(KMEM_BIG_MULTIPLE) */ 965 966 static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT]; 967 static kmem_cache_t *kmem_big_alloc_table[KMEM_BIG_MAXBUF >> KMEM_BIG_SHIFT]; 968 969 #define KMEM_ALLOC_TABLE_MAX (KMEM_MAXBUF >> KMEM_ALIGN_SHIFT) 970 static size_t kmem_big_alloc_table_max = 0; /* # of filled elements */ 971 972 static kmem_magtype_t kmem_magtype[] = { 973 { 1, 8, 3200, 65536 }, 974 { 3, 16, 256, 32768 }, 975 { 7, 32, 64, 16384 }, 976 { 15, 64, 0, 8192 }, 977 { 31, 64, 0, 4096 }, 978 { 47, 64, 0, 2048 }, 979 { 63, 64, 0, 1024 }, 980 { 95, 64, 0, 512 }, 981 { 143, 64, 0, 0 }, 982 }; 983 984 static uint32_t kmem_reaping; 985 static uint32_t kmem_reaping_idspace; 986 987 /* 988 * kmem tunables 989 */ 990 clock_t kmem_reap_interval; /* cache reaping rate [15 * HZ ticks] */ 991 int kmem_depot_contention = 3; /* max failed tryenters per real interval */ 992 pgcnt_t kmem_reapahead = 0; /* start reaping N pages before pageout */ 993 int kmem_panic = 1; /* whether to panic on error */ 994 int kmem_logging = 1; /* kmem_log_enter() override */ 995 uint32_t kmem_mtbf = 0; /* mean time between failures [default: off] */ 996 size_t kmem_transaction_log_size; /* transaction log size [2% of memory] */ 997 size_t kmem_content_log_size; /* content log size [2% of memory] */ 998 size_t kmem_failure_log_size; /* failure log [4 pages per CPU] */ 999 size_t kmem_slab_log_size; /* slab create log [4 pages per CPU] */ 1000 size_t kmem_content_maxsave = 256; /* KMF_CONTENTS max bytes to log */ 1001 size_t kmem_lite_minsize = 0; /* minimum buffer size for KMF_LITE */ 1002 size_t kmem_lite_maxalign = 1024; /* maximum buffer alignment for KMF_LITE */ 1003 int kmem_lite_pcs = 4; /* number of PCs to store in KMF_LITE mode */ 1004 size_t kmem_maxverify; /* maximum bytes to inspect in debug routines */ 1005 size_t kmem_minfirewall; /* hardware-enforced redzone threshold */ 1006 1007 #ifdef _LP64 1008 size_t kmem_max_cached = KMEM_BIG_MAXBUF; /* maximum kmem_alloc cache */ 1009 #else 1010 size_t kmem_max_cached = KMEM_BIG_MAXBUF_32BIT; /* maximum kmem_alloc cache */ 1011 #endif 1012 1013 #ifdef DEBUG 1014 int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS; 1015 #else 1016 int kmem_flags = 0; 1017 #endif 1018 int kmem_ready; 1019 1020 static kmem_cache_t *kmem_slab_cache; 1021 static kmem_cache_t *kmem_bufctl_cache; 1022 static kmem_cache_t *kmem_bufctl_audit_cache; 1023 1024 static kmutex_t kmem_cache_lock; /* inter-cache linkage only */ 1025 static list_t kmem_caches; 1026 1027 static taskq_t *kmem_taskq; 1028 static kmutex_t kmem_flags_lock; 1029 static vmem_t 
*kmem_metadata_arena; 1030 static vmem_t *kmem_msb_arena; /* arena for metadata caches */ 1031 static vmem_t *kmem_cache_arena; 1032 static vmem_t *kmem_hash_arena; 1033 static vmem_t *kmem_log_arena; 1034 static vmem_t *kmem_oversize_arena; 1035 static vmem_t *kmem_va_arena; 1036 static vmem_t *kmem_default_arena; 1037 static vmem_t *kmem_firewall_va_arena; 1038 static vmem_t *kmem_firewall_arena; 1039 1040 /* 1041 * Define KMEM_STATS to turn on statistic gathering. By default, it is only 1042 * turned on when DEBUG is also defined. 1043 */ 1044 #ifdef DEBUG 1045 #define KMEM_STATS 1046 #endif /* DEBUG */ 1047 1048 #ifdef KMEM_STATS 1049 #define KMEM_STAT_ADD(stat) ((stat)++) 1050 #define KMEM_STAT_COND_ADD(cond, stat) ((void) (!(cond) || (stat)++)) 1051 #else 1052 #define KMEM_STAT_ADD(stat) /* nothing */ 1053 #define KMEM_STAT_COND_ADD(cond, stat) /* nothing */ 1054 #endif /* KMEM_STATS */ 1055 1056 /* 1057 * kmem slab consolidator thresholds (tunables) 1058 */ 1059 size_t kmem_frag_minslabs = 101; /* minimum total slabs */ 1060 size_t kmem_frag_numer = 1; /* free buffers (numerator) */ 1061 size_t kmem_frag_denom = KMEM_VOID_FRACTION; /* buffers (denominator) */ 1062 /* 1063 * Maximum number of slabs from which to move buffers during a single 1064 * maintenance interval while the system is not low on memory. 1065 */ 1066 size_t kmem_reclaim_max_slabs = 1; 1067 /* 1068 * Number of slabs to scan backwards from the end of the partial slab list 1069 * when searching for buffers to relocate. 1070 */ 1071 size_t kmem_reclaim_scan_range = 12; 1072 1073 #ifdef KMEM_STATS 1074 static struct { 1075 uint64_t kms_callbacks; 1076 uint64_t kms_yes; 1077 uint64_t kms_no; 1078 uint64_t kms_later; 1079 uint64_t kms_dont_need; 1080 uint64_t kms_dont_know; 1081 uint64_t kms_hunt_found_mag; 1082 uint64_t kms_hunt_found_slab; 1083 uint64_t kms_hunt_alloc_fail; 1084 uint64_t kms_hunt_lucky; 1085 uint64_t kms_notify; 1086 uint64_t kms_notify_callbacks; 1087 uint64_t kms_disbelief; 1088 uint64_t kms_already_pending; 1089 uint64_t kms_callback_alloc_fail; 1090 uint64_t kms_callback_taskq_fail; 1091 uint64_t kms_endscan_slab_dead; 1092 uint64_t kms_endscan_slab_destroyed; 1093 uint64_t kms_endscan_nomem; 1094 uint64_t kms_endscan_refcnt_changed; 1095 uint64_t kms_endscan_nomove_changed; 1096 uint64_t kms_endscan_freelist; 1097 uint64_t kms_avl_update; 1098 uint64_t kms_avl_noupdate; 1099 uint64_t kms_no_longer_reclaimable; 1100 uint64_t kms_notify_no_longer_reclaimable; 1101 uint64_t kms_notify_slab_dead; 1102 uint64_t kms_notify_slab_destroyed; 1103 uint64_t kms_alloc_fail; 1104 uint64_t kms_constructor_fail; 1105 uint64_t kms_dead_slabs_freed; 1106 uint64_t kms_defrags; 1107 uint64_t kms_scans; 1108 uint64_t kms_scan_depot_ws_reaps; 1109 uint64_t kms_debug_reaps; 1110 uint64_t kms_debug_scans; 1111 } kmem_move_stats; 1112 #endif /* KMEM_STATS */ 1113 1114 /* consolidator knobs */ 1115 static boolean_t kmem_move_noreap; 1116 static boolean_t kmem_move_blocked; 1117 static boolean_t kmem_move_fulltilt; 1118 static boolean_t kmem_move_any_partial; 1119 1120 #ifdef DEBUG 1121 /* 1122 * kmem consolidator debug tunables: 1123 * Ensure code coverage by occasionally running the consolidator even when the 1124 * caches are not fragmented (they may never be). These intervals are mean time 1125 * in cache maintenance intervals (kmem_cache_update). 
1126 */ 1127 uint32_t kmem_mtb_move = 60; /* defrag 1 slab (~15min) */ 1128 uint32_t kmem_mtb_reap = 1800; /* defrag all slabs (~7.5hrs) */ 1129 #endif /* DEBUG */ 1130 1131 static kmem_cache_t *kmem_defrag_cache; 1132 static kmem_cache_t *kmem_move_cache; 1133 static taskq_t *kmem_move_taskq; 1134 1135 static void kmem_cache_scan(kmem_cache_t *); 1136 static void kmem_cache_defrag(kmem_cache_t *); 1137 static void kmem_slab_prefill(kmem_cache_t *, kmem_slab_t *); 1138 1139 1140 kmem_log_header_t *kmem_transaction_log; 1141 kmem_log_header_t *kmem_content_log; 1142 kmem_log_header_t *kmem_failure_log; 1143 kmem_log_header_t *kmem_slab_log; 1144 1145 static int kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */ 1146 1147 #define KMEM_BUFTAG_LITE_ENTER(bt, count, caller) \ 1148 if ((count) > 0) { \ 1149 pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history; \ 1150 pc_t *_e; \ 1151 /* memmove() the old entries down one notch */ \ 1152 for (_e = &_s[(count) - 1]; _e > _s; _e--) \ 1153 *_e = *(_e - 1); \ 1154 *_s = (uintptr_t)(caller); \ 1155 } 1156 1157 #define KMERR_MODIFIED 0 /* buffer modified while on freelist */ 1158 #define KMERR_REDZONE 1 /* redzone violation (write past end of buf) */ 1159 #define KMERR_DUPFREE 2 /* freed a buffer twice */ 1160 #define KMERR_BADADDR 3 /* freed a bad (unallocated) address */ 1161 #define KMERR_BADBUFTAG 4 /* buftag corrupted */ 1162 #define KMERR_BADBUFCTL 5 /* bufctl corrupted */ 1163 #define KMERR_BADCACHE 6 /* freed a buffer to the wrong cache */ 1164 #define KMERR_BADSIZE 7 /* alloc size != free size */ 1165 #define KMERR_BADBASE 8 /* buffer base address wrong */ 1166 1167 struct { 1168 hrtime_t kmp_timestamp; /* timestamp of panic */ 1169 int kmp_error; /* type of kmem error */ 1170 void *kmp_buffer; /* buffer that induced panic */ 1171 void *kmp_realbuf; /* real start address for buffer */ 1172 kmem_cache_t *kmp_cache; /* buffer's cache according to client */ 1173 kmem_cache_t *kmp_realcache; /* actual cache containing buffer */ 1174 kmem_slab_t *kmp_slab; /* slab accoring to kmem_findslab() */ 1175 kmem_bufctl_t *kmp_bufctl; /* bufctl */ 1176 } kmem_panic_info; 1177 1178 1179 static void 1180 copy_pattern(uint64_t pattern, void *buf_arg, size_t size) 1181 { 1182 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size); 1183 uint64_t *buf = buf_arg; 1184 1185 while (buf < bufend) 1186 *buf++ = pattern; 1187 } 1188 1189 static void * 1190 verify_pattern(uint64_t pattern, void *buf_arg, size_t size) 1191 { 1192 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size); 1193 uint64_t *buf; 1194 1195 for (buf = buf_arg; buf < bufend; buf++) 1196 if (*buf != pattern) 1197 return (buf); 1198 return (NULL); 1199 } 1200 1201 static void * 1202 verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size) 1203 { 1204 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size); 1205 uint64_t *buf; 1206 1207 for (buf = buf_arg; buf < bufend; buf++) { 1208 if (*buf != old) { 1209 copy_pattern(old, buf_arg, 1210 (char *)buf - (char *)buf_arg); 1211 return (buf); 1212 } 1213 *buf = new; 1214 } 1215 1216 return (NULL); 1217 } 1218 1219 static void 1220 kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag) 1221 { 1222 kmem_cache_t *cp; 1223 1224 mutex_enter(&kmem_cache_lock); 1225 for (cp = list_head(&kmem_caches); cp != NULL; 1226 cp = list_next(&kmem_caches, cp)) 1227 if (tq != NULL) 1228 (void) taskq_dispatch(tq, (task_func_t *)func, cp, 1229 tqflag); 1230 else 1231 func(cp); 1232 mutex_exit(&kmem_cache_lock); 1233 } 1234 1235 
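/*
 * Like kmem_cache_applyall(), but restricted to caches created with
 * KMC_IDENTIFIER; used when reaping identifier space (kmem_reap_idspace()).
 */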
static void 1236 kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag) 1237 { 1238 kmem_cache_t *cp; 1239 1240 mutex_enter(&kmem_cache_lock); 1241 for (cp = list_head(&kmem_caches); cp != NULL; 1242 cp = list_next(&kmem_caches, cp)) { 1243 if (!(cp->cache_cflags & KMC_IDENTIFIER)) 1244 continue; 1245 if (tq != NULL) 1246 (void) taskq_dispatch(tq, (task_func_t *)func, cp, 1247 tqflag); 1248 else 1249 func(cp); 1250 } 1251 mutex_exit(&kmem_cache_lock); 1252 } 1253 1254 /* 1255 * Debugging support. Given a buffer address, find its slab. 1256 */ 1257 static kmem_slab_t * 1258 kmem_findslab(kmem_cache_t *cp, void *buf) 1259 { 1260 kmem_slab_t *sp; 1261 1262 mutex_enter(&cp->cache_lock); 1263 for (sp = list_head(&cp->cache_complete_slabs); sp != NULL; 1264 sp = list_next(&cp->cache_complete_slabs, sp)) { 1265 if (KMEM_SLAB_MEMBER(sp, buf)) { 1266 mutex_exit(&cp->cache_lock); 1267 return (sp); 1268 } 1269 } 1270 for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL; 1271 sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) { 1272 if (KMEM_SLAB_MEMBER(sp, buf)) { 1273 mutex_exit(&cp->cache_lock); 1274 return (sp); 1275 } 1276 } 1277 mutex_exit(&cp->cache_lock); 1278 1279 return (NULL); 1280 } 1281 1282 static void 1283 kmem_error(int error, kmem_cache_t *cparg, void *bufarg) 1284 { 1285 kmem_buftag_t *btp = NULL; 1286 kmem_bufctl_t *bcp = NULL; 1287 kmem_cache_t *cp = cparg; 1288 kmem_slab_t *sp; 1289 uint64_t *off; 1290 void *buf = bufarg; 1291 1292 kmem_logging = 0; /* stop logging when a bad thing happens */ 1293 1294 kmem_panic_info.kmp_timestamp = gethrtime(); 1295 1296 sp = kmem_findslab(cp, buf); 1297 if (sp == NULL) { 1298 for (cp = list_tail(&kmem_caches); cp != NULL; 1299 cp = list_prev(&kmem_caches, cp)) { 1300 if ((sp = kmem_findslab(cp, buf)) != NULL) 1301 break; 1302 } 1303 } 1304 1305 if (sp == NULL) { 1306 cp = NULL; 1307 error = KMERR_BADADDR; 1308 } else { 1309 if (cp != cparg) 1310 error = KMERR_BADCACHE; 1311 else 1312 buf = (char *)bufarg - ((uintptr_t)bufarg - 1313 (uintptr_t)sp->slab_base) % cp->cache_chunksize; 1314 if (buf != bufarg) 1315 error = KMERR_BADBASE; 1316 if (cp->cache_flags & KMF_BUFTAG) 1317 btp = KMEM_BUFTAG(cp, buf); 1318 if (cp->cache_flags & KMF_HASH) { 1319 mutex_enter(&cp->cache_lock); 1320 for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next) 1321 if (bcp->bc_addr == buf) 1322 break; 1323 mutex_exit(&cp->cache_lock); 1324 if (bcp == NULL && btp != NULL) 1325 bcp = btp->bt_bufctl; 1326 if (kmem_findslab(cp->cache_bufctl_cache, bcp) == 1327 NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) || 1328 bcp->bc_addr != buf) { 1329 error = KMERR_BADBUFCTL; 1330 bcp = NULL; 1331 } 1332 } 1333 } 1334 1335 kmem_panic_info.kmp_error = error; 1336 kmem_panic_info.kmp_buffer = bufarg; 1337 kmem_panic_info.kmp_realbuf = buf; 1338 kmem_panic_info.kmp_cache = cparg; 1339 kmem_panic_info.kmp_realcache = cp; 1340 kmem_panic_info.kmp_slab = sp; 1341 kmem_panic_info.kmp_bufctl = bcp; 1342 1343 printf("kernel memory allocator: "); 1344 1345 switch (error) { 1346 1347 case KMERR_MODIFIED: 1348 printf("buffer modified after being freed\n"); 1349 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); 1350 if (off == NULL) /* shouldn't happen */ 1351 off = buf; 1352 printf("modification occurred at offset 0x%lx " 1353 "(0x%llx replaced by 0x%llx)\n", 1354 (uintptr_t)off - (uintptr_t)buf, 1355 (longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off); 1356 break; 1357 1358 case KMERR_REDZONE: 1359 printf("redzone violation: write past end of buffer\n"); 1360 
break; 1361 1362 case KMERR_BADADDR: 1363 printf("invalid free: buffer not in cache\n"); 1364 break; 1365 1366 case KMERR_DUPFREE: 1367 printf("duplicate free: buffer freed twice\n"); 1368 break; 1369 1370 case KMERR_BADBUFTAG: 1371 printf("boundary tag corrupted\n"); 1372 printf("bcp ^ bxstat = %lx, should be %lx\n", 1373 (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat, 1374 KMEM_BUFTAG_FREE); 1375 break; 1376 1377 case KMERR_BADBUFCTL: 1378 printf("bufctl corrupted\n"); 1379 break; 1380 1381 case KMERR_BADCACHE: 1382 printf("buffer freed to wrong cache\n"); 1383 printf("buffer was allocated from %s,\n", cp->cache_name); 1384 printf("caller attempting free to %s.\n", cparg->cache_name); 1385 break; 1386 1387 case KMERR_BADSIZE: 1388 printf("bad free: free size (%u) != alloc size (%u)\n", 1389 KMEM_SIZE_DECODE(((uint32_t *)btp)[0]), 1390 KMEM_SIZE_DECODE(((uint32_t *)btp)[1])); 1391 break; 1392 1393 case KMERR_BADBASE: 1394 printf("bad free: free address (%p) != alloc address (%p)\n", 1395 bufarg, buf); 1396 break; 1397 } 1398 1399 printf("buffer=%p bufctl=%p cache: %s\n", 1400 bufarg, (void *)bcp, cparg->cache_name); 1401 1402 if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) && 1403 error != KMERR_BADBUFCTL) { 1404 int d; 1405 timestruc_t ts; 1406 kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp; 1407 1408 hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts); 1409 printf("previous transaction on buffer %p:\n", buf); 1410 printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n", 1411 (void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec, 1412 (void *)sp, cp->cache_name); 1413 for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) { 1414 ulong_t off; 1415 char *sym = kobj_getsymname(bcap->bc_stack[d], &off); 1416 printf("%s+%lx\n", sym ? sym : "?", off); 1417 } 1418 } 1419 if (kmem_panic > 0) 1420 panic("kernel heap corruption detected"); 1421 if (kmem_panic == 0) 1422 debug_enter(NULL); 1423 kmem_logging = 1; /* resume logging */ 1424 } 1425 1426 static kmem_log_header_t * 1427 kmem_log_init(size_t logsize) 1428 { 1429 kmem_log_header_t *lhp; 1430 int nchunks = 4 * max_ncpus; 1431 size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus]; 1432 int i; 1433 1434 /* 1435 * Make sure that lhp->lh_cpu[] is nicely aligned 1436 * to prevent false sharing of cache lines. 
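 * The vmem_xalloc() below requests 64-byte alignment with a phase of
 * P2NPHASE(lhsize, 64), so the header is placed such that the address just
 * past lh_cpu[max_ncpus - 1] falls on a 64-byte boundary; assuming each
 * per-CPU log header is padded out to a full cache line, no two CPUs'
 * headers then share a line.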
1437 */ 1438 lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN); 1439 lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0, 1440 NULL, NULL, VM_SLEEP); 1441 bzero(lhp, lhsize); 1442 1443 mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL); 1444 lhp->lh_nchunks = nchunks; 1445 lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE); 1446 lhp->lh_base = vmem_alloc(kmem_log_arena, 1447 lhp->lh_chunksize * nchunks, VM_SLEEP); 1448 lhp->lh_free = vmem_alloc(kmem_log_arena, 1449 nchunks * sizeof (int), VM_SLEEP); 1450 bzero(lhp->lh_base, lhp->lh_chunksize * nchunks); 1451 1452 for (i = 0; i < max_ncpus; i++) { 1453 kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i]; 1454 mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL); 1455 clhp->clh_chunk = i; 1456 } 1457 1458 for (i = max_ncpus; i < nchunks; i++) 1459 lhp->lh_free[i] = i; 1460 1461 lhp->lh_head = max_ncpus; 1462 lhp->lh_tail = 0; 1463 1464 return (lhp); 1465 } 1466 1467 static void * 1468 kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size) 1469 { 1470 void *logspace; 1471 kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[CPU->cpu_seqid]; 1472 1473 if (lhp == NULL || kmem_logging == 0 || panicstr) 1474 return (NULL); 1475 1476 mutex_enter(&clhp->clh_lock); 1477 clhp->clh_hits++; 1478 if (size > clhp->clh_avail) { 1479 mutex_enter(&lhp->lh_lock); 1480 lhp->lh_hits++; 1481 lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk; 1482 lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks; 1483 clhp->clh_chunk = lhp->lh_free[lhp->lh_head]; 1484 lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks; 1485 clhp->clh_current = lhp->lh_base + 1486 clhp->clh_chunk * lhp->lh_chunksize; 1487 clhp->clh_avail = lhp->lh_chunksize; 1488 if (size > lhp->lh_chunksize) 1489 size = lhp->lh_chunksize; 1490 mutex_exit(&lhp->lh_lock); 1491 } 1492 logspace = clhp->clh_current; 1493 clhp->clh_current += size; 1494 clhp->clh_avail -= size; 1495 bcopy(data, logspace, size); 1496 mutex_exit(&clhp->clh_lock); 1497 return (logspace); 1498 } 1499 1500 #define KMEM_AUDIT(lp, cp, bcp) \ 1501 { \ 1502 kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp); \ 1503 _bcp->bc_timestamp = gethrtime(); \ 1504 _bcp->bc_thread = curthread; \ 1505 _bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH); \ 1506 _bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp)); \ 1507 } 1508 1509 static void 1510 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp, 1511 kmem_slab_t *sp, void *addr) 1512 { 1513 kmem_bufctl_audit_t bca; 1514 1515 bzero(&bca, sizeof (kmem_bufctl_audit_t)); 1516 bca.bc_addr = addr; 1517 bca.bc_slab = sp; 1518 bca.bc_cache = cp; 1519 KMEM_AUDIT(lp, cp, &bca); 1520 } 1521 1522 /* 1523 * Create a new slab for cache cp. 
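 * The slab's color (the offset of the first buffer from the start of the
 * slab) is advanced for each new slab so that successive slabs stagger
 * their buffers across cache lines (the slab coloring scheme described in
 * Bonwick94).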
1524 */ 1525 static kmem_slab_t * 1526 kmem_slab_create(kmem_cache_t *cp, int kmflag) 1527 { 1528 size_t slabsize = cp->cache_slabsize; 1529 size_t chunksize = cp->cache_chunksize; 1530 int cache_flags = cp->cache_flags; 1531 size_t color, chunks; 1532 char *buf, *slab; 1533 kmem_slab_t *sp; 1534 kmem_bufctl_t *bcp; 1535 vmem_t *vmp = cp->cache_arena; 1536 1537 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); 1538 1539 color = cp->cache_color + cp->cache_align; 1540 if (color > cp->cache_maxcolor) 1541 color = cp->cache_mincolor; 1542 cp->cache_color = color; 1543 1544 slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS); 1545 1546 if (slab == NULL) 1547 goto vmem_alloc_failure; 1548 1549 ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0); 1550 1551 /* 1552 * Reverify what was already checked in kmem_cache_set_move(), since the 1553 * consolidator depends (for correctness) on slabs being initialized 1554 * with the 0xbaddcafe memory pattern (setting a low order bit usable by 1555 * clients to distinguish uninitialized memory from known objects). 1556 */ 1557 ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH)); 1558 if (!(cp->cache_cflags & KMC_NOTOUCH)) 1559 copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize); 1560 1561 if (cache_flags & KMF_HASH) { 1562 if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL) 1563 goto slab_alloc_failure; 1564 chunks = (slabsize - color) / chunksize; 1565 } else { 1566 sp = KMEM_SLAB(cp, slab); 1567 chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize; 1568 } 1569 1570 sp->slab_cache = cp; 1571 sp->slab_head = NULL; 1572 sp->slab_refcnt = 0; 1573 sp->slab_base = buf = slab + color; 1574 sp->slab_chunks = chunks; 1575 sp->slab_stuck_offset = (uint32_t)-1; 1576 sp->slab_later_count = 0; 1577 sp->slab_flags = 0; 1578 1579 ASSERT(chunks > 0); 1580 while (chunks-- != 0) { 1581 if (cache_flags & KMF_HASH) { 1582 bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag); 1583 if (bcp == NULL) 1584 goto bufctl_alloc_failure; 1585 if (cache_flags & KMF_AUDIT) { 1586 kmem_bufctl_audit_t *bcap = 1587 (kmem_bufctl_audit_t *)bcp; 1588 bzero(bcap, sizeof (kmem_bufctl_audit_t)); 1589 bcap->bc_cache = cp; 1590 } 1591 bcp->bc_addr = buf; 1592 bcp->bc_slab = sp; 1593 } else { 1594 bcp = KMEM_BUFCTL(cp, buf); 1595 } 1596 if (cache_flags & KMF_BUFTAG) { 1597 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 1598 btp->bt_redzone = KMEM_REDZONE_PATTERN; 1599 btp->bt_bufctl = bcp; 1600 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE; 1601 if (cache_flags & KMF_DEADBEEF) { 1602 copy_pattern(KMEM_FREE_PATTERN, buf, 1603 cp->cache_verify); 1604 } 1605 } 1606 bcp->bc_next = sp->slab_head; 1607 sp->slab_head = bcp; 1608 buf += chunksize; 1609 } 1610 1611 kmem_log_event(kmem_slab_log, cp, sp, slab); 1612 1613 return (sp); 1614 1615 bufctl_alloc_failure: 1616 1617 while ((bcp = sp->slab_head) != NULL) { 1618 sp->slab_head = bcp->bc_next; 1619 kmem_cache_free(cp->cache_bufctl_cache, bcp); 1620 } 1621 kmem_cache_free(kmem_slab_cache, sp); 1622 1623 slab_alloc_failure: 1624 1625 vmem_free(vmp, slab, slabsize); 1626 1627 vmem_alloc_failure: 1628 1629 kmem_log_event(kmem_failure_log, cp, NULL, NULL); 1630 atomic_inc_64(&cp->cache_alloc_fail); 1631 1632 return (NULL); 1633 } 1634 1635 /* 1636 * Destroy a slab. 
1637 */ 1638 static void 1639 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp) 1640 { 1641 vmem_t *vmp = cp->cache_arena; 1642 void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum); 1643 1644 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); 1645 ASSERT(sp->slab_refcnt == 0); 1646 1647 if (cp->cache_flags & KMF_HASH) { 1648 kmem_bufctl_t *bcp; 1649 while ((bcp = sp->slab_head) != NULL) { 1650 sp->slab_head = bcp->bc_next; 1651 kmem_cache_free(cp->cache_bufctl_cache, bcp); 1652 } 1653 kmem_cache_free(kmem_slab_cache, sp); 1654 } 1655 vmem_free(vmp, slab, cp->cache_slabsize); 1656 } 1657 1658 static void * 1659 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill) 1660 { 1661 kmem_bufctl_t *bcp, **hash_bucket; 1662 void *buf; 1663 boolean_t new_slab = (sp->slab_refcnt == 0); 1664 1665 ASSERT(MUTEX_HELD(&cp->cache_lock)); 1666 /* 1667 * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we 1668 * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the 1669 * slab is newly created. 1670 */ 1671 ASSERT(new_slab || (KMEM_SLAB_IS_PARTIAL(sp) && 1672 (sp == avl_first(&cp->cache_partial_slabs)))); 1673 ASSERT(sp->slab_cache == cp); 1674 1675 cp->cache_slab_alloc++; 1676 cp->cache_bufslab--; 1677 sp->slab_refcnt++; 1678 1679 bcp = sp->slab_head; 1680 sp->slab_head = bcp->bc_next; 1681 1682 if (cp->cache_flags & KMF_HASH) { 1683 /* 1684 * Add buffer to allocated-address hash table. 1685 */ 1686 buf = bcp->bc_addr; 1687 hash_bucket = KMEM_HASH(cp, buf); 1688 bcp->bc_next = *hash_bucket; 1689 *hash_bucket = bcp; 1690 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) { 1691 KMEM_AUDIT(kmem_transaction_log, cp, bcp); 1692 } 1693 } else { 1694 buf = KMEM_BUF(cp, bcp); 1695 } 1696 1697 ASSERT(KMEM_SLAB_MEMBER(sp, buf)); 1698 1699 if (sp->slab_head == NULL) { 1700 ASSERT(KMEM_SLAB_IS_ALL_USED(sp)); 1701 if (new_slab) { 1702 ASSERT(sp->slab_chunks == 1); 1703 } else { 1704 ASSERT(sp->slab_chunks > 1); /* the slab was partial */ 1705 avl_remove(&cp->cache_partial_slabs, sp); 1706 sp->slab_later_count = 0; /* clear history */ 1707 sp->slab_flags &= ~KMEM_SLAB_NOMOVE; 1708 sp->slab_stuck_offset = (uint32_t)-1; 1709 } 1710 list_insert_head(&cp->cache_complete_slabs, sp); 1711 cp->cache_complete_slab_count++; 1712 return (buf); 1713 } 1714 1715 ASSERT(KMEM_SLAB_IS_PARTIAL(sp)); 1716 /* 1717 * Peek to see if the magazine layer is enabled before 1718 * we prefill. We're not holding the cpu cache lock, 1719 * so the peek could be wrong, but there's no harm in it. 1720 */ 1721 if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) && 1722 (KMEM_CPU_CACHE(cp)->cc_magsize != 0)) { 1723 kmem_slab_prefill(cp, sp); 1724 return (buf); 1725 } 1726 1727 if (new_slab) { 1728 avl_add(&cp->cache_partial_slabs, sp); 1729 return (buf); 1730 } 1731 1732 /* 1733 * The slab is now more allocated than it was, so the 1734 * order remains unchanged. 1735 */ 1736 ASSERT(!avl_update(&cp->cache_partial_slabs, sp)); 1737 return (buf); 1738 } 1739 1740 /* 1741 * Allocate a raw (unconstructed) buffer from cp's slab layer. 1742 */ 1743 static void * 1744 kmem_slab_alloc(kmem_cache_t *cp, int kmflag) 1745 { 1746 kmem_slab_t *sp; 1747 void *buf; 1748 boolean_t test_destructor; 1749 1750 mutex_enter(&cp->cache_lock); 1751 test_destructor = (cp->cache_slab_alloc == 0); 1752 sp = avl_first(&cp->cache_partial_slabs); 1753 if (sp == NULL) { 1754 ASSERT(cp->cache_bufslab == 0); 1755 1756 /* 1757 * The freelist is empty. Create a new slab. 
1758 */ 1759 mutex_exit(&cp->cache_lock); 1760 if ((sp = kmem_slab_create(cp, kmflag)) == NULL) { 1761 return (NULL); 1762 } 1763 mutex_enter(&cp->cache_lock); 1764 cp->cache_slab_create++; 1765 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax) 1766 cp->cache_bufmax = cp->cache_buftotal; 1767 cp->cache_bufslab += sp->slab_chunks; 1768 } 1769 1770 buf = kmem_slab_alloc_impl(cp, sp, B_TRUE); 1771 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) == 1772 (cp->cache_complete_slab_count + 1773 avl_numnodes(&cp->cache_partial_slabs) + 1774 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount))); 1775 mutex_exit(&cp->cache_lock); 1776 1777 if (test_destructor && cp->cache_destructor != NULL) { 1778 /* 1779 * On the first kmem_slab_alloc(), assert that it is valid to 1780 * call the destructor on a newly constructed object without any 1781 * client involvement. 1782 */ 1783 if ((cp->cache_constructor == NULL) || 1784 cp->cache_constructor(buf, cp->cache_private, 1785 kmflag) == 0) { 1786 cp->cache_destructor(buf, cp->cache_private); 1787 } 1788 copy_pattern(KMEM_UNINITIALIZED_PATTERN, buf, 1789 cp->cache_bufsize); 1790 if (cp->cache_flags & KMF_DEADBEEF) { 1791 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); 1792 } 1793 } 1794 1795 return (buf); 1796 } 1797 1798 static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *); 1799 1800 /* 1801 * Free a raw (unconstructed) buffer to cp's slab layer. 1802 */ 1803 static void 1804 kmem_slab_free(kmem_cache_t *cp, void *buf) 1805 { 1806 kmem_slab_t *sp; 1807 kmem_bufctl_t *bcp, **prev_bcpp; 1808 1809 ASSERT(buf != NULL); 1810 1811 mutex_enter(&cp->cache_lock); 1812 cp->cache_slab_free++; 1813 1814 if (cp->cache_flags & KMF_HASH) { 1815 /* 1816 * Look up buffer in allocated-address hash table. 1817 */ 1818 prev_bcpp = KMEM_HASH(cp, buf); 1819 while ((bcp = *prev_bcpp) != NULL) { 1820 if (bcp->bc_addr == buf) { 1821 *prev_bcpp = bcp->bc_next; 1822 sp = bcp->bc_slab; 1823 break; 1824 } 1825 cp->cache_lookup_depth++; 1826 prev_bcpp = &bcp->bc_next; 1827 } 1828 } else { 1829 bcp = KMEM_BUFCTL(cp, buf); 1830 sp = KMEM_SLAB(cp, buf); 1831 } 1832 1833 if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) { 1834 mutex_exit(&cp->cache_lock); 1835 kmem_error(KMERR_BADADDR, cp, buf); 1836 return; 1837 } 1838 1839 if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) { 1840 /* 1841 * If this is the buffer that prevented the consolidator from 1842 * clearing the slab, we can reset the slab flags now that the 1843 * buffer is freed. (It makes sense to do this in 1844 * kmem_cache_free(), where the client gives up ownership of the 1845 * buffer, but on the hot path the test is too expensive.) 1846 */ 1847 kmem_slab_move_yes(cp, sp, buf); 1848 } 1849 1850 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) { 1851 if (cp->cache_flags & KMF_CONTENTS) 1852 ((kmem_bufctl_audit_t *)bcp)->bc_contents = 1853 kmem_log_enter(kmem_content_log, buf, 1854 cp->cache_contents); 1855 KMEM_AUDIT(kmem_transaction_log, cp, bcp); 1856 } 1857 1858 bcp->bc_next = sp->slab_head; 1859 sp->slab_head = bcp; 1860 1861 cp->cache_bufslab++; 1862 ASSERT(sp->slab_refcnt >= 1); 1863 1864 if (--sp->slab_refcnt == 0) { 1865 /* 1866 * There are no outstanding allocations from this slab, 1867 * so we can reclaim the memory. 
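 * A single-chunk slab must be on the complete-slab list here, since its one
 * buffer was allocated; a multi-chunk slab whose refcnt just dropped to
 * zero must have been on the partial-slab AVL tree.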
1868 */ 1869 if (sp->slab_chunks == 1) { 1870 list_remove(&cp->cache_complete_slabs, sp); 1871 cp->cache_complete_slab_count--; 1872 } else { 1873 avl_remove(&cp->cache_partial_slabs, sp); 1874 } 1875 1876 cp->cache_buftotal -= sp->slab_chunks; 1877 cp->cache_bufslab -= sp->slab_chunks; 1878 /* 1879 * Defer releasing the slab to the virtual memory subsystem 1880 * while there is a pending move callback, since we guarantee 1881 * that buffers passed to the move callback have only been 1882 * touched by kmem or by the client itself. Since the memory 1883 * patterns baddcafe (uninitialized) and deadbeef (freed) both 1884 * set at least one of the two lowest order bits, the client can 1885 * test those bits in the move callback to determine whether or 1886 * not it knows about the buffer (assuming that the client also 1887 * sets one of those low order bits whenever it frees a buffer). 1888 */ 1889 if (cp->cache_defrag == NULL || 1890 (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) && 1891 !(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) { 1892 cp->cache_slab_destroy++; 1893 mutex_exit(&cp->cache_lock); 1894 kmem_slab_destroy(cp, sp); 1895 } else { 1896 list_t *deadlist = &cp->cache_defrag->kmd_deadlist; 1897 /* 1898 * Slabs are inserted at both ends of the deadlist to 1899 * distinguish between slabs freed while move callbacks 1900 * are pending (list head) and a slab freed while the 1901 * lock is dropped in kmem_move_buffers() (list tail) so 1902 * that in both cases slab_destroy() is called from the 1903 * right context. 1904 */ 1905 if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) { 1906 list_insert_tail(deadlist, sp); 1907 } else { 1908 list_insert_head(deadlist, sp); 1909 } 1910 cp->cache_defrag->kmd_deadcount++; 1911 mutex_exit(&cp->cache_lock); 1912 } 1913 return; 1914 } 1915 1916 if (bcp->bc_next == NULL) { 1917 /* Transition the slab from completely allocated to partial. */ 1918 ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1)); 1919 ASSERT(sp->slab_chunks > 1); 1920 list_remove(&cp->cache_complete_slabs, sp); 1921 cp->cache_complete_slab_count--; 1922 avl_add(&cp->cache_partial_slabs, sp); 1923 } else { 1924 #ifdef DEBUG 1925 if (avl_update_gt(&cp->cache_partial_slabs, sp)) { 1926 KMEM_STAT_ADD(kmem_move_stats.kms_avl_update); 1927 } else { 1928 KMEM_STAT_ADD(kmem_move_stats.kms_avl_noupdate); 1929 } 1930 #else 1931 (void) avl_update_gt(&cp->cache_partial_slabs, sp); 1932 #endif 1933 } 1934 1935 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) == 1936 (cp->cache_complete_slab_count + 1937 avl_numnodes(&cp->cache_partial_slabs) + 1938 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount))); 1939 mutex_exit(&cp->cache_lock); 1940 } 1941 1942 /* 1943 * Return -1 if kmem_error, 1 if constructor fails, 0 if successful. 
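 * In the constructor-failure case the buffer has already been returned to
 * the slab layer, so the caller must not free it again.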
1944 */ 1945 static int 1946 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct, 1947 caddr_t caller) 1948 { 1949 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 1950 kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl; 1951 uint32_t mtbf; 1952 1953 if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) { 1954 kmem_error(KMERR_BADBUFTAG, cp, buf); 1955 return (-1); 1956 } 1957 1958 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC; 1959 1960 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) { 1961 kmem_error(KMERR_BADBUFCTL, cp, buf); 1962 return (-1); 1963 } 1964 1965 if (cp->cache_flags & KMF_DEADBEEF) { 1966 if (!construct && (cp->cache_flags & KMF_LITE)) { 1967 if (*(uint64_t *)buf != KMEM_FREE_PATTERN) { 1968 kmem_error(KMERR_MODIFIED, cp, buf); 1969 return (-1); 1970 } 1971 if (cp->cache_constructor != NULL) 1972 *(uint64_t *)buf = btp->bt_redzone; 1973 else 1974 *(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN; 1975 } else { 1976 construct = 1; 1977 if (verify_and_copy_pattern(KMEM_FREE_PATTERN, 1978 KMEM_UNINITIALIZED_PATTERN, buf, 1979 cp->cache_verify)) { 1980 kmem_error(KMERR_MODIFIED, cp, buf); 1981 return (-1); 1982 } 1983 } 1984 } 1985 btp->bt_redzone = KMEM_REDZONE_PATTERN; 1986 1987 if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 && 1988 gethrtime() % mtbf == 0 && 1989 (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) { 1990 kmem_log_event(kmem_failure_log, cp, NULL, NULL); 1991 if (!construct && cp->cache_destructor != NULL) 1992 cp->cache_destructor(buf, cp->cache_private); 1993 } else { 1994 mtbf = 0; 1995 } 1996 1997 if (mtbf || (construct && cp->cache_constructor != NULL && 1998 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) { 1999 atomic_inc_64(&cp->cache_alloc_fail); 2000 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE; 2001 if (cp->cache_flags & KMF_DEADBEEF) 2002 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); 2003 kmem_slab_free(cp, buf); 2004 return (1); 2005 } 2006 2007 if (cp->cache_flags & KMF_AUDIT) { 2008 KMEM_AUDIT(kmem_transaction_log, cp, bcp); 2009 } 2010 2011 if ((cp->cache_flags & KMF_LITE) && 2012 !(cp->cache_cflags & KMC_KMEM_ALLOC)) { 2013 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller); 2014 } 2015 2016 return (0); 2017 } 2018 2019 static int 2020 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller) 2021 { 2022 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 2023 kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl; 2024 kmem_slab_t *sp; 2025 2026 if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) { 2027 if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) { 2028 kmem_error(KMERR_DUPFREE, cp, buf); 2029 return (-1); 2030 } 2031 sp = kmem_findslab(cp, buf); 2032 if (sp == NULL || sp->slab_cache != cp) 2033 kmem_error(KMERR_BADADDR, cp, buf); 2034 else 2035 kmem_error(KMERR_REDZONE, cp, buf); 2036 return (-1); 2037 } 2038 2039 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE; 2040 2041 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) { 2042 kmem_error(KMERR_BADBUFCTL, cp, buf); 2043 return (-1); 2044 } 2045 2046 if (btp->bt_redzone != KMEM_REDZONE_PATTERN) { 2047 kmem_error(KMERR_REDZONE, cp, buf); 2048 return (-1); 2049 } 2050 2051 if (cp->cache_flags & KMF_AUDIT) { 2052 if (cp->cache_flags & KMF_CONTENTS) 2053 bcp->bc_contents = kmem_log_enter(kmem_content_log, 2054 buf, cp->cache_contents); 2055 KMEM_AUDIT(kmem_transaction_log, cp, bcp); 2056 } 2057 2058 if ((cp->cache_flags & KMF_LITE) && 2059 !(cp->cache_cflags & KMC_KMEM_ALLOC)) { 
2060 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller); 2061 } 2062 2063 if (cp->cache_flags & KMF_DEADBEEF) { 2064 if (cp->cache_flags & KMF_LITE) 2065 btp->bt_redzone = *(uint64_t *)buf; 2066 else if (cp->cache_destructor != NULL) 2067 cp->cache_destructor(buf, cp->cache_private); 2068 2069 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); 2070 } 2071 2072 return (0); 2073 } 2074 2075 /* 2076 * Free each object in magazine mp to cp's slab layer, and free mp itself. 2077 */ 2078 static void 2079 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds) 2080 { 2081 int round; 2082 2083 ASSERT(!list_link_active(&cp->cache_link) || 2084 taskq_member(kmem_taskq, curthread)); 2085 2086 for (round = 0; round < nrounds; round++) { 2087 void *buf = mp->mag_round[round]; 2088 2089 if (cp->cache_flags & KMF_DEADBEEF) { 2090 if (verify_pattern(KMEM_FREE_PATTERN, buf, 2091 cp->cache_verify) != NULL) { 2092 kmem_error(KMERR_MODIFIED, cp, buf); 2093 continue; 2094 } 2095 if ((cp->cache_flags & KMF_LITE) && 2096 cp->cache_destructor != NULL) { 2097 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 2098 *(uint64_t *)buf = btp->bt_redzone; 2099 cp->cache_destructor(buf, cp->cache_private); 2100 *(uint64_t *)buf = KMEM_FREE_PATTERN; 2101 } 2102 } else if (cp->cache_destructor != NULL) { 2103 cp->cache_destructor(buf, cp->cache_private); 2104 } 2105 2106 kmem_slab_free(cp, buf); 2107 } 2108 ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); 2109 kmem_cache_free(cp->cache_magtype->mt_cache, mp); 2110 } 2111 2112 /* 2113 * Allocate a magazine from the depot. 2114 */ 2115 static kmem_magazine_t * 2116 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp) 2117 { 2118 kmem_magazine_t *mp; 2119 2120 /* 2121 * If we can't get the depot lock without contention, 2122 * update our contention count. We use the depot 2123 * contention rate to determine whether we need to 2124 * increase the magazine size for better scalability. 2125 */ 2126 if (!mutex_tryenter(&cp->cache_depot_lock)) { 2127 mutex_enter(&cp->cache_depot_lock); 2128 cp->cache_depot_contention++; 2129 } 2130 2131 if ((mp = mlp->ml_list) != NULL) { 2132 ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); 2133 mlp->ml_list = mp->mag_next; 2134 if (--mlp->ml_total < mlp->ml_min) 2135 mlp->ml_min = mlp->ml_total; 2136 mlp->ml_alloc++; 2137 } 2138 2139 mutex_exit(&cp->cache_depot_lock); 2140 2141 return (mp); 2142 } 2143 2144 /* 2145 * Free a magazine to the depot. 2146 */ 2147 static void 2148 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp) 2149 { 2150 mutex_enter(&cp->cache_depot_lock); 2151 ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); 2152 mp->mag_next = mlp->ml_list; 2153 mlp->ml_list = mp; 2154 mlp->ml_total++; 2155 mutex_exit(&cp->cache_depot_lock); 2156 } 2157 2158 /* 2159 * Update the working set statistics for cp's depot. 2160 */ 2161 static void 2162 kmem_depot_ws_update(kmem_cache_t *cp) 2163 { 2164 mutex_enter(&cp->cache_depot_lock); 2165 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min; 2166 cp->cache_full.ml_min = cp->cache_full.ml_total; 2167 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min; 2168 cp->cache_empty.ml_min = cp->cache_empty.ml_total; 2169 mutex_exit(&cp->cache_depot_lock); 2170 } 2171 2172 /* 2173 * Set the working set statistics for cp's depot to zero. (Everything is 2174 * eligible for reaping.) 
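 * With ml_reaplimit and ml_min both set to ml_total, every magazine in the
 * depot appears unused, so a subsequent kmem_depot_ws_reap() will free them
 * all.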
2175 */ 2176 static void 2177 kmem_depot_ws_zero(kmem_cache_t *cp) 2178 { 2179 mutex_enter(&cp->cache_depot_lock); 2180 cp->cache_full.ml_reaplimit = cp->cache_full.ml_total; 2181 cp->cache_full.ml_min = cp->cache_full.ml_total; 2182 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total; 2183 cp->cache_empty.ml_min = cp->cache_empty.ml_total; 2184 mutex_exit(&cp->cache_depot_lock); 2185 } 2186 2187 /* 2188 * Reap all magazines that have fallen out of the depot's working set. 2189 */ 2190 static void 2191 kmem_depot_ws_reap(kmem_cache_t *cp) 2192 { 2193 long reap; 2194 kmem_magazine_t *mp; 2195 2196 ASSERT(!list_link_active(&cp->cache_link) || 2197 taskq_member(kmem_taskq, curthread)); 2198 2199 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); 2200 while (reap-- && (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) 2201 kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize); 2202 2203 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min); 2204 while (reap-- && (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) 2205 kmem_magazine_destroy(cp, mp, 0); 2206 } 2207 2208 static void 2209 kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds) 2210 { 2211 ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) || 2212 (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize)); 2213 ASSERT(ccp->cc_magsize > 0); 2214 2215 ccp->cc_ploaded = ccp->cc_loaded; 2216 ccp->cc_prounds = ccp->cc_rounds; 2217 ccp->cc_loaded = mp; 2218 ccp->cc_rounds = rounds; 2219 } 2220 2221 /* 2222 * Intercept kmem alloc/free calls during crash dump in order to avoid 2223 * changing kmem state while memory is being saved to the dump device. 2224 * Otherwise, ::kmem_verify will report "corrupt buffers". Note that 2225 * there are no locks because only one CPU calls kmem during a crash 2226 * dump. To enable this feature, first create the associated vmem 2227 * arena with VMC_DUMPSAFE. 2228 */ 2229 static void *kmem_dump_start; /* start of pre-reserved heap */ 2230 static void *kmem_dump_end; /* end of heap area */ 2231 static void *kmem_dump_curr; /* current free heap pointer */ 2232 static size_t kmem_dump_size; /* size of heap area */ 2233 2234 /* append to each buf created in the pre-reserved heap */ 2235 typedef struct kmem_dumpctl { 2236 void *kdc_next; /* cache dump free list linkage */ 2237 } kmem_dumpctl_t; 2238 2239 #define KMEM_DUMPCTL(cp, buf) \ 2240 ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \ 2241 sizeof (void *))) 2242 2243 /* set non zero for full report */ 2244 uint_t kmem_dump_verbose = 0; 2245 2246 /* stats for overize heap */ 2247 uint_t kmem_dump_oversize_allocs = 0; 2248 uint_t kmem_dump_oversize_max = 0; 2249 2250 static void 2251 kmem_dumppr(char **pp, char *e, const char *format, ...) 2252 { 2253 char *p = *pp; 2254 2255 if (p < e) { 2256 int n; 2257 va_list ap; 2258 2259 va_start(ap, format); 2260 n = vsnprintf(p, e - p, format, ap); 2261 va_end(ap); 2262 *pp = p + n; 2263 } 2264 } 2265 2266 /* 2267 * Called when dumpadm(1M) configures dump parameters. 2268 */ 2269 void 2270 kmem_dump_init(size_t size) 2271 { 2272 /* Our caller ensures size is always set. 
*/ 2273 ASSERT3U(size, >, 0); 2274 2275 if (kmem_dump_start != NULL) 2276 kmem_free(kmem_dump_start, kmem_dump_size); 2277 2278 kmem_dump_start = kmem_alloc(size, KM_SLEEP); 2279 kmem_dump_size = size; 2280 kmem_dump_curr = kmem_dump_start; 2281 kmem_dump_end = (void *)((char *)kmem_dump_start + size); 2282 copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size); 2283 } 2284 2285 /* 2286 * Set flag for each kmem_cache_t if is safe to use alternate dump 2287 * memory. Called just before panic crash dump starts. Set the flag 2288 * for the calling CPU. 2289 */ 2290 void 2291 kmem_dump_begin(void) 2292 { 2293 kmem_cache_t *cp; 2294 2295 ASSERT(panicstr != NULL); 2296 2297 for (cp = list_head(&kmem_caches); cp != NULL; 2298 cp = list_next(&kmem_caches, cp)) { 2299 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); 2300 2301 if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) { 2302 cp->cache_flags |= KMF_DUMPDIVERT; 2303 ccp->cc_flags |= KMF_DUMPDIVERT; 2304 ccp->cc_dump_rounds = ccp->cc_rounds; 2305 ccp->cc_dump_prounds = ccp->cc_prounds; 2306 ccp->cc_rounds = ccp->cc_prounds = -1; 2307 } else { 2308 cp->cache_flags |= KMF_DUMPUNSAFE; 2309 ccp->cc_flags |= KMF_DUMPUNSAFE; 2310 } 2311 } 2312 } 2313 2314 /* 2315 * finished dump intercept 2316 * print any warnings on the console 2317 * return verbose information to dumpsys() in the given buffer 2318 */ 2319 size_t 2320 kmem_dump_finish(char *buf, size_t size) 2321 { 2322 int percent = 0; 2323 size_t used; 2324 char *e = buf + size; 2325 char *p = buf; 2326 2327 if (kmem_dump_curr == kmem_dump_end) { 2328 cmn_err(CE_WARN, "exceeded kmem_dump space of %lu " 2329 "bytes: kmem state in dump may be inconsistent", 2330 kmem_dump_size); 2331 } 2332 2333 if (kmem_dump_verbose == 0) 2334 return (0); 2335 2336 used = (char *)kmem_dump_curr - (char *)kmem_dump_start; 2337 percent = (used * 100) / kmem_dump_size; 2338 2339 kmem_dumppr(&p, e, "%% heap used,%d\n", percent); 2340 kmem_dumppr(&p, e, "used bytes,%ld\n", used); 2341 kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size); 2342 kmem_dumppr(&p, e, "Oversize allocs,%d\n", 2343 kmem_dump_oversize_allocs); 2344 kmem_dumppr(&p, e, "Oversize max size,%ld\n", 2345 kmem_dump_oversize_max); 2346 2347 /* return buffer size used */ 2348 if (p < e) 2349 bzero(p, e - p); 2350 return (p - buf); 2351 } 2352 2353 /* 2354 * Allocate a constructed object from alternate dump memory. 2355 */ 2356 void * 2357 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag) 2358 { 2359 void *buf; 2360 void *curr; 2361 char *bufend; 2362 2363 /* return a constructed object */ 2364 if ((buf = cp->cache_dump.kd_freelist) != NULL) { 2365 cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next; 2366 return (buf); 2367 } 2368 2369 /* create a new constructed object */ 2370 curr = kmem_dump_curr; 2371 buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align); 2372 bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t); 2373 2374 /* hat layer objects cannot cross a page boundary */ 2375 if (cp->cache_align < PAGESIZE) { 2376 char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE); 2377 if (bufend > page) { 2378 bufend += page - (char *)buf; 2379 buf = (void *)page; 2380 } 2381 } 2382 2383 /* fall back to normal alloc if reserved area is used up */ 2384 if (bufend > (char *)kmem_dump_end) { 2385 kmem_dump_curr = kmem_dump_end; 2386 cp->cache_dump.kd_alloc_fails++; 2387 return (NULL); 2388 } 2389 2390 /* 2391 * Must advance curr pointer before calling a constructor that 2392 * may also allocate memory. 
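 * If the constructor itself allocates from a dump-diverted cache, that
 * nested allocation is carved from beyond bufend; the failure path below
 * therefore rewinds kmem_dump_curr only if it still equals bufend (i.e.
 * no nested allocation happened).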
2393 */ 2394 kmem_dump_curr = bufend; 2395 2396 /* run constructor */ 2397 if (cp->cache_constructor != NULL && 2398 cp->cache_constructor(buf, cp->cache_private, kmflag) 2399 != 0) { 2400 #ifdef DEBUG 2401 printf("name='%s' cache=0x%p: kmem cache constructor failed\n", 2402 cp->cache_name, (void *)cp); 2403 #endif 2404 /* reset curr pointer iff no allocs were done */ 2405 if (kmem_dump_curr == bufend) 2406 kmem_dump_curr = curr; 2407 2408 cp->cache_dump.kd_alloc_fails++; 2409 /* fall back to normal alloc if the constructor fails */ 2410 return (NULL); 2411 } 2412 2413 return (buf); 2414 } 2415 2416 /* 2417 * Free a constructed object in alternate dump memory. 2418 */ 2419 int 2420 kmem_cache_free_dump(kmem_cache_t *cp, void *buf) 2421 { 2422 /* save constructed buffers for next time */ 2423 if ((char *)buf >= (char *)kmem_dump_start && 2424 (char *)buf < (char *)kmem_dump_end) { 2425 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist; 2426 cp->cache_dump.kd_freelist = buf; 2427 return (0); 2428 } 2429 2430 /* just drop buffers that were allocated before dump started */ 2431 if (kmem_dump_curr < kmem_dump_end) 2432 return (0); 2433 2434 /* fall back to normal free if reserved area is used up */ 2435 return (1); 2436 } 2437 2438 /* 2439 * Allocate a constructed object from cache cp. 2440 */ 2441 void * 2442 kmem_cache_alloc(kmem_cache_t *cp, int kmflag) 2443 { 2444 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); 2445 kmem_magazine_t *fmp; 2446 void *buf; 2447 2448 mutex_enter(&ccp->cc_lock); 2449 for (;;) { 2450 /* 2451 * If there's an object available in the current CPU's 2452 * loaded magazine, just take it and return. 2453 */ 2454 if (ccp->cc_rounds > 0) { 2455 buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds]; 2456 ccp->cc_alloc++; 2457 mutex_exit(&ccp->cc_lock); 2458 if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) { 2459 if (ccp->cc_flags & KMF_DUMPUNSAFE) { 2460 ASSERT(!(ccp->cc_flags & 2461 KMF_DUMPDIVERT)); 2462 cp->cache_dump.kd_unsafe++; 2463 } 2464 if ((ccp->cc_flags & KMF_BUFTAG) && 2465 kmem_cache_alloc_debug(cp, buf, kmflag, 0, 2466 caller()) != 0) { 2467 if (kmflag & KM_NOSLEEP) 2468 return (NULL); 2469 mutex_enter(&ccp->cc_lock); 2470 continue; 2471 } 2472 } 2473 return (buf); 2474 } 2475 2476 /* 2477 * The loaded magazine is empty. If the previously loaded 2478 * magazine was full, exchange them and try again. 2479 */ 2480 if (ccp->cc_prounds > 0) { 2481 kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds); 2482 continue; 2483 } 2484 2485 /* 2486 * Return an alternate buffer at dump time to preserve 2487 * the heap. 2488 */ 2489 if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) { 2490 if (ccp->cc_flags & KMF_DUMPUNSAFE) { 2491 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT)); 2492 /* log it so that we can warn about it */ 2493 cp->cache_dump.kd_unsafe++; 2494 } else { 2495 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) != 2496 NULL) { 2497 mutex_exit(&ccp->cc_lock); 2498 return (buf); 2499 } 2500 break; /* fall back to slab layer */ 2501 } 2502 } 2503 2504 /* 2505 * If the magazine layer is disabled, break out now. 2506 */ 2507 if (ccp->cc_magsize == 0) 2508 break; 2509 2510 /* 2511 * Try to get a full magazine from the depot. 
2512 */ 2513 fmp = kmem_depot_alloc(cp, &cp->cache_full); 2514 if (fmp != NULL) { 2515 if (ccp->cc_ploaded != NULL) 2516 kmem_depot_free(cp, &cp->cache_empty, 2517 ccp->cc_ploaded); 2518 kmem_cpu_reload(ccp, fmp, ccp->cc_magsize); 2519 continue; 2520 } 2521 2522 /* 2523 * There are no full magazines in the depot, 2524 * so fall through to the slab layer. 2525 */ 2526 break; 2527 } 2528 mutex_exit(&ccp->cc_lock); 2529 2530 /* 2531 * We couldn't allocate a constructed object from the magazine layer, 2532 * so get a raw buffer from the slab layer and apply its constructor. 2533 */ 2534 buf = kmem_slab_alloc(cp, kmflag); 2535 2536 if (buf == NULL) 2537 return (NULL); 2538 2539 if (cp->cache_flags & KMF_BUFTAG) { 2540 /* 2541 * Make kmem_cache_alloc_debug() apply the constructor for us. 2542 */ 2543 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller()); 2544 if (rc != 0) { 2545 if (kmflag & KM_NOSLEEP) 2546 return (NULL); 2547 /* 2548 * kmem_cache_alloc_debug() detected corruption 2549 * but didn't panic (kmem_panic <= 0). We should not be 2550 * here because the constructor failed (indicated by a 2551 * return code of 1). Try again. 2552 */ 2553 ASSERT(rc == -1); 2554 return (kmem_cache_alloc(cp, kmflag)); 2555 } 2556 return (buf); 2557 } 2558 2559 if (cp->cache_constructor != NULL && 2560 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) { 2561 atomic_inc_64(&cp->cache_alloc_fail); 2562 kmem_slab_free(cp, buf); 2563 return (NULL); 2564 } 2565 2566 return (buf); 2567 } 2568 2569 /* 2570 * The freed argument tells whether or not kmem_cache_free_debug() has already 2571 * been called so that we can avoid the duplicate free error. For example, a 2572 * buffer on a magazine has already been freed by the client but is still 2573 * constructed. 2574 */ 2575 static void 2576 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed) 2577 { 2578 if (!freed && (cp->cache_flags & KMF_BUFTAG)) 2579 if (kmem_cache_free_debug(cp, buf, caller()) == -1) 2580 return; 2581 2582 /* 2583 * Note that if KMF_DEADBEEF is in effect and KMF_LITE is not, 2584 * kmem_cache_free_debug() will have already applied the destructor. 2585 */ 2586 if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF && 2587 cp->cache_destructor != NULL) { 2588 if (cp->cache_flags & KMF_DEADBEEF) { /* KMF_LITE implied */ 2589 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 2590 *(uint64_t *)buf = btp->bt_redzone; 2591 cp->cache_destructor(buf, cp->cache_private); 2592 *(uint64_t *)buf = KMEM_FREE_PATTERN; 2593 } else { 2594 cp->cache_destructor(buf, cp->cache_private); 2595 } 2596 } 2597 2598 kmem_slab_free(cp, buf); 2599 } 2600 2601 /* 2602 * Used when there's no room to free a buffer to the per-CPU cache. 2603 * Drops and re-acquires &ccp->cc_lock, and returns non-zero if the 2604 * caller should try freeing to the per-CPU cache again. 2605 * Note that we don't directly install the magazine in the cpu cache, 2606 * since its state may have changed wildly while the lock was dropped. 
2607 */ 2608 static int 2609 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp) 2610 { 2611 kmem_magazine_t *emp; 2612 kmem_magtype_t *mtp; 2613 2614 ASSERT(MUTEX_HELD(&ccp->cc_lock)); 2615 ASSERT(((uint_t)ccp->cc_rounds == ccp->cc_magsize || 2616 ((uint_t)ccp->cc_rounds == -1)) && 2617 ((uint_t)ccp->cc_prounds == ccp->cc_magsize || 2618 ((uint_t)ccp->cc_prounds == -1))); 2619 2620 emp = kmem_depot_alloc(cp, &cp->cache_empty); 2621 if (emp != NULL) { 2622 if (ccp->cc_ploaded != NULL) 2623 kmem_depot_free(cp, &cp->cache_full, 2624 ccp->cc_ploaded); 2625 kmem_cpu_reload(ccp, emp, 0); 2626 return (1); 2627 } 2628 /* 2629 * There are no empty magazines in the depot, 2630 * so try to allocate a new one. We must drop all locks 2631 * across kmem_cache_alloc() because lower layers may 2632 * attempt to allocate from this cache. 2633 */ 2634 mtp = cp->cache_magtype; 2635 mutex_exit(&ccp->cc_lock); 2636 emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP); 2637 mutex_enter(&ccp->cc_lock); 2638 2639 if (emp != NULL) { 2640 /* 2641 * We successfully allocated an empty magazine. 2642 * However, we had to drop ccp->cc_lock to do it, 2643 * so the cache's magazine size may have changed. 2644 * If so, free the magazine and try again. 2645 */ 2646 if (ccp->cc_magsize != mtp->mt_magsize) { 2647 mutex_exit(&ccp->cc_lock); 2648 kmem_cache_free(mtp->mt_cache, emp); 2649 mutex_enter(&ccp->cc_lock); 2650 return (1); 2651 } 2652 2653 /* 2654 * We got a magazine of the right size. Add it to 2655 * the depot and try the whole dance again. 2656 */ 2657 kmem_depot_free(cp, &cp->cache_empty, emp); 2658 return (1); 2659 } 2660 2661 /* 2662 * We couldn't allocate an empty magazine, 2663 * so fall through to the slab layer. 2664 */ 2665 return (0); 2666 } 2667 2668 /* 2669 * Free a constructed object to cache cp. 2670 */ 2671 void 2672 kmem_cache_free(kmem_cache_t *cp, void *buf) 2673 { 2674 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); 2675 2676 /* 2677 * The client must not free either of the buffers passed to the move 2678 * callback function. 2679 */ 2680 ASSERT(cp->cache_defrag == NULL || 2681 cp->cache_defrag->kmd_thread != curthread || 2682 (buf != cp->cache_defrag->kmd_from_buf && 2683 buf != cp->cache_defrag->kmd_to_buf)); 2684 2685 if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) { 2686 if (ccp->cc_flags & KMF_DUMPUNSAFE) { 2687 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT)); 2688 /* log it so that we can warn about it */ 2689 cp->cache_dump.kd_unsafe++; 2690 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) { 2691 return; 2692 } 2693 if (ccp->cc_flags & KMF_BUFTAG) { 2694 if (kmem_cache_free_debug(cp, buf, caller()) == -1) 2695 return; 2696 } 2697 } 2698 2699 mutex_enter(&ccp->cc_lock); 2700 /* 2701 * Any changes to this logic should be reflected in kmem_slab_prefill() 2702 */ 2703 for (;;) { 2704 /* 2705 * If there's a slot available in the current CPU's 2706 * loaded magazine, just put the object there and return. 2707 */ 2708 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) { 2709 ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf; 2710 ccp->cc_free++; 2711 mutex_exit(&ccp->cc_lock); 2712 return; 2713 } 2714 2715 /* 2716 * The loaded magazine is full. If the previously loaded 2717 * magazine was empty, exchange them and try again. 2718 */ 2719 if (ccp->cc_prounds == 0) { 2720 kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds); 2721 continue; 2722 } 2723 2724 /* 2725 * If the magazine layer is disabled, break out now. 
2726 */ 2727 if (ccp->cc_magsize == 0) 2728 break; 2729 2730 if (!kmem_cpucache_magazine_alloc(ccp, cp)) { 2731 /* 2732 * We couldn't free our constructed object to the 2733 * magazine layer, so apply its destructor and free it 2734 * to the slab layer. 2735 */ 2736 break; 2737 } 2738 } 2739 mutex_exit(&ccp->cc_lock); 2740 kmem_slab_free_constructed(cp, buf, B_TRUE); 2741 } 2742 2743 static void 2744 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp) 2745 { 2746 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); 2747 int cache_flags = cp->cache_flags; 2748 2749 kmem_bufctl_t *next, *head; 2750 size_t nbufs; 2751 2752 /* 2753 * Completely allocate the newly created slab and put the pre-allocated 2754 * buffers in magazines. Any of the buffers that cannot be put in 2755 * magazines must be returned to the slab. 2756 */ 2757 ASSERT(MUTEX_HELD(&cp->cache_lock)); 2758 ASSERT((cache_flags & (KMF_PREFILL|KMF_BUFTAG)) == KMF_PREFILL); 2759 ASSERT(cp->cache_constructor == NULL); 2760 ASSERT(sp->slab_cache == cp); 2761 ASSERT(sp->slab_refcnt == 1); 2762 ASSERT(sp->slab_head != NULL && sp->slab_chunks > sp->slab_refcnt); 2763 ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL); 2764 2765 head = sp->slab_head; 2766 nbufs = (sp->slab_chunks - sp->slab_refcnt); 2767 sp->slab_head = NULL; 2768 sp->slab_refcnt += nbufs; 2769 cp->cache_bufslab -= nbufs; 2770 cp->cache_slab_alloc += nbufs; 2771 list_insert_head(&cp->cache_complete_slabs, sp); 2772 cp->cache_complete_slab_count++; 2773 mutex_exit(&cp->cache_lock); 2774 mutex_enter(&ccp->cc_lock); 2775 2776 while (head != NULL) { 2777 void *buf = KMEM_BUF(cp, head); 2778 /* 2779 * If there's a slot available in the current CPU's 2780 * loaded magazine, just put the object there and 2781 * continue. 2782 */ 2783 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) { 2784 ccp->cc_loaded->mag_round[ccp->cc_rounds++] = 2785 buf; 2786 ccp->cc_free++; 2787 nbufs--; 2788 head = head->bc_next; 2789 continue; 2790 } 2791 2792 /* 2793 * The loaded magazine is full. If the previously 2794 * loaded magazine was empty, exchange them and try 2795 * again. 2796 */ 2797 if (ccp->cc_prounds == 0) { 2798 kmem_cpu_reload(ccp, ccp->cc_ploaded, 2799 ccp->cc_prounds); 2800 continue; 2801 } 2802 2803 /* 2804 * If the magazine layer is disabled, break out now. 
2805 */ 2806 2807 if (ccp->cc_magsize == 0) { 2808 break; 2809 } 2810 2811 if (!kmem_cpucache_magazine_alloc(ccp, cp)) 2812 break; 2813 } 2814 mutex_exit(&ccp->cc_lock); 2815 if (nbufs != 0) { 2816 ASSERT(head != NULL); 2817 2818 /* 2819 * If there was a failure, return remaining objects to 2820 * the slab 2821 */ 2822 while (head != NULL) { 2823 ASSERT(nbufs != 0); 2824 next = head->bc_next; 2825 head->bc_next = NULL; 2826 kmem_slab_free(cp, KMEM_BUF(cp, head)); 2827 head = next; 2828 nbufs--; 2829 } 2830 } 2831 ASSERT(head == NULL); 2832 ASSERT(nbufs == 0); 2833 mutex_enter(&cp->cache_lock); 2834 } 2835 2836 void * 2837 kmem_zalloc(size_t size, int kmflag) 2838 { 2839 size_t index; 2840 void *buf; 2841 2842 if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) { 2843 kmem_cache_t *cp = kmem_alloc_table[index]; 2844 buf = kmem_cache_alloc(cp, kmflag); 2845 if (buf != NULL) { 2846 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) { 2847 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 2848 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE; 2849 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size); 2850 2851 if (cp->cache_flags & KMF_LITE) { 2852 KMEM_BUFTAG_LITE_ENTER(btp, 2853 kmem_lite_count, caller()); 2854 } 2855 } 2856 bzero(buf, size); 2857 } 2858 } else { 2859 buf = kmem_alloc(size, kmflag); 2860 if (buf != NULL) 2861 bzero(buf, size); 2862 } 2863 return (buf); 2864 } 2865 2866 void * 2867 kmem_alloc(size_t size, int kmflag) 2868 { 2869 size_t index; 2870 kmem_cache_t *cp; 2871 void *buf; 2872 2873 if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) { 2874 cp = kmem_alloc_table[index]; 2875 /* fall through to kmem_cache_alloc() */ 2876 2877 } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) < 2878 kmem_big_alloc_table_max) { 2879 cp = kmem_big_alloc_table[index]; 2880 /* fall through to kmem_cache_alloc() */ 2881 2882 } else { 2883 if (size == 0) 2884 return (NULL); 2885 2886 buf = vmem_alloc(kmem_oversize_arena, size, 2887 kmflag & KM_VMFLAGS); 2888 if (buf == NULL) 2889 kmem_log_event(kmem_failure_log, NULL, NULL, 2890 (void *)size); 2891 else if (KMEM_DUMP(kmem_slab_cache)) { 2892 /* stats for dump intercept */ 2893 kmem_dump_oversize_allocs++; 2894 if (size > kmem_dump_oversize_max) 2895 kmem_dump_oversize_max = size; 2896 } 2897 return (buf); 2898 } 2899 2900 buf = kmem_cache_alloc(cp, kmflag); 2901 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) { 2902 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 2903 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE; 2904 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size); 2905 2906 if (cp->cache_flags & KMF_LITE) { 2907 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller()); 2908 } 2909 } 2910 return (buf); 2911 } 2912 2913 void 2914 kmem_free(void *buf, size_t size) 2915 { 2916 size_t index; 2917 kmem_cache_t *cp; 2918 2919 if ((index = (size - 1) >> KMEM_ALIGN_SHIFT) < KMEM_ALLOC_TABLE_MAX) { 2920 cp = kmem_alloc_table[index]; 2921 /* fall through to kmem_cache_free() */ 2922 2923 } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) < 2924 kmem_big_alloc_table_max) { 2925 cp = kmem_big_alloc_table[index]; 2926 /* fall through to kmem_cache_free() */ 2927 2928 } else { 2929 EQUIV(buf == NULL, size == 0); 2930 if (buf == NULL && size == 0) 2931 return; 2932 vmem_free(kmem_oversize_arena, buf, size); 2933 return; 2934 } 2935 2936 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) { 2937 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 2938 uint32_t *ip = (uint32_t *)btp; 2939 if (ip[1] != KMEM_SIZE_ENCODE(size)) { 2940 
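/*
 * The size encoded in the buftag at allocation time does not match the
 * size being freed: either the buffer was already freed (it carries the
 * free pattern), the caller passed the wrong size (the stored encoding is
 * still valid), or the encoding itself was overwritten (a redzone
 * violation).
 */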
if (*(uint64_t *)buf == KMEM_FREE_PATTERN) { 2941 kmem_error(KMERR_DUPFREE, cp, buf); 2942 return; 2943 } 2944 if (KMEM_SIZE_VALID(ip[1])) { 2945 ip[0] = KMEM_SIZE_ENCODE(size); 2946 kmem_error(KMERR_BADSIZE, cp, buf); 2947 } else { 2948 kmem_error(KMERR_REDZONE, cp, buf); 2949 } 2950 return; 2951 } 2952 if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) { 2953 kmem_error(KMERR_REDZONE, cp, buf); 2954 return; 2955 } 2956 btp->bt_redzone = KMEM_REDZONE_PATTERN; 2957 if (cp->cache_flags & KMF_LITE) { 2958 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, 2959 caller()); 2960 } 2961 } 2962 kmem_cache_free(cp, buf); 2963 } 2964 2965 void * 2966 kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag) 2967 { 2968 size_t realsize = size + vmp->vm_quantum; 2969 void *addr; 2970 2971 /* 2972 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding 2973 * vm_quantum will cause integer wraparound. Check for this, and 2974 * blow off the firewall page in this case. Note that such a 2975 * giant allocation (the entire kernel address space) can never 2976 * be satisfied, so it will either fail immediately (VM_NOSLEEP) 2977 * or sleep forever (VM_SLEEP). Thus, there is no need for a 2978 * corresponding check in kmem_firewall_va_free(). 2979 */ 2980 if (realsize < size) 2981 realsize = size; 2982 2983 /* 2984 * While boot still owns resource management, make sure that this 2985 * redzone virtual address allocation is properly accounted for in 2986 * OBPs "virtual-memory" "available" lists because we're 2987 * effectively claiming them for a red zone. If we don't do this, 2988 * the available lists become too fragmented and too large for the 2989 * current boot/kernel memory list interface. 2990 */ 2991 addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT); 2992 2993 if (addr != NULL && kvseg.s_base == NULL && realsize != size) 2994 (void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum); 2995 2996 return (addr); 2997 } 2998 2999 void 3000 kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size) 3001 { 3002 ASSERT((kvseg.s_base == NULL ? 3003 va_to_pfn((char *)addr + size) : 3004 hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID); 3005 3006 vmem_free(vmp, addr, size + vmp->vm_quantum); 3007 } 3008 3009 /* 3010 * Try to allocate at least `size' bytes of memory without sleeping or 3011 * panicking. Return actual allocated size in `asize'. If allocation failed, 3012 * try final allocation with sleep or panic allowed. 3013 */ 3014 void * 3015 kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag) 3016 { 3017 void *p; 3018 3019 *asize = P2ROUNDUP(size, KMEM_ALIGN); 3020 do { 3021 p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC); 3022 if (p != NULL) 3023 return (p); 3024 *asize += KMEM_ALIGN; 3025 } while (*asize <= PAGESIZE); 3026 3027 *asize = P2ROUNDUP(size, KMEM_ALIGN); 3028 return (kmem_alloc(*asize, kmflag)); 3029 } 3030 3031 /* 3032 * Reclaim all unused memory from a cache. 3033 */ 3034 static void 3035 kmem_cache_reap(kmem_cache_t *cp) 3036 { 3037 ASSERT(taskq_member(kmem_taskq, curthread)); 3038 cp->cache_reap++; 3039 3040 /* 3041 * Ask the cache's owner to free some memory if possible. 3042 * The idea is to handle things like the inode cache, which 3043 * typically sits on a bunch of memory that it doesn't truly 3044 * *need*. Reclaim policy is entirely up to the owner; this 3045 * callback is just an advisory plea for help. 
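 *
 * A typical reclaim callback simply walks whatever private list of idle
 * objects the client keeps and hands them back to kmem. A sketch, using
 * hypothetical client names (my_state_t, my_freelist_remove()) purely for
 * illustration:
 *
 *	void
 *	my_cache_reclaim(void *arg)
 *	{
 *		my_state_t *msp = arg;
 *		void *obj;
 *
 *		while ((obj = my_freelist_remove(msp)) != NULL)
 *			kmem_cache_free(msp->ms_cache, obj);
 *	}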
3046 */ 3047 if (cp->cache_reclaim != NULL) { 3048 long delta; 3049 3050 /* 3051 * Reclaimed memory should be reapable (not included in the 3052 * depot's working set). 3053 */ 3054 delta = cp->cache_full.ml_total; 3055 cp->cache_reclaim(cp->cache_private); 3056 delta = cp->cache_full.ml_total - delta; 3057 if (delta > 0) { 3058 mutex_enter(&cp->cache_depot_lock); 3059 cp->cache_full.ml_reaplimit += delta; 3060 cp->cache_full.ml_min += delta; 3061 mutex_exit(&cp->cache_depot_lock); 3062 } 3063 } 3064 3065 kmem_depot_ws_reap(cp); 3066 3067 if (cp->cache_defrag != NULL && !kmem_move_noreap) { 3068 kmem_cache_defrag(cp); 3069 } 3070 } 3071 3072 static void 3073 kmem_reap_timeout(void *flag_arg) 3074 { 3075 uint32_t *flag = (uint32_t *)flag_arg; 3076 3077 ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace); 3078 *flag = 0; 3079 } 3080 3081 static void 3082 kmem_reap_done(void *flag) 3083 { 3084 if (!callout_init_done) { 3085 /* can't schedule a timeout at this point */ 3086 kmem_reap_timeout(flag); 3087 } else { 3088 (void) timeout(kmem_reap_timeout, flag, kmem_reap_interval); 3089 } 3090 } 3091 3092 static void 3093 kmem_reap_start(void *flag) 3094 { 3095 ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace); 3096 3097 if (flag == &kmem_reaping) { 3098 kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP); 3099 /* 3100 * if we have segkp under heap, reap segkp cache. 3101 */ 3102 if (segkp_fromheap) 3103 segkp_cache_free(); 3104 } 3105 else 3106 kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP); 3107 3108 /* 3109 * We use taskq_dispatch() to schedule a timeout to clear 3110 * the flag so that kmem_reap() becomes self-throttling: 3111 * we won't reap again until the current reap completes *and* 3112 * at least kmem_reap_interval ticks have elapsed. 3113 */ 3114 if (!taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP)) 3115 kmem_reap_done(flag); 3116 } 3117 3118 static void 3119 kmem_reap_common(void *flag_arg) 3120 { 3121 uint32_t *flag = (uint32_t *)flag_arg; 3122 3123 if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL || 3124 atomic_cas_32(flag, 0, 1) != 0) 3125 return; 3126 3127 /* 3128 * It may not be kosher to do memory allocation when a reap is called 3129 * (for example, if vmem_populate() is in the call chain). So we 3130 * start the reap going with a TQ_NOALLOC dispatch. If the dispatch 3131 * fails, we reset the flag, and the next reap will try again. 3132 */ 3133 if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC)) 3134 *flag = 0; 3135 } 3136 3137 /* 3138 * Reclaim all unused memory from all caches. Called from the VM system 3139 * when memory gets tight. 3140 */ 3141 void 3142 kmem_reap(void) 3143 { 3144 kmem_reap_common(&kmem_reaping); 3145 } 3146 3147 /* 3148 * Reclaim all unused memory from identifier arenas, called when a vmem 3149 * arena not backed by memory is exhausted. Since reaping memory-backed caches 3150 * cannot help with identifier exhaustion, we avoid both a large amount of 3151 * work and unwanted side-effects from reclaim callbacks. 3152 */ 3153 void 3154 kmem_reap_idspace(void) 3155 { 3156 kmem_reap_common(&kmem_reaping_idspace); 3157 } 3158 3159 /* 3160 * Purge all magazines from a cache and set its magazine limit to zero. 3161 * All calls are serialized by the kmem_taskq lock, except for the final 3162 * call from kmem_cache_destroy.
3163 */ 3164 static void 3165 kmem_cache_magazine_purge(kmem_cache_t *cp) 3166 { 3167 kmem_cpu_cache_t *ccp; 3168 kmem_magazine_t *mp, *pmp; 3169 int rounds, prounds, cpu_seqid; 3170 3171 ASSERT(!list_link_active(&cp->cache_link) || 3172 taskq_member(kmem_taskq, curthread)); 3173 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); 3174 3175 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) { 3176 ccp = &cp->cache_cpu[cpu_seqid]; 3177 3178 mutex_enter(&ccp->cc_lock); 3179 mp = ccp->cc_loaded; 3180 pmp = ccp->cc_ploaded; 3181 rounds = ccp->cc_rounds; 3182 prounds = ccp->cc_prounds; 3183 ccp->cc_loaded = NULL; 3184 ccp->cc_ploaded = NULL; 3185 ccp->cc_rounds = -1; 3186 ccp->cc_prounds = -1; 3187 ccp->cc_magsize = 0; 3188 mutex_exit(&ccp->cc_lock); 3189 3190 if (mp) 3191 kmem_magazine_destroy(cp, mp, rounds); 3192 if (pmp) 3193 kmem_magazine_destroy(cp, pmp, prounds); 3194 } 3195 3196 kmem_depot_ws_zero(cp); 3197 kmem_depot_ws_reap(cp); 3198 } 3199 3200 /* 3201 * Enable per-cpu magazines on a cache. 3202 */ 3203 static void 3204 kmem_cache_magazine_enable(kmem_cache_t *cp) 3205 { 3206 int cpu_seqid; 3207 3208 if (cp->cache_flags & KMF_NOMAGAZINE) 3209 return; 3210 3211 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) { 3212 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; 3213 mutex_enter(&ccp->cc_lock); 3214 ccp->cc_magsize = cp->cache_magtype->mt_magsize; 3215 mutex_exit(&ccp->cc_lock); 3216 } 3217 3218 } 3219 3220 /* 3221 * Reap (almost) everything right now. 3222 */ 3223 void 3224 kmem_cache_reap_now(kmem_cache_t *cp) 3225 { 3226 ASSERT(list_link_active(&cp->cache_link)); 3227 3228 kmem_depot_ws_zero(cp); 3229 3230 (void) taskq_dispatch(kmem_taskq, 3231 (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP); 3232 taskq_wait(kmem_taskq); 3233 } 3234 3235 /* 3236 * Recompute a cache's magazine size. The trade-off is that larger magazines 3237 * provide a higher transfer rate with the depot, while smaller magazines 3238 * reduce memory consumption. Magazine resizing is an expensive operation; 3239 * it should not be done frequently. 3240 * 3241 * Changes to the magazine size are serialized by the kmem_taskq lock. 3242 * 3243 * Note: at present this only grows the magazine size. It might be useful 3244 * to allow shrinkage too. 3245 */ 3246 static void 3247 kmem_cache_magazine_resize(kmem_cache_t *cp) 3248 { 3249 kmem_magtype_t *mtp = cp->cache_magtype; 3250 3251 ASSERT(taskq_member(kmem_taskq, curthread)); 3252 3253 if (cp->cache_chunksize < mtp->mt_maxbuf) { 3254 kmem_cache_magazine_purge(cp); 3255 mutex_enter(&cp->cache_depot_lock); 3256 cp->cache_magtype = ++mtp; 3257 cp->cache_depot_contention_prev = 3258 cp->cache_depot_contention + INT_MAX; 3259 mutex_exit(&cp->cache_depot_lock); 3260 kmem_cache_magazine_enable(cp); 3261 } 3262 } 3263 3264 /* 3265 * Rescale a cache's hash table, so that the table size is roughly the 3266 * cache size. We want the average lookup time to be extremely small. 
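 *
 * As a worked example (illustrative numbers): a cache holding roughly
 * 10,000 buffers computes 1 << (highbit(3 * 10000 + 4) - 2) = 8192
 * buckets, keeping the expected hash chain length near one buffer per
 * bucket. The rescale below is skipped unless the desired size differs
 * from the current size by more than a factor of two.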
3267 */ 3268 static void 3269 kmem_hash_rescale(kmem_cache_t *cp) 3270 { 3271 kmem_bufctl_t **old_table, **new_table, *bcp; 3272 size_t old_size, new_size, h; 3273 3274 ASSERT(taskq_member(kmem_taskq, curthread)); 3275 3276 new_size = MAX(KMEM_HASH_INITIAL, 3277 1 << (highbit(3 * cp->cache_buftotal + 4) - 2)); 3278 old_size = cp->cache_hash_mask + 1; 3279 3280 if ((old_size >> 1) <= new_size && new_size <= (old_size << 1)) 3281 return; 3282 3283 new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *), 3284 VM_NOSLEEP); 3285 if (new_table == NULL) 3286 return; 3287 bzero(new_table, new_size * sizeof (void *)); 3288 3289 mutex_enter(&cp->cache_lock); 3290 3291 old_size = cp->cache_hash_mask + 1; 3292 old_table = cp->cache_hash_table; 3293 3294 cp->cache_hash_mask = new_size - 1; 3295 cp->cache_hash_table = new_table; 3296 cp->cache_rescale++; 3297 3298 for (h = 0; h < old_size; h++) { 3299 bcp = old_table[h]; 3300 while (bcp != NULL) { 3301 void *addr = bcp->bc_addr; 3302 kmem_bufctl_t *next_bcp = bcp->bc_next; 3303 kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr); 3304 bcp->bc_next = *hash_bucket; 3305 *hash_bucket = bcp; 3306 bcp = next_bcp; 3307 } 3308 } 3309 3310 mutex_exit(&cp->cache_lock); 3311 3312 vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *)); 3313 } 3314 3315 /* 3316 * Perform periodic maintenance on a cache: hash rescaling, depot working-set 3317 * update, magazine resizing, and slab consolidation. 3318 */ 3319 static void 3320 kmem_cache_update(kmem_cache_t *cp) 3321 { 3322 int need_hash_rescale = 0; 3323 int need_magazine_resize = 0; 3324 3325 ASSERT(MUTEX_HELD(&kmem_cache_lock)); 3326 3327 /* 3328 * If the cache has become much larger or smaller than its hash table, 3329 * fire off a request to rescale the hash table. 3330 */ 3331 mutex_enter(&cp->cache_lock); 3332 3333 if ((cp->cache_flags & KMF_HASH) && 3334 (cp->cache_buftotal > (cp->cache_hash_mask << 1) || 3335 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) && 3336 cp->cache_hash_mask > KMEM_HASH_INITIAL))) 3337 need_hash_rescale = 1; 3338 3339 mutex_exit(&cp->cache_lock); 3340 3341 /* 3342 * Update the depot working set statistics. 3343 */ 3344 kmem_depot_ws_update(cp); 3345 3346 /* 3347 * If there's a lot of contention in the depot, 3348 * increase the magazine size. 
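 *
 * "A lot" is judged relative to the previous update pass: if the depot
 * contention count has grown by more than kmem_depot_contention since the
 * last kmem_cache_update() (passes run roughly every kmem_reap_interval,
 * i.e. about 15 seconds), the cache is scheduled for
 * kmem_cache_magazine_resize() below.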
3349 */ 3350 mutex_enter(&cp->cache_depot_lock); 3351 3352 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf && 3353 (int)(cp->cache_depot_contention - 3354 cp->cache_depot_contention_prev) > kmem_depot_contention) 3355 need_magazine_resize = 1; 3356 3357 cp->cache_depot_contention_prev = cp->cache_depot_contention; 3358 3359 mutex_exit(&cp->cache_depot_lock); 3360 3361 if (need_hash_rescale) 3362 (void) taskq_dispatch(kmem_taskq, 3363 (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP); 3364 3365 if (need_magazine_resize) 3366 (void) taskq_dispatch(kmem_taskq, 3367 (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP); 3368 3369 if (cp->cache_defrag != NULL) 3370 (void) taskq_dispatch(kmem_taskq, 3371 (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP); 3372 } 3373 3374 static void kmem_update(void *); 3375 3376 static void 3377 kmem_update_timeout(void *dummy) 3378 { 3379 (void) timeout(kmem_update, dummy, kmem_reap_interval); 3380 } 3381 3382 static void 3383 kmem_update(void *dummy) 3384 { 3385 kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP); 3386 3387 /* 3388 * We use taskq_dispatch() to reschedule the timeout so that 3389 * kmem_update() becomes self-throttling: it won't schedule 3390 * new tasks until all previous tasks have completed. 3391 */ 3392 if (!taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP)) 3393 kmem_update_timeout(NULL); 3394 } 3395 3396 static int 3397 kmem_cache_kstat_update(kstat_t *ksp, int rw) 3398 { 3399 struct kmem_cache_kstat *kmcp = &kmem_cache_kstat; 3400 kmem_cache_t *cp = ksp->ks_private; 3401 uint64_t cpu_buf_avail; 3402 uint64_t buf_avail = 0; 3403 int cpu_seqid; 3404 long reap; 3405 3406 ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock)); 3407 3408 if (rw == KSTAT_WRITE) 3409 return (EACCES); 3410 3411 mutex_enter(&cp->cache_lock); 3412 3413 kmcp->kmc_alloc_fail.value.ui64 = cp->cache_alloc_fail; 3414 kmcp->kmc_alloc.value.ui64 = cp->cache_slab_alloc; 3415 kmcp->kmc_free.value.ui64 = cp->cache_slab_free; 3416 kmcp->kmc_slab_alloc.value.ui64 = cp->cache_slab_alloc; 3417 kmcp->kmc_slab_free.value.ui64 = cp->cache_slab_free; 3418 3419 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) { 3420 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; 3421 3422 mutex_enter(&ccp->cc_lock); 3423 3424 cpu_buf_avail = 0; 3425 if (ccp->cc_rounds > 0) 3426 cpu_buf_avail += ccp->cc_rounds; 3427 if (ccp->cc_prounds > 0) 3428 cpu_buf_avail += ccp->cc_prounds; 3429 3430 kmcp->kmc_alloc.value.ui64 += ccp->cc_alloc; 3431 kmcp->kmc_free.value.ui64 += ccp->cc_free; 3432 buf_avail += cpu_buf_avail; 3433 3434 mutex_exit(&ccp->cc_lock); 3435 } 3436 3437 mutex_enter(&cp->cache_depot_lock); 3438 3439 kmcp->kmc_depot_alloc.value.ui64 = cp->cache_full.ml_alloc; 3440 kmcp->kmc_depot_free.value.ui64 = cp->cache_empty.ml_alloc; 3441 kmcp->kmc_depot_contention.value.ui64 = cp->cache_depot_contention; 3442 kmcp->kmc_full_magazines.value.ui64 = cp->cache_full.ml_total; 3443 kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total; 3444 kmcp->kmc_magazine_size.value.ui64 = 3445 (cp->cache_flags & KMF_NOMAGAZINE) ? 
3446 0 : cp->cache_magtype->mt_magsize; 3447 3448 kmcp->kmc_alloc.value.ui64 += cp->cache_full.ml_alloc; 3449 kmcp->kmc_free.value.ui64 += cp->cache_empty.ml_alloc; 3450 buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize; 3451 3452 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); 3453 reap = MIN(reap, cp->cache_full.ml_total); 3454 3455 mutex_exit(&cp->cache_depot_lock); 3456 3457 kmcp->kmc_buf_size.value.ui64 = cp->cache_bufsize; 3458 kmcp->kmc_align.value.ui64 = cp->cache_align; 3459 kmcp->kmc_chunk_size.value.ui64 = cp->cache_chunksize; 3460 kmcp->kmc_slab_size.value.ui64 = cp->cache_slabsize; 3461 kmcp->kmc_buf_constructed.value.ui64 = buf_avail; 3462 buf_avail += cp->cache_bufslab; 3463 kmcp->kmc_buf_avail.value.ui64 = buf_avail; 3464 kmcp->kmc_buf_inuse.value.ui64 = cp->cache_buftotal - buf_avail; 3465 kmcp->kmc_buf_total.value.ui64 = cp->cache_buftotal; 3466 kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax; 3467 kmcp->kmc_slab_create.value.ui64 = cp->cache_slab_create; 3468 kmcp->kmc_slab_destroy.value.ui64 = cp->cache_slab_destroy; 3469 kmcp->kmc_hash_size.value.ui64 = (cp->cache_flags & KMF_HASH) ? 3470 cp->cache_hash_mask + 1 : 0; 3471 kmcp->kmc_hash_lookup_depth.value.ui64 = cp->cache_lookup_depth; 3472 kmcp->kmc_hash_rescale.value.ui64 = cp->cache_rescale; 3473 kmcp->kmc_vmem_source.value.ui64 = cp->cache_arena->vm_id; 3474 kmcp->kmc_reap.value.ui64 = cp->cache_reap; 3475 3476 if (cp->cache_defrag == NULL) { 3477 kmcp->kmc_move_callbacks.value.ui64 = 0; 3478 kmcp->kmc_move_yes.value.ui64 = 0; 3479 kmcp->kmc_move_no.value.ui64 = 0; 3480 kmcp->kmc_move_later.value.ui64 = 0; 3481 kmcp->kmc_move_dont_need.value.ui64 = 0; 3482 kmcp->kmc_move_dont_know.value.ui64 = 0; 3483 kmcp->kmc_move_hunt_found.value.ui64 = 0; 3484 kmcp->kmc_move_slabs_freed.value.ui64 = 0; 3485 kmcp->kmc_defrag.value.ui64 = 0; 3486 kmcp->kmc_scan.value.ui64 = 0; 3487 kmcp->kmc_move_reclaimable.value.ui64 = 0; 3488 } else { 3489 int64_t reclaimable; 3490 3491 kmem_defrag_t *kd = cp->cache_defrag; 3492 kmcp->kmc_move_callbacks.value.ui64 = kd->kmd_callbacks; 3493 kmcp->kmc_move_yes.value.ui64 = kd->kmd_yes; 3494 kmcp->kmc_move_no.value.ui64 = kd->kmd_no; 3495 kmcp->kmc_move_later.value.ui64 = kd->kmd_later; 3496 kmcp->kmc_move_dont_need.value.ui64 = kd->kmd_dont_need; 3497 kmcp->kmc_move_dont_know.value.ui64 = kd->kmd_dont_know; 3498 kmcp->kmc_move_hunt_found.value.ui64 = kd->kmd_hunt_found; 3499 kmcp->kmc_move_slabs_freed.value.ui64 = kd->kmd_slabs_freed; 3500 kmcp->kmc_defrag.value.ui64 = kd->kmd_defrags; 3501 kmcp->kmc_scan.value.ui64 = kd->kmd_scans; 3502 3503 reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1); 3504 reclaimable = MAX(reclaimable, 0); 3505 reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize); 3506 kmcp->kmc_move_reclaimable.value.ui64 = reclaimable; 3507 } 3508 3509 mutex_exit(&cp->cache_lock); 3510 return (0); 3511 } 3512 3513 /* 3514 * Return a named statistic about a particular cache. 3515 * This shouldn't be called very often, so it's currently designed for 3516 * simplicity (leverages existing kstat support) rather than efficiency. 
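 *
 * For example (illustrative), a caller holding a cache pointer can sample
 * one of the named statistics published above, assuming the conventional
 * kmem_cache_kstat field names (e.g. kmc_buf_inuse is published as
 * "buf_inuse"):
 *
 *	uint64_t inuse = kmem_cache_stat(cp, "buf_inuse");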
3517 */ 3518 uint64_t 3519 kmem_cache_stat(kmem_cache_t *cp, char *name) 3520 { 3521 int i; 3522 kstat_t *ksp = cp->cache_kstat; 3523 kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat; 3524 uint64_t value = 0; 3525 3526 if (ksp != NULL) { 3527 mutex_enter(&kmem_cache_kstat_lock); 3528 (void) kmem_cache_kstat_update(ksp, KSTAT_READ); 3529 for (i = 0; i < ksp->ks_ndata; i++) { 3530 if (strcmp(knp[i].name, name) == 0) { 3531 value = knp[i].value.ui64; 3532 break; 3533 } 3534 } 3535 mutex_exit(&kmem_cache_kstat_lock); 3536 } 3537 return (value); 3538 } 3539 3540 /* 3541 * Return an estimate of currently available kernel heap memory. 3542 * On 32-bit systems, physical memory may exceed virtual memory, 3543 * so we just truncate the result at 1GB. 3544 */ 3545 size_t 3546 kmem_avail(void) 3547 { 3548 spgcnt_t rmem = availrmem - tune.t_minarmem; 3549 spgcnt_t fmem = freemem - minfree; 3550 3551 return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0), 3552 1 << (30 - PAGESHIFT)))); 3553 } 3554 3555 /* 3556 * Return the maximum amount of memory that is (in theory) allocatable 3557 * from the heap. This may be used as an estimate only since there 3558 * is no guarantee this space will still be available when an allocation 3559 * request is made, nor that the space can be allocated in one big request 3560 * due to kernel heap fragmentation. 3561 */ 3562 size_t 3563 kmem_maxavail(void) 3564 { 3565 spgcnt_t pmem = availrmem - tune.t_minarmem; 3566 spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE)); 3567 3568 return ((size_t)ptob(MAX(MIN(pmem, vmem), 0))); 3569 } 3570 3571 /* 3572 * Indicate whether memory-intensive kmem debugging is enabled. 3573 */ 3574 int 3575 kmem_debugging(void) 3576 { 3577 return (kmem_flags & (KMF_AUDIT | KMF_REDZONE)); 3578 } 3579 3580 /* binning function, sorts finely at the two extremes */ 3581 #define KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift) \ 3582 ((((sp)->slab_refcnt <= (binshift)) || \ 3583 (((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift))) \ 3584 ? -(sp)->slab_refcnt \ 3585 : -((binshift) + ((sp)->slab_refcnt >> (binshift)))) 3586 3587 /* 3588 * Minimizing the number of partial slabs on the freelist minimizes 3589 * fragmentation (the ratio of unused buffers held by the slab layer). There are 3590 * two ways to get a slab off of the freelist: 1) free all the buffers on the 3591 * slab, and 2) allocate all the buffers on the slab. It follows that we want 3592 * the most-used slabs at the front of the list where they have the best chance 3593 * of being completely allocated, and the least-used slabs at a safe distance 3594 * from the front to improve the odds that the few remaining buffers will all be 3595 * freed before another allocation can tie up the slab. For that reason a slab 3596 * with a higher slab_refcnt sorts less than a slab with a lower 3597 * slab_refcnt. 3598 * 3599 * However, if a slab has at least one buffer that is deemed unfreeable, we 3600 * would rather have that slab at the front of the list regardless of 3601 * slab_refcnt, since even one unfreeable buffer makes the entire slab 3602 * unfreeable. If the client returns KMEM_CBRC_NO in response to a cache_move() 3603 * callback, the slab is marked unfreeable for as long as it remains on the 3604 * freelist.
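 *
 * As an illustrative example of the weight macro above: for a cache with
 * roughly 40 chunks per slab (binshift of 3), a slab with 2 buffers
 * allocated weighs -2, a slab with 20 allocated weighs -(3 + (20 >> 3)) =
 * -5, and a nearly full slab with 38 allocated weighs -38. Sorting by
 * ascending weight therefore puts the nearly full slab at the front and
 * the nearly empty one at the back. The comparator below additionally
 * subtracts cache_maxchunks from the weight of a KMEM_SLAB_NOMOVE slab,
 * which pushes any unfreeable slab ahead of all freeable ones.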
3605 */ 3606 static int 3607 kmem_partial_slab_cmp(const void *p0, const void *p1) 3608 { 3609 const kmem_cache_t *cp; 3610 const kmem_slab_t *s0 = p0; 3611 const kmem_slab_t *s1 = p1; 3612 int w0, w1; 3613 size_t binshift; 3614 3615 ASSERT(KMEM_SLAB_IS_PARTIAL(s0)); 3616 ASSERT(KMEM_SLAB_IS_PARTIAL(s1)); 3617 ASSERT(s0->slab_cache == s1->slab_cache); 3618 cp = s1->slab_cache; 3619 ASSERT(MUTEX_HELD(&cp->cache_lock)); 3620 binshift = cp->cache_partial_binshift; 3621 3622 /* weight of first slab */ 3623 w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift); 3624 if (s0->slab_flags & KMEM_SLAB_NOMOVE) { 3625 w0 -= cp->cache_maxchunks; 3626 } 3627 3628 /* weight of second slab */ 3629 w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift); 3630 if (s1->slab_flags & KMEM_SLAB_NOMOVE) { 3631 w1 -= cp->cache_maxchunks; 3632 } 3633 3634 if (w0 < w1) 3635 return (-1); 3636 if (w0 > w1) 3637 return (1); 3638 3639 /* compare pointer values */ 3640 if ((uintptr_t)s0 < (uintptr_t)s1) 3641 return (-1); 3642 if ((uintptr_t)s0 > (uintptr_t)s1) 3643 return (1); 3644 3645 return (0); 3646 } 3647 3648 /* 3649 * It must be valid to call the destructor (if any) on a newly created object. 3650 * That is, the constructor (if any) must leave the object in a valid state for 3651 * the destructor. 3652 */ 3653 kmem_cache_t * 3654 kmem_cache_create( 3655 char *name, /* descriptive name for this cache */ 3656 size_t bufsize, /* size of the objects it manages */ 3657 size_t align, /* required object alignment */ 3658 int (*constructor)(void *, void *, int), /* object constructor */ 3659 void (*destructor)(void *, void *), /* object destructor */ 3660 void (*reclaim)(void *), /* memory reclaim callback */ 3661 void *private, /* pass-thru arg for constr/destr/reclaim */ 3662 vmem_t *vmp, /* vmem source for slab allocation */ 3663 int cflags) /* cache creation flags */ 3664 { 3665 int cpu_seqid; 3666 size_t chunksize; 3667 kmem_cache_t *cp; 3668 kmem_magtype_t *mtp; 3669 size_t csize = KMEM_CACHE_SIZE(max_ncpus); 3670 3671 #ifdef DEBUG 3672 /* 3673 * Cache names should conform to the rules for valid C identifiers 3674 */ 3675 if (!strident_valid(name)) { 3676 cmn_err(CE_CONT, 3677 "kmem_cache_create: '%s' is an invalid cache name\n" 3678 "cache names must conform to the rules for " 3679 "C identifiers\n", name); 3680 } 3681 #endif /* DEBUG */ 3682 3683 if (vmp == NULL) 3684 vmp = kmem_default_arena; 3685 3686 /* 3687 * If this kmem cache has an identifier vmem arena as its source, mark 3688 * it such to allow kmem_reap_idspace(). 3689 */ 3690 ASSERT(!(cflags & KMC_IDENTIFIER)); /* consumer should not set this */ 3691 if (vmp->vm_cflags & VMC_IDENTIFIER) 3692 cflags |= KMC_IDENTIFIER; 3693 3694 /* 3695 * Get a kmem_cache structure. We arrange that cp->cache_cpu[] 3696 * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent 3697 * false sharing of per-CPU data. 3698 */ 3699 cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE, 3700 P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP); 3701 bzero(cp, csize); 3702 list_link_init(&cp->cache_link); 3703 3704 if (align == 0) 3705 align = KMEM_ALIGN; 3706 3707 /* 3708 * If we're not at least KMEM_ALIGN aligned, we can't use free 3709 * memory to hold bufctl information (because we can't safely 3710 * perform word loads and stores on it). 
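 *
 * As an illustrative sketch (hypothetical client names): a typical client
 * passes align == 0 to accept the default KMEM_ALIGN alignment and
 * supplies a constructor/destructor pair that satisfies the rule stated
 * above (the constructor leaves the object in a state the destructor can
 * handle):
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
 *	    foo_constructor, foo_destructor, NULL, NULL, NULL, 0);
 *
 * Requesting a larger alignment (e.g. 64 to match the coherency
 * granularity) is fine as long as it is a power of two no greater than
 * the vmem source's quantum; anything else panics below.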
3711 */ 3712 if (align < KMEM_ALIGN) 3713 cflags |= KMC_NOTOUCH; 3714 3715 if (!ISP2(align) || align > vmp->vm_quantum) 3716 panic("kmem_cache_create: bad alignment %lu", align); 3717 3718 mutex_enter(&kmem_flags_lock); 3719 if (kmem_flags & KMF_RANDOMIZE) 3720 kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) | 3721 KMF_RANDOMIZE; 3722 cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG; 3723 mutex_exit(&kmem_flags_lock); 3724 3725 /* 3726 * Make sure all the various flags are reasonable. 3727 */ 3728 ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH)); 3729 3730 if (cp->cache_flags & KMF_LITE) { 3731 if (bufsize >= kmem_lite_minsize && 3732 align <= kmem_lite_maxalign && 3733 P2PHASE(bufsize, kmem_lite_maxalign) != 0) { 3734 cp->cache_flags |= KMF_BUFTAG; 3735 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL); 3736 } else { 3737 cp->cache_flags &= ~KMF_DEBUG; 3738 } 3739 } 3740 3741 if (cp->cache_flags & KMF_DEADBEEF) 3742 cp->cache_flags |= KMF_REDZONE; 3743 3744 if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT)) 3745 cp->cache_flags |= KMF_NOMAGAZINE; 3746 3747 if (cflags & KMC_NODEBUG) 3748 cp->cache_flags &= ~KMF_DEBUG; 3749 3750 if (cflags & KMC_NOTOUCH) 3751 cp->cache_flags &= ~KMF_TOUCH; 3752 3753 if (cflags & KMC_PREFILL) 3754 cp->cache_flags |= KMF_PREFILL; 3755 3756 if (cflags & KMC_NOHASH) 3757 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL); 3758 3759 if (cflags & KMC_NOMAGAZINE) 3760 cp->cache_flags |= KMF_NOMAGAZINE; 3761 3762 if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH)) 3763 cp->cache_flags |= KMF_REDZONE; 3764 3765 if (!(cp->cache_flags & KMF_AUDIT)) 3766 cp->cache_flags &= ~KMF_CONTENTS; 3767 3768 if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall && 3769 !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH)) 3770 cp->cache_flags |= KMF_FIREWALL; 3771 3772 if (vmp != kmem_default_arena || kmem_firewall_arena == NULL) 3773 cp->cache_flags &= ~KMF_FIREWALL; 3774 3775 if (cp->cache_flags & KMF_FIREWALL) { 3776 cp->cache_flags &= ~KMF_BUFTAG; 3777 cp->cache_flags |= KMF_NOMAGAZINE; 3778 ASSERT(vmp == kmem_default_arena); 3779 vmp = kmem_firewall_arena; 3780 } 3781 3782 /* 3783 * Set cache properties. 3784 */ 3785 (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN); 3786 strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1); 3787 cp->cache_bufsize = bufsize; 3788 cp->cache_align = align; 3789 cp->cache_constructor = constructor; 3790 cp->cache_destructor = destructor; 3791 cp->cache_reclaim = reclaim; 3792 cp->cache_private = private; 3793 cp->cache_arena = vmp; 3794 cp->cache_cflags = cflags; 3795 3796 /* 3797 * Determine the chunk size. 3798 */ 3799 chunksize = bufsize; 3800 3801 if (align >= KMEM_ALIGN) { 3802 chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN); 3803 cp->cache_bufctl = chunksize - KMEM_ALIGN; 3804 } 3805 3806 if (cp->cache_flags & KMF_BUFTAG) { 3807 cp->cache_bufctl = chunksize; 3808 cp->cache_buftag = chunksize; 3809 if (cp->cache_flags & KMF_LITE) 3810 chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count); 3811 else 3812 chunksize += sizeof (kmem_buftag_t); 3813 } 3814 3815 if (cp->cache_flags & KMF_DEADBEEF) { 3816 cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify); 3817 if (cp->cache_flags & KMF_LITE) 3818 cp->cache_verify = sizeof (uint64_t); 3819 } 3820 3821 cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave); 3822 3823 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align); 3824 3825 /* 3826 * Now that we know the chunk size, determine the optimal slab size. 
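 *
 * (As a worked example of the chunk size just computed, assuming no
 * debugging flags: a 20-byte object with the default 8-byte alignment is
 * rounded up to a 24-byte chunk, and the freelist linkage occupies the
 * last word of the buffer while it is free (cache_bufctl = 16 here). With
 * KMF_BUFTAG set, a buftag is appended instead, growing the chunk
 * further.)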
3827 */ 3828 if (vmp == kmem_firewall_arena) { 3829 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum); 3830 cp->cache_mincolor = cp->cache_slabsize - chunksize; 3831 cp->cache_maxcolor = cp->cache_mincolor; 3832 cp->cache_flags |= KMF_HASH; 3833 ASSERT(!(cp->cache_flags & KMF_BUFTAG)); 3834 } else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) && 3835 !(cp->cache_flags & KMF_AUDIT) && 3836 chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) { 3837 cp->cache_slabsize = vmp->vm_quantum; 3838 cp->cache_mincolor = 0; 3839 cp->cache_maxcolor = 3840 (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize; 3841 ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize); 3842 ASSERT(!(cp->cache_flags & KMF_AUDIT)); 3843 } else { 3844 size_t chunks, bestfit, waste, slabsize; 3845 size_t minwaste = LONG_MAX; 3846 3847 for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) { 3848 slabsize = P2ROUNDUP(chunksize * chunks, 3849 vmp->vm_quantum); 3850 chunks = slabsize / chunksize; 3851 waste = (slabsize % chunksize) / chunks; 3852 if (waste < minwaste) { 3853 minwaste = waste; 3854 bestfit = slabsize; 3855 } 3856 } 3857 if (cflags & KMC_QCACHE) 3858 bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max); 3859 cp->cache_slabsize = bestfit; 3860 cp->cache_mincolor = 0; 3861 cp->cache_maxcolor = bestfit % chunksize; 3862 cp->cache_flags |= KMF_HASH; 3863 } 3864 3865 cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize); 3866 cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1; 3867 3868 /* 3869 * Disallowing prefill when either the DEBUG or HASH flag is set or when 3870 * there is a constructor avoids some tricky issues with debug setup 3871 * that may be revisited later. We cannot allow prefill in a 3872 * metadata cache because of potential recursion. 3873 */ 3874 if (vmp == kmem_msb_arena || 3875 cp->cache_flags & (KMF_HASH | KMF_BUFTAG) || 3876 cp->cache_constructor != NULL) 3877 cp->cache_flags &= ~KMF_PREFILL; 3878 3879 if (cp->cache_flags & KMF_HASH) { 3880 ASSERT(!(cflags & KMC_NOHASH)); 3881 cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ? 3882 kmem_bufctl_audit_cache : kmem_bufctl_cache; 3883 } 3884 3885 if (cp->cache_maxcolor >= vmp->vm_quantum) 3886 cp->cache_maxcolor = vmp->vm_quantum - 1; 3887 3888 cp->cache_color = cp->cache_mincolor; 3889 3890 /* 3891 * Initialize the rest of the slab layer. 3892 */ 3893 mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL); 3894 3895 avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp, 3896 sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link)); 3897 /* LINTED: E_TRUE_LOGICAL_EXPR */ 3898 ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t)); 3899 /* reuse partial slab AVL linkage for complete slab list linkage */ 3900 list_create(&cp->cache_complete_slabs, 3901 sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link)); 3902 3903 if (cp->cache_flags & KMF_HASH) { 3904 cp->cache_hash_table = vmem_alloc(kmem_hash_arena, 3905 KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP); 3906 bzero(cp->cache_hash_table, 3907 KMEM_HASH_INITIAL * sizeof (void *)); 3908 cp->cache_hash_mask = KMEM_HASH_INITIAL - 1; 3909 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1; 3910 } 3911 3912 /* 3913 * Initialize the depot. 3914 */ 3915 mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL); 3916 3917 for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++) 3918 continue; 3919 3920 cp->cache_magtype = mtp; 3921 3922 /* 3923 * Initialize the CPU layer. 
3924 */ 3925 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) { 3926 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; 3927 mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL); 3928 ccp->cc_flags = cp->cache_flags; 3929 ccp->cc_rounds = -1; 3930 ccp->cc_prounds = -1; 3931 } 3932 3933 /* 3934 * Create the cache's kstats. 3935 */ 3936 if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name, 3937 "kmem_cache", KSTAT_TYPE_NAMED, 3938 sizeof (kmem_cache_kstat) / sizeof (kstat_named_t), 3939 KSTAT_FLAG_VIRTUAL)) != NULL) { 3940 cp->cache_kstat->ks_data = &kmem_cache_kstat; 3941 cp->cache_kstat->ks_update = kmem_cache_kstat_update; 3942 cp->cache_kstat->ks_private = cp; 3943 cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock; 3944 kstat_install(cp->cache_kstat); 3945 } 3946 3947 /* 3948 * Add the cache to the global list. This makes it visible 3949 * to kmem_update(), so the cache must be ready for business. 3950 */ 3951 mutex_enter(&kmem_cache_lock); 3952 list_insert_tail(&kmem_caches, cp); 3953 mutex_exit(&kmem_cache_lock); 3954 3955 if (kmem_ready) 3956 kmem_cache_magazine_enable(cp); 3957 3958 return (cp); 3959 } 3960 3961 static int 3962 kmem_move_cmp(const void *buf, const void *p) 3963 { 3964 const kmem_move_t *kmm = p; 3965 uintptr_t v1 = (uintptr_t)buf; 3966 uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf; 3967 return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0)); 3968 } 3969 3970 static void 3971 kmem_reset_reclaim_threshold(kmem_defrag_t *kmd) 3972 { 3973 kmd->kmd_reclaim_numer = 1; 3974 } 3975 3976 /* 3977 * Initially, when choosing candidate slabs for buffers to move, we want to be 3978 * very selective and take only slabs that are less than 3979 * (1 / KMEM_VOID_FRACTION) allocated. If we have difficulty finding candidate 3980 * slabs, then we raise the allocation ceiling incrementally. The reclaim 3981 * threshold is reset to (1 / KMEM_VOID_FRACTION) as soon as the cache is no 3982 * longer fragmented. 3983 */ 3984 static void 3985 kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction) 3986 { 3987 if (direction > 0) { 3988 /* make it easier to find a candidate slab */ 3989 if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) { 3990 kmd->kmd_reclaim_numer++; 3991 } 3992 } else { 3993 /* be more selective */ 3994 if (kmd->kmd_reclaim_numer > 1) { 3995 kmd->kmd_reclaim_numer--; 3996 } 3997 } 3998 } 3999 4000 void 4001 kmem_cache_set_move(kmem_cache_t *cp, 4002 kmem_cbrc_t (*move)(void *, void *, size_t, void *)) 4003 { 4004 kmem_defrag_t *defrag; 4005 4006 ASSERT(move != NULL); 4007 /* 4008 * The consolidator does not support NOTOUCH caches because kmem cannot 4009 * initialize their slabs with the 0xbaddcafe memory pattern, which sets 4010 * a low order bit usable by clients to distinguish uninitialized memory 4011 * from known objects (see kmem_slab_create). 4012 */ 4013 ASSERT(!(cp->cache_cflags & KMC_NOTOUCH)); 4014 ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER)); 4015 4016 /* 4017 * We should not be holding anyone's cache lock when calling 4018 * kmem_cache_alloc(), so allocate in all cases before acquiring the 4019 * lock. 
4020 */ 4021 defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP); 4022 4023 mutex_enter(&cp->cache_lock); 4024 4025 if (KMEM_IS_MOVABLE(cp)) { 4026 if (cp->cache_move == NULL) { 4027 ASSERT(cp->cache_slab_alloc == 0); 4028 4029 cp->cache_defrag = defrag; 4030 defrag = NULL; /* nothing to free */ 4031 bzero(cp->cache_defrag, sizeof (kmem_defrag_t)); 4032 avl_create(&cp->cache_defrag->kmd_moves_pending, 4033 kmem_move_cmp, sizeof (kmem_move_t), 4034 offsetof(kmem_move_t, kmm_entry)); 4035 /* LINTED: E_TRUE_LOGICAL_EXPR */ 4036 ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t)); 4037 /* reuse the slab's AVL linkage for deadlist linkage */ 4038 list_create(&cp->cache_defrag->kmd_deadlist, 4039 sizeof (kmem_slab_t), 4040 offsetof(kmem_slab_t, slab_link)); 4041 kmem_reset_reclaim_threshold(cp->cache_defrag); 4042 } 4043 cp->cache_move = move; 4044 } 4045 4046 mutex_exit(&cp->cache_lock); 4047 4048 if (defrag != NULL) { 4049 kmem_cache_free(kmem_defrag_cache, defrag); /* unused */ 4050 } 4051 } 4052 4053 void 4054 kmem_cache_destroy(kmem_cache_t *cp) 4055 { 4056 int cpu_seqid; 4057 4058 /* 4059 * Remove the cache from the global cache list so that no one else 4060 * can schedule tasks on its behalf, wait for any pending tasks to 4061 * complete, purge the cache, and then destroy it. 4062 */ 4063 mutex_enter(&kmem_cache_lock); 4064 list_remove(&kmem_caches, cp); 4065 mutex_exit(&kmem_cache_lock); 4066 4067 if (kmem_taskq != NULL) 4068 taskq_wait(kmem_taskq); 4069 if (kmem_move_taskq != NULL) 4070 taskq_wait(kmem_move_taskq); 4071 4072 kmem_cache_magazine_purge(cp); 4073 4074 mutex_enter(&cp->cache_lock); 4075 if (cp->cache_buftotal != 0) 4076 cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty", 4077 cp->cache_name, (void *)cp); 4078 if (cp->cache_defrag != NULL) { 4079 avl_destroy(&cp->cache_defrag->kmd_moves_pending); 4080 list_destroy(&cp->cache_defrag->kmd_deadlist); 4081 kmem_cache_free(kmem_defrag_cache, cp->cache_defrag); 4082 cp->cache_defrag = NULL; 4083 } 4084 /* 4085 * The cache is now dead. There should be no further activity. We 4086 * enforce this by setting land mines in the constructor, destructor, 4087 * reclaim, and move routines that induce a kernel text fault if 4088 * invoked. 
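 *
 * Illustrative teardown order (hypothetical names): the client must free
 * every outstanding object before destroying its cache, otherwise the
 * "not empty" warning above fires and any later use of the cache that
 * invokes its constructor, destructor, reclaim, or move routine trips the
 * land mines set below:
 *
 *	kmem_cache_free(foo_cache, foo);	(for every live foo)
 *	kmem_cache_destroy(foo_cache);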
4089 */ 4090 cp->cache_constructor = (int (*)(void *, void *, int))1; 4091 cp->cache_destructor = (void (*)(void *, void *))2; 4092 cp->cache_reclaim = (void (*)(void *))3; 4093 cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4; 4094 mutex_exit(&cp->cache_lock); 4095 4096 kstat_delete(cp->cache_kstat); 4097 4098 if (cp->cache_hash_table != NULL) 4099 vmem_free(kmem_hash_arena, cp->cache_hash_table, 4100 (cp->cache_hash_mask + 1) * sizeof (void *)); 4101 4102 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) 4103 mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock); 4104 4105 mutex_destroy(&cp->cache_depot_lock); 4106 mutex_destroy(&cp->cache_lock); 4107 4108 vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus)); 4109 } 4110 4111 /*ARGSUSED*/ 4112 static int 4113 kmem_cpu_setup(cpu_setup_t what, int id, void *arg) 4114 { 4115 ASSERT(MUTEX_HELD(&cpu_lock)); 4116 if (what == CPU_UNCONFIG) { 4117 kmem_cache_applyall(kmem_cache_magazine_purge, 4118 kmem_taskq, TQ_SLEEP); 4119 kmem_cache_applyall(kmem_cache_magazine_enable, 4120 kmem_taskq, TQ_SLEEP); 4121 } 4122 return (0); 4123 } 4124 4125 static void 4126 kmem_alloc_caches_create(const int *array, size_t count, 4127 kmem_cache_t **alloc_table, size_t maxbuf, uint_t shift) 4128 { 4129 char name[KMEM_CACHE_NAMELEN + 1]; 4130 size_t table_unit = (1 << shift); /* range of one alloc_table entry */ 4131 size_t size = table_unit; 4132 int i; 4133 4134 for (i = 0; i < count; i++) { 4135 size_t cache_size = array[i]; 4136 size_t align = KMEM_ALIGN; 4137 kmem_cache_t *cp; 4138 4139 /* if the table has an entry for maxbuf, we're done */ 4140 if (size > maxbuf) 4141 break; 4142 4143 /* cache size must be a multiple of the table unit */ 4144 ASSERT(P2PHASE(cache_size, table_unit) == 0); 4145 4146 /* 4147 * If they allocate a multiple of the coherency granularity, 4148 * they get a coherency-granularity-aligned address. 4149 */ 4150 if (IS_P2ALIGNED(cache_size, 64)) 4151 align = 64; 4152 if (IS_P2ALIGNED(cache_size, PAGESIZE)) 4153 align = PAGESIZE; 4154 (void) snprintf(name, sizeof (name), 4155 "kmem_alloc_%lu", cache_size); 4156 cp = kmem_cache_create(name, cache_size, align, 4157 NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC); 4158 4159 while (size <= cache_size) { 4160 alloc_table[(size - 1) >> shift] = cp; 4161 size += table_unit; 4162 } 4163 } 4164 4165 ASSERT(size > maxbuf); /* i.e. 
maxbuf <= max(cache_size) */ 4166 } 4167 4168 static void 4169 kmem_cache_init(int pass, int use_large_pages) 4170 { 4171 int i; 4172 size_t maxbuf; 4173 kmem_magtype_t *mtp; 4174 4175 for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) { 4176 char name[KMEM_CACHE_NAMELEN + 1]; 4177 4178 mtp = &kmem_magtype[i]; 4179 (void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize); 4180 mtp->mt_cache = kmem_cache_create(name, 4181 (mtp->mt_magsize + 1) * sizeof (void *), 4182 mtp->mt_align, NULL, NULL, NULL, NULL, 4183 kmem_msb_arena, KMC_NOHASH); 4184 } 4185 4186 kmem_slab_cache = kmem_cache_create("kmem_slab_cache", 4187 sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL, 4188 kmem_msb_arena, KMC_NOHASH); 4189 4190 kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache", 4191 sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL, 4192 kmem_msb_arena, KMC_NOHASH); 4193 4194 kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache", 4195 sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL, 4196 kmem_msb_arena, KMC_NOHASH); 4197 4198 if (pass == 2) { 4199 kmem_va_arena = vmem_create("kmem_va", 4200 NULL, 0, PAGESIZE, 4201 vmem_alloc, vmem_free, heap_arena, 4202 8 * PAGESIZE, VM_SLEEP); 4203 4204 if (use_large_pages) { 4205 kmem_default_arena = vmem_xcreate("kmem_default", 4206 NULL, 0, PAGESIZE, 4207 segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena, 4208 0, VMC_DUMPSAFE | VM_SLEEP); 4209 } else { 4210 kmem_default_arena = vmem_create("kmem_default", 4211 NULL, 0, PAGESIZE, 4212 segkmem_alloc, segkmem_free, kmem_va_arena, 4213 0, VMC_DUMPSAFE | VM_SLEEP); 4214 } 4215 4216 /* Figure out what our maximum cache size is */ 4217 maxbuf = kmem_max_cached; 4218 if (maxbuf <= KMEM_MAXBUF) { 4219 maxbuf = 0; 4220 kmem_max_cached = KMEM_MAXBUF; 4221 } else { 4222 size_t size = 0; 4223 size_t max = 4224 sizeof (kmem_big_alloc_sizes) / sizeof (int); 4225 /* 4226 * Round maxbuf up to an existing cache size. If maxbuf 4227 * is larger than the largest cache, we truncate it to 4228 * the largest cache's size. 4229 */ 4230 for (i = 0; i < max; i++) { 4231 size = kmem_big_alloc_sizes[i]; 4232 if (maxbuf <= size) 4233 break; 4234 } 4235 kmem_max_cached = maxbuf = size; 4236 } 4237 4238 /* 4239 * The big alloc table may not be completely overwritten, so 4240 * we clear out any stale cache pointers from the first pass. 4241 */ 4242 bzero(kmem_big_alloc_table, sizeof (kmem_big_alloc_table)); 4243 } else { 4244 /* 4245 * During the first pass, the kmem_alloc_* caches 4246 * are treated as metadata. 4247 */ 4248 kmem_default_arena = kmem_msb_arena; 4249 maxbuf = KMEM_BIG_MAXBUF_32BIT; 4250 } 4251 4252 /* 4253 * Set up the default caches to back kmem_alloc() 4254 */ 4255 kmem_alloc_caches_create( 4256 kmem_alloc_sizes, sizeof (kmem_alloc_sizes) / sizeof (int), 4257 kmem_alloc_table, KMEM_MAXBUF, KMEM_ALIGN_SHIFT); 4258 4259 kmem_alloc_caches_create( 4260 kmem_big_alloc_sizes, sizeof (kmem_big_alloc_sizes) / sizeof (int), 4261 kmem_big_alloc_table, maxbuf, KMEM_BIG_SHIFT); 4262 4263 kmem_big_alloc_table_max = maxbuf >> KMEM_BIG_SHIFT; 4264 } 4265 4266 void 4267 kmem_init(void) 4268 { 4269 kmem_cache_t *cp; 4270 int old_kmem_flags = kmem_flags; 4271 int use_large_pages = 0; 4272 size_t maxverify, minfirewall; 4273 4274 kstat_init(); 4275 4276 /* 4277 * Don't do firewalled allocations if the heap is less than 1TB 4278 * (i.e. on a 32-bit kernel) 4279 * The resulting VM_NEXTFIT allocations would create too much 4280 * fragmentation in a small heap. 
4281 */ 4282 #if defined(_LP64) 4283 maxverify = minfirewall = PAGESIZE / 2; 4284 #else 4285 maxverify = minfirewall = ULONG_MAX; 4286 #endif 4287 4288 /* LINTED */ 4289 ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE); 4290 4291 list_create(&kmem_caches, sizeof (kmem_cache_t), 4292 offsetof(kmem_cache_t, cache_link)); 4293 4294 kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE, 4295 vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE, 4296 VM_SLEEP | VMC_NO_QCACHE); 4297 4298 kmem_msb_arena = vmem_create("kmem_msb", NULL, 0, 4299 PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, 4300 VMC_DUMPSAFE | VM_SLEEP); 4301 4302 kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN, 4303 segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP); 4304 4305 kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN, 4306 segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP); 4307 4308 kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN, 4309 segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP); 4310 4311 kmem_firewall_va_arena = vmem_create("kmem_firewall_va", 4312 NULL, 0, PAGESIZE, 4313 kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena, 4314 0, VM_SLEEP); 4315 4316 kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE, 4317 segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0, 4318 VMC_DUMPSAFE | VM_SLEEP); 4319 4320 /* temporary oversize arena for mod_read_system_file */ 4321 kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE, 4322 segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP); 4323 4324 kmem_reap_interval = 15 * hz; 4325 4326 /* 4327 * Read /etc/system. This is a chicken-and-egg problem because 4328 * kmem_flags may be set in /etc/system, but mod_read_system_file() 4329 * needs to use the allocator. The simplest solution is to create 4330 * all the standard kmem caches, read /etc/system, destroy all the 4331 * caches we just created, and then create them all again in light 4332 * of the (possibly) new kmem_flags and other kmem tunables. 4333 */ 4334 kmem_cache_init(1, 0); 4335 4336 mod_read_system_file(boothowto & RB_ASKNAME); 4337 4338 while ((cp = list_tail(&kmem_caches)) != NULL) 4339 kmem_cache_destroy(cp); 4340 4341 vmem_destroy(kmem_oversize_arena); 4342 4343 if (old_kmem_flags & KMF_STICKY) 4344 kmem_flags = old_kmem_flags; 4345 4346 if (!(kmem_flags & KMF_AUDIT)) 4347 vmem_seg_size = offsetof(vmem_seg_t, vs_thread); 4348 4349 if (kmem_maxverify == 0) 4350 kmem_maxverify = maxverify; 4351 4352 if (kmem_minfirewall == 0) 4353 kmem_minfirewall = minfirewall; 4354 4355 /* 4356 * give segkmem a chance to figure out if we are using large pages 4357 * for the kernel heap 4358 */ 4359 use_large_pages = segkmem_lpsetup(); 4360 4361 /* 4362 * To protect against corruption, we keep the actual number of callers that 4363 * KMF_LITE records separate from the tunable. We arbitrarily clamp 4364 * to 16, since the overhead for small buffers quickly gets out of 4365 * hand. 4366 * 4367 * The real limit would depend on the needs of the largest KMC_NOHASH 4368 * cache. 4369 */ 4370 kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16); 4371 kmem_lite_pcs = kmem_lite_count; 4372 4373 /* 4374 * Normally, we firewall oversized allocations when possible, but 4375 * if we are using large pages for kernel memory, and we don't have 4376 * any non-LITE debugging flags set, we want to allocate oversized 4377 * buffers from large pages, and so skip the firewalling.
4378 */ 4379 if (use_large_pages && 4380 ((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) { 4381 kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0, 4382 PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena, 4383 0, VMC_DUMPSAFE | VM_SLEEP); 4384 } else { 4385 kmem_oversize_arena = vmem_create("kmem_oversize", 4386 NULL, 0, PAGESIZE, 4387 segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX? 4388 kmem_firewall_va_arena : heap_arena, 0, VMC_DUMPSAFE | 4389 VM_SLEEP); 4390 } 4391 4392 kmem_cache_init(2, use_large_pages); 4393 4394 if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) { 4395 if (kmem_transaction_log_size == 0) 4396 kmem_transaction_log_size = kmem_maxavail() / 50; 4397 kmem_transaction_log = kmem_log_init(kmem_transaction_log_size); 4398 } 4399 4400 if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) { 4401 if (kmem_content_log_size == 0) 4402 kmem_content_log_size = kmem_maxavail() / 50; 4403 kmem_content_log = kmem_log_init(kmem_content_log_size); 4404 } 4405 4406 kmem_failure_log = kmem_log_init(kmem_failure_log_size); 4407 4408 kmem_slab_log = kmem_log_init(kmem_slab_log_size); 4409 4410 /* 4411 * Initialize STREAMS message caches so allocb() is available. 4412 * This allows us to initialize the logging framework (cmn_err(9F), 4413 * strlog(9F), etc) so we can start recording messages. 4414 */ 4415 streams_msg_init(); 4416 4417 /* 4418 * Initialize the ZSD framework in Zones so modules loaded henceforth 4419 * can register their callbacks. 4420 */ 4421 zone_zsd_init(); 4422 4423 log_init(); 4424 taskq_init(); 4425 4426 /* 4427 * Warn about invalid or dangerous values of kmem_flags. 4428 * Always warn about unsupported values. 4429 */ 4430 if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | 4431 KMF_CONTENTS | KMF_LITE)) != 0) || 4432 ((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE)) 4433 cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x. " 4434 "See the Solaris Tunable Parameters Reference Manual.", 4435 kmem_flags); 4436 4437 #ifdef DEBUG 4438 if ((kmem_flags & KMF_DEBUG) == 0) 4439 cmn_err(CE_NOTE, "kmem debugging disabled."); 4440 #else 4441 /* 4442 * For non-debug kernels, the only "normal" flags are 0, KMF_LITE, 4443 * KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled 4444 * if KMF_AUDIT is set). We should warn the user about the performance 4445 * penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE 4446 * isn't set (since that disables AUDIT). 4447 */ 4448 if (!(kmem_flags & KMF_LITE) && 4449 (kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0) 4450 cmn_err(CE_WARN, "High-overhead kmem debugging features " 4451 "enabled (kmem_flags = 0x%x). Performance degradation " 4452 "and large memory overhead possible. See the Solaris " 4453 "Tunable Parameters Reference Manual.", kmem_flags); 4454 #endif /* not DEBUG */ 4455 4456 kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP); 4457 4458 kmem_ready = 1; 4459 4460 /* 4461 * Initialize the platform-specific aligned/DMA memory allocator. 4462 */ 4463 ka_init(); 4464 4465 /* 4466 * Initialize 32-bit ID cache. 4467 */ 4468 id32_init(); 4469 4470 /* 4471 * Initialize the networking stack so modules loaded can 4472 * register their callbacks. 
4473 */ 4474 netstack_init(); 4475 } 4476 4477 static void 4478 kmem_move_init(void) 4479 { 4480 kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache", 4481 sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL, 4482 kmem_msb_arena, KMC_NOHASH); 4483 kmem_move_cache = kmem_cache_create("kmem_move_cache", 4484 sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL, 4485 kmem_msb_arena, KMC_NOHASH); 4486 4487 /* 4488 * kmem guarantees that move callbacks are sequential and that even 4489 * across multiple caches no two moves ever execute simultaneously. 4490 * Move callbacks are processed on a separate taskq so that client code 4491 * does not interfere with internal maintenance tasks. 4492 */ 4493 kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1, 4494 minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE); 4495 } 4496 4497 void 4498 kmem_thread_init(void) 4499 { 4500 kmem_move_init(); 4501 kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri, 4502 300, INT_MAX, TASKQ_PREPOPULATE); 4503 } 4504 4505 void 4506 kmem_mp_init(void) 4507 { 4508 mutex_enter(&cpu_lock); 4509 register_cpu_setup_func(kmem_cpu_setup, NULL); 4510 mutex_exit(&cpu_lock); 4511 4512 kmem_update_timeout(NULL); 4513 4514 taskq_mp_init(); 4515 } 4516 4517 /* 4518 * Return the slab of the allocated buffer, or NULL if the buffer is not 4519 * allocated. This function may be called with a known slab address to determine 4520 * whether or not the buffer is allocated, or with a NULL slab address to obtain 4521 * an allocated buffer's slab. 4522 */ 4523 static kmem_slab_t * 4524 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf) 4525 { 4526 kmem_bufctl_t *bcp, *bufbcp; 4527 4528 ASSERT(MUTEX_HELD(&cp->cache_lock)); 4529 ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf)); 4530 4531 if (cp->cache_flags & KMF_HASH) { 4532 for (bcp = *KMEM_HASH(cp, buf); 4533 (bcp != NULL) && (bcp->bc_addr != buf); 4534 bcp = bcp->bc_next) { 4535 continue; 4536 } 4537 ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1); 4538 return (bcp == NULL ? NULL : bcp->bc_slab); 4539 } 4540 4541 if (sp == NULL) { 4542 sp = KMEM_SLAB(cp, buf); 4543 } 4544 bufbcp = KMEM_BUFCTL(cp, buf); 4545 for (bcp = sp->slab_head; 4546 (bcp != NULL) && (bcp != bufbcp); 4547 bcp = bcp->bc_next) { 4548 continue; 4549 } 4550 return (bcp == NULL ? sp : NULL); 4551 } 4552 4553 static boolean_t 4554 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags) 4555 { 4556 long refcnt = sp->slab_refcnt; 4557 4558 ASSERT(cp->cache_defrag != NULL); 4559 4560 /* 4561 * For code coverage we want to be able to move an object within the 4562 * same slab (the only partial slab) even if allocating the destination 4563 * buffer resulted in a completely allocated slab. 4564 */ 4565 if (flags & KMM_DEBUG) { 4566 return ((flags & KMM_DESPERATE) || 4567 ((sp->slab_flags & KMEM_SLAB_NOMOVE) == 0)); 4568 } 4569 4570 /* If we're desperate, we don't care if the client said NO. */ 4571 if (flags & KMM_DESPERATE) { 4572 return (refcnt < sp->slab_chunks); /* any partial */ 4573 } 4574 4575 if (sp->slab_flags & KMEM_SLAB_NOMOVE) { 4576 return (B_FALSE); 4577 } 4578 4579 if ((refcnt == 1) || kmem_move_any_partial) { 4580 return (refcnt < sp->slab_chunks); 4581 } 4582 4583 /* 4584 * The reclaim threshold is adjusted at each kmem_cache_scan() so that 4585 * slabs with a progressively higher percentage of used buffers can be 4586 * reclaimed until the cache as a whole is no longer fragmented. 
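 *
 * For example (illustrative): with the initial numerator of 1, only slabs
 * less than 1/KMEM_VOID_FRACTION allocated are candidates; once the
 * numerator has been raised to half of KMEM_VOID_FRACTION, slabs up to
 * half allocated qualify, as expressed by the test below: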
4587 * 4588 * sp->slab_refcnt kmd_reclaim_numer 4589 * --------------- < ------------------ 4590 * sp->slab_chunks KMEM_VOID_FRACTION 4591 */ 4592 return ((refcnt * KMEM_VOID_FRACTION) < 4593 (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer)); 4594 } 4595 4596 static void * 4597 kmem_hunt_mag(kmem_cache_t *cp, kmem_magazine_t *m, int n, void *buf, 4598 void *tbuf) 4599 { 4600 int i; /* magazine round index */ 4601 4602 for (i = 0; i < n; i++) { 4603 if (buf == m->mag_round[i]) { 4604 if (cp->cache_flags & KMF_BUFTAG) { 4605 (void) kmem_cache_free_debug(cp, tbuf, 4606 caller()); 4607 } 4608 m->mag_round[i] = tbuf; 4609 return (buf); 4610 } 4611 } 4612 4613 return (NULL); 4614 } 4615 4616 /* 4617 * Hunt the magazine layer for the given buffer. If found, the buffer is 4618 * removed from the magazine layer and returned, otherwise NULL is returned. 4619 * The returned buffer is in the freed, constructed state. 4620 */ 4621 static void * 4622 kmem_hunt_mags(kmem_cache_t *cp, void *buf) 4623 { 4624 kmem_cpu_cache_t *ccp; 4625 kmem_magazine_t *m; 4626 int cpu_seqid; 4627 int n; /* magazine rounds */ 4628 void *tbuf; /* temporary swap buffer */ 4629 4630 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); 4631 4632 /* 4633 * Allocate a buffer to swap with the one we hope to pull out of a 4634 * magazine when found. 4635 */ 4636 tbuf = kmem_cache_alloc(cp, KM_NOSLEEP); 4637 if (tbuf == NULL) { 4638 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_alloc_fail); 4639 return (NULL); 4640 } 4641 if (tbuf == buf) { 4642 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_lucky); 4643 if (cp->cache_flags & KMF_BUFTAG) { 4644 (void) kmem_cache_free_debug(cp, buf, caller()); 4645 } 4646 return (buf); 4647 } 4648 4649 /* Hunt the depot. */ 4650 mutex_enter(&cp->cache_depot_lock); 4651 n = cp->cache_magtype->mt_magsize; 4652 for (m = cp->cache_full.ml_list; m != NULL; m = m->mag_next) { 4653 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) { 4654 mutex_exit(&cp->cache_depot_lock); 4655 return (buf); 4656 } 4657 } 4658 mutex_exit(&cp->cache_depot_lock); 4659 4660 /* Hunt the per-CPU magazines. */ 4661 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) { 4662 ccp = &cp->cache_cpu[cpu_seqid]; 4663 4664 mutex_enter(&ccp->cc_lock); 4665 m = ccp->cc_loaded; 4666 n = ccp->cc_rounds; 4667 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) { 4668 mutex_exit(&ccp->cc_lock); 4669 return (buf); 4670 } 4671 m = ccp->cc_ploaded; 4672 n = ccp->cc_prounds; 4673 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) { 4674 mutex_exit(&ccp->cc_lock); 4675 return (buf); 4676 } 4677 mutex_exit(&ccp->cc_lock); 4678 } 4679 4680 kmem_cache_free(cp, tbuf); 4681 return (NULL); 4682 } 4683 4684 /* 4685 * May be called from the kmem_move_taskq, from kmem_cache_move_notify_task(), 4686 * or when the buffer is freed.
4687 */ 4688 static void 4689 kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf) 4690 { 4691 ASSERT(MUTEX_HELD(&cp->cache_lock)); 4692 ASSERT(KMEM_SLAB_MEMBER(sp, from_buf)); 4693 4694 if (!KMEM_SLAB_IS_PARTIAL(sp)) { 4695 return; 4696 } 4697 4698 if (sp->slab_flags & KMEM_SLAB_NOMOVE) { 4699 if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) { 4700 avl_remove(&cp->cache_partial_slabs, sp); 4701 sp->slab_flags &= ~KMEM_SLAB_NOMOVE; 4702 sp->slab_stuck_offset = (uint32_t)-1; 4703 avl_add(&cp->cache_partial_slabs, sp); 4704 } 4705 } else { 4706 sp->slab_later_count = 0; 4707 sp->slab_stuck_offset = (uint32_t)-1; 4708 } 4709 } 4710 4711 static void 4712 kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf) 4713 { 4714 ASSERT(taskq_member(kmem_move_taskq, curthread)); 4715 ASSERT(MUTEX_HELD(&cp->cache_lock)); 4716 ASSERT(KMEM_SLAB_MEMBER(sp, from_buf)); 4717 4718 if (!KMEM_SLAB_IS_PARTIAL(sp)) { 4719 return; 4720 } 4721 4722 avl_remove(&cp->cache_partial_slabs, sp); 4723 sp->slab_later_count = 0; 4724 sp->slab_flags |= KMEM_SLAB_NOMOVE; 4725 sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf); 4726 avl_add(&cp->cache_partial_slabs, sp); 4727 } 4728 4729 static void kmem_move_end(kmem_cache_t *, kmem_move_t *); 4730 4731 /* 4732 * The move callback takes two buffer addresses, the buffer to be moved, and a 4733 * newly allocated and constructed buffer selected by kmem as the destination. 4734 * It also takes the size of the buffer and an optional user argument specified 4735 * at cache creation time. kmem guarantees that the buffer to be moved has not 4736 * been unmapped by the virtual memory subsystem. Beyond that, it cannot 4737 * guarantee the present whereabouts of the buffer to be moved, so it is up to 4738 * the client to safely determine whether or not it is still using the buffer. 4739 * The client must not free either of the buffers passed to the move callback, 4740 * since kmem wants to free them directly to the slab layer. The client response 4741 * tells kmem which of the two buffers to free: 4742 * 4743 * YES kmem frees the old buffer (the move was successful) 4744 * NO kmem frees the new buffer, marks the slab of the old buffer 4745 * non-reclaimable to avoid bothering the client again 4746 * LATER kmem frees the new buffer, increments slab_later_count 4747 * DONT_KNOW kmem frees the new buffer, searches mags for the old buffer 4748 * DONT_NEED kmem frees both the old buffer and the new buffer 4749 * 4750 * The pending callback argument now being processed contains both of the 4751 * buffers (old and new) passed to the move callback function, the slab of the 4752 * old buffer, and flags related to the move request, such as whether or not the 4753 * system was desperate for memory. 4754 * 4755 * Slabs are not freed while there is a pending callback, but instead are kept 4756 * on a deadlist, which is drained after the last callback completes. This means 4757 * that slabs are safe to access until kmem_move_end(), no matter how many of 4758 * their buffers have been freed. Once slab_refcnt reaches zero, it stays at 4759 * zero for as long as the slab remains on the deadlist and until the slab is 4760 * freed. 
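 *
 * As an illustrative sketch only (hypothetical helper names, not taken
 * from any real client): a move callback typically verifies that it still
 * owns the old buffer, checks whether now is a convenient time, copies the
 * contents, repoints its own references, and answers accordingly:
 *
 *	static kmem_cbrc_t
 *	foo_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		foo_t *fp = old;
 *
 *		if (!foo_still_owned(fp))
 *			return (KMEM_CBRC_DONT_KNOW);
 *		if (foo_busy(fp))
 *			return (KMEM_CBRC_LATER);
 *		bcopy(old, new, size);
 *		foo_repoint_references(fp, new);
 *		return (KMEM_CBRC_YES);
 *	}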
4761 */ 4762 static void 4763 kmem_move_buffer(kmem_move_t *callback) 4764 { 4765 kmem_cbrc_t response; 4766 kmem_slab_t *sp = callback->kmm_from_slab; 4767 kmem_cache_t *cp = sp->slab_cache; 4768 boolean_t free_on_slab; 4769 4770 ASSERT(taskq_member(kmem_move_taskq, curthread)); 4771 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); 4772 ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf)); 4773 4774 /* 4775 * The number of allocated buffers on the slab may have changed since we 4776 * last checked the slab's reclaimability (when the pending move was 4777 * enqueued), or the client may have responded NO when asked to move 4778 * another buffer on the same slab. 4779 */ 4780 if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) { 4781 KMEM_STAT_ADD(kmem_move_stats.kms_no_longer_reclaimable); 4782 KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY), 4783 kmem_move_stats.kms_notify_no_longer_reclaimable); 4784 kmem_slab_free(cp, callback->kmm_to_buf); 4785 kmem_move_end(cp, callback); 4786 return; 4787 } 4788 4789 /* 4790 * Hunting magazines is expensive, so we'll wait to do that until the 4791 * client responds KMEM_CBRC_DONT_KNOW. However, checking the slab layer 4792 * is cheap, so we might as well do that here in case we can avoid 4793 * bothering the client. 4794 */ 4795 mutex_enter(&cp->cache_lock); 4796 free_on_slab = (kmem_slab_allocated(cp, sp, 4797 callback->kmm_from_buf) == NULL); 4798 mutex_exit(&cp->cache_lock); 4799 4800 if (free_on_slab) { 4801 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_slab); 4802 kmem_slab_free(cp, callback->kmm_to_buf); 4803 kmem_move_end(cp, callback); 4804 return; 4805 } 4806 4807 if (cp->cache_flags & KMF_BUFTAG) { 4808 /* 4809 * Make kmem_cache_alloc_debug() apply the constructor for us. 4810 */ 4811 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf, 4812 KM_NOSLEEP, 1, caller()) != 0) { 4813 KMEM_STAT_ADD(kmem_move_stats.kms_alloc_fail); 4814 kmem_move_end(cp, callback); 4815 return; 4816 } 4817 } else if (cp->cache_constructor != NULL && 4818 cp->cache_constructor(callback->kmm_to_buf, cp->cache_private, 4819 KM_NOSLEEP) != 0) { 4820 atomic_inc_64(&cp->cache_alloc_fail); 4821 KMEM_STAT_ADD(kmem_move_stats.kms_constructor_fail); 4822 kmem_slab_free(cp, callback->kmm_to_buf); 4823 kmem_move_end(cp, callback); 4824 return; 4825 } 4826 4827 KMEM_STAT_ADD(kmem_move_stats.kms_callbacks); 4828 KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY), 4829 kmem_move_stats.kms_notify_callbacks); 4830 cp->cache_defrag->kmd_callbacks++; 4831 cp->cache_defrag->kmd_thread = curthread; 4832 cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf; 4833 cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf; 4834 DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *, 4835 callback); 4836 4837 response = cp->cache_move(callback->kmm_from_buf, 4838 callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private); 4839 4840 DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *, 4841 callback, kmem_cbrc_t, response); 4842 cp->cache_defrag->kmd_thread = NULL; 4843 cp->cache_defrag->kmd_from_buf = NULL; 4844 cp->cache_defrag->kmd_to_buf = NULL; 4845 4846 if (response == KMEM_CBRC_YES) { 4847 KMEM_STAT_ADD(kmem_move_stats.kms_yes); 4848 cp->cache_defrag->kmd_yes++; 4849 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE); 4850 /* slab safe to access until kmem_move_end() */ 4851 if (sp->slab_refcnt == 0) 4852 cp->cache_defrag->kmd_slabs_freed++; 4853 mutex_enter(&cp->cache_lock); 4854 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf); 4855 
mutex_exit(&cp->cache_lock); 4856 kmem_move_end(cp, callback); 4857 return; 4858 } 4859 4860 switch (response) { 4861 case KMEM_CBRC_NO: 4862 KMEM_STAT_ADD(kmem_move_stats.kms_no); 4863 cp->cache_defrag->kmd_no++; 4864 mutex_enter(&cp->cache_lock); 4865 kmem_slab_move_no(cp, sp, callback->kmm_from_buf); 4866 mutex_exit(&cp->cache_lock); 4867 break; 4868 case KMEM_CBRC_LATER: 4869 KMEM_STAT_ADD(kmem_move_stats.kms_later); 4870 cp->cache_defrag->kmd_later++; 4871 mutex_enter(&cp->cache_lock); 4872 if (!KMEM_SLAB_IS_PARTIAL(sp)) { 4873 mutex_exit(&cp->cache_lock); 4874 break; 4875 } 4876 4877 if (++sp->slab_later_count >= KMEM_DISBELIEF) { 4878 KMEM_STAT_ADD(kmem_move_stats.kms_disbelief); 4879 kmem_slab_move_no(cp, sp, callback->kmm_from_buf); 4880 } else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) { 4881 sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, 4882 callback->kmm_from_buf); 4883 } 4884 mutex_exit(&cp->cache_lock); 4885 break; 4886 case KMEM_CBRC_DONT_NEED: 4887 KMEM_STAT_ADD(kmem_move_stats.kms_dont_need); 4888 cp->cache_defrag->kmd_dont_need++; 4889 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE); 4890 if (sp->slab_refcnt == 0) 4891 cp->cache_defrag->kmd_slabs_freed++; 4892 mutex_enter(&cp->cache_lock); 4893 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf); 4894 mutex_exit(&cp->cache_lock); 4895 break; 4896 case KMEM_CBRC_DONT_KNOW: 4897 KMEM_STAT_ADD(kmem_move_stats.kms_dont_know); 4898 cp->cache_defrag->kmd_dont_know++; 4899 if (kmem_hunt_mags(cp, callback->kmm_from_buf) != NULL) { 4900 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_mag); 4901 cp->cache_defrag->kmd_hunt_found++; 4902 kmem_slab_free_constructed(cp, callback->kmm_from_buf, 4903 B_TRUE); 4904 if (sp->slab_refcnt == 0) 4905 cp->cache_defrag->kmd_slabs_freed++; 4906 mutex_enter(&cp->cache_lock); 4907 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf); 4908 mutex_exit(&cp->cache_lock); 4909 } 4910 break; 4911 default: 4912 panic("'%s' (%p) unexpected move callback response %d\n", 4913 cp->cache_name, (void *)cp, response); 4914 } 4915 4916 kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE); 4917 kmem_move_end(cp, callback); 4918 } 4919 4920 /* Return B_FALSE if there is insufficient memory for the move request. */ 4921 static boolean_t 4922 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags) 4923 { 4924 void *to_buf; 4925 avl_index_t index; 4926 kmem_move_t *callback, *pending; 4927 ulong_t n; 4928 4929 ASSERT(taskq_member(kmem_taskq, curthread)); 4930 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); 4931 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING); 4932 4933 callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP); 4934 if (callback == NULL) { 4935 KMEM_STAT_ADD(kmem_move_stats.kms_callback_alloc_fail); 4936 return (B_FALSE); 4937 } 4938 4939 callback->kmm_from_slab = sp; 4940 callback->kmm_from_buf = buf; 4941 callback->kmm_flags = flags; 4942 4943 mutex_enter(&cp->cache_lock); 4944 4945 n = avl_numnodes(&cp->cache_partial_slabs); 4946 if ((n == 0) || ((n == 1) && !(flags & KMM_DEBUG))) { 4947 mutex_exit(&cp->cache_lock); 4948 kmem_cache_free(kmem_move_cache, callback); 4949 return (B_TRUE); /* there is no need for the move request */ 4950 } 4951 4952 pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index); 4953 if (pending != NULL) { 4954 /* 4955 * If the move is already pending and we're desperate now, 4956 * update the move flags. 
4957 */ 4958 if (flags & KMM_DESPERATE) { 4959 pending->kmm_flags |= KMM_DESPERATE; 4960 } 4961 mutex_exit(&cp->cache_lock); 4962 KMEM_STAT_ADD(kmem_move_stats.kms_already_pending); 4963 kmem_cache_free(kmem_move_cache, callback); 4964 return (B_TRUE); 4965 } 4966 4967 to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs), 4968 B_FALSE); 4969 callback->kmm_to_buf = to_buf; 4970 avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index); 4971 4972 mutex_exit(&cp->cache_lock); 4973 4974 if (!taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer, 4975 callback, TQ_NOSLEEP)) { 4976 KMEM_STAT_ADD(kmem_move_stats.kms_callback_taskq_fail); 4977 mutex_enter(&cp->cache_lock); 4978 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback); 4979 mutex_exit(&cp->cache_lock); 4980 kmem_slab_free(cp, to_buf); 4981 kmem_cache_free(kmem_move_cache, callback); 4982 return (B_FALSE); 4983 } 4984 4985 return (B_TRUE); 4986 } 4987 4988 static void 4989 kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback) 4990 { 4991 avl_index_t index; 4992 4993 ASSERT(cp->cache_defrag != NULL); 4994 ASSERT(taskq_member(kmem_move_taskq, curthread)); 4995 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); 4996 4997 mutex_enter(&cp->cache_lock); 4998 VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending, 4999 callback->kmm_from_buf, &index) != NULL); 5000 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback); 5001 if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) { 5002 list_t *deadlist = &cp->cache_defrag->kmd_deadlist; 5003 kmem_slab_t *sp; 5004 5005 /* 5006 * The last pending move completed. Release all slabs from the 5007 * front of the dead list except for any slab at the tail that 5008 * needs to be released from the context of kmem_move_buffers(). 5009 * kmem deferred unmapping the buffers on these slabs in order 5010 * to guarantee that buffers passed to the move callback have 5011 * been touched only by kmem or by the client itself. 5012 */ 5013 while ((sp = list_remove_head(deadlist)) != NULL) { 5014 if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) { 5015 list_insert_tail(deadlist, sp); 5016 break; 5017 } 5018 cp->cache_defrag->kmd_deadcount--; 5019 cp->cache_slab_destroy++; 5020 mutex_exit(&cp->cache_lock); 5021 kmem_slab_destroy(cp, sp); 5022 KMEM_STAT_ADD(kmem_move_stats.kms_dead_slabs_freed); 5023 mutex_enter(&cp->cache_lock); 5024 } 5025 } 5026 mutex_exit(&cp->cache_lock); 5027 kmem_cache_free(kmem_move_cache, callback); 5028 } 5029 5030 /* 5031 * Move buffers from least used slabs first by scanning backwards from the end 5032 * of the partial slab list. Scan at most max_scan candidate slabs and move 5033 * buffers from at most max_slabs slabs (0 for all partial slabs in both cases). 5034 * If desperate to reclaim memory, move buffers from any partial slab, otherwise 5035 * skip slabs with a ratio of allocated buffers at or above the current 5036 * threshold. Return the number of unskipped slabs (at most max_slabs, -1 if the 5037 * scan is aborted) so that the caller can adjust the reclaimability threshold 5038 * depending on how many reclaimable slabs it finds. 5039 * 5040 * kmem_move_buffers() drops and reacquires cache_lock every time it issues a 5041 * move request, since it is not valid for kmem_move_begin() to call 5042 * kmem_cache_alloc() or taskq_dispatch() with cache_lock held. 
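 *
 * For reference, both callers later in this file (kmem_cache_defrag() and
 * kmem_cache_scan()) invoke this function with cache_lock held and with more
 * than one partial slab, roughly as in this abbreviated sketch:
 *
 *	mutex_enter(&cp->cache_lock);
 *	if (avl_numnodes(&cp->cache_partial_slabs) > 1)
 *		(void) kmem_move_buffers(cp, max_scan, max_slabs, flags);
 *	mutex_exit(&cp->cache_lock);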
5043 */ 5044 static int 5045 kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs, 5046 int flags) 5047 { 5048 kmem_slab_t *sp; 5049 void *buf; 5050 int i, j; /* slab index, buffer index */ 5051 int s; /* reclaimable slabs */ 5052 int b; /* allocated (movable) buffers on reclaimable slab */ 5053 boolean_t success; 5054 int refcnt; 5055 int nomove; 5056 5057 ASSERT(taskq_member(kmem_taskq, curthread)); 5058 ASSERT(MUTEX_HELD(&cp->cache_lock)); 5059 ASSERT(kmem_move_cache != NULL); 5060 ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL); 5061 ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) : 5062 avl_numnodes(&cp->cache_partial_slabs) > 1); 5063 5064 if (kmem_move_blocked) { 5065 return (0); 5066 } 5067 5068 if (kmem_move_fulltilt) { 5069 flags |= KMM_DESPERATE; 5070 } 5071 5072 if (max_scan == 0 || (flags & KMM_DESPERATE)) { 5073 /* 5074 * Scan as many slabs as needed to find the desired number of 5075 * candidate slabs. 5076 */ 5077 max_scan = (size_t)-1; 5078 } 5079 5080 if (max_slabs == 0 || (flags & KMM_DESPERATE)) { 5081 /* Find as many candidate slabs as possible. */ 5082 max_slabs = (size_t)-1; 5083 } 5084 5085 sp = avl_last(&cp->cache_partial_slabs); 5086 ASSERT(KMEM_SLAB_IS_PARTIAL(sp)); 5087 for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) && (sp != NULL) && 5088 ((sp != avl_first(&cp->cache_partial_slabs)) || 5089 (flags & KMM_DEBUG)); 5090 sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) { 5091 5092 if (!kmem_slab_is_reclaimable(cp, sp, flags)) { 5093 continue; 5094 } 5095 s++; 5096 5097 /* Look for allocated buffers to move. */ 5098 for (j = 0, b = 0, buf = sp->slab_base; 5099 (j < sp->slab_chunks) && (b < sp->slab_refcnt); 5100 buf = (((char *)buf) + cp->cache_chunksize), j++) { 5101 5102 if (kmem_slab_allocated(cp, sp, buf) == NULL) { 5103 continue; 5104 } 5105 5106 b++; 5107 5108 /* 5109 * Prevent the slab from being destroyed while we drop 5110 * cache_lock and while the pending move is not yet 5111 * registered. Flag the pending move while 5112 * kmd_moves_pending may still be empty, since we can't 5113 * yet rely on a non-zero pending move count to prevent 5114 * the slab from being destroyed. 5115 */ 5116 ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING)); 5117 sp->slab_flags |= KMEM_SLAB_MOVE_PENDING; 5118 /* 5119 * Recheck refcnt and nomove after reacquiring the lock, 5120 * since these control the order of partial slabs, and 5121 * we want to know if we can pick up the scan where we 5122 * left off. 5123 */ 5124 refcnt = sp->slab_refcnt; 5125 nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE); 5126 mutex_exit(&cp->cache_lock); 5127 5128 success = kmem_move_begin(cp, sp, buf, flags); 5129 5130 /* 5131 * Now, before the lock is reacquired, kmem could 5132 * process all pending move requests and purge the 5133 * deadlist, so that upon reacquiring the lock, sp has 5134 * been remapped. Or, the client may free all the 5135 * objects on the slab while the pending moves are still 5136 * on the taskq. Therefore, the KMEM_SLAB_MOVE_PENDING 5137 * flag causes the slab to be put at the end of the 5138 * deadlist and prevents it from being destroyed, since 5139 * we plan to destroy it here after reacquiring the 5140 * lock. 
5141 */ 5142 mutex_enter(&cp->cache_lock); 5143 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING); 5144 sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING; 5145 5146 if (sp->slab_refcnt == 0) { 5147 list_t *deadlist = 5148 &cp->cache_defrag->kmd_deadlist; 5149 list_remove(deadlist, sp); 5150 5151 if (!avl_is_empty( 5152 &cp->cache_defrag->kmd_moves_pending)) { 5153 /* 5154 * A pending move makes it unsafe to 5155 * destroy the slab, because even though 5156 * the move is no longer needed, the 5157 * context where that is determined 5158 * requires the slab to exist. 5159 * Fortunately, a pending move also 5160 * means we don't need to destroy the 5161 * slab here, since it will get 5162 * destroyed along with any other slabs 5163 * on the deadlist after the last 5164 * pending move completes. 5165 */ 5166 list_insert_head(deadlist, sp); 5167 KMEM_STAT_ADD(kmem_move_stats. 5168 kms_endscan_slab_dead); 5169 return (-1); 5170 } 5171 5172 /* 5173 * Destroy the slab now if it was completely 5174 * freed while we dropped cache_lock and there 5175 * are no pending moves. Since slab_refcnt 5176 * cannot change once it reaches zero, no new 5177 * pending moves from that slab are possible. 5178 */ 5179 cp->cache_defrag->kmd_deadcount--; 5180 cp->cache_slab_destroy++; 5181 mutex_exit(&cp->cache_lock); 5182 kmem_slab_destroy(cp, sp); 5183 KMEM_STAT_ADD(kmem_move_stats. 5184 kms_dead_slabs_freed); 5185 KMEM_STAT_ADD(kmem_move_stats. 5186 kms_endscan_slab_destroyed); 5187 mutex_enter(&cp->cache_lock); 5188 /* 5189 * Since we can't pick up the scan where we left 5190 * off, abort the scan and say nothing about the 5191 * number of reclaimable slabs. 5192 */ 5193 return (-1); 5194 } 5195 5196 if (!success) { 5197 /* 5198 * Abort the scan if there is not enough memory 5199 * for the request and say nothing about the 5200 * number of reclaimable slabs. 5201 */ 5202 KMEM_STAT_COND_ADD(s < max_slabs, 5203 kmem_move_stats.kms_endscan_nomem); 5204 return (-1); 5205 } 5206 5207 /* 5208 * The slab's position changed while the lock was 5209 * dropped, so we don't know where we are in the 5210 * sequence any more. 5211 */ 5212 if (sp->slab_refcnt != refcnt) { 5213 /* 5214 * If this is a KMM_DEBUG move, the slab_refcnt 5215 * may have changed because we allocated a 5216 * destination buffer on the same slab. In that 5217 * case, we're not interested in counting it. 5218 */ 5219 KMEM_STAT_COND_ADD(!(flags & KMM_DEBUG) && 5220 (s < max_slabs), 5221 kmem_move_stats.kms_endscan_refcnt_changed); 5222 return (-1); 5223 } 5224 if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove) { 5225 KMEM_STAT_COND_ADD(s < max_slabs, 5226 kmem_move_stats.kms_endscan_nomove_changed); 5227 return (-1); 5228 } 5229 5230 /* 5231 * Generating a move request allocates a destination 5232 * buffer from the slab layer, bumping the first partial 5233 * slab if it is completely allocated. If the current 5234 * slab becomes the first partial slab as a result, we 5235 * can't continue to scan backwards. 5236 * 5237 * If this is a KMM_DEBUG move and we allocated the 5238 * destination buffer from the last partial slab, then 5239 * the buffer we're moving is on the same slab and our 5240 * slab_refcnt has changed, causing us to return before 5241 * reaching here if there are no partial slabs left. 5242 */ 5243 ASSERT(!avl_is_empty(&cp->cache_partial_slabs)); 5244 if (sp == avl_first(&cp->cache_partial_slabs)) { 5245 /* 5246 * We're not interested in a second KMM_DEBUG 5247 * move. 
5248 */ 5249 goto end_scan; 5250 } 5251 } 5252 } 5253 end_scan: 5254 5255 KMEM_STAT_COND_ADD(!(flags & KMM_DEBUG) && 5256 (s < max_slabs) && 5257 (sp == avl_first(&cp->cache_partial_slabs)), 5258 kmem_move_stats.kms_endscan_freelist); 5259 5260 return (s); 5261 } 5262 5263 typedef struct kmem_move_notify_args { 5264 kmem_cache_t *kmna_cache; 5265 void *kmna_buf; 5266 } kmem_move_notify_args_t; 5267 5268 static void 5269 kmem_cache_move_notify_task(void *arg) 5270 { 5271 kmem_move_notify_args_t *args = arg; 5272 kmem_cache_t *cp = args->kmna_cache; 5273 void *buf = args->kmna_buf; 5274 kmem_slab_t *sp; 5275 5276 ASSERT(taskq_member(kmem_taskq, curthread)); 5277 ASSERT(list_link_active(&cp->cache_link)); 5278 5279 kmem_free(args, sizeof (kmem_move_notify_args_t)); 5280 mutex_enter(&cp->cache_lock); 5281 sp = kmem_slab_allocated(cp, NULL, buf); 5282 5283 /* Ignore the notification if the buffer is no longer allocated. */ 5284 if (sp == NULL) { 5285 mutex_exit(&cp->cache_lock); 5286 return; 5287 } 5288 5289 /* Ignore the notification if there's no reason to move the buffer. */ 5290 if (avl_numnodes(&cp->cache_partial_slabs) > 1) { 5291 /* 5292 * So far the notification is not ignored. Ignore the 5293 * notification if the slab is not marked by an earlier refusal 5294 * to move a buffer. 5295 */ 5296 if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) && 5297 (sp->slab_later_count == 0)) { 5298 mutex_exit(&cp->cache_lock); 5299 return; 5300 } 5301 5302 kmem_slab_move_yes(cp, sp, buf); 5303 ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING)); 5304 sp->slab_flags |= KMEM_SLAB_MOVE_PENDING; 5305 mutex_exit(&cp->cache_lock); 5306 /* see kmem_move_buffers() about dropping the lock */ 5307 (void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY); 5308 mutex_enter(&cp->cache_lock); 5309 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING); 5310 sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING; 5311 if (sp->slab_refcnt == 0) { 5312 list_t *deadlist = &cp->cache_defrag->kmd_deadlist; 5313 list_remove(deadlist, sp); 5314 5315 if (!avl_is_empty( 5316 &cp->cache_defrag->kmd_moves_pending)) { 5317 list_insert_head(deadlist, sp); 5318 mutex_exit(&cp->cache_lock); 5319 KMEM_STAT_ADD(kmem_move_stats. 5320 kms_notify_slab_dead); 5321 return; 5322 } 5323 5324 cp->cache_defrag->kmd_deadcount--; 5325 cp->cache_slab_destroy++; 5326 mutex_exit(&cp->cache_lock); 5327 kmem_slab_destroy(cp, sp); 5328 KMEM_STAT_ADD(kmem_move_stats.kms_dead_slabs_freed); 5329 KMEM_STAT_ADD(kmem_move_stats. 
5330 kms_notify_slab_destroyed); 5331 return; 5332 } 5333 } else { 5334 kmem_slab_move_yes(cp, sp, buf); 5335 } 5336 mutex_exit(&cp->cache_lock); 5337 } 5338 5339 void 5340 kmem_cache_move_notify(kmem_cache_t *cp, void *buf) 5341 { 5342 kmem_move_notify_args_t *args; 5343 5344 KMEM_STAT_ADD(kmem_move_stats.kms_notify); 5345 args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP); 5346 if (args != NULL) { 5347 args->kmna_cache = cp; 5348 args->kmna_buf = buf; 5349 if (!taskq_dispatch(kmem_taskq, 5350 (task_func_t *)kmem_cache_move_notify_task, args, 5351 TQ_NOSLEEP)) 5352 kmem_free(args, sizeof (kmem_move_notify_args_t)); 5353 } 5354 } 5355 5356 static void 5357 kmem_cache_defrag(kmem_cache_t *cp) 5358 { 5359 size_t n; 5360 5361 ASSERT(cp->cache_defrag != NULL); 5362 5363 mutex_enter(&cp->cache_lock); 5364 n = avl_numnodes(&cp->cache_partial_slabs); 5365 if (n > 1) { 5366 /* kmem_move_buffers() drops and reacquires cache_lock */ 5367 KMEM_STAT_ADD(kmem_move_stats.kms_defrags); 5368 cp->cache_defrag->kmd_defrags++; 5369 (void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE); 5370 } 5371 mutex_exit(&cp->cache_lock); 5372 } 5373 5374 /* Is this cache above the fragmentation threshold? */ 5375 static boolean_t 5376 kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree) 5377 { 5378 /* 5379 * nfree kmem_frag_numer 5380 * ------------------ > --------------- 5381 * cp->cache_buftotal kmem_frag_denom 5382 */ 5383 return ((nfree * kmem_frag_denom) > 5384 (cp->cache_buftotal * kmem_frag_numer)); 5385 } 5386 5387 static boolean_t 5388 kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap) 5389 { 5390 boolean_t fragmented; 5391 uint64_t nfree; 5392 5393 ASSERT(MUTEX_HELD(&cp->cache_lock)); 5394 *doreap = B_FALSE; 5395 5396 if (kmem_move_fulltilt) { 5397 if (avl_numnodes(&cp->cache_partial_slabs) > 1) { 5398 return (B_TRUE); 5399 } 5400 } else { 5401 if ((cp->cache_complete_slab_count + avl_numnodes( 5402 &cp->cache_partial_slabs)) < kmem_frag_minslabs) { 5403 return (B_FALSE); 5404 } 5405 } 5406 5407 nfree = cp->cache_bufslab; 5408 fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) && 5409 kmem_cache_frag_threshold(cp, nfree)); 5410 5411 /* 5412 * Free buffers in the magazine layer appear allocated from the point of 5413 * view of the slab layer. We want to know if the slab layer would 5414 * appear fragmented if we included free buffers from magazines that 5415 * have fallen out of the working set. 
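 *
 * As a worked example with purely hypothetical numbers: suppose the threshold
 * ratio kmem_frag_numer / kmem_frag_denom is 1/8 and a cache has
 * cache_buftotal == 1000 with cache_bufslab == 100. 100/1000 is below 1/8, so
 * the slab layer alone does not look fragmented. If, however, three full
 * magazines of 20 buffers each have fallen out of the depot working set,
 * counting those 60 reapable buffers raises nfree to 160, which is above 1/8
 * of 1000. In that case we still return B_FALSE but set *doreap so that the
 * caller reaps the depot working set, after which the slab layer can be
 * reevaluated.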
5416 */ 5417 if (!fragmented) { 5418 long reap; 5419 5420 mutex_enter(&cp->cache_depot_lock); 5421 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); 5422 reap = MIN(reap, cp->cache_full.ml_total); 5423 mutex_exit(&cp->cache_depot_lock); 5424 5425 nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize); 5426 if (kmem_cache_frag_threshold(cp, nfree)) { 5427 *doreap = B_TRUE; 5428 } 5429 } 5430 5431 return (fragmented); 5432 } 5433 5434 /* Called periodically from kmem_taskq */ 5435 static void 5436 kmem_cache_scan(kmem_cache_t *cp) 5437 { 5438 boolean_t reap = B_FALSE; 5439 kmem_defrag_t *kmd; 5440 5441 ASSERT(taskq_member(kmem_taskq, curthread)); 5442 5443 mutex_enter(&cp->cache_lock); 5444 5445 kmd = cp->cache_defrag; 5446 if (kmd->kmd_consolidate > 0) { 5447 kmd->kmd_consolidate--; 5448 mutex_exit(&cp->cache_lock); 5449 kmem_cache_reap(cp); 5450 return; 5451 } 5452 5453 if (kmem_cache_is_fragmented(cp, &reap)) { 5454 size_t slabs_found; 5455 5456 /* 5457 * Consolidate reclaimable slabs from the end of the partial 5458 * slab list (scan at most kmem_reclaim_scan_range slabs to find 5459 * reclaimable slabs). Keep track of how many candidate slabs we 5460 * looked for and how many we actually found so we can adjust 5461 * the definition of a candidate slab if we're having trouble 5462 * finding them. 5463 * 5464 * kmem_move_buffers() drops and reacquires cache_lock. 5465 */ 5466 KMEM_STAT_ADD(kmem_move_stats.kms_scans); 5467 kmd->kmd_scans++; 5468 slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range, 5469 kmem_reclaim_max_slabs, 0); 5470 if (slabs_found >= 0) { 5471 kmd->kmd_slabs_sought += kmem_reclaim_max_slabs; 5472 kmd->kmd_slabs_found += slabs_found; 5473 } 5474 5475 if (++kmd->kmd_tries >= kmem_reclaim_scan_range) { 5476 kmd->kmd_tries = 0; 5477 5478 /* 5479 * If we had difficulty finding candidate slabs in 5480 * previous scans, adjust the threshold so that 5481 * candidates are easier to find. 5482 */ 5483 if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) { 5484 kmem_adjust_reclaim_threshold(kmd, -1); 5485 } else if ((kmd->kmd_slabs_found * 2) < 5486 kmd->kmd_slabs_sought) { 5487 kmem_adjust_reclaim_threshold(kmd, 1); 5488 } 5489 kmd->kmd_slabs_sought = 0; 5490 kmd->kmd_slabs_found = 0; 5491 } 5492 } else { 5493 kmem_reset_reclaim_threshold(cp->cache_defrag); 5494 #ifdef DEBUG 5495 if (!avl_is_empty(&cp->cache_partial_slabs)) { 5496 /* 5497 * In a debug kernel we want the consolidator to 5498 * run occasionally even when there is plenty of 5499 * memory. 5500 */ 5501 uint16_t debug_rand; 5502 5503 (void) random_get_bytes((uint8_t *)&debug_rand, 2); 5504 if (!kmem_move_noreap && 5505 ((debug_rand % kmem_mtb_reap) == 0)) { 5506 mutex_exit(&cp->cache_lock); 5507 KMEM_STAT_ADD(kmem_move_stats.kms_debug_reaps); 5508 kmem_cache_reap(cp); 5509 return; 5510 } else if ((debug_rand % kmem_mtb_move) == 0) { 5511 KMEM_STAT_ADD(kmem_move_stats.kms_scans); 5512 KMEM_STAT_ADD(kmem_move_stats.kms_debug_scans); 5513 kmd->kmd_scans++; 5514 (void) kmem_move_buffers(cp, 5515 kmem_reclaim_scan_range, 1, KMM_DEBUG); 5516 } 5517 } 5518 #endif /* DEBUG */ 5519 } 5520 5521 mutex_exit(&cp->cache_lock); 5522 5523 if (reap) { 5524 KMEM_STAT_ADD(kmem_move_stats.kms_scan_depot_ws_reaps); 5525 kmem_depot_ws_reap(cp); 5526 } 5527 } 5528
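
/*
 * Illustrative note (a hedged sketch, not drawn from any particular client):
 * a client that earlier declined to move a buffer (answering KMEM_CBRC_LATER
 * or KMEM_CBRC_NO) and later finds that the buffer has become movable can
 * volunteer it by calling
 *
 *	kmem_cache_move_notify(cp, buf);
 *
 * for example from the code path that drops the last hold on the object. As
 * kmem_cache_move_notify_task() above shows, the notification is ignored
 * unless the buffer is still allocated and its slab was marked by an earlier
 * refusal to move one of its buffers, so the client need not track whether
 * kmem still cares about the buffer.
 */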