/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * Big Theory Statement for the virtual memory allocator.
 *
 * For a more complete description of the main ideas, see:
 *
 *	Jeff Bonwick and Jonathan Adams,
 *
 *	Magazines and vmem: Extending the Slab Allocator to Many CPUs and
 *	Arbitrary Resources.
 *
 *	Proceedings of the 2001 Usenix Conference.
 *	Available as http://www.usenix.org/event/usenix01/bonwick.html
 *
 *
 * 1. General Concepts
 * -------------------
 *
 * 1.1 Overview
 * ------------
 * We divide the kernel address space into a number of logically distinct
 * pieces, or *arenas*: text, data, heap, stack, and so on.  Within these
 * arenas we often subdivide further; for example, we use heap addresses
 * not only for the kernel heap (kmem_alloc() space), but also for DVMA,
 * bp_mapin(), /dev/kmem, and even some device mappings like the TOD chip.
 * The kernel address space, therefore, is most accurately described as
 * a tree of arenas in which each node of the tree *imports* some subset
 * of its parent.  The virtual memory allocator manages these arenas and
 * supports their natural hierarchical structure.
 *
 * 1.2 Arenas
 * ----------
 * An arena is nothing more than a set of integers.  These integers most
 * commonly represent virtual addresses, but in fact they can represent
 * anything at all.  For example, we could use an arena containing the
 * integers minpid through maxpid to allocate process IDs.  vmem_create()
 * and vmem_destroy() create and destroy vmem arenas.  In order to
 * differentiate between arenas used for addresses and arenas used for
 * identifiers, the VMC_IDENTIFIER flag is passed to vmem_create().  This
 * prevents identifier exhaustion from being diagnosed as general memory
 * failure.
 *
 * 1.3 Spans
 * ---------
 * We represent the integers in an arena as a collection of *spans*, or
 * contiguous ranges of integers.  For example, the kernel heap consists
 * of just one span: [kernelheap, ekernelheap).  Spans can be added to an
 * arena in two ways: explicitly, by vmem_add(), or implicitly, by
 * importing, as described in Section 1.5 below.
 *
 * 1.4 Segments
 * ------------
 * Spans are subdivided into *segments*, each of which is either allocated
 * or free.  A segment, like a span, is a contiguous range of integers.
 * Each allocated segment [addr, addr + size) represents exactly one
 * vmem_alloc(size) that returned addr.  Free segments represent the space
 * between allocated segments.
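 *
 * As an illustrative example (the numbers are hypothetical), suppose a
 * fresh span [0x1000, 0x9000) satisfies a single vmem_alloc(0x1000) that
 * returns 0x1000.  The span then consists of an allocated segment
 * [0x1000, 0x2000) followed by a free segment [0x2000, 0x9000).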
 * If two free segments are adjacent, we coalesce them into one larger
 * segment; that is, if segments [a, b) and [b, c) are both free, we
 * merge them into a single segment [a, c).  The segments within a span
 * are linked together in increasing-address order so we can easily
 * determine whether coalescing is possible.
 *
 * Segments never cross span boundaries.  When all segments within
 * an imported span become free, we return the span to its source.
 *
 * 1.5 Imported Memory
 * -------------------
 * As mentioned in the overview, some arenas are logical subsets of
 * other arenas.  For example, kmem_va_arena (a virtual address cache
 * that satisfies most kmem_slab_create() requests) is just a subset
 * of heap_arena (the kernel heap) that provides caching for the most
 * common slab sizes.  When kmem_va_arena runs out of virtual memory,
 * it *imports* more from the heap; we say that heap_arena is the
 * *vmem source* for kmem_va_arena.  vmem_create() allows you to
 * specify any existing vmem arena as the source for your new arena.
 * Topologically, since every arena is a child of at most one source,
 * the set of all arenas forms a collection of trees.
 *
 * 1.6 Constrained Allocations
 * ---------------------------
 * Some vmem clients are quite picky about the kind of address they want.
 * For example, the DVMA code may need an address that is at a particular
 * phase with respect to some alignment (to get good cache coloring), or
 * that lies within certain limits (the addressable range of a device),
 * or that doesn't cross some boundary (a DMA counter restriction) --
 * or all of the above.  vmem_xalloc() allows the client to specify any
 * or all of these constraints.
 *
 * 1.7 The Vmem Quantum
 * --------------------
 * Every arena has a notion of 'quantum', specified at vmem_create() time,
 * that defines the arena's minimum unit of currency.  Most commonly the
 * quantum is either 1 or PAGESIZE, but any power of 2 is legal.
 * All vmem allocations are guaranteed to be quantum-aligned.
 *
 * 1.8 Quantum Caching
 * -------------------
 * A vmem arena may be so hot (frequently used) that the scalability of vmem
 * allocation is a significant concern.  We address this by allowing the most
 * common allocation sizes to be serviced by the kernel memory allocator,
 * which provides low-latency per-cpu caching.  The qcache_max argument to
 * vmem_create() specifies the largest allocation size to cache.
 *
 * 1.9 Relationship to Kernel Memory Allocator
 * -------------------------------------------
 * Every kmem cache has a vmem arena as its slab supplier.  The kernel memory
 * allocator uses vmem_alloc() and vmem_free() to create and destroy slabs.
 *
 *
 * 2. Implementation
 * -----------------
 *
 * 2.1 Segment lists and markers
 * -----------------------------
 * The segment structure (vmem_seg_t) contains two doubly-linked lists.
 *
 * The arena list (vs_anext/vs_aprev) links all segments in the arena.
 * In addition to the allocated and free segments, the arena contains
 * special marker segments at span boundaries.  Span markers simplify
 * coalescing and importing logic by making it easy to tell both when
 * we're at a span boundary (so we don't coalesce across it), and when
 * a span is completely free (its neighbors will both be span markers).
 *
 * Imported spans will have vs_import set.
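 *
 * As an illustrative sketch, the arena list of an arena with two spans
 * has the form
 *
 *	seg0 <-> span1 <-> seg <-> ... <-> span2 <-> seg <-> ... <-> seg0
 *
 * where seg0 is the arena's static list anchor, span1 and span2 are span
 * markers, and each seg is either an allocated or a free segment.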
 *
 * The next-of-kin list (vs_knext/vs_kprev) links segments of the same type:
 * (1) for allocated segments, vs_knext is the hash chain linkage;
 * (2) for free segments, vs_knext is the freelist linkage;
 * (3) for span marker segments, vs_knext is the next span marker.
 *
 * 2.2 Allocation hashing
 * ----------------------
 * We maintain a hash table of all allocated segments, hashed by address.
 * This allows vmem_free() to discover the target segment in constant time.
 * vmem_update() periodically resizes hash tables to keep hash chains short.
 *
 * 2.3 Freelist management
 * -----------------------
 * We maintain power-of-2 freelists for free segments, i.e. free segments
 * of size >= 2^n reside in vmp->vm_freelist[n].  To ensure constant-time
 * allocation, vmem_xalloc() looks not in the first freelist that *might*
 * satisfy the allocation, but in the first freelist that *definitely*
 * satisfies the allocation (unless VM_BESTFIT is specified, or all larger
 * freelists are empty).  For example, a 1000-byte allocation will be
 * satisfied not from the 512..1023-byte freelist, whose members *might*
 * contain a 1000-byte segment, but from a 1024-byte or larger freelist,
 * the first member of which will *definitely* satisfy the allocation.
 * This ensures that vmem_xalloc() works in constant time.
 *
 * We maintain a bit map to determine quickly which freelists are non-empty.
 * vmp->vm_freemap & (1 << n) is non-zero iff vmp->vm_freelist[n] is non-empty.
 *
 * The different freelists are linked together into one large freelist,
 * with the freelist heads serving as markers.  Freelist markers simplify
 * the maintenance of vm_freemap by making it easy to tell when we're taking
 * the last member of a freelist (both of its neighbors will be markers).
 *
 * 2.4 Vmem Locking
 * ----------------
 * For simplicity, all arena state is protected by a per-arena lock.
 * For very hot arenas, use quantum caching for scalability.
 *
 * 2.5 Vmem Population
 * -------------------
 * Any internal vmem routine that might need to allocate new segment
 * structures must prepare in advance by calling vmem_populate(), which
 * will preallocate enough vmem_seg_t's to get it through the entire
 * operation without dropping the arena lock.
 *
 * 2.6 Auditing
 * ------------
 * If KMF_AUDIT is set in kmem_flags, we audit vmem allocations as well.
 * Since virtual addresses cannot be scribbled on, there is no equivalent
 * in vmem to redzone checking, deadbeef, or other kmem debugging features.
 * Moreover, we do not audit frees because segment coalescing destroys the
 * association between an address and its segment structure.  Auditing is
 * thus intended primarily to keep track of who's consuming the arena.
 * Debugging support could certainly be extended in the future if it proves
 * necessary, but we do so much live checking via the allocation hash table
 * that even non-DEBUG systems get quite a bit of sanity checking already.
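 *
 * As a purely illustrative sketch of the interfaces described above (the
 * arena and variable names are hypothetical), an identifier arena might
 * be used as follows:
 *
 *	foo_id_arena = vmem_create("foo_id", (void *)1, 100, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
 *	id = (uintptr_t)vmem_alloc(foo_id_arena, 1, VM_SLEEP);
 *	...
 *	vmem_free(foo_id_arena, (void *)id, 1);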
 */

#include <sys/vmem_impl.h>
#include <sys/kmem.h>
#include <sys/kstat.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/panic.h>

#define	VMEM_INITIAL		10	/* early vmem arenas */
#define	VMEM_SEG_INITIAL	200	/* early segments */

/*
 * Adding a new span to an arena requires two segment structures: one to
 * represent the span, and one to represent the free segment it contains.
 */
#define	VMEM_SEGS_PER_SPAN_CREATE	2

/*
 * Allocating a piece of an existing segment requires 0-2 segment structures
 * depending on how much of the segment we're allocating.
 *
 * To allocate the entire segment, no new segment structures are needed; we
 * simply move the existing segment structure from the freelist to the
 * allocation hash table.
 *
 * To allocate a piece from the left or right end of the segment, we must
 * split the segment into two pieces (allocated part and remainder), so we
 * need one new segment structure to represent the remainder.
 *
 * To allocate from the middle of a segment, we need two new segment
 * structures to represent the remainders on either side of the allocated
 * part.
 */
#define	VMEM_SEGS_PER_EXACT_ALLOC	0
#define	VMEM_SEGS_PER_LEFT_ALLOC	1
#define	VMEM_SEGS_PER_RIGHT_ALLOC	1
#define	VMEM_SEGS_PER_MIDDLE_ALLOC	2

/*
 * vmem_populate() preallocates segment structures for vmem to do its work.
 * It must preallocate enough for the worst case, which is when we must import
 * a new span and then allocate from the middle of it.
 */
#define	VMEM_SEGS_PER_ALLOC_MAX		\
	(VMEM_SEGS_PER_SPAN_CREATE + VMEM_SEGS_PER_MIDDLE_ALLOC)

/*
 * The segment structures themselves are allocated from vmem_seg_arena, so
 * we have a recursion problem when vmem_seg_arena needs to populate itself.
 * We address this by working out the maximum number of segment structures
 * this act will require, and multiplying by the maximum number of threads
 * that we'll allow to do it simultaneously.
 *
 * The worst-case segment consumption to populate vmem_seg_arena is as
 * follows (depicted as a stack trace to indicate why events are occurring):
 *
 * (In order to lower the fragmentation in the heap_arena, we specify a
 * minimum import size for the vmem_metadata_arena which is the same size
 * as the kmem_va quantum cache allocations.  This causes the worst-case
 * allocation from the vmem_metadata_arena to be 3 segments.)
 *
 * vmem_alloc(vmem_seg_arena)		-> 2 segs (span create + exact alloc)
 *  segkmem_alloc(vmem_metadata_arena)
 *   vmem_alloc(vmem_metadata_arena)	-> 3 segs (span create + left alloc)
 *    vmem_alloc(heap_arena)		-> 1 seg (left alloc)
 *   page_create()
 *   hat_memload()
 *    kmem_cache_alloc()
 *     kmem_slab_create()
 *	vmem_alloc(hat_memload_arena)	-> 2 segs (span create + exact alloc)
 *	 segkmem_alloc(heap_arena)
 *	  vmem_alloc(heap_arena)	-> 1 seg (left alloc)
 *	 page_create()
 *	 hat_memload()	-> (hat layer won't recurse further)
 *
 * The worst-case consumption for each arena is 3 segment structures.
 * Of course, a 3-seg reserve could easily be blown by multiple threads.
 * Therefore, we serialize all allocations from vmem_seg_arena (which is OK
 * because they're rare).
 * We cannot allow a non-blocking allocation to get tied up behind a
 * blocking allocation, however, so we use separate locks for VM_SLEEP
 * and VM_NOSLEEP allocations.  In addition, if the system is panicking
 * then we must keep enough resources for panic_thread to do its work.
 * Thus we have at most three threads trying to allocate from
 * vmem_seg_arena, and each thread consumes at most three segment
 * structures, so we must maintain a 9-seg reserve.
 */
#define	VMEM_POPULATE_RESERVE	9

/*
 * vmem_populate() ensures that each arena has VMEM_MINFREE seg structures
 * so that it can satisfy the worst-case allocation *and* participate in
 * worst-case allocation from vmem_seg_arena.
 */
#define	VMEM_MINFREE	(VMEM_POPULATE_RESERVE + VMEM_SEGS_PER_ALLOC_MAX)

static vmem_t vmem0[VMEM_INITIAL];
static vmem_t *vmem_populator[VMEM_INITIAL];
static uint32_t vmem_id;
static uint32_t vmem_populators;
static vmem_seg_t vmem_seg0[VMEM_SEG_INITIAL];
static vmem_seg_t *vmem_segfree;
static kmutex_t vmem_list_lock;
static kmutex_t vmem_segfree_lock;
static kmutex_t vmem_sleep_lock;
static kmutex_t vmem_nosleep_lock;
static kmutex_t vmem_panic_lock;
static vmem_t *vmem_list;
static vmem_t *vmem_metadata_arena;
static vmem_t *vmem_seg_arena;
static vmem_t *vmem_hash_arena;
static vmem_t *vmem_vmem_arena;
static long vmem_update_interval = 15;	/* vmem_update() every 15 seconds */
uint32_t vmem_mtbf;		/* mean time between failures [default: off] */
size_t vmem_seg_size = sizeof (vmem_seg_t);

static vmem_kstat_t vmem_kstat_template = {
	{ "mem_inuse",		KSTAT_DATA_UINT64 },
	{ "mem_import",		KSTAT_DATA_UINT64 },
	{ "mem_total",		KSTAT_DATA_UINT64 },
	{ "vmem_source",	KSTAT_DATA_UINT32 },
	{ "alloc",		KSTAT_DATA_UINT64 },
	{ "free",		KSTAT_DATA_UINT64 },
	{ "wait",		KSTAT_DATA_UINT64 },
	{ "fail",		KSTAT_DATA_UINT64 },
	{ "lookup",		KSTAT_DATA_UINT64 },
	{ "search",		KSTAT_DATA_UINT64 },
	{ "populate_wait",	KSTAT_DATA_UINT64 },
	{ "populate_fail",	KSTAT_DATA_UINT64 },
	{ "contains",		KSTAT_DATA_UINT64 },
	{ "contains_search",	KSTAT_DATA_UINT64 },
};

/*
 * Insert/delete from arena list (type 'a') or next-of-kin list (type 'k').
 */
#define	VMEM_INSERT(vprev, vsp, type)					\
{									\
	vmem_seg_t *vnext = (vprev)->vs_##type##next;			\
	(vsp)->vs_##type##next = (vnext);				\
	(vsp)->vs_##type##prev = (vprev);				\
	(vprev)->vs_##type##next = (vsp);				\
	(vnext)->vs_##type##prev = (vsp);				\
}

#define	VMEM_DELETE(vsp, type)						\
{									\
	vmem_seg_t *vprev = (vsp)->vs_##type##prev;			\
	vmem_seg_t *vnext = (vsp)->vs_##type##next;			\
	(vprev)->vs_##type##next = (vnext);				\
	(vnext)->vs_##type##prev = (vprev);				\
}

/*
 * Get a vmem_seg_t from the global segfree list.
 */
static vmem_seg_t *
vmem_getseg_global(void)
{
	vmem_seg_t *vsp;

	mutex_enter(&vmem_segfree_lock);
	if ((vsp = vmem_segfree) != NULL)
		vmem_segfree = vsp->vs_knext;
	mutex_exit(&vmem_segfree_lock);

	return (vsp);
}

/*
 * Put a vmem_seg_t on the global segfree list.
 */
static void
vmem_putseg_global(vmem_seg_t *vsp)
{
	mutex_enter(&vmem_segfree_lock);
	vsp->vs_knext = vmem_segfree;
	vmem_segfree = vsp;
	mutex_exit(&vmem_segfree_lock);
}

/*
 * Get a vmem_seg_t from vmp's segfree list.
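 * (Callers normally hold vmp->vm_lock and have already guaranteed, via
 * vmem_populate(), that the list is non-empty; see the ASSERT below.)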
 */
static vmem_seg_t *
vmem_getseg(vmem_t *vmp)
{
	vmem_seg_t *vsp;

	ASSERT(vmp->vm_nsegfree > 0);

	vsp = vmp->vm_segfree;
	vmp->vm_segfree = vsp->vs_knext;
	vmp->vm_nsegfree--;

	return (vsp);
}

/*
 * Put a vmem_seg_t on vmp's segfree list.
 */
static void
vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
{
	vsp->vs_knext = vmp->vm_segfree;
	vmp->vm_segfree = vsp;
	vmp->vm_nsegfree++;
}

/*
 * Add vsp to the appropriate freelist.
 */
static void
vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
{
	vmem_seg_t *vprev;

	ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);

	vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
	vsp->vs_type = VMEM_FREE;
	vmp->vm_freemap |= VS_SIZE(vprev);
	VMEM_INSERT(vprev, vsp, k);

	cv_broadcast(&vmp->vm_cv);
}

/*
 * Take vsp from the freelist.
 */
static void
vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
{
	ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
	ASSERT(vsp->vs_type == VMEM_FREE);

	if (vsp->vs_knext->vs_start == 0 && vsp->vs_kprev->vs_start == 0) {
		/*
		 * The segments on both sides of 'vsp' are freelist heads,
		 * so taking vsp leaves the freelist at vsp->vs_kprev empty.
		 */
		ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
		vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
	}
	VMEM_DELETE(vsp, k);
}

/*
 * Add vsp to the allocated-segment hash table and update kstats.
 */
static void
vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
{
	vmem_seg_t **bucket;

	vsp->vs_type = VMEM_ALLOC;
	bucket = VMEM_HASH(vmp, vsp->vs_start);
	vsp->vs_knext = *bucket;
	*bucket = vsp;

	if (vmem_seg_size == sizeof (vmem_seg_t)) {
		vsp->vs_depth = (uint8_t)getpcstack(vsp->vs_stack,
		    VMEM_STACK_DEPTH);
		vsp->vs_thread = curthread;
		vsp->vs_timestamp = gethrtime();
	} else {
		vsp->vs_depth = 0;
	}

	vmp->vm_kstat.vk_alloc.value.ui64++;
	vmp->vm_kstat.vk_mem_inuse.value.ui64 += VS_SIZE(vsp);
}

/*
 * Remove vsp from the allocated-segment hash table and update kstats.
 */
static vmem_seg_t *
vmem_hash_delete(vmem_t *vmp, uintptr_t addr, size_t size)
{
	vmem_seg_t *vsp, **prev_vspp;

	prev_vspp = VMEM_HASH(vmp, addr);
	while ((vsp = *prev_vspp) != NULL) {
		if (vsp->vs_start == addr) {
			*prev_vspp = vsp->vs_knext;
			break;
		}
		vmp->vm_kstat.vk_lookup.value.ui64++;
		prev_vspp = &vsp->vs_knext;
	}

	if (vsp == NULL)
		panic("vmem_hash_delete(%p, %lx, %lu): bad free",
		    vmp, addr, size);
	if (VS_SIZE(vsp) != size)
		panic("vmem_hash_delete(%p, %lx, %lu): wrong size (expect %lu)",
		    vmp, addr, size, VS_SIZE(vsp));

	vmp->vm_kstat.vk_free.value.ui64++;
	vmp->vm_kstat.vk_mem_inuse.value.ui64 -= size;

	return (vsp);
}

/*
 * Create a segment spanning the range [start, end) and add it to the arena.
 */
static vmem_seg_t *
vmem_seg_create(vmem_t *vmp, vmem_seg_t *vprev, uintptr_t start, uintptr_t end)
{
	vmem_seg_t *newseg = vmem_getseg(vmp);

	newseg->vs_start = start;
	newseg->vs_end = end;
	newseg->vs_type = 0;
	newseg->vs_import = 0;

	VMEM_INSERT(vprev, newseg, a);

	return (newseg);
}

/*
 * Remove segment vsp from the arena.
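 * (vsp must already be off the freelists and the allocation hash table;
 * only its arena-list linkage is undone here.)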
 */
static void
vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
{
	ASSERT(vsp->vs_type != VMEM_ROTOR);
	VMEM_DELETE(vsp, a);

	vmem_putseg(vmp, vsp);
}

/*
 * Add the span [vaddr, vaddr + size) to vmp and update kstats.
 */
static vmem_seg_t *
vmem_span_create(vmem_t *vmp, void *vaddr, size_t size, uint8_t import)
{
	vmem_seg_t *newseg, *span;
	uintptr_t start = (uintptr_t)vaddr;
	uintptr_t end = start + size;

	ASSERT(MUTEX_HELD(&vmp->vm_lock));

	if ((start | end) & (vmp->vm_quantum - 1))
		panic("vmem_span_create(%p, %p, %lu): misaligned",
		    vmp, vaddr, size);

	span = vmem_seg_create(vmp, vmp->vm_seg0.vs_aprev, start, end);
	span->vs_type = VMEM_SPAN;
	span->vs_import = import;
	VMEM_INSERT(vmp->vm_seg0.vs_kprev, span, k);

	newseg = vmem_seg_create(vmp, span, start, end);
	vmem_freelist_insert(vmp, newseg);

	if (import)
		vmp->vm_kstat.vk_mem_import.value.ui64 += size;
	vmp->vm_kstat.vk_mem_total.value.ui64 += size;

	return (newseg);
}

/*
 * Remove span vsp from vmp and update kstats.
 */
static void
vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
{
	vmem_seg_t *span = vsp->vs_aprev;
	size_t size = VS_SIZE(vsp);

	ASSERT(MUTEX_HELD(&vmp->vm_lock));
	ASSERT(span->vs_type == VMEM_SPAN);

	if (span->vs_import)
		vmp->vm_kstat.vk_mem_import.value.ui64 -= size;
	vmp->vm_kstat.vk_mem_total.value.ui64 -= size;

	VMEM_DELETE(span, k);

	vmem_seg_destroy(vmp, vsp);
	vmem_seg_destroy(vmp, span);
}

/*
 * Allocate the subrange [addr, addr + size) from segment vsp.
 * If there are leftovers on either side, place them on the freelist.
 * Returns a pointer to the segment representing [addr, addr + size).
 */
static vmem_seg_t *
vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
{
	uintptr_t vs_start = vsp->vs_start;
	uintptr_t vs_end = vsp->vs_end;
	size_t vs_size = vs_end - vs_start;
	size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
	uintptr_t addr_end = addr + realsize;

	ASSERT(P2PHASE(vs_start, vmp->vm_quantum) == 0);
	ASSERT(P2PHASE(addr, vmp->vm_quantum) == 0);
	ASSERT(vsp->vs_type == VMEM_FREE);
	ASSERT(addr >= vs_start && addr_end - 1 <= vs_end - 1);
	ASSERT(addr - 1 <= addr_end - 1);

	/*
	 * If we're allocating from the start of the segment, and the
	 * remainder will be on the same freelist, we can save quite
	 * a bit of work.
	 */
	if (P2SAMEHIGHBIT(vs_size, vs_size - realsize) && addr == vs_start) {
		ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
		vsp->vs_start = addr_end;
		vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
		vmem_hash_insert(vmp, vsp);
		return (vsp);
	}

	vmem_freelist_delete(vmp, vsp);

	if (vs_end != addr_end)
		vmem_freelist_insert(vmp,
		    vmem_seg_create(vmp, vsp, addr_end, vs_end));

	if (vs_start != addr)
		vmem_freelist_insert(vmp,
		    vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));

	vsp->vs_start = addr;
	vsp->vs_end = addr + size;

	vmem_hash_insert(vmp, vsp);
	return (vsp);
}

/*
 * Returns 1 if we are populating, 0 otherwise.
 * Call this to prevent recursion from the HAT layer.
 */
int
vmem_is_populator()
{
	return (mutex_owner(&vmem_sleep_lock) == curthread ||
	    mutex_owner(&vmem_nosleep_lock) == curthread ||
	    mutex_owner(&vmem_panic_lock) == curthread);
}

/*
 * Populate vmp's segfree list with VMEM_MINFREE vmem_seg_t structures.
 */
static int
vmem_populate(vmem_t *vmp, int vmflag)
{
	char *p;
	vmem_seg_t *vsp;
	ssize_t nseg;
	size_t size;
	kmutex_t *lp;
	int i;

	while (vmp->vm_nsegfree < VMEM_MINFREE &&
	    (vsp = vmem_getseg_global()) != NULL)
		vmem_putseg(vmp, vsp);

	if (vmp->vm_nsegfree >= VMEM_MINFREE)
		return (1);

	/*
	 * If we're already populating, tap the reserve.
	 */
	if (vmem_is_populator()) {
		ASSERT(vmp->vm_cflags & VMC_POPULATOR);
		return (1);
	}

	mutex_exit(&vmp->vm_lock);

	if (panic_thread == curthread)
		lp = &vmem_panic_lock;
	else if (vmflag & VM_NOSLEEP)
		lp = &vmem_nosleep_lock;
	else
		lp = &vmem_sleep_lock;

	mutex_enter(lp);

	nseg = VMEM_MINFREE + vmem_populators * VMEM_POPULATE_RESERVE;
	size = P2ROUNDUP(nseg * vmem_seg_size, vmem_seg_arena->vm_quantum);
	nseg = size / vmem_seg_size;

	/*
	 * The following vmem_alloc() may need to populate vmem_seg_arena
	 * and all the things it imports from.  When doing so, it will tap
	 * each arena's reserve to prevent recursion (see the block comment
	 * above the definition of VMEM_POPULATE_RESERVE).
	 */
	p = vmem_alloc(vmem_seg_arena, size, vmflag & VM_KMFLAGS);
	if (p == NULL) {
		mutex_exit(lp);
		mutex_enter(&vmp->vm_lock);
		vmp->vm_kstat.vk_populate_fail.value.ui64++;
		return (0);
	}

	/*
	 * Restock the arenas that may have been depleted during population.
	 */
	for (i = 0; i < vmem_populators; i++) {
		mutex_enter(&vmem_populator[i]->vm_lock);
		while (vmem_populator[i]->vm_nsegfree < VMEM_POPULATE_RESERVE)
			vmem_putseg(vmem_populator[i],
			    (vmem_seg_t *)(p + --nseg * vmem_seg_size));
		mutex_exit(&vmem_populator[i]->vm_lock);
	}

	mutex_exit(lp);
	mutex_enter(&vmp->vm_lock);

	/*
	 * Now take our own segments.
	 */
	ASSERT(nseg >= VMEM_MINFREE);
	while (vmp->vm_nsegfree < VMEM_MINFREE)
		vmem_putseg(vmp, (vmem_seg_t *)(p + --nseg * vmem_seg_size));

	/*
	 * Give the remainder to charity.
	 */
	while (nseg > 0)
		vmem_putseg_global((vmem_seg_t *)(p + --nseg * vmem_seg_size));

	return (1);
}

/*
 * Advance a walker from its previous position to 'afterme'.
 * Note: may drop and reacquire vmp->vm_lock.
 */
static void
vmem_advance(vmem_t *vmp, vmem_seg_t *walker, vmem_seg_t *afterme)
{
	vmem_seg_t *vprev = walker->vs_aprev;
	vmem_seg_t *vnext = walker->vs_anext;
	vmem_seg_t *vsp = NULL;

	VMEM_DELETE(walker, a);

	if (afterme != NULL)
		VMEM_INSERT(afterme, walker, a);

	/*
	 * The walker segment's presence may have prevented its neighbors
	 * from coalescing.  If so, coalesce them now.
	 */
	if (vprev->vs_type == VMEM_FREE) {
		if (vnext->vs_type == VMEM_FREE) {
			ASSERT(vprev->vs_end == vnext->vs_start);
			vmem_freelist_delete(vmp, vnext);
			vmem_freelist_delete(vmp, vprev);
			vprev->vs_end = vnext->vs_end;
			vmem_freelist_insert(vmp, vprev);
			vmem_seg_destroy(vmp, vnext);
		}
		vsp = vprev;
	} else if (vnext->vs_type == VMEM_FREE) {
		vsp = vnext;
	}

	/*
	 * vsp could represent a complete imported span,
	 * in which case we must return it to the source.
	 */
	if (vsp != NULL && vsp->vs_aprev->vs_import &&
	    vmp->vm_source_free != NULL &&
	    vsp->vs_aprev->vs_type == VMEM_SPAN &&
	    vsp->vs_anext->vs_type == VMEM_SPAN) {
		void *vaddr = (void *)vsp->vs_start;
		size_t size = VS_SIZE(vsp);
		ASSERT(size == VS_SIZE(vsp->vs_aprev));
		vmem_freelist_delete(vmp, vsp);
		vmem_span_destroy(vmp, vsp);
		mutex_exit(&vmp->vm_lock);
		vmp->vm_source_free(vmp->vm_source, vaddr, size);
		mutex_enter(&vmp->vm_lock);
	}
}

/*
 * VM_NEXTFIT allocations deliberately cycle through all virtual addresses
 * in an arena, so that we avoid reusing addresses for as long as possible.
 * This helps to catch used-after-freed bugs.  It's also the perfect policy
 * for allocating things like process IDs, where we want to cycle through
 * all values in order.
 */
static void *
vmem_nextfit_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	vmem_seg_t *vsp, *rotor;
	uintptr_t addr;
	size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
	size_t vs_size;

	mutex_enter(&vmp->vm_lock);

	if (vmp->vm_nsegfree < VMEM_MINFREE && !vmem_populate(vmp, vmflag)) {
		mutex_exit(&vmp->vm_lock);
		return (NULL);
	}

	/*
	 * The common case is that the segment right after the rotor is free,
	 * and large enough that extracting 'size' bytes won't change which
	 * freelist it's on.  In this case we can avoid a *lot* of work.
	 * Instead of the normal vmem_seg_alloc(), we just advance the start
	 * address of the victim segment.  Instead of moving the rotor, we
	 * create the new segment structure *behind the rotor*, which has
	 * the same effect.  And finally, we know we don't have to coalesce
	 * the rotor's neighbors because the new segment lies between them.
	 */
	rotor = &vmp->vm_rotor;
	vsp = rotor->vs_anext;
	if (vsp->vs_type == VMEM_FREE && (vs_size = VS_SIZE(vsp)) > realsize &&
	    P2SAMEHIGHBIT(vs_size, vs_size - realsize)) {
		ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
		addr = vsp->vs_start;
		vsp->vs_start = addr + realsize;
		vmem_hash_insert(vmp,
		    vmem_seg_create(vmp, rotor->vs_aprev, addr, addr + size));
		mutex_exit(&vmp->vm_lock);
		return ((void *)addr);
	}

	/*
	 * Starting at the rotor, look for a segment large enough to
	 * satisfy the allocation.
	 */
	for (;;) {
		vmp->vm_kstat.vk_search.value.ui64++;
		if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
			break;
		vsp = vsp->vs_anext;
		if (vsp == rotor) {
			/*
			 * We've come full circle.  One possibility is that
			 * there's actually enough space, but the rotor itself
			 * is preventing the allocation from succeeding because
			 * it's sitting between two free segments.  Therefore,
			 * we advance the rotor and see if that liberates a
			 * suitable segment.
			 */
			vmem_advance(vmp, rotor, rotor->vs_anext);
			vsp = rotor->vs_aprev;
			if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
				break;
			/*
			 * If there's a lower arena we can import from, or it's
			 * a VM_NOSLEEP allocation, let vmem_xalloc() handle it.
			 * Otherwise, wait until another thread frees something.
			 */
			if (vmp->vm_source_alloc != NULL ||
			    (vmflag & VM_NOSLEEP)) {
				mutex_exit(&vmp->vm_lock);
				return (vmem_xalloc(vmp, size, vmp->vm_quantum,
				    0, 0, NULL, NULL, vmflag & VM_KMFLAGS));
			}
			vmp->vm_kstat.vk_wait.value.ui64++;
			cv_wait(&vmp->vm_cv, &vmp->vm_lock);
			vsp = rotor->vs_anext;
		}
	}

	/*
	 * We found a segment.  Extract enough space to satisfy the allocation.
	 */
	addr = vsp->vs_start;
	vsp = vmem_seg_alloc(vmp, vsp, addr, size);
	ASSERT(vsp->vs_type == VMEM_ALLOC &&
	    vsp->vs_start == addr && vsp->vs_end == addr + size);

	/*
	 * Advance the rotor to right after the newly-allocated segment.
	 * That's where the next VM_NEXTFIT allocation will begin searching.
	 */
	vmem_advance(vmp, rotor, vsp);
	mutex_exit(&vmp->vm_lock);
	return ((void *)addr);
}

/*
 * Checks if vmp is guaranteed to have a size-byte buffer somewhere on its
 * freelist.  If size is not a power-of-2, it can return a false-negative.
 *
 * Used to decide if a newly imported span is superfluous after re-acquiring
 * the arena lock.
 */
static int
vmem_canalloc(vmem_t *vmp, size_t size)
{
	int hb;
	int flist = 0;
	ASSERT(MUTEX_HELD(&vmp->vm_lock));

	if ((size & (size - 1)) == 0)
		flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
	else if ((hb = highbit(size)) < VMEM_FREELISTS)
		flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));

	return (flist);
}
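/*
 * A worked example of the freelist-index computation above (the numbers
 * are purely illustrative): for size = 5120, which is not a power of 2,
 * highbit(5120) = 13, so P2ALIGN(vm_freemap, 1UL << 13) masks off every
 * freelist whose segments may be smaller than 2^13 = 8192.  Any freelist
 * that remains holds only segments of at least 8192 > 5120 bytes, so
 * lowbit() yields the smallest freelist that *definitely* satisfies the
 * request, without walking any segment list.
 */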
/*
 * Allocate size bytes at offset phase from an align boundary such that the
 * resulting segment [addr, addr + size) is a subset of [minaddr, maxaddr)
 * that does not straddle a nocross-aligned boundary.
 */
void *
vmem_xalloc(vmem_t *vmp, size_t size, size_t align_arg, size_t phase,
	size_t nocross, void *minaddr, void *maxaddr, int vmflag)
{
	vmem_seg_t *vsp;
	vmem_seg_t *vbest = NULL;
	uintptr_t addr, taddr, start, end;
	uintptr_t align = (align_arg != 0) ? align_arg : vmp->vm_quantum;
	void *vaddr, *xvaddr = NULL;
	size_t xsize;
	int hb, flist, resv;
	uint32_t mtbf;

	if ((align | phase | nocross) & (vmp->vm_quantum - 1))
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "parameters not vm_quantum aligned",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);

	if (nocross != 0 &&
	    (align > nocross || P2ROUNDUP(phase + size, align) > nocross))
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "overconstrained allocation",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);

	if (phase >= align || (align & (align - 1)) != 0 ||
	    (nocross & (nocross - 1)) != 0)
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "parameters inconsistent or invalid",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);

	if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
	    (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
		return (NULL);

	mutex_enter(&vmp->vm_lock);
	for (;;) {
		if (vmp->vm_nsegfree < VMEM_MINFREE &&
		    !vmem_populate(vmp, vmflag))
			break;
do_alloc:
		/*
		 * highbit() returns the highest bit + 1, which is exactly
		 * what we want: we want to search the first freelist whose
		 * members are *definitely* large enough to satisfy our
		 * allocation.  However, there are certain cases in which we
		 * want to look at the next-smallest freelist (which *might*
		 * be able to satisfy the allocation):
		 *
		 * (1)	The size is exactly a power of 2, in which case
		 *	the smaller freelist is always big enough;
		 *
		 * (2)	All other freelists are empty;
		 *
		 * (3)	We're in the highest possible freelist, which is
		 *	always empty (e.g. the 4GB freelist on 32-bit systems);
		 *
		 * (4)	We're doing a best-fit or first-fit allocation.
		 */
		if ((size & (size - 1)) == 0) {
			flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
		} else {
			hb = highbit(size);
			if ((vmp->vm_freemap >> hb) == 0 ||
			    hb == VMEM_FREELISTS ||
			    (vmflag & (VM_BESTFIT | VM_FIRSTFIT)))
				hb--;
			flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
		}

		for (vbest = NULL, vsp = (flist == 0) ? NULL :
		    vmp->vm_freelist[flist - 1].vs_knext;
		    vsp != NULL; vsp = vsp->vs_knext) {
			vmp->vm_kstat.vk_search.value.ui64++;
			if (vsp->vs_start == 0) {
				/*
				 * We're moving up to a larger freelist,
				 * so if we've already found a candidate,
				 * the fit can't possibly get any better.
				 */
				if (vbest != NULL)
					break;
				/*
				 * Find the next non-empty freelist.
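				 * (A freelist marker's vs_start is 0,
				 * so VS_SIZE(vsp) here is the marker's
				 * power-of-2 freelist size.)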
				 */
				flist = lowbit(P2ALIGN(vmp->vm_freemap,
				    VS_SIZE(vsp)));
				if (flist-- == 0)
					break;
				vsp = (vmem_seg_t *)&vmp->vm_freelist[flist];
				ASSERT(vsp->vs_knext->vs_type == VMEM_FREE);
				continue;
			}
			if (vsp->vs_end - 1 < (uintptr_t)minaddr)
				continue;
			if (vsp->vs_start > (uintptr_t)maxaddr - 1)
				continue;
			start = MAX(vsp->vs_start, (uintptr_t)minaddr);
			end = MIN(vsp->vs_end - 1, (uintptr_t)maxaddr - 1) + 1;
			taddr = P2PHASEUP(start, align, phase);
			if (P2CROSS(taddr, taddr + size - 1, nocross))
				taddr +=
				    P2ROUNDUP(P2NPHASE(taddr, nocross), align);
			if ((taddr - start) + size > end - start ||
			    (vbest != NULL && VS_SIZE(vsp) >= VS_SIZE(vbest)))
				continue;
			vbest = vsp;
			addr = taddr;
			if (!(vmflag & VM_BESTFIT) || VS_SIZE(vbest) == size)
				break;
		}
		if (vbest != NULL)
			break;
		ASSERT(xvaddr == NULL);
		if (size == 0)
			panic("vmem_xalloc(): size == 0");
		if (vmp->vm_source_alloc != NULL && nocross == 0 &&
		    minaddr == NULL && maxaddr == NULL) {
			size_t aneeded, asize;
			size_t aquantum = MAX(vmp->vm_quantum,
			    vmp->vm_source->vm_quantum);
			size_t aphase = phase;
			if ((align > aquantum) &&
			    !(vmp->vm_cflags & VMC_XALIGN)) {
				aphase = (P2PHASE(phase, aquantum) != 0) ?
				    align - vmp->vm_quantum : align - aquantum;
				ASSERT(aphase >= phase);
			}
			aneeded = MAX(size + aphase, vmp->vm_min_import);
			asize = P2ROUNDUP(aneeded, aquantum);

			/*
			 * Determine how many segment structures we'll consume.
			 * The calculation must be precise because if we're
			 * here on behalf of vmem_populate(), we are taking
			 * segments from a very limited reserve.
			 */
			if (size == asize && !(vmp->vm_cflags & VMC_XALLOC))
				resv = VMEM_SEGS_PER_SPAN_CREATE +
				    VMEM_SEGS_PER_EXACT_ALLOC;
			else if (phase == 0 &&
			    align <= vmp->vm_source->vm_quantum)
				resv = VMEM_SEGS_PER_SPAN_CREATE +
				    VMEM_SEGS_PER_LEFT_ALLOC;
			else
				resv = VMEM_SEGS_PER_ALLOC_MAX;

			ASSERT(vmp->vm_nsegfree >= resv);
			vmp->vm_nsegfree -= resv;	/* reserve our segs */
			mutex_exit(&vmp->vm_lock);
			if (vmp->vm_cflags & VMC_XALLOC) {
				size_t oasize = asize;
				vaddr = ((vmem_ximport_t *)
				    vmp->vm_source_alloc)(vmp->vm_source,
				    &asize, align, vmflag & VM_KMFLAGS);
				ASSERT(asize >= oasize);
				ASSERT(P2PHASE(asize,
				    vmp->vm_source->vm_quantum) == 0);
				ASSERT(!(vmp->vm_cflags & VMC_XALIGN) ||
				    IS_P2ALIGNED(vaddr, align));
			} else {
				vaddr = vmp->vm_source_alloc(vmp->vm_source,
				    asize, vmflag & VM_KMFLAGS);
			}
			mutex_enter(&vmp->vm_lock);
			vmp->vm_nsegfree += resv;	/* claim reservation */
			aneeded = size + align - vmp->vm_quantum;
			aneeded = P2ROUNDUP(aneeded, vmp->vm_quantum);
			if (vaddr != NULL) {
				/*
				 * Since we dropped the vmem lock while
				 * calling the import function, other
				 * threads could have imported space
				 * and made our import unnecessary.  In
				 * order to save space, we return
				 * excess imports immediately.
				 */
				if (asize > aneeded &&
				    vmp->vm_source_free != NULL &&
				    vmem_canalloc(vmp, aneeded)) {
					ASSERT(resv >=
					    VMEM_SEGS_PER_MIDDLE_ALLOC);
					xvaddr = vaddr;
					xsize = asize;
					goto do_alloc;
				}
				vbest = vmem_span_create(vmp, vaddr, asize, 1);
				addr = P2PHASEUP(vbest->vs_start, align, phase);
				break;
			} else if (vmem_canalloc(vmp, aneeded)) {
				/*
				 * Our import failed, but another thread
				 * added sufficient free memory to the arena
				 * to satisfy our request.  Go back and
				 * grab it.
				 */
				ASSERT(resv >= VMEM_SEGS_PER_MIDDLE_ALLOC);
				goto do_alloc;
			}
		}

		/*
		 * If the requestor chooses to fail the allocation attempt
		 * rather than reap, wait, and retry, get out of the loop.
		 */
		if (vmflag & VM_ABORT)
			break;
		mutex_exit(&vmp->vm_lock);
		if (vmp->vm_cflags & VMC_IDENTIFIER)
			kmem_reap_idspace();
		else
			kmem_reap();
		mutex_enter(&vmp->vm_lock);
		if (vmflag & VM_NOSLEEP)
			break;
		vmp->vm_kstat.vk_wait.value.ui64++;
		cv_wait(&vmp->vm_cv, &vmp->vm_lock);
	}
	if (vbest != NULL) {
		ASSERT(vbest->vs_type == VMEM_FREE);
		ASSERT(vbest->vs_knext != vbest);
		(void) vmem_seg_alloc(vmp, vbest, addr, size);
		mutex_exit(&vmp->vm_lock);
		if (xvaddr)
			vmp->vm_source_free(vmp->vm_source, xvaddr, xsize);
		ASSERT(P2PHASE(addr, align) == phase);
		ASSERT(!P2CROSS(addr, addr + size - 1, nocross));
		ASSERT(addr >= (uintptr_t)minaddr);
		ASSERT(addr + size - 1 <= (uintptr_t)maxaddr - 1);
		return ((void *)addr);
	}
	vmp->vm_kstat.vk_fail.value.ui64++;
	mutex_exit(&vmp->vm_lock);
	if (vmflag & VM_PANIC)
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "cannot satisfy mandatory allocation",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);
	ASSERT(xvaddr == NULL);
	return (NULL);
}

/*
 * Free the segment [vaddr, vaddr + size), where vaddr was a constrained
 * allocation.  vmem_xalloc() and vmem_xfree() must always be paired because
 * both routines bypass the quantum caches.
 */
void
vmem_xfree(vmem_t *vmp, void *vaddr, size_t size)
{
	vmem_seg_t *vsp, *vnext, *vprev;

	mutex_enter(&vmp->vm_lock);

	vsp = vmem_hash_delete(vmp, (uintptr_t)vaddr, size);
	vsp->vs_end = P2ROUNDUP(vsp->vs_end, vmp->vm_quantum);

	/*
	 * Attempt to coalesce with the next segment.
	 */
	vnext = vsp->vs_anext;
	if (vnext->vs_type == VMEM_FREE) {
		ASSERT(vsp->vs_end == vnext->vs_start);
		vmem_freelist_delete(vmp, vnext);
		vsp->vs_end = vnext->vs_end;
		vmem_seg_destroy(vmp, vnext);
	}

	/*
	 * Attempt to coalesce with the previous segment.
	 */
	vprev = vsp->vs_aprev;
	if (vprev->vs_type == VMEM_FREE) {
		ASSERT(vprev->vs_end == vsp->vs_start);
		vmem_freelist_delete(vmp, vprev);
		vprev->vs_end = vsp->vs_end;
		vmem_seg_destroy(vmp, vsp);
		vsp = vprev;
	}

	/*
	 * If the entire span is free, return it to the source.
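	 * (Both neighbors being span markers means vsp now covers the
	 * entire span; see section 2.1 of the Big Theory Statement.)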
	 */
	if (vsp->vs_aprev->vs_import && vmp->vm_source_free != NULL &&
	    vsp->vs_aprev->vs_type == VMEM_SPAN &&
	    vsp->vs_anext->vs_type == VMEM_SPAN) {
		vaddr = (void *)vsp->vs_start;
		size = VS_SIZE(vsp);
		ASSERT(size == VS_SIZE(vsp->vs_aprev));
		vmem_span_destroy(vmp, vsp);
		mutex_exit(&vmp->vm_lock);
		vmp->vm_source_free(vmp->vm_source, vaddr, size);
	} else {
		vmem_freelist_insert(vmp, vsp);
		mutex_exit(&vmp->vm_lock);
	}
}

/*
 * Allocate size bytes from arena vmp.  Returns the allocated address
 * on success, NULL on failure.  vmflag specifies VM_SLEEP or VM_NOSLEEP,
 * and may also specify best-fit, first-fit, or next-fit allocation policy
 * instead of the default instant-fit policy.  VM_SLEEP allocations are
 * guaranteed to succeed.
 */
void *
vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	vmem_seg_t *vsp;
	uintptr_t addr;
	int hb;
	int flist = 0;
	uint32_t mtbf;

	if (size - 1 < vmp->vm_qcache_max)
		return (kmem_cache_alloc(vmp->vm_qcache[(size - 1) >>
		    vmp->vm_qshift], vmflag & VM_KMFLAGS));

	if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
	    (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
		return (NULL);

	if (vmflag & VM_NEXTFIT)
		return (vmem_nextfit_alloc(vmp, size, vmflag));

	if (vmflag & (VM_BESTFIT | VM_FIRSTFIT))
		return (vmem_xalloc(vmp, size, vmp->vm_quantum, 0, 0,
		    NULL, NULL, vmflag));

	/*
	 * Unconstrained instant-fit allocation from the segment list.
	 */
	mutex_enter(&vmp->vm_lock);

	if (vmp->vm_nsegfree >= VMEM_MINFREE || vmem_populate(vmp, vmflag)) {
		if ((size & (size - 1)) == 0)
			flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
		else if ((hb = highbit(size)) < VMEM_FREELISTS)
			flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
	}

	if (flist-- == 0) {
		mutex_exit(&vmp->vm_lock);
		return (vmem_xalloc(vmp, size, vmp->vm_quantum,
		    0, 0, NULL, NULL, vmflag));
	}

	ASSERT(size <= (1UL << flist));
	vsp = vmp->vm_freelist[flist].vs_knext;
	addr = vsp->vs_start;
	(void) vmem_seg_alloc(vmp, vsp, addr, size);
	mutex_exit(&vmp->vm_lock);
	return ((void *)addr);
}

/*
 * Free the segment [vaddr, vaddr + size).
 */
void
vmem_free(vmem_t *vmp, void *vaddr, size_t size)
{
	if (size - 1 < vmp->vm_qcache_max)
		kmem_cache_free(vmp->vm_qcache[(size - 1) >> vmp->vm_qshift],
		    vaddr);
	else
		vmem_xfree(vmp, vaddr, size);
}

/*
 * Determine whether arena vmp contains the segment [vaddr, vaddr + size).
 */
int
vmem_contains(vmem_t *vmp, void *vaddr, size_t size)
{
	uintptr_t start = (uintptr_t)vaddr;
	uintptr_t end = start + size;
	vmem_seg_t *vsp;
	vmem_seg_t *seg0 = &vmp->vm_seg0;

	mutex_enter(&vmp->vm_lock);
	vmp->vm_kstat.vk_contains.value.ui64++;
	for (vsp = seg0->vs_knext; vsp != seg0; vsp = vsp->vs_knext) {
		vmp->vm_kstat.vk_contains_search.value.ui64++;
		ASSERT(vsp->vs_type == VMEM_SPAN);
		if (start >= vsp->vs_start && end - 1 <= vsp->vs_end - 1)
			break;
	}
	mutex_exit(&vmp->vm_lock);
	return (vsp != seg0);
}

/*
 * Add the span [vaddr, vaddr + size) to arena vmp.
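 * Returns vaddr on success, NULL if the necessary segment structures
 * could not be preallocated.  The span must be vm_quantum aligned.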
 */
void *
vmem_add(vmem_t *vmp, void *vaddr, size_t size, int vmflag)
{
	if (vaddr == NULL || size == 0)
		panic("vmem_add(%p, %p, %lu): bad arguments", vmp, vaddr, size);

	ASSERT(!vmem_contains(vmp, vaddr, size));

	mutex_enter(&vmp->vm_lock);
	if (vmem_populate(vmp, vmflag))
		(void) vmem_span_create(vmp, vaddr, size, 0);
	else
		vaddr = NULL;
	mutex_exit(&vmp->vm_lock);
	return (vaddr);
}

/*
 * Walk the vmp arena, applying func to each segment matching typemask.
 * If VMEM_REENTRANT is specified, the arena lock is dropped across each
 * call to func(); otherwise, it is held for the duration of vmem_walk()
 * to ensure a consistent snapshot.  Note that VMEM_REENTRANT callbacks
 * are *not* necessarily consistent, so they may only be used when a hint
 * is adequate.
 */
void
vmem_walk(vmem_t *vmp, int typemask,
	void (*func)(void *, void *, size_t), void *arg)
{
	vmem_seg_t *vsp;
	vmem_seg_t *seg0 = &vmp->vm_seg0;
	vmem_seg_t walker;

	if (typemask & VMEM_WALKER)
		return;

	bzero(&walker, sizeof (walker));
	walker.vs_type = VMEM_WALKER;

	mutex_enter(&vmp->vm_lock);
	VMEM_INSERT(seg0, &walker, a);
	for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext) {
		if (vsp->vs_type & typemask) {
			void *start = (void *)vsp->vs_start;
			size_t size = VS_SIZE(vsp);
			if (typemask & VMEM_REENTRANT) {
				vmem_advance(vmp, &walker, vsp);
				mutex_exit(&vmp->vm_lock);
				func(arg, start, size);
				mutex_enter(&vmp->vm_lock);
				vsp = &walker;
			} else {
				func(arg, start, size);
			}
		}
	}
	vmem_advance(vmp, &walker, NULL);
	mutex_exit(&vmp->vm_lock);
}

/*
 * Return the total amount of memory whose type matches typemask.  Thus:
 *
 *	typemask VMEM_ALLOC yields total memory allocated (in use).
 *	typemask VMEM_FREE yields total memory free (available).
 *	typemask (VMEM_ALLOC | VMEM_FREE) yields total arena size.
 */
size_t
vmem_size(vmem_t *vmp, int typemask)
{
	uint64_t size = 0;

	if (typemask & VMEM_ALLOC)
		size += vmp->vm_kstat.vk_mem_inuse.value.ui64;
	if (typemask & VMEM_FREE)
		size += vmp->vm_kstat.vk_mem_total.value.ui64 -
		    vmp->vm_kstat.vk_mem_inuse.value.ui64;
	return ((size_t)size);
}

/*
 * Create an arena called name whose initial span is [base, base + size).
 * The arena's natural unit of currency is quantum, so vmem_alloc()
 * guarantees quantum-aligned results.  The arena may import new spans
 * by invoking afunc() on source, and may return those spans by invoking
 * ffunc() on source.  To make small allocations fast and scalable,
 * the arena offers high-performance caching for each integer multiple
 * of quantum up to qcache_max.
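 *
 * For example (illustrative values), quantum = PAGESIZE with
 * qcache_max = 4 * PAGESIZE yields kmem caches that service 1-, 2-, 3-,
 * and 4-page allocations on behalf of this arena.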
 */
static vmem_t *
vmem_create_common(const char *name, void *base, size_t size, size_t quantum,
	void *(*afunc)(vmem_t *, size_t, int),
	void (*ffunc)(vmem_t *, void *, size_t),
	vmem_t *source, size_t qcache_max, int vmflag)
{
	int i;
	size_t nqcache;
	vmem_t *vmp, *cur, **vmpp;
	vmem_seg_t *vsp;
	vmem_freelist_t *vfp;
	uint32_t id = atomic_add_32_nv(&vmem_id, 1);

	if (vmem_vmem_arena != NULL) {
		vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
		    vmflag & VM_KMFLAGS);
	} else {
		ASSERT(id <= VMEM_INITIAL);
		vmp = &vmem0[id - 1];
	}

	/* An identifier arena must inherit from another identifier arena */
	ASSERT(source == NULL || ((source->vm_cflags & VMC_IDENTIFIER) ==
	    (vmflag & VMC_IDENTIFIER)));

	if (vmp == NULL)
		return (NULL);
	bzero(vmp, sizeof (vmem_t));

	(void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
	mutex_init(&vmp->vm_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vmp->vm_cv, NULL, CV_DEFAULT, NULL);
	vmp->vm_cflags = vmflag;
	vmflag &= VM_KMFLAGS;

	vmp->vm_quantum = quantum;
	vmp->vm_qshift = highbit(quantum) - 1;
	nqcache = MIN(qcache_max >> vmp->vm_qshift, VMEM_NQCACHE_MAX);

	for (i = 0; i <= VMEM_FREELISTS; i++) {
		vfp = &vmp->vm_freelist[i];
		vfp->vs_end = 1UL << i;
		vfp->vs_knext = (vmem_seg_t *)(vfp + 1);
		vfp->vs_kprev = (vmem_seg_t *)(vfp - 1);
	}

	vmp->vm_freelist[0].vs_kprev = NULL;
	vmp->vm_freelist[VMEM_FREELISTS].vs_knext = NULL;
	vmp->vm_freelist[VMEM_FREELISTS].vs_end = 0;
	vmp->vm_hash_table = vmp->vm_hash0;
	vmp->vm_hash_mask = VMEM_HASH_INITIAL - 1;
	vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);

	vsp = &vmp->vm_seg0;
	vsp->vs_anext = vsp;
	vsp->vs_aprev = vsp;
	vsp->vs_knext = vsp;
	vsp->vs_kprev = vsp;
	vsp->vs_type = VMEM_SPAN;

	vsp = &vmp->vm_rotor;
	vsp->vs_type = VMEM_ROTOR;
	VMEM_INSERT(&vmp->vm_seg0, vsp, a);

	bcopy(&vmem_kstat_template, &vmp->vm_kstat, sizeof (vmem_kstat_t));

	vmp->vm_id = id;
	if (source != NULL)
		vmp->vm_kstat.vk_source_id.value.ui32 = source->vm_id;
	vmp->vm_source = source;
	vmp->vm_source_alloc = afunc;
	vmp->vm_source_free = ffunc;

	/*
	 * Some arenas (like vmem_metadata and kmem_metadata) cannot
	 * use quantum caching to lower fragmentation.  Instead, we
	 * increase their imports, giving a similar effect.
	 */
	if (vmp->vm_cflags & VMC_NO_QCACHE) {
		vmp->vm_min_import =
		    VMEM_QCACHE_SLABSIZE(nqcache << vmp->vm_qshift);
		nqcache = 0;
	}

	if (nqcache != 0) {
		ASSERT(!(vmflag & VM_NOSLEEP));
		vmp->vm_qcache_max = nqcache << vmp->vm_qshift;
		for (i = 0; i < nqcache; i++) {
			char buf[VMEM_NAMELEN + 21];
			(void) sprintf(buf, "%s_%lu", vmp->vm_name,
			    (i + 1) * quantum);
			vmp->vm_qcache[i] = kmem_cache_create(buf,
			    (i + 1) * quantum, quantum, NULL, NULL, NULL,
			    NULL, vmp, KMC_QCACHE | KMC_NOTOUCH);
		}
	}

	if ((vmp->vm_ksp = kstat_create("vmem", vmp->vm_id, vmp->vm_name,
	    "vmem", KSTAT_TYPE_NAMED, sizeof (vmem_kstat_t) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) != NULL) {
		vmp->vm_ksp->ks_data = &vmp->vm_kstat;
		kstat_install(vmp->vm_ksp);
	}

	mutex_enter(&vmem_list_lock);
	vmpp = &vmem_list;
	while ((cur = *vmpp) != NULL)
		vmpp = &cur->vm_next;
	*vmpp = vmp;
	mutex_exit(&vmem_list_lock);

	if (vmp->vm_cflags & VMC_POPULATOR) {
		ASSERT(vmem_populators < VMEM_INITIAL);
		vmem_populator[atomic_add_32_nv(&vmem_populators, 1) - 1] = vmp;
		mutex_enter(&vmp->vm_lock);
		(void) vmem_populate(vmp, vmflag | VM_PANIC);
		mutex_exit(&vmp->vm_lock);
	}

	if ((base || size) && vmem_add(vmp, base, size, vmflag) == NULL) {
		vmem_destroy(vmp);
		return (NULL);
	}

	return (vmp);
}

vmem_t *
vmem_xcreate(const char *name, void *base, size_t size, size_t quantum,
	vmem_ximport_t *afunc, vmem_free_t *ffunc, vmem_t *source,
	size_t qcache_max, int vmflag)
{
	ASSERT(!(vmflag & (VMC_POPULATOR | VMC_XALLOC)));
	vmflag &= ~(VMC_POPULATOR | VMC_XALLOC);

	return (vmem_create_common(name, base, size, quantum,
	    (vmem_alloc_t *)afunc, ffunc, source, qcache_max,
	    vmflag | VMC_XALLOC));
}

vmem_t *
vmem_create(const char *name, void *base, size_t size, size_t quantum,
	vmem_alloc_t *afunc, vmem_free_t *ffunc, vmem_t *source,
	size_t qcache_max, int vmflag)
{
	ASSERT(!(vmflag & (VMC_XALLOC | VMC_XALIGN)));
	vmflag &= ~(VMC_XALLOC | VMC_XALIGN);

	return (vmem_create_common(name, base, size, quantum,
	    afunc, ffunc, source, qcache_max, vmflag));
}

/*
 * Destroy arena vmp.
 */
void
vmem_destroy(vmem_t *vmp)
{
	vmem_t *cur, **vmpp;
	vmem_seg_t *seg0 = &vmp->vm_seg0;
	vmem_seg_t *vsp;
	size_t leaked;
	int i;

	mutex_enter(&vmem_list_lock);
	vmpp = &vmem_list;
	while ((cur = *vmpp) != vmp)
		vmpp = &cur->vm_next;
	*vmpp = vmp->vm_next;
	mutex_exit(&vmem_list_lock);

	for (i = 0; i < VMEM_NQCACHE_MAX; i++)
		if (vmp->vm_qcache[i])
			kmem_cache_destroy(vmp->vm_qcache[i]);

	leaked = vmem_size(vmp, VMEM_ALLOC);
	if (leaked != 0)
		cmn_err(CE_WARN, "vmem_destroy('%s'): leaked %lu %s",
		    vmp->vm_name, leaked, (vmp->vm_cflags & VMC_IDENTIFIER) ?
		    "identifiers" : "bytes");

	if (vmp->vm_hash_table != vmp->vm_hash0)
		vmem_free(vmem_hash_arena, vmp->vm_hash_table,
		    (vmp->vm_hash_mask + 1) * sizeof (void *));

	/*
	 * Give back the segment structures for anything that's left in the
	 * arena, e.g. the primary spans and their free segments.
	 */
	VMEM_DELETE(&vmp->vm_rotor, a);
	for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext)
		vmem_putseg_global(vsp);

	while (vmp->vm_nsegfree > 0)
		vmem_putseg_global(vmem_getseg(vmp));

	kstat_delete(vmp->vm_ksp);

	mutex_destroy(&vmp->vm_lock);
	cv_destroy(&vmp->vm_cv);
	vmem_free(vmem_vmem_arena, vmp, sizeof (vmem_t));
}

/*
 * Resize vmp's hash table to keep the average lookup depth near 1.0.
 */
static void
vmem_hash_rescale(vmem_t *vmp)
{
	vmem_seg_t **old_table, **new_table, *vsp;
	size_t old_size, new_size, h, nseg;

	nseg = (size_t)(vmp->vm_kstat.vk_alloc.value.ui64 -
	    vmp->vm_kstat.vk_free.value.ui64);

	new_size = MAX(VMEM_HASH_INITIAL, 1 << (highbit(3 * nseg + 4) - 2));
	old_size = vmp->vm_hash_mask + 1;

	if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
		return;

	new_table = vmem_alloc(vmem_hash_arena, new_size * sizeof (void *),
	    VM_NOSLEEP);
	if (new_table == NULL)
		return;
	bzero(new_table, new_size * sizeof (void *));

	mutex_enter(&vmp->vm_lock);

	old_size = vmp->vm_hash_mask + 1;
	old_table = vmp->vm_hash_table;

	vmp->vm_hash_mask = new_size - 1;
	vmp->vm_hash_table = new_table;
	vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);

	for (h = 0; h < old_size; h++) {
		vsp = old_table[h];
		while (vsp != NULL) {
			uintptr_t addr = vsp->vs_start;
			vmem_seg_t *next_vsp = vsp->vs_knext;
			vmem_seg_t **hash_bucket = VMEM_HASH(vmp, addr);
			vsp->vs_knext = *hash_bucket;
			*hash_bucket = vsp;
			vsp = next_vsp;
		}
	}

	mutex_exit(&vmp->vm_lock);

	if (old_table != vmp->vm_hash0)
		vmem_free(vmem_hash_arena, old_table,
		    old_size * sizeof (void *));
}

/*
 * Perform periodic maintenance on all vmem arenas.
 */
void
vmem_update(void *dummy)
{
	vmem_t *vmp;

	mutex_enter(&vmem_list_lock);
	for (vmp = vmem_list; vmp != NULL; vmp = vmp->vm_next) {
		/*
		 * If threads are waiting for resources, wake them up
		 * periodically so they can issue another kmem_reap()
		 * to reclaim resources cached by the slab allocator.
		 */
		cv_broadcast(&vmp->vm_cv);

		/*
		 * Rescale the hash table to keep the hash chains short.
		 */
		vmem_hash_rescale(vmp);
	}
	mutex_exit(&vmem_list_lock);

	(void) timeout(vmem_update, dummy, vmem_update_interval * hz);
}

/*
 * Prepare vmem for use.
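 * (Bootstrap note: the earliest arenas and their segment structures come
 * from the statically allocated vmem0[] and vmem_seg0[] arrays, since
 * vmem cannot allocate its own metadata until vmem_seg_arena and friends
 * exist.)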
 */
vmem_t *
vmem_init(const char *heap_name,
	void *heap_start, size_t heap_size, size_t heap_quantum,
	void *(*heap_alloc)(vmem_t *, size_t, int),
	void (*heap_free)(vmem_t *, void *, size_t))
{
	uint32_t id;
	int nseg = VMEM_SEG_INITIAL;
	vmem_t *heap;

	while (--nseg >= 0)
		vmem_putseg_global(&vmem_seg0[nseg]);

	heap = vmem_create(heap_name,
	    heap_start, heap_size, heap_quantum,
	    NULL, NULL, NULL, 0,
	    VM_SLEEP | VMC_POPULATOR);

	vmem_metadata_arena = vmem_create("vmem_metadata",
	    NULL, 0, heap_quantum,
	    vmem_alloc, vmem_free, heap, 8 * heap_quantum,
	    VM_SLEEP | VMC_POPULATOR | VMC_NO_QCACHE);

	vmem_seg_arena = vmem_create("vmem_seg",
	    NULL, 0, heap_quantum,
	    heap_alloc, heap_free, vmem_metadata_arena, 0,
	    VM_SLEEP | VMC_POPULATOR);

	vmem_hash_arena = vmem_create("vmem_hash",
	    NULL, 0, 8,
	    heap_alloc, heap_free, vmem_metadata_arena, 0,
	    VM_SLEEP);

	vmem_vmem_arena = vmem_create("vmem_vmem",
	    vmem0, sizeof (vmem0), 1,
	    heap_alloc, heap_free, vmem_metadata_arena, 0,
	    VM_SLEEP);

	for (id = 0; id < vmem_id; id++)
		(void) vmem_xalloc(vmem_vmem_arena, sizeof (vmem_t),
		    1, 0, 0, &vmem0[id], &vmem0[id + 1],
		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

	return (heap);
}