/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/vm.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dumphdr.h>
#include <sys/bootconf.h>
#include <sys/lgrp.h>
#include <vm/seg_kmem.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/vm_dep.h>
#include <vm/faultcode.h>
#include <sys/promif.h>
#include <vm/seg_kp.h>
#include <sys/bitmap.h>
#include <sys/mem_cage.h>

/*
 * seg_kmem is the primary kernel memory segment driver.  It
 * maps the kernel heap [kernelheap, ekernelheap), module text,
 * and all memory which was allocated before the VM was initialized
 * into kas.
 *
 * Pages which belong to seg_kmem are hashed into &kvp vnode at
 * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
 * They must never be paged out since segkmem_fault() is a no-op to
 * prevent recursive faults.
 *
 * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
 * __x86 and are unlocked (p_sharelock == 0) on __sparc.  Once __x86
 * supports relocation the #ifdef kludges can be removed.
 *
 * seg_kmem pages may be subject to relocation by page_relocate(),
 * provided that the HAT supports it; if this is so, segkmem_reloc
 * will be set to a nonzero value.  All boot time allocated memory as
 * well as static memory is considered off limits to relocation.
 * Pages are "relocatable" if p_state does not have P_NORELOC set, so
 * we request P_NORELOC pages for memory that isn't safe to relocate.
 *
 * The kernel heap is logically divided up into four pieces:
 *
 * heap32_arena is for allocations that require 32-bit absolute
 * virtual addresses (e.g. code that uses 32-bit pointers/offsets).
 *
 * heap_core is for allocations that require 2GB *relative*
 * offsets; in other words all memory from heap_core is within
 * 2GB of all other memory from the same arena.  This is a requirement
 * of the addressing modes of some processors in supervisor code.
 *
 * heap_arena is the general heap arena.
 *
 * static_arena is the static memory arena.  Allocations from it
 * are not subject to relocation so it is safe to use the memory
 * physical address as well as the virtual address (e.g. the VA to
 * PA translations are static).  Caches may import from static_arena;
 * all other static memory allocations should use static_alloc_arena.
 *
 * On some platforms which have limited virtual address space, seg_kmem
 * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
 * segkp_bitmap is non-NULL, and each bit represents a page of virtual
 * address space which is actually seg_kp mapped.
 */

extern ulong_t *segkp_bitmap;	/* Is set if segkp is from the kernel heap */

char *kernelheap;		/* start of primary kernel heap */
char *ekernelheap;		/* end of primary kernel heap */
struct seg kvseg;		/* primary kernel heap segment */
struct seg kvseg_core;		/* "core" kernel heap segment */
struct seg kzioseg;		/* Segment for zio mappings */
vmem_t *heap_arena;		/* primary kernel heap arena */
vmem_t *heap_core_arena;	/* core kernel heap arena */
char *heap_core_base;		/* start of core kernel heap arena */
char *heap_lp_base;		/* start of kernel large page heap arena */
char *heap_lp_end;		/* end of kernel large page heap arena */
vmem_t *hat_memload_arena;	/* HAT translation data */
struct seg kvseg32;		/* 32-bit kernel heap segment */
vmem_t *heap32_arena;		/* 32-bit kernel heap arena */
vmem_t *heaptext_arena;		/* heaptext arena */
struct as kas;			/* kernel address space */
struct vnode kvp;		/* vnode for all segkmem pages */
struct vnode zvp;		/* vnode for zfs pages */
int segkmem_reloc;		/* enable/disable relocatable segkmem pages */
vmem_t *static_arena;		/* arena for caches to import static memory */
vmem_t *static_alloc_arena;	/* arena for allocating static memory */
vmem_t *zio_arena = NULL;	/* arena for allocating zio memory */
vmem_t *zio_alloc_arena = NULL;	/* arena for allocating zio memory */

/*
 * seg_kmem driver can map part of the kernel heap with large pages.
 * Currently this functionality is implemented for sparc platforms only.
 *
 * The large page size "segkmem_lpsize" for kernel heap is selected in the
 * platform specific code.  It can also be modified via /etc/system file.
 * Setting segkmem_lpsize to PAGESIZE in /etc/system disables usage of large
 * pages for kernel heap.  "segkmem_lpshift" is adjusted appropriately to
 * match segkmem_lpsize.
 *
 * At boot time we carve from kernel heap arena a range of virtual addresses
 * that will be used for large page mappings.  This range [heap_lp_base,
 * heap_lp_end) is set up as a separate vmem arena - "heap_lp_arena".  We also
 * create "kmem_lp_arena" that caches memory already backed up by large
 * pages.  kmem_lp_arena imports virtual segments from heap_lp_arena.
 */

size_t	segkmem_lpsize;
static uint_t	segkmem_lpshift = PAGESHIFT;
int	segkmem_lpszc = 0;

size_t	segkmem_kmemlp_quantum = 0x400000;	/* 4MB */
size_t	segkmem_heaplp_quantum;
vmem_t *heap_lp_arena;
static vmem_t *kmem_lp_arena;
static vmem_t *segkmem_ppa_arena;
static segkmem_lpcb_t segkmem_lpcb;

/*
 * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
 * consumed by the large page heap.  By default this parameter is set to 1/8 of
 * physmem but can be adjusted through /etc/system either directly or
 * indirectly by setting "segkmem_kmemlp_pcnt" to the percent of physmem
 * we allow for large page heap.
 */
size_t	segkmem_kmemlp_max;
static uint_t	segkmem_kmemlp_pcnt;

/*
 * Getting large pages for kernel heap could be problematic due to
 * physical memory fragmentation.  That's why we allow preallocating
 * "segkmem_kmemlp_min" bytes at boot time.
 */
static size_t	segkmem_kmemlp_min;

/*
 * Throttling is used to avoid expensive tries to allocate large pages
 * for kernel heap when a lot of successive attempts to do so fail.
 */
static ulong_t segkmem_lpthrottle_max = 0x400000;
static ulong_t segkmem_lpthrottle_start = 0x40;
static ulong_t segkmem_use_lpthrottle = 1;

/*
 * Freed pages accumulate on a garbage list until segkmem is ready,
 * at which point we call segkmem_gc() to free it all.
 */
typedef struct segkmem_gc_list {
	struct segkmem_gc_list	*gc_next;
	vmem_t			*gc_arena;
	size_t			gc_size;
} segkmem_gc_list_t;

static segkmem_gc_list_t *segkmem_gc_list;

/*
 * Allocations from the hat_memload arena add VM_MEMLOAD to their
 * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
 * to take steps to prevent infinite recursion.  HAT allocations also
 * must be non-relocatable to prevent recursive page faults.
 */
static void *
hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
{
	flags |= (VM_MEMLOAD | VM_NORELOC);
	return (segkmem_alloc(vmp, size, flags));
}

/*
 * Allocations from static_arena (or any other arena that uses
 * segkmem_alloc_permanent()) require non-relocatable (permanently
 * wired) memory pages, since these pages are referenced by physical
 * as well as virtual address.
 */
void *
segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
{
	return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
}

/*
 * Initialize kernel heap boundaries.
 */
void
kernelheap_init(
	void *heap_start,
	void *heap_end,
	char *first_avail,
	void *core_start,
	void *core_end)
{
	uintptr_t textbase;
	size_t core_size;
	size_t heap_size;
	vmem_t *heaptext_parent;
	size_t	heap_lp_size = 0;
#ifdef __sparc
	size_t kmem64_sz = kmem64_aligned_end - kmem64_base;
#endif	/* __sparc */

	kernelheap = heap_start;
	ekernelheap = heap_end;

#ifdef __sparc
	heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
	/*
	 * Bias heap_lp start address by kmem64_sz to reduce collisions
	 * in 4M kernel TSB between kmem64 area and heap_lp
	 */
	kmem64_sz = P2ROUNDUP(kmem64_sz, MMU_PAGESIZE256M);
	if (kmem64_sz <= heap_lp_size / 2)
		heap_lp_size -= kmem64_sz;
	heap_lp_base = ekernelheap - heap_lp_size;
	heap_lp_end = heap_lp_base + heap_lp_size;
#endif	/* __sparc */

	/*
	 * If this platform has a 'core' heap area, then the space for
	 * overflow module text should be carved out of the end of that
	 * heap.  Otherwise, it gets carved out of the general purpose
	 * heap.
	 */
	core_size = (uintptr_t)core_end - (uintptr_t)core_start;
	if (core_size > 0) {
		ASSERT(core_size >= HEAPTEXT_SIZE);
		textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
		core_size -= HEAPTEXT_SIZE;
	}
#ifndef __sparc
	else {
		ekernelheap -= HEAPTEXT_SIZE;
		textbase = (uintptr_t)ekernelheap;
	}
#endif

	heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
	heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
	    segkmem_alloc, segkmem_free);

	if (core_size > 0) {
		heap_core_arena = vmem_create("heap_core", core_start,
		    core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
		heap_core_base = core_start;
	} else {
		heap_core_arena = heap_arena;
		heap_core_base = kernelheap;
	}

	/*
	 * Reserve space for the large page heap.  If large pages for the
	 * kernel heap are enabled, the large page heap arena will be created
	 * later in the boot sequence in segkmem_heap_lp_init().  Otherwise
	 * the allocated range will be returned back to the heap_arena.
	 */
	if (heap_lp_size) {
		(void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
		    heap_lp_base, heap_lp_end,
		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
	}

	/*
	 * Remove the already-spoken-for memory range [kernelheap, first_avail).
	 */
	(void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
	    0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

#ifdef __sparc
	heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
	    SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
	    NULL, NULL, 0, VM_SLEEP);

	textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
	heaptext_parent = NULL;
#else	/* __sparc */
	heap32_arena = heap_core_arena;
	heaptext_parent = heap_core_arena;
#endif	/* __sparc */

	heaptext_arena = vmem_create("heaptext", (void *)textbase,
	    HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);

	/*
	 * Create a set of arenas for memory with static translations
	 * (e.g. VA -> PA translations cannot change).  Since using
	 * kernel pages by physical address implies it isn't safe to
	 * walk across page boundaries, the static_arena quantum must
	 * be PAGESIZE.  Any kmem caches that require static memory
	 * should source from static_arena, while direct allocations
	 * should only use static_alloc_arena.
	 */
	static_arena = vmem_create("static", NULL, 0, PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
	static_alloc_arena = vmem_create("static_alloc", NULL, 0,
	    sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
	    0, VM_SLEEP);

	/*
	 * Create an arena for translation data (ptes, hmes, or hblks).
	 * We need an arena for this because hat_memload() is essential
	 * to vmem_populate() (see comments in common/os/vmem.c).
	 *
	 * Note: any kmem cache that allocates from hat_memload_arena
	 * must be created as a KMC_NOHASH cache (i.e. no external slab
	 * and bufctl structures to allocate) so that slab creation doesn't
	 * require anything more than a single vmem_alloc().
	 */
	hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
	    hat_memload_alloc, segkmem_free, heap_arena, 0,
	    VM_SLEEP | VMC_POPULATOR);
}

void
boot_mapin(caddr_t addr, size_t size)
{
	caddr_t eaddr;
	page_t *pp;
	pfn_t pfnum;

	if (page_resv(btop(size), KM_NOSLEEP) == 0)
		panic("boot_mapin: page_resv failed");

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pfnum = va_to_pfn(addr);
		if (pfnum == PFN_INVALID)
			continue;
		if ((pp = page_numtopp_nolock(pfnum)) == NULL)
			panic("boot_mapin(): No pp for pfnum = %lx", pfnum);

		/*
		 * must break up any large pages that may have constituent
		 * pages being utilized for BOP_ALLOC()'s before calling
		 * page_numtopp().  The locking code (i.e. page_reclaim())
		 * can't handle them
		 */
		if (pp->p_szc != 0)
			page_boot_demote(pp);

		pp = page_numtopp(pfnum, SE_EXCL);
		if (pp == NULL || PP_ISFREE(pp))
			panic("boot_alloc: pp is NULL or free");

		/*
		 * If the cage is on but doesn't yet contain this page,
		 * mark it as non-relocatable.
		 */
		if (kcage_on && !PP_ISNORELOC(pp))
			PP_SETNORELOC(pp);

		(void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		page_unlock(pp);
#endif
	}
}

/*
 * Get pages from boot and hash them into the kernel's vp.
 * Used after page structs have been allocated, but before segkmem is ready.
 */
void *
boot_alloc(void *inaddr, size_t size, uint_t align)
{
	caddr_t addr = inaddr;

	if (bootops == NULL)
		prom_panic("boot_alloc: attempt to allocate memory after "
		    "BOP_GONE");

	size = ptob(btopr(size));
	if (BOP_ALLOC(bootops, addr, size, align) != addr)
		panic("boot_alloc: BOP_ALLOC failed");
	boot_mapin((caddr_t)addr, size);
	return (addr);
}

static void
segkmem_badop()
{
	panic("segkmem_badop");
}

#define	SEGKMEM_BADOP(t)	(t(*)())segkmem_badop

/*ARGSUSED*/
static faultcode_t
segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
	enum fault_type type, enum seg_rw rw)
{
	pgcnt_t npages;
	spgcnt_t pg;
	page_t *pp;
	struct vnode *vp = seg->s_data;

	ASSERT(RW_READ_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_fault: bad args");

	/*
	 * If it is one of segkp pages, call segkp_fault.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));

	if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
		return (FC_NOSUPPORT);

	npages = btopr(size);

	switch (type) {
	case F_SOFTLOCK:	/* lock down already-loaded translations */
		for (pg = 0; pg < npages; pg++) {
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_SHARED);
			if (pp == NULL) {
				/*
				 * Hmm, no page.  Does a kernel mapping
				 * exist for it?
				 */
				if (!hat_probe(kas.a_hat, addr)) {
					addr -= PAGESIZE;
					while (--pg >= 0) {
						pp = page_find(vp, (u_offset_t)
						    (uintptr_t)addr);
						if (pp)
							page_unlock(pp);
						addr -= PAGESIZE;
					}
					return (FC_NOMAP);
				}
			}
			addr += PAGESIZE;
		}
		if (rw == S_OTHER)
			hat_reserve(seg->s_as, addr, size);
		return (0);
	case F_SOFTUNLOCK:
		while (npages--) {
			pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
			if (pp)
				page_unlock(pp);
			addr += PAGESIZE;
		}
		return (0);
	default:
		return (FC_NOSUPPORT);
	}
	/*NOTREACHED*/
}

static int
segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_setprot: bad args");

	/*
	 * If it is one of segkp pages, call segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_SETPROT(segkp, addr, size, prot));

	if (prot == 0)
		hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
	else
		hat_chgprot(kas.a_hat, addr, size, prot);
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_CHECKPROT(segkp, addr, size, prot));

	segkmem_badop();
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_KLUSTER(segkp, addr, delta));

	segkmem_badop();
	return (0);
}

static void
segkmem_xdump_range(void *arg, void *start, size_t size)
{
	struct as *as = arg;
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	while (addr < addr_end) {
		pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
		if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
			dump_addpage(as, addr, pfn);
		addr += PAGESIZE;
		dump_timeleft = dump_timeout;
	}
}

static void
segkmem_dump_range(void *arg, void *start, size_t size)
{
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	/*
	 * If we are about to start dumping the range of addresses we
	 * carved out of the kernel heap for the large page heap, walk
	 * heap_lp_arena to find what segments are actually populated.
	 */
	if (SEGKMEM_USE_LARGEPAGES &&
	    addr == heap_lp_base && addr_end == heap_lp_end &&
	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
		vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_xdump_range, arg);
	} else {
		segkmem_xdump_range(arg, start, size);
	}
}

static void
segkmem_dump(struct seg *seg)
{
	/*
	 * The kernel's heap_arena (represented by kvseg) is a very large
	 * VA space, most of which is typically unused.  To speed up dumping
	 * we use vmem_walk() to quickly find the pieces of heap_arena that
	 * are actually in use.  We do the same for heap32_arena and
	 * heap_core.
	 *
	 * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
	 * may ultimately need to allocate memory.  Reentrant walks are
	 * necessarily imperfect snapshots.  The kernel heap continues
	 * to change during a live crash dump, for example.  For a normal
	 * crash dump, however, we know that there won't be any other threads
	 * messing with the heap.  Therefore, at worst, we may fail to dump
	 * the pages that get allocated by the act of dumping; but we will
	 * always dump every page that was allocated when the walk began.
	 *
	 * The other segkmem segments are dense (fully populated), so there's
	 * no need to use this technique when dumping them.
	 *
	 * Note: when adding special dump handling for any new sparsely-
	 * populated segments, be sure to add similar handling to the ::kgrep
	 * code in mdb.
	 */
	if (seg == &kvseg) {
		vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
#ifndef __sparc
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
#endif
	} else if (seg == &kvseg_core) {
		vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kvseg32) {
		vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kzioseg) {
		/*
		 * We don't want to dump pages attached to kzioseg since they
		 * contain file data from ZFS.  If this page's segment is
		 * kzioseg return instead of writing it to the dump device.
		 */
		return;
	} else {
		segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
	}
}

/*
 * lock/unlock kmem pages over a given range [addr, addr+len).
 * Returns a shadow list of pages in ppp.  If there are holes
 * in the range (e.g. some of the kernel mappings do not have
 * underlying page_ts) returns ENOTSUP so that as_pagelock()
 * will handle the range via as_fault(F_SOFTLOCK).
 */
/*ARGSUSED*/
static int
segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
	page_t ***ppp, enum lock_type type, enum seg_rw rw)
{
	page_t **pplist, *pp;
	pgcnt_t npages;
	spgcnt_t pg;
	size_t nb;
	struct vnode *vp = seg->s_data;

	ASSERT(ppp != NULL);

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_PAGELOCK(segkp, addr, len, ppp, type, rw));

	if (type == L_PAGERECLAIM)
		return (ENOTSUP);

	npages = btopr(len);
	nb = sizeof (page_t *) * npages;

	if (type == L_PAGEUNLOCK) {
		pplist = *ppp;
		ASSERT(pplist != NULL);

		for (pg = 0; pg < npages; pg++) {
			pp = pplist[pg];
			page_unlock(pp);
		}
		kmem_free(pplist, nb);
		return (0);
	}

	ASSERT(type == L_PAGELOCK);

	pplist = kmem_alloc(nb, KM_NOSLEEP);
	if (pplist == NULL) {
		*ppp = NULL;
		return (ENOTSUP);	/* take the slow path */
	}

	for (pg = 0; pg < npages; pg++) {
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
		if (pp == NULL) {
			while (--pg >= 0)
				page_unlock(pplist[pg]);
			kmem_free(pplist, nb);
			*ppp = NULL;
			return (ENOTSUP);
		}
		pplist[pg] = pp;
		addr += PAGESIZE;
	}

	*ppp = pplist;
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_GETMEMID(segkp, addr, memidp));

	segkmem_badop();
	return (0);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segkmem_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segkmem_capable(struct seg *seg, segcapability_t capability)
{
	if (capability == S_CAPABILITY_NOMINFLT)
		return (1);
	return (0);
}

static struct seg_ops segkmem_ops = {
	SEGKMEM_BADOP(int),		/* dup */
	SEGKMEM_BADOP(int),		/* unmap */
	SEGKMEM_BADOP(void),		/* free */
	segkmem_fault,
	SEGKMEM_BADOP(faultcode_t),	/* faulta */
	segkmem_setprot,
	segkmem_checkprot,
	segkmem_kluster,
	SEGKMEM_BADOP(size_t),		/* swapout */
	SEGKMEM_BADOP(int),		/* sync */
	SEGKMEM_BADOP(size_t),		/* incore */
	SEGKMEM_BADOP(int),		/* lockop */
	SEGKMEM_BADOP(int),		/* getprot */
	SEGKMEM_BADOP(u_offset_t),	/* getoffset */
	SEGKMEM_BADOP(int),		/* gettype */
	SEGKMEM_BADOP(int),		/* getvp */
	SEGKMEM_BADOP(int),		/* advise */
	segkmem_dump,
	segkmem_pagelock,
	SEGKMEM_BADOP(int),		/* setpgsz */
	segkmem_getmemid,
	segkmem_getpolicy,		/* getpolicy */
	segkmem_capable,		/* capable */
};

int
segkmem_zio_create(struct seg *seg)
{
	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
	seg->s_ops = &segkmem_ops;
	seg->s_data = &zvp;
	kas.a_size += seg->s_size;
	return (0);
}

int
segkmem_create(struct seg *seg)
{
	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
	seg->s_ops = &segkmem_ops;
	seg->s_data = &kvp;
	kas.a_size += seg->s_size;
	return (0);
}

/*ARGSUSED*/
page_t *
segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
{
	struct seg kseg;
	int pgflags;
	struct vnode *vp = arg;

	if (vp == NULL)
		vp = &kvp;

	kseg.s_as = &kas;
	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if ((vmflag & VM_NOSLEEP) == 0)
		pgflags |= PG_WAIT;
	if (vmflag & VM_PANIC)
		pgflags |= PG_PANIC;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;

	return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kseg, addr));
}

/*
 * Allocate pages to back the virtual address range [addr, addr + size).
 * If addr is NULL, allocate the virtual address space as well.
 */
void *
segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
	page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
{
	page_t *ppl;
	caddr_t addr = inaddr;
	pgcnt_t npages = btopr(size);
	int allocflag;

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		return (NULL);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		return (NULL);
	}

	ppl = page_create_func(addr, size, vmflag, pcarg);
	if (ppl == NULL) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	/*
	 * Under certain conditions, we need to let the HAT layer know
	 * that it cannot safely allocate memory.  Allocations from
	 * the hat_memload vmem arena always need this, to prevent
	 * infinite recursion.
	 *
	 * In addition, the x86 hat cannot safely do memory
	 * allocations while in vmem_populate(), because there
	 * is no simple bound on its usage.
	 */
	if (vmflag & VM_MEMLOAD)
		allocflag = HAT_NO_KALLOC;
#if defined(__x86)
	else if (vmem_is_populator())
		allocflag = HAT_NO_KALLOC;
#endif
	else
		allocflag = 0;

	while (ppl != NULL) {
		page_t *pp = ppl;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		ASSERT(PAGE_EXCL(pp));
		page_io_unlock(pp);
		hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
		    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK | allocflag);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		if (vmflag & SEGKMEM_SHARELOCKED)
			page_downgrade(pp);
		else
			page_unlock(pp);
#endif
	}

	return (addr);
}

static void *
segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
{
	void *addr;
	segkmem_gc_list_t *gcp, **prev_gcpp;

	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
#ifndef __sparc
		if (bootops->bsys_alloc == NULL)
			halt("Memory allocation between bop_alloc() and "
			    "kmem_alloc().\n");
#endif

		/*
		 * There's not a lot of memory to go around during boot,
		 * so recycle it if we can.
		 */
		for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
		    prev_gcpp = &gcp->gc_next) {
			if (gcp->gc_arena == vmp && gcp->gc_size == size) {
				*prev_gcpp = gcp->gc_next;
				return (gcp);
			}
		}

		addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
		if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
			panic("segkmem_alloc: boot_alloc failed");
		return (addr);
	}
	return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create, vp));
}

void *
segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
}

void *
segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
}

/*
 * Any changes to this routine must also be carried over to
 * devmap_free_pages() in the seg_dev driver.  This is because
 * we currently don't have a special kernel segment for non-paged
 * kernel memory that is exported by drivers to user space.
 */
static void
segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
    void (*func)(page_t *))
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
		segkmem_gc_list_t *gc = inaddr;
		gc->gc_arena = vmp;
		gc->gc_size = size;
		gc->gc_next = segkmem_gc_list;
		segkmem_gc_list = gc;
		return;
	}

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
#if defined(__x86)
		pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL)
			panic("segkmem_free: page not found");
		if (!page_tryupgrade(pp)) {
			/*
			 * Some other thread has a sharelock.  Wait for
			 * it to drop the lock so we can free this page.
			 */
			page_unlock(pp);
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
		}
#else
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
#endif
		if (pp == NULL)
			panic("segkmem_free: page not found");
		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
		pp->p_lckcnt = 0;
		if (func)
			func(pp);
		else
			page_destroy(pp, 0);
	}
	if (func == NULL)
		page_unresv(npages);

	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);
}

void
segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, void (*func)(page_t *))
{
	segkmem_free_vn(vmp, inaddr, size, &kvp, func);
}

void
segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
{
	segkmem_free_vn(vmp, inaddr, size, &kvp, NULL);
}

void
segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
{
	segkmem_free_vn(vmp, inaddr, size, &zvp, NULL);
}

void
segkmem_gc(void)
{
	ASSERT(kvseg.s_base != NULL);
	while (segkmem_gc_list != NULL) {
		segkmem_gc_list_t *gc = segkmem_gc_list;
		segkmem_gc_list = gc->gc_next;
		segkmem_free(gc->gc_arena, gc, gc->gc_size);
	}
}

/*
 * Legacy entry points from here to end of file.
 */
void
segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
    pfn_t pfn, uint_t flags)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
	hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
	    flags | HAT_LOAD_LOCK);
}

void
segkmem_mapout(struct seg *seg, void *addr, size_t size)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
}

void *
kmem_getpages(pgcnt_t npages, int kmflag)
{
	return (kmem_alloc(ptob(npages), kmflag));
}

void
kmem_freepages(void *addr, pgcnt_t npages)
{
	kmem_free(addr, ptob(npages));
}

/*
 * segkmem_page_create_large() allocates a large page to be used for the kmem
 * caches.  If kpr is enabled we ask for a relocatable page unless requested
 * otherwise.  If kpr is disabled we have to ask for a non-reloc page.
 */
static page_t *
segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
{
	int pgflags;

	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if (!(vmflag & VM_NOSLEEP))
		pgflags |= PG_WAIT;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;

	return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kvseg, addr, arg));
}

/*
 * Allocate a large page to back the virtual address range
 * [addr, addr + size).  If addr is NULL, allocate the virtual address
 * space as well.
 */
static void *
segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
    uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
    void *pcarg)
{
	caddr_t addr = inaddr, pa;
	size_t  lpsize = segkmem_lpsize;
	pgcnt_t npages = btopr(size);
	pgcnt_t nbpages = btop(lpsize);
	pgcnt_t nlpages = size >> segkmem_lpshift;
	size_t  ppasize = nbpages * sizeof (page_t *);
	page_t *pp, *rootpp, **ppa, *pplist = NULL;
	int i;

	vmflag |= VM_NOSLEEP;

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		return (NULL);
	}

	/*
	 * allocate an array we need for hat_memload_array.
	 * we use a separate arena to avoid recursion.
	 * we will not need this array when hat_memload_array learns pp++
	 */
	if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
		goto fail_array_alloc;
	}

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		goto fail_vmem_alloc;

	ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);

	/* create all the pages */
	for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
		if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
			goto fail_page_create;
		page_list_concat(&pplist, &pp);
	}

	/* at this point we have all the resource to complete the request */
	while ((rootpp = pplist) != NULL) {
		for (i = 0; i < nbpages; i++) {
			ASSERT(pplist != NULL);
			pp = pplist;
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
			ppa[i] = pp;
		}
		/*
		 * Load the locked entry.  It's OK to preload the entry into the
		 * TSB since we now support large mappings in the kernel TSB.
		 */
		hat_memload_array(kas.a_hat,
		    (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
		    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK);

		for (--i; i >= 0; --i) {
			ppa[i]->p_lckcnt = 1;
			page_unlock(ppa[i]);
		}
	}

	vmem_free(segkmem_ppa_arena, ppa, ppasize);
	return (addr);

fail_page_create:
	while ((rootpp = pplist) != NULL) {
		for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
			ASSERT(pp != NULL);
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
		}
		page_destroy_pages(rootpp);
	}

	if (inaddr == NULL)
		vmem_free(vmp, addr, size);

fail_vmem_alloc:
	vmem_free(segkmem_ppa_arena, ppa, ppasize);

fail_array_alloc:
	page_unresv(npages);

	return (NULL);
}

static void
segkmem_free_one_lp(caddr_t addr, size_t size)
{
	page_t *pp, *rootpp = NULL;
	pgcnt_t pgs_left = btopr(size);

	ASSERT(size == segkmem_lpsize);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
		if (pp == NULL)
			panic("segkmem_free_one_lp: page not found");
		ASSERT(PAGE_EXCL(pp));
		pp->p_lckcnt = 0;
		if (rootpp == NULL)
			rootpp = pp;
	}
	ASSERT(rootpp != NULL);
	page_destroy_pages(rootpp);

	/* page_unresv() is done by the caller */
}

/*
 * This function is called to import new spans into the vmem arenas like
 * kmem_default_arena and kmem_oversize_arena.  It first tries to import
 * spans from large page arena - kmem_lp_arena.  In order to do this it might
 * have to "upgrade the requested size" to kmem_lp_arena quantum.  If
 * it was not able to satisfy the upgraded request it then calls regular
 * segkmem_alloc() that satisfies the request by importing from "*vmp" arena.
 */
/*ARGSUSED*/
void *
segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
	size_t size;
	kthread_t *t = curthread;
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;

	ASSERT(sizep != NULL);

	size = *sizep;

	if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
	    !(vmflag & SEGKMEM_SHARELOCKED)) {

		size_t kmemlp_qnt = segkmem_kmemlp_quantum;
		size_t asize = P2ROUNDUP(size, kmemlp_qnt);
		void *addr = NULL;
		ulong_t *lpthrtp = &lpcb->lp_throttle;
		ulong_t lpthrt = *lpthrtp;
		int	dowakeup = 0;
		int	doalloc = 1;

		ASSERT(kmem_lp_arena != NULL);
		ASSERT(asize >= size);

		if (lpthrt != 0) {
			/* try to update the throttle value */
			lpthrt = atomic_add_long_nv(lpthrtp, 1);
			if (lpthrt >= segkmem_lpthrottle_max) {
				lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
				    segkmem_lpthrottle_max / 4);
			}

			/*
			 * when we get above throttle start do an exponential
			 * backoff at trying large pages and reaping
			 */
			if (lpthrt > segkmem_lpthrottle_start &&
			    (lpthrt & (lpthrt - 1))) {
				lpcb->allocs_throttled++;
				lpthrt--;
				if ((lpthrt & (lpthrt - 1)) == 0)
					kmem_reap();
				return (segkmem_alloc(vmp, size, vmflag));
			}
		}

		if (!(vmflag & VM_NOSLEEP) &&
		    segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
		    vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
		    asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {

			/*
			 * we are low on free memory in kmem_lp_arena
			 * we let only one thread allocate a heap_lp
			 * quantum size chunk that everybody is going to
			 * share
			 */
			mutex_enter(&lpcb->lp_lock);

			if (lpcb->lp_wait) {

				/* we are not the first one - wait */
				cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
				if (vmem_size(kmem_lp_arena, VMEM_FREE) <
				    kmemlp_qnt) {
					doalloc = 0;
				}
			} else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
			    kmemlp_qnt) {

				/*
				 * we are the first one, make sure we import
				 * a large page
				 */
				if (asize == kmemlp_qnt)
					asize += kmemlp_qnt;
				dowakeup = 1;
				lpcb->lp_wait = 1;
			}

			mutex_exit(&lpcb->lp_lock);
		}

		/*
		 * VM_ABORT flag prevents sleeps in vmem_xalloc when
		 * large pages are not available.  In that case this allocation
		 * attempt will fail and we will retry allocation with small
		 * pages.  We also do not want to panic if this allocation fails
		 * because we are going to retry.
		 */
		if (doalloc) {
			addr = vmem_alloc(kmem_lp_arena, asize,
			    (vmflag | VM_ABORT) & ~VM_PANIC);

			if (dowakeup) {
				mutex_enter(&lpcb->lp_lock);
				ASSERT(lpcb->lp_wait != 0);
				lpcb->lp_wait = 0;
				cv_broadcast(&lpcb->lp_cv);
				mutex_exit(&lpcb->lp_lock);
			}
		}

		if (addr != NULL) {
			*sizep = asize;
			*lpthrtp = 0;
			return (addr);
		}

		if (vmflag & VM_NOSLEEP)
			lpcb->nosleep_allocs_failed++;
		else
			lpcb->sleep_allocs_failed++;
		lpcb->alloc_bytes_failed += size;

		/* if large page throttling is not started yet do it */
		if (segkmem_use_lpthrottle && lpthrt == 0) {
			lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
		}
	}
	return (segkmem_alloc(vmp, size, vmflag));
}

void
segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
{
	if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
		segkmem_free(vmp, inaddr, size);
	} else {
		vmem_free(kmem_lp_arena, inaddr, size);
	}
}

/*
 * segkmem_alloc_lpi() imports virtual memory from large page heap arena
 * into kmem_lp arena.  In the process it maps the imported segment with
 * large pages.
 */
static void *
segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	void  *addr;

	ASSERT(size != 0);
	ASSERT(vmp == heap_lp_arena);

	/* do not allow the large page heap to grow beyond its limits */
	if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
		lpcb->allocs_limited++;
		return (NULL);
	}

	addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create_large, NULL);
	return (addr);
}

/*
 * segkmem_free_lpi() returns virtual memory back into large page heap arena
 * from kmem_lp arena.  Before doing this it unmaps the segment and frees
 * large pages used to map it.
 */
static void
segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
{
	pgcnt_t	nlpages = size >> segkmem_lpshift;
	size_t	lpsize = segkmem_lpsize;
	caddr_t	addr = inaddr;
	pgcnt_t	npages = btopr(size);
	int	i;

	ASSERT(vmp == heap_lp_arena);
	ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
	ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);

	for (i = 0; i < nlpages; i++) {
		segkmem_free_one_lp(addr, lpsize);
		addr += lpsize;
	}

	page_unresv(npages);

	vmem_free(vmp, inaddr, size);
}

/*
 * This function is called at system boot time by kmem_init right after
 * /etc/system file has been read.  It checks based on hardware configuration
 * and /etc/system settings if system is going to use large pages.  The
 * initialization necessary to actually start using large pages
 * happens later in the process after segkmem_heap_lp_init() is called.
 */
int
segkmem_lpsetup()
{
	int use_large_pages = 0;

#ifdef __sparc

	size_t memtotal = physmem * PAGESIZE;

	if (heap_lp_base == NULL) {
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* get a platform dependent value of large page size for kernel heap */
	segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);

	if (segkmem_lpsize <= PAGESIZE) {
		/*
		 * put virtual space reserved for the large page kernel
		 * heap back to the regular heap
		 */
		vmem_xfree(heap_arena, heap_lp_base,
		    heap_lp_end - heap_lp_base);
		heap_lp_base = NULL;
		heap_lp_end = NULL;
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* set heap_lp quantum if necessary */
	if (segkmem_heaplp_quantum == 0 ||
	    (segkmem_heaplp_quantum & (segkmem_heaplp_quantum - 1)) ||
	    P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
		segkmem_heaplp_quantum = segkmem_lpsize;
	}

	/* set kmem_lp quantum if necessary */
	if (segkmem_kmemlp_quantum == 0 ||
	    (segkmem_kmemlp_quantum & (segkmem_kmemlp_quantum - 1)) ||
	    segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
		segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
	}

	/* set total amount of memory allowed for large page kernel heap */
	if (segkmem_kmemlp_max == 0) {
		if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
			segkmem_kmemlp_pcnt = 12;
		segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
	}
	segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
	    segkmem_heaplp_quantum);

	/* fix lp kmem preallocation request if necessary */
	if (segkmem_kmemlp_min) {
		segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum);
		if (segkmem_kmemlp_min > segkmem_kmemlp_max)
			segkmem_kmemlp_min = segkmem_kmemlp_max;
	}

	use_large_pages = 1;
	segkmem_lpszc = page_szc(segkmem_lpsize);
	segkmem_lpshift = page_get_shift(segkmem_lpszc);

#endif
	return (use_large_pages);
}

void
segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
{
	ASSERT(zio_mem_base != NULL);
	ASSERT(zio_mem_size != 0);

	zio_arena = vmem_create("zio", zio_mem_base, zio_mem_size, PAGESIZE,
	    NULL, NULL, NULL, 0, VM_SLEEP);

	zio_alloc_arena = vmem_create("zio_buf", NULL, 0, PAGESIZE,
	    segkmem_zio_alloc, segkmem_zio_free, zio_arena, 0, VM_SLEEP);

	ASSERT(zio_arena != NULL);
	ASSERT(zio_alloc_arena != NULL);
}

#ifdef __sparc

static void *
segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
	void   *addr;

	if (ppaquantum <= PAGESIZE)
		return (segkmem_alloc(vmp, size, vmflag));

	ASSERT((size & (ppaquantum - 1)) == 0);

	addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
	if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
	    segkmem_page_create, NULL) == NULL) {
		vmem_xfree(vmp, addr, size);
		addr = NULL;
	}

	return (addr);
}

static void
segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);

	ASSERT(addr != NULL);

	if (ppaquantum <= PAGESIZE) {
		segkmem_free(vmp, addr, size);
	} else {
		segkmem_free(NULL, addr, size);
		vmem_xfree(vmp, addr, size);
	}
}

void
segkmem_heap_lp_init()
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	size_t heap_lp_size = heap_lp_end - heap_lp_base;
	size_t lpsize = segkmem_lpsize;
	size_t ppaquantum;
	void   *addr;

	if (segkmem_lpsize <= PAGESIZE) {
		ASSERT(heap_lp_base == NULL);
		ASSERT(heap_lp_end == NULL);
		return;
	}

	ASSERT(segkmem_heaplp_quantum >= lpsize);
	ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
	ASSERT(lpcb->lp_uselp == 0);
	ASSERT(heap_lp_base != NULL);
	ASSERT(heap_lp_end != NULL);
	ASSERT(heap_lp_base < heap_lp_end);
	ASSERT(heap_lp_arena == NULL);
	ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
	ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);

	/* create large page heap arena */
	heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
	    segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);

	ASSERT(heap_lp_arena != NULL);

	/* This arena caches memory already mapped by large pages */
	kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
	    segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);

	ASSERT(kmem_lp_arena != NULL);

	mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * this arena is used for the array of page_t pointers necessary
	 * to call hat_memload_array
	 */
	ppaquantum = btopr(lpsize) * sizeof (page_t *);
	segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
	    segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
	    VM_SLEEP);

	ASSERT(segkmem_ppa_arena != NULL);

	/* preallocate some memory for the lp kernel heap */
	if (segkmem_kmemlp_min) {

		ASSERT(P2PHASE(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum) == 0);

		if ((addr = segkmem_alloc_lpi(heap_lp_arena,
		    segkmem_kmemlp_min, VM_SLEEP)) != NULL) {

			addr = vmem_add(kmem_lp_arena, addr,
			    segkmem_kmemlp_min, VM_SLEEP);
			ASSERT(addr != NULL);
		}
	}

	lpcb->lp_uselp = 1;
}

#endif