/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/vm.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dumphdr.h>
#include <sys/bootconf.h>
#include <sys/lgrp.h>
#include <vm/seg_kmem.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/vm_dep.h>
#include <vm/faultcode.h>
#include <sys/promif.h>
#include <vm/seg_kp.h>
#include <sys/bitmap.h>
#include <sys/mem_cage.h>
#include <sys/ivintr.h>
#include <sys/panic.h>

/*
 * seg_kmem is the primary kernel memory segment driver.  It
 * maps the kernel heap [kernelheap, ekernelheap), module text,
 * and all memory which was allocated before the VM was initialized
 * into kas.
 *
 * Pages which belong to seg_kmem are hashed into &kvp vnode at
 * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
 * They must never be paged out since segkmem_fault() is a no-op to
 * prevent recursive faults.
 *
 * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
 * __x86 and are unlocked (p_sharelock == 0) on __sparc.  Once __x86
 * supports relocation the #ifdef kludges can be removed.
 *
 * seg_kmem pages may be subject to relocation by page_relocate(),
 * provided that the HAT supports it; if this is so, segkmem_reloc
 * will be set to a nonzero value.  All boot time allocated memory as
 * well as static memory is considered off limits to relocation.
 * Pages are "relocatable" if p_state does not have P_NORELOC set, so
 * we request P_NORELOC pages for memory that isn't safe to relocate.
 *
 * The kernel heap is logically divided up into four pieces:
 *
 *	heap32_arena is for allocations that require 32-bit absolute
 *	virtual addresses (e.g. code that uses 32-bit pointers/offsets).
 *
 *	heap_core is for allocations that require 2GB *relative*
 *	offsets; in other words all memory from heap_core is within
 *	2GB of all other memory from the same arena.  This is a
 *	requirement of the addressing modes of some processors in
 *	supervisor code.
 *
 *	heap_arena is the general heap arena.
 *
 *	static_arena is the static memory arena.  Allocations from it
 *	are not subject to relocation so it is safe to use the memory
 *	physical address as well as the virtual address (e.g. the VA to
 *	PA translations are static).  Caches may import from static_arena;
 *	all other static memory allocations should use static_alloc_arena.
 *
 * On some platforms which have limited virtual address space, seg_kmem
 * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
 * segkp_bitmap is non-NULL, and each bit represents a page of virtual
 * address space which is actually seg_kp mapped.
 */

extern ulong_t *segkp_bitmap;	/* Is set if segkp is from the kernel heap */

char *kernelheap;		/* start of primary kernel heap */
char *ekernelheap;		/* end of primary kernel heap */
struct seg kvseg;		/* primary kernel heap segment */
struct seg kvseg_core;		/* "core" kernel heap segment */
struct seg kzioseg;		/* Segment for zio mappings */
vmem_t *heap_arena;		/* primary kernel heap arena */
vmem_t *heap_core_arena;	/* core kernel heap arena */
char *heap_core_base;		/* start of core kernel heap arena */
char *heap_lp_base;		/* start of kernel large page heap arena */
char *heap_lp_end;		/* end of kernel large page heap arena */
vmem_t *hat_memload_arena;	/* HAT translation data */
struct seg kvseg32;		/* 32-bit kernel heap segment */
vmem_t *heap32_arena;		/* 32-bit kernel heap arena */
vmem_t *heaptext_arena;		/* heaptext arena */
struct as kas;			/* kernel address space */
int segkmem_reloc;		/* enable/disable relocatable segkmem pages */
vmem_t *static_arena;		/* arena for caches to import static memory */
vmem_t *static_alloc_arena;	/* arena for allocating static memory */
vmem_t *zio_arena = NULL;	/* arena for allocating zio memory */
vmem_t *zio_alloc_arena = NULL;	/* arena for allocating zio memory */

/*
 * The seg_kmem driver can map part of the kernel heap with large pages.
 * Currently this functionality is implemented for sparc platforms only.
 *
 * The large page size "segkmem_lpsize" for the kernel heap is selected in
 * the platform specific code.  It can also be modified via the /etc/system
 * file.  Setting segkmem_lpsize to PAGESIZE in /etc/system disables usage
 * of large pages for the kernel heap.  "segkmem_lpshift" is adjusted
 * appropriately to match segkmem_lpsize.
 *
 * At boot time we carve a range of virtual addresses from the kernel heap
 * arena that will be used for large page mappings.  This range
 * [heap_lp_base, heap_lp_end) is set up as a separate vmem arena -
 * "heap_lp_arena".  We also create "kmem_lp_arena" that caches memory
 * already backed by large pages.  kmem_lp_arena imports virtual segments
 * from heap_lp_arena.
 */

size_t segkmem_lpsize;
static uint_t segkmem_lpshift = PAGESHIFT;
int segkmem_lpszc = 0;

size_t  segkmem_kmemlp_quantum = 0x400000;	/* 4MB */
size_t  segkmem_heaplp_quantum;
vmem_t *heap_lp_arena;
static  vmem_t *kmem_lp_arena;
static  vmem_t *segkmem_ppa_arena;
static	segkmem_lpcb_t segkmem_lpcb;

/*
 * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
 * consumed by the large page heap.  By default this parameter is set to 1/8
 * of physmem but can be adjusted through /etc/system either directly or
 * indirectly by setting "segkmem_kmemlp_pcnt" to the percent of physmem
 * we allow for the large page heap.
 */
size_t  segkmem_kmemlp_max;
static  uint_t  segkmem_kmemlp_pcnt;
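
/*
 * Illustrative sketch (condensed from what segkmem_heap_lp_init() sets up
 * later in this file, not an additional code path): the large page heap
 * described above layers two vmem arenas over the reserved
 * [heap_lp_base, heap_lp_end) range:
 *
 *	heap_lp_arena = vmem_create("heap_lp", heap_lp_base,
 *	    heap_lp_end - heap_lp_base, segkmem_heaplp_quantum,
 *	    NULL, NULL, NULL, 0, VM_SLEEP);
 *	kmem_lp_arena = vmem_create("kmem_lp", NULL, 0,
 *	    segkmem_kmemlp_quantum, segkmem_alloc_lpi, segkmem_free_lpi,
 *	    heap_lp_arena, 0, VM_SLEEP);
 *
 * kmem_default_arena and kmem_oversize_arena then import their spans
 * through segkmem_alloc_lp(), which prefers kmem_lp_arena and falls back
 * to regular pages when large pages cannot be obtained.
 */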

/*
 * Getting large pages for the kernel heap could be problematic due to
 * physical memory fragmentation.  That's why we allow preallocating
 * "segkmem_kmemlp_min" bytes at boot time.
 */
static size_t segkmem_kmemlp_min;

/*
 * Throttling is used to avoid expensive tries to allocate large pages
 * for the kernel heap when a lot of successive attempts to do so fail.
 */
static ulong_t segkmem_lpthrottle_max = 0x400000;
static ulong_t segkmem_lpthrottle_start = 0x40;
static ulong_t segkmem_use_lpthrottle = 1;

/*
 * Freed pages accumulate on a garbage list until segkmem is ready,
 * at which point we call segkmem_gc() to free it all.
 */
typedef struct segkmem_gc_list {
	struct segkmem_gc_list	*gc_next;
	vmem_t			*gc_arena;
	size_t			gc_size;
} segkmem_gc_list_t;

static segkmem_gc_list_t *segkmem_gc_list;

/*
 * Allocations from the hat_memload arena add VM_MEMLOAD to their
 * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
 * to take steps to prevent infinite recursion.  HAT allocations also
 * must be non-relocatable to prevent recursive page faults.
 */
static void *
hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
{
	flags |= (VM_MEMLOAD | VM_NORELOC);
	return (segkmem_alloc(vmp, size, flags));
}

/*
 * Allocations from static_arena (or any other arena that uses
 * segkmem_alloc_permanent()) require non-relocatable (permanently
 * wired) memory pages, since these pages are referenced by physical
 * as well as virtual address.
 */
void *
segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
{
	return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
}
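
/*
 * Illustrative sketch (hypothetical cache and buffer, not part of this
 * file): a kmem cache whose buffers are handed to hardware by physical
 * address would source its memory from static_arena, while one-off
 * allocations of the same kind would come from static_alloc_arena:
 *
 *	cp = kmem_cache_create("example_desc_cache",
 *	    sizeof (example_desc_t), 64, NULL, NULL, NULL, NULL,
 *	    static_arena, 0);
 *
 *	ring = vmem_alloc(static_alloc_arena, ringsize, VM_SLEEP);
 *
 * Both paths ultimately allocate through segkmem_alloc_permanent(), so
 * the backing pages are never candidates for relocation.
 */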

/*
 * Initialize kernel heap boundaries.
 */
void
kernelheap_init(
	void *heap_start,
	void *heap_end,
	char *first_avail,
	void *core_start,
	void *core_end)
{
	uintptr_t textbase;
	size_t core_size;
	size_t heap_size;
	vmem_t *heaptext_parent;
	size_t	heap_lp_size = 0;
#ifdef __sparc
	size_t kmem64_sz = kmem64_aligned_end - kmem64_base;
#endif	/* __sparc */

	kernelheap = heap_start;
	ekernelheap = heap_end;

#ifdef __sparc
	heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
	/*
	 * Bias heap_lp start address by kmem64_sz to reduce collisions
	 * in 4M kernel TSB between kmem64 area and heap_lp
	 */
	kmem64_sz = P2ROUNDUP(kmem64_sz, MMU_PAGESIZE256M);
	if (kmem64_sz <= heap_lp_size / 2)
		heap_lp_size -= kmem64_sz;
	heap_lp_base = ekernelheap - heap_lp_size;
	heap_lp_end = heap_lp_base + heap_lp_size;
#endif	/* __sparc */

	/*
	 * If this platform has a 'core' heap area, then the space for
	 * overflow module text should be carved out of the end of that
	 * heap.  Otherwise, it gets carved out of the general purpose
	 * heap.
	 */
	core_size = (uintptr_t)core_end - (uintptr_t)core_start;
	if (core_size > 0) {
		ASSERT(core_size >= HEAPTEXT_SIZE);
		textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
		core_size -= HEAPTEXT_SIZE;
	}
#ifndef __sparc
	else {
		ekernelheap -= HEAPTEXT_SIZE;
		textbase = (uintptr_t)ekernelheap;
	}
#endif

	heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
	heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
	    segkmem_alloc, segkmem_free);

	if (core_size > 0) {
		heap_core_arena = vmem_create("heap_core", core_start,
		    core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
		heap_core_base = core_start;
	} else {
		heap_core_arena = heap_arena;
		heap_core_base = kernelheap;
	}

	/*
	 * Reserve space for the large page heap.  If large pages for the
	 * kernel heap are enabled, the large page heap arena will be created
	 * later in the boot sequence in segkmem_heap_lp_init().  Otherwise
	 * the allocated range will be returned back to the heap_arena.
	 */
	if (heap_lp_size) {
		(void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
		    heap_lp_base, heap_lp_end,
		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
	}

	/*
	 * Remove the already-spoken-for memory range
	 * [kernelheap, first_avail).
	 */
	(void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
	    0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

#ifdef __sparc
	heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
	    SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
	    NULL, NULL, 0, VM_SLEEP);

	/*
	 * The prom claims the physical and virtual resources used by panicbuf
	 * and intr_vec_table.  So reserve space for panicbuf, intr_vec_table,
	 * and the reserved interrupt vector data structures from the 32-bit
	 * heap.
	 */
	(void) vmem_xalloc(heap32_arena, PANICBUFSIZE, PAGESIZE, 0, 0,
	    panicbuf, panicbuf + PANICBUFSIZE,
	    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

	(void) vmem_xalloc(heap32_arena, IVSIZE, PAGESIZE, 0, 0,
	    intr_vec_table, (caddr_t)intr_vec_table + IVSIZE,
	    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

	textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
	heaptext_parent = NULL;
#else	/* __sparc */
	heap32_arena = heap_core_arena;
	heaptext_parent = heap_core_arena;
#endif	/* __sparc */

	heaptext_arena = vmem_create("heaptext", (void *)textbase,
	    HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);

	/*
	 * Create a set of arenas for memory with static translations
	 * (e.g. VA -> PA translations cannot change).  Since using
	 * kernel pages by physical address implies it isn't safe to
	 * walk across page boundaries, the static_arena quantum must
	 * be PAGESIZE.  Any kmem caches that require static memory
	 * should source from static_arena, while direct allocations
	 * should only use static_alloc_arena.
	 */
	static_arena = vmem_create("static", NULL, 0, PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
	static_alloc_arena = vmem_create("static_alloc", NULL, 0,
	    sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
	    0, VM_SLEEP);

	/*
	 * Create an arena for translation data (ptes, hmes, or hblks).
	 * We need an arena for this because hat_memload() is essential
	 * to vmem_populate() (see comments in common/os/vmem.c).
	 *
	 * Note: any kmem cache that allocates from hat_memload_arena
	 * must be created as a KMC_NOHASH cache (i.e. no external slab
	 * and bufctl structures to allocate) so that slab creation doesn't
	 * require anything more than a single vmem_alloc().
	 */
	hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
	    hat_memload_alloc, segkmem_free, heap_arena, 0,
	    VM_SLEEP | VMC_POPULATOR | VMC_DUMPSAFE);
}
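
/*
 * Illustrative call (hypothetical and simplified; the real callers live in
 * the platform-specific startup code, and the names below are placeholders):
 * the platform discovers the heap boundaries and the first unused heap
 * address, then hands them in:
 *
 *	kernelheap_init(kernelheap_base, kernelheap_end, first_avail,
 *	    core_base, core_limit);
 *
 * Passing core_start == core_end means the platform has no separate 'core'
 * heap, in which case overflow module text is carved out of the general
 * purpose heap instead.
 */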

void
boot_mapin(caddr_t addr, size_t size)
{
	caddr_t	 eaddr;
	page_t	*pp;
	pfn_t	 pfnum;

	if (page_resv(btop(size), KM_NOSLEEP) == 0)
		panic("boot_mapin: page_resv failed");

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pfnum = va_to_pfn(addr);
		if (pfnum == PFN_INVALID)
			continue;
		if ((pp = page_numtopp_nolock(pfnum)) == NULL)
			panic("boot_mapin(): No pp for pfnum = %lx", pfnum);

		/*
		 * We must break up any large pages that may have
		 * constituent pages being utilized for BOP_ALLOC()'s
		 * before calling page_numtopp().  The locking code
		 * (i.e. page_reclaim()) can't handle them.
		 */
		if (pp->p_szc != 0)
			page_boot_demote(pp);

		pp = page_numtopp(pfnum, SE_EXCL);
		if (pp == NULL || PP_ISFREE(pp))
			panic("boot_alloc: pp is NULL or free");

		/*
		 * If the cage is on but doesn't yet contain this page,
		 * mark it as non-relocatable.
		 */
		if (kcage_on && !PP_ISNORELOC(pp)) {
			PP_SETNORELOC(pp);
			PLCNT_XFER_NORELOC(pp);
		}

		(void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		page_unlock(pp);
#endif
	}
}

/*
 * Get pages from boot and hash them into the kernel's vp.
 * Used after page structs have been allocated, but before segkmem is ready.
 */
void *
boot_alloc(void *inaddr, size_t size, uint_t align)
{
	caddr_t addr = inaddr;

	if (bootops == NULL)
		prom_panic("boot_alloc: attempt to allocate memory after "
		    "BOP_GONE");

	size = ptob(btopr(size));
#ifdef __sparc
	if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
		panic("boot_alloc: bop_alloc_chunk failed");
#else
	if (BOP_ALLOC(bootops, addr, size, align) != addr)
		panic("boot_alloc: BOP_ALLOC failed");
#endif
	boot_mapin((caddr_t)addr, size);
	return (addr);
}

static void
segkmem_badop()
{
	panic("segkmem_badop");
}

#define	SEGKMEM_BADOP(t)	(t(*)())segkmem_badop
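
/*
 * Illustrative sketch (hypothetical early-boot caller; prealloc_va and
 * tablesize are placeholders): before kvseg exists, a table that must live
 * at a known virtual address can be obtained directly from the boot
 * allocator and wired in:
 *
 *	table = boot_alloc(prealloc_va, tablesize, BO_NO_ALIGN);
 *
 * boot_alloc() asks boot for the backing pages, then boot_mapin() hashes
 * them into &kvp and locks them, so the memory behaves like any other
 * segkmem allocation once the VM is running.
 */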

/*ARGSUSED*/
static faultcode_t
segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
	enum fault_type type, enum seg_rw rw)
{
	pgcnt_t npages;
	spgcnt_t pg;
	page_t *pp;
	struct vnode *vp = seg->s_data;

	ASSERT(RW_READ_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_fault: bad args");

	/*
	 * If it is one of segkp pages, call segkp_fault.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));

	if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
		return (FC_NOSUPPORT);

	npages = btopr(size);

	switch (type) {
	case F_SOFTLOCK:	/* lock down already-loaded translations */
		for (pg = 0; pg < npages; pg++) {
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_SHARED);
			if (pp == NULL) {
				/*
				 * Hmm, no page.  Does a kernel mapping
				 * exist for it?
				 */
				if (!hat_probe(kas.a_hat, addr)) {
					addr -= PAGESIZE;
					while (--pg >= 0) {
						pp = page_find(vp, (u_offset_t)
						    (uintptr_t)addr);
						if (pp)
							page_unlock(pp);
						addr -= PAGESIZE;
					}
					return (FC_NOMAP);
				}
			}
			addr += PAGESIZE;
		}
		if (rw == S_OTHER)
			hat_reserve(seg->s_as, addr, size);
		return (0);
	case F_SOFTUNLOCK:
		while (npages--) {
			pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
			if (pp)
				page_unlock(pp);
			addr += PAGESIZE;
		}
		return (0);
	default:
		return (FC_NOSUPPORT);
	}
	/*NOTREACHED*/
}

static int
segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_setprot: bad args");

	/*
	 * If it is one of segkp pages, call segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_SETPROT(segkp, addr, size, prot));

	if (prot == 0)
		hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
	else
		hat_chgprot(kas.a_hat, addr, size, prot);
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_CHECKPROT(segkp, addr, size, prot));

	segkmem_badop();
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_KLUSTER(segkp, addr, delta));

	segkmem_badop();
	return (0);
}

static void
segkmem_xdump_range(void *arg, void *start, size_t size)
{
	struct as *as = arg;
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	while (addr < addr_end) {
		pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
		if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
			dump_addpage(as, addr, pfn);
		addr += PAGESIZE;
		dump_timeleft = dump_timeout;
	}
}

static void
segkmem_dump_range(void *arg, void *start, size_t size)
{
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	/*
	 * If we are about to start dumping the range of addresses we
	 * carved out of the kernel heap for the large page heap, walk
	 * heap_lp_arena to find what segments are actually populated.
	 */
	if (SEGKMEM_USE_LARGEPAGES &&
	    addr == heap_lp_base && addr_end == heap_lp_end &&
	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
		vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_xdump_range, arg);
	} else {
		segkmem_xdump_range(arg, start, size);
	}
}

static void
segkmem_dump(struct seg *seg)
{
	/*
	 * The kernel's heap_arena (represented by kvseg) is a very large
	 * VA space, most of which is typically unused.  To speed up dumping
	 * we use vmem_walk() to quickly find the pieces of heap_arena that
	 * are actually in use.  We do the same for heap32_arena and
	 * heap_core.
	 *
	 * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
	 * may ultimately need to allocate memory.  Reentrant walks are
	 * necessarily imperfect snapshots.  The kernel heap continues
	 * to change during a live crash dump, for example.  For a normal
	 * crash dump, however, we know that there won't be any other threads
	 * messing with the heap.  Therefore, at worst, we may fail to dump
	 * the pages that get allocated by the act of dumping; but we will
	 * always dump every page that was allocated when the walk began.
	 *
	 * The other segkmem segments are dense (fully populated), so there's
	 * no need to use this technique when dumping them.
	 *
	 * Note: when adding special dump handling for any new sparsely-
	 * populated segments, be sure to add similar handling to the ::kgrep
	 * code in mdb.
	 */
	if (seg == &kvseg) {
		vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
#ifndef __sparc
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
#endif
	} else if (seg == &kvseg_core) {
		vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kvseg32) {
		vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kzioseg) {
		/*
		 * We don't want to dump pages attached to kzioseg since they
		 * contain file data from ZFS.  If this page's segment is
		 * kzioseg return instead of writing it to the dump device.
		 */
		return;
	} else {
		segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
	}
}
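
/*
 * Illustrative sketch (hypothetical debugging helper, not part of the dump
 * path): the same vmem_walk() pattern used above can visit every allocated
 * span of a sparse arena, for example to log what portion of the large page
 * range is really in use:
 *
 *	static void
 *	log_span(void *arg, void *start, size_t size)
 *	{
 *		cmn_err(CE_CONT, "lp span %p size %lu\n", start, size);
 *	}
 *
 *	vmem_walk(heap_lp_arena, VMEM_ALLOC, log_span, NULL);
 *
 * VMEM_REENTRANT is only needed when, as with dump_addpage() above, the
 * callback may itself need to allocate memory while the walk is running.
 */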

/*
 * lock/unlock kmem pages over a given range [addr, addr+len).
 * Returns a shadow list of pages in ppp.  If there are holes
 * in the range (e.g. some of the kernel mappings do not have
 * underlying page_ts) returns ENOTSUP so that as_pagelock()
 * will handle the range via as_fault(F_SOFTLOCK).
 */
/*ARGSUSED*/
static int
segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
	page_t ***ppp, enum lock_type type, enum seg_rw rw)
{
	page_t **pplist, *pp;
	pgcnt_t npages;
	spgcnt_t pg;
	size_t nb;
	struct vnode *vp = seg->s_data;

	ASSERT(ppp != NULL);

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_PAGELOCK(segkp, addr, len, ppp, type, rw));

	npages = btopr(len);
	nb = sizeof (page_t *) * npages;

	if (type == L_PAGEUNLOCK) {
		pplist = *ppp;
		ASSERT(pplist != NULL);

		for (pg = 0; pg < npages; pg++) {
			pp = pplist[pg];
			page_unlock(pp);
		}
		kmem_free(pplist, nb);
		return (0);
	}

	ASSERT(type == L_PAGELOCK);

	pplist = kmem_alloc(nb, KM_NOSLEEP);
	if (pplist == NULL) {
		*ppp = NULL;
		return (ENOTSUP);	/* take the slow path */
	}

	for (pg = 0; pg < npages; pg++) {
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
		if (pp == NULL) {
			while (--pg >= 0)
				page_unlock(pplist[pg]);
			kmem_free(pplist, nb);
			*ppp = NULL;
			return (ENOTSUP);
		}
		pplist[pg] = pp;
		addr += PAGESIZE;
	}

	*ppp = pplist;
	return (0);
}
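
/*
 * Illustrative sketch (hypothetical caller): as_pagelock() is the usual
 * consumer of the shadow list built above.  A caller doing I/O against a
 * kernel heap buffer would look roughly like:
 *
 *	if (as_pagelock(&kas, &pplist, buf, len, S_WRITE) == 0) {
 *		... perform I/O against pplist ...
 *		as_pageunlock(&kas, pplist, buf, len, S_WRITE);
 *	}
 *
 * When this routine returns ENOTSUP (a hole in the range, or the KM_NOSLEEP
 * allocation of the shadow list failed), as_pagelock() falls back to
 * as_fault(F_SOFTLOCK) and the pages are locked the slow way.
 */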

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_GETMEMID(segkp, addr, memidp));

	segkmem_badop();
	return (0);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segkmem_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segkmem_capable(struct seg *seg, segcapability_t capability)
{
	if (capability == S_CAPABILITY_NOMINFLT)
		return (1);
	return (0);
}

static struct seg_ops segkmem_ops = {
	SEGKMEM_BADOP(int),		/* dup */
	SEGKMEM_BADOP(int),		/* unmap */
	SEGKMEM_BADOP(void),		/* free */
	segkmem_fault,
	SEGKMEM_BADOP(faultcode_t),	/* faulta */
	segkmem_setprot,
	segkmem_checkprot,
	segkmem_kluster,
	SEGKMEM_BADOP(size_t),		/* swapout */
	SEGKMEM_BADOP(int),		/* sync */
	SEGKMEM_BADOP(size_t),		/* incore */
	SEGKMEM_BADOP(int),		/* lockop */
	SEGKMEM_BADOP(int),		/* getprot */
	SEGKMEM_BADOP(u_offset_t),	/* getoffset */
	SEGKMEM_BADOP(int),		/* gettype */
	SEGKMEM_BADOP(int),		/* getvp */
	SEGKMEM_BADOP(int),		/* advise */
	segkmem_dump,
	segkmem_pagelock,
	SEGKMEM_BADOP(int),		/* setpgsz */
	segkmem_getmemid,
	segkmem_getpolicy,		/* getpolicy */
	segkmem_capable,		/* capable */
};

int
segkmem_zio_create(struct seg *seg)
{
	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
	seg->s_ops = &segkmem_ops;
	seg->s_data = &zvp;
	kas.a_size += seg->s_size;
	return (0);
}

int
segkmem_create(struct seg *seg)
{
	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
	seg->s_ops = &segkmem_ops;
	seg->s_data = &kvp;
	kas.a_size += seg->s_size;
	return (0);
}
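
/*
 * Illustrative sketch (simplified from platform startup code, which is where
 * the real callers live): kernel heap segments are attached to kas and then
 * initialized with segkmem_create() while kas.a_lock is write held, e.g.:
 *
 *	rw_enter(&kas.a_lock, RW_WRITER);
 *	(void) seg_attach(&kas, kernelheap,
 *	    ekernelheap - kernelheap, &kvseg);
 *	(void) segkmem_create(&kvseg);
 *	rw_exit(&kas.a_lock);
 *
 * segkmem_zio_create() is used the same way for kzioseg, except that its
 * pages are hashed into &zvp rather than &kvp.
 */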

/*ARGSUSED*/
page_t *
segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
{
	struct seg kseg;
	int pgflags;
	struct vnode *vp = arg;

	if (vp == NULL)
		vp = &kvp;

	kseg.s_as = &kas;
	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if ((vmflag & VM_NOSLEEP) == 0)
		pgflags |= PG_WAIT;
	if (vmflag & VM_PANIC)
		pgflags |= PG_PANIC;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;

	return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kseg, addr));
}

/*
 * Allocate pages to back the virtual address range [addr, addr + size).
 * If addr is NULL, allocate the virtual address space as well.
 */
void *
segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
	page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
{
	page_t *ppl;
	caddr_t addr = inaddr;
	pgcnt_t npages = btopr(size);
	int allocflag;

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		return (NULL);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		return (NULL);
	}

	ppl = page_create_func(addr, size, vmflag, pcarg);
	if (ppl == NULL) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	/*
	 * Under certain conditions, we need to let the HAT layer know
	 * that it cannot safely allocate memory.  Allocations from
	 * the hat_memload vmem arena always need this, to prevent
	 * infinite recursion.
	 *
	 * In addition, the x86 hat cannot safely do memory
	 * allocations while in vmem_populate(), because there
	 * is no simple bound on its usage.
	 */
	if (vmflag & VM_MEMLOAD)
		allocflag = HAT_NO_KALLOC;
#if defined(__x86)
	else if (vmem_is_populator())
		allocflag = HAT_NO_KALLOC;
#endif
	else
		allocflag = 0;

	while (ppl != NULL) {
		page_t *pp = ppl;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		ASSERT(PAGE_EXCL(pp));
		page_io_unlock(pp);
		hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
		    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK | allocflag);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		if (vmflag & SEGKMEM_SHARELOCKED)
			page_downgrade(pp);
		else
			page_unlock(pp);
#endif
	}

	return (addr);
}

static void *
segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
{
	void *addr;
	segkmem_gc_list_t *gcp, **prev_gcpp;

	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
#ifndef __sparc
		if (bootops->bsys_alloc == NULL)
			halt("Memory allocation between bop_alloc() and "
			    "kmem_alloc().\n");
#endif

		/*
		 * There's not a lot of memory to go around during boot,
		 * so recycle it if we can.
		 */
		for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
		    prev_gcpp = &gcp->gc_next) {
			if (gcp->gc_arena == vmp && gcp->gc_size == size) {
				*prev_gcpp = gcp->gc_next;
				return (gcp);
			}
		}

		addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
		if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
			panic("segkmem_alloc: boot_alloc failed");
		return (addr);
	}
	return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create, vp));
}

void *
segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
}

void *
segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
}
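
/*
 * Illustrative sketch (hypothetical arena and buffer names): subsystems that
 * want their own chunk of wired kernel heap typically layer a vmem arena
 * over segkmem_alloc()/segkmem_free() and let it import from heap_arena, the
 * same way static_arena and hat_memload_arena are built in this file:
 *
 *	my_arena = vmem_create("example_arena", NULL, 0, PAGESIZE,
 *	    segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
 *	buf = vmem_alloc(my_arena, P2ROUNDUP(len, PAGESIZE), VM_SLEEP);
 *
 * The import path allocates VA from heap_arena, backs it with pages via
 * segkmem_page_create(), and loads locked translations, exactly as
 * segkmem_xalloc() above does.
 */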

/*
 * Any changes to this routine must also be carried over to
 * devmap_free_pages() in the seg_dev driver.  This is because
 * we currently don't have a special kernel segment for non-paged
 * kernel memory that is exported by drivers to user space.
 */
static void
segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
	void (*func)(page_t *))
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
		segkmem_gc_list_t *gc = inaddr;
		gc->gc_arena = vmp;
		gc->gc_size = size;
		gc->gc_next = segkmem_gc_list;
		segkmem_gc_list = gc;
		return;
	}

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
#if defined(__x86)
		pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL)
			panic("segkmem_free: page not found");
		if (!page_tryupgrade(pp)) {
			/*
			 * Some other thread has a sharelock.  Wait for
			 * it to drop the lock so we can free this page.
			 */
			page_unlock(pp);
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
		}
#else
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
#endif
		if (pp == NULL)
			panic("segkmem_free: page not found");
		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
		pp->p_lckcnt = 0;
		if (func)
			func(pp);
		else
			page_destroy(pp, 0);
	}
	if (func == NULL)
		page_unresv(npages);

	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);
}

void
segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, void (*func)(page_t *))
{
	segkmem_free_vn(vmp, inaddr, size, &kvp, func);
}

void
segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
{
	segkmem_free_vn(vmp, inaddr, size, &kvp, NULL);
}

void
segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
{
	segkmem_free_vn(vmp, inaddr, size, &zvp, NULL);
}

void
segkmem_gc(void)
{
	ASSERT(kvseg.s_base != NULL);
	while (segkmem_gc_list != NULL) {
		segkmem_gc_list_t *gc = segkmem_gc_list;
		segkmem_gc_list = gc->gc_next;
		segkmem_free(gc->gc_arena, gc, gc->gc_size);
	}
}

/*
 * Legacy entry points from here to end of file.
 */
void
segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
    pfn_t pfn, uint_t flags)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
	hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
	    flags | HAT_LOAD_LOCK);
}

void
segkmem_mapout(struct seg *seg, void *addr, size_t size)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
}

void *
kmem_getpages(pgcnt_t npages, int kmflag)
{
	return (kmem_alloc(ptob(npages), kmflag));
}

void
kmem_freepages(void *addr, pgcnt_t npages)
{
	kmem_free(addr, ptob(npages));
}
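
/*
 * Illustrative sketch (hypothetical caller): the legacy interfaces above
 * simply convert a page count to bytes, so
 *
 *	buf = kmem_getpages(4, KM_SLEEP);
 *	...
 *	kmem_freepages(buf, 4);
 *
 * is equivalent to kmem_alloc(ptob(4), KM_SLEEP) followed by
 * kmem_free(buf, ptob(4)); new code should use kmem_alloc() directly.
 */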

/*
 * segkmem_page_create_large() allocates a large page to be used for the kmem
 * caches.  If kpr is enabled we ask for a relocatable page unless requested
 * otherwise.  If kpr is disabled we have to ask for a non-reloc page.
 */
static page_t *
segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
{
	int pgflags;

	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if (!(vmflag & VM_NOSLEEP))
		pgflags |= PG_WAIT;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;

	return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kvseg, addr, arg));
}

/*
 * Allocate a large page to back the virtual address range
 * [addr, addr + size).  If addr is NULL, allocate the virtual address
 * space as well.
 */
static void *
segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
    uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
    void *pcarg)
{
	caddr_t addr = inaddr, pa;
	size_t  lpsize = segkmem_lpsize;
	pgcnt_t npages = btopr(size);
	pgcnt_t nbpages = btop(lpsize);
	pgcnt_t nlpages = size >> segkmem_lpshift;
	size_t  ppasize = nbpages * sizeof (page_t *);
	page_t *pp, *rootpp, **ppa, *pplist = NULL;
	int i;

	vmflag |= VM_NOSLEEP;

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		return (NULL);
	}

	/*
	 * Allocate an array we need for hat_memload_array().
	 * We use a separate arena to avoid recursion.
	 * We will not need this array when hat_memload_array() learns pp++.
	 */
	if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
		goto fail_array_alloc;
	}

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		goto fail_vmem_alloc;

	ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);

	/* create all the pages */
	for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
		if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
			goto fail_page_create;
		page_list_concat(&pplist, &pp);
	}

	/* at this point we have all the resource to complete the request */
	while ((rootpp = pplist) != NULL) {
		for (i = 0; i < nbpages; i++) {
			ASSERT(pplist != NULL);
			pp = pplist;
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
			ppa[i] = pp;
		}
		/*
		 * Load the locked entry.  It's OK to preload the entry into the
		 * TSB since we now support large mappings in the kernel TSB.
		 */
		hat_memload_array(kas.a_hat,
		    (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
		    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK);

		for (--i; i >= 0; --i) {
			ppa[i]->p_lckcnt = 1;
			page_unlock(ppa[i]);
		}
	}

	vmem_free(segkmem_ppa_arena, ppa, ppasize);
	return (addr);

fail_page_create:
	while ((rootpp = pplist) != NULL) {
		for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
			ASSERT(pp != NULL);
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
		}
		page_destroy_pages(rootpp);
	}

	if (inaddr == NULL)
		vmem_free(vmp, addr, size);

fail_vmem_alloc:
	vmem_free(segkmem_ppa_arena, ppa, ppasize);

fail_array_alloc:
	page_unresv(npages);

	return (NULL);
}

static void
segkmem_free_one_lp(caddr_t addr, size_t size)
{
	page_t		*pp, *rootpp = NULL;
	pgcnt_t 	pgs_left = btopr(size);

	ASSERT(size == segkmem_lpsize);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
		if (pp == NULL)
			panic("segkmem_free_one_lp: page not found");
		ASSERT(PAGE_EXCL(pp));
		pp->p_lckcnt = 0;
		if (rootpp == NULL)
			rootpp = pp;
	}
	ASSERT(rootpp != NULL);
	page_destroy_pages(rootpp);

	/* page_unresv() is done by the caller */
}

/*
 * This function is called to import new spans into the vmem arenas like
 * kmem_default_arena and kmem_oversize_arena.  It first tries to import
 * spans from the large page arena, kmem_lp_arena.  In order to do this it
 * might have to "upgrade" the requested size to the kmem_lp_arena quantum.
 * If it is not able to satisfy the upgraded request, it falls back to the
 * regular segkmem_alloc(), which satisfies the request by importing from
 * the "*vmp" arena.
 */
/*ARGSUSED*/
void *
segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
	size_t size;
	kthread_t *t = curthread;
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;

	ASSERT(sizep != NULL);

	size = *sizep;

	if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
	    !(vmflag & SEGKMEM_SHARELOCKED)) {

		size_t kmemlp_qnt = segkmem_kmemlp_quantum;
		size_t asize = P2ROUNDUP(size, kmemlp_qnt);
		void *addr = NULL;
		ulong_t *lpthrtp = &lpcb->lp_throttle;
		ulong_t lpthrt = *lpthrtp;
		int	dowakeup = 0;
		int	doalloc = 1;

		ASSERT(kmem_lp_arena != NULL);
		ASSERT(asize >= size);

		if (lpthrt != 0) {
			/* try to update the throttle value */
			lpthrt = atomic_add_long_nv(lpthrtp, 1);
			if (lpthrt >= segkmem_lpthrottle_max) {
				lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
				    segkmem_lpthrottle_max / 4);
			}

			/*
			 * When we get above the throttle start, do an
			 * exponential backoff on trying large pages
			 * and reaping.
			 */
			if (lpthrt > segkmem_lpthrottle_start &&
			    (lpthrt & (lpthrt - 1))) {
				lpcb->allocs_throttled++;
				lpthrt--;
				if ((lpthrt & (lpthrt - 1)) == 0)
					kmem_reap();
				return (segkmem_alloc(vmp, size, vmflag));
			}
		}

		if (!(vmflag & VM_NOSLEEP) &&
		    segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
		    vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
		    asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {

			/*
			 * We are low on free memory in kmem_lp_arena,
			 * so we let only one thread allocate a heap_lp
			 * quantum sized chunk that everybody is going
			 * to share.
			 */
			mutex_enter(&lpcb->lp_lock);

			if (lpcb->lp_wait) {

				/* we are not the first one - wait */
				cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
				if (vmem_size(kmem_lp_arena, VMEM_FREE) <
				    kmemlp_qnt) {
					doalloc = 0;
				}
			} else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
			    kmemlp_qnt) {

				/*
				 * We are the first one; make sure we
				 * import a large page.
				 */
				if (asize == kmemlp_qnt)
					asize += kmemlp_qnt;
				dowakeup = 1;
				lpcb->lp_wait = 1;
			}

			mutex_exit(&lpcb->lp_lock);
		}

		/*
		 * The VM_ABORT flag prevents sleeps in vmem_xalloc() when
		 * large pages are not available.  In that case this
		 * allocation attempt will fail and we will retry the
		 * allocation with small pages.  We also do not want to
		 * panic if this allocation fails because we are going
		 * to retry.
		 */
		if (doalloc) {
			addr = vmem_alloc(kmem_lp_arena, asize,
			    (vmflag | VM_ABORT) & ~VM_PANIC);

			if (dowakeup) {
				mutex_enter(&lpcb->lp_lock);
				ASSERT(lpcb->lp_wait != 0);
				lpcb->lp_wait = 0;
				cv_broadcast(&lpcb->lp_cv);
				mutex_exit(&lpcb->lp_lock);
			}
		}

		if (addr != NULL) {
			*sizep = asize;
			*lpthrtp = 0;
			return (addr);
		}

		if (vmflag & VM_NOSLEEP)
			lpcb->nosleep_allocs_failed++;
		else
			lpcb->sleep_allocs_failed++;
		lpcb->alloc_bytes_failed += size;

		/* if large page throttling is not started yet, start it */
		if (segkmem_use_lpthrottle && lpthrt == 0) {
			lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
		}
	}
	return (segkmem_alloc(vmp, size, vmflag));
}

void
segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
{
	if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
		segkmem_free(vmp, inaddr, size);
	} else {
		vmem_free(kmem_lp_arena, inaddr, size);
	}
}

/*
 * segkmem_alloc_lpi() imports virtual memory from the large page heap arena
 * into the kmem_lp arena.  In the process it maps the imported segment with
 * large pages.
 */
static void *
segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	void *addr;

	ASSERT(size != 0);
	ASSERT(vmp == heap_lp_arena);

	/* do not allow the large page heap to grow beyond its limit */
	if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
		lpcb->allocs_limited++;
		return (NULL);
	}

	addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create_large, NULL);
	return (addr);
}

/*
 * segkmem_free_lpi() returns virtual memory back into the large page heap
 * arena from the kmem_lp arena.  Before doing this it unmaps the segment and
 * frees the large pages used to map it.
 */
static void
segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
{
	pgcnt_t		nlpages = size >> segkmem_lpshift;
	size_t		lpsize = segkmem_lpsize;
	caddr_t		addr = inaddr;
	pgcnt_t 	npages = btopr(size);
	int		i;

	ASSERT(vmp == heap_lp_arena);
	ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
	ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);

	for (i = 0; i < nlpages; i++) {
		segkmem_free_one_lp(addr, lpsize);
		addr += lpsize;
	}

	page_unresv(npages);

	vmem_free(vmp, inaddr, size);
}

/*
 * This function is called at system boot time by kmem_init() right after
 * the /etc/system file has been read.  Based on the hardware configuration
 * and the /etc/system settings it decides whether the system is going to
 * use large pages.  The initialization necessary to actually start using
 * large pages happens later in the boot process, after
 * segkmem_heap_lp_init() is called.
 */
int
segkmem_lpsetup()
{
	int use_large_pages = 0;

#ifdef __sparc

	size_t memtotal = physmem * PAGESIZE;

	if (heap_lp_base == NULL) {
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* get a platform dependent value of large page size for kernel heap */
	segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);

	if (segkmem_lpsize <= PAGESIZE) {
		/*
		 * Put the virtual space reserved for the large page
		 * kernel heap back into the regular heap.
		 */
		vmem_xfree(heap_arena, heap_lp_base,
		    heap_lp_end - heap_lp_base);
		heap_lp_base = NULL;
		heap_lp_end = NULL;
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* set heap_lp quantum if necessary */
	if (segkmem_heaplp_quantum == 0 ||
	    (segkmem_heaplp_quantum & (segkmem_heaplp_quantum - 1)) ||
	    P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
		segkmem_heaplp_quantum = segkmem_lpsize;
	}

	/* set kmem_lp quantum if necessary */
	if (segkmem_kmemlp_quantum == 0 ||
	    (segkmem_kmemlp_quantum & (segkmem_kmemlp_quantum - 1)) ||
	    segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
		segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
	}

	/* set total amount of memory allowed for large page kernel heap */
	if (segkmem_kmemlp_max == 0) {
		if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
			segkmem_kmemlp_pcnt = 12;
		segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
	}
	segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
	    segkmem_heaplp_quantum);

	/* fix lp kmem preallocation request if necessary */
	if (segkmem_kmemlp_min) {
		segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum);
		if (segkmem_kmemlp_min > segkmem_kmemlp_max)
			segkmem_kmemlp_min = segkmem_kmemlp_max;
	}

	use_large_pages = 1;
	segkmem_lpszc = page_szc(segkmem_lpsize);
	segkmem_lpshift = page_get_shift(segkmem_lpszc);

#endif
	return (use_large_pages);
}

void
segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
{
	ASSERT(zio_mem_base != NULL);
	ASSERT(zio_mem_size != 0);

	zio_arena = vmem_create("zfs_file_data", zio_mem_base, zio_mem_size,
	    PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);

	zio_alloc_arena = vmem_create("zfs_file_data_buf", NULL, 0, PAGESIZE,
	    segkmem_zio_alloc, segkmem_zio_free, zio_arena, 0, VM_SLEEP);

	ASSERT(zio_arena != NULL);
	ASSERT(zio_alloc_arena != NULL);
}
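
/*
 * Illustrative sketch (hypothetical caller; real consumers layer kmem caches
 * on top of this arena): ZFS file data buffers come from zio_alloc_arena,
 * which imports page-sized spans from the zio_arena VA range set up above:
 *
 *	buf = vmem_alloc(zio_alloc_arena, PAGESIZE, VM_SLEEP);
 *	...
 *	vmem_free(zio_alloc_arena, buf, PAGESIZE);
 *
 * Keeping this data under its own vnode (&zvp) and segment (kzioseg) is
 * what lets segkmem_dump() skip it when writing a crash dump.
 */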

#ifdef __sparc

static void *
segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
	void   *addr;

	if (ppaquantum <= PAGESIZE)
		return (segkmem_alloc(vmp, size, vmflag));

	ASSERT((size & (ppaquantum - 1)) == 0);

	addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
	if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
	    segkmem_page_create, NULL) == NULL) {
		vmem_xfree(vmp, addr, size);
		addr = NULL;
	}

	return (addr);
}

static void
segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);

	ASSERT(addr != NULL);

	if (ppaquantum <= PAGESIZE) {
		segkmem_free(vmp, addr, size);
	} else {
		segkmem_free(NULL, addr, size);
		vmem_xfree(vmp, addr, size);
	}
}

void
segkmem_heap_lp_init()
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	size_t heap_lp_size = heap_lp_end - heap_lp_base;
	size_t lpsize = segkmem_lpsize;
	size_t ppaquantum;
	void   *addr;

	if (segkmem_lpsize <= PAGESIZE) {
		ASSERT(heap_lp_base == NULL);
		ASSERT(heap_lp_end == NULL);
		return;
	}

	ASSERT(segkmem_heaplp_quantum >= lpsize);
	ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
	ASSERT(lpcb->lp_uselp == 0);
	ASSERT(heap_lp_base != NULL);
	ASSERT(heap_lp_end != NULL);
	ASSERT(heap_lp_base < heap_lp_end);
	ASSERT(heap_lp_arena == NULL);
	ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
	ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);

	/* create large page heap arena */
	heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
	    segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);

	ASSERT(heap_lp_arena != NULL);

	/* This arena caches memory already mapped by large pages */
	kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
	    segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);

	ASSERT(kmem_lp_arena != NULL);

	mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * This arena is used for the arrays of page_t pointers necessary
	 * to call hat_memload_array().
	 */
	ppaquantum = btopr(lpsize) * sizeof (page_t *);
	segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
	    segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
	    VM_SLEEP);

	ASSERT(segkmem_ppa_arena != NULL);

	/* preallocate some memory for the lp kernel heap */
	if (segkmem_kmemlp_min) {

		ASSERT(P2PHASE(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum) == 0);

		if ((addr = segkmem_alloc_lpi(heap_lp_arena,
		    segkmem_kmemlp_min, VM_SLEEP)) != NULL) {

			addr = vmem_add(kmem_lp_arena, addr,
			    segkmem_kmemlp_min, VM_SLEEP);
			ASSERT(addr != NULL);
		}
	}

	lpcb->lp_uselp = 1;
}

#endif
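
/*
 * Illustrative summary (assumptions about the callers, which live in kmem
 * and platform startup code rather than in this file): the large page heap
 * is brought up in two stages.
 *
 *	(void) segkmem_lpsetup();	decide segkmem_lpsize and the quanta,
 *					right after /etc/system is read
 *	...
 *	segkmem_heap_lp_init();		create heap_lp_arena/kmem_lp_arena,
 *					preallocate segkmem_kmemlp_min, and
 *					set lpcb->lp_uselp
 *
 * Only after the second stage does segkmem_alloc_lp() start preferring
 * kmem_lp_arena for kernel heap imports.
 */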