/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/vm.h>
#include <sys/exec.h>
#include <sys/cmn_err.h>
#include <sys/cpu_module.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>
#include <sys/error.h>
#include <sys/machsystm.h>
#include <vm/seg_kmem.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/promif.h>
#include <sys/hsvc.h>

uint_t page_colors = 0;
uint_t page_colors_mask = 0;
uint_t page_coloring_shift = 0;
int consistent_coloring;
int update_proc_pgcolorbase_after_fork = 1;

uint_t mmu_page_sizes = MMU_PAGE_SIZES;
uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
uint_t mmu_hashcnt = MAX_HASHCNT;
uint_t max_mmu_hashcnt = MAX_HASHCNT;
size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;

/*
 * A bitmask of the page sizes supported by hardware based upon szc.
 * The base pagesize (p_szc == 0) must always be supported by the hardware.
 */
int mmu_exported_pagesize_mask;
uint_t mmu_exported_page_sizes;

uint_t szc_2_userszc[MMU_PAGE_SIZES];
uint_t userszc_2_szc[MMU_PAGE_SIZES];

extern uint_t vac_colors_mask;
extern int vac_shift;

hw_pagesize_t hw_page_array[] = {
	{MMU_PAGESIZE, MMU_PAGESHIFT, 0, MMU_PAGESIZE >> MMU_PAGESHIFT},
	{MMU_PAGESIZE64K, MMU_PAGESHIFT64K, 0,
	    MMU_PAGESIZE64K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE512K, MMU_PAGESHIFT512K, 0,
	    MMU_PAGESIZE512K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE4M, MMU_PAGESHIFT4M, 0, MMU_PAGESIZE4M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE32M, MMU_PAGESHIFT32M, 0,
	    MMU_PAGESIZE32M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE256M, MMU_PAGESHIFT256M, 0,
	    MMU_PAGESIZE256M >> MMU_PAGESHIFT},
	{0, 0, 0, 0}
};

/*
 * Maximum page size used to map 64-bit memory segment kmem64_base..kmem64_end
 */
int max_bootlp_tteszc = TTE256M;

/*
 * Maximum and default segment size tunables for user heap, stack, private
 * and shared anonymous memory, and user text and initialized data.
 */
size_t max_uheap_lpsize = MMU_PAGESIZE64K;
size_t default_uheap_lpsize = MMU_PAGESIZE64K;
size_t max_ustack_lpsize = MMU_PAGESIZE64K;
size_t default_ustack_lpsize = MMU_PAGESIZE64K;
size_t max_privmap_lpsize = MMU_PAGESIZE64K;
size_t max_uidata_lpsize = MMU_PAGESIZE64K;
size_t max_utext_lpsize = MMU_PAGESIZE4M;
size_t max_shm_lpsize = MMU_PAGESIZE4M;

/*
 * Contiguous memory allocator data structures and variables.
 *
 * The sun4v kernel must provide a means to allocate physically
 * contiguous, non-relocatable memory. The contig_mem_arena
 * and contig_mem_slab_arena exist for this purpose. Allocations
 * that require physically contiguous non-relocatable memory should
 * be made using contig_mem_alloc() or contig_mem_alloc_align()
 * which return memory from contig_mem_arena or contig_mem_reloc_arena.
 * These arenas import memory from the contig_mem_slab_arena one
 * contiguous chunk at a time.
 *
 * When importing slabs, an attempt is made to allocate a large page
 * to use as backing. As a result of the non-relocatable requirement,
 * slabs are allocated from the kernel cage freelists. If the cage does
 * not contain any free contiguous chunks large enough to satisfy the
 * slab allocation, the slab size will be downsized and the operation
 * retried. Large slab sizes are tried first to minimize cage
 * fragmentation. If the slab allocation still fails, the slab is
 * allocated from outside the kernel cage. This is undesirable because,
 * until slabs are freed, it results in non-relocatable chunks scattered
 * throughout physical memory.
 *
 * Allocations from the contig_mem_arena are backed by slabs from the
 * cage. Allocations from the contig_mem_reloc_arena are backed by
 * slabs allocated outside the cage. Slabs are left share locked while
 * in use to prevent non-cage slabs from being relocated.
 *
 * Since there is no guarantee that large pages will be available in
 * the kernel cage, contiguous memory is reserved and added to the
 * contig_mem_arena at boot time, making it available for later
 * contiguous memory allocations. This reserve will be used to satisfy
 * contig_mem allocations first and it is only when the reserve is
 * completely allocated that new slabs will need to be imported.
 */
static vmem_t *contig_mem_slab_arena;
static vmem_t *contig_mem_arena;
static vmem_t *contig_mem_reloc_arena;
static kmutex_t contig_mem_lock;
static kmutex_t contig_mem_sleep_lock;
#define	CONTIG_MEM_ARENA_QUANTUM	64
#define	CONTIG_MEM_SLAB_ARENA_QUANTUM	MMU_PAGESIZE64K

/* contig_mem_arena import slab sizes, in decreasing size order */
static size_t contig_mem_import_sizes[] = {
	MMU_PAGESIZE4M,
	MMU_PAGESIZE512K,
	MMU_PAGESIZE64K
};
#define	NUM_IMPORT_SIZES	\
	(sizeof (contig_mem_import_sizes) / sizeof (size_t))
static size_t contig_mem_import_size_max = MMU_PAGESIZE4M;
size_t contig_mem_slab_size = MMU_PAGESIZE4M;

/* Boot-time allocated buffer to pre-populate the contig_mem_arena */
static size_t contig_mem_prealloc_size;
static void *contig_mem_prealloc_buf;
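
/*
 * Editor's sketch of the arena stacking described above (illustrative
 * only; see contig_mem_init() below for the actual vmem_xcreate()
 * calls):
 *
 *	heap_arena (VA)
 *	  +-> contig_mem_slab_arena  (slab-size aligned VA spans)
 *	        +-> contig_mem_arena        (cage-backed, VM_NORELOC)
 *	        +-> contig_mem_reloc_arena  (backed outside the cage)
 *
 * contig_mem_alloc() and contig_mem_alloc_align() try contig_mem_arena
 * first and fall back to contig_mem_reloc_arena (or, for requests no
 * larger than a page, to static_alloc_arena).
 */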

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user. We will pick an address
 * range which is just below the current stack limit. The
 * algorithm used for cache consistency on machines with virtual
 * address caches is such that offset 0 in the vnode is always
 * on a shm_alignment'ed aligned address. Unfortunately, this
 * means that vnodes which are demand paged will not be mapped
 * cache consistently with the executable images. When the
 * cache alignment for a given object is inconsistent, the
 * lower level code must manage the translations so that this
 * is not seen here (at the cost of efficiency, of course).
 *
 * Every mapping will have a redzone of a single page on either side of
 * the request. This is done to leave one page unmapped between segments.
 * This is not required, but it's useful for the user because if their
 * program strays across a segment boundary, it will catch a fault
 * immediately, making debugging a little easier. Currently the redzone
 * is mandatory.
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion. For MAP_ALIGN, addrp contains the
 *	minimal alignment, which must be some "power of two" multiple of
 *	pagesize.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space or else an address that is currently
 *	not mapped for len bytes with a page of red zone on either side.
 *	If vacalign is true, then the selected address will obey the
 *	alignment constraints of a vac machine based on the given off value.
 */
/*ARGSUSED3*/
void
map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
    caddr_t userlimit, struct proc *p, uint_t flags)
{
	struct as *as = p->p_as;
	caddr_t addr;
	caddr_t base;
	size_t slen;
	uintptr_t align_amount;
	int allow_largepage_alignment = 1;

	base = p->p_brkbase;
	if (userlimit < as->a_userlimit) {
		/*
		 * This happens when a program wants to map something in
		 * a range that's accessible to a program in a smaller
		 * address space. For example, a 64-bit program might
		 * be calling mmap32(2) to guarantee that the returned
		 * address is below 4Gbytes.
		 */
		ASSERT(userlimit > base);
		slen = userlimit - base;
	} else {
		slen = p->p_usrstack - base -
		    ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK);
	}
	/* Make len be a multiple of PAGESIZE */
	len = (len + PAGEOFFSET) & PAGEMASK;

	/*
	 * If the request is larger than the size of a particular
	 * mmu level, then we use that level to map the request.
	 * But this requires that both the virtual and the physical
	 * addresses be aligned with respect to that level, so we
	 * do the virtual bit of nastiness here.
	 *
	 * For 32-bit processes, only those which have specified
	 * MAP_ALIGN or an addr will be aligned on a page size > 4MB.
	 * Otherwise we can potentially waste up to 256MB of the 4G
	 * process address space just for alignment.
	 *
	 * XXXQ Should iterate through hw_page_array here to catch
	 * all supported pagesizes
	 */
	if (p->p_model == DATAMODEL_ILP32 && ((flags & MAP_ALIGN) == 0 ||
	    ((uintptr_t)*addrp) != 0)) {
		allow_largepage_alignment = 0;
	}
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE256M)) {	/* 256MB mappings */
		align_amount = MMU_PAGESIZE256M;
	} else if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE32M)) {		/* 32MB mappings */
		align_amount = MMU_PAGESIZE32M;
	} else if (len >= MMU_PAGESIZE4M) {	/* 4MB mappings */
		align_amount = MMU_PAGESIZE4M;
	} else if (len >= MMU_PAGESIZE512K) {	/* 512KB mappings */
		align_amount = MMU_PAGESIZE512K;
	} else if (len >= MMU_PAGESIZE64K) {	/* 64KB mappings */
		align_amount = MMU_PAGESIZE64K;
	} else {
		/*
		 * Align virtual addresses on a 64K boundary to ensure
		 * that ELF shared libraries are mapped with the appropriate
		 * alignment constraints by the run-time linker.
		 */
		align_amount = ELF_SPARC_MAXPGSZ;
		if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp != 0) &&
		    ((uintptr_t)*addrp < align_amount))
			align_amount = (uintptr_t)*addrp;
	}

	/*
	 * 64-bit processes require 1024K alignment of ELF shared libraries.
	 */
	if (p->p_model == DATAMODEL_LP64)
		align_amount = MAX(align_amount, ELF_SPARCV9_MAXPGSZ);
#ifdef VAC
	if (vac && vacalign && (align_amount < shm_alignment))
		align_amount = shm_alignment;
#endif

	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
		align_amount = (uintptr_t)*addrp;
	}

	ASSERT(ISP2(align_amount));
	ASSERT(align_amount == 0 || align_amount >= PAGESIZE);

	/*
	 * Look for a large enough hole starting below the stack limit.
	 * After finding it, use the upper part.
	 */
	as_purge(as);
	off = off & (align_amount - 1);
	if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
	    PAGESIZE, off) == 0) {
		caddr_t as_addr;

		/*
		 * addr is the highest possible address to use since we have
		 * a PAGESIZE redzone at the beginning and end.
		 */
		addr = base + slen - (PAGESIZE + len);
		as_addr = addr;
		/*
		 * Round address DOWN to the alignment amount and
		 * add the offset in.
		 * If addr is greater than as_addr, len would not be large
		 * enough to include the redzone, so we must adjust down
		 * by the alignment amount.
		 */
		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
		addr += (long)off;
		if (addr > as_addr) {
			addr -= align_amount;
		}

		ASSERT(addr > base);
		ASSERT(addr + len < base + slen);
		ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
		    ((uintptr_t)(off)));
		*addrp = addr;

	} else {
		*addrp = NULL;	/* no more virtual space */
	}
}
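
/*
 * Editor's worked example (illustrative numbers, not from the original
 * source): suppose as_gap_aligned() finds a hole with base = 0x1000000
 * and slen = 0x1000000, and the request is len = 4M with
 * align_amount = 4M and off = 0. The highest usable address is
 * addr = base + slen - (PAGESIZE + len) = 0x1bfe000; rounding down to
 * a 4M boundary gives 0x1800000, which does not exceed as_addr, so
 * *addrp = 0x1800000. Had adding off pushed the result above as_addr,
 * align_amount would be subtracted once so the mapping and its redzone
 * still fit inside the hole.
 */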

/*
 * Platform-dependent page scrub call.
 * We call hypervisor to scrub the page.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
	uint64_t pa, length;

	pa = ((uint64_t)pp->p_pagenum << MMU_PAGESHIFT) + off;
	length = (uint64_t)len;

	(void) mem_scrub(pa, length);
}

void
sync_data_memory(caddr_t va, size_t len)
{
	/* Call memory sync function */
	(void) mem_sync(va, len);
}

size_t
mmu_get_kernel_lpsize(size_t lpsize)
{
	extern int mmu_exported_pagesize_mask;
	uint_t tte;

	if (lpsize == 0) {
		/* no setting for segkmem_lpsize in /etc/system: use default */
		if (mmu_exported_pagesize_mask & (1 << TTE256M)) {
			lpsize = MMU_PAGESIZE256M;
		} else if (mmu_exported_pagesize_mask & (1 << TTE4M)) {
			lpsize = MMU_PAGESIZE4M;
		} else if (mmu_exported_pagesize_mask & (1 << TTE64K)) {
			lpsize = MMU_PAGESIZE64K;
		} else {
			lpsize = MMU_PAGESIZE;
		}

		return (lpsize);
	}

	for (tte = TTE8K; tte <= TTE256M; tte++) {

		if ((mmu_exported_pagesize_mask & (1 << tte)) == 0)
			continue;

		if (lpsize == TTEBYTES(tte))
			return (lpsize);
	}

	lpsize = TTEBYTES(TTE8K);
	return (lpsize);
}

void
mmu_init_kcontext()
{
}

/*ARGSUSED*/
void
mmu_init_kernel_pgsz(struct hat *hat)
{
}

static void *
contig_mem_span_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	page_t *ppl;
	page_t *rootpp;
	caddr_t addr = NULL;
	pgcnt_t npages = btopr(size);
	page_t **ppa;
	int pgflags;
	spgcnt_t i = 0;

	ASSERT(size <= contig_mem_import_size_max);
	ASSERT((size & (size - 1)) == 0);

	if ((addr = vmem_xalloc(vmp, size, size, 0, 0,
	    NULL, NULL, vmflag)) == NULL) {
		return (NULL);
	}

	/* The address should be slab-size aligned. */
	ASSERT(((uintptr_t)addr & (size - 1)) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		vmem_xfree(vmp, addr, size);
		return (NULL);
	}

	pgflags = PG_EXCL;
	if (vmflag & VM_NORELOC)
		pgflags |= PG_NORELOC;

	ppl = page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kvseg, addr, NULL);

	if (ppl == NULL) {
		vmem_xfree(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	rootpp = ppl;
	ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
	while (ppl != NULL) {
		page_t *pp = ppl;
		ppa[i++] = pp;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		ASSERT(PAGE_EXCL(pp));
		page_io_unlock(pp);
	}

	/*
	 * Load the locked entry. It's OK to preload the entry into
	 * the TSB since we now support large mappings in the kernel TSB.
	 */
	hat_memload_array(kas.a_hat, (caddr_t)rootpp->p_offset, size,
	    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC, HAT_LOAD_LOCK);

	ASSERT(i == page_get_pagecnt(ppa[0]->p_szc));
	for (--i; i >= 0; --i) {
		ASSERT(ppa[i]->p_szc == ppa[0]->p_szc);
		ASSERT(page_pptonum(ppa[i]) == page_pptonum(ppa[0]) + i);
		(void) page_pp_lock(ppa[i], 0, 1);
		/*
		 * Leave the page share locked. For non-cage pages,
		 * this would prevent memory DR if it were supported
		 * on sun4v.
		 */
		page_downgrade(ppa[i]);
	}

	kmem_free(ppa, npages * sizeof (page_t *));
	return (addr);
}

/*
 * Allocates a slab by first trying to use the largest slab size
 * in contig_mem_import_sizes and then falling back to smaller slab
 * sizes still large enough for the allocation.
 * The sizep argument is a pointer to the requested size. When a slab
 * is successfully allocated, the slab size, which must be >= *sizep
 * and <= contig_mem_import_size_max, is returned in the *sizep
 * argument. Returns the virtual address of the new slab.
 */
static void *
span_alloc_downsize(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
	int i;

	ASSERT(*sizep <= contig_mem_import_size_max);

	for (i = 0; i < NUM_IMPORT_SIZES; i++) {
		size_t page_size = contig_mem_import_sizes[i];

		/*
		 * Check that the alignment is also less than the
		 * import (large page) size. In the case where the
		 * alignment is larger than the size, a large page
		 * large enough for the allocation is not necessarily
		 * physical-address aligned to satisfy the requested
		 * alignment. Since alignment is required to be a
		 * power-of-2, any large page >= size && >= align will
		 * suffice.
		 */
		if (*sizep <= page_size && align <= page_size) {
			void *addr;
			addr = contig_mem_span_alloc(vmp, page_size, vmflag);
			if (addr == NULL)
				continue;
			*sizep = page_size;
			return (addr);
		}
		return (NULL);
	}

	return (NULL);
}

static void *
contig_mem_span_xalloc(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
	return (span_alloc_downsize(vmp, sizep, align, vmflag | VM_NORELOC));
}

static void *
contig_mem_reloc_span_xalloc(vmem_t *vmp, size_t *sizep, size_t align,
    int vmflag)
{
	ASSERT((vmflag & VM_NORELOC) == 0);
	return (span_alloc_downsize(vmp, sizep, align, vmflag));
}

/*
 * Free a span, which is always exactly one large page.
 */
static void
contig_mem_span_free(vmem_t *vmp, void *inaddr, size_t size)
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);
	page_t *rootpp = NULL;

	ASSERT(size <= contig_mem_import_size_max);
	/* All slabs should be size aligned */
	ASSERT(((uintptr_t)addr & (size - 1)) == 0);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pp = page_find(&kvp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL) {
			panic("contig_mem_span_free: page not found");
		}
		if (!page_tryupgrade(pp)) {
			page_unlock(pp);
			pp = page_lookup(&kvp,
			    (u_offset_t)(uintptr_t)addr, SE_EXCL);
			if (pp == NULL)
				panic("contig_mem_span_free: page not found");
		}

		ASSERT(PAGE_EXCL(pp));
		ASSERT(size == page_get_pagesize(pp->p_szc));
		ASSERT(rootpp == NULL || rootpp->p_szc == pp->p_szc);
		ASSERT(rootpp == NULL || (page_pptonum(rootpp) +
		    (pgcnt_t)btop(addr - (caddr_t)inaddr) == page_pptonum(pp)));

		page_pp_unlock(pp, 0, 1);

		if (rootpp == NULL)
			rootpp = pp;
	}
	page_destroy_pages(rootpp);
	page_unresv(npages);

	if (vmp != NULL)
		vmem_xfree(vmp, inaddr, size);
}

static void *
contig_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t *sizep, size_t align,
    int vmflag)
{
	ASSERT((align & (align - 1)) == 0);
	return (vmem_xalloc(vmp, *sizep, align, 0, 0, NULL, NULL, vmflag));
}
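
/*
 * Editor's illustration of the downsize fallback implemented by
 * span_alloc_downsize() above (the request sizes are hypothetical):
 * a 512K import with 64K alignment first attempts a 4M slab; if the
 * cage has no free 4M chunk, the import is retried with a 512K slab.
 * A 64K slab is never attempted because it is smaller than the
 * request, so if 512K also fails the import returns NULL and
 * contig_mem_alloc_align() falls back to contig_mem_reloc_arena.
 */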

/*
 * contig_mem_alloc, contig_mem_alloc_align
 *
 * Caution: contig_mem_alloc and contig_mem_alloc_align should be
 * used only when physically contiguous non-relocatable memory is
 * required. Furthermore, use of these allocation routines should be
 * minimized, as should the allocation size. As described in the
 * contig_mem_arena comment block above, slab allocations fall back to
 * being outside of the cage. Therefore, overuse of these allocation
 * routines can lead to non-relocatable large pages being allocated
 * outside the cage. Such pages prevent the allocation of a larger page
 * occupying overlapping pages. This can impact performance for
 * applications that utilize e.g. 256M large pages.
 */

/*
 * Allocates size aligned contiguous memory up to contig_mem_import_size_max.
 * Size must be a power of 2.
 */
void *
contig_mem_alloc(size_t size)
{
	ASSERT((size & (size - 1)) == 0);
	return (contig_mem_alloc_align(size, size));
}

/*
 * contig_mem_alloc_align_flag allocates real contiguous memory with the
 * specified alignment up to contig_mem_import_size_max. The alignment must
 * be a power of 2 and no greater than contig_mem_import_size_max. We assert
 * the alignment is a power of 2. For non-debug, vmem_xalloc will panic
 * for non power of 2 alignments.
 */
static void *
contig_mem_alloc_align_flag(size_t size, size_t align, int flag,
    kmutex_t *lockp)
{
	void *buf;

	ASSERT(size <= contig_mem_import_size_max);
	ASSERT(align <= contig_mem_import_size_max);
	ASSERT((align & (align - 1)) == 0);

	if (align < CONTIG_MEM_ARENA_QUANTUM)
		align = CONTIG_MEM_ARENA_QUANTUM;

	/*
	 * We take the lock here to serialize span allocations.
	 * We do not lose concurrency for the common case, since
	 * allocations that don't require new span allocations
	 * are serialized by vmem_xalloc. Serializing span
	 * allocations also prevents us from trying to allocate
	 * more spans than necessary.
	 */
	mutex_enter(lockp);

	buf = vmem_xalloc(contig_mem_arena, size, align, 0, 0,
	    NULL, NULL, flag | VM_NORELOC);

	if ((buf == NULL) && (size <= MMU_PAGESIZE)) {
		mutex_exit(lockp);
		return (vmem_xalloc(static_alloc_arena, size, align, 0, 0,
		    NULL, NULL, flag));
	}

	if (buf == NULL) {
		buf = vmem_xalloc(contig_mem_reloc_arena, size, align, 0, 0,
		    NULL, NULL, flag);
	}

	mutex_exit(lockp);

	return (buf);
}

void *
contig_mem_alloc_align(size_t size, size_t align)
{
	return (contig_mem_alloc_align_flag(size, align, VM_NOSLEEP,
	    &contig_mem_lock));
}
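
/*
 * Editor's usage sketch (not part of the original source; the consumer
 * is hypothetical). A sun4v driver needing a physically contiguous,
 * non-relocatable 64K buffer might do:
 *
 *	void *buf = contig_mem_alloc_align(MMU_PAGESIZE64K,
 *	    MMU_PAGESIZE64K);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *	...
 *	contig_mem_free(buf, MMU_PAGESIZE64K);
 *
 * The size and alignment may not exceed contig_mem_import_size_max
 * (4M), the alignment must be a power of two, and the size passed to
 * contig_mem_free() must match the size that was allocated.
 */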

/*
 * This function is provided for callers that need physically contiguous
 * allocations but can sleep. We use the contig_mem_sleep_lock so that we
 * don't interfere with contig_mem_alloc_align calls that should never sleep.
 * Similarly to contig_mem_alloc_align, we use a lock to prevent allocating
 * unnecessary spans when called in parallel.
 */
void *
contig_mem_alloc_align_sleep(size_t size, size_t align)
{
	return (contig_mem_alloc_align_flag(size, align, VM_SLEEP,
	    &contig_mem_sleep_lock));
}

void
contig_mem_free(void *vaddr, size_t size)
{
	if (vmem_contains(contig_mem_arena, vaddr, size)) {
		vmem_xfree(contig_mem_arena, vaddr, size);
	} else if (size > MMU_PAGESIZE) {
		vmem_xfree(contig_mem_reloc_arena, vaddr, size);
	} else {
		vmem_xfree(static_alloc_arena, vaddr, size);
	}
}

/*
 * We create a set of stacked vmem arenas to enable us to
 * allocate large >PAGESIZE chunks of contiguous Real Address space.
 * The vmem_xcreate interface is used to create the contig_mem_arena
 * allowing the import routine to downsize the requested slab size
 * and return a smaller slab.
 */
void
contig_mem_init(void)
{
	mutex_init(&contig_mem_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&contig_mem_sleep_lock, NULL, MUTEX_DEFAULT, NULL);

	contig_mem_slab_arena = vmem_xcreate("contig_mem_slab_arena", NULL, 0,
	    CONTIG_MEM_SLAB_ARENA_QUANTUM, contig_vmem_xalloc_aligned_wrapper,
	    vmem_xfree, heap_arena, 0, VM_SLEEP | VMC_XALIGN);

	contig_mem_arena = vmem_xcreate("contig_mem_arena", NULL, 0,
	    CONTIG_MEM_ARENA_QUANTUM, contig_mem_span_xalloc,
	    contig_mem_span_free, contig_mem_slab_arena, 0,
	    VM_SLEEP | VM_BESTFIT | VMC_XALIGN);

	contig_mem_reloc_arena = vmem_xcreate("contig_mem_reloc_arena", NULL,
	    0, CONTIG_MEM_ARENA_QUANTUM, contig_mem_reloc_span_xalloc,
	    contig_mem_span_free, contig_mem_slab_arena, 0,
	    VM_SLEEP | VM_BESTFIT | VMC_XALIGN);

	if (contig_mem_prealloc_buf == NULL || vmem_add(contig_mem_arena,
	    contig_mem_prealloc_buf, contig_mem_prealloc_size, VM_SLEEP)
	    == NULL) {
		cmn_err(CE_WARN, "Failed to pre-populate contig_mem_arena");
	}
}

/*
 * In calculating how much memory to pre-allocate, we include a small
 * amount per-CPU to account for per-CPU buffers, in line with measured
 * values for different size systems. contig_mem_prealloc_base_size is
 * a cpu specific amount to be pre-allocated before considering per-CPU
 * requirements and memory size. We always pre-allocate a minimum amount
 * of memory determined by PREALLOC_MIN. Beyond that, we take the minimum
 * of the per-CPU amount plus contig_mem_prealloc_base_size and a small
 * percentage of physical memory, to prevent allocating too much on
 * smaller systems. contig_mem_prealloc_base_size is global, allowing
 * for the CPU module to increase its value if necessary.
 */
#define	PREALLOC_PER_CPU	(256 * 1024)		/* 256K */
#define	PREALLOC_PERCENT	(4)			/* 4% */
#define	PREALLOC_MIN		(16 * 1024 * 1024)	/* 16M */
size_t contig_mem_prealloc_base_size = 0;
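
/*
 * Editor's worked example (hypothetical configuration): on a guest
 * with ncpu_guest_max = 64, 8GB of memory and
 * contig_mem_prealloc_base_size = 10M, the calculation below yields
 * MIN(256K * 64 + 10M, 4% of 8GB) = MIN(26M, ~327M) = 26M, which is
 * above the 16M PREALLOC_MIN floor and is then rounded up to the next
 * 4M multiple, for a 28M pre-allocation.
 */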

/*
 * Called at boot-time allowing pre-allocation of contiguous memory.
 * The argument 'alloc_base' is the requested base address for the
 * allocation and originates in startup_memlist.
 */
caddr_t
contig_mem_prealloc(caddr_t alloc_base, pgcnt_t npages)
{
	caddr_t chunkp;

	contig_mem_prealloc_size = MIN((PREALLOC_PER_CPU * ncpu_guest_max) +
	    contig_mem_prealloc_base_size,
	    (ptob(npages) * PREALLOC_PERCENT) / 100);
	contig_mem_prealloc_size = MAX(contig_mem_prealloc_size, PREALLOC_MIN);
	contig_mem_prealloc_size = P2ROUNDUP(contig_mem_prealloc_size,
	    MMU_PAGESIZE4M);

	alloc_base = (caddr_t)roundup((uintptr_t)alloc_base, MMU_PAGESIZE4M);
	if (prom_alloc(alloc_base, contig_mem_prealloc_size,
	    MMU_PAGESIZE4M) != alloc_base) {

		/*
		 * Failed. This may mean the physical memory has holes in it
		 * and it will be more difficult to get large contiguous
		 * pieces of memory. Since we only guarantee contiguous
		 * pieces of memory contig_mem_import_size_max or smaller,
		 * loop, getting contig_mem_import_size_max at a time, until
		 * failure or contig_mem_prealloc_size is reached.
		 */
		for (chunkp = alloc_base;
		    (chunkp - alloc_base) < contig_mem_prealloc_size;
		    chunkp += contig_mem_import_size_max) {

			if (prom_alloc(chunkp, contig_mem_import_size_max,
			    MMU_PAGESIZE4M) != chunkp) {
				break;
			}
		}
		contig_mem_prealloc_size = chunkp - alloc_base;
		ASSERT(contig_mem_prealloc_size != 0);
	}

	if (contig_mem_prealloc_size != 0) {
		contig_mem_prealloc_buf = alloc_base;
	} else {
		contig_mem_prealloc_buf = NULL;
	}
	alloc_base += contig_mem_prealloc_size;

	return (alloc_base);
}

static uint_t sp_color_stride = 16;
static uint_t sp_color_mask = 0x1f;
static uint_t sp_current_color = (uint_t)-1;

size_t
exec_get_spslew(void)
{
	uint_t spcolor = atomic_inc_32_nv(&sp_current_color);
	return ((size_t)((spcolor & sp_color_mask) * SA(sp_color_stride)));
}

/*
 * This flag may be set via /etc/system to force the synchronization
 * of I-cache with memory after every bcopy. The default is 0, meaning
 * that there is no need for an I-cache flush after each bcopy. This
 * flag is relevant only on platforms that have non-coherent I-caches.
 */
uint_t force_sync_icache_after_bcopy = 0;

/*
 * This flag may be set via /etc/system to force the synchronization
 * of I-cache to memory after every DMA. The default is 0, meaning
 * that there is no need for an I-cache flush after each DMA write to
 * memory. This flag is relevant only on platforms that have
 * non-coherent I-caches.
 */
uint_t force_sync_icache_after_dma = 0;
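
/*
 * For example (editor's illustration), on a platform with a
 * non-coherent I-cache an administrator could force a flush after
 * every bcopy by adding the following line to /etc/system:
 *
 *	set force_sync_icache_after_bcopy = 1
 */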

/*
 * This internal flag enables mach_sync_icache_pa, which is always
 * called from common code if it is defined. However, not all
 * platforms support the hv_mem_iflush firmware call.
 */
static uint_t do_mach_sync_icache_pa = 0;

int hsvc_kdi_mem_iflush_negotiated = B_FALSE;

#define	MEM_IFLUSH_MAJOR	1
#define	MEM_IFLUSH_MINOR	0
static hsvc_info_t kdi_mem_iflush_hsvc = {
	HSVC_REV_1,		/* HSVC rev num */
	NULL,			/* Private */
	HSVC_GROUP_MEM_IFLUSH,	/* Requested API Group */
	MEM_IFLUSH_MAJOR,	/* Requested Major */
	MEM_IFLUSH_MINOR,	/* Requested Minor */
	"kdi"			/* Module name */
};

/*
 * Setup soft exec mode.
 * Since /etc/system is read later during init, it
 * may be used to override these flags.
 */
void
mach_setup_icache(uint_t coherency)
{
	int status;
	uint64_t sup_minor;

	if (coherency == 0 && icache_is_coherent) {
		extern void kdi_flush_caches(void);
		status = hsvc_register(&kdi_mem_iflush_hsvc, &sup_minor);
		if (status != 0)
			cmn_err(CE_PANIC, "I$ flush not implemented on "
			    "I$ incoherent system");
		hsvc_kdi_mem_iflush_negotiated = B_TRUE;
		kdi_flush_caches();
		icache_is_coherent = 0;
		do_mach_sync_icache_pa = 1;
	}
}

/*
 * Flush specified physical address range from I$ via hv_mem_iflush interface
 */
/*ARGSUSED*/
void
mach_sync_icache_pa(caddr_t paddr, size_t size)
{
	if (do_mach_sync_icache_pa) {
		uint64_t pa = (uint64_t)paddr;
		uint64_t sz = (uint64_t)size;
		uint64_t i, flushed;

		for (i = 0; i < sz; i += flushed) {
			if (hv_mem_iflush(pa + i, sz - i, &flushed) != H_EOK) {
				cmn_err(CE_PANIC,
				    "Flushing the Icache failed");
				break;
			}
		}
	}
}

/*
 * Flush the page if it has been marked as executed
 */
/*ARGSUSED*/
void
mach_sync_icache_pp(page_t *pp)
{
	if (PP_ISEXEC(pp))
		mach_sync_icache_pa((caddr_t)ptob(pp->p_pagenum), PAGESIZE);
}