/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/vm.h>
#include <sys/exec.h>
#include <sys/cmn_err.h>
#include <sys/cpu_module.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>
#include <sys/error.h>
#include <sys/machsystm.h>
#include <vm/seg_kmem.h>
#include <sys/stack.h>
#include <sys/atomic.h>

uint_t page_colors = 0;
uint_t page_colors_mask = 0;
uint_t page_coloring_shift = 0;
int consistent_coloring;

uint_t mmu_page_sizes = MMU_PAGE_SIZES;
uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
uint_t mmu_hashcnt = MAX_HASHCNT;
uint_t max_mmu_hashcnt = MAX_HASHCNT;
size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;

/*
 * A bitmask of the page sizes supported by hardware based upon szc.
 * The base pagesize (p_szc == 0) must always be supported by the hardware.
 */
int mmu_exported_pagesize_mask;
uint_t mmu_exported_page_sizes;

uint_t szc_2_userszc[MMU_PAGE_SIZES];
uint_t userszc_2_szc[MMU_PAGE_SIZES];

extern uint_t vac_colors_mask;
extern int vac_shift;

hw_pagesize_t hw_page_array[] = {
	{MMU_PAGESIZE, MMU_PAGESHIFT, 0, MMU_PAGESIZE >> MMU_PAGESHIFT},
	{MMU_PAGESIZE64K, MMU_PAGESHIFT64K, 0,
	    MMU_PAGESIZE64K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE512K, MMU_PAGESHIFT512K, 0,
	    MMU_PAGESIZE512K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE4M, MMU_PAGESHIFT4M, 0, MMU_PAGESIZE4M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE32M, MMU_PAGESHIFT32M, 0,
	    MMU_PAGESIZE32M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE256M, MMU_PAGESHIFT256M, 0,
	    MMU_PAGESIZE256M >> MMU_PAGESHIFT},
	{0, 0, 0, 0}
};
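
/*
 * Illustrative note: the last field of each hw_page_array entry is the
 * page count in units of the base page.  With the 8K base page assumed
 * here (MMU_PAGESHIFT == 13), the 4M entry above describes
 * MMU_PAGESIZE4M >> MMU_PAGESHIFT == 512 constituent base pages.
 */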

/*
 * Maximum page size used to map 64-bit memory segment kmem64_base..kmem64_end
 */
int max_bootlp_tteszc = TTE256M;

/*
 * Maximum and default segment size tunables for user heap, stack, private
 * and shared anonymous memory, and user text and initialized data.
 */
size_t max_uheap_lpsize = MMU_PAGESIZE64K;
size_t default_uheap_lpsize = MMU_PAGESIZE64K;
size_t max_ustack_lpsize = MMU_PAGESIZE64K;
size_t default_ustack_lpsize = MMU_PAGESIZE64K;
size_t max_privmap_lpsize = MMU_PAGESIZE64K;
size_t max_uidata_lpsize = MMU_PAGESIZE64K;
size_t max_utext_lpsize = MMU_PAGESIZE4M;
size_t max_shm_lpsize = MMU_PAGESIZE4M;

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user.  We will pick an address
 * range which is just below the current stack limit.  The
 * algorithm used for cache consistency on machines with virtual
 * address caches is such that offset 0 in the vnode is always
 * on an address aligned to shm_alignment.  Unfortunately, this
 * means that vnodes which are demand paged will not be mapped
 * cache consistently with the executable images.  When the
 * cache alignment for a given object is inconsistent, the
 * lower level code must manage the translations so that this
 * is not seen here (at the cost of efficiency, of course).
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion.  For MAP_ALIGN, addrp contains the
 *	minimal alignment.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space, or else an address that is currently
 *	not mapped for len bytes with a page of red zone on either side.
 *	If vacalign is true, then the selected address will obey the
 *	alignment constraints of a vac machine based on the given off value.
 */
/*ARGSUSED3*/
void
map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
    caddr_t userlimit, struct proc *p, uint_t flags)
{
	struct as *as = p->p_as;
	caddr_t addr;
	caddr_t base;
	size_t slen;
	uintptr_t align_amount;
	int allow_largepage_alignment = 1;

	base = p->p_brkbase;
	if (userlimit < as->a_userlimit) {
		/*
		 * This happens when a program wants to map something in
		 * a range that's accessible to a program in a smaller
		 * address space.  For example, a 64-bit program might
		 * be calling mmap32(2) to guarantee that the returned
		 * address is below 4Gbytes.
		 */
		ASSERT(userlimit > base);
		slen = userlimit - base;
	} else {
		slen = p->p_usrstack - base - (((size_t)rctl_enforced_value(
		    rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p) + PAGEOFFSET)
		    & PAGEMASK);
	}
	len = (len + PAGEOFFSET) & PAGEMASK;

	/*
	 * Redzone for each side of the request.  This is done to leave
	 * one page unmapped between segments.  This is not required, but
	 * it's useful for the user because if their program strays across
	 * a segment boundary, it will catch a fault immediately, making
	 * debugging a little easier.
	 */
	len += (2 * PAGESIZE);
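
	/*
	 * Worked example (illustrative, assuming the 8K base page): a
	 * request of len == 10000 bytes rounds up to 16K above, and the
	 * two redzone pages grow the hole we must find to 32K.  The
	 * redzone pages themselves are never part of the returned mapping.
	 */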

	/*
	 * If the request is larger than the size of a particular
	 * mmu level, then we use that level to map the request.
	 * But this requires that both the virtual and the physical
	 * addresses be aligned with respect to that level, so we
	 * do the virtual bit of nastiness here.
	 *
	 * For 32-bit processes, only those which have specified
	 * MAP_ALIGN or an addr will be aligned on a page size > 4MB.
	 * Otherwise we can potentially waste up to 256MB of the 4G
	 * process address space just for alignment.
	 *
	 * XXXQ Should iterate through hw_page_array here to catch
	 * all supported pagesizes
	 */
	if (p->p_model == DATAMODEL_ILP32 && ((flags & MAP_ALIGN) == 0 ||
	    ((uintptr_t)*addrp) != 0)) {
		allow_largepage_alignment = 0;
	}
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE256M)) {	/* 256MB mappings */
		align_amount = MMU_PAGESIZE256M;
	} else if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE32M)) {		/* 32MB mappings */
		align_amount = MMU_PAGESIZE32M;
	} else if (len >= MMU_PAGESIZE4M) {	/* 4MB mappings */
		align_amount = MMU_PAGESIZE4M;
	} else if (len >= MMU_PAGESIZE512K) {	/* 512KB mappings */
		align_amount = MMU_PAGESIZE512K;
	} else if (len >= MMU_PAGESIZE64K) {	/* 64KB mappings */
		align_amount = MMU_PAGESIZE64K;
	} else {
		/*
		 * Align virtual addresses on a 64K boundary to ensure
		 * that ELF shared libraries are mapped with the appropriate
		 * alignment constraints by the run-time linker.
		 */
		align_amount = ELF_SPARC_MAXPGSZ;
		if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp != 0) &&
		    ((uintptr_t)*addrp < align_amount))
			align_amount = (uintptr_t)*addrp;
	}

	/*
	 * 64-bit processes require 1024K alignment of ELF shared libraries.
	 */
	if (p->p_model == DATAMODEL_LP64)
		align_amount = MAX(align_amount, ELF_SPARCV9_MAXPGSZ);
#ifdef VAC
	if (vac && vacalign && (align_amount < shm_alignment))
		align_amount = shm_alignment;
#endif

	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
		align_amount = (uintptr_t)*addrp;
	}
	len += align_amount;

	/*
	 * Look for a large enough hole starting below the stack limit.
	 * After finding it, use the upper part.  Addition of PAGESIZE is
	 * for the redzone as described above.
	 */
	as_purge(as);
	if (as_gap(as, len, &base, &slen, AH_HI, NULL) == 0) {
		caddr_t as_addr;

		addr = base + slen - len + PAGESIZE;
		as_addr = addr;
		/*
		 * Round address DOWN to the alignment amount,
		 * add the offset, and if this address is less
		 * than the original address, add alignment amount.
		 */
		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
		addr += (long)(off & (align_amount - 1l));
		if (addr < as_addr) {
			addr += align_amount;
		}

		ASSERT(addr <= (as_addr + align_amount));
		ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
		    ((uintptr_t)(off & (align_amount - 1l))));
		*addrp = addr;

	} else {
		*addrp = NULL;	/* no more virtual space */
	}
}
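
/*
 * Worked example of the rounding above (illustrative numbers): suppose
 * as_gap() yields a candidate as_addr of 0x10038000 with align_amount
 * 64K (0x10000) and (off & 0xffff) == 0x2000.  Rounding down gives
 * 0x10030000; adding the offset bits gives 0x10032000, which is below
 * as_addr, so adding one more align_amount yields 0x10042000.  The
 * result stays within as_addr + align_amount and shares the low 16
 * bits of off, satisfying both ASSERTs.
 */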

/*
 * Platform-dependent page scrub call.
 * We call the hypervisor to scrub the page.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
	uint64_t pa, length;

	pa = ((uint64_t)pp->p_pagenum << MMU_PAGESHIFT) + off;
	length = (uint64_t)len;

	(void) mem_scrub(pa, length);
}

void
sync_data_memory(caddr_t va, size_t len)
{
	/* Call memory sync function */
	(void) mem_sync(va, len);
}

size_t
mmu_get_kernel_lpsize(size_t lpsize)
{
	extern int mmu_exported_pagesize_mask;
	uint_t tte;

	if (lpsize == 0) {
		/* no setting for segkmem_lpsize in /etc/system: use default */
		if (mmu_exported_pagesize_mask & (1 << TTE256M)) {
			lpsize = MMU_PAGESIZE256M;
		} else if (mmu_exported_pagesize_mask & (1 << TTE4M)) {
			lpsize = MMU_PAGESIZE4M;
		} else if (mmu_exported_pagesize_mask & (1 << TTE64K)) {
			lpsize = MMU_PAGESIZE64K;
		} else {
			lpsize = MMU_PAGESIZE;
		}

		return (lpsize);
	}

	for (tte = TTE8K; tte <= TTE256M; tte++) {

		if ((mmu_exported_pagesize_mask & (1 << tte)) == 0)
			continue;

		if (lpsize == TTEBYTES(tte))
			return (lpsize);
	}

	lpsize = TTEBYTES(TTE8K);
	return (lpsize);
}
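
/*
 * Tuning example (illustrative): mmu_get_kernel_lpsize() validates the
 * segkmem_lpsize value an administrator may set in /etc/system, e.g.
 *
 *	set segkmem_lpsize = 0x400000
 *
 * to request 4M kernel heap pages.  A size the hardware does not export
 * falls back to the 8K base page.
 */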

void
mmu_init_kcontext()
{
}

/*ARGSUSED*/
void
mmu_init_kernel_pgsz(struct hat *hat)
{
}

#define	QUANTUM_SIZE	64

static vmem_t *contig_mem_slab_arena;
static vmem_t *contig_mem_arena;

uint_t contig_mem_slab_size = MMU_PAGESIZE4M;

static void *
contig_mem_span_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	page_t *ppl;
	page_t *rootpp;
	caddr_t addr = NULL;
	pgcnt_t npages = btopr(size);
	page_t **ppa;
	int pgflags;
	int i = 0;

	/*
	 * The import request should be at least
	 * contig_mem_slab_size because that is the
	 * slab arena's quantum. The size can be
	 * further restricted since contiguous
	 * allocations larger than contig_mem_slab_size
	 * are not supported here.
	 */
	ASSERT(size == contig_mem_slab_size);

	if ((addr = vmem_xalloc(vmp, size, size, 0, 0,
	    NULL, NULL, vmflag)) == NULL) {
		return (NULL);
	}

	/* The address should be slab-size aligned. */
	ASSERT(((uintptr_t)addr & (contig_mem_slab_size - 1)) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		vmem_xfree(vmp, addr, size);
		return (NULL);
	}

	pgflags = PG_EXCL;
	if ((vmflag & VM_NOSLEEP) == 0)
		pgflags |= PG_WAIT;
	if (vmflag & VM_PANIC)
		pgflags |= PG_PANIC;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;

	ppl = page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kvseg, addr, NULL);

	if (ppl == NULL) {
		vmem_xfree(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	rootpp = ppl;
	ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
	while (ppl != NULL) {
		page_t *pp = ppl;
		ppa[i++] = pp;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		page_io_unlock(pp);
	}

	/*
	 * Load the locked entry.  It's OK to preload the entry into
	 * the TSB since we now support large mappings in the kernel TSB.
	 */
	hat_memload_array(kas.a_hat, (caddr_t)rootpp->p_offset, size,
	    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC, HAT_LOAD_LOCK);

	for (--i; i >= 0; --i) {
		(void) page_pp_lock(ppa[i], 0, 1);
		page_unlock(ppa[i]);
	}

	kmem_free(ppa, npages * sizeof (page_t *));
	return (addr);
}

void
contig_mem_span_free(vmem_t *vmp, void *inaddr, size_t size)
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);
	pgcnt_t pgs_left = npages;
	page_t *rootpp = NULL;

	ASSERT(((uintptr_t)addr & (contig_mem_slab_size - 1)) == 0);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
		if (pp == NULL)
			panic("contig_mem_span_free: page not found");

		ASSERT(PAGE_EXCL(pp));
		page_pp_unlock(pp, 0, 1);

		if (rootpp == NULL)
			rootpp = pp;
		if (--pgs_left == 0) {
			/*
			 * similar logic to segspt_free_pages, but we know we
			 * have one large page.
			 */
			page_destroy_pages(rootpp);
		}
	}
	page_unresv(npages);

	if (vmp != NULL)
		vmem_xfree(vmp, inaddr, size);
}

static void *
contig_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
{
	return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
}

/*
 * contig_mem_alloc_align allocates real contiguous memory with the specified
 * alignment, up to contig_mem_slab_size. The alignment must be a power of 2.
 */
void *
contig_mem_alloc_align(size_t size, size_t align)
{
	ASSERT(align <= contig_mem_slab_size);

	if ((align & (align - 1)) != 0)
		return (NULL);

	return (vmem_xalloc(contig_mem_arena, size, align, 0, 0,
	    NULL, NULL, VM_NOSLEEP));
}

/*
 * Allocates size aligned contiguous memory up to contig_mem_slab_size.
 * Size must be a power of 2.
 */
void *
contig_mem_alloc(size_t size)
{
	ASSERT((size & (size - 1)) == 0);
	return (contig_mem_alloc_align(size, size));
}

void
contig_mem_free(void *vaddr, size_t size)
{
	vmem_xfree(contig_mem_arena, vaddr, size);
}

/*
 * We create a set of stacked vmem arenas to enable us to
 * allocate large (>PAGESIZE) chunks of contiguous Real Address space.
 * This is what the dynamic TSB support does for TSBs.
 * The contig_mem_arena import functions are exactly the same as the
 * TSB kmem_default arena import functions.
 */
void
contig_mem_init(void)
{

	contig_mem_slab_arena = vmem_create("contig_mem_slab_arena", NULL, 0,
	    contig_mem_slab_size, contig_vmem_xalloc_aligned_wrapper,
	    vmem_xfree, heap_arena, 0, VM_SLEEP);

	contig_mem_arena = vmem_create("contig_mem_arena", NULL, 0,
	    QUANTUM_SIZE, contig_mem_span_alloc, contig_mem_span_free,
	    contig_mem_slab_arena, 0, VM_SLEEP | VM_BESTFIT);

}


static uint_t sp_color_stride = 16;
static uint_t sp_color_mask = 0x1f;
static uint_t sp_current_color = (uint_t)-1;

size_t
exec_get_spslew(void)
{
	uint_t spcolor = atomic_inc_32_nv(&sp_current_color);
	return ((size_t)((spcolor & sp_color_mask) * SA(sp_color_stride)));
}
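
/*
 * Worked example (illustrative): with the defaults above, successive
 * execs see sp_current_color increment 0, 1, 2, ...; masking with
 * sp_color_mask (0x1f) cycles through 32 colors, each scaled by
 * SA(sp_color_stride).  Assuming a 16-byte STACK_ALIGN, SA(16) == 16,
 * so initial stack pointers are slewed by 0, 16, 32, ... 496 bytes,
 * spreading stacks across cache sets.
 */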