/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/vm.h>
#include <sys/exec.h>
#include <sys/cmn_err.h>
#include <sys/cpu_module.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>
#include <sys/error.h>
#include <sys/machsystm.h>
#include <vm/seg_kmem.h>

uint_t page_colors = 0;
uint_t page_colors_mask = 0;
uint_t page_coloring_shift = 0;
int consistent_coloring;

uint_t mmu_page_sizes = MMU_PAGE_SIZES;
uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
uint_t mmu_hashcnt = MAX_HASHCNT;
uint_t max_mmu_hashcnt = MAX_HASHCNT;
size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;

/*
 * A bitmask of the page sizes supported by hardware based upon szc.
 * The base pagesize (p_szc == 0) must always be supported by the hardware.
 */
int mmu_exported_pagesize_mask;
uint_t mmu_exported_page_sizes;

uint_t szc_2_userszc[MMU_PAGE_SIZES];
uint_t userszc_2_szc[MMU_PAGE_SIZES];

extern uint_t vac_colors_mask;
extern int vac_shift;
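/*
 * Added note: each hw_page_array entry below pairs a hardware pagesize
 * with its shift and, in the last field, the number of base (8K) pages
 * that one such page spans; the table is terminated by an all-zero
 * entry.  The third field is left zero here.  (Field roles are
 * inferred from the initializers; see the hw_pagesize_t definition
 * for the authoritative layout.)
 */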
hw_pagesize_t hw_page_array[] = {
	{MMU_PAGESIZE, MMU_PAGESHIFT, 0, MMU_PAGESIZE >> MMU_PAGESHIFT},
	{MMU_PAGESIZE64K, MMU_PAGESHIFT64K, 0,
	    MMU_PAGESIZE64K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE512K, MMU_PAGESHIFT512K, 0,
	    MMU_PAGESIZE512K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE4M, MMU_PAGESHIFT4M, 0, MMU_PAGESIZE4M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE32M, MMU_PAGESHIFT32M, 0,
	    MMU_PAGESIZE32M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE256M, MMU_PAGESHIFT256M, 0,
	    MMU_PAGESIZE256M >> MMU_PAGESHIFT},
	{0, 0, 0, 0}
};

/*
 * Maximum and default segment size tunables for user heap, stack, private
 * and shared anonymous memory, and user text and initialized data.
 */
size_t max_uheap_lpsize = MMU_PAGESIZE64K;
size_t default_uheap_lpsize = MMU_PAGESIZE64K;
size_t max_ustack_lpsize = MMU_PAGESIZE64K;
size_t default_ustack_lpsize = MMU_PAGESIZE64K;
size_t max_privmap_lpsize = MMU_PAGESIZE64K;
size_t max_uidata_lpsize = MMU_PAGESIZE64K;
size_t max_utext_lpsize = MMU_PAGESIZE4M;
size_t max_shm_lpsize = MMU_PAGESIZE4M;

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user.  We will pick an address
 * range which is just below the current stack limit.  The
 * algorithm used for cache consistency on machines with virtual
 * address caches is such that offset 0 in the vnode is always
 * on a shm_alignment-aligned address.  Unfortunately, this
 * means that vnodes which are demand paged will not be mapped
 * cache consistently with the executable images.  When the
 * cache alignment for a given object is inconsistent, the
 * lower level code must manage the translations so that this
 * is not seen here (at the cost of efficiency, of course).
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion.  For MAP_ALIGN, addrp contains the
 *	minimal alignment.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space or else an address that is currently
 *	not mapped for len bytes with a page of red zone on either side.
 *	If vacalign is true, then the selected address will obey the alignment
 *	constraints of a vac machine based on the given off value.
 */
/*ARGSUSED3*/
void
map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
    caddr_t userlimit, struct proc *p, uint_t flags)
{
	struct as *as = p->p_as;
	caddr_t addr;
	caddr_t base;
	size_t slen;
	uintptr_t align_amount;
	int allow_largepage_alignment = 1;

	base = p->p_brkbase;
	if (userlimit < as->a_userlimit) {
		/*
		 * This happens when a program wants to map something in
		 * a range that's accessible to a program in a smaller
		 * address space.  For example, a 64-bit program might
		 * be calling mmap32(2) to guarantee that the returned
		 * address is below 4Gbytes.
		 */
		ASSERT(userlimit > base);
		slen = userlimit - base;
	} else {
		slen = p->p_usrstack - base - (((size_t)rctl_enforced_value(
		    rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p) + PAGEOFFSET)
		    & PAGEMASK);
	}
	len = (len + PAGEOFFSET) & PAGEMASK;

	/*
	 * Redzone for each side of the request.  This is done to leave
	 * one page unmapped between segments.  This is not required, but
	 * it's useful for the user because if their program strays across
	 * a segment boundary, it will catch a fault immediately, making
	 * debugging a little easier.
	 */
	len += (2 * PAGESIZE);
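	/*
	 * Added note: len was first rounded up to a whole number of
	 * pages, so e.g. a one-byte request now reserves three pages:
	 * one for the data plus one guard page on each side.  The
	 * address eventually returned skips past the lower guard page.
	 */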
	/*
	 * If the request is larger than the size of a particular
	 * mmu level, then we use that level to map the request.
	 * But this requires that both the virtual and the physical
	 * addresses be aligned with respect to that level, so we
	 * do the virtual bit of nastiness here.
	 *
	 * For 32-bit processes, only those which have specified
	 * MAP_ALIGN or an addr will be aligned on a page size > 4MB.
	 * Otherwise we can potentially waste up to 256MB of the 4G
	 * process address space just for alignment.
	 *
	 * XXXQ Should iterate through hw_page_array here to catch
	 * all supported pagesizes
	 */
	if (p->p_model == DATAMODEL_ILP32 && ((flags & MAP_ALIGN) == 0 ||
	    ((uintptr_t)*addrp) != 0)) {
		allow_largepage_alignment = 0;
	}
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE256M)) {	/* 256MB mappings */
		align_amount = MMU_PAGESIZE256M;
	} else if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE32M)) {		/* 32MB mappings */
		align_amount = MMU_PAGESIZE32M;
	} else if (len >= MMU_PAGESIZE4M) {	/* 4MB mappings */
		align_amount = MMU_PAGESIZE4M;
	} else if (len >= MMU_PAGESIZE512K) {	/* 512KB mappings */
		align_amount = MMU_PAGESIZE512K;
	} else if (len >= MMU_PAGESIZE64K) {	/* 64KB mappings */
		align_amount = MMU_PAGESIZE64K;
	} else {
		/*
		 * Align virtual addresses on a 64K boundary to ensure
		 * that ELF shared libraries are mapped with the appropriate
		 * alignment constraints by the run-time linker.
		 */
		align_amount = ELF_SPARC_MAXPGSZ;
		if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp != 0) &&
		    ((uintptr_t)*addrp < align_amount))
			align_amount = (uintptr_t)*addrp;
	}

	/*
	 * 64-bit processes require 1024K alignment of ELF shared libraries.
	 */
	if (p->p_model == DATAMODEL_LP64)
		align_amount = MAX(align_amount, ELF_SPARCV9_MAXPGSZ);
#ifdef VAC
	if (vac && vacalign && (align_amount < shm_alignment))
		align_amount = shm_alignment;
#endif

	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
		align_amount = (uintptr_t)*addrp;
	}
	len += align_amount;

	/*
	 * Look for a large enough hole starting below the stack limit.
	 * After finding it, use the upper part.  Addition of PAGESIZE is
	 * for the redzone as described above.
	 */
	as_purge(as);
	if (as_gap(as, len, &base, &slen, AH_HI, NULL) == 0) {
		caddr_t as_addr;

		addr = base + slen - len + PAGESIZE;
		as_addr = addr;
		/*
		 * Round address DOWN to the alignment amount,
		 * add the offset, and if this address is less
		 * than the original address, add alignment amount.
		 */
		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1L)));
		addr += (long)(off & (align_amount - 1L));
		if (addr < as_addr) {
			addr += align_amount;
		}

		ASSERT(addr <= (as_addr + align_amount));
		ASSERT(((uintptr_t)addr & (align_amount - 1L)) ==
		    ((uintptr_t)(off & (align_amount - 1L))));
		*addrp = addr;

	} else {
		*addrp = NULL;	/* no more virtual space */
	}
}
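/*
 * Added note: a worked example of the rounding logic above, with
 * illustrative values.  Suppose align_amount is MMU_PAGESIZE4M
 * (0x400000), the candidate address is 0x7f9f0000, and
 * off & (align_amount - 1) is 0x123000.  Rounding down gives
 * 0x7f800000; adding the offset bits gives 0x7f923000, which is below
 * the candidate, so one more align_amount yields 0x7fd23000.  The
 * result stays within align_amount of the candidate and its low bits
 * match those of off, which is exactly what the two ASSERTs check.
 */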
/*
 * Platform-dependent page scrub call.
 * We call hypervisor to scrub the page.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
	uint64_t pa, length;

	/*
	 * Shift the pfn before adding the byte offset; without the
	 * parentheses, '+' binds tighter than '<<' and off would be
	 * added to the shift count instead.
	 */
	pa = ((uint64_t)pp->p_pagenum << MMU_PAGESHIFT) + off;
	length = (uint64_t)len;

	(void) mem_scrub(pa, length);
}

void
sync_data_memory(caddr_t va, size_t len)
{
	/* Call memory sync function */
	mem_sync(va, len);
}

size_t
mmu_get_kernel_lpsize(size_t lpsize)
{
	extern int mmu_exported_pagesize_mask;
	uint_t tte;

	if (lpsize == 0) {
		/* no setting for segkmem_lpsize in /etc/system: use default */
		if (mmu_exported_pagesize_mask & (1 << TTE256M)) {
			lpsize = MMU_PAGESIZE256M;
		} else if (mmu_exported_pagesize_mask & (1 << TTE4M)) {
			lpsize = MMU_PAGESIZE4M;
		} else if (mmu_exported_pagesize_mask & (1 << TTE64K)) {
			lpsize = MMU_PAGESIZE64K;
		} else {
			lpsize = MMU_PAGESIZE;
		}

		return (lpsize);
	}

	for (tte = TTE8K; tte <= TTE256M; tte++) {

		if ((mmu_exported_pagesize_mask & (1 << tte)) == 0)
			continue;

		if (lpsize == TTEBYTES(tte))
			return (lpsize);
	}

	lpsize = TTEBYTES(TTE8K);
	return (lpsize);
}
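/*
 * Added note: for example, if the hypervisor exports 8K, 64K and 4M
 * pages (mmu_exported_pagesize_mask has the TTE8K, TTE64K and TTE4M
 * bits set), then a segkmem_lpsize of 0 selects MMU_PAGESIZE4M, a
 * requested 4M is returned unchanged, and an unsupported value such
 * as 512K falls back to the 8K base pagesize.
 */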
void
mmu_init_kcontext()
{
}

/*ARGSUSED*/
void
mmu_init_kernel_pgsz(struct hat *hat)
{
}

#define	QUANTUM_SIZE	64

static vmem_t *contig_mem_slab_arena;
static vmem_t *contig_mem_arena;

uint_t contig_mem_slab_size = MMU_PAGESIZE4M;

static void *
contig_mem_span_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	page_t *ppl;
	page_t *rootpp;
	caddr_t addr = NULL;
	pgcnt_t npages = btopr(size);
	page_t **ppa;
	int pgflags;
	int i = 0;

	/*
	 * The import request should be at least
	 * contig_mem_slab_size because that is the
	 * slab arena's quantum.  The size can be
	 * further restricted since contiguous
	 * allocations larger than contig_mem_slab_size
	 * are not supported here.
	 */
	ASSERT(size == contig_mem_slab_size);

	if ((addr = vmem_xalloc(vmp, size, size, 0, 0,
	    NULL, NULL, vmflag)) == NULL) {
		return (NULL);
	}

	/* The address should be slab-size aligned. */
	ASSERT(((uintptr_t)addr & (contig_mem_slab_size - 1)) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		vmem_xfree(vmp, addr, size);
		return (NULL);
	}

	pgflags = PG_EXCL;
	if ((vmflag & VM_NOSLEEP) == 0)
		pgflags |= PG_WAIT;
	if (vmflag & VM_PANIC)
		pgflags |= PG_PANIC;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;

	ppl = page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kvseg, addr, NULL);

	if (ppl == NULL) {
		vmem_xfree(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	rootpp = ppl;
	ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
	while (ppl != NULL) {
		page_t *pp = ppl;
		ppa[i++] = pp;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		page_io_unlock(pp);
	}

	/*
	 * Load the locked entry.  It's OK to preload the entry into
	 * the TSB since we now support large mappings in the kernel TSB.
	 * (rootpp->p_offset holds the mapping VA; it was set from addr
	 * by the page_create_va_large() call above.)
	 */
	hat_memload_array(kas.a_hat, (caddr_t)rootpp->p_offset, size,
	    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC, HAT_LOAD_LOCK);

	for (--i; i >= 0; --i) {
		(void) page_pp_lock(ppa[i], 0, 1);
		page_unlock(ppa[i]);
	}

	kmem_free(ppa, npages * sizeof (page_t *));
	return (addr);
}

void
contig_mem_span_free(vmem_t *vmp, void *inaddr, size_t size)
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);
	pgcnt_t pgs_left = npages;
	page_t *rootpp = NULL;

	ASSERT(((uintptr_t)addr & (contig_mem_slab_size - 1)) == 0);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
		if (pp == NULL)
			panic("contig_mem_span_free: page not found");

		ASSERT(PAGE_EXCL(pp));
		page_pp_unlock(pp, 0, 1);

		if (rootpp == NULL)
			rootpp = pp;
		if (--pgs_left == 0) {
			/*
			 * similar logic to segspt_free_pages, but we know we
			 * have one large page.
			 */
			page_destroy_pages(rootpp);
		}
	}
	page_unresv(npages);

	if (vmp != NULL)
		vmem_xfree(vmp, inaddr, size);
}

static void *
contig_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
{
	return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
}

/*
 * contig_mem_alloc_align() allocates real contiguous memory with the
 * specified alignment, up to contig_mem_slab_size.  The alignment must
 * be a power of 2.
 */
void *
contig_mem_alloc_align(size_t size, size_t align)
{
	ASSERT(align <= contig_mem_slab_size);

	if ((align & (align - 1)) != 0)
		return (NULL);

	return (vmem_xalloc(contig_mem_arena, size, align, 0, 0,
	    NULL, NULL, VM_NOSLEEP));
}

/*
 * Allocates size-aligned contiguous memory, up to contig_mem_slab_size.
 * The size must be a power of 2.
 */
void *
contig_mem_alloc(size_t size)
{
	ASSERT((size & (size - 1)) == 0);
	return (contig_mem_alloc_align(size, size));
}

void
contig_mem_free(void *vaddr, size_t size)
{
	vmem_xfree(contig_mem_arena, vaddr, size);
}

/*
 * We create a set of stacked vmem arenas to enable us to
 * allocate large (>PAGESIZE) chunks of contiguous real address space.
 * This is what the dynamic TSB support does for TSBs.
 * The contig_mem_arena import functions are exactly the same as the
 * TSB kmem_default arena import functions.
 */
void
contig_mem_init(void)
{
	contig_mem_slab_arena = vmem_create("contig_mem_slab_arena", NULL, 0,
	    contig_mem_slab_size, contig_vmem_xalloc_aligned_wrapper,
	    vmem_xfree, heap_arena, 0, VM_SLEEP);

	contig_mem_arena = vmem_create("contig_mem_arena", NULL, 0,
	    QUANTUM_SIZE, contig_mem_span_alloc, contig_mem_span_free,
	    contig_mem_slab_arena, 0, VM_SLEEP | VM_BESTFIT);
}
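/*
 * Added note: a usage sketch, illustrative rather than part of this
 * file.  Callers get real-address-contiguous, wired kernel memory;
 * because the arena is tapped with VM_NOSLEEP, the allocation may
 * fail and must be checked, and the free must pass the original size:
 *
 *	void *buf = contig_mem_alloc(PAGESIZE);    (size: power of 2)
 *	if (buf == NULL)
 *		return (ENOMEM);
 *	... use buf ...
 *	contig_mem_free(buf, PAGESIZE);
 */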