/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/vm.h>
#include <sys/exec.h>
#include <sys/cmn_err.h>
#include <sys/cpu_module.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>
#include <sys/error.h>
#include <sys/machsystm.h>
#include <vm/seg_kmem.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/promif.h>

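/*
 * Page coloring state.  These values are set up during startup based on
 * the platform's cache characteristics; consistent_coloring selects the
 * coloring policy used when assigning pages to virtual addresses.
 */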
uint_t page_colors = 0;
uint_t page_colors_mask = 0;
uint_t page_coloring_shift = 0;
int consistent_coloring;
int update_proc_pgcolorbase_after_fork = 1;

uint_t mmu_page_sizes = MMU_PAGE_SIZES;
uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
uint_t mmu_hashcnt = MAX_HASHCNT;
uint_t max_mmu_hashcnt = MAX_HASHCNT;
size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;

/*
 * A bitmask of the page sizes supported by hardware based upon szc.
 * The base pagesize (p_szc == 0) must always be supported by the hardware.
 */
int mmu_exported_pagesize_mask;
uint_t mmu_exported_page_sizes;

uint_t szc_2_userszc[MMU_PAGE_SIZES];
uint_t userszc_2_szc[MMU_PAGE_SIZES];

extern uint_t vac_colors_mask;
extern int vac_shift;

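/*
 * Table of the page sizes exported by the hardware, indexed by page size
 * code (szc).  Each entry lists the page size, the corresponding shift,
 * and the size expressed in base pages; an all-zero entry terminates
 * the table.
 */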
hw_pagesize_t hw_page_array[] = {
        {MMU_PAGESIZE, MMU_PAGESHIFT, 0, MMU_PAGESIZE >> MMU_PAGESHIFT},
        {MMU_PAGESIZE64K, MMU_PAGESHIFT64K, 0,
            MMU_PAGESIZE64K >> MMU_PAGESHIFT},
        {MMU_PAGESIZE512K, MMU_PAGESHIFT512K, 0,
            MMU_PAGESIZE512K >> MMU_PAGESHIFT},
        {MMU_PAGESIZE4M, MMU_PAGESHIFT4M, 0, MMU_PAGESIZE4M >> MMU_PAGESHIFT},
        {MMU_PAGESIZE32M, MMU_PAGESHIFT32M, 0,
            MMU_PAGESIZE32M >> MMU_PAGESHIFT},
        {MMU_PAGESIZE256M, MMU_PAGESHIFT256M, 0,
            MMU_PAGESIZE256M >> MMU_PAGESHIFT},
        {0, 0, 0, 0}
};

/*
 * Maximum page size used to map the 64-bit memory segment
 * kmem64_base..kmem64_end.
 */
int max_bootlp_tteszc = TTE256M;

/*
 * Maximum and default segment size tunables for user heap, stack, private
 * and shared anonymous memory, and user text and initialized data.
 */
size_t max_uheap_lpsize = MMU_PAGESIZE64K;
size_t default_uheap_lpsize = MMU_PAGESIZE64K;
size_t max_ustack_lpsize = MMU_PAGESIZE64K;
size_t default_ustack_lpsize = MMU_PAGESIZE64K;
size_t max_privmap_lpsize = MMU_PAGESIZE64K;
size_t max_uidata_lpsize = MMU_PAGESIZE64K;
size_t max_utext_lpsize = MMU_PAGESIZE4M;
size_t max_shm_lpsize = MMU_PAGESIZE4M;

/*
 * Contiguous memory allocator data structures and variables.
 *
 * The sun4v kernel must provide a means to allocate physically
 * contiguous, non-relocatable memory.  The contig_mem_arena
 * and contig_mem_slab_arena exist for this purpose.  Allocations
 * that require physically contiguous non-relocatable memory should
 * be made using contig_mem_alloc() or contig_mem_alloc_align(),
 * which return memory from contig_mem_arena or contig_mem_reloc_arena.
 * These arenas import memory from the contig_mem_slab_arena one
 * contiguous chunk at a time.
 *
 * When importing slabs, an attempt is made to allocate a large page
 * to use as backing.  As a result of the non-relocatable requirement,
 * slabs are allocated from the kernel cage freelists.  If the cage does
 * not contain any free contiguous chunks large enough to satisfy the
 * slab allocation, the slab size will be downsized and the operation
 * retried.  Large slab sizes are tried first to minimize cage
 * fragmentation.  If the slab allocation is still unsuccessful, the slab
 * is allocated from outside the kernel cage.  This is undesirable because,
 * until slabs are freed, it results in non-relocatable chunks scattered
 * throughout physical memory.
 *
 * Allocations from the contig_mem_arena are backed by slabs from the
 * cage.  Allocations from the contig_mem_reloc_arena are backed by
 * slabs allocated outside the cage.  Slabs are left share locked while
 * in use to prevent non-cage slabs from being relocated.
 *
 * Since there is no guarantee that large pages will be available in
 * the kernel cage, contiguous memory is reserved and added to the
 * contig_mem_arena at boot time, making it available for later
 * contiguous memory allocations.  This reserve is used to satisfy
 * contig_mem allocations first; only when the reserve is completely
 * allocated do new slabs need to be imported.
 */
static vmem_t *contig_mem_slab_arena;
static vmem_t *contig_mem_arena;
static vmem_t *contig_mem_reloc_arena;
static kmutex_t contig_mem_lock;
#define	CONTIG_MEM_ARENA_QUANTUM	64
#define	CONTIG_MEM_SLAB_ARENA_QUANTUM	MMU_PAGESIZE64K

/* contig_mem_arena import slab sizes, in decreasing size order */
static size_t contig_mem_import_sizes[] = {
        MMU_PAGESIZE4M,
        MMU_PAGESIZE512K,
        MMU_PAGESIZE64K
};
#define	NUM_IMPORT_SIZES	\
	(sizeof (contig_mem_import_sizes) / sizeof (size_t))
static size_t contig_mem_import_size_max = MMU_PAGESIZE4M;
size_t contig_mem_slab_size = MMU_PAGESIZE4M;

/* Boot-time allocated buffer to pre-populate the contig_mem_arena */
static size_t contig_mem_prealloc_size;
static void *contig_mem_prealloc_buf;

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user.  We will pick an address
 * range which is just below the current stack limit.  The
 * algorithm used for cache consistency on machines with virtual
 * address caches is such that offset 0 in the vnode is always
 * on a shm_alignment aligned address.  Unfortunately, this
 * means that vnodes which are demand paged will not be mapped
 * cache consistently with the executable images.  When the
 * cache alignment for a given object is inconsistent, the
 * lower level code must manage the translations so that this
 * is not seen here (at the cost of efficiency, of course).
 *
 * Every mapping will have a redzone of a single page on either side of
 * the request.  This is done to leave one page unmapped between segments.
 * This is not required, but it's useful for the user because if their
 * program strays across a segment boundary, it will catch a fault
 * immediately, making debugging a little easier.  Currently the redzone
 * is mandatory.
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion.  For MAP_ALIGN, addrp contains the
 *	minimal alignment, which must be some "power of two" multiple of
 *	pagesize.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space or else an address that is currently
 *	not mapped for len bytes with a page of red zone on either side.
 *	If vacalign is true, then the selected address will obey the
 *	alignment constraints of a vac machine based on the given off value.
 */
/*ARGSUSED3*/
void
map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
    caddr_t userlimit, struct proc *p, uint_t flags)
{
        struct as *as = p->p_as;
        caddr_t addr;
        caddr_t base;
        size_t slen;
        uintptr_t align_amount;
        int allow_largepage_alignment = 1;

        base = p->p_brkbase;
        if (userlimit < as->a_userlimit) {
                /*
                 * This happens when a program wants to map something in
                 * a range that's accessible to a program in a smaller
                 * address space.  For example, a 64-bit program might
                 * be calling mmap32(2) to guarantee that the returned
                 * address is below 4Gbytes.
                 */
                ASSERT(userlimit > base);
                slen = userlimit - base;
        } else {
                slen = p->p_usrstack - base -
                    ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK);
        }
        /* Make len be a multiple of PAGESIZE */
        len = (len + PAGEOFFSET) & PAGEMASK;

        /*
         * If the request is larger than the size of a particular
         * mmu level, then we use that level to map the request.
         * But this requires that both the virtual and the physical
         * addresses be aligned with respect to that level, so we
         * do the virtual bit of nastiness here.
         *
         * For 32-bit processes, only those which have specified
         * MAP_ALIGN or an addr will be aligned on a page size > 4MB.
         * Otherwise we can potentially waste up to 256MB of the 4G
         * process address space just for alignment.
         *
         * XXXQ Should iterate through hw_page_array here to catch
         * all supported pagesizes
         */
        if (p->p_model == DATAMODEL_ILP32 && ((flags & MAP_ALIGN) == 0 ||
            ((uintptr_t)*addrp) != 0)) {
                allow_largepage_alignment = 0;
        }
        if ((mmu_page_sizes == max_mmu_page_sizes) &&
            allow_largepage_alignment &&
            (len >= MMU_PAGESIZE256M)) {	/* 256MB mappings */
                align_amount = MMU_PAGESIZE256M;
        } else if ((mmu_page_sizes == max_mmu_page_sizes) &&
            allow_largepage_alignment &&
            (len >= MMU_PAGESIZE32M)) {		/* 32MB mappings */
                align_amount = MMU_PAGESIZE32M;
        } else if (len >= MMU_PAGESIZE4M) {	/* 4MB mappings */
                align_amount = MMU_PAGESIZE4M;
        } else if (len >= MMU_PAGESIZE512K) {	/* 512KB mappings */
                align_amount = MMU_PAGESIZE512K;
        } else if (len >= MMU_PAGESIZE64K) {	/* 64KB mappings */
                align_amount = MMU_PAGESIZE64K;
        } else {
                /*
                 * Align virtual addresses on a 64K boundary to ensure
                 * that ELF shared libraries are mapped with the appropriate
                 * alignment constraints by the run-time linker.
                 */
                align_amount = ELF_SPARC_MAXPGSZ;
                if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp != 0) &&
                    ((uintptr_t)*addrp < align_amount))
                        align_amount = (uintptr_t)*addrp;
        }

        /*
         * 64-bit processes require 1024K alignment of ELF shared libraries.
         */
        if (p->p_model == DATAMODEL_LP64)
                align_amount = MAX(align_amount, ELF_SPARCV9_MAXPGSZ);
#ifdef VAC
        if (vac && vacalign && (align_amount < shm_alignment))
                align_amount = shm_alignment;
#endif

        if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
                align_amount = (uintptr_t)*addrp;
        }

        ASSERT(ISP2(align_amount));
        ASSERT(align_amount == 0 || align_amount >= PAGESIZE);

        /*
         * Look for a large enough hole starting below the stack limit.
         * After finding it, use the upper part.
         */
        as_purge(as);
        off = off & (align_amount - 1);
        if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
            PAGESIZE, off) == 0) {
                caddr_t as_addr;

                /*
                 * addr is the highest possible address to use since we have
                 * a PAGESIZE redzone at the beginning and end.
                 */
                addr = base + slen - (PAGESIZE + len);
                as_addr = addr;
                /*
                 * Round address DOWN to the alignment amount and
                 * add the offset in.
                 * If addr is greater than as_addr, len would not be large
                 * enough to include the redzone, so we must adjust down
                 * by the alignment amount.
                 */
                addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
                addr += (long)off;
                if (addr > as_addr) {
                        addr -= align_amount;
                }

                ASSERT(addr > base);
                ASSERT(addr + len < base + slen);
                ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
                    ((uintptr_t)(off)));
                *addrp = addr;

        } else {
                *addrp = NULL;	/* no more virtual space */
        }
}

/*
 * Platform-dependent page scrub call.
 * We call the hypervisor to scrub the page.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
        uint64_t pa, length;

        pa = ((uint64_t)pp->p_pagenum << MMU_PAGESHIFT) + off;
        length = (uint64_t)len;

        (void) mem_scrub(pa, length);
}

void
sync_data_memory(caddr_t va, size_t len)
{
        /* Call memory sync function */
        (void) mem_sync(va, len);
}

size_t
mmu_get_kernel_lpsize(size_t lpsize)
{
        extern int mmu_exported_pagesize_mask;
        uint_t tte;

        if (lpsize == 0) {
                /* no setting for segkmem_lpsize in /etc/system: use default */
                if (mmu_exported_pagesize_mask & (1 << TTE256M)) {
                        lpsize = MMU_PAGESIZE256M;
                } else if (mmu_exported_pagesize_mask & (1 << TTE4M)) {
                        lpsize = MMU_PAGESIZE4M;
                } else if (mmu_exported_pagesize_mask & (1 << TTE64K)) {
                        lpsize = MMU_PAGESIZE64K;
                } else {
                        lpsize = MMU_PAGESIZE;
                }

                return (lpsize);
        }

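        /*
         * Validate the requested size against the page sizes actually
         * exported by the hardware; if it matches none of them, fall
         * back to the base 8K page size below.
         */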
        for (tte = TTE8K; tte <= TTE256M; tte++) {

                if ((mmu_exported_pagesize_mask & (1 << tte)) == 0)
                        continue;

                if (lpsize == TTEBYTES(tte))
                        return (lpsize);
        }

        lpsize = TTEBYTES(TTE8K);
        return (lpsize);
}

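/*
 * The following two routines are required by common code but need no
 * additional MMU setup here on sun4v.
 */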
void
mmu_init_kcontext()
{
}

/*ARGSUSED*/
void
mmu_init_kernel_pgsz(struct hat *hat)
{
}

static void *
contig_mem_span_alloc(vmem_t *vmp, size_t size, int vmflag)
{
        page_t *ppl;
        page_t *rootpp;
        caddr_t addr = NULL;
        pgcnt_t npages = btopr(size);
        page_t **ppa;
        int pgflags;
        spgcnt_t i = 0;

        ASSERT(size <= contig_mem_import_size_max);
        ASSERT((size & (size - 1)) == 0);

        if ((addr = vmem_xalloc(vmp, size, size, 0, 0,
            NULL, NULL, vmflag)) == NULL) {
                return (NULL);
        }

        /* The address should be slab-size aligned. */
        ASSERT(((uintptr_t)addr & (size - 1)) == 0);

        if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
                vmem_xfree(vmp, addr, size);
                return (NULL);
        }

        pgflags = PG_EXCL;
        if (vmflag & VM_NORELOC)
                pgflags |= PG_NORELOC;

        ppl = page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
            pgflags, &kvseg, addr, NULL);

        if (ppl == NULL) {
                vmem_xfree(vmp, addr, size);
                page_unresv(npages);
                return (NULL);
        }

        rootpp = ppl;
        ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
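        /*
         * Unlink each constituent page from the list into ppa[] and
         * release the io lock held on it.
         */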
        while (ppl != NULL) {
                page_t *pp = ppl;
                ppa[i++] = pp;
                page_sub(&ppl, pp);
                ASSERT(page_iolock_assert(pp));
                ASSERT(PAGE_EXCL(pp));
                page_io_unlock(pp);
        }

        /*
         * Load the locked entry.  It's OK to preload the entry into
         * the TSB since we now support large mappings in the kernel TSB.
         */
        hat_memload_array(kas.a_hat, (caddr_t)rootpp->p_offset, size,
            ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC, HAT_LOAD_LOCK);

        ASSERT(i == page_get_pagecnt(ppa[0]->p_szc));
        for (--i; i >= 0; --i) {
                ASSERT(ppa[i]->p_szc == ppa[0]->p_szc);
                ASSERT(page_pptonum(ppa[i]) == page_pptonum(ppa[0]) + i);
                (void) page_pp_lock(ppa[i], 0, 1);
                /*
                 * Leave the page share locked.  For non-cage pages,
                 * this would prevent memory DR if it were supported
                 * on sun4v.
                 */
                page_downgrade(ppa[i]);
        }

        kmem_free(ppa, npages * sizeof (page_t *));
        return (addr);
}

/*
 * Allocates a slab by first trying to use the largest slab size
 * in contig_mem_import_sizes and then falling back to smaller slab
 * sizes still large enough for the allocation.  The sizep argument
 * is a pointer to the requested size.  When a slab is successfully
 * allocated, the slab size, which must be >= *sizep and <=
 * contig_mem_import_size_max, is returned in the *sizep argument.
 * Returns the virtual address of the new slab.
 */
static void *
span_alloc_downsize(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
        int i;

        ASSERT(*sizep <= contig_mem_import_size_max);

        for (i = 0; i < NUM_IMPORT_SIZES; i++) {
                size_t page_size = contig_mem_import_sizes[i];

                /*
                 * Check that the alignment is also no larger than the
                 * import (large page) size.  In the case where the
                 * alignment is larger than the size, a large page
                 * large enough for the allocation is not necessarily
                 * physical-address aligned to satisfy the requested
                 * alignment.  Since alignment is required to be a
                 * power-of-2, any large page >= size && >= align will
                 * suffice.
                 */
                if (*sizep <= page_size && align <= page_size) {
                        void *addr;
                        addr = contig_mem_span_alloc(vmp, page_size, vmflag);
                        if (addr == NULL)
                                continue;
                        *sizep = page_size;
                        return (addr);
                }
                return (NULL);
        }

        return (NULL);
}

static void *
contig_mem_span_xalloc(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
        return (span_alloc_downsize(vmp, sizep, align, vmflag | VM_NORELOC));
}

static void *
contig_mem_reloc_span_xalloc(vmem_t *vmp, size_t *sizep, size_t align,
    int vmflag)
{
        ASSERT((vmflag & VM_NORELOC) == 0);
        return (span_alloc_downsize(vmp, sizep, align, vmflag));
}

/*
 * Free a span, which is always exactly one large page.
 */
static void
contig_mem_span_free(vmem_t *vmp, void *inaddr, size_t size)
{
        page_t *pp;
        caddr_t addr = inaddr;
        caddr_t eaddr;
        pgcnt_t npages = btopr(size);
        page_t *rootpp = NULL;

        ASSERT(size <= contig_mem_import_size_max);
        /* All slabs should be size aligned */
        ASSERT(((uintptr_t)addr & (size - 1)) == 0);

        hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

        for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
                pp = page_find(&kvp, (u_offset_t)(uintptr_t)addr);
                if (pp == NULL) {
                        panic("contig_mem_span_free: page not found");
                }
                if (!page_tryupgrade(pp)) {
                        page_unlock(pp);
                        pp = page_lookup(&kvp,
                            (u_offset_t)(uintptr_t)addr, SE_EXCL);
                        if (pp == NULL)
                                panic("contig_mem_span_free: page not found");
                }

                ASSERT(PAGE_EXCL(pp));
                ASSERT(size == page_get_pagesize(pp->p_szc));
                ASSERT(rootpp == NULL || rootpp->p_szc == pp->p_szc);
                ASSERT(rootpp == NULL || (page_pptonum(rootpp) +
                    (pgcnt_t)btop(addr - (caddr_t)inaddr) == page_pptonum(pp)));

                page_pp_unlock(pp, 0, 1);

                if (rootpp == NULL)
                        rootpp = pp;
        }
        page_destroy_pages(rootpp);
        page_unresv(npages);

        if (vmp != NULL)
                vmem_xfree(vmp, inaddr, size);
}

static void *
contig_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t *sizep, size_t align,
    int vmflag)
{
        ASSERT((align & (align - 1)) == 0);
        return (vmem_xalloc(vmp, *sizep, align, 0, 0, NULL, NULL, vmflag));
}

/*
 * contig_mem_alloc, contig_mem_alloc_align
 *
 * Caution: contig_mem_alloc and contig_mem_alloc_align should be
 * used only when physically contiguous non-relocatable memory is
 * required.  Furthermore, use of these allocation routines should be
 * minimized, as should the allocation size.  As described in the
 * contig_mem_arena comment block above, slab allocations fall back to
 * being outside of the cage.  Therefore, overuse of these allocation
 * routines can lead to non-relocatable large pages being allocated
 * outside the cage.  Such pages prevent the allocation of a larger page
 * occupying overlapping pages.  This can impact performance for
 * applications that utilize e.g. 256M large pages.
 */

/*
 * Allocates size-aligned contiguous memory up to contig_mem_import_size_max.
 * Size must be a power of 2.
 */
void *
contig_mem_alloc(size_t size)
{
        ASSERT((size & (size - 1)) == 0);
        return (contig_mem_alloc_align(size, size));
}

/*
 * contig_mem_alloc_align allocates real contiguous memory with the
 * specified alignment up to contig_mem_import_size_max.  The alignment must
 * be a power of 2 and no greater than contig_mem_import_size_max.  We assert
 * the alignment is a power of 2; on non-debug kernels, vmem_xalloc will
 * panic on a non-power-of-2 alignment.
 */
void *
contig_mem_alloc_align(size_t size, size_t align)
{
        void *buf;

        ASSERT(size <= contig_mem_import_size_max);
        ASSERT(align <= contig_mem_import_size_max);
        ASSERT((align & (align - 1)) == 0);

        if (align < CONTIG_MEM_ARENA_QUANTUM)
                align = CONTIG_MEM_ARENA_QUANTUM;

        /*
         * We take the lock here to serialize span allocations.
         * We do not lose concurrency for the common case, since
         * allocations that don't require new span allocations
         * are serialized by vmem_xalloc.  Serializing span
         * allocations also prevents us from trying to allocate
         * more spans than necessary.
         */
        mutex_enter(&contig_mem_lock);

        buf = vmem_xalloc(contig_mem_arena, size, align, 0, 0,
            NULL, NULL, VM_NOSLEEP | VM_NORELOC);

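        /*
         * A request no larger than a single page can be satisfied from
         * static_alloc_arena as a last resort, since any single page is
         * trivially physically contiguous.
         */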
        if ((buf == NULL) && (size <= MMU_PAGESIZE)) {
                mutex_exit(&contig_mem_lock);
                return (vmem_xalloc(static_alloc_arena, size, align, 0, 0,
                    NULL, NULL, VM_NOSLEEP));
        }

        if (buf == NULL) {
                buf = vmem_xalloc(contig_mem_reloc_arena, size, align, 0, 0,
                    NULL, NULL, VM_NOSLEEP);
        }

        mutex_exit(&contig_mem_lock);

        return (buf);
}

void
contig_mem_free(void *vaddr, size_t size)
{
        if (vmem_contains(contig_mem_arena, vaddr, size)) {
                vmem_xfree(contig_mem_arena, vaddr, size);
        } else if (size > MMU_PAGESIZE) {
                vmem_xfree(contig_mem_reloc_arena, vaddr, size);
        } else {
                vmem_xfree(static_alloc_arena, vaddr, size);
        }
}

/*
 * We create a set of stacked vmem arenas to enable us to
 * allocate large >PAGESIZE chunks of contiguous Real Address space.
 * The vmem_xcreate interface is used to create the contig_mem_arena,
 * allowing the import routine to downsize the requested slab size
 * and return a smaller slab.
 */
void
contig_mem_init(void)
{
        mutex_init(&contig_mem_lock, NULL, MUTEX_DEFAULT, NULL);

        contig_mem_slab_arena = vmem_xcreate("contig_mem_slab_arena", NULL, 0,
            CONTIG_MEM_SLAB_ARENA_QUANTUM, contig_vmem_xalloc_aligned_wrapper,
            vmem_xfree, heap_arena, 0, VM_SLEEP | VMC_XALIGN);

        contig_mem_arena = vmem_xcreate("contig_mem_arena", NULL, 0,
            CONTIG_MEM_ARENA_QUANTUM, contig_mem_span_xalloc,
            contig_mem_span_free, contig_mem_slab_arena, 0,
            VM_SLEEP | VM_BESTFIT | VMC_XALIGN);

        contig_mem_reloc_arena = vmem_xcreate("contig_mem_reloc_arena", NULL, 0,
            CONTIG_MEM_ARENA_QUANTUM, contig_mem_reloc_span_xalloc,
            contig_mem_span_free, contig_mem_slab_arena, 0,
            VM_SLEEP | VM_BESTFIT | VMC_XALIGN);

        if (contig_mem_prealloc_buf == NULL || vmem_add(contig_mem_arena,
            contig_mem_prealloc_buf, contig_mem_prealloc_size, VM_SLEEP)
            == NULL) {
                cmn_err(CE_WARN, "Failed to pre-populate contig_mem_arena");
        }
}

/*
 * In calculating how much memory to pre-allocate, we include a small
 * amount per CPU to account for per-CPU buffers, in line with measured
 * values for different size systems.  contig_mem_prealloc_base_size is
 * a CPU-specific amount to be pre-allocated before considering per-CPU
 * requirements and memory size.  We always pre-allocate a minimum amount
 * of memory determined by PREALLOC_MIN.  Beyond that, we take the minimum
 * of contig_mem_prealloc_base_size plus the per-CPU requirement and a
 * small percentage of physical memory, to prevent allocating too much on
 * smaller systems.  contig_mem_prealloc_base_size is global, allowing the
 * CPU module to increase its value if necessary.
 */
#define	PREALLOC_PER_CPU	(256 * 1024)		/* 256K */
#define	PREALLOC_PERCENT	(4)			/* 4% */
#define	PREALLOC_MIN		(16 * 1024 * 1024)	/* 16M */
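/*
 * For example, with contig_mem_prealloc_base_size left at zero, a guest
 * with 64 CPUs and 16 Gbytes of memory would pre-allocate
 * MIN(64 * 256K, 4% of 16G) = 16M, which also satisfies PREALLOC_MIN
 * and is already a multiple of 4M.
 */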
size_t contig_mem_prealloc_base_size = 0;

/*
 * Called at boot-time allowing pre-allocation of contiguous memory.
 * The argument 'alloc_base' is the requested base address for the
 * allocation and originates in startup_memlist.
 */
caddr_t
contig_mem_prealloc(caddr_t alloc_base, pgcnt_t npages)
{
        caddr_t chunkp;

        contig_mem_prealloc_size = MIN((PREALLOC_PER_CPU * ncpu_guest_max) +
            contig_mem_prealloc_base_size,
            (ptob(npages) * PREALLOC_PERCENT) / 100);
        contig_mem_prealloc_size = MAX(contig_mem_prealloc_size, PREALLOC_MIN);
        contig_mem_prealloc_size = P2ROUNDUP(contig_mem_prealloc_size,
            MMU_PAGESIZE4M);

        alloc_base = (caddr_t)roundup((uintptr_t)alloc_base, MMU_PAGESIZE4M);
        if (prom_alloc(alloc_base, contig_mem_prealloc_size,
            MMU_PAGESIZE4M) != alloc_base) {

                /*
                 * Failed.  This may mean the physical memory has holes in it
                 * and it will be more difficult to get large contiguous
                 * pieces of memory.  Since we only guarantee contiguous
                 * pieces of memory contig_mem_import_size_max or smaller,
                 * loop, getting contig_mem_import_size_max at a time, until
                 * failure or contig_mem_prealloc_size is reached.
                 */
                for (chunkp = alloc_base;
                    (chunkp - alloc_base) < contig_mem_prealloc_size;
                    chunkp += contig_mem_import_size_max) {

                        if (prom_alloc(chunkp, contig_mem_import_size_max,
                            MMU_PAGESIZE4M) != chunkp) {
                                break;
                        }
                }
                contig_mem_prealloc_size = chunkp - alloc_base;
                ASSERT(contig_mem_prealloc_size != 0);
        }

        if (contig_mem_prealloc_size != 0) {
                contig_mem_prealloc_buf = alloc_base;
        } else {
                contig_mem_prealloc_buf = NULL;
        }
        alloc_base += contig_mem_prealloc_size;

        return (alloc_base);
}

static uint_t sp_color_stride = 16;
static uint_t sp_color_mask = 0x1f;
static uint_t sp_current_color = (uint_t)-1;

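/*
 * Return a small byte offset used to "slew" the initial user stack
 * pointer at exec time.  Each exec advances sp_current_color, cycling
 * through sp_color_mask + 1 colors spaced SA(sp_color_stride) bytes
 * apart, so that the stacks of successive processes do not all start
 * at the same cache color.
 */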
size_t
exec_get_spslew(void)
{
        uint_t spcolor = atomic_inc_32_nv(&sp_current_color);
        return ((size_t)((spcolor & sp_color_mask) * SA(sp_color_stride)));
}