/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/vm.h>
#include <sys/exec.h>
#include <sys/cmn_err.h>
#include <sys/cpu_module.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>
#include <sys/error.h>
#include <sys/machsystm.h>
#include <vm/seg_kmem.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/promif.h>
#include <sys/random.h>

uint_t page_colors = 0;
uint_t page_colors_mask = 0;
uint_t page_coloring_shift = 0;
int consistent_coloring;
int update_proc_pgcolorbase_after_fork = 1;

uint_t mmu_page_sizes = MMU_PAGE_SIZES;
uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
uint_t mmu_hashcnt = MAX_HASHCNT;
uint_t max_mmu_hashcnt = MAX_HASHCNT;
size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;

/*
 * A bitmask of the page sizes supported by hardware based upon szc.
 * The base pagesize (p_szc == 0) must always be supported by the hardware.
 */
int mmu_exported_pagesize_mask;
uint_t mmu_exported_page_sizes;

uint_t szc_2_userszc[MMU_PAGE_SIZES];
uint_t userszc_2_szc[MMU_PAGE_SIZES];

extern uint_t vac_colors_mask;
extern int vac_shift;

hw_pagesize_t hw_page_array[] = {
	{MMU_PAGESIZE, MMU_PAGESHIFT, 0, MMU_PAGESIZE >> MMU_PAGESHIFT},
	{MMU_PAGESIZE64K, MMU_PAGESHIFT64K, 0,
	    MMU_PAGESIZE64K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE512K, MMU_PAGESHIFT512K, 0,
	    MMU_PAGESIZE512K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE4M, MMU_PAGESHIFT4M, 0, MMU_PAGESIZE4M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE32M, MMU_PAGESHIFT32M, 0,
	    MMU_PAGESIZE32M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE256M, MMU_PAGESHIFT256M, 0,
	    MMU_PAGESIZE256M >> MMU_PAGESHIFT},
	{0, 0, 0, 0}
};

/*
 * Maximum page size used to map 64-bit memory segment kmem64_base..kmem64_end
 */
int max_bootlp_tteszc = TTE256M;

/*
 * Maximum and default segment size tunables for user heap, stack, private
 * and shared anonymous memory, and user text and initialized data.
 */
size_t max_uheap_lpsize = MMU_PAGESIZE64K;
size_t default_uheap_lpsize = MMU_PAGESIZE64K;
size_t max_ustack_lpsize = MMU_PAGESIZE64K;
size_t default_ustack_lpsize = MMU_PAGESIZE64K;
size_t max_privmap_lpsize = MMU_PAGESIZE64K;
size_t max_uidata_lpsize = MMU_PAGESIZE64K;
size_t max_utext_lpsize = MMU_PAGESIZE4M;
size_t max_shm_lpsize = MMU_PAGESIZE4M;
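
/*
 * A sketch of how these might be tuned from /etc/system (values are
 * illustrative, not recommendations; each must be a page size the
 * hardware exports):
 *
 *	set max_uheap_lpsize = 0x400000
 *	set default_ustack_lpsize = 0x10000
 *	set max_shm_lpsize = 0x10000000
 */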

/*
 * Contiguous memory allocator data structures and variables.
 *
 * The sun4v kernel must provide a means to allocate physically
 * contiguous, non-relocatable memory. The contig_mem_arena
 * and contig_mem_slab_arena exist for this purpose. Allocations
 * that require physically contiguous non-relocatable memory should
 * be made using contig_mem_alloc() or contig_mem_alloc_align()
 * which return memory from contig_mem_arena or contig_mem_reloc_arena.
 * These arenas import memory from the contig_mem_slab_arena one
 * contiguous chunk at a time.
 *
 * When importing slabs, an attempt is made to allocate a large page
 * to use as backing. As a result of the non-relocatable requirement,
 * slabs are allocated from the kernel cage freelists. If the cage does
 * not contain any free contiguous chunks large enough to satisfy the
 * slab allocation, the slab size will be downsized and the operation
 * retried. Large slab sizes are tried first to minimize cage
 * fragmentation. If the slab allocation is still unsuccessful, the slab
 * is allocated from outside the kernel cage. This is undesirable because,
 * until slabs are freed, it results in non-relocatable chunks scattered
 * throughout physical memory.
 *
 * Allocations from the contig_mem_arena are backed by slabs from the
 * cage. Allocations from the contig_mem_reloc_arena are backed by
 * slabs allocated outside the cage. Slabs are left share locked while
 * in use to prevent non-cage slabs from being relocated.
 *
 * Since there is no guarantee that large pages will be available in
 * the kernel cage, contiguous memory is reserved and added to the
 * contig_mem_arena at boot time, making it available for later
 * contiguous memory allocations. This reserve will be used to satisfy
 * contig_mem allocations first and it is only when the reserve is
 * completely allocated that new slabs will need to be imported.
 */
static vmem_t *contig_mem_slab_arena;
static vmem_t *contig_mem_arena;
static vmem_t *contig_mem_reloc_arena;
static kmutex_t contig_mem_lock;
#define	CONTIG_MEM_ARENA_QUANTUM	64
#define	CONTIG_MEM_SLAB_ARENA_QUANTUM	MMU_PAGESIZE64K

/* contig_mem_arena import slab sizes, in decreasing size order */
static size_t contig_mem_import_sizes[] = {
	MMU_PAGESIZE4M,
	MMU_PAGESIZE512K,
	MMU_PAGESIZE64K
};
#define	NUM_IMPORT_SIZES	\
	(sizeof (contig_mem_import_sizes) / sizeof (size_t))
static size_t contig_mem_import_size_max = MMU_PAGESIZE4M;
size_t contig_mem_slab_size = MMU_PAGESIZE4M;

/* Boot-time allocated buffer to pre-populate the contig_mem_arena */
static size_t contig_mem_prealloc_size;
static void *contig_mem_prealloc_buf;
/*
 * The maximum amount a randomized mapping will be slewed.  We should perhaps
 * arrange things so these tunables can be separate for mmap, mmapobj, and
 * ld.so.
 */
size_t aslr_max_map_skew = 256 * 1024 * 1024; /* 256MB */

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user.  We will pick an address
 * range which is just below the current stack limit.  The
 * algorithm used for cache consistency on machines with virtual
 * address caches is such that offset 0 in the vnode is always
 * on an address aligned to shm_alignment.  Unfortunately, this
 * means that vnodes which are demand paged will not be mapped
 * cache consistently with the executable images.  When the
 * cache alignment for a given object is inconsistent, the
 * lower level code must manage the translations so that this
 * is not seen here (at the cost of efficiency, of course).
 *
 * Every mapping will have a redzone of a single page on either side of
 * the request.  This is done to leave one page unmapped between segments.
 * This is not required, but it's useful for the user because if their
 * program strays across a segment boundary, it will catch a fault
 * immediately, making debugging a little easier.  Currently the redzone
 * is mandatory.
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion.  For MAP_ALIGN, addrp contains the
 *	minimal alignment, which must be some "power of two" multiple of
 *	pagesize.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space or else an address that is currently
 *	not mapped for len bytes with a page of red zone on either side.
 *	If vacalign is true, then the selected address will obey the alignment
 *	constraints of a vac machine based on the given off value.
 */
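/*
 * For instance (an illustrative userland sketch, not code from this file),
 * MAP_ALIGN passes the desired alignment in through the addr argument,
 * here requesting any address on a 4M boundary:
 *
 *	addr = mmap((void *)(4UL * 1024 * 1024), len,
 *	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_ALIGN,
 *	    -1, 0);
 */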
/*ARGSUSED3*/
void
map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
    caddr_t userlimit, struct proc *p, uint_t flags)
{
	struct as *as = p->p_as;
	caddr_t addr;
	caddr_t base;
	size_t slen;
	uintptr_t align_amount;
	int allow_largepage_alignment = 1;

	base = p->p_brkbase;
	if (userlimit < as->a_userlimit) {
		/*
		 * This happens when a program wants to map something in
		 * a range that's accessible to a program in a smaller
		 * address space.  For example, a 64-bit program might
		 * be calling mmap32(2) to guarantee that the returned
		 * address is below 4Gbytes.
		 */
		ASSERT(userlimit > base);
		slen = userlimit - base;
	} else {
		slen = p->p_usrstack - base -
		    ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK);
	}

	/* Make len be a multiple of PAGESIZE */
	len = (len + PAGEOFFSET) & PAGEMASK;

	/*
	 * If the request is larger than the size of a particular
	 * mmu level, then we use that level to map the request.
	 * But this requires that both the virtual and the physical
	 * addresses be aligned with respect to that level, so we
	 * do the virtual bit of nastiness here.
	 *
	 * For 32-bit processes, only those which have specified
	 * MAP_ALIGN or an addr will be aligned on a page size > 4MB. Otherwise
	 * we can potentially waste up to 256MB of the 4G process address
	 * space just for alignment.
	 *
	 * XXXQ Should iterate through hw_page_array here to catch
	 * all supported pagesizes
	 */
	if (p->p_model == DATAMODEL_ILP32 && ((flags & MAP_ALIGN) == 0 ||
	    ((uintptr_t)*addrp) != 0)) {
		allow_largepage_alignment = 0;
	}
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE256M)) {	/* 256MB mappings */
		align_amount = MMU_PAGESIZE256M;
	} else if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE32M)) {		/* 32MB mappings */
		align_amount = MMU_PAGESIZE32M;
	} else if (len >= MMU_PAGESIZE4M) {	/* 4MB mappings */
		align_amount = MMU_PAGESIZE4M;
	} else if (len >= MMU_PAGESIZE512K) {	/* 512KB mappings */
		align_amount = MMU_PAGESIZE512K;
	} else if (len >= MMU_PAGESIZE64K) {	/* 64KB mappings */
		align_amount = MMU_PAGESIZE64K;
	} else {
		/*
		 * Align virtual addresses on a 64K boundary to ensure
		 * that ELF shared libraries are mapped with the appropriate
		 * alignment constraints by the run-time linker.
		 */
		align_amount = ELF_SPARC_MAXPGSZ;
		if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp != 0) &&
		    ((uintptr_t)*addrp < align_amount))
			align_amount = (uintptr_t)*addrp;
	}

	/*
	 * 64-bit processes require 1024K alignment of ELF shared libraries.
	 */
	if (p->p_model == DATAMODEL_LP64)
		align_amount = MAX(align_amount, ELF_SPARCV9_MAXPGSZ);
#ifdef VAC
	if (vac && vacalign && (align_amount < shm_alignment))
		align_amount = shm_alignment;
#endif

	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
		align_amount = (uintptr_t)*addrp;
	}

	ASSERT(ISP2(align_amount));
	ASSERT(align_amount == 0 || align_amount >= PAGESIZE);

	/*
	 * Look for a large enough hole starting below the stack limit.
	 * After finding it, use the upper part.
	 */
	as_purge(as);
	off = off & (align_amount - 1);
	if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
	    PAGESIZE, off) == 0) {
		caddr_t as_addr;

		/*
		 * addr is the highest possible address to use since we have
		 * a PAGESIZE redzone at the beginning and end.
		 */
		addr = base + slen - (PAGESIZE + len);
		as_addr = addr;
		/*
		 * Round address DOWN to the alignment amount and
		 * add the offset in.
		 * If addr is greater than as_addr, len would not be large
		 * enough to include the redzone, so we must adjust down
		 * by the alignment amount.
		 */
		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
		addr += (long)off;
		if (addr > as_addr) {
			addr -= align_amount;
		}

		/*
		 * If randomization is requested, slew the allocation
		 * backwards, within the same gap, by a random amount.
		 */
		if (flags & _MAP_RANDOMIZE) {
			uint32_t slew;

			(void) random_get_pseudo_bytes((uint8_t *)&slew,
			    sizeof (slew));

			slew = slew % MIN(aslr_max_map_skew, (addr - base));
			addr -= P2ALIGN(slew, align_amount);
		}
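
		/*
		 * Worked example (illustrative numbers): with
		 * align_amount = 64K, off = 0, and slew = 0x123456,
		 * P2ALIGN(0x123456, 0x10000) = 0x120000, so addr moves
		 * down by 0x120000 and remains 64K aligned.
		 */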
		ASSERT(addr > base);
		ASSERT(addr + len < base + slen);
		ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
		    ((uintptr_t)(off)));
		*addrp = addr;

	} else {
		*addrp = NULL;	/* no more virtual space */
	}
}

/*
 * Platform-dependent page scrub call.
 * We call the hypervisor to scrub the page.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
	uint64_t pa, length;

	/* Parenthesized so "off" is added to, not folded into, the shift. */
	pa = ((uint64_t)pp->p_pagenum << MMU_PAGESHIFT) + off;
	length = (uint64_t)len;

	(void) mem_scrub(pa, length);
}

void
sync_data_memory(caddr_t va, size_t len)
{
	/* Call memory sync function */
	(void) mem_sync(va, len);
}

size_t
mmu_get_kernel_lpsize(size_t lpsize)
{
	extern int mmu_exported_pagesize_mask;
	uint_t tte;

	if (lpsize == 0) {
		/* no setting for segkmem_lpsize in /etc/system: use default */
		if (mmu_exported_pagesize_mask & (1 << TTE256M)) {
			lpsize = MMU_PAGESIZE256M;
		} else if (mmu_exported_pagesize_mask & (1 << TTE4M)) {
			lpsize = MMU_PAGESIZE4M;
		} else if (mmu_exported_pagesize_mask & (1 << TTE64K)) {
			lpsize = MMU_PAGESIZE64K;
		} else {
			lpsize = MMU_PAGESIZE;
		}

		return (lpsize);
	}

	for (tte = TTE8K; tte <= TTE256M; tte++) {

		if ((mmu_exported_pagesize_mask & (1 << tte)) == 0)
			continue;

		if (lpsize == TTEBYTES(tte))
			return (lpsize);
	}

	lpsize = TTEBYTES(TTE8K);
	return (lpsize);
}
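
/*
 * As an illustration (the value is an example, not a recommendation), a
 * 4M kernel heap large page size could be requested via /etc/system and
 * would be validated against mmu_exported_pagesize_mask by the routine
 * above:
 *
 *	set segkmem_lpsize = 0x400000
 */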

void
mmu_init_kcontext()
{
}

/*ARGSUSED*/
void
mmu_init_kernel_pgsz(struct hat *hat)
{
}

static void *
contig_mem_span_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	page_t *ppl;
	page_t *rootpp;
	caddr_t addr = NULL;
	pgcnt_t npages = btopr(size);
	page_t **ppa;
	int pgflags;
	spgcnt_t i = 0;

	ASSERT(size <= contig_mem_import_size_max);
	ASSERT((size & (size - 1)) == 0);

	if ((addr = vmem_xalloc(vmp, size, size, 0, 0,
	    NULL, NULL, vmflag)) == NULL) {
		return (NULL);
	}

	/* The address should be slab-size aligned. */
	ASSERT(((uintptr_t)addr & (size - 1)) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		vmem_xfree(vmp, addr, size);
		return (NULL);
	}

	pgflags = PG_EXCL;
	if (vmflag & VM_NORELOC)
		pgflags |= PG_NORELOC;

	ppl = page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kvseg, addr, NULL);

	if (ppl == NULL) {
		vmem_xfree(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	rootpp = ppl;
	ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
	while (ppl != NULL) {
		page_t *pp = ppl;
		ppa[i++] = pp;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		ASSERT(PAGE_EXCL(pp));
		page_io_unlock(pp);
	}

	/*
	 * Load the locked entry.  It's OK to preload the entry into
	 * the TSB since we now support large mappings in the kernel TSB.
	 */
	hat_memload_array(kas.a_hat, (caddr_t)rootpp->p_offset, size,
	    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC, HAT_LOAD_LOCK);

	ASSERT(i == page_get_pagecnt(ppa[0]->p_szc));
	for (--i; i >= 0; --i) {
		ASSERT(ppa[i]->p_szc == ppa[0]->p_szc);
		ASSERT(page_pptonum(ppa[i]) == page_pptonum(ppa[0]) + i);
		(void) page_pp_lock(ppa[i], 0, 1);
		/*
		 * Leave the page share locked.  For non-cage pages,
		 * this would prevent memory DR if it were supported
		 * on sun4v.
		 */
		page_downgrade(ppa[i]);
	}

	kmem_free(ppa, npages * sizeof (page_t *));
	return (addr);
}

/*
 * Allocates a slab by first trying to use the largest slab size
 * in contig_mem_import_sizes and then falling back to smaller slab
 * sizes still large enough for the allocation. The sizep argument
 * is a pointer to the requested size. When a slab is successfully
 * allocated, the slab size, which must be >= *sizep and <=
 * contig_mem_import_size_max, is returned in the *sizep argument.
 * Returns the virtual address of the new slab.
 */
static void *
span_alloc_downsize(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
	int i;

	ASSERT(*sizep <= contig_mem_import_size_max);

	for (i = 0; i < NUM_IMPORT_SIZES; i++) {
		size_t page_size = contig_mem_import_sizes[i];

		/*
		 * Check that the alignment is also less than the
		 * import (large page) size. In the case where the
		 * alignment is larger than the size, a large page
		 * large enough for the allocation is not necessarily
		 * physical-address aligned to satisfy the requested
		 * alignment. Since alignment is required to be a
		 * power-of-2, any large page >= size && >= align will
		 * suffice.
		 */
		if (*sizep <= page_size && align <= page_size) {
			void *addr;
			addr = contig_mem_span_alloc(vmp, page_size, vmflag);
			if (addr == NULL)
				continue;
			*sizep = page_size;
			return (addr);
		}
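		/*
		 * Import sizes are tried in decreasing order, so once
		 * the size or alignment exceeds the current (largest
		 * remaining) import size, no smaller one can fit either.
		 */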
		return (NULL);
	}

	return (NULL);
}

static void *
contig_mem_span_xalloc(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
	return (span_alloc_downsize(vmp, sizep, align, vmflag | VM_NORELOC));
}

static void *
contig_mem_reloc_span_xalloc(vmem_t *vmp, size_t *sizep, size_t align,
    int vmflag)
{
	ASSERT((vmflag & VM_NORELOC) == 0);
	return (span_alloc_downsize(vmp, sizep, align, vmflag));
}

/*
 * Free a span, which is always exactly one large page.
 */
static void
contig_mem_span_free(vmem_t *vmp, void *inaddr, size_t size)
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);
	page_t *rootpp = NULL;

	ASSERT(size <= contig_mem_import_size_max);
	/* All slabs should be size aligned */
	ASSERT(((uintptr_t)addr & (size - 1)) == 0);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pp = page_find(&kvp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL) {
			panic("contig_mem_span_free: page not found");
		}
		if (!page_tryupgrade(pp)) {
			page_unlock(pp);
			pp = page_lookup(&kvp,
			    (u_offset_t)(uintptr_t)addr, SE_EXCL);
			if (pp == NULL)
				panic("contig_mem_span_free: page not found");
		}

		ASSERT(PAGE_EXCL(pp));
		ASSERT(size == page_get_pagesize(pp->p_szc));
		ASSERT(rootpp == NULL || rootpp->p_szc == pp->p_szc);
		ASSERT(rootpp == NULL || (page_pptonum(rootpp) +
		    (pgcnt_t)btop(addr - (caddr_t)inaddr) == page_pptonum(pp)));

		page_pp_unlock(pp, 0, 1);

		if (rootpp == NULL)
			rootpp = pp;
	}
	page_destroy_pages(rootpp);
	page_unresv(npages);

	if (vmp != NULL)
		vmem_xfree(vmp, inaddr, size);
}

static void *
contig_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t *sizep, size_t align,
    int vmflag)
{
	ASSERT((align & (align - 1)) == 0);
	return (vmem_xalloc(vmp, *sizep, align, 0, 0, NULL, NULL, vmflag));
}

/*
 * contig_mem_alloc, contig_mem_alloc_align
 *
 * Caution: contig_mem_alloc and contig_mem_alloc_align should be
 * used only when physically contiguous non-relocatable memory is
 * required. Furthermore, use of these allocation routines should be
 * minimized as well as should the allocation size. As described in the
 * contig_mem_arena comment block above, slab allocations fall back to
 * being outside of the cage. Therefore, overuse of these allocation
 * routines can lead to non-relocatable large pages being allocated
 * outside the cage. Such pages prevent a larger page from later being
 * allocated over the same range. This can impact performance for
 * applications that utilize e.g. 256M large pages.
 */

/*
 * Allocates size aligned contiguous memory up to contig_mem_import_size_max.
 * Size must be a power of 2.
 */
void *
contig_mem_alloc(size_t size)
{
	ASSERT((size & (size - 1)) == 0);
	return (contig_mem_alloc_align(size, size));
}
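
/*
 * A minimal usage sketch (hypothetical caller, not code from this file);
 * the size must be a power of 2 no larger than contig_mem_import_size_max:
 *
 *	void *buf;
 *
 *	buf = contig_mem_alloc(MMU_PAGESIZE64K);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *	... use buf for I/O that needs contiguous real memory ...
 *	contig_mem_free(buf, MMU_PAGESIZE64K);
 */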

/*
 * contig_mem_alloc_align allocates real contiguous memory with the
 * specified alignment up to contig_mem_import_size_max. The alignment must
 * be a power of 2 and no greater than contig_mem_import_size_max. We assert
 * the alignment is a power of 2. For non-debug, vmem_xalloc will panic
 * for non power of 2 alignments.
 */
void *
contig_mem_alloc_align(size_t size, size_t align)
{
	void *buf;

	ASSERT(size <= contig_mem_import_size_max);
	ASSERT(align <= contig_mem_import_size_max);
	ASSERT((align & (align - 1)) == 0);

	if (align < CONTIG_MEM_ARENA_QUANTUM)
		align = CONTIG_MEM_ARENA_QUANTUM;

	/*
	 * We take the lock here to serialize span allocations.
	 * We do not lose concurrency for the common case, since
	 * allocations that don't require new span allocations
	 * are serialized by vmem_xalloc. Serializing span
	 * allocations also prevents us from trying to allocate
	 * more spans than necessary.
	 */
	mutex_enter(&contig_mem_lock);

	buf = vmem_xalloc(contig_mem_arena, size, align, 0, 0,
	    NULL, NULL, VM_NOSLEEP | VM_NORELOC);

	if ((buf == NULL) && (size <= MMU_PAGESIZE)) {
		mutex_exit(&contig_mem_lock);
		return (vmem_xalloc(static_alloc_arena, size, align, 0, 0,
		    NULL, NULL, VM_NOSLEEP));
	}

	if (buf == NULL) {
		buf = vmem_xalloc(contig_mem_reloc_arena, size, align, 0, 0,
		    NULL, NULL, VM_NOSLEEP);
	}

	mutex_exit(&contig_mem_lock);

	return (buf);
}

void
contig_mem_free(void *vaddr, size_t size)
{
	if (vmem_contains(contig_mem_arena, vaddr, size)) {
		vmem_xfree(contig_mem_arena, vaddr, size);
	} else if (size > MMU_PAGESIZE) {
		vmem_xfree(contig_mem_reloc_arena, vaddr, size);
	} else {
		vmem_xfree(static_alloc_arena, vaddr, size);
	}
}

/*
 * We create a set of stacked vmem arenas to enable us to
 * allocate large >PAGESIZE chunks of contiguous Real Address space.
 * The vmem_xcreate interface is used to create the contig_mem_arena
 * allowing the import routine to downsize the requested slab size
 * and return a smaller slab.
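 *
 * A rough sketch of the resulting arena stack (imports flow downward;
 * both leaf arenas draw large-page slabs from the slab arena):
 *
 *	contig_mem_arena	contig_mem_reloc_arena
 *	             \             /
 *	        contig_mem_slab_arena
 *	                  |
 *	              heap_arena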
 */
void
contig_mem_init(void)
{
	mutex_init(&contig_mem_lock, NULL, MUTEX_DEFAULT, NULL);

	contig_mem_slab_arena = vmem_xcreate("contig_mem_slab_arena", NULL, 0,
	    CONTIG_MEM_SLAB_ARENA_QUANTUM, contig_vmem_xalloc_aligned_wrapper,
	    vmem_xfree, heap_arena, 0, VM_SLEEP | VMC_XALIGN);

	contig_mem_arena = vmem_xcreate("contig_mem_arena", NULL, 0,
	    CONTIG_MEM_ARENA_QUANTUM, contig_mem_span_xalloc,
	    contig_mem_span_free, contig_mem_slab_arena, 0,
	    VM_SLEEP | VM_BESTFIT | VMC_XALIGN);

	contig_mem_reloc_arena = vmem_xcreate("contig_mem_reloc_arena", NULL, 0,
	    CONTIG_MEM_ARENA_QUANTUM, contig_mem_reloc_span_xalloc,
	    contig_mem_span_free, contig_mem_slab_arena, 0,
	    VM_SLEEP | VM_BESTFIT | VMC_XALIGN);

	if (contig_mem_prealloc_buf == NULL || vmem_add(contig_mem_arena,
	    contig_mem_prealloc_buf, contig_mem_prealloc_size, VM_SLEEP)
	    == NULL) {
		cmn_err(CE_WARN, "Failed to pre-populate contig_mem_arena");
	}
}

/*
 * In calculating how much memory to pre-allocate, we include a small
 * amount per-CPU to account for per-CPU buffers in line with measured
 * values for different size systems. contig_mem_prealloc_base_size is
 * a CPU-specific amount to be pre-allocated before considering per-CPU
 * requirements and memory size. We always pre-allocate a minimum amount
 * of memory determined by PREALLOC_MIN. Beyond that, we take the minimum
 * of contig_mem_prealloc_base_size and a small percentage of physical
 * memory to prevent allocating too much on smaller systems.
 * contig_mem_prealloc_base_size is global, allowing for the CPU module
 * to increase its value if necessary.
 */
#define	PREALLOC_PER_CPU	(256 * 1024)		/* 256K */
#define	PREALLOC_PERCENT	(4)			/* 4% */
#define	PREALLOC_MIN		(16 * 1024 * 1024)	/* 16M */
size_t contig_mem_prealloc_base_size = 0;
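
/*
 * Worked example (illustrative numbers, not measurements): with
 * ncpu_guest_max = 64, contig_mem_prealloc_base_size = 0, and 16GB of
 * memory, the per-CPU term is 64 * 256K = 16M while 4% of memory is
 * ~655M; the MIN of the two is 16M, which already meets PREALLOC_MIN
 * and is a multiple of 4M, so 16M is pre-allocated.
 */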

/*
 * Called at boot-time allowing pre-allocation of contiguous memory.
 * The argument 'alloc_base' is the requested base address for the
 * allocation and originates in startup_memlist.
 */
caddr_t
contig_mem_prealloc(caddr_t alloc_base, pgcnt_t npages)
{
	caddr_t chunkp;

	contig_mem_prealloc_size = MIN((PREALLOC_PER_CPU * ncpu_guest_max) +
	    contig_mem_prealloc_base_size,
	    (ptob(npages) * PREALLOC_PERCENT) / 100);
	contig_mem_prealloc_size = MAX(contig_mem_prealloc_size, PREALLOC_MIN);
	contig_mem_prealloc_size = P2ROUNDUP(contig_mem_prealloc_size,
	    MMU_PAGESIZE4M);

	alloc_base = (caddr_t)roundup((uintptr_t)alloc_base, MMU_PAGESIZE4M);
	if (prom_alloc(alloc_base, contig_mem_prealloc_size,
	    MMU_PAGESIZE4M) != alloc_base) {

		/*
		 * Failed. This may mean the physical memory has holes in it
		 * and it will be more difficult to get large contiguous
		 * pieces of memory. Since we only guarantee contiguous
		 * pieces of memory contig_mem_import_size_max or smaller,
		 * loop, getting contig_mem_import_size_max at a time, until
		 * failure or contig_mem_prealloc_size is reached.
		 */
		for (chunkp = alloc_base;
		    (chunkp - alloc_base) < contig_mem_prealloc_size;
		    chunkp += contig_mem_import_size_max) {

			if (prom_alloc(chunkp, contig_mem_import_size_max,
			    MMU_PAGESIZE4M) != chunkp) {
				break;
			}
		}
		contig_mem_prealloc_size = chunkp - alloc_base;
		ASSERT(contig_mem_prealloc_size != 0);
	}

	if (contig_mem_prealloc_size != 0) {
		contig_mem_prealloc_buf = alloc_base;
	} else {
		contig_mem_prealloc_buf = NULL;
	}
	alloc_base += contig_mem_prealloc_size;

	return (alloc_base);
}