/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/*	All Rights Reserved   */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/vm.h>
#include <sys/exec.h>
#include <sys/cmn_err.h>
#include <sys/cpu_module.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>
#include <sys/error.h>
#include <sys/machsystm.h>
#include <vm/seg_kmem.h>
#include <sys/stack.h>
#include <sys/atomic.h>
uint_t page_colors = 0;
uint_t page_colors_mask = 0;
uint_t page_coloring_shift = 0;
int consistent_coloring;

uint_t mmu_page_sizes = MMU_PAGE_SIZES;
uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
uint_t mmu_hashcnt = MAX_HASHCNT;
uint_t max_mmu_hashcnt = MAX_HASHCNT;
size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;

/*
 * A bitmask of the page sizes supported by hardware based upon szc.
 * The base pagesize (p_szc == 0) must always be supported by the hardware.
 */
int mmu_exported_pagesize_mask;
uint_t mmu_exported_page_sizes;

uint_t szc_2_userszc[MMU_PAGE_SIZES];
uint_t userszc_2_szc[MMU_PAGE_SIZES];

extern uint_t vac_colors_mask;
extern int vac_shift;
hw_pagesize_t hw_page_array[] = {
	{MMU_PAGESIZE, MMU_PAGESHIFT, 0, MMU_PAGESIZE >> MMU_PAGESHIFT},
	{MMU_PAGESIZE64K, MMU_PAGESHIFT64K, 0,
	    MMU_PAGESIZE64K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE512K, MMU_PAGESHIFT512K, 0,
	    MMU_PAGESIZE512K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE4M, MMU_PAGESHIFT4M, 0, MMU_PAGESIZE4M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE32M, MMU_PAGESHIFT32M, 0,
	    MMU_PAGESIZE32M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE256M, MMU_PAGESHIFT256M, 0,
	    MMU_PAGESIZE256M >> MMU_PAGESHIFT},
	{0, 0, 0, 0}
};
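
/*
 * For reference, the resulting szc -> page size mapping on sun4v is:
 * 0 = 8K, 1 = 64K, 2 = 512K, 3 = 4M, 4 = 32M, 5 = 256M.  The final
 * field of each entry is the page count in units of the 8K base page.
 */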

/*
 * Maximum and default segment size tunables for user heap, stack, private
 * and shared anonymous memory, and user text and initialized data.
 */
size_t max_uheap_lpsize = MMU_PAGESIZE64K;
size_t default_uheap_lpsize = MMU_PAGESIZE64K;
size_t max_ustack_lpsize = MMU_PAGESIZE64K;
size_t default_ustack_lpsize = MMU_PAGESIZE64K;
size_t max_privmap_lpsize = MMU_PAGESIZE64K;
size_t max_uidata_lpsize = MMU_PAGESIZE64K;
size_t max_utext_lpsize = MMU_PAGESIZE4M;
size_t max_shm_lpsize = MMU_PAGESIZE4M;
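
/*
 * Any of the tunables above can be overridden at boot via /etc/system;
 * a minimal sketch (the values shown are illustrative, not
 * recommendations):
 *
 *	set max_uheap_lpsize = 0x400000
 *	set default_uheap_lpsize = 0x400000
 */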

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user.  We will pick an address
 * range which is just below the current stack limit.  The
 * algorithm used for cache consistency on machines with virtual
 * address caches is such that offset 0 in the vnode is always
 * on an address aligned to shm_alignment.  Unfortunately, this
 * means that vnodes which are demand paged will not be mapped
 * cache consistently with the executable images.  When the
 * cache alignment for a given object is inconsistent, the
 * lower level code must manage the translations so that this
 * is not seen here (at the cost of efficiency, of course).
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion.  For MAP_ALIGN, addrp contains the
 *	minimal alignment.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space or else an address that is currently
 *	not mapped for len bytes with a page of red zone on either side.
 *	If vacalign is true, then the selected address will obey the alignment
 *	constraints of a vac machine based on the given off value.
 */
/*ARGSUSED3*/
void
map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
    caddr_t userlimit, struct proc *p, uint_t flags)
{
	struct as *as = p->p_as;
	caddr_t addr;
	caddr_t base;
	size_t slen;
	uintptr_t align_amount;
	int allow_largepage_alignment = 1;

	base = p->p_brkbase;
	if (userlimit < as->a_userlimit) {
		/*
		 * This happens when a program wants to map something in
		 * a range that's accessible to a program in a smaller
		 * address space.  For example, a 64-bit program might
		 * be calling mmap32(2) to guarantee that the returned
		 * address is below 4Gbytes.
		 */
		ASSERT(userlimit > base);
		slen = userlimit - base;
	} else {
		slen = p->p_usrstack - base - (((size_t)rctl_enforced_value(
		    rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p) + PAGEOFFSET)
		    & PAGEMASK);
	}
	len = (len + PAGEOFFSET) & PAGEMASK;

	/*
	 * Redzone for each side of the request. This is done to leave
	 * one page unmapped between segments. This is not required, but
	 * it's useful for the user because if their program strays across
	 * a segment boundary, it will catch a fault immediately making
	 * debugging a little easier.
	 */
	len += (2 * PAGESIZE);

	/*
	 * If the request is larger than the size of a particular
	 * mmu level, then we use that level to map the request.
	 * But this requires that both the virtual and the physical
	 * addresses be aligned with respect to that level, so we
	 * do the virtual bit of nastiness here.
	 *
	 * For 32-bit processes, only those which have specified
	 * MAP_ALIGN or an addr will be aligned on a page size > 4MB. Otherwise
	 * we can potentially waste up to 256MB of the 4G process address
	 * space just for alignment.
	 *
	 * XXXQ Should iterate through hw_page_array here to catch
	 * all supported pagesizes
	 */
	if (p->p_model == DATAMODEL_ILP32 && ((flags & MAP_ALIGN) == 0 ||
	    ((uintptr_t)*addrp) != 0)) {
		allow_largepage_alignment = 0;
	}
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE256M)) {	/* 256MB mappings */
		align_amount = MMU_PAGESIZE256M;
	} else if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE32M)) {		/* 32MB mappings */
		align_amount = MMU_PAGESIZE32M;
	} else if (len >= MMU_PAGESIZE4M) {	/* 4MB mappings */
		align_amount = MMU_PAGESIZE4M;
	} else if (len >= MMU_PAGESIZE512K) {	/* 512KB mappings */
		align_amount = MMU_PAGESIZE512K;
	} else if (len >= MMU_PAGESIZE64K) {	/* 64KB mappings */
		align_amount = MMU_PAGESIZE64K;
	} else {
		/*
		 * Align virtual addresses on a 64K boundary to ensure
		 * that ELF shared libraries are mapped with the appropriate
		 * alignment constraints by the run-time linker.
		 */
		align_amount = ELF_SPARC_MAXPGSZ;
		if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp != 0) &&
		    ((uintptr_t)*addrp < align_amount))
			align_amount = (uintptr_t)*addrp;
	}

	/*
	 * 64-bit processes require 1024K alignment of ELF shared libraries.
	 */
	if (p->p_model == DATAMODEL_LP64)
		align_amount = MAX(align_amount, ELF_SPARCV9_MAXPGSZ);
#ifdef VAC
	if (vac && vacalign && (align_amount < shm_alignment))
		align_amount = shm_alignment;
#endif

	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
		align_amount = (uintptr_t)*addrp;
	}
	len += align_amount;

	/*
	 * Look for a large enough hole starting below the stack limit.
	 * After finding it, use the upper part.  Addition of PAGESIZE is
	 * for the redzone as described above.
	 */
	as_purge(as);
	if (as_gap(as, len, &base, &slen, AH_HI, NULL) == 0) {
		caddr_t as_addr;

		addr = base + slen - len + PAGESIZE;
		as_addr = addr;
		/*
		 * Round address DOWN to the alignment amount,
		 * add the offset, and if this address is less
		 * than the original address, add alignment amount.
		 */
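		/*
		 * A worked example of this rounding, with purely
		 * illustrative values: take align_amount = 4M,
		 * off = 0x2000, and a candidate as_addr of 0x7fffb000:
		 *
		 *	0x7fffb000 & ~(4M - 1)		= 0x7fc00000
		 *	+ (0x2000 & (4M - 1))		= 0x7fc02000
		 *	0x7fc02000 < 0x7fffb000, so + 4M = 0x80002000
		 *
		 * The result satisfies both ASSERTs below; since len was
		 * padded by align_amount above, the hole found by as_gap()
		 * can absorb the upward adjustment.
		 */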
		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
		addr += (long)(off & (align_amount - 1l));
		if (addr < as_addr) {
			addr += align_amount;
		}

		ASSERT(addr <= (as_addr + align_amount));
		ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
		    ((uintptr_t)(off & (align_amount - 1l))));
		*addrp = addr;

	} else {
		*addrp = NULL;	/* no more virtual space */
	}
}

/*
 * Platform-dependent page scrub call.
 * We call the hypervisor to scrub the page.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
	uint64_t pa, length;

	/* Compute the real address of the region to scrub. */
	pa = (uint64_t)((pp->p_pagenum << MMU_PAGESHIFT) + off);
	length = (uint64_t)len;

	(void) mem_scrub(pa, length);
}

void
sync_data_memory(caddr_t va, size_t len)
{
	/* Call memory sync function */
	(void) mem_sync(va, len);
}

size_t
mmu_get_kernel_lpsize(size_t lpsize)
{
	extern int mmu_exported_pagesize_mask;
	uint_t tte;

	if (lpsize == 0) {
		/* no setting for segkmem_lpsize in /etc/system: use default */
		if (mmu_exported_pagesize_mask & (1 << TTE256M)) {
			lpsize = MMU_PAGESIZE256M;
		} else if (mmu_exported_pagesize_mask & (1 << TTE4M)) {
			lpsize = MMU_PAGESIZE4M;
		} else if (mmu_exported_pagesize_mask & (1 << TTE64K)) {
			lpsize = MMU_PAGESIZE64K;
		} else {
			lpsize = MMU_PAGESIZE;
		}

		return (lpsize);
	}

	for (tte = TTE8K; tte <= TTE256M; tte++) {
		if ((mmu_exported_pagesize_mask & (1 << tte)) == 0)
			continue;

		if (lpsize == TTEBYTES(tte))
			return (lpsize);
	}

	lpsize = TTEBYTES(TTE8K);
	return (lpsize);
}
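
/*
 * A minimal sketch of the /etc/system setting that the routine above
 * validates (the value shown is illustrative, not a recommendation):
 *
 *	set segkmem_lpsize = 0x400000
 *
 * A value that does not match a hardware-exported TTE size falls back
 * to the 8K base page size.
 */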

void
mmu_init_kcontext()
{
}

/*ARGSUSED*/
void
mmu_init_kernel_pgsz(struct hat *hat)
{
}

#define	QUANTUM_SIZE	64

static	vmem_t	*contig_mem_slab_arena;
static	vmem_t	*contig_mem_arena;

uint_t contig_mem_slab_size = MMU_PAGESIZE4M;

static void *
contig_mem_span_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	page_t *ppl;
	page_t *rootpp;
	caddr_t addr = NULL;
	pgcnt_t npages = btopr(size);
	page_t **ppa;
	int pgflags;
	int i = 0;

	/*
	 * The import request should be at least
	 * contig_mem_slab_size because that is the
	 * slab arena's quantum. The size can be
	 * further restricted since contiguous
	 * allocations larger than contig_mem_slab_size
	 * are not supported here.
	 */
	ASSERT(size == contig_mem_slab_size);

	if ((addr = vmem_xalloc(vmp, size, size, 0, 0,
	    NULL, NULL, vmflag)) == NULL) {
		return (NULL);
	}

	/* The address should be slab-size aligned. */
	ASSERT(((uintptr_t)addr & (contig_mem_slab_size - 1)) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		vmem_xfree(vmp, addr, size);
		return (NULL);
	}

	pgflags = PG_EXCL;
	if ((vmflag & VM_NOSLEEP) == 0)
		pgflags |= PG_WAIT;
	if (vmflag & VM_PANIC)
		pgflags |= PG_PANIC;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;

	ppl = page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kvseg, addr, NULL);

	if (ppl == NULL) {
		vmem_xfree(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	rootpp = ppl;
	ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
	while (ppl != NULL) {
		page_t *pp = ppl;
		ppa[i++] = pp;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		page_io_unlock(pp);
	}

	/*
	 * Load the locked entry.  It's OK to preload the entry into
	 * the TSB since we now support large mappings in the kernel TSB.
	 */
	hat_memload_array(kas.a_hat, (caddr_t)rootpp->p_offset, size,
	    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC, HAT_LOAD_LOCK);

	for (--i; i >= 0; --i) {
		(void) page_pp_lock(ppa[i], 0, 1);
		page_unlock(ppa[i]);
	}

	kmem_free(ppa, npages * sizeof (page_t *));
	return (addr);
}

void
contig_mem_span_free(vmem_t *vmp, void *inaddr, size_t size)
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);
	pgcnt_t pgs_left = npages;
	page_t *rootpp = NULL;

	ASSERT(((uintptr_t)addr & (contig_mem_slab_size - 1)) == 0);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
		if (pp == NULL)
			panic("contig_mem_span_free: page not found");

		ASSERT(PAGE_EXCL(pp));
		page_pp_unlock(pp, 0, 1);

		if (rootpp == NULL)
			rootpp = pp;
		if (--pgs_left == 0) {
			/*
			 * similar logic to segspt_free_pages, but we know we
			 * have one large page.
			 */
			page_destroy_pages(rootpp);
		}
	}
	page_unresv(npages);

	if (vmp != NULL)
		vmem_xfree(vmp, inaddr, size);
}

static void *
contig_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
{
	return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
}

/*
 * contig_mem_alloc_align() allocates contiguous real (physical) memory with
 * the specified alignment, up to contig_mem_slab_size. The alignment must
 * be a power of 2.
 */
void *
contig_mem_alloc_align(size_t size, size_t align)
{
	ASSERT(align <= contig_mem_slab_size);

	if ((align & (align - 1)) != 0)
		return (NULL);

	return (vmem_xalloc(contig_mem_arena, size, align, 0, 0,
	    NULL, NULL, VM_NOSLEEP));
}

/*
 * Allocates size-aligned contiguous memory, up to contig_mem_slab_size.
 * The size must be a power of 2.
 */
void *
contig_mem_alloc(size_t size)
{
	ASSERT((size & (size - 1)) == 0);
	return (contig_mem_alloc_align(size, size));
}

void
contig_mem_free(void *vaddr, size_t size)
{
	vmem_xfree(contig_mem_arena, vaddr, size);
}

/*
 * We create a set of stacked vmem arenas to enable us to
 * allocate large >PAGESIZE chunks of contiguous Real Address space.
 * This is what the dynamic TSB support does for TSBs.
 * The contig_mem_arena import functions are exactly the same as the
 * TSB kmem_default arena import functions.
 */
void
contig_mem_init(void)
{
	contig_mem_slab_arena = vmem_create("contig_mem_slab_arena", NULL, 0,
	    contig_mem_slab_size, contig_vmem_xalloc_aligned_wrapper,
	    vmem_xfree, heap_arena, 0, VM_SLEEP);

	contig_mem_arena = vmem_create("contig_mem_arena", NULL, 0,
	    QUANTUM_SIZE, contig_mem_span_alloc, contig_mem_span_free,
	    contig_mem_slab_arena, 0, VM_SLEEP | VM_BESTFIT);
}
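
/*
 * The resulting arena stack, as a sketch (requests that miss in an
 * arena are imported from the arena below it):
 *
 *	contig_mem_arena	(64-byte quantum; imports 4M spans)
 *	        |
 *	contig_mem_slab_arena	(contig_mem_slab_size quantum, 4M)
 *	        |
 *	heap_arena		(kernel heap virtual addresses)
 */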

static uint_t sp_color_stride = 16;
static uint_t sp_color_mask = 0x1f;
static uint_t sp_current_color = (uint_t)-1;

size_t
exec_get_spslew(void)
{
	uint_t spcolor = atomic_inc_32_nv(&sp_current_color);

	return ((size_t)((spcolor & sp_color_mask) * SA(sp_color_stride)));
}
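
/*
 * A worked example with the default tunables above: sp_color_mask = 0x1f
 * gives 32 stack colors and sp_color_stride = 16 bytes between colors, so
 * successive execs receive initial stack slews of 0, SA(16), 2 * SA(16),
 * ..., 31 * SA(16), wrapping every 32 execs.  SA() rounds the stride up
 * to STACK_ALIGN.
 */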