xref: /titanic_41/usr/src/uts/common/vm/seg_kmem.c (revision 9c9af2590af49bb395bc8d2eace0f2d4ea16d165)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/types.h>
27 #include <sys/t_lock.h>
28 #include <sys/param.h>
29 #include <sys/sysmacros.h>
30 #include <sys/tuneable.h>
31 #include <sys/systm.h>
32 #include <sys/vm.h>
33 #include <sys/kmem.h>
34 #include <sys/vmem.h>
35 #include <sys/mman.h>
36 #include <sys/cmn_err.h>
37 #include <sys/debug.h>
38 #include <sys/dumphdr.h>
39 #include <sys/bootconf.h>
40 #include <sys/lgrp.h>
41 #include <vm/seg_kmem.h>
42 #include <vm/hat.h>
43 #include <vm/page.h>
44 #include <vm/vm_dep.h>
45 #include <vm/faultcode.h>
46 #include <sys/promif.h>
47 #include <vm/seg_kp.h>
48 #include <sys/bitmap.h>
49 #include <sys/mem_cage.h>
50 
51 /*
52  * seg_kmem is the primary kernel memory segment driver.  It
53  * maps the kernel heap [kernelheap, ekernelheap), module text,
54  * and all memory which was allocated before the VM was initialized
55  * into kas.
56  *
57  * Pages which belong to seg_kmem are hashed into &kvp vnode at
58  * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
59  * They must never be paged out since segkmem_fault() is a no-op to
60  * prevent recursive faults.
61  *
62  * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
63  * __x86 and are unlocked (p_sharelock == 0) on __sparc.  Once __x86
64  * supports relocation the #ifdef kludges can be removed.
65  *
66  * seg_kmem pages may be subject to relocation by page_relocate(),
67  * provided that the HAT supports it; if this is so, segkmem_reloc
68  * will be set to a nonzero value. All boot time allocated memory as
69  * well as static memory is considered off limits to relocation.
70  * Pages are "relocatable" if p_state does not have P_NORELOC set, so
71  * we request P_NORELOC pages for memory that isn't safe to relocate.
72  *
73  * The kernel heap is logically divided up into four pieces:
74  *
75  *   heap32_arena is for allocations that require 32-bit absolute
76  *   virtual addresses (e.g. code that uses 32-bit pointers/offsets).
77  *
78  *   heap_core is for allocations that require 2GB *relative*
79  *   offsets; in other words all memory from heap_core is within
80  *   2GB of all other memory from the same arena. This is a requirement
81  *   of the addressing modes of some processors in supervisor code.
82  *
83  *   heap_arena is the general heap arena.
84  *
85  *   static_arena is the static memory arena.  Allocations from it
86  *   are not subject to relocation so it is safe to use the memory
87  *   physical address as well as the virtual address (e.g. the VA to
88  *   PA translations are static).  Caches may import from static_arena;
89  *   all other static memory allocations should use static_alloc_arena.
90  *
91  * On some platforms which have limited virtual address space, seg_kmem
92  * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
93  * segkp_bitmap is non-NULL, and each bit represents a page of virtual
94  * address space which is actually seg_kp mapped.
95  */
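/*
 * As an illustrative sketch (not code from this file): a subsystem that
 * needs permanently wired memory whose VA -> PA translation never changes
 * would typically draw it from the arenas described above, e.g.
 *
 *	void *buf = vmem_alloc(static_alloc_arena, PAGESIZE, VM_SLEEP);
 *	...
 *	vmem_free(static_alloc_arena, buf, PAGESIZE);
 *
 * whereas a kmem cache with the same requirement would import from
 * static_arena instead (see the arena creation in kernelheap_init()).
 */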
96 
97 extern ulong_t *segkp_bitmap;   /* Is set if segkp is from the kernel heap */
98 
99 char *kernelheap;		/* start of primary kernel heap */
100 char *ekernelheap;		/* end of primary kernel heap */
101 struct seg kvseg;		/* primary kernel heap segment */
102 struct seg kvseg_core;		/* "core" kernel heap segment */
103 struct seg kzioseg;		/* Segment for zio mappings */
104 vmem_t *heap_arena;		/* primary kernel heap arena */
105 vmem_t *heap_core_arena;	/* core kernel heap arena */
106 char *heap_core_base;		/* start of core kernel heap arena */
107 char *heap_lp_base;		/* start of kernel large page heap arena */
108 char *heap_lp_end;		/* end of kernel large page heap arena */
109 vmem_t *hat_memload_arena;	/* HAT translation data */
110 struct seg kvseg32;		/* 32-bit kernel heap segment */
111 vmem_t *heap32_arena;		/* 32-bit kernel heap arena */
112 vmem_t *heaptext_arena;		/* heaptext arena */
113 struct as kas;			/* kernel address space */
114 struct vnode kvp;		/* vnode for all segkmem pages */
115 struct vnode zvp;		/* vnode for zfs pages */
116 int segkmem_reloc;		/* enable/disable relocatable segkmem pages */
117 vmem_t *static_arena;		/* arena for caches to import static memory */
118 vmem_t *static_alloc_arena;	/* arena for allocating static memory */
119 vmem_t *zio_arena = NULL;	/* arena for allocating zio memory */
120 vmem_t *zio_alloc_arena = NULL;	/* arena for allocating zio memory */
121 
122 /*
123  * The seg_kmem driver can map part of the kernel heap with large pages.
124  * Currently this functionality is implemented for sparc platforms only.
125  *
126  * The large page size "segkmem_lpsize" for kernel heap is selected in the
127  * platform specific code. It can also be modified via /etc/system file.
128  * Setting segkmem_lpsize to PAGESIZE in /etc/system disables usage of large
129  * pages for kernel heap. "segkmem_lpshift" is adjusted appropriately to
130  * match segkmem_lpsize.
131  *
132  * At boot time we carve from kernel heap arena a range of virtual addresses
133  * that will be used for large page mappings. This range [heap_lp_base,
134  * heap_lp_end) is set up as a separate vmem arena - "heap_lp_arena". We also
135  * create "kmem_lp_arena" that caches memory already backed by large
136  * pages. kmem_lp_arena imports virtual segments from heap_lp_arena.
137  */
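/*
 * For example (values are illustrative only), /etc/system could contain
 *
 *	set segkmem_lpsize = 0x400000
 *
 * to request 4M pages for the kernel heap, or segkmem_lpsize could be set
 * to the platform PAGESIZE to disable large kernel heap pages entirely,
 * as described above.
 */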
138 
139 size_t	segkmem_lpsize;
140 static  uint_t	segkmem_lpshift = PAGESHIFT;
141 int	segkmem_lpszc = 0;
142 
143 size_t  segkmem_kmemlp_quantum = 0x400000;	/* 4MB */
144 size_t  segkmem_heaplp_quantum;
145 vmem_t *heap_lp_arena;
146 static  vmem_t *kmem_lp_arena;
147 static  vmem_t *segkmem_ppa_arena;
148 static	segkmem_lpcb_t segkmem_lpcb;
149 
150 /*
151  * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
152  * consumed by the large page heap. By default this parameter is set to 1/8 of
153  * physmem but can be adjusted through /etc/system either directly or
154  * indirectly by setting "segkmem_kmemlp_pcnt" to the percent of physmem
155  * we allow for large page heap.
156  */
157 size_t  segkmem_kmemlp_max;
158 static  uint_t  segkmem_kmemlp_pcnt;
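/*
 * A worked example under the defaults chosen in segkmem_lpsetup(): with
 * segkmem_kmemlp_pcnt at its default of 12 and 32GB of physmem, the cap is
 * roughly 32GB * 12 / 100 ~= 3.8GB, then rounded up to a multiple of
 * segkmem_heaplp_quantum.
 */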
159 
160 /*
161  * Getting large pages for kernel heap could be problematic due to
162  * physical memory fragmentation. That's why we allow preallocation of
163  * "segkmem_kmemlp_min" bytes at boot time.
164  */
165 static  size_t	segkmem_kmemlp_min;
166 
167 /*
168  * Throttling is used to avoid expensive attempts to allocate large pages
169  * for the kernel heap when a lot of successive attempts to do so fail.
170  */
171 static  ulong_t segkmem_lpthrottle_max = 0x400000;
172 static  ulong_t segkmem_lpthrottle_start = 0x40;
173 static  ulong_t segkmem_use_lpthrottle = 1;
174 
175 /*
176  * Freed pages accumulate on a garbage list until segkmem is ready,
177  * at which point we call segkmem_gc() to free them all.
178  */
179 typedef struct segkmem_gc_list {
180 	struct segkmem_gc_list	*gc_next;
181 	vmem_t			*gc_arena;
182 	size_t			gc_size;
183 } segkmem_gc_list_t;
184 
185 static segkmem_gc_list_t *segkmem_gc_list;
186 
187 /*
188  * Allocations from the hat_memload arena add VM_MEMLOAD to their
189  * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
190  * to take steps to prevent infinite recursion.  HAT allocations also
191  * must be non-relocatable to prevent recursive page faults.
192  */
193 static void *
194 hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
195 {
196 	flags |= (VM_MEMLOAD | VM_NORELOC);
197 	return (segkmem_alloc(vmp, size, flags));
198 }
199 
200 /*
201  * Allocations from the static_arena (or any other arena that uses
202  * segkmem_alloc_permanent()) require non-relocatable (permanently
203  * wired) memory pages, since these pages are referenced by physical
204  * as well as virtual address.
205  */
206 void *
207 segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
208 {
209 	return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
210 }
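/*
 * A hedged example (the cache name and buffer type are hypothetical): a
 * kmem cache whose buffers are referenced by physical address could import
 * its memory from static_arena:
 *
 *	cp = kmem_cache_create("example_static_cache", sizeof (example_t),
 *	    0, NULL, NULL, NULL, NULL, static_arena, 0);
 */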
211 
212 /*
213  * Initialize kernel heap boundaries.
214  */
215 void
216 kernelheap_init(
217 	void *heap_start,
218 	void *heap_end,
219 	char *first_avail,
220 	void *core_start,
221 	void *core_end)
222 {
223 	uintptr_t textbase;
224 	size_t core_size;
225 	size_t heap_size;
226 	vmem_t *heaptext_parent;
227 	size_t	heap_lp_size = 0;
228 #ifdef __sparc
229 	size_t kmem64_sz = kmem64_aligned_end - kmem64_base;
230 #endif	/* __sparc */
231 
232 	kernelheap = heap_start;
233 	ekernelheap = heap_end;
234 
235 #ifdef __sparc
236 	heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
237 	/*
238 	 * Bias heap_lp start address by kmem64_sz to reduce collisions
239 	 * in 4M kernel TSB between kmem64 area and heap_lp
240 	 */
241 	kmem64_sz = P2ROUNDUP(kmem64_sz, MMU_PAGESIZE256M);
242 	if (kmem64_sz <= heap_lp_size / 2)
243 		heap_lp_size -= kmem64_sz;
244 	heap_lp_base = ekernelheap - heap_lp_size;
245 	heap_lp_end = heap_lp_base + heap_lp_size;
246 #endif	/* __sparc */
247 
248 	/*
249 	 * If this platform has a 'core' heap area, then the space for
250 	 * overflow module text should be carved out of the end of that
251 	 * heap.  Otherwise, it gets carved out of the general purpose
252 	 * heap.
253 	 */
254 	core_size = (uintptr_t)core_end - (uintptr_t)core_start;
255 	if (core_size > 0) {
256 		ASSERT(core_size >= HEAPTEXT_SIZE);
257 		textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
258 		core_size -= HEAPTEXT_SIZE;
259 	}
260 #ifndef __sparc
261 	else {
262 		ekernelheap -= HEAPTEXT_SIZE;
263 		textbase = (uintptr_t)ekernelheap;
264 	}
265 #endif
266 
267 	heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
268 	heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
269 	    segkmem_alloc, segkmem_free);
270 
271 	if (core_size > 0) {
272 		heap_core_arena = vmem_create("heap_core", core_start,
273 		    core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
274 		heap_core_base = core_start;
275 	} else {
276 		heap_core_arena = heap_arena;
277 		heap_core_base = kernelheap;
278 	}
279 
280 	/*
281 	 * Reserve space for the large page heap. If large pages for the
282 	 * kernel heap are enabled, the large page heap arena will be created
283 	 * later in the boot sequence in segkmem_heap_lp_init(). Otherwise the
284 	 * reserved range will be returned to the heap_arena.
285 	 */
286 	if (heap_lp_size) {
287 		(void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
288 		    heap_lp_base, heap_lp_end,
289 		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
290 	}
291 
292 	/*
293 	 * Remove the already-spoken-for memory range [kernelheap, first_avail).
294 	 */
295 	(void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
296 	    0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
297 
298 #ifdef __sparc
299 	heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
300 	    SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
301 	    NULL, NULL, 0, VM_SLEEP);
302 
303 	textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
304 	heaptext_parent = NULL;
305 #else	/* __sparc */
306 	heap32_arena = heap_core_arena;
307 	heaptext_parent = heap_core_arena;
308 #endif	/* __sparc */
309 
310 	heaptext_arena = vmem_create("heaptext", (void *)textbase,
311 	    HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);
312 
313 	/*
314 	 * Create a set of arenas for memory with static translations
315 	 * (e.g. VA -> PA translations cannot change).  Since using
316 	 * kernel pages by physical address implies it isn't safe to
317 	 * walk across page boundaries, the static_arena quantum must
318 	 * be PAGESIZE.  Any kmem caches that require static memory
319 	 * should source from static_arena, while direct allocations
320 	 * should only use static_alloc_arena.
321 	 */
322 	static_arena = vmem_create("static", NULL, 0, PAGESIZE,
323 	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
324 	static_alloc_arena = vmem_create("static_alloc", NULL, 0,
325 	    sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
326 	    0, VM_SLEEP);
327 
328 	/*
329 	 * Create an arena for translation data (ptes, hmes, or hblks).
330 	 * We need an arena for this because hat_memload() is essential
331 	 * to vmem_populate() (see comments in common/os/vmem.c).
332 	 *
333 	 * Note: any kmem cache that allocates from hat_memload_arena
334 	 * must be created as a KMC_NOHASH cache (i.e. no external slab
335 	 * and bufctl structures to allocate) so that slab creation doesn't
336 	 * require anything more than a single vmem_alloc().
337 	 */
338 	hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
339 	    hat_memload_alloc, segkmem_free, heap_arena, 0,
340 	    VM_SLEEP | VMC_POPULATOR);
341 }
342 
343 void
344 boot_mapin(caddr_t addr, size_t size)
345 {
346 	caddr_t	 eaddr;
347 	page_t	*pp;
348 	pfn_t	 pfnum;
349 
350 	if (page_resv(btop(size), KM_NOSLEEP) == 0)
351 		panic("boot_mapin: page_resv failed");
352 
353 	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
354 		pfnum = va_to_pfn(addr);
355 		if (pfnum == PFN_INVALID)
356 			continue;
357 		if ((pp = page_numtopp_nolock(pfnum)) == NULL)
358 			panic("boot_mapin(): No pp for pfnum = %lx", pfnum);
359 
360 		/*
361 		 * We must break up any large pages that may have constituent
362 		 * pages being utilized for BOP_ALLOC()'s before calling
363 		 * page_numtopp().  The locking code (i.e. page_reclaim())
364 		 * can't handle them.
365 		 */
366 		if (pp->p_szc != 0)
367 			page_boot_demote(pp);
368 
369 		pp = page_numtopp(pfnum, SE_EXCL);
370 		if (pp == NULL || PP_ISFREE(pp))
371 			panic("boot_alloc: pp is NULL or free");
372 
373 		/*
374 		 * If the cage is on but doesn't yet contain this page,
375 		 * mark it as non-relocatable.
376 		 */
377 		if (kcage_on && !PP_ISNORELOC(pp)) {
378 			PP_SETNORELOC(pp);
379 			PLCNT_XFER_NORELOC(pp);
380 		}
381 
382 		(void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
383 		pp->p_lckcnt = 1;
384 #if defined(__x86)
385 		page_downgrade(pp);
386 #else
387 		page_unlock(pp);
388 #endif
389 	}
390 }
391 
392 /*
393  * Get pages from boot and hash them into the kernel's vp.
394  * Used after page structs have been allocated, but before segkmem is ready.
395  */
396 void *
397 boot_alloc(void *inaddr, size_t size, uint_t align)
398 {
399 	caddr_t addr = inaddr;
400 
401 	if (bootops == NULL)
402 		prom_panic("boot_alloc: attempt to allocate memory after "
403 		    "BOP_GONE");
404 
405 	size = ptob(btopr(size));
406 #ifdef __sparc
407 	if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
408 		panic("boot_alloc: bop_alloc_chunk failed");
409 #else
410 	if (BOP_ALLOC(bootops, addr, size, align) != addr)
411 		panic("boot_alloc: BOP_ALLOC failed");
412 #endif
413 	boot_mapin((caddr_t)addr, size);
414 	return (addr);
415 }
416 
417 static void
418 segkmem_badop()
419 {
420 	panic("segkmem_badop");
421 }
422 
423 #define	SEGKMEM_BADOP(t)	(t(*)())segkmem_badop
424 
425 /*ARGSUSED*/
426 static faultcode_t
427 segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
428 	enum fault_type type, enum seg_rw rw)
429 {
430 	pgcnt_t npages;
431 	spgcnt_t pg;
432 	page_t *pp;
433 	struct vnode *vp = seg->s_data;
434 
435 	ASSERT(RW_READ_HELD(&seg->s_as->a_lock));
436 
437 	if (seg->s_as != &kas || size > seg->s_size ||
438 	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
439 		panic("segkmem_fault: bad args");
440 
441 	/*
442 	 * If it is one of segkp pages, call segkp_fault.
443 	 */
444 	if (segkp_bitmap && seg == &kvseg &&
445 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
446 		return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));
447 
448 	if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
449 		return (FC_NOSUPPORT);
450 
451 	npages = btopr(size);
452 
453 	switch (type) {
454 	case F_SOFTLOCK:	/* lock down already-loaded translations */
455 		for (pg = 0; pg < npages; pg++) {
456 			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
457 			    SE_SHARED);
458 			if (pp == NULL) {
459 				/*
460 				 * Hmm, no page. Does a kernel mapping
461 				 * exist for it?
462 				 */
463 				if (!hat_probe(kas.a_hat, addr)) {
464 					addr -= PAGESIZE;
465 					while (--pg >= 0) {
466 						pp = page_find(vp, (u_offset_t)
467 						    (uintptr_t)addr);
468 						if (pp)
469 							page_unlock(pp);
470 						addr -= PAGESIZE;
471 					}
472 					return (FC_NOMAP);
473 				}
474 			}
475 			addr += PAGESIZE;
476 		}
477 		if (rw == S_OTHER)
478 			hat_reserve(seg->s_as, addr, size);
479 		return (0);
480 	case F_SOFTUNLOCK:
481 		while (npages--) {
482 			pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
483 			if (pp)
484 				page_unlock(pp);
485 			addr += PAGESIZE;
486 		}
487 		return (0);
488 	default:
489 		return (FC_NOSUPPORT);
490 	}
491 	/*NOTREACHED*/
492 }
493 
494 static int
495 segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
496 {
497 	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
498 
499 	if (seg->s_as != &kas || size > seg->s_size ||
500 	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
501 		panic("segkmem_setprot: bad args");
502 
503 	/*
504 	 * If it is one of segkp pages, call segkp.
505 	 */
506 	if (segkp_bitmap && seg == &kvseg &&
507 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
508 		return (SEGOP_SETPROT(segkp, addr, size, prot));
509 
510 	if (prot == 0)
511 		hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
512 	else
513 		hat_chgprot(kas.a_hat, addr, size, prot);
514 	return (0);
515 }
516 
517 /*
518  * This is a dummy segkmem function overloaded to call segkp
519  * when segkp is under the heap.
520  */
521 /* ARGSUSED */
522 static int
523 segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
524 {
525 	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
526 
527 	if (seg->s_as != &kas)
528 		segkmem_badop();
529 
530 	/*
531 	 * If it is one of segkp pages, call into segkp.
532 	 */
533 	if (segkp_bitmap && seg == &kvseg &&
534 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
535 		return (SEGOP_CHECKPROT(segkp, addr, size, prot));
536 
537 	segkmem_badop();
538 	return (0);
539 }
540 
541 /*
542  * This is a dummy segkmem function overloaded to call segkp
543  * when segkp is under the heap.
544  */
545 /* ARGSUSED */
546 static int
547 segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
548 {
549 	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
550 
551 	if (seg->s_as != &kas)
552 		segkmem_badop();
553 
554 	/*
555 	 * If it is one of segkp pages, call into segkp.
556 	 */
557 	if (segkp_bitmap && seg == &kvseg &&
558 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
559 		return (SEGOP_KLUSTER(segkp, addr, delta));
560 
561 	segkmem_badop();
562 	return (0);
563 }
564 
565 static void
566 segkmem_xdump_range(void *arg, void *start, size_t size)
567 {
568 	struct as *as = arg;
569 	caddr_t addr = start;
570 	caddr_t addr_end = addr + size;
571 
572 	while (addr < addr_end) {
573 		pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
574 		if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
575 			dump_addpage(as, addr, pfn);
576 		addr += PAGESIZE;
577 		dump_timeleft = dump_timeout;
578 	}
579 }
580 
581 static void
582 segkmem_dump_range(void *arg, void *start, size_t size)
583 {
584 	caddr_t addr = start;
585 	caddr_t addr_end = addr + size;
586 
587 	/*
588 	 * If we are about to start dumping the range of addresses we
589 	 * carved out of the kernel heap for the large page heap, walk
590 	 * heap_lp_arena instead to find which segments are actually populated.
591 	 */
592 	if (SEGKMEM_USE_LARGEPAGES &&
593 	    addr == heap_lp_base && addr_end == heap_lp_end &&
594 	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
595 		vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
596 		    segkmem_xdump_range, arg);
597 	} else {
598 		segkmem_xdump_range(arg, start, size);
599 	}
600 }
601 
602 static void
603 segkmem_dump(struct seg *seg)
604 {
605 	/*
606 	 * The kernel's heap_arena (represented by kvseg) is a very large
607 	 * VA space, most of which is typically unused.  To speed up dumping
608 	 * we use vmem_walk() to quickly find the pieces of heap_arena that
609 	 * are actually in use.  We do the same for heap32_arena and
610 	 * heap_core.
611 	 *
612 	 * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
613 	 * may ultimately need to allocate memory.  Reentrant walks are
614 	 * necessarily imperfect snapshots.  The kernel heap continues
615 	 * to change during a live crash dump, for example.  For a normal
616 	 * crash dump, however, we know that there won't be any other threads
617 	 * messing with the heap.  Therefore, at worst, we may fail to dump
618 	 * the pages that get allocated by the act of dumping; but we will
619 	 * always dump every page that was allocated when the walk began.
620 	 *
621 	 * The other segkmem segments are dense (fully populated), so there's
622 	 * no need to use this technique when dumping them.
623 	 *
624 	 * Note: when adding special dump handling for any new sparsely-
625 	 * populated segments, be sure to add similar handling to the ::kgrep
626 	 * code in mdb.
627 	 */
628 	if (seg == &kvseg) {
629 		vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
630 		    segkmem_dump_range, seg->s_as);
631 #ifndef __sparc
632 		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
633 		    segkmem_dump_range, seg->s_as);
634 #endif
635 	} else if (seg == &kvseg_core) {
636 		vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
637 		    segkmem_dump_range, seg->s_as);
638 	} else if (seg == &kvseg32) {
639 		vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
640 		    segkmem_dump_range, seg->s_as);
641 		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
642 		    segkmem_dump_range, seg->s_as);
643 	} else if (seg == &kzioseg) {
644 		/*
645 		 * We don't want to dump pages attached to kzioseg since they
646 		 * contain file data from ZFS.  If this page's segment is
647 		 * kzioseg return instead of writing it to the dump device.
648 		 */
649 		return;
650 	} else {
651 		segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
652 	}
653 }
654 
655 /*
656  * lock/unlock kmem pages over a given range [addr, addr+len).
657  * Returns a shadow list of pages in ppp. If there are holes
658  * in the range (e.g. some of the kernel mappings do not have
659  * underlying page_ts) returns ENOTSUP so that as_pagelock()
660  * will handle the range via as_fault(F_SOFTLOCK).
661  */
662 /*ARGSUSED*/
663 static int
664 segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
665 	page_t ***ppp, enum lock_type type, enum seg_rw rw)
666 {
667 	page_t **pplist, *pp;
668 	pgcnt_t npages;
669 	spgcnt_t pg;
670 	size_t nb;
671 	struct vnode *vp = seg->s_data;
672 
673 	ASSERT(ppp != NULL);
674 
675 	/*
676 	 * If it is one of segkp pages, call into segkp.
677 	 */
678 	if (segkp_bitmap && seg == &kvseg &&
679 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
680 		return (SEGOP_PAGELOCK(segkp, addr, len, ppp, type, rw));
681 
682 	npages = btopr(len);
683 	nb = sizeof (page_t *) * npages;
684 
685 	if (type == L_PAGEUNLOCK) {
686 		pplist = *ppp;
687 		ASSERT(pplist != NULL);
688 
689 		for (pg = 0; pg < npages; pg++) {
690 			pp = pplist[pg];
691 			page_unlock(pp);
692 		}
693 		kmem_free(pplist, nb);
694 		return (0);
695 	}
696 
697 	ASSERT(type == L_PAGELOCK);
698 
699 	pplist = kmem_alloc(nb, KM_NOSLEEP);
700 	if (pplist == NULL) {
701 		*ppp = NULL;
702 		return (ENOTSUP);	/* take the slow path */
703 	}
704 
705 	for (pg = 0; pg < npages; pg++) {
706 		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
707 		if (pp == NULL) {
708 			while (--pg >= 0)
709 				page_unlock(pplist[pg]);
710 			kmem_free(pplist, nb);
711 			*ppp = NULL;
712 			return (ENOTSUP);
713 		}
714 		pplist[pg] = pp;
715 		addr += PAGESIZE;
716 	}
717 
718 	*ppp = pplist;
719 	return (0);
720 }
721 
722 /*
723  * This is a dummy segkmem function overloaded to call segkp
724  * when segkp is under the heap.
725  */
726 /* ARGSUSED */
727 static int
728 segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
729 {
730 	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
731 
732 	if (seg->s_as != &kas)
733 		segkmem_badop();
734 
735 	/*
736 	 * If it is one of segkp pages, call into segkp.
737 	 */
738 	if (segkp_bitmap && seg == &kvseg &&
739 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
740 		return (SEGOP_GETMEMID(segkp, addr, memidp));
741 
742 	segkmem_badop();
743 	return (0);
744 }
745 
746 /*ARGSUSED*/
747 static lgrp_mem_policy_info_t *
748 segkmem_getpolicy(struct seg *seg, caddr_t addr)
749 {
750 	return (NULL);
751 }
752 
753 /*ARGSUSED*/
754 static int
755 segkmem_capable(struct seg *seg, segcapability_t capability)
756 {
757 	if (capability == S_CAPABILITY_NOMINFLT)
758 		return (1);
759 	return (0);
760 }
761 
762 static struct seg_ops segkmem_ops = {
763 	SEGKMEM_BADOP(int),		/* dup */
764 	SEGKMEM_BADOP(int),		/* unmap */
765 	SEGKMEM_BADOP(void),		/* free */
766 	segkmem_fault,
767 	SEGKMEM_BADOP(faultcode_t),	/* faulta */
768 	segkmem_setprot,
769 	segkmem_checkprot,
770 	segkmem_kluster,
771 	SEGKMEM_BADOP(size_t),		/* swapout */
772 	SEGKMEM_BADOP(int),		/* sync */
773 	SEGKMEM_BADOP(size_t),		/* incore */
774 	SEGKMEM_BADOP(int),		/* lockop */
775 	SEGKMEM_BADOP(int),		/* getprot */
776 	SEGKMEM_BADOP(u_offset_t),	/* getoffset */
777 	SEGKMEM_BADOP(int),		/* gettype */
778 	SEGKMEM_BADOP(int),		/* getvp */
779 	SEGKMEM_BADOP(int),		/* advise */
780 	segkmem_dump,
781 	segkmem_pagelock,
782 	SEGKMEM_BADOP(int),		/* setpgsz */
783 	segkmem_getmemid,
784 	segkmem_getpolicy,		/* getpolicy */
785 	segkmem_capable,		/* capable */
786 };
787 
788 int
789 segkmem_zio_create(struct seg *seg)
790 {
791 	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
792 	seg->s_ops = &segkmem_ops;
793 	seg->s_data = &zvp;
794 	kas.a_size += seg->s_size;
795 	return (0);
796 }
797 
798 int
799 segkmem_create(struct seg *seg)
800 {
801 	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
802 	seg->s_ops = &segkmem_ops;
803 	seg->s_data = &kvp;
804 	kas.a_size += seg->s_size;
805 	return (0);
806 }
807 
808 /*ARGSUSED*/
809 page_t *
810 segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
811 {
812 	struct seg kseg;
813 	int pgflags;
814 	struct vnode *vp = arg;
815 
816 	if (vp == NULL)
817 		vp = &kvp;
818 
819 	kseg.s_as = &kas;
820 	pgflags = PG_EXCL;
821 
822 	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
823 		pgflags |= PG_NORELOC;
824 	if ((vmflag & VM_NOSLEEP) == 0)
825 		pgflags |= PG_WAIT;
826 	if (vmflag & VM_PANIC)
827 		pgflags |= PG_PANIC;
828 	if (vmflag & VM_PUSHPAGE)
829 		pgflags |= PG_PUSHPAGE;
830 
831 	return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
832 	    pgflags, &kseg, addr));
833 }
834 
835 /*
836  * Allocate pages to back the virtual address range [addr, addr + size).
837  * If addr is NULL, allocate the virtual address space as well.
838  */
839 void *
840 segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
841 	page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
842 {
843 	page_t *ppl;
844 	caddr_t addr = inaddr;
845 	pgcnt_t npages = btopr(size);
846 	int allocflag;
847 
848 	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
849 		return (NULL);
850 
851 	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
852 
853 	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
854 		if (inaddr == NULL)
855 			vmem_free(vmp, addr, size);
856 		return (NULL);
857 	}
858 
859 	ppl = page_create_func(addr, size, vmflag, pcarg);
860 	if (ppl == NULL) {
861 		if (inaddr == NULL)
862 			vmem_free(vmp, addr, size);
863 		page_unresv(npages);
864 		return (NULL);
865 	}
866 
867 	/*
868 	 * Under certain conditions, we need to let the HAT layer know
869 	 * that it cannot safely allocate memory.  Allocations from
870 	 * the hat_memload vmem arena always need this, to prevent
871 	 * infinite recursion.
872 	 *
873 	 * In addition, the x86 hat cannot safely do memory
874 	 * allocations while in vmem_populate(), because there
875 	 * is no simple bound on its usage.
876 	 */
877 	if (vmflag & VM_MEMLOAD)
878 		allocflag = HAT_NO_KALLOC;
879 #if defined(__x86)
880 	else if (vmem_is_populator())
881 		allocflag = HAT_NO_KALLOC;
882 #endif
883 	else
884 		allocflag = 0;
885 
886 	/*
887 	 * Support for non-coherent I-cache.
888 	 * Set HAT_LOAD_TEXT to override soft execute.
889 	 */
890 	if (attr & HAT_ATTR_TEXT) {
891 		attr &= ~HAT_ATTR_TEXT;
892 		allocflag |= HAT_LOAD_TEXT;
893 	}
894 
895 	while (ppl != NULL) {
896 		page_t *pp = ppl;
897 		page_sub(&ppl, pp);
898 		ASSERT(page_iolock_assert(pp));
899 		ASSERT(PAGE_EXCL(pp));
900 		page_io_unlock(pp);
901 		hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
902 		    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
903 		    HAT_LOAD_LOCK | allocflag);
904 		pp->p_lckcnt = 1;
905 #if defined(__x86)
906 		page_downgrade(pp);
907 #else
908 		if (vmflag & SEGKMEM_SHARELOCKED)
909 			page_downgrade(pp);
910 		else
911 			page_unlock(pp);
912 #endif
913 	}
914 
915 	return (addr);
916 }
917 
918 static void *
919 segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
920 {
921 	void *addr;
922 	segkmem_gc_list_t *gcp, **prev_gcpp;
923 
924 	ASSERT(vp != NULL);
925 
926 	if (kvseg.s_base == NULL) {
927 #ifndef __sparc
928 		if (bootops->bsys_alloc == NULL)
929 			halt("Memory allocation between bop_alloc() and "
930 			    "kmem_alloc().\n");
931 #endif
932 
933 		/*
934 		 * There's not a lot of memory to go around during boot,
935 		 * so recycle it if we can.
936 		 */
937 		for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
938 		    prev_gcpp = &gcp->gc_next) {
939 			if (gcp->gc_arena == vmp && gcp->gc_size == size) {
940 				*prev_gcpp = gcp->gc_next;
941 				return (gcp);
942 			}
943 		}
944 
945 		addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
946 		if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
947 			panic("segkmem_alloc: boot_alloc failed");
948 		return (addr);
949 	}
950 	return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
951 	    segkmem_page_create, vp));
952 }
953 
954 void *
955 segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
956 {
957 	return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
958 }
959 
960 void *
961 segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
962 {
963 	return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
964 }
965 
966 /*
967  * Any changes to this routine must also be carried over to
968  * devmap_free_pages() in the seg_dev driver. This is because
969  * we currently don't have a special kernel segment for non-paged
970  * kernel memory that is exported by drivers to user space.
971  */
972 static void
973 segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
974     void (*func)(page_t *))
975 {
976 	page_t *pp;
977 	caddr_t addr = inaddr;
978 	caddr_t eaddr;
979 	pgcnt_t npages = btopr(size);
980 
981 	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
982 	ASSERT(vp != NULL);
983 
984 	if (kvseg.s_base == NULL) {
985 		segkmem_gc_list_t *gc = inaddr;
986 		gc->gc_arena = vmp;
987 		gc->gc_size = size;
988 		gc->gc_next = segkmem_gc_list;
989 		segkmem_gc_list = gc;
990 		return;
991 	}
992 
993 	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
994 
995 	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
996 #if defined(__x86)
997 		pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
998 		if (pp == NULL)
999 			panic("segkmem_free: page not found");
1000 		if (!page_tryupgrade(pp)) {
1001 			/*
1002 			 * Some other thread has a sharelock. Wait for
1003 			 * it to drop the lock so we can free this page.
1004 			 */
1005 			page_unlock(pp);
1006 			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
1007 			    SE_EXCL);
1008 		}
1009 #else
1010 		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
1011 #endif
1012 		if (pp == NULL)
1013 			panic("segkmem_free: page not found");
1014 		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
1015 		pp->p_lckcnt = 0;
1016 		if (func)
1017 			func(pp);
1018 		else
1019 			page_destroy(pp, 0);
1020 	}
1021 	if (func == NULL)
1022 		page_unresv(npages);
1023 
1024 	if (vmp != NULL)
1025 		vmem_free(vmp, inaddr, size);
1026 
1027 }
1028 
1029 void
1030 segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, void (*func)(page_t *))
1031 {
1032 	segkmem_free_vn(vmp, inaddr, size, &kvp, func);
1033 }
1034 
1035 void
1036 segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
1037 {
1038 	segkmem_free_vn(vmp, inaddr, size, &kvp, NULL);
1039 }
1040 
1041 void
1042 segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
1043 {
1044 	segkmem_free_vn(vmp, inaddr, size, &zvp, NULL);
1045 }
1046 
1047 void
1048 segkmem_gc(void)
1049 {
1050 	ASSERT(kvseg.s_base != NULL);
1051 	while (segkmem_gc_list != NULL) {
1052 		segkmem_gc_list_t *gc = segkmem_gc_list;
1053 		segkmem_gc_list = gc->gc_next;
1054 		segkmem_free(gc->gc_arena, gc, gc->gc_size);
1055 	}
1056 }
1057 
1058 /*
1059  * Legacy entry points from here to end of file.
1060  */
1061 void
1062 segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
1063     pfn_t pfn, uint_t flags)
1064 {
1065 	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1066 	hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
1067 	    flags | HAT_LOAD_LOCK);
1068 }
1069 
1070 void
1071 segkmem_mapout(struct seg *seg, void *addr, size_t size)
1072 {
1073 	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1074 }
1075 
1076 void *
1077 kmem_getpages(pgcnt_t npages, int kmflag)
1078 {
1079 	return (kmem_alloc(ptob(npages), kmflag));
1080 }
1081 
1082 void
1083 kmem_freepages(void *addr, pgcnt_t npages)
1084 {
1085 	kmem_free(addr, ptob(npages));
1086 }
1087 
1088 /*
1089  * segkmem_page_create_large() allocates a large page to be used for the kmem
1090  * caches. If kpr is enabled, we ask for a relocatable page unless requested
1091  * otherwise. If kpr is disabled, we have to ask for a non-reloc page.
1092  */
1093 static page_t *
1094 segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
1095 {
1096 	int pgflags;
1097 
1098 	pgflags = PG_EXCL;
1099 
1100 	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
1101 		pgflags |= PG_NORELOC;
1102 	if (!(vmflag & VM_NOSLEEP))
1103 		pgflags |= PG_WAIT;
1104 	if (vmflag & VM_PUSHPAGE)
1105 		pgflags |= PG_PUSHPAGE;
1106 
1107 	return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
1108 	    pgflags, &kvseg, addr, arg));
1109 }
1110 
1111 /*
1112  * Allocate a large page to back the virtual address range
1113  * [addr, addr + size).  If addr is NULL, allocate the virtual address
1114  * space as well.
1115  */
1116 static void *
1117 segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
1118     uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
1119     void *pcarg)
1120 {
1121 	caddr_t addr = inaddr, pa;
1122 	size_t  lpsize = segkmem_lpsize;
1123 	pgcnt_t npages = btopr(size);
1124 	pgcnt_t nbpages = btop(lpsize);
1125 	pgcnt_t nlpages = size >> segkmem_lpshift;
1126 	size_t  ppasize = nbpages * sizeof (page_t *);
1127 	page_t *pp, *rootpp, **ppa, *pplist = NULL;
1128 	int i;
1129 
1130 	vmflag |= VM_NOSLEEP;
1131 
1132 	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
1133 		return (NULL);
1134 	}
1135 
1136 	/*
1137 	 * Allocate the array we need for hat_memload_array().
1138 	 * We use a separate arena to avoid recursion.
1139 	 * We will not need this array once hat_memload_array() learns pp++.
1140 	 */
1141 	if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
1142 		goto fail_array_alloc;
1143 	}
1144 
1145 	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
1146 		goto fail_vmem_alloc;
1147 
1148 	ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);
1149 
1150 	/* create all the pages */
1151 	for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
1152 		if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
1153 			goto fail_page_create;
1154 		page_list_concat(&pplist, &pp);
1155 	}
1156 
1157 	/* at this point we have all the resources to complete the request */
1158 	while ((rootpp = pplist) != NULL) {
1159 		for (i = 0; i < nbpages; i++) {
1160 			ASSERT(pplist != NULL);
1161 			pp = pplist;
1162 			page_sub(&pplist, pp);
1163 			ASSERT(page_iolock_assert(pp));
1164 			page_io_unlock(pp);
1165 			ppa[i] = pp;
1166 		}
1167 		/*
1168 		 * Load the locked entry. It's OK to preload the entry into the
1169 		 * TSB since we now support large mappings in the kernel TSB.
1170 		 */
1171 		hat_memload_array(kas.a_hat,
1172 		    (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
1173 		    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
1174 		    HAT_LOAD_LOCK);
1175 
1176 		for (--i; i >= 0; --i) {
1177 			ppa[i]->p_lckcnt = 1;
1178 			page_unlock(ppa[i]);
1179 		}
1180 	}
1181 
1182 	vmem_free(segkmem_ppa_arena, ppa, ppasize);
1183 	return (addr);
1184 
1185 fail_page_create:
1186 	while ((rootpp = pplist) != NULL) {
1187 		for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
1188 			ASSERT(pp != NULL);
1189 			page_sub(&pplist, pp);
1190 			ASSERT(page_iolock_assert(pp));
1191 			page_io_unlock(pp);
1192 		}
1193 		page_destroy_pages(rootpp);
1194 	}
1195 
1196 	if (inaddr == NULL)
1197 		vmem_free(vmp, addr, size);
1198 
1199 fail_vmem_alloc:
1200 	vmem_free(segkmem_ppa_arena, ppa, ppasize);
1201 
1202 fail_array_alloc:
1203 	page_unresv(npages);
1204 
1205 	return (NULL);
1206 }
1207 
1208 static void
1209 segkmem_free_one_lp(caddr_t addr, size_t size)
1210 {
1211 	page_t		*pp, *rootpp = NULL;
1212 	pgcnt_t 	pgs_left = btopr(size);
1213 
1214 	ASSERT(size == segkmem_lpsize);
1215 
1216 	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1217 
1218 	for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
1219 		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
1220 		if (pp == NULL)
1221 			panic("segkmem_free_one_lp: page not found");
1222 		ASSERT(PAGE_EXCL(pp));
1223 		pp->p_lckcnt = 0;
1224 		if (rootpp == NULL)
1225 			rootpp = pp;
1226 	}
1227 	ASSERT(rootpp != NULL);
1228 	page_destroy_pages(rootpp);
1229 
1230 	/* page_unresv() is done by the caller */
1231 }
1232 
1233 /*
1234  * This function is called to import new spans into the vmem arenas like
1235  * kmem_default_arena and kmem_oversize_arena. It first tries to import
1236  * spans from the large page arena, kmem_lp_arena. In order to do this it
1237  * might have to "upgrade" the requested size to the kmem_lp_arena quantum.
1238  * If it is not able to satisfy the upgraded request, it falls back to
1239  * regular segkmem_alloc(), which imports from the "*vmp" arena instead.
1240  */
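/*
 * For instance (an illustrative calculation, not taken from this file):
 * with the default segkmem_kmemlp_quantum of 4MB, a 5MB import request is
 * upgraded to asize = P2ROUNDUP(5MB, 4MB) = 8MB before being handed to
 * kmem_lp_arena.
 */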
1241 /*ARGSUSED*/
1242 void *
1243 segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
1244 {
1245 	size_t size;
1246 	kthread_t *t = curthread;
1247 	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1248 
1249 	ASSERT(sizep != NULL);
1250 
1251 	size = *sizep;
1252 
1253 	if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
1254 	    !(vmflag & SEGKMEM_SHARELOCKED)) {
1255 
1256 		size_t kmemlp_qnt = segkmem_kmemlp_quantum;
1257 		size_t asize = P2ROUNDUP(size, kmemlp_qnt);
1258 		void  *addr = NULL;
1259 		ulong_t *lpthrtp = &lpcb->lp_throttle;
1260 		ulong_t lpthrt = *lpthrtp;
1261 		int	dowakeup = 0;
1262 		int	doalloc = 1;
1263 
1264 		ASSERT(kmem_lp_arena != NULL);
1265 		ASSERT(asize >= size);
1266 
1267 		if (lpthrt != 0) {
1268 			/* try to update the throttle value */
1269 			lpthrt = atomic_add_long_nv(lpthrtp, 1);
1270 			if (lpthrt >= segkmem_lpthrottle_max) {
1271 				lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
1272 				    segkmem_lpthrottle_max / 4);
1273 			}
1274 
1275 			/*
1276 			 * Once we get above the throttle start, do an
1277 			 * exponential backoff on large page attempts and reaping.
1278 			 */
1279 			if (lpthrt > segkmem_lpthrottle_start &&
1280 			    (lpthrt & (lpthrt - 1))) {
1281 				lpcb->allocs_throttled++;
1282 				lpthrt--;
1283 				if ((lpthrt & (lpthrt - 1)) == 0)
1284 					kmem_reap();
1285 				return (segkmem_alloc(vmp, size, vmflag));
1286 			}
1287 		}
1288 
1289 		if (!(vmflag & VM_NOSLEEP) &&
1290 		    segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
1291 		    vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
1292 		    asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {
1293 
1294 			/*
1295 			 * We are low on free memory in kmem_lp_arena,
1296 			 * so we let only one thread allocate a heap_lp
1297 			 * quantum-size chunk that everybody is going to
1298 			 * share.
1299 			 */
1300 			mutex_enter(&lpcb->lp_lock);
1301 
1302 			if (lpcb->lp_wait) {
1303 
1304 				/* we are not the first one - wait */
1305 				cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
1306 				if (vmem_size(kmem_lp_arena, VMEM_FREE) <
1307 				    kmemlp_qnt)  {
1308 					doalloc = 0;
1309 				}
1310 			} else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
1311 			    kmemlp_qnt) {
1312 
1313 				/*
1314 				 * we are the first one, make sure we import
1315 				 * a large page
1316 				 */
1317 				if (asize == kmemlp_qnt)
1318 					asize += kmemlp_qnt;
1319 				dowakeup = 1;
1320 				lpcb->lp_wait = 1;
1321 			}
1322 
1323 			mutex_exit(&lpcb->lp_lock);
1324 		}
1325 
1326 		/*
1327 		 * VM_ABORT flag prevents sleeps in vmem_xalloc when
1328 		 * large pages are not available. In that case this allocation
1329 		 * attempt will fail and we will retry allocation with small
1330 		 * pages. We also do not want to panic if this allocation fails
1331 		 * because we are going to retry.
1332 		 */
1333 		if (doalloc) {
1334 			addr = vmem_alloc(kmem_lp_arena, asize,
1335 			    (vmflag | VM_ABORT) & ~VM_PANIC);
1336 
1337 			if (dowakeup) {
1338 				mutex_enter(&lpcb->lp_lock);
1339 				ASSERT(lpcb->lp_wait != 0);
1340 				lpcb->lp_wait = 0;
1341 				cv_broadcast(&lpcb->lp_cv);
1342 				mutex_exit(&lpcb->lp_lock);
1343 			}
1344 		}
1345 
1346 		if (addr != NULL) {
1347 			*sizep = asize;
1348 			*lpthrtp = 0;
1349 			return (addr);
1350 		}
1351 
1352 		if (vmflag & VM_NOSLEEP)
1353 			lpcb->nosleep_allocs_failed++;
1354 		else
1355 			lpcb->sleep_allocs_failed++;
1356 		lpcb->alloc_bytes_failed += size;
1357 
1358 		/* if large page throttling has not started yet, start it */
1359 		if (segkmem_use_lpthrottle && lpthrt == 0) {
1360 			lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
1361 		}
1362 	}
1363 	return (segkmem_alloc(vmp, size, vmflag));
1364 }
1365 
1366 void
1367 segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
1368 {
1369 	if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
1370 		segkmem_free(vmp, inaddr, size);
1371 	} else {
1372 		vmem_free(kmem_lp_arena, inaddr, size);
1373 	}
1374 }
1375 
1376 /*
1377  * segkmem_alloc_lpi() imports virtual memory from the large page heap arena
1378  * into the kmem_lp arena. In the process it maps the imported segment with
1379  * large pages.
1380  */
1381 static void *
1382 segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
1383 {
1384 	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1385 	void  *addr;
1386 
1387 	ASSERT(size != 0);
1388 	ASSERT(vmp == heap_lp_arena);
1389 
1390 	/* do not allow the large page heap to grow beyond its limits */
1391 	if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
1392 		lpcb->allocs_limited++;
1393 		return (NULL);
1394 	}
1395 
1396 	addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
1397 	    segkmem_page_create_large, NULL);
1398 	return (addr);
1399 }
1400 
1401 /*
1402  * segkmem_free_lpi() returns virtual memory back into the large page heap
1403  * arena from the kmem_lp arena. Before doing this it unmaps the segment
1404  * and frees the large pages used to map it.
1405  */
1406 static void
1407 segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
1408 {
1409 	pgcnt_t		nlpages = size >> segkmem_lpshift;
1410 	size_t		lpsize = segkmem_lpsize;
1411 	caddr_t		addr = inaddr;
1412 	pgcnt_t 	npages = btopr(size);
1413 	int		i;
1414 
1415 	ASSERT(vmp == heap_lp_arena);
1416 	ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
1417 	ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);
1418 
1419 	for (i = 0; i < nlpages; i++) {
1420 		segkmem_free_one_lp(addr, lpsize);
1421 		addr += lpsize;
1422 	}
1423 
1424 	page_unresv(npages);
1425 
1426 	vmem_free(vmp, inaddr, size);
1427 }
1428 
1429 /*
1430  * This function is called at system boot time by kmem_init right after
1431  * /etc/system file has been read. It checks based on hardware configuration
1432  * the /etc/system file has been read. Based on the hardware configuration
1433  * and /etc/system settings it decides whether the system will use large
1434  * pages. The initialization necessary to actually start using large pages
1435  */
1436 int
1437 segkmem_lpsetup()
1438 {
1439 	int use_large_pages = 0;
1440 
1441 #ifdef __sparc
1442 
1443 	size_t memtotal = physmem * PAGESIZE;
1444 
1445 	if (heap_lp_base == NULL) {
1446 		segkmem_lpsize = PAGESIZE;
1447 		return (0);
1448 	}
1449 
1450 	/* get a platform dependent value of large page size for kernel heap */
1451 	segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);
1452 
1453 	if (segkmem_lpsize <= PAGESIZE) {
1454 		/*
1455 		 * Put the virtual space reserved for the large page kernel
1456 		 * heap back into the regular heap.
1457 		 */
1458 		vmem_xfree(heap_arena, heap_lp_base,
1459 		    heap_lp_end - heap_lp_base);
1460 		heap_lp_base = NULL;
1461 		heap_lp_end = NULL;
1462 		segkmem_lpsize = PAGESIZE;
1463 		return (0);
1464 	}
1465 
1466 	/* set heap_lp quantum if necessary */
1467 	if (segkmem_heaplp_quantum == 0 ||
1468 	    (segkmem_heaplp_quantum & (segkmem_heaplp_quantum - 1)) ||
1469 	    P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
1470 		segkmem_heaplp_quantum = segkmem_lpsize;
1471 	}
1472 
1473 	/* set kmem_lp quantum if necessary */
1474 	if (segkmem_kmemlp_quantum == 0 ||
1475 	    (segkmem_kmemlp_quantum & (segkmem_kmemlp_quantum - 1)) ||
1476 	    segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
1477 		segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
1478 	}
1479 
1480 	/* set total amount of memory allowed for large page kernel heap */
1481 	if (segkmem_kmemlp_max == 0) {
1482 		if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
1483 			segkmem_kmemlp_pcnt = 12;
1484 		segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
1485 	}
1486 	segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
1487 	    segkmem_heaplp_quantum);
1488 
1489 	/* fix lp kmem preallocation request if necessary */
1490 	if (segkmem_kmemlp_min) {
1491 		segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
1492 		    segkmem_heaplp_quantum);
1493 		if (segkmem_kmemlp_min > segkmem_kmemlp_max)
1494 			segkmem_kmemlp_min = segkmem_kmemlp_max;
1495 	}
1496 
1497 	use_large_pages = 1;
1498 	segkmem_lpszc = page_szc(segkmem_lpsize);
1499 	segkmem_lpshift = page_get_shift(segkmem_lpszc);
1500 
1501 #endif
1502 	return (use_large_pages);
1503 }
1504 
1505 void
1506 segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
1507 {
1508 	ASSERT(zio_mem_base != NULL);
1509 	ASSERT(zio_mem_size != 0);
1510 
1511 	zio_arena = vmem_create("zfs_file_data", zio_mem_base, zio_mem_size,
1512 	    PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
1513 
1514 	zio_alloc_arena = vmem_create("zfs_file_data_buf", NULL, 0, PAGESIZE,
1515 	    segkmem_zio_alloc, segkmem_zio_free, zio_arena, 0, VM_SLEEP);
1516 
1517 	ASSERT(zio_arena != NULL);
1518 	ASSERT(zio_alloc_arena != NULL);
1519 }
1520 
1521 #ifdef __sparc
1522 
1523 
1524 static void *
1525 segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
1526 {
1527 	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
1528 	void   *addr;
1529 
1530 	if (ppaquantum <= PAGESIZE)
1531 		return (segkmem_alloc(vmp, size, vmflag));
1532 
1533 	ASSERT((size & (ppaquantum - 1)) == 0);
1534 
1535 	addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
1536 	if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
1537 	    segkmem_page_create, NULL) == NULL) {
1538 		vmem_xfree(vmp, addr, size);
1539 		addr = NULL;
1540 	}
1541 
1542 	return (addr);
1543 }
1544 
1545 static void
1546 segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
1547 {
1548 	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
1549 
1550 	ASSERT(addr != NULL);
1551 
1552 	if (ppaquantum <= PAGESIZE) {
1553 		segkmem_free(vmp, addr, size);
1554 	} else {
1555 		segkmem_free(NULL, addr, size);
1556 		vmem_xfree(vmp, addr, size);
1557 	}
1558 }
1559 
1560 void
1561 segkmem_heap_lp_init()
1562 {
1563 	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1564 	size_t heap_lp_size = heap_lp_end - heap_lp_base;
1565 	size_t lpsize = segkmem_lpsize;
1566 	size_t ppaquantum;
1567 	void   *addr;
1568 
1569 	if (segkmem_lpsize <= PAGESIZE) {
1570 		ASSERT(heap_lp_base == NULL);
1571 		ASSERT(heap_lp_end == NULL);
1572 		return;
1573 	}
1574 
1575 	ASSERT(segkmem_heaplp_quantum >= lpsize);
1576 	ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
1577 	ASSERT(lpcb->lp_uselp == 0);
1578 	ASSERT(heap_lp_base != NULL);
1579 	ASSERT(heap_lp_end != NULL);
1580 	ASSERT(heap_lp_base < heap_lp_end);
1581 	ASSERT(heap_lp_arena == NULL);
1582 	ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
1583 	ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);
1584 
1585 	/* create large page heap arena */
1586 	heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
1587 	    segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);
1588 
1589 	ASSERT(heap_lp_arena != NULL);
1590 
1591 	/* This arena caches memory already mapped by large pages */
1592 	kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
1593 	    segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);
1594 
1595 	ASSERT(kmem_lp_arena != NULL);
1596 
1597 	mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
1598 	cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);
1599 
1600 	/*
1601 	 * This arena is used for the array of page_t pointers necessary
1602 	 * to call hat_memload_array().
1603 	 */
1604 	ppaquantum = btopr(lpsize) * sizeof (page_t *);
1605 	segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
1606 	    segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
1607 	    VM_SLEEP);
1608 
1609 	ASSERT(segkmem_ppa_arena != NULL);
1610 
1611 	/* preallocate some memory for the lp kernel heap */
1612 	if (segkmem_kmemlp_min) {
1613 
1614 		ASSERT(P2PHASE(segkmem_kmemlp_min,
1615 		    segkmem_heaplp_quantum) == 0);
1616 
1617 		if ((addr = segkmem_alloc_lpi(heap_lp_arena,
1618 		    segkmem_kmemlp_min, VM_SLEEP)) != NULL) {
1619 
1620 			addr = vmem_add(kmem_lp_arena, addr,
1621 			    segkmem_kmemlp_min, VM_SLEEP);
1622 			ASSERT(addr != NULL);
1623 		}
1624 	}
1625 
1626 	lpcb->lp_uselp = 1;
1627 }
1628 
1629 #endif
1630