/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/vm.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dumphdr.h>
#include <sys/bootconf.h>
#include <sys/lgrp.h>
#include <vm/seg_kmem.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/vm_dep.h>
#include <vm/faultcode.h>
#include <sys/promif.h>
#include <vm/seg_kp.h>
#include <sys/bitmap.h>
#include <sys/mem_cage.h>

#ifdef __sparc
#include <sys/ivintr.h>
#include <sys/panic.h>
#endif

/*
 * seg_kmem is the primary kernel memory segment driver.  It
 * maps the kernel heap [kernelheap, ekernelheap), module text,
 * and all memory which was allocated before the VM was initialized
 * into kas.
 *
 * Pages which belong to seg_kmem are hashed into &kvp vnode at
 * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
 * They must never be paged out since segkmem_fault() is a no-op to
 * prevent recursive faults.
 *
 * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
 * __x86 and are unlocked (p_sharelock == 0) on __sparc.  Once __x86
 * supports relocation the #ifdef kludges can be removed.
 *
 * seg_kmem pages may be subject to relocation by page_relocate(),
 * provided that the HAT supports it; if this is so, segkmem_reloc
 * will be set to a nonzero value.  All boot time allocated memory as
 * well as static memory is considered off limits to relocation.
 * Pages are "relocatable" if p_state does not have P_NORELOC set, so
 * we request P_NORELOC pages for memory that isn't safe to relocate.
 *
 * The kernel heap is logically divided up into four pieces:
 *
 *   heap32_arena is for allocations that require 32-bit absolute
 *   virtual addresses (e.g. code that uses 32-bit pointers/offsets).
 *
 *   heap_core is for allocations that require 2GB *relative*
 *   offsets; in other words all memory from heap_core is within
 *   2GB of all other memory from the same arena.  This is a requirement
 *   of the addressing modes of some processors in supervisor code.
 *
 *   heap_arena is the general heap arena.
 *
 *   static_arena is the static memory arena.  Allocations from it
 *   are not subject to relocation so it is safe to use the memory
 *   physical address as well as the virtual address (e.g. the VA to
 *   PA translations are static).  Caches may import from static_arena;
 *   all other static memory allocations should use static_alloc_arena.
 *
 * On some platforms which have limited virtual address space, seg_kmem
 * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
 * segkp_bitmap is non-NULL, and each bit represents a page of virtual
 * address space which is actually seg_kp mapped.
 */
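
/*
 * For illustration only: a minimal sketch (not part of this driver, and
 * kept out of the build by the guard below) of how a hypothetical client
 * would use the arenas described above.  Caches that need memory with
 * static VA -> PA translations import from static_arena; direct static
 * allocations come from static_alloc_arena.  The cache name and sizes
 * here are made up.
 */
#ifdef SEGKMEM_ARENA_EXAMPLE
static kmem_cache_t *example_cache;

static void
example_static_usage(void)
{
	void *buf;

	/* a kmem cache that needs static memory sources from static_arena */
	example_cache = kmem_cache_create("example_static_cache", 64, 0,
	    NULL, NULL, NULL, NULL, static_arena, 0);

	/* a direct static allocation comes from static_alloc_arena */
	buf = vmem_alloc(static_alloc_arena, PAGESIZE, VM_SLEEP);

	vmem_free(static_alloc_arena, buf, PAGESIZE);
	kmem_cache_destroy(example_cache);
}
#endif	/* SEGKMEM_ARENA_EXAMPLE */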

extern ulong_t *segkp_bitmap;	/* Is set if segkp is from the kernel heap */

char *kernelheap;		/* start of primary kernel heap */
char *ekernelheap;		/* end of primary kernel heap */
struct seg kvseg;		/* primary kernel heap segment */
struct seg kvseg_core;		/* "core" kernel heap segment */
struct seg kzioseg;		/* Segment for zio mappings */
vmem_t *heap_arena;		/* primary kernel heap arena */
vmem_t *heap_core_arena;	/* core kernel heap arena */
char *heap_core_base;		/* start of core kernel heap arena */
char *heap_lp_base;		/* start of kernel large page heap arena */
char *heap_lp_end;		/* end of kernel large page heap arena */
vmem_t *hat_memload_arena;	/* HAT translation data */
struct seg kvseg32;		/* 32-bit kernel heap segment */
vmem_t *heap32_arena;		/* 32-bit kernel heap arena */
vmem_t *heaptext_arena;		/* heaptext arena */
struct as kas;			/* kernel address space */
int segkmem_reloc;		/* enable/disable relocatable segkmem pages */
vmem_t *static_arena;		/* arena for caches to import static memory */
vmem_t *static_alloc_arena;	/* arena for allocating static memory */
vmem_t *zio_arena = NULL;	/* arena for allocating zio memory */
vmem_t *zio_alloc_arena = NULL;	/* arena for allocating zio memory */

#if defined(__amd64)
vmem_t *kvmm_arena;		/* arena for vmm VA */
struct seg kvmmseg;		/* Segment for vmm memory */
#endif

/*
 * The seg_kmem driver can map part of the kernel heap with large pages.
 * Currently this functionality is implemented for sparc platforms only.
 *
 * The large page size "segkmem_lpsize" for the kernel heap is selected in
 * platform specific code.  It can also be modified via the /etc/system file.
 * Setting segkmem_lpsize to PAGESIZE in /etc/system disables usage of large
 * pages for the kernel heap.  "segkmem_lpshift" is adjusted appropriately
 * to match segkmem_lpsize.
 *
 * At boot time we carve from the kernel heap arena a range of virtual
 * addresses that will be used for large page mappings.  This range
 * [heap_lp_base, heap_lp_end) is set up as a separate vmem arena,
 * "heap_lp_arena".  We also create "kmem_lp_arena" that caches memory
 * already backed by large pages.  kmem_lp_arena imports virtual segments
 * from heap_lp_arena.
 */
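
/*
 * For example, large pages for the kernel heap could be tuned from
 * /etc/system with lines like the following (illustrative values; the
 * feature is sparc-only, where PAGESIZE is 0x2000):
 *
 *	set segkmem_lpsize = 0x400000	* use 4M pages for the kernel heap
 *	set segkmem_lpsize = 0x2000	* PAGESIZE: disable large pages
 */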

size_t	segkmem_lpsize;
int	segkmem_lpszc = 0;

size_t	segkmem_kmemlp_quantum = 0x400000;	/* 4MB */
size_t	segkmem_heaplp_quantum;
vmem_t *heap_lp_arena;
static vmem_t *kmem_lp_arena;
static segkmem_lpcb_t segkmem_lpcb;

#ifdef __sparc
static uint_t segkmem_lpshift = PAGESHIFT;
static vmem_t *segkmem_ppa_arena;
#endif

/*
 * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
 * consumed by the large page heap.  By default this parameter is set to
 * 1/8 of physmem but can be adjusted through /etc/system either directly
 * or indirectly by setting "segkmem_kmemlp_pcnt" to the percent of physmem
 * we allow for the large page heap.
 */
size_t	segkmem_kmemlp_max;
uint_t	segkmem_kmemlp_pcnt;
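
/*
 * For example, either of these illustrative /etc/system settings would
 * cap the large page heap (the values are arbitrary):
 *
 *	set segkmem_kmemlp_max = 0x40000000	* absolute cap in bytes
 *	set segkmem_kmemlp_pcnt = 25		* or a percentage of physmem
 */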

/*
 * Getting large pages for the kernel heap could be problematic due to
 * physical memory fragmentation.  That's why we allow preallocating
 * "segkmem_kmemlp_min" bytes at boot time.
 */
size_t	segkmem_kmemlp_min;

/*
 * Throttling is used to avoid expensive tries to allocate large pages
 * for the kernel heap when a lot of successive attempts to do so fail.
 */
static ulong_t segkmem_lpthrottle_max = 0x400000;
static ulong_t segkmem_lpthrottle_start = 0x40;
static ulong_t segkmem_use_lpthrottle = 1;

/*
 * Freed pages accumulate on a garbage list until segkmem is ready,
 * at which point we call segkmem_gc() to free it all.
 */
typedef struct segkmem_gc_list {
	struct segkmem_gc_list	*gc_next;
	vmem_t			*gc_arena;
	size_t			gc_size;
} segkmem_gc_list_t;

static segkmem_gc_list_t *segkmem_gc_list;

/*
 * Allocations from the hat_memload arena add VM_MEMLOAD to their
 * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
 * to take steps to prevent infinite recursion.  HAT allocations also
 * must be non-relocatable to prevent recursive page faults.
 */
static void *
hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
{
	flags |= (VM_MEMLOAD | VM_NORELOC);
	return (segkmem_alloc(vmp, size, flags));
}

/*
 * Allocations from the static_arena (or any other arena that uses
 * segkmem_alloc_permanent()) require non-relocatable (permanently
 * wired) memory pages, since these pages are referenced by physical
 * as well as virtual address.
 */
void *
segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
{
	return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
}

/*
 * Initialize kernel heap boundaries.
 */
void
kernelheap_init(
	void *heap_start,
	void *heap_end,
	char *first_avail,
	void *core_start,
	void *core_end)
{
	uintptr_t textbase;
	size_t core_size;
	size_t heap_size;
	vmem_t *heaptext_parent;
	size_t	heap_lp_size = 0;
#ifdef	__sparc
	size_t kmem64_sz = kmem64_aligned_end - kmem64_base;
#endif	/* __sparc */

	kernelheap = heap_start;
	ekernelheap = heap_end;

#ifdef __sparc
	heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
	/*
	 * Bias the heap_lp start address by kmem64_sz to reduce collisions
	 * in the 4M kernel TSB between the kmem64 area and heap_lp.
	 */
	kmem64_sz = P2ROUNDUP(kmem64_sz, MMU_PAGESIZE256M);
	if (kmem64_sz <= heap_lp_size / 2)
		heap_lp_size -= kmem64_sz;
	heap_lp_base = ekernelheap - heap_lp_size;
	heap_lp_end = heap_lp_base + heap_lp_size;
#endif	/* __sparc */

	/*
	 * If this platform has a 'core' heap area, then the space for
	 * overflow module text should be carved out of the end of that
	 * heap.  Otherwise, it gets carved out of the general purpose
	 * heap.
	 */
	core_size = (uintptr_t)core_end - (uintptr_t)core_start;
	if (core_size > 0) {
		ASSERT(core_size >= HEAPTEXT_SIZE);
		textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
		core_size -= HEAPTEXT_SIZE;
	}
#ifndef __sparc
	else {
		ekernelheap -= HEAPTEXT_SIZE;
		textbase = (uintptr_t)ekernelheap;
	}
#endif

	heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
	heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
	    segkmem_alloc, segkmem_free);

	if (core_size > 0) {
		heap_core_arena = vmem_create("heap_core", core_start,
		    core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
		heap_core_base = core_start;
	} else {
		heap_core_arena = heap_arena;
		heap_core_base = kernelheap;
	}

	/*
	 * Reserve space for the large page heap.  If large pages for the
	 * kernel heap are enabled, the large page heap arena will be created
	 * later in the boot sequence in segkmem_heap_lp_init().  Otherwise
	 * the allocated range will be returned to heap_arena.
	 */
	if (heap_lp_size) {
		(void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
		    heap_lp_base, heap_lp_end,
		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
	}

	/*
	 * Remove the already-spoken-for memory range
	 * [kernelheap, first_avail).
	 */
	(void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
	    0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

#ifdef __sparc
	heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
	    SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
	    NULL, NULL, 0, VM_SLEEP);
	/*
	 * The PROM claims the physical and virtual resources used by panicbuf
	 * and intr_vec_table.  So reserve the space for panicbuf,
	 * intr_vec_table, and the reserved interrupt vector data structures
	 * from the 32-bit heap.
	 */
	(void) vmem_xalloc(heap32_arena, PANICBUFSIZE, PAGESIZE, 0, 0,
	    panicbuf, panicbuf + PANICBUFSIZE,
	    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

	(void) vmem_xalloc(heap32_arena, IVSIZE, PAGESIZE, 0, 0,
	    intr_vec_table, (caddr_t)intr_vec_table + IVSIZE,
	    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

	textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
	heaptext_parent = NULL;
#else	/* __sparc */
	heap32_arena = heap_core_arena;
	heaptext_parent = heap_core_arena;
#endif	/* __sparc */

	heaptext_arena = vmem_create("heaptext", (void *)textbase,
	    HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);

	/*
	 * Create a set of arenas for memory with static translations
	 * (e.g. VA -> PA translations cannot change).  Since using
	 * kernel pages by physical address implies it isn't safe to
	 * walk across page boundaries, the static_arena quantum must
	 * be PAGESIZE.  Any kmem caches that require static memory
	 * should source from static_arena, while direct allocations
	 * should only use static_alloc_arena.
	 */
	static_arena = vmem_create("static", NULL, 0, PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
	static_alloc_arena = vmem_create("static_alloc", NULL, 0,
	    sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
	    0, VM_SLEEP);

	/*
	 * Create an arena for translation data (ptes, hmes, or hblks).
	 * We need an arena for this because hat_memload() is essential
	 * to vmem_populate() (see comments in common/os/vmem.c).
	 *
	 * Note: any kmem cache that allocates from hat_memload_arena
	 * must be created as a KMC_NOHASH cache (i.e. no external slab
	 * and bufctl structures to allocate) so that slab creation doesn't
	 * require anything more than a single vmem_alloc().
	 */
	hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
	    hat_memload_alloc, segkmem_free, heap_arena, 0,
	    VM_SLEEP | VMC_POPULATOR | VMC_DUMPSAFE);
}

void
boot_mapin(caddr_t addr, size_t size)
{
	caddr_t eaddr;
	page_t *pp;
	pfn_t pfnum;

	if (page_resv(btop(size), KM_NOSLEEP) == 0)
		panic("boot_mapin: page_resv failed");

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pfnum = va_to_pfn(addr);
		if (pfnum == PFN_INVALID)
			continue;
		if ((pp = page_numtopp_nolock(pfnum)) == NULL)
			panic("boot_mapin(): No pp for pfnum = %lx", pfnum);

		/*
		 * We must break up any large pages that may have
		 * constituent pages being utilized for BOP_ALLOC()'s
		 * before calling page_numtopp().  The locking code
		 * (i.e. page_reclaim()) can't handle them.
		 */
		if (pp->p_szc != 0)
			page_boot_demote(pp);

		pp = page_numtopp(pfnum, SE_EXCL);
		if (pp == NULL || PP_ISFREE(pp))
			panic("boot_alloc: pp is NULL or free");

		/*
		 * If the cage is on but doesn't yet contain this page,
		 * mark it as non-relocatable.
		 */
		if (kcage_on && !PP_ISNORELOC(pp)) {
			PP_SETNORELOC(pp);
			PLCNT_XFER_NORELOC(pp);
		}

		(void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		page_unlock(pp);
#endif
	}
}

/*
 * Get pages from boot and hash them into the kernel's vp.
 * Used after page structs have been allocated, but before segkmem is ready.
 */
void *
boot_alloc(void *inaddr, size_t size, uint_t align)
{
	caddr_t addr = inaddr;

	if (bootops == NULL)
		prom_panic("boot_alloc: attempt to allocate memory after "
		    "BOP_GONE");

	size = ptob(btopr(size));
#ifdef __sparc
	if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
		panic("boot_alloc: bop_alloc_chunk failed");
#else
	if (BOP_ALLOC(bootops, addr, size, align) != addr)
		panic("boot_alloc: BOP_ALLOC failed");
#endif
	boot_mapin((caddr_t)addr, size);
	return (addr);
}

static void
segkmem_badop()
{
	panic("segkmem_badop");
}

#define	SEGKMEM_BADOP(t)	(t(*)())(uintptr_t)segkmem_badop

/*ARGSUSED*/
static faultcode_t
segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
    enum fault_type type, enum seg_rw rw)
{
	pgcnt_t npages;
	spgcnt_t pg;
	page_t *pp;
	struct vnode *vp = seg->s_data;

	ASSERT(RW_READ_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_fault: bad args");

	/*
	 * If it is one of segkp pages, call segkp_fault.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));

	if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
		return (FC_NOSUPPORT);

	npages = btopr(size);

	switch (type) {
	case F_SOFTLOCK:	/* lock down already-loaded translations */
		for (pg = 0; pg < npages; pg++) {
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_SHARED);
			if (pp == NULL) {
				/*
				 * Hmm, no page.  Does a kernel mapping
				 * exist for it?
				 */
				if (!hat_probe(kas.a_hat, addr)) {
					addr -= PAGESIZE;
					while (--pg >= 0) {
						pp = page_find(vp, (u_offset_t)
						    (uintptr_t)addr);
						if (pp)
							page_unlock(pp);
						addr -= PAGESIZE;
					}
					return (FC_NOMAP);
				}
			}
			addr += PAGESIZE;
		}
		if (rw == S_OTHER)
			hat_reserve(seg->s_as, addr, size);
		return (0);
	case F_SOFTUNLOCK:
		while (npages--) {
			pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
			if (pp)
				page_unlock(pp);
			addr += PAGESIZE;
		}
		return (0);
	default:
		return (FC_NOSUPPORT);
	}
	/*NOTREACHED*/
}

static int
segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_setprot: bad args");

	/*
	 * If it is one of segkp pages, call segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_SETPROT(segkp, addr, size, prot));

	if (prot == 0)
		hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
	else
		hat_chgprot(kas.a_hat, addr, size, prot);
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_CHECKPROT(segkp, addr, size, prot));

	segkmem_badop();
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_KLUSTER(segkp, addr, delta));

	segkmem_badop();
	return (0);
}

static void
segkmem_xdump_range(void *arg, void *start, size_t size)
{
	struct as *as = arg;
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	while (addr < addr_end) {
		pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
		if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
			dump_addpage(as, addr, pfn);
		addr += PAGESIZE;
		dump_timeleft = dump_timeout;
	}
}

static void
segkmem_dump_range(void *arg, void *start, size_t size)
{
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	/*
	 * If we are about to start dumping the range of addresses we
	 * carved out of the kernel heap for the large page heap, walk
	 * heap_lp_arena to find which segments are actually populated.
	 */
	if (SEGKMEM_USE_LARGEPAGES &&
	    addr == heap_lp_base && addr_end == heap_lp_end &&
	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
		vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_xdump_range, arg);
	} else {
		segkmem_xdump_range(arg, start, size);
	}
}

static void
segkmem_dump(struct seg *seg)
{
	/*
	 * The kernel's heap_arena (represented by kvseg) is a very large
	 * VA space, most of which is typically unused.  To speed up dumping
	 * we use vmem_walk() to quickly find the pieces of heap_arena that
	 * are actually in use.  We do the same for heap32_arena and
	 * heap_core.
	 *
	 * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
	 * may ultimately need to allocate memory.  Reentrant walks are
	 * necessarily imperfect snapshots.  The kernel heap continues
	 * to change during a live crash dump, for example.  For a normal
	 * crash dump, however, we know that there won't be any other threads
	 * messing with the heap.  Therefore, at worst, we may fail to dump
	 * the pages that get allocated by the act of dumping; but we will
	 * always dump every page that was allocated when the walk began.
	 *
	 * The other segkmem segments are dense (fully populated), so there's
	 * no need to use this technique when dumping them.
	 *
	 * Note: when adding special dump handling for any new sparsely-
	 * populated segments, be sure to add similar handling to the ::kgrep
	 * code in mdb.
	 */
	if (seg == &kvseg) {
		vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
#ifndef __sparc
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
#endif
	} else if (seg == &kvseg_core) {
		vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	} else if (seg == &kvseg32) {
		vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
		    segkmem_dump_range, seg->s_as);
	/*
	 * We don't want to dump pages attached to kzioseg since they
	 * contain file data from ZFS.  If this page's segment is
	 * kzioseg, return instead of writing it to the dump device.
	 *
	 * The same applies to VM memory allocations.
	 */
	} else if (seg == &kzioseg) {
		return;
#if defined(__amd64)
	} else if (seg == &kvmmseg) {
		return;
#endif
	} else {
		segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
	}
}

/*
 * Lock/unlock kmem pages over a given range [addr, addr + len).
 * Returns a shadow list of pages in ppp.  If there are holes
 * in the range (e.g. some of the kernel mappings do not have
 * underlying page_ts) returns ENOTSUP so that as_pagelock()
 * will handle the range via as_fault(F_SOFTLOCK).
 */
/*ARGSUSED*/
static int
segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
    page_t ***ppp, enum lock_type type, enum seg_rw rw)
{
	page_t **pplist, *pp;
	pgcnt_t npages;
	spgcnt_t pg;
	size_t nb;
	struct vnode *vp = seg->s_data;

	ASSERT(ppp != NULL);

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_PAGELOCK(segkp, addr, len, ppp, type, rw));

	npages = btopr(len);
	nb = sizeof (page_t *) * npages;

	if (type == L_PAGEUNLOCK) {
		pplist = *ppp;
		ASSERT(pplist != NULL);

		for (pg = 0; pg < npages; pg++) {
			pp = pplist[pg];
			page_unlock(pp);
		}
		kmem_free(pplist, nb);
		return (0);
	}

	ASSERT(type == L_PAGELOCK);

	pplist = kmem_alloc(nb, KM_NOSLEEP);
	if (pplist == NULL) {
		*ppp = NULL;
		return (ENOTSUP);	/* take the slow path */
	}

	for (pg = 0; pg < npages; pg++) {
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
		if (pp == NULL) {
			while (--pg >= 0)
				page_unlock(pplist[pg]);
			kmem_free(pplist, nb);
			*ppp = NULL;
			return (ENOTSUP);
		}
		pplist[pg] = pp;
		addr += PAGESIZE;
	}

	*ppp = pplist;
	return (0);
}

/*
 * This is a dummy segkmem function overloaded to call segkp
 * when segkp is under the heap.
 */
/* ARGSUSED */
static int
segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_GETMEMID(segkp, addr, memidp));

	segkmem_badop();
	return (0);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segkmem_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segkmem_capable(struct seg *seg, segcapability_t capability)
{
	if (capability == S_CAPABILITY_NOMINFLT)
		return (1);
	return (0);
}

struct seg_ops segkmem_ops = {
	SEGKMEM_BADOP(int),		/* dup */
	SEGKMEM_BADOP(int),		/* unmap */
	SEGKMEM_BADOP(void),		/* free */
	segkmem_fault,
	SEGKMEM_BADOP(faultcode_t),	/* faulta */
	segkmem_setprot,
	segkmem_checkprot,
	segkmem_kluster,
	SEGKMEM_BADOP(size_t),		/* swapout */
	SEGKMEM_BADOP(int),		/* sync */
	SEGKMEM_BADOP(size_t),		/* incore */
	SEGKMEM_BADOP(int),		/* lockop */
	SEGKMEM_BADOP(int),		/* getprot */
	SEGKMEM_BADOP(u_offset_t),	/* getoffset */
	SEGKMEM_BADOP(int),		/* gettype */
	SEGKMEM_BADOP(int),		/* getvp */
	SEGKMEM_BADOP(int),		/* advise */
	segkmem_dump,
	segkmem_pagelock,
	SEGKMEM_BADOP(int),		/* setpgsz */
	segkmem_getmemid,
	segkmem_getpolicy,		/* getpolicy */
	segkmem_capable,		/* capable */
	seg_inherit_notsup		/* inherit */
};

int
segkmem_create(struct seg *seg)
{
	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
	seg->s_ops = &segkmem_ops;
	if (seg == &kzioseg)
		seg->s_data = &kvps[KV_ZVP];
#if defined(__amd64)
	else if (seg == &kvmmseg)
		seg->s_data = &kvps[KV_VVP];
#endif
	else
		seg->s_data = &kvps[KV_KVP];
	kas.a_size += seg->s_size;
	return (0);
}

/*ARGSUSED*/
page_t *
segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
{
	struct seg kseg = { 0 };
	int pgflags = PG_EXCL;
	struct vnode *vp = arg;

	if (vp == NULL)
		vp = &kvp;

	kseg.s_as = &kas;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if ((vmflag & VM_NOSLEEP) == 0)
		pgflags |= PG_WAIT;
	if (vmflag & VM_PANIC)
		pgflags |= PG_PANIC;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;
	if (vmflag & VM_NORMALPRI) {
		ASSERT(vmflag & VM_NOSLEEP);
		pgflags |= PG_NORMALPRI;
	}

	return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kseg, addr));
}

/*
 * Allocate pages to back the virtual address range [addr, addr + size).
 * If addr is NULL, allocate the virtual address space as well.
 */
void *
segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
    page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
{
	page_t *ppl;
	caddr_t addr = inaddr;
	pgcnt_t npages = btopr(size);
	int allocflag;

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		return (NULL);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		return (NULL);
	}

	ppl = page_create_func(addr, size, vmflag, pcarg);
	if (ppl == NULL) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	/*
	 * Under certain conditions, we need to let the HAT layer know
	 * that it cannot safely allocate memory.  Allocations from
	 * the hat_memload vmem arena always need this, to prevent
	 * infinite recursion.
	 *
	 * In addition, the x86 hat cannot safely do memory
	 * allocations while in vmem_populate(), because there
	 * is no simple bound on its usage.
	 */
	if (vmflag & VM_MEMLOAD)
		allocflag = HAT_NO_KALLOC;
#if defined(__x86)
	else if (vmem_is_populator())
		allocflag = HAT_NO_KALLOC;
#endif
	else
		allocflag = 0;

	while (ppl != NULL) {
		page_t *pp = ppl;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		ASSERT(PAGE_EXCL(pp));
		page_io_unlock(pp);
		hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
		    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK | allocflag);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		if (vmflag & SEGKMEM_SHARELOCKED)
			page_downgrade(pp);
		else
			page_unlock(pp);
#endif
	}

	return (addr);
}

static void *
segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
{
	void *addr;
	segkmem_gc_list_t *gcp, **prev_gcpp;

	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
#ifndef __sparc
		if (bootops->bsys_alloc == NULL)
			halt("Memory allocation between bop_alloc() and "
			    "kmem_alloc().\n");
#endif

		/*
		 * There's not a lot of memory to go around during boot,
		 * so recycle it if we can.
		 */
		for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
		    prev_gcpp = &gcp->gc_next) {
			if (gcp->gc_arena == vmp && gcp->gc_size == size) {
				*prev_gcpp = gcp->gc_next;
				return (gcp);
			}
		}

		addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
		if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
			panic("segkmem_alloc: boot_alloc failed");
		return (addr);
	}
	return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create, vp));
}

void *
segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
}

static void *
segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	return (segkmem_alloc_vn(vmp, size, vmflag, &kvps[KV_ZVP]));
}

/*
 * Any changes to this routine must also be carried over to
 * devmap_free_pages() in the seg_dev driver.  This is because
 * we currently don't have a special kernel segment for non-paged
 * kernel memory that is exported by drivers to user space.
 */
void
segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
    void (*func)(page_t *))
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
		segkmem_gc_list_t *gc = inaddr;
		gc->gc_arena = vmp;
		gc->gc_size = size;
		gc->gc_next = segkmem_gc_list;
		segkmem_gc_list = gc;
		return;
	}

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
#if defined(__x86)
		pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL)
			panic("segkmem_free: page not found");
		if (!page_tryupgrade(pp)) {
			/*
			 * Some other thread has a sharelock.  Wait for
			 * it to drop the lock so we can free this page.
			 */
			page_unlock(pp);
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
		}
#else
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
#endif
		if (pp == NULL)
			panic("segkmem_free: page not found");
		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
		pp->p_lckcnt = 0;
		if (func)
			func(pp);
		else
			page_destroy(pp, 0);
	}
	if (func == NULL)
		page_unresv(npages);

	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);
}

void
segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
{
	segkmem_xfree(vmp, inaddr, size, &kvp, NULL);
}

static void
segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
{
	segkmem_xfree(vmp, inaddr, size, &kvps[KV_ZVP], NULL);
}

void
segkmem_gc(void)
{
	ASSERT(kvseg.s_base != NULL);
	while (segkmem_gc_list != NULL) {
		segkmem_gc_list_t *gc = segkmem_gc_list;
		segkmem_gc_list = gc->gc_next;
		segkmem_free(gc->gc_arena, gc, gc->gc_size);
	}
}

/*
 * Legacy entry points from here to end of file.
 */
void
segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
    pfn_t pfn, uint_t flags)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
	hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
	    flags | HAT_LOAD_LOCK);
}

void
segkmem_mapout(struct seg *seg, void *addr, size_t size)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
}

void *
kmem_getpages(pgcnt_t npages, int kmflag)
{
	return (kmem_alloc(ptob(npages), kmflag));
}

void
kmem_freepages(void *addr, pgcnt_t npages)
{
	kmem_free(addr, ptob(npages));
}

#ifdef __sparc
/*
 * segkmem_page_create_large() allocates a large page to be used for the
 * kmem caches.  If kpr is enabled we ask for a relocatable page unless
 * requested otherwise.  If kpr is disabled we have to ask for a non-reloc
 * page.
 */
static page_t *
segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
{
	int pgflags;

	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if (!(vmflag & VM_NOSLEEP))
		pgflags |= PG_WAIT;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;
	if (vmflag & VM_NORMALPRI)
		pgflags |= PG_NORMALPRI;

	return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kvseg, addr, arg));
}

/*
 * Allocate a large page to back the virtual address range
 * [addr, addr + size).  If addr is NULL, allocate the virtual address
 * space as well.
 */
static void *
segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
    uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
    void *pcarg)
{
	caddr_t addr = inaddr, pa;
	size_t	lpsize = segkmem_lpsize;
	pgcnt_t npages = btopr(size);
	pgcnt_t nbpages = btop(lpsize);
	pgcnt_t nlpages = size >> segkmem_lpshift;
	size_t	ppasize = nbpages * sizeof (page_t *);
	page_t *pp, *rootpp, **ppa, *pplist = NULL;
	int i;

	vmflag |= VM_NOSLEEP;

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		return (NULL);
	}

	/*
	 * Allocate an array we need for hat_memload_array.
	 * We use a separate arena to avoid recursion.
	 * We will not need this array when hat_memload_array learns pp++.
	 */
	if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
		goto fail_array_alloc;
	}

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		goto fail_vmem_alloc;

	ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);

	/* create all the pages */
	for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
		if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
			goto fail_page_create;
		page_list_concat(&pplist, &pp);
	}

	/* at this point we have all the resources to complete the request */
	while ((rootpp = pplist) != NULL) {
		for (i = 0; i < nbpages; i++) {
			ASSERT(pplist != NULL);
			pp = pplist;
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
			ppa[i] = pp;
		}
		/*
		 * Load the locked entry.  It's OK to preload the entry into
		 * the TSB since we now support large mappings in the kernel
		 * TSB.
		 */
		hat_memload_array(kas.a_hat,
		    (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
		    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK);

		for (--i; i >= 0; --i) {
			ppa[i]->p_lckcnt = 1;
			page_unlock(ppa[i]);
		}
	}

	vmem_free(segkmem_ppa_arena, ppa, ppasize);
	return (addr);

fail_page_create:
	while ((rootpp = pplist) != NULL) {
		for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
			ASSERT(pp != NULL);
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
		}
		page_destroy_pages(rootpp);
	}

	if (inaddr == NULL)
		vmem_free(vmp, addr, size);

fail_vmem_alloc:
	vmem_free(segkmem_ppa_arena, ppa, ppasize);

fail_array_alloc:
	page_unresv(npages);

	return (NULL);
}

static void
segkmem_free_one_lp(caddr_t addr, size_t size)
{
	page_t *pp, *rootpp = NULL;
	pgcnt_t pgs_left = btopr(size);

	ASSERT(size == segkmem_lpsize);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
		if (pp == NULL)
			panic("segkmem_free_one_lp: page not found");
		ASSERT(PAGE_EXCL(pp));
		pp->p_lckcnt = 0;
		if (rootpp == NULL)
			rootpp = pp;
	}
	ASSERT(rootpp != NULL);
	page_destroy_pages(rootpp);

	/* page_unresv() is done by the caller */
}
#endif /* __sparc */

/*
 * This function is called to import new spans into the vmem arenas like
 * kmem_default_arena and kmem_oversize_arena.  It first tries to import
 * spans from the large page arena, kmem_lp_arena.  In order to do this it
 * might have to "upgrade" the requested size to the kmem_lp_arena quantum.
 * If it is not able to satisfy the upgraded request, it then falls back to
 * the regular segkmem_alloc(), which satisfies the request by importing
 * from the "*vmp" arena.
 */
/*ARGSUSED*/
void *
segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
{
	size_t size;
	kthread_t *t = curthread;
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;

	ASSERT(sizep != NULL);

	size = *sizep;

	if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
	    !(vmflag & SEGKMEM_SHARELOCKED)) {

		size_t kmemlp_qnt = segkmem_kmemlp_quantum;
		size_t asize = P2ROUNDUP(size, kmemlp_qnt);
		void *addr = NULL;
		ulong_t *lpthrtp = &lpcb->lp_throttle;
		ulong_t lpthrt = *lpthrtp;
		int	dowakeup = 0;
		int	doalloc = 1;

		ASSERT(kmem_lp_arena != NULL);
		ASSERT(asize >= size);

		if (lpthrt != 0) {
			/* try to update the throttle value */
			lpthrt = atomic_inc_ulong_nv(lpthrtp);
			if (lpthrt >= segkmem_lpthrottle_max) {
				lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
				    segkmem_lpthrottle_max / 4);
			}

			/*
			 * Once we get above the throttle start, do an
			 * exponential backoff on trying large pages
			 * and reaping.
			 */
			if (lpthrt > segkmem_lpthrottle_start &&
			    !ISP2(lpthrt)) {
				lpcb->allocs_throttled++;
				lpthrt--;
				if (ISP2(lpthrt))
					kmem_reap();
				return (segkmem_alloc(vmp, size, vmflag));
			}
		}

		if (!(vmflag & VM_NOSLEEP) &&
		    segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
		    vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
		    asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {

			/*
			 * We are low on free memory in kmem_lp_arena, so
			 * let only one thread allocate a heap_lp quantum
			 * size chunk that everybody is going to share.
			 */
			mutex_enter(&lpcb->lp_lock);

			if (lpcb->lp_wait) {

				/* we are not the first one - wait */
				cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
				if (vmem_size(kmem_lp_arena, VMEM_FREE) <
				    kmemlp_qnt) {
					doalloc = 0;
				}
			} else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
			    kmemlp_qnt) {

				/*
				 * We are the first one, so make sure we
				 * import a large page.
				 */
				if (asize == kmemlp_qnt)
					asize += kmemlp_qnt;
				dowakeup = 1;
				lpcb->lp_wait = 1;
			}

			mutex_exit(&lpcb->lp_lock);
		}

		/*
		 * The VM_ABORT flag prevents sleeps in vmem_xalloc when
		 * large pages are not available.  In that case this
		 * allocation attempt will fail and we will retry the
		 * allocation with small pages.  We also do not want to
		 * panic if this allocation fails because we are going
		 * to retry.
		 */
		if (doalloc) {
			addr = vmem_alloc(kmem_lp_arena, asize,
			    (vmflag | VM_ABORT) & ~VM_PANIC);

			if (dowakeup) {
				mutex_enter(&lpcb->lp_lock);
				ASSERT(lpcb->lp_wait != 0);
				lpcb->lp_wait = 0;
				cv_broadcast(&lpcb->lp_cv);
				mutex_exit(&lpcb->lp_lock);
			}
		}

		if (addr != NULL) {
			*sizep = asize;
			*lpthrtp = 0;
			return (addr);
		}

		if (vmflag & VM_NOSLEEP)
			lpcb->nosleep_allocs_failed++;
		else
			lpcb->sleep_allocs_failed++;
		lpcb->alloc_bytes_failed += size;

		/* if large page throttling is not started yet do it */
		if (segkmem_use_lpthrottle && lpthrt == 0) {
			lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
		}
	}
	return (segkmem_alloc(vmp, size, vmflag));
}

void
segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
{
	if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
		segkmem_free(vmp, inaddr, size);
	} else {
		vmem_free(kmem_lp_arena, inaddr, size);
	}
}

#ifdef __sparc
/*
 * segkmem_alloc_lpi() imports virtual memory from the large page heap
 * arena into the kmem_lp arena.  In the process it maps the imported
 * segment with large pages.
 */
static void *
segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	void  *addr;

	ASSERT(size != 0);
	ASSERT(vmp == heap_lp_arena);

	/* do not allow the large page heap to grow beyond its limit */
	if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
		lpcb->allocs_limited++;
		return (NULL);
	}

	addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
	    segkmem_page_create_large, NULL);
	return (addr);
}

/*
 * segkmem_free_lpi() returns virtual memory back to the large page heap
 * arena from the kmem_lp arena.  Before doing this it unmaps the segment
 * and frees the large pages used to map it.
 */
static void
segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
{
	pgcnt_t	nlpages = size >> segkmem_lpshift;
	size_t	lpsize = segkmem_lpsize;
	caddr_t	addr = inaddr;
	pgcnt_t	npages = btopr(size);
	int	i;

	ASSERT(vmp == heap_lp_arena);
	ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
	ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);

	for (i = 0; i < nlpages; i++) {
		segkmem_free_one_lp(addr, lpsize);
		addr += lpsize;
	}

	page_unresv(npages);

	vmem_free(vmp, inaddr, size);
}
#endif /* __sparc */

/*
 * This function is called at system boot time by kmem_init right after
 * the /etc/system file has been read.  Based on the hardware configuration
 * and the /etc/system settings, it determines whether the system is going
 * to use large pages.  The initialization necessary to actually start
 * using large pages happens later in the boot process, after
 * segkmem_heap_lp_init() is called.
 */
int
segkmem_lpsetup()
{
	int use_large_pages = 0;

#ifdef __sparc

	size_t memtotal = physmem * PAGESIZE;

	if (heap_lp_base == NULL) {
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* get a platform dependent value of large page size for kernel heap */
	segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);

	if (segkmem_lpsize <= PAGESIZE) {
		/*
		 * Put the virtual space reserved for the large page kernel
		 * heap back into the regular heap.
		 */
		vmem_xfree(heap_arena, heap_lp_base,
		    heap_lp_end - heap_lp_base);
		heap_lp_base = NULL;
		heap_lp_end = NULL;
		segkmem_lpsize = PAGESIZE;
		return (0);
	}

	/* set heap_lp quantum if necessary */
	if (segkmem_heaplp_quantum == 0 || !ISP2(segkmem_heaplp_quantum) ||
	    P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
		segkmem_heaplp_quantum = segkmem_lpsize;
	}

	/* set kmem_lp quantum if necessary */
	if (segkmem_kmemlp_quantum == 0 || !ISP2(segkmem_kmemlp_quantum) ||
	    segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
		segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
	}

	/* set total amount of memory allowed for large page kernel heap */
	if (segkmem_kmemlp_max == 0) {
		if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
			segkmem_kmemlp_pcnt = 12;
		segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
	}
	segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
	    segkmem_heaplp_quantum);

	/* fix lp kmem preallocation request if necessary */
	if (segkmem_kmemlp_min) {
		segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum);
		if (segkmem_kmemlp_min > segkmem_kmemlp_max)
			segkmem_kmemlp_min = segkmem_kmemlp_max;
	}

	use_large_pages = 1;
	segkmem_lpszc = page_szc(segkmem_lpsize);
	segkmem_lpshift = page_get_shift(segkmem_lpszc);

#endif
	return (use_large_pages);
}

void
segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
{
	ASSERT(zio_mem_base != NULL);
	ASSERT(zio_mem_size != 0);

	/*
	 * To reduce VA space fragmentation, we set up quantum caches for the
	 * smaller sizes; we chose 32k because that translates to 128k VA
	 * slabs, which matches nicely with the common 128k zio_data bufs.
	 */
	zio_arena = vmem_create("zfs_file_data", zio_mem_base, zio_mem_size,
	    PAGESIZE, NULL, NULL, NULL, 32 * 1024, VM_SLEEP);

	zio_alloc_arena = vmem_create("zfs_file_data_buf", NULL, 0, PAGESIZE,
	    segkmem_zio_alloc, segkmem_zio_free, zio_arena, 0, VM_SLEEP);

	ASSERT(zio_arena != NULL);
	ASSERT(zio_alloc_arena != NULL);
}

#if defined(__amd64)

void
segkmem_kvmm_init(void *base, size_t size)
{
	ASSERT(base != NULL);
	ASSERT(size != 0);

	kvmm_arena = vmem_create("kvmm_arena", base, size, 1024 * 1024,
	    NULL, NULL, NULL, 0, VM_SLEEP);

	ASSERT(kvmm_arena != NULL);
}

#elif defined(__sparc)

static void *
segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
	void  *addr;

	if (ppaquantum <= PAGESIZE)
		return (segkmem_alloc(vmp, size, vmflag));

	ASSERT((size & (ppaquantum - 1)) == 0);

	addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
	if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
	    segkmem_page_create, NULL) == NULL) {
		vmem_xfree(vmp, addr, size);
		addr = NULL;
	}

	return (addr);
}

static void
segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
{
	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);

	ASSERT(addr != NULL);

	if (ppaquantum <= PAGESIZE) {
		segkmem_free(vmp, addr, size);
	} else {
		segkmem_free(NULL, addr, size);
		vmem_xfree(vmp, addr, size);
	}
}

void
segkmem_heap_lp_init()
{
	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
	size_t heap_lp_size = heap_lp_end - heap_lp_base;
	size_t lpsize = segkmem_lpsize;
	size_t ppaquantum;
	void   *addr;

	if (segkmem_lpsize <= PAGESIZE) {
		ASSERT(heap_lp_base == NULL);
		ASSERT(heap_lp_end == NULL);
		return;
	}

	ASSERT(segkmem_heaplp_quantum >= lpsize);
	ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
	ASSERT(lpcb->lp_uselp == 0);
	ASSERT(heap_lp_base != NULL);
	ASSERT(heap_lp_end != NULL);
	ASSERT(heap_lp_base < heap_lp_end);
	ASSERT(heap_lp_arena == NULL);
	ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
	ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);

	/* create the large page heap arena */
	heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
	    segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);

	ASSERT(heap_lp_arena != NULL);

	/* This arena caches memory already mapped by large pages */
	kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
	    segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);

	ASSERT(kmem_lp_arena != NULL);

	mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * This arena is used for the arrays of page_t pointers necessary
	 * to call hat_memload_array.
	 */
	ppaquantum = btopr(lpsize) * sizeof (page_t *);
	segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
	    segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
	    VM_SLEEP);

	ASSERT(segkmem_ppa_arena != NULL);

	/* preallocate some memory for the lp kernel heap */
	if (segkmem_kmemlp_min) {

		ASSERT(P2PHASE(segkmem_kmemlp_min,
		    segkmem_heaplp_quantum) == 0);

		if ((addr = segkmem_alloc_lpi(heap_lp_arena,
		    segkmem_kmemlp_min, VM_SLEEP)) != NULL) {

			addr = vmem_add(kmem_lp_arena, addr,
			    segkmem_kmemlp_min, VM_SLEEP);
			ASSERT(addr != NULL);
		}
	}

	lpcb->lp_uselp = 1;
}

#endif