xref: /freebsd/sys/i386/i386/pmap_base.c (revision ef9017aa174db96ee741b936b984f2b5d61dff9f)
/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 * Copyright (c) 2018 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Portions of this software were developed by
 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_smp.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#ifdef DEV_APIC
#include <sys/bus.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#endif
#include <x86/ifunc.h>

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM/pmap parameters");

#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <machine/pmap_base.h>

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

int unmapped_buf_allowed = 1;

int pti;

u_long physfree;	/* phys addr of next free page */
u_long vm86phystk;	/* PA of vm86/bios stack */
u_long vm86paddr;	/* address of vm86 region */
int vm86pa;		/* phys addr of vm86 region */
u_long KERNend;		/* phys addr end of kernel (just after bss) */
u_long KPTphys;		/* phys addr of kernel page tables */
caddr_t ptvmmap = 0;
vm_offset_t kernel_vm_end;

int i386_pmap_VM_NFREEORDER;
int i386_pmap_VM_LEVEL_0_ORDER;
int i386_pmap_PDRSHIFT;

int pat_works = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD,
    &pat_works, 0,
    "Is page attribute table fully functional?");

int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pg_ps_enabled, 0,
    "Are large page mappings enabled?");

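/*
 * PV entries record, for each physical page, the virtual mappings that
 * reference it.  The knobs below bound the size of the PV entry pool
 * and report its current usage.
 */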
int pv_entry_max = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD,
    &pv_entry_max, 0,
    "Max number of PV entries");

int pv_entry_count = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD,
    &pv_entry_count, 0,
    "Current number of pv entries");

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

int shpgperproc = PMAP_SHPGPERPROC;
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD,
    &shpgperproc, 0,
    "Page share factor per proc");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "2/4MB page mapping counters");

u_long pmap_pde_demotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pde_demotions, 0,
    "2/4MB page demotions");

u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0,
    "2/4MB page mappings");

u_long pmap_pde_p_failures;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pde_p_failures, 0,
    "2/4MB page promotion failures");

u_long pmap_pde_promotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pde_promotions, 0,
    "2/4MB page promotions");

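/*
 * PMAP1 is a reserved kernel PTE slot that pmap_pte_quick() repoints
 * at whichever page-table page it needs to inspect; these counters
 * record how often the cached mapping had to change versus being
 * reused.
 */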
#ifdef SMP
int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
    &PMAP1changedcpu, 0,
    "Number of times pmap_pte_quick changed CPU with same PMAP1");
#endif

int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
    &PMAP1changed, 0,
    "Number of times pmap_pte_quick changed PMAP1");
int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
    &PMAP1unchanged, 0,
    "Number of times pmap_pte_quick didn't change PMAP1");

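/*
 * vm.kvm_size reports the span of the kernel virtual address space
 * (VM_MAX_KERNEL_ADDRESS - KERNBASE); vm.kvm_free reports how much of
 * it remains unused beyond the current kernel_vm_end.
 */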
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize;

	ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
	return (sysctl_handle_long(oidp, &ksize, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_size, "IU",
    "Size of KVM");

static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree;

	kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
	return (sysctl_handle_long(oidp, &kfree, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_free, "IU",
    "Amount of KVM free");

#ifdef PV_STATS
int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
long pv_entry_frees, pv_entry_allocs;
int pv_entry_spare;

SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD,
    &pc_chunk_count, 0,
    "Current number of pv entry chunks");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD,
    &pc_chunk_allocs, 0,
    "Current number of pv entry chunks allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD,
    &pc_chunk_frees, 0,
    "Current number of pv entry chunk frees");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD,
    &pc_chunk_tryfail, 0,
    "Number of failed attempts to get a pv entry chunk page");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD,
    &pv_entry_frees, 0,
    "Current number of pv entry frees");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD,
    &pv_entry_allocs, 0,
    "Current number of pv entry allocs");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD,
    &pv_entry_spare, 0,
    "Current number of spare pv entries");
#endif

static int pmap_growkernel_panic = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, growkernel_panic, CTLFLAG_RDTUN,
    &pmap_growkernel_panic, 0,
    "Panic on failure to allocate kernel page table page");

struct pmap kernel_pmap_store;
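
/*
 * Every pmap entry point below is a thin shim that dispatches through
 * this method table.  It is set exactly once, in pmap_cold(), to either
 * the PAE or the non-PAE implementation, which lets a single i386
 * kernel support both page-table formats at run time.
 */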
static struct pmap_methods *pmap_methods_ptr;

static int
sysctl_kmaps(SYSCTL_HANDLER_ARGS)
{
	return (pmap_methods_ptr->pm_sysctl_kmaps(oidp, arg1, arg2, req));
}
SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
    NULL, 0, sysctl_kmaps, "A",
    "Dump kernel address layout");

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pat_mode = PAT_WRITE_BACK;
}

void
invltlb_glob(void)
{

	invltlb();
}

static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
    vm_offset_t eva);
static void pmap_invalidate_cache_range_all(vm_offset_t sva,
    vm_offset_t eva);

void
pmap_flush_page(vm_page_t m)
{

	pmap_methods_ptr->pm_flush_page(m);
}

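/*
 * Resolved once at boot via an ifunc: CPUs that self-snoop (CPUID_SS)
 * need no cache flush at all, CPUs with CLFLUSH can flush line by
 * line, and anything older falls back to a full cache invalidation.
 */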
DEFINE_IFUNC(, void, pmap_invalidate_cache_range, (vm_offset_t, vm_offset_t))
{

	if ((cpu_feature & CPUID_SS) != 0)
		return (pmap_invalidate_cache_range_selfsnoop);
	if ((cpu_feature & CPUID_CLFSH) != 0)
		return (pmap_force_invalidate_cache_range);
	return (pmap_invalidate_cache_range_all);
}

#define	PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)

static void
pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
{

	KASSERT((sva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: sva not page-aligned"));
	KASSERT((eva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: eva not page-aligned"));
}

static void
pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
{

	pmap_invalidate_cache_range_check_align(sva, eva);
}

void
pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{

	sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
	if (eva - sva >= PMAP_CLFLUSH_THRESHOLD) {
		/*
		 * The supplied range is 2MB or larger.  Globally
		 * invalidate the cache instead of flushing it line
		 * by line.
		 */
		pmap_invalidate_cache();
		return;
	}

#ifdef DEV_APIC
	/*
	 * XXX: Some CPUs fault, hang, or trash the local APIC
	 * registers if we use CLFLUSH on the local APIC
	 * range.  The local APIC is always uncached, so we
	 * don't need to flush for that range anyway.
	 */
	if (pmap_kextract(sva) == lapic_paddr)
		return;
#endif

	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
		/*
		 * Do per-cache line flush.  Use the sfence
		 * instruction to ensure that previous stores are
		 * included in the write-back.  The processor
		 * propagates the flush to other processors in the
		 * cache coherence domain.
		 */
		sfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflushopt(sva);
		sfence();
	} else {
		/*
		 * Writes are ordered by CLFLUSH on Intel CPUs.
		 */
		if (cpu_vendor_id != CPU_VENDOR_INTEL)
			mfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflush(sva);
		if (cpu_vendor_id != CPU_VENDOR_INTEL)
			mfence();
	}
}

static void
pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
{

	pmap_invalidate_cache_range_check_align(sva, eva);
	pmap_invalidate_cache();
}

void
pmap_invalidate_cache_pages(vm_page_t *pages, int count)
{
	int i;

	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
	    (cpu_feature & CPUID_CLFSH) == 0) {
		pmap_invalidate_cache();
	} else {
		for (i = 0; i < count; i++)
			pmap_flush_page(pages[i]);
	}
}

void
pmap_ksetrw(vm_offset_t va)
{

	pmap_methods_ptr->pm_ksetrw(va);
}

void
pmap_remap_lower(bool enable)
{

	pmap_methods_ptr->pm_remap_lower(enable);
}

void
pmap_remap_lowptdi(bool enable)
{

	pmap_methods_ptr->pm_remap_lowptdi(enable);
}

void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

	pmap_methods_ptr->pm_align_superpage(object, offset, addr, size);
}

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{

	return (pmap_methods_ptr->pm_quick_enter_page(m));
}

void
pmap_quick_remove_page(vm_offset_t addr)
{

	pmap_methods_ptr->pm_quick_remove_page(addr);
}

void *
pmap_trm_alloc(size_t size, int flags)
{

	return (pmap_methods_ptr->pm_trm_alloc(size, flags));
}

void
pmap_trm_free(void *addr, size_t size)
{

	pmap_methods_ptr->pm_trm_free(addr, size);
}

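/*
 * Intentionally empty: x86 keeps instruction fetch coherent with data
 * stores, so no explicit icache synchronization is required here.
 */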
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}

vm_offset_t
pmap_get_map_low(void)
{

	return (pmap_methods_ptr->pm_get_map_low());
}

vm_offset_t
pmap_get_vm_maxuser_address(void)
{

	return (pmap_methods_ptr->pm_get_vm_maxuser_address());
}

vm_paddr_t
pmap_kextract(vm_offset_t va)
{

	return (pmap_methods_ptr->pm_kextract(va));
}

vm_paddr_t
pmap_pg_frame(vm_paddr_t pa)
{

	return (pmap_methods_ptr->pm_pg_frame(pa));
}

void
pmap_sf_buf_map(struct sf_buf *sf)
{

	pmap_methods_ptr->pm_sf_buf_map(sf);
}

void
pmap_cp_slow0_map(vm_offset_t kaddr, int plen, vm_page_t *ma)
{

	pmap_methods_ptr->pm_cp_slow0_map(kaddr, plen, ma);
}

u_int
pmap_get_kcr3(void)
{

	return (pmap_methods_ptr->pm_get_kcr3());
}

u_int
pmap_get_cr3(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_get_cr3(pmap));
}

caddr_t
pmap_cmap3(vm_paddr_t pa, u_int pte_flags)
{

	return (pmap_methods_ptr->pm_cmap3(pa, pte_flags));
}

void
pmap_basemem_setup(u_int basemem)
{

	pmap_methods_ptr->pm_basemem_setup(basemem);
}

void
pmap_set_nx(void)
{

	pmap_methods_ptr->pm_set_nx();
}

void *
pmap_bios16_enter(void)
{

	return (pmap_methods_ptr->pm_bios16_enter());
}

void
pmap_bios16_leave(void *handle)
{

	pmap_methods_ptr->pm_bios16_leave(handle);
}

void
pmap_bootstrap(vm_paddr_t firstaddr)
{

	pmap_methods_ptr->pm_bootstrap(firstaddr);
}

bool
pmap_is_valid_memattr(pmap_t pmap, vm_memattr_t mode)
{

	return (pmap_methods_ptr->pm_is_valid_memattr(pmap, mode));
}

int
pmap_cache_bits(pmap_t pmap, int mode, bool is_pde)
{

	return (pmap_methods_ptr->pm_cache_bits(pmap, mode, is_pde));
}

bool
pmap_ps_enabled(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_ps_enabled(pmap));
}

void
pmap_pinit0(pmap_t pmap)
{

	pmap_methods_ptr->pm_pinit0(pmap);
}

int
pmap_pinit(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_pinit(pmap));
}

void
pmap_activate(struct thread *td)
{

	pmap_methods_ptr->pm_activate(td);
}

void
pmap_activate_boot(pmap_t pmap)
{

	pmap_methods_ptr->pm_activate_boot(pmap);
}

void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{

	pmap_methods_ptr->pm_advise(pmap, sva, eva, advice);
}

void
pmap_clear_modify(vm_page_t m)
{

	pmap_methods_ptr->pm_clear_modify(m);
}

int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{

	return (pmap_methods_ptr->pm_change_attr(va, size, mode));
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
{

	return (pmap_methods_ptr->pm_mincore(pmap, addr, pap));
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
{

	pmap_methods_ptr->pm_copy(dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	pmap_methods_ptr->pm_copy_page(src, dst);
}

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

	pmap_methods_ptr->pm_copy_pages(ma, a_offset, mb, b_offset, xfersize);
}

void
pmap_zero_page(vm_page_t m)
{

	pmap_methods_ptr->pm_zero_page(m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	pmap_methods_ptr->pm_zero_page_area(m, off, size);
}

int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind)
{

	return (pmap_methods_ptr->pm_enter(pmap, va, m, prot, flags, psind));
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

	pmap_methods_ptr->pm_enter_object(pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	pmap_methods_ptr->pm_enter_quick(pmap, va, m, prot);
}

void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{

	return (pmap_methods_ptr->pm_kenter_temporary(pa, i));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	pmap_methods_ptr->pm_object_init_pt(pmap, addr, object, pindex, size);
}

void
pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_unwire(pmap, sva, eva);
}

bool
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_exists_quick(pmap, m));
}

int
pmap_page_wired_mappings(vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_wired_mappings(m));
}

bool
pmap_page_is_mapped(vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_is_mapped(m));
}

void
pmap_remove_pages(pmap_t pmap)
{

	pmap_methods_ptr->pm_remove_pages(pmap);
}

bool
pmap_is_modified(vm_page_t m)
{

	return (pmap_methods_ptr->pm_is_modified(m));
}

bool
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	return (pmap_methods_ptr->pm_is_prefaultable(pmap, addr));
}

bool
pmap_is_referenced(vm_page_t m)
{

	return (pmap_methods_ptr->pm_is_referenced(m));
}

void
pmap_remove_write(vm_page_t m)
{

	pmap_methods_ptr->pm_remove_write(m);
}

int
pmap_ts_referenced(vm_page_t m)
{

	return (pmap_methods_ptr->pm_ts_referenced(m));
}

void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, mode,
	    MAPDEV_SETATTR));
}

void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_UNCACHEABLE,
	    MAPDEV_SETATTR));
}

void *
pmap_mapbios(vm_paddr_t pa, vm_size_t size)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_WRITE_BACK, 0));
}

void
pmap_unmapdev(void *p, vm_size_t size)
{

	pmap_methods_ptr->pm_unmapdev(p, size);
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	pmap_methods_ptr->pm_page_set_memattr(m, ma);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

	return (pmap_methods_ptr->pm_extract(pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

	return (pmap_methods_ptr->pm_extract_and_hold(pmap, va, prot));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	return (pmap_methods_ptr->pm_map(virt, start, end, prot));
}

void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{

	pmap_methods_ptr->pm_qenter(sva, ma, count);
}

void
pmap_qremove(vm_offset_t sva, int count)
{

	pmap_methods_ptr->pm_qremove(sva, count);
}

void
pmap_release(pmap_t pmap)
{

	pmap_methods_ptr->pm_release(pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_remove(pmap, sva, eva);
}

void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{

	pmap_methods_ptr->pm_protect(pmap, sva, eva, prot);
}

void
pmap_remove_all(vm_page_t m)
{

	pmap_methods_ptr->pm_remove_all(m);
}

void
pmap_init(void)
{

	pmap_methods_ptr->pm_init();
}

void
pmap_init_pat(void)
{

	pmap_methods_ptr->pm_init_pat();
}

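/*
 * Grow the kernel page table to cover addr.  Failure is returned to
 * the caller unless the vm.pmap.growkernel_panic tunable requests an
 * immediate panic instead.
 */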
int
pmap_growkernel(vm_offset_t addr)
{
	int rv;

	rv = pmap_methods_ptr->pm_growkernel(addr);
	if (rv != KERN_SUCCESS && pmap_growkernel_panic)
		panic("pmap_growkernel: no memory to grow kernel");
	return (rv);
}

void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

	pmap_methods_ptr->pm_invalidate_page(pmap, va);
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_invalidate_range(pmap, sva, eva);
}

void
pmap_invalidate_all(pmap_t pmap)
{

	pmap_methods_ptr->pm_invalidate_all(pmap);
}

void
pmap_invalidate_cache(void)
{

	pmap_methods_ptr->pm_invalidate_cache();
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	pmap_methods_ptr->pm_kenter(va, pa);
}

void
pmap_kremove(vm_offset_t va)
{

	pmap_methods_ptr->pm_kremove(va);
}

void
pmap_active_cpus(pmap_t pmap, cpuset_t *res)
{
	*res = pmap->pm_active;
}

extern struct pmap_methods pmap_pae_methods, pmap_nopae_methods;
int pae_mode;
SYSCTL_INT(_vm_pmap, OID_AUTO, pae_mode, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pae_mode, 0,
    "PAE");

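/*
 * Early-boot entry point: choose the page-table flavor for this boot.
 * PAE is the default whenever the CPU advertises it (CPUID_PAE); the
 * vm.pmap.pae_mode loader tunable, fetched by hand because this runs
 * before the normal tunable machinery, may force it off.
 */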
void
pmap_cold(void)
{

	init_static_kenv((char *)bootinfo.bi_envp, 0);
	pae_mode = (cpu_feature & CPUID_PAE) != 0;
	if (pae_mode)
		TUNABLE_INT_FETCH("vm.pmap.pae_mode", &pae_mode);
	if (pae_mode) {
		pmap_methods_ptr = &pmap_pae_methods;
		pmap_pae_cold();
	} else {
		pmap_methods_ptr = &pmap_nopae_methods;
		pmap_nopae_cold();
	}
}
983