/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 * Copyright (c) 2018 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Portions of this software were developed by
 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_smp.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#ifdef DEV_APIC
#include <sys/bus.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#endif
#include <x86/ifunc.h>

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM/pmap parameters");

#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <machine/pmap_base.h>

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

int unmapped_buf_allowed = 1;

int pti;

u_long physfree;	/* phys addr of next free page */
u_long vm86phystk;	/* PA of vm86/bios stack */
u_long vm86paddr;	/* address of vm86 region */
int vm86pa;		/* phys addr of vm86 region */
u_long KERNend;		/* phys addr end of kernel (just after bss) */
u_long KPTphys;		/* phys addr of kernel page tables */
caddr_t ptvmmap = 0;
vm_offset_t kernel_vm_end;

int i386_pmap_VM_NFREEORDER;
int i386_pmap_VM_LEVEL_0_ORDER;
int i386_pmap_PDRSHIFT;

int pat_works = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD,
    &pat_works, 0,
    "Is page attribute table fully functional?");

int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pg_ps_enabled, 0,
    "Are large page mappings enabled?");

int pv_entry_max = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD,
    &pv_entry_max, 0,
    "Max number of PV entries");

int pv_entry_count = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD,
    &pv_entry_count, 0,
    "Current number of pv entries");

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

int shpgperproc = PMAP_SHPGPERPROC;
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD,
    &shpgperproc, 0,
    "Page share factor per proc");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "2/4MB page mapping counters");

u_long pmap_pde_demotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pde_demotions, 0,
    "2/4MB page demotions");

u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0,
    "2/4MB page mappings");

u_long pmap_pde_p_failures;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pde_p_failures, 0,
    "2/4MB page promotion failures");

u_long pmap_pde_promotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pde_promotions, 0,
    "2/4MB page promotions");

#ifdef SMP
int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
    &PMAP1changedcpu, 0,
    "Number of times pmap_pte_quick changed CPU with same PMAP1");
#endif

int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
    &PMAP1changed, 0,
    "Number of times pmap_pte_quick changed PMAP1");
int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
    &PMAP1unchanged, 0,
    "Number of times pmap_pte_quick didn't change PMAP1");

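/*
 * The two sysctl handlers below report the size of the kernel virtual
 * address space and the amount of it that lies beyond the current end
 * of the kernel map.
 */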
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize;

	ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
	return (sysctl_handle_long(oidp, &ksize, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_size, "LU",
    "Size of KVM");

static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree;

	kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
	return (sysctl_handle_long(oidp, &kfree, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_free, "LU",
    "Amount of KVM free");

#ifdef PV_STATS
int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
long pv_entry_frees, pv_entry_allocs;
int pv_entry_spare;

SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD,
    &pc_chunk_count, 0,
    "Current number of pv entry chunks");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD,
    &pc_chunk_allocs, 0,
    "Total number of pv entry chunks allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD,
    &pc_chunk_frees, 0,
    "Total number of pv entry chunks freed");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD,
    &pc_chunk_tryfail, 0,
    "Number of failed attempts to allocate a pv entry chunk page");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD,
    &pv_entry_frees, 0,
    "Total number of pv entries freed");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD,
    &pv_entry_allocs, 0,
    "Total number of pv entries allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD,
    &pv_entry_spare, 0,
    "Current number of spare pv entries");
#endif

struct pmap kernel_pmap_store;
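/*
 * Most of the pmap(9) entry points below are thin wrappers that
 * dispatch through this method table.  pmap_cold() points it at
 * either the PAE or the non-PAE implementation during early boot.
 */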
static struct pmap_methods *pmap_methods_ptr;

static int
sysctl_kmaps(SYSCTL_HANDLER_ARGS)
{
	return (pmap_methods_ptr->pm_sysctl_kmaps(oidp, arg1, arg2, req));
}
SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
    NULL, 0, sysctl_kmaps, "A",
    "Dump kernel address layout");

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pat_mode = PAT_WRITE_BACK;
}

void
invltlb_glob(void)
{

	invltlb();
}

static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
    vm_offset_t eva);
static void pmap_invalidate_cache_range_all(vm_offset_t sva,
    vm_offset_t eva);

void
pmap_flush_page(vm_page_t m)
{

	pmap_methods_ptr->pm_flush_page(m);
}

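/*
 * Resolve pmap_invalidate_cache_range() once, at boot: self-snooping
 * CPUs (CPUID_SS) keep their caches coherent and need no flush, CPUs
 * with CLFLUSH can flush one cache line at a time, and anything else
 * must invalidate the entire cache.
 */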
DEFINE_IFUNC(, void, pmap_invalidate_cache_range, (vm_offset_t, vm_offset_t))
{

	if ((cpu_feature & CPUID_SS) != 0)
		return (pmap_invalidate_cache_range_selfsnoop);
	if ((cpu_feature & CPUID_CLFSH) != 0)
		return (pmap_force_invalidate_cache_range);
	return (pmap_invalidate_cache_range_all);
}

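/*
 * Ranges at least this large are flushed by invalidating the whole
 * cache rather than by issuing CLFLUSH per cache line.
 */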
#define	PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)

static void
pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
{

	KASSERT((sva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: sva not page-aligned"));
	KASSERT((eva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: eva not page-aligned"));
}

static void
pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
{

	pmap_invalidate_cache_range_check_align(sva, eva);
}

void
pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{

	sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
	if (eva - sva >= PMAP_CLFLUSH_THRESHOLD) {
		/*
		 * The supplied range is at least 2MB.
		 * Globally invalidate the cache.
		 */
		pmap_invalidate_cache();
		return;
	}

#ifdef DEV_APIC
	/*
	 * XXX: Some CPUs fault, hang, or trash the local APIC
	 * registers if we use CLFLUSH on the local APIC
	 * range.  The local APIC is always uncached, so we
	 * don't need to flush for that range anyway.
	 */
	if (pmap_kextract(sva) == lapic_paddr)
		return;
#endif

	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
		/*
		 * Do a per-cache-line flush.  Use the sfence
		 * instruction to ensure that previous stores are
		 * included in the write-back.  The processor
		 * propagates the flush to other processors in the
		 * cache coherence domain.
		 */
		sfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflushopt(sva);
		sfence();
	} else {
		/*
		 * Writes are ordered by CLFLUSH on Intel CPUs.
		 */
		if (cpu_vendor_id != CPU_VENDOR_INTEL)
			mfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflush(sva);
		if (cpu_vendor_id != CPU_VENDOR_INTEL)
			mfence();
	}
}

static void
pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
{

	pmap_invalidate_cache_range_check_align(sva, eva);
	pmap_invalidate_cache();
}

void
pmap_invalidate_cache_pages(vm_page_t *pages, int count)
{
	int i;

	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
	    (cpu_feature & CPUID_CLFSH) == 0) {
		pmap_invalidate_cache();
	} else {
		for (i = 0; i < count; i++)
			pmap_flush_page(pages[i]);
	}
}

void
pmap_ksetrw(vm_offset_t va)
{

	pmap_methods_ptr->pm_ksetrw(va);
}

void
pmap_remap_lower(bool enable)
{

	pmap_methods_ptr->pm_remap_lower(enable);
}

void
pmap_remap_lowptdi(bool enable)
{

	pmap_methods_ptr->pm_remap_lowptdi(enable);
}

void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

	return (pmap_methods_ptr->pm_align_superpage(object, offset,
	    addr, size));
}

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{

	return (pmap_methods_ptr->pm_quick_enter_page(m));
}

void
pmap_quick_remove_page(vm_offset_t addr)
{

	return (pmap_methods_ptr->pm_quick_remove_page(addr));
}

void *
pmap_trm_alloc(size_t size, int flags)
{

	return (pmap_methods_ptr->pm_trm_alloc(size, flags));
}

void
pmap_trm_free(void *addr, size_t size)
{

	pmap_methods_ptr->pm_trm_free(addr, size);
}

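/*
 * Nothing to do here: x86 hardware keeps the instruction cache
 * coherent with the data cache, so no explicit synchronization is
 * required after storing instructions.
 */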
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}

vm_offset_t
pmap_get_map_low(void)
{

	return (pmap_methods_ptr->pm_get_map_low());
}

vm_offset_t
pmap_get_vm_maxuser_address(void)
{

	return (pmap_methods_ptr->pm_get_vm_maxuser_address());
}

vm_paddr_t
pmap_kextract(vm_offset_t va)
{

	return (pmap_methods_ptr->pm_kextract(va));
}

vm_paddr_t
pmap_pg_frame(vm_paddr_t pa)
{

	return (pmap_methods_ptr->pm_pg_frame(pa));
}

void
pmap_sf_buf_map(struct sf_buf *sf)
{

	pmap_methods_ptr->pm_sf_buf_map(sf);
}

void
pmap_cp_slow0_map(vm_offset_t kaddr, int plen, vm_page_t *ma)
{

	pmap_methods_ptr->pm_cp_slow0_map(kaddr, plen, ma);
}

u_int
pmap_get_kcr3(void)
{

	return (pmap_methods_ptr->pm_get_kcr3());
}

u_int
pmap_get_cr3(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_get_cr3(pmap));
}

caddr_t
pmap_cmap3(vm_paddr_t pa, u_int pte_flags)
{

	return (pmap_methods_ptr->pm_cmap3(pa, pte_flags));
}

void
pmap_basemem_setup(u_int basemem)
{

	pmap_methods_ptr->pm_basemem_setup(basemem);
}

void
pmap_set_nx(void)
{

	pmap_methods_ptr->pm_set_nx();
}

void *
pmap_bios16_enter(void)
{

	return (pmap_methods_ptr->pm_bios16_enter());
}

void
pmap_bios16_leave(void *handle)
{

	pmap_methods_ptr->pm_bios16_leave(handle);
}

void
pmap_bootstrap(vm_paddr_t firstaddr)
{

	pmap_methods_ptr->pm_bootstrap(firstaddr);
}

boolean_t
pmap_is_valid_memattr(pmap_t pmap, vm_memattr_t mode)
{

	return (pmap_methods_ptr->pm_is_valid_memattr(pmap, mode));
}

int
pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
{

	return (pmap_methods_ptr->pm_cache_bits(pmap, mode, is_pde));
}

bool
pmap_ps_enabled(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_ps_enabled(pmap));
}

void
pmap_pinit0(pmap_t pmap)
{

	pmap_methods_ptr->pm_pinit0(pmap);
}

int
pmap_pinit(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_pinit(pmap));
}

void
pmap_activate(struct thread *td)
{

	pmap_methods_ptr->pm_activate(td);
}

void
pmap_activate_boot(pmap_t pmap)
{

	pmap_methods_ptr->pm_activate_boot(pmap);
}

void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{

	pmap_methods_ptr->pm_advise(pmap, sva, eva, advice);
}

void
pmap_clear_modify(vm_page_t m)
{

	pmap_methods_ptr->pm_clear_modify(m);
}

int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{

	return (pmap_methods_ptr->pm_change_attr(va, size, mode));
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
{

	return (pmap_methods_ptr->pm_mincore(pmap, addr, pap));
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
{

	pmap_methods_ptr->pm_copy(dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	pmap_methods_ptr->pm_copy_page(src, dst);
}

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

	pmap_methods_ptr->pm_copy_pages(ma, a_offset, mb, b_offset, xfersize);
}

void
pmap_zero_page(vm_page_t m)
{

	pmap_methods_ptr->pm_zero_page(m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	pmap_methods_ptr->pm_zero_page_area(m, off, size);
}

int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind)
{

	return (pmap_methods_ptr->pm_enter(pmap, va, m, prot, flags, psind));
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

	pmap_methods_ptr->pm_enter_object(pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	pmap_methods_ptr->pm_enter_quick(pmap, va, m, prot);
}

void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{

	return (pmap_methods_ptr->pm_kenter_temporary(pa, i));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	pmap_methods_ptr->pm_object_init_pt(pmap, addr, object, pindex, size);
}

void
pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_unwire(pmap, sva, eva);
}

boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_exists_quick(pmap, m));
}

int
pmap_page_wired_mappings(vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_wired_mappings(m));
}

boolean_t
pmap_page_is_mapped(vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_is_mapped(m));
}

void
pmap_remove_pages(pmap_t pmap)
{

	pmap_methods_ptr->pm_remove_pages(pmap);
}

boolean_t
pmap_is_modified(vm_page_t m)
{

	return (pmap_methods_ptr->pm_is_modified(m));
}

boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	return (pmap_methods_ptr->pm_is_prefaultable(pmap, addr));
}

boolean_t
pmap_is_referenced(vm_page_t m)
{

	return (pmap_methods_ptr->pm_is_referenced(m));
}

void
pmap_remove_write(vm_page_t m)
{

	pmap_methods_ptr->pm_remove_write(m);
}

int
pmap_ts_referenced(vm_page_t m)
{

	return (pmap_methods_ptr->pm_ts_referenced(m));
}

void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, mode,
	    MAPDEV_SETATTR));
}

void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_UNCACHEABLE,
	    MAPDEV_SETATTR));
}

void *
pmap_mapbios(vm_paddr_t pa, vm_size_t size)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_WRITE_BACK, 0));
}

void
pmap_unmapdev(void *p, vm_size_t size)
{

	pmap_methods_ptr->pm_unmapdev(p, size);
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	pmap_methods_ptr->pm_page_set_memattr(m, ma);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

	return (pmap_methods_ptr->pm_extract(pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

	return (pmap_methods_ptr->pm_extract_and_hold(pmap, va, prot));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	return (pmap_methods_ptr->pm_map(virt, start, end, prot));
}

void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{

	pmap_methods_ptr->pm_qenter(sva, ma, count);
}

void
pmap_qremove(vm_offset_t sva, int count)
{

	pmap_methods_ptr->pm_qremove(sva, count);
}

void
pmap_release(pmap_t pmap)
{

	pmap_methods_ptr->pm_release(pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_remove(pmap, sva, eva);
}

void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{

	pmap_methods_ptr->pm_protect(pmap, sva, eva, prot);
}

void
pmap_remove_all(vm_page_t m)
{

	pmap_methods_ptr->pm_remove_all(m);
}

void
pmap_init(void)
{

	pmap_methods_ptr->pm_init();
}

void
pmap_init_pat(void)
{

	pmap_methods_ptr->pm_init_pat();
}

void
pmap_growkernel(vm_offset_t addr)
{

	pmap_methods_ptr->pm_growkernel(addr);
}

void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

	pmap_methods_ptr->pm_invalidate_page(pmap, va);
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_invalidate_range(pmap, sva, eva);
}

void
pmap_invalidate_all(pmap_t pmap)
{

	pmap_methods_ptr->pm_invalidate_all(pmap);
}

void
pmap_invalidate_cache(void)
{

	pmap_methods_ptr->pm_invalidate_cache();
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	pmap_methods_ptr->pm_kenter(va, pa);
}

void
pmap_kremove(vm_offset_t va)
{

	pmap_methods_ptr->pm_kremove(va);
}

extern struct pmap_methods pmap_pae_methods, pmap_nopae_methods;
int pae_mode;
SYSCTL_INT(_vm_pmap, OID_AUTO, pae_mode, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pae_mode, 0,
    "Whether PAE page tables are in use");

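/*
 * Early-boot pmap selection: use PAE page tables when the CPU
 * supports them, unless the vm.pmap.pae_mode loader tunable
 * overrides the default.
 */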
void
pmap_cold(void)
{

	init_static_kenv((char *)bootinfo.bi_envp, 0);
	pae_mode = (cpu_feature & CPUID_PAE) != 0;
	if (pae_mode)
		TUNABLE_INT_FETCH("vm.pmap.pae_mode", &pae_mode);
	if (pae_mode) {
		pmap_methods_ptr = &pmap_pae_methods;
		pmap_pae_cold();
	} else {
		pmap_methods_ptr = &pmap_nopae_methods;
		pmap_nopae_cold();
	}
}