/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 * Copyright (c) 2018 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Portions of this software were developed by
 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_smp.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#ifdef DEV_APIC
#include <sys/bus.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#endif
#include <x86/ifunc.h>

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM/pmap parameters");

#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <machine/pmap_base.h>

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

int unmapped_buf_allowed = 1;

int pti;

u_long physfree;	/* phys addr of next free page */
u_long vm86phystk;	/* PA of vm86/bios stack */
u_long vm86paddr;	/* address of vm86 region */
int vm86pa;		/* phys addr of vm86 region */
u_long KERNend;		/* phys addr end of kernel (just after bss) */
u_long KPTphys;		/* phys addr of kernel page tables */
caddr_t ptvmmap = 0;
vm_offset_t kernel_vm_end;

int i386_pmap_VM_NFREEORDER;
int i386_pmap_VM_LEVEL_0_ORDER;
int i386_pmap_PDRSHIFT;

int pat_works = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD,
    &pat_works, 0,
    "Is page attribute table fully functional?");

int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pg_ps_enabled, 0,
    "Are large page mappings enabled?");

int pv_entry_max = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD,
    &pv_entry_max, 0,
    "Max number of PV entries");

int pv_entry_count = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD,
    &pv_entry_count, 0,
    "Current number of pv entries");

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

int shpgperproc = PMAP_SHPGPERPROC;
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD,
    &shpgperproc, 0,
    "Page share factor per proc");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "2/4MB page mapping counters");

u_long pmap_pde_demotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pde_demotions, 0,
    "2/4MB page demotions");

u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0,
    "2/4MB page mappings");

u_long pmap_pde_p_failures;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pde_p_failures, 0,
    "2/4MB page promotion failures");

u_long pmap_pde_promotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pde_promotions, 0,
    "2/4MB page promotions");

#ifdef SMP
int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
    &PMAP1changedcpu, 0,
    "Number of times pmap_pte_quick changed CPU with same PMAP1");
#endif

int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
    &PMAP1changed, 0,
    "Number of times pmap_pte_quick changed PMAP1");
int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
    &PMAP1unchanged, 0,
    "Number of times pmap_pte_quick didn't change PMAP1");

static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize;

	ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
	return (sysctl_handle_long(oidp, &ksize, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_size, "LU",
    "Size of KVM");

static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree;

	kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
	return (sysctl_handle_long(oidp, &kfree, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_free, "LU",
    "Amount of KVM free");

#ifdef PV_STATS
int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
long pv_entry_frees, pv_entry_allocs;
int pv_entry_spare;

SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD,
    &pc_chunk_count, 0,
    "Current number of pv entry chunks");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD,
    &pc_chunk_allocs, 0,
    "Number of pv entry chunks allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD,
    &pc_chunk_frees, 0,
    "Number of pv entry chunks freed");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD,
    &pc_chunk_tryfail, 0,
    "Number of failed attempts to get a pv entry chunk page");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD,
    &pv_entry_frees, 0,
    "Number of pv entries freed");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD,
    &pv_entry_allocs, 0,
    "Number of pv entries allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD,
    &pv_entry_spare, 0,
    "Current number of spare pv entries");
#endif
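
/*
 * Most pmap entry points below are thin wrappers that dispatch through
 * this method table.  pmap_cold() points it at either the PAE or the
 * non-PAE implementation at early boot, depending on CPU support and
 * the vm.pmap.pae_mode tunable.
 */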

struct pmap kernel_pmap_store;
static struct pmap_methods *pmap_methods_ptr;

static int
sysctl_kmaps(SYSCTL_HANDLER_ARGS)
{
	return (pmap_methods_ptr->pm_sysctl_kmaps(oidp, arg1, arg2, req));
}
SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
    NULL, 0, sysctl_kmaps, "A",
    "Dump kernel address layout");

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pat_mode = PAT_WRITE_BACK;
}

void
invltlb_glob(void)
{

	invltlb();
}

static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
    vm_offset_t eva);
static void pmap_invalidate_cache_range_all(vm_offset_t sva,
    vm_offset_t eva);

void
pmap_flush_page(vm_page_t m)
{

	pmap_methods_ptr->pm_flush_page(m);
}
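
/*
 * Select the cache invalidation implementation once, at boot, via an
 * ifunc resolver: self-snooping CPUs (CPUID_SS) need no explicit flush,
 * CPUs with CLFLUSH can flush individual lines, and anything else falls
 * back to invalidating the entire cache.
 */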

DEFINE_IFUNC(, void, pmap_invalidate_cache_range, (vm_offset_t, vm_offset_t))
{

	if ((cpu_feature & CPUID_SS) != 0)
		return (pmap_invalidate_cache_range_selfsnoop);
	if ((cpu_feature & CPUID_CLFSH) != 0)
		return (pmap_force_invalidate_cache_range);
	return (pmap_invalidate_cache_range_all);
}
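
/*
 * For ranges of at least this size (2MB), invalidating the whole cache
 * is assumed to be cheaper than flushing one cache line at a time.
 */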

#define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)

static void
pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
{

	KASSERT((sva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: sva not page-aligned"));
	KASSERT((eva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: eva not page-aligned"));
}

static void
pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
{

	pmap_invalidate_cache_range_check_align(sva, eva);
}

void
pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{

	sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
	if (eva - sva >= PMAP_CLFLUSH_THRESHOLD) {
		/*
		 * The supplied range is at least 2MB.
		 * Globally invalidate the cache.
		 */
		pmap_invalidate_cache();
		return;
	}

#ifdef DEV_APIC
	/*
	 * XXX: Some CPUs fault, hang, or trash the local APIC
	 * registers if we use CLFLUSH on the local APIC range.
	 * The local APIC is always uncached, so we don't need to
	 * flush for that range anyway.
	 */
	if (pmap_kextract(sva) == lapic_paddr)
		return;
#endif

	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
		/*
		 * Do a per-cache-line flush.  Use the sfence
		 * instruction to ensure that previous stores are
		 * included in the write-back.  The processor
		 * propagates the flush to other processors in the
		 * cache coherence domain.
		 */
		sfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflushopt(sva);
		sfence();
	} else {
		/*
		 * Writes are ordered by CLFLUSH on Intel CPUs.
		 */
		if (cpu_vendor_id != CPU_VENDOR_INTEL)
			mfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflush(sva);
		if (cpu_vendor_id != CPU_VENDOR_INTEL)
			mfence();
	}
}

static void
pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
{

	pmap_invalidate_cache_range_check_align(sva, eva);
	pmap_invalidate_cache();
}
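
/*
 * Flush the given pages from the cache, falling back to a full cache
 * invalidation when the range is large or CLFLUSH is not available.
 */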

void
pmap_invalidate_cache_pages(vm_page_t *pages, int count)
{
	int i;

	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
	    (cpu_feature & CPUID_CLFSH) == 0) {
		pmap_invalidate_cache();
	} else {
		for (i = 0; i < count; i++)
			pmap_flush_page(pages[i]);
	}
}

void
pmap_ksetrw(vm_offset_t va)
{

	pmap_methods_ptr->pm_ksetrw(va);
}

void
pmap_remap_lower(bool enable)
{

	pmap_methods_ptr->pm_remap_lower(enable);
}

void
pmap_remap_lowptdi(bool enable)
{

	pmap_methods_ptr->pm_remap_lowptdi(enable);
}

void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

	pmap_methods_ptr->pm_align_superpage(object, offset, addr, size);
}

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{

	return (pmap_methods_ptr->pm_quick_enter_page(m));
}

void
pmap_quick_remove_page(vm_offset_t addr)
{

	pmap_methods_ptr->pm_quick_remove_page(addr);
}

void *
pmap_trm_alloc(size_t size, int flags)
{

	return (pmap_methods_ptr->pm_trm_alloc(size, flags));
}

void
pmap_trm_free(void *addr, size_t size)
{

	pmap_methods_ptr->pm_trm_free(addr, size);
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
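	/*
	 * Empty on x86: the hardware keeps the instruction cache
	 * coherent with stores, so no explicit synchronization is
	 * needed here.
	 */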
}

vm_offset_t
pmap_get_map_low(void)
{

	return (pmap_methods_ptr->pm_get_map_low());
}

vm_offset_t
pmap_get_vm_maxuser_address(void)
{

	return (pmap_methods_ptr->pm_get_vm_maxuser_address());
}

vm_paddr_t
pmap_kextract(vm_offset_t va)
{

	return (pmap_methods_ptr->pm_kextract(va));
}

vm_paddr_t
pmap_pg_frame(vm_paddr_t pa)
{

	return (pmap_methods_ptr->pm_pg_frame(pa));
}

void
pmap_sf_buf_map(struct sf_buf *sf)
{

	pmap_methods_ptr->pm_sf_buf_map(sf);
}

void
pmap_cp_slow0_map(vm_offset_t kaddr, int plen, vm_page_t *ma)
{

	pmap_methods_ptr->pm_cp_slow0_map(kaddr, plen, ma);
}

u_int
pmap_get_kcr3(void)
{

	return (pmap_methods_ptr->pm_get_kcr3());
}

u_int
pmap_get_cr3(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_get_cr3(pmap));
}

caddr_t
pmap_cmap3(vm_paddr_t pa, u_int pte_flags)
{

	return (pmap_methods_ptr->pm_cmap3(pa, pte_flags));
}

void
pmap_basemem_setup(u_int basemem)
{

	pmap_methods_ptr->pm_basemem_setup(basemem);
}

void
pmap_set_nx(void)
{

	pmap_methods_ptr->pm_set_nx();
}

void *
pmap_bios16_enter(void)
{

	return (pmap_methods_ptr->pm_bios16_enter());
}

void
pmap_bios16_leave(void *handle)
{

	pmap_methods_ptr->pm_bios16_leave(handle);
}

void
pmap_bootstrap(vm_paddr_t firstaddr)
{

	pmap_methods_ptr->pm_bootstrap(firstaddr);
}

bool
pmap_is_valid_memattr(pmap_t pmap, vm_memattr_t mode)
{

	return (pmap_methods_ptr->pm_is_valid_memattr(pmap, mode));
}

int
pmap_cache_bits(pmap_t pmap, int mode, bool is_pde)
{

	return (pmap_methods_ptr->pm_cache_bits(pmap, mode, is_pde));
}

bool
pmap_ps_enabled(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_ps_enabled(pmap));
}

void
pmap_pinit0(pmap_t pmap)
{

	pmap_methods_ptr->pm_pinit0(pmap);
}

int
pmap_pinit(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_pinit(pmap));
}

void
pmap_activate(struct thread *td)
{

	pmap_methods_ptr->pm_activate(td);
}

void
pmap_activate_boot(pmap_t pmap)
{

	pmap_methods_ptr->pm_activate_boot(pmap);
}

void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{

	pmap_methods_ptr->pm_advise(pmap, sva, eva, advice);
}

void
pmap_clear_modify(vm_page_t m)
{

	pmap_methods_ptr->pm_clear_modify(m);
}

int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{

	return (pmap_methods_ptr->pm_change_attr(va, size, mode));
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
{

	return (pmap_methods_ptr->pm_mincore(pmap, addr, pap));
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
{

	pmap_methods_ptr->pm_copy(dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	pmap_methods_ptr->pm_copy_page(src, dst);
}

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

	pmap_methods_ptr->pm_copy_pages(ma, a_offset, mb, b_offset, xfersize);
}

void
pmap_zero_page(vm_page_t m)
{

	pmap_methods_ptr->pm_zero_page(m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	pmap_methods_ptr->pm_zero_page_area(m, off, size);
}

int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind)
{

	return (pmap_methods_ptr->pm_enter(pmap, va, m, prot, flags, psind));
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

	pmap_methods_ptr->pm_enter_object(pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	pmap_methods_ptr->pm_enter_quick(pmap, va, m, prot);
}

void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{

	return (pmap_methods_ptr->pm_kenter_temporary(pa, i));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	pmap_methods_ptr->pm_object_init_pt(pmap, addr, object, pindex, size);
}

void
pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_unwire(pmap, sva, eva);
}

bool
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_exists_quick(pmap, m));
}

int
pmap_page_wired_mappings(vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_wired_mappings(m));
}

bool
pmap_page_is_mapped(vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_is_mapped(m));
}

void
pmap_remove_pages(pmap_t pmap)
{

	pmap_methods_ptr->pm_remove_pages(pmap);
}

bool
pmap_is_modified(vm_page_t m)
{

	return (pmap_methods_ptr->pm_is_modified(m));
}

bool
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	return (pmap_methods_ptr->pm_is_prefaultable(pmap, addr));
}

bool
pmap_is_referenced(vm_page_t m)
{

	return (pmap_methods_ptr->pm_is_referenced(m));
}

void
pmap_remove_write(vm_page_t m)
{

	pmap_methods_ptr->pm_remove_write(m);
}

int
pmap_ts_referenced(vm_page_t m)
{

	return (pmap_methods_ptr->pm_ts_referenced(m));
}

void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, mode,
	    MAPDEV_SETATTR));
}

void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_UNCACHEABLE,
	    MAPDEV_SETATTR));
}

void *
pmap_mapbios(vm_paddr_t pa, vm_size_t size)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_WRITE_BACK, 0));
}

void
pmap_unmapdev(void *p, vm_size_t size)
{

	pmap_methods_ptr->pm_unmapdev(p, size);
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	pmap_methods_ptr->pm_page_set_memattr(m, ma);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

	return (pmap_methods_ptr->pm_extract(pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

	return (pmap_methods_ptr->pm_extract_and_hold(pmap, va, prot));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	return (pmap_methods_ptr->pm_map(virt, start, end, prot));
}

void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{

	pmap_methods_ptr->pm_qenter(sva, ma, count);
}

void
pmap_qremove(vm_offset_t sva, int count)
{

	pmap_methods_ptr->pm_qremove(sva, count);
}

void
pmap_release(pmap_t pmap)
{

	pmap_methods_ptr->pm_release(pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_remove(pmap, sva, eva);
}

void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{

	pmap_methods_ptr->pm_protect(pmap, sva, eva, prot);
}

void
pmap_remove_all(vm_page_t m)
{

	pmap_methods_ptr->pm_remove_all(m);
}

void
pmap_init(void)
{

	pmap_methods_ptr->pm_init();
}

void
pmap_init_pat(void)
{

	pmap_methods_ptr->pm_init_pat();
}

void
pmap_growkernel(vm_offset_t addr)
{

	pmap_methods_ptr->pm_growkernel(addr);
}

void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

	pmap_methods_ptr->pm_invalidate_page(pmap, va);
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_invalidate_range(pmap, sva, eva);
}

void
pmap_invalidate_all(pmap_t pmap)
{

	pmap_methods_ptr->pm_invalidate_all(pmap);
}

void
pmap_invalidate_cache(void)
{

	pmap_methods_ptr->pm_invalidate_cache();
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	pmap_methods_ptr->pm_kenter(va, pa);
}

void
pmap_kremove(vm_offset_t va)
{

	pmap_methods_ptr->pm_kremove(va);
}

void
pmap_active_cpus(pmap_t pmap, cpuset_t *res)
{
	*res = pmap->pm_active;
}

extern struct pmap_methods pmap_pae_methods, pmap_nopae_methods;
int pae_mode;
SYSCTL_INT(_vm_pmap, OID_AUTO, pae_mode, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pae_mode, 0,
    "Is PAE mode enabled?");

void
pmap_cold(void)
{

	init_static_kenv((char *)bootinfo.bi_envp, 0);
	pae_mode = (cpu_feature & CPUID_PAE) != 0;
	if (pae_mode)
		TUNABLE_INT_FETCH("vm.pmap.pae_mode", &pae_mode);
	if (pae_mode) {
		pmap_methods_ptr = &pmap_pae_methods;
		pmap_pae_cold();
	} else {
		pmap_methods_ptr = &pmap_nopae_methods;
		pmap_nopae_cold();
	}
}