xref: /freebsd/sys/amd64/amd64/pmap.c (revision 6a7761b4d27c99b3b548f2d948b88bf1430ee636)
1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  * Copyright (c) 2003 Peter Wemm
11  * All rights reserved.
12  * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
13  * All rights reserved.
14  *
15  * This code is derived from software contributed to Berkeley by
16  * the Systems Programming Group of the University of Utah Computer
17  * Science Department and William Jolitz of UUNET Technologies Inc.
18  *
19  * Redistribution and use in source and binary forms, with or without
20  * modification, are permitted provided that the following conditions
21  * are met:
22  * 1. Redistributions of source code must retain the above copyright
23  *    notice, this list of conditions and the following disclaimer.
24  * 2. Redistributions in binary form must reproduce the above copyright
25  *    notice, this list of conditions and the following disclaimer in the
26  *    documentation and/or other materials provided with the distribution.
27  * 3. All advertising materials mentioning features or use of this software
28  *    must display the following acknowledgement:
29  *	This product includes software developed by the University of
30  *	California, Berkeley and its contributors.
31  * 4. Neither the name of the University nor the names of its contributors
32  *    may be used to endorse or promote products derived from this software
33  *    without specific prior written permission.
34  *
35  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
36  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
38  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
39  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
40  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
41  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
42  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
43  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
44  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45  * SUCH DAMAGE.
46  */
47 /*-
48  * Copyright (c) 2003 Networks Associates Technology, Inc.
49  * Copyright (c) 2014-2020 The FreeBSD Foundation
50  * All rights reserved.
51  *
52  * This software was developed for the FreeBSD Project by Jake Burkholder,
53  * Safeport Network Services, and Network Associates Laboratories, the
54  * Security Research Division of Network Associates, Inc. under
55  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
56  * CHATS research program.
57  *
58  * Portions of this software were developed by
59  * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
60  * the FreeBSD Foundation.
61  *
62  * Redistribution and use in source and binary forms, with or without
63  * modification, are permitted provided that the following conditions
64  * are met:
65  * 1. Redistributions of source code must retain the above copyright
66  *    notice, this list of conditions and the following disclaimer.
67  * 2. Redistributions in binary form must reproduce the above copyright
68  *    notice, this list of conditions and the following disclaimer in the
69  *    documentation and/or other materials provided with the distribution.
70  *
71  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
72  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
73  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
74  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
75  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
76  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
77  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
78  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
79  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
80  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
81  * SUCH DAMAGE.
82  */
83 
84 #define	AMD64_NPT_AWARE
85 
86 #include <sys/cdefs.h>
87 /*
88  *	Manages physical address maps.
89  *
90  *	Since the information managed by this module is
91  *	also stored by the logical address mapping module,
92  *	this module may throw away valid virtual-to-physical
93  *	mappings at almost any time.  However, invalidations
94  *	of virtual-to-physical mappings must be done as
95  *	requested.
96  *
97  *	In order to cope with hardware architectures which
98  *	make virtual-to-physical map invalidates expensive,
 99  *	this module may delay invalidation or protection-reduction
100  *	operations until such time as they are actually
101  *	necessary.  This module is given full information as
102  *	to which processors are currently using which maps,
103  *	and to when physical maps must be made correct.
104  */
105 
106 #include "opt_ddb.h"
107 #include "opt_pmap.h"
108 #include "opt_vm.h"
109 
110 #include <sys/param.h>
111 #include <sys/asan.h>
112 #include <sys/bitstring.h>
113 #include <sys/bus.h>
114 #include <sys/systm.h>
115 #include <sys/counter.h>
116 #include <sys/kernel.h>
117 #include <sys/ktr.h>
118 #include <sys/lock.h>
119 #include <sys/malloc.h>
120 #include <sys/mman.h>
121 #include <sys/msan.h>
122 #include <sys/mutex.h>
123 #include <sys/proc.h>
124 #include <sys/rangeset.h>
125 #include <sys/rwlock.h>
126 #include <sys/sbuf.h>
127 #include <sys/smr.h>
128 #include <sys/sx.h>
129 #include <sys/turnstile.h>
130 #include <sys/vmem.h>
131 #include <sys/vmmeter.h>
132 #include <sys/sched.h>
133 #include <sys/sysctl.h>
134 #include <sys/smp.h>
135 #ifdef DDB
136 #include <sys/kdb.h>
137 #include <ddb/ddb.h>
138 #endif
139 
140 #include <vm/vm.h>
141 #include <vm/vm_param.h>
142 #include <vm/vm_kern.h>
143 #include <vm/vm_page.h>
144 #include <vm/vm_map.h>
145 #include <vm/vm_object.h>
146 #include <vm/vm_extern.h>
147 #include <vm/vm_pageout.h>
148 #include <vm/vm_pager.h>
149 #include <vm/vm_phys.h>
150 #include <vm/vm_radix.h>
151 #include <vm/vm_reserv.h>
152 #include <vm/vm_dumpset.h>
153 #include <vm/uma.h>
154 
155 #include <machine/asan.h>
156 #include <machine/intr_machdep.h>
157 #include <x86/apicvar.h>
158 #include <x86/ifunc.h>
159 #include <machine/cpu.h>
160 #include <machine/cputypes.h>
161 #include <machine/md_var.h>
162 #include <machine/msan.h>
163 #include <machine/pcb.h>
164 #include <machine/specialreg.h>
165 #ifdef SMP
166 #include <machine/smp.h>
167 #endif
168 #include <machine/sysarch.h>
169 #include <machine/tss.h>
170 
171 #ifdef NUMA
172 #define	PMAP_MEMDOM	MAXMEMDOM
173 #else
174 #define	PMAP_MEMDOM	1
175 #endif
176 
177 static __inline bool
178 pmap_type_guest(pmap_t pmap)
179 {
180 
181 	return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI));
182 }
183 
184 static __inline bool
185 pmap_emulate_ad_bits(pmap_t pmap)
186 {
187 
188 	return ((pmap->pm_flags & PMAP_EMULATE_AD_BITS) != 0);
189 }
190 
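/*
 * The helpers below return the page-table-entry bit that encodes a
 * given attribute (valid, read/write, global, accessed, modified) for
 * the pmap's type.  Plain x86 and AMD NPT (PT_RVI) page tables use the
 * architectural X86_PG_* bits, while Intel EPT uses the EPT_PG_* bits
 * or, when accessed/dirty bits are emulated, the software-defined
 * EPT_PG_EMUL_* bits (see the switch bodies below).
 */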
191 static __inline pt_entry_t
192 pmap_valid_bit(pmap_t pmap)
193 {
194 	pt_entry_t mask;
195 
196 	switch (pmap->pm_type) {
197 	case PT_X86:
198 	case PT_RVI:
199 		mask = X86_PG_V;
200 		break;
201 	case PT_EPT:
202 		if (pmap_emulate_ad_bits(pmap))
203 			mask = EPT_PG_EMUL_V;
204 		else
205 			mask = EPT_PG_READ;
206 		break;
207 	default:
208 		panic("pmap_valid_bit: invalid pm_type %d", pmap->pm_type);
209 	}
210 
211 	return (mask);
212 }
213 
214 static __inline pt_entry_t
215 pmap_rw_bit(pmap_t pmap)
216 {
217 	pt_entry_t mask;
218 
219 	switch (pmap->pm_type) {
220 	case PT_X86:
221 	case PT_RVI:
222 		mask = X86_PG_RW;
223 		break;
224 	case PT_EPT:
225 		if (pmap_emulate_ad_bits(pmap))
226 			mask = EPT_PG_EMUL_RW;
227 		else
228 			mask = EPT_PG_WRITE;
229 		break;
230 	default:
231 		panic("pmap_rw_bit: invalid pm_type %d", pmap->pm_type);
232 	}
233 
234 	return (mask);
235 }
236 
237 static pt_entry_t pg_g;
238 
239 static __inline pt_entry_t
240 pmap_global_bit(pmap_t pmap)
241 {
242 	pt_entry_t mask;
243 
244 	switch (pmap->pm_type) {
245 	case PT_X86:
246 		mask = pg_g;
247 		break;
248 	case PT_RVI:
249 	case PT_EPT:
250 		mask = 0;
251 		break;
252 	default:
253 		panic("pmap_global_bit: invalid pm_type %d", pmap->pm_type);
254 	}
255 
256 	return (mask);
257 }
258 
259 static __inline pt_entry_t
260 pmap_accessed_bit(pmap_t pmap)
261 {
262 	pt_entry_t mask;
263 
264 	switch (pmap->pm_type) {
265 	case PT_X86:
266 	case PT_RVI:
267 		mask = X86_PG_A;
268 		break;
269 	case PT_EPT:
270 		if (pmap_emulate_ad_bits(pmap))
271 			mask = EPT_PG_READ;
272 		else
273 			mask = EPT_PG_A;
274 		break;
275 	default:
276 		panic("pmap_accessed_bit: invalid pm_type %d", pmap->pm_type);
277 	}
278 
279 	return (mask);
280 }
281 
282 static __inline pt_entry_t
283 pmap_modified_bit(pmap_t pmap)
284 {
285 	pt_entry_t mask;
286 
287 	switch (pmap->pm_type) {
288 	case PT_X86:
289 	case PT_RVI:
290 		mask = X86_PG_M;
291 		break;
292 	case PT_EPT:
293 		if (pmap_emulate_ad_bits(pmap))
294 			mask = EPT_PG_WRITE;
295 		else
296 			mask = EPT_PG_M;
297 		break;
298 	default:
299 		panic("pmap_modified_bit: invalid pm_type %d", pmap->pm_type);
300 	}
301 
302 	return (mask);
303 }
304 
305 static __inline pt_entry_t
306 pmap_pku_mask_bit(pmap_t pmap)
307 {
308 
309 	return (pmap->pm_type == PT_X86 ? X86_PG_PKU_MASK : 0);
310 }
311 
312 static __inline bool
313 safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
314 {
315 
316 	if (!pmap_emulate_ad_bits(pmap))
317 		return (true);
318 
319 	KASSERT(pmap->pm_type == PT_EPT, ("invalid pm_type %d", pmap->pm_type));
320 
321 	/*
322 	 * XWR = 010 or 110 will cause an unconditional EPT misconfiguration
323 	 * so we don't allow the referenced (aka EPT_PG_READ) bit to be cleared
324 	 * if the EPT_PG_WRITE bit is set.
325 	 */
326 	if ((pte & EPT_PG_WRITE) != 0)
327 		return (false);
328 
329 	/*
330 	 * XWR = 100 is allowed only if the PMAP_SUPPORTS_EXEC_ONLY is set.
331 	 */
332 	if ((pte & EPT_PG_EXECUTE) == 0 ||
333 	    ((pmap->pm_flags & PMAP_SUPPORTS_EXEC_ONLY) != 0))
334 		return (true);
335 	else
336 		return (false);
337 }
338 
339 #ifdef PV_STATS
340 #define PV_STAT(x)	do { x ; } while (0)
341 #else
342 #define PV_STAT(x)	do { } while (0)
343 #endif
344 
345 #ifdef NUMA
346 #define	pa_index(pa)	({					\
347 	KASSERT((pa) <= vm_phys_segs[vm_phys_nsegs - 1].end,	\
348 	    ("address %lx beyond the last segment", (pa)));	\
349 	(pa) >> PDRSHIFT;					\
350 })
351 #define	pa_to_pmdp(pa)	(&pv_table[pa_index(pa)])
352 #define	pa_to_pvh(pa)	(&(pa_to_pmdp(pa)->pv_page))
353 #define	PHYS_TO_PV_LIST_LOCK(pa)	({			\
354 	struct rwlock *_lock;					\
355 	if (__predict_false((pa) > pmap_last_pa))		\
356 		_lock = &pv_dummy_large.pv_lock;		\
357 	else							\
358 		_lock = &(pa_to_pmdp(pa)->pv_lock);		\
359 	_lock;							\
360 })
361 #else
362 #define	pa_index(pa)	((pa) >> PDRSHIFT)
363 #define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])
364 
365 #define	NPV_LIST_LOCKS	MAXCPU
366 
367 #define	PHYS_TO_PV_LIST_LOCK(pa)	\
368 			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
369 #endif
370 
371 #define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
372 	struct rwlock **_lockp = (lockp);		\
373 	struct rwlock *_new_lock;			\
374 							\
375 	_new_lock = PHYS_TO_PV_LIST_LOCK(pa);		\
376 	if (_new_lock != *_lockp) {			\
377 		if (*_lockp != NULL)			\
378 			rw_wunlock(*_lockp);		\
379 		*_lockp = _new_lock;			\
380 		rw_wlock(*_lockp);			\
381 	}						\
382 } while (0)
383 
384 #define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
385 			CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
386 
387 #define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
388 	struct rwlock **_lockp = (lockp);		\
389 							\
390 	if (*_lockp != NULL) {				\
391 		rw_wunlock(*_lockp);			\
392 		*_lockp = NULL;				\
393 	}						\
394 } while (0)
395 
396 #define	VM_PAGE_TO_PV_LIST_LOCK(m)	\
397 			PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
398 
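/*
 * PV list locking: a lock is chosen by physical address, either one
 * lock per 2MB chunk of pv_table (NUMA case) or by hashing pa_index()
 * over NPV_LIST_LOCKS (non-NUMA case).  The CHANGE_PV_LIST_LOCK_TO_*()
 * macros above switch locks when the physical address changes: the
 * currently held lock, if any, is dropped before the lock for the new
 * address is acquired.
 */
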
399 /*
400  * Statically allocate kernel pmap memory.  However, memory for
401  * pm_pcids is obtained after the dynamic allocator is operational.
402  * Initialize it with a non-canonical pointer to catch early accesses
403  * regardless of the active mapping.
404  */
405 struct pmap kernel_pmap_store = {
406 	.pm_pcidp = (void *)0xdeadbeefdeadbeef,
407 };
408 
409 vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
410 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
411 
412 int nkpt;
413 SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
414     "Number of kernel page table pages allocated on bootup");
415 
416 static int ndmpdp;
417 vm_paddr_t dmaplimit;
418 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
419 pt_entry_t pg_nx;
420 
421 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
422     "VM/pmap parameters");
423 
424 static int __read_frequently pg_ps_enabled = 1;
425 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
426     &pg_ps_enabled, 0, "Are large page mappings enabled?");
427 
428 int __read_frequently la57 = 0;
429 SYSCTL_INT(_vm_pmap, OID_AUTO, la57, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
430     &la57, 0,
431     "5-level paging for host is enabled");
432 
433 /*
434  * The default value is needed in order to preserve compatibility with
435  * some userspace programs that put tags into sign-extended bits.
436  */
437 int prefer_uva_la48 = 1;
438 SYSCTL_INT(_vm_pmap, OID_AUTO, prefer_uva_la48, CTLFLAG_RDTUN,
439     &prefer_uva_la48, 0,
440     "Userspace maps are limited to LA48 unless otherwise configured");
441 
442 static bool
443 pmap_is_la57(pmap_t pmap)
444 {
445 	if (pmap->pm_type == PT_X86)
446 		return (la57);
447 	return (false);		/* XXXKIB handle EPT */
448 }
449 
450 #define	PAT_INDEX_SIZE	8
451 static int pat_index[PAT_INDEX_SIZE];	/* cache mode to PAT index conversion */
452 
453 static u_int64_t	KPTphys;	/* phys addr of kernel level 1 */
454 static u_int64_t	KPDphys;	/* phys addr of kernel level 2 */
455 static u_int64_t	KPDPphys;	/* phys addr of kernel level 3 */
456 u_int64_t		KPML4phys;	/* phys addr of kernel level 4 */
457 u_int64_t		KPML5phys;	/* phys addr of kernel level 5,
458 					   if supported */
459 
460 #ifdef KASAN
461 static uint64_t		KASANPDPphys;
462 #endif
463 #ifdef KMSAN
464 static uint64_t		KMSANSHADPDPphys;
465 static uint64_t		KMSANORIGPDPphys;
466 
467 /*
468  * To support systems with large amounts of memory, it is necessary to extend
469  * the maximum size of the direct map.  This could eat into the space reserved
470  * for the shadow map.
471  */
472 _Static_assert(DMPML4I + NDMPML4E <= KMSANSHADPML4I, "direct map overflow");
473 #endif
474 
475 static pml4_entry_t	*kernel_pml4;
476 static u_int64_t	DMPDphys;	/* phys addr of direct mapped level 2 */
477 static u_int64_t	DMPDPphys;	/* phys addr of direct mapped level 3 */
478 static int		ndmpdpphys;	/* number of DMPDPphys pages */
479 
480 vm_paddr_t		kernphys;	/* phys addr of start of bootstrap data */
481 vm_paddr_t		KERNend;	/* and the end */
482 
483 /*
484  * pmap_mapdev support prior to pmap initialization (i.e., console)
485  */
486 #define	PMAP_PREINIT_MAPPING_COUNT	8
487 static struct pmap_preinit_mapping {
488 	vm_paddr_t	pa;
489 	vm_offset_t	va;
490 	vm_size_t	sz;
491 	int		mode;
492 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
493 static int pmap_initialized;
494 
495 /*
496  * Data for the pv entry allocation mechanism.
497  * Updates to pv_invl_gen are protected by the pv list lock but reads are not.
498  */
499 #ifdef NUMA
500 static __inline int
501 pc_to_domain(struct pv_chunk *pc)
502 {
503 
504 	return (vm_phys_domain(DMAP_TO_PHYS((vm_offset_t)pc)));
505 }
506 #else
507 static __inline int
508 pc_to_domain(struct pv_chunk *pc __unused)
509 {
510 
511 	return (0);
512 }
513 #endif
514 
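/*
 * pv_chunk pages are accessed through the direct map, so a chunk's
 * NUMA domain can be derived from its physical address (see
 * pc_to_domain() above).  Chunks are kept on per-domain lists, each
 * protected by its own mutex, to reduce cross-domain contention.
 */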
515 struct pv_chunks_list {
516 	struct mtx pvc_lock;
517 	TAILQ_HEAD(pch, pv_chunk) pvc_list;
518 	int active_reclaims;
519 } __aligned(CACHE_LINE_SIZE);
520 
521 struct pv_chunks_list __exclusive_cache_line pv_chunks[PMAP_MEMDOM];
522 
523 #ifdef	NUMA
524 struct pmap_large_md_page {
525 	struct rwlock   pv_lock;
526 	struct md_page  pv_page;
527 	u_long pv_invl_gen;
528 };
529 __exclusive_cache_line static struct pmap_large_md_page pv_dummy_large;
530 #define pv_dummy pv_dummy_large.pv_page
531 __read_mostly static struct pmap_large_md_page *pv_table;
532 __read_mostly vm_paddr_t pmap_last_pa;
533 #else
534 static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
535 static u_long pv_invl_gen[NPV_LIST_LOCKS];
536 static struct md_page *pv_table;
537 static struct md_page pv_dummy;
538 #endif
539 
540 /*
541  * All those kernel PT submaps that BSD is so fond of
542  */
543 pt_entry_t *CMAP1 = NULL;
544 caddr_t CADDR1 = 0;
545 static vm_offset_t qframe = 0;
546 static struct mtx qframe_mtx;
547 
548 static int pmap_flags = PMAP_PDE_SUPERPAGE;	/* flags for x86 pmaps */
549 
550 static vmem_t *large_vmem;
551 static u_int lm_ents;
552 #define	PMAP_ADDRESS_IN_LARGEMAP(va)	((va) >= LARGEMAP_MIN_ADDRESS && \
553 	(va) < LARGEMAP_MIN_ADDRESS + NBPML4 * (u_long)lm_ents)
554 
555 int pmap_pcid_enabled = 1;
556 SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
557     &pmap_pcid_enabled, 0, "Is TLB Context ID enabled ?");
558 int invpcid_works = 0;
559 SYSCTL_INT(_vm_pmap, OID_AUTO, invpcid_works, CTLFLAG_RD, &invpcid_works, 0,
560     "Is the invpcid instruction available ?");
561 int invlpgb_works;
562 SYSCTL_INT(_vm_pmap, OID_AUTO, invlpgb_works, CTLFLAG_RD, &invlpgb_works, 0,
563     "Is the invlpgb instruction available?");
564 int invlpgb_maxcnt;
565 int pmap_pcid_invlpg_workaround = 0;
566 SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_invlpg_workaround,
567     CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
568     &pmap_pcid_invlpg_workaround, 0,
569     "Enable small core PCID/INVLPG workaround");
570 int pmap_pcid_invlpg_workaround_uena = 1;
571 
572 int __read_frequently pti = 0;
573 SYSCTL_INT(_vm_pmap, OID_AUTO, pti, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
574     &pti, 0,
575     "Page Table Isolation enabled");
576 static vm_object_t pti_obj;
577 static pml4_entry_t *pti_pml4;
578 static vm_pindex_t pti_pg_idx;
579 static bool pti_finalized;
580 
581 static int pmap_growkernel_panic = 0;
582 SYSCTL_INT(_vm_pmap, OID_AUTO, growkernel_panic, CTLFLAG_RDTUN,
583     &pmap_growkernel_panic, 0,
584     "panic on failure to allocate kernel page table page");
585 
586 struct pmap_pkru_range {
587 	struct rs_el	pkru_rs_el;
588 	u_int		pkru_keyidx;
589 	int		pkru_flags;
590 };
591 
592 static uma_zone_t pmap_pkru_ranges_zone;
593 static bool pmap_pkru_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
594     pt_entry_t *pte);
595 static pt_entry_t pmap_pkru_get(pmap_t pmap, vm_offset_t va);
596 static void pmap_pkru_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
597 static void *pkru_dup_range(void *ctx, void *data);
598 static void pkru_free_range(void *ctx, void *node);
599 static int pmap_pkru_copy(pmap_t dst_pmap, pmap_t src_pmap);
600 static int pmap_pkru_deassign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
601 static void pmap_pkru_deassign_all(pmap_t pmap);
602 
603 static COUNTER_U64_DEFINE_EARLY(pcid_save_cnt);
604 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pcid_save_cnt, CTLFLAG_RD,
605     &pcid_save_cnt, "Count of saved TLB context on switch");
606 
607 static LIST_HEAD(, pmap_invl_gen) pmap_invl_gen_tracker =
608     LIST_HEAD_INITIALIZER(&pmap_invl_gen_tracker);
609 static struct mtx invl_gen_mtx;
610 /* Fake lock object to satisfy turnstiles interface. */
611 static struct lock_object invl_gen_ts = {
612 	.lo_name = "invlts",
613 };
614 static struct pmap_invl_gen pmap_invl_gen_head = {
615 	.gen = 1,
616 	.next = NULL,
617 };
618 static u_long pmap_invl_gen = 1;
619 static int pmap_invl_waiters;
620 static struct callout pmap_invl_callout;
621 static bool pmap_invl_callout_inited;
622 
623 #define	PMAP_ASSERT_NOT_IN_DI() \
624     KASSERT(pmap_not_in_di(), ("DI already started"))
625 
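/*
 * Two implementations of delayed invalidation (DI) tracking exist: a
 * mutex- and turnstile-based one (the *_l functions) and a lockless
 * one (the *_u functions) built on 16-byte compare-and-swap.  The
 * lockless variant requires cmpxchg16b (CPUID2_CX16); setting the
 * vm.pmap.di_locked tunable forces the locked variant even when
 * cmpxchg16b is available.
 */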
626 static bool
627 pmap_di_locked(void)
628 {
629 	int tun;
630 
631 	if ((cpu_feature2 & CPUID2_CX16) == 0)
632 		return (true);
633 	tun = 0;
634 	TUNABLE_INT_FETCH("vm.pmap.di_locked", &tun);
635 	return (tun != 0);
636 }
637 
638 static int
639 sysctl_pmap_di_locked(SYSCTL_HANDLER_ARGS)
640 {
641 	int locked;
642 
643 	locked = pmap_di_locked();
644 	return (sysctl_handle_int(oidp, &locked, 0, req));
645 }
646 SYSCTL_PROC(_vm_pmap, OID_AUTO, di_locked, CTLTYPE_INT | CTLFLAG_RDTUN |
647     CTLFLAG_MPSAFE, 0, 0, sysctl_pmap_di_locked, "",
648     "Locked delayed invalidation");
649 
650 static bool pmap_not_in_di_l(void);
651 static bool pmap_not_in_di_u(void);
652 DEFINE_IFUNC(, bool, pmap_not_in_di, (void))
653 {
654 
655 	return (pmap_di_locked() ? pmap_not_in_di_l : pmap_not_in_di_u);
656 }
657 
658 static bool
659 pmap_not_in_di_l(void)
660 {
661 	struct pmap_invl_gen *invl_gen;
662 
663 	invl_gen = &curthread->td_md.md_invl_gen;
664 	return (invl_gen->gen == 0);
665 }
666 
667 static void
668 pmap_thread_init_invl_gen_l(struct thread *td)
669 {
670 	struct pmap_invl_gen *invl_gen;
671 
672 	invl_gen = &td->td_md.md_invl_gen;
673 	invl_gen->gen = 0;
674 }
675 
676 static void
677 pmap_delayed_invl_wait_block(u_long *m_gen, u_long *invl_gen)
678 {
679 	struct turnstile *ts;
680 
681 	ts = turnstile_trywait(&invl_gen_ts);
682 	if (*m_gen > atomic_load_long(invl_gen))
683 		turnstile_wait(ts, NULL, TS_SHARED_QUEUE);
684 	else
685 		turnstile_cancel(ts);
686 }
687 
688 static void
689 pmap_delayed_invl_finish_unblock(u_long new_gen)
690 {
691 	struct turnstile *ts;
692 
693 	turnstile_chain_lock(&invl_gen_ts);
694 	ts = turnstile_lookup(&invl_gen_ts);
695 	if (new_gen != 0)
696 		pmap_invl_gen = new_gen;
697 	if (ts != NULL) {
698 		turnstile_broadcast(ts, TS_SHARED_QUEUE);
699 		turnstile_unpend(ts);
700 	}
701 	turnstile_chain_unlock(&invl_gen_ts);
702 }
703 
704 /*
705  * Start a new Delayed Invalidation (DI) block of code, executed by
706  * the current thread.  Within a DI block, the current thread may
707  * destroy both the page table and PV list entries for a mapping and
708  * then release the corresponding PV list lock before ensuring that
709  * the mapping is flushed from the TLBs of any processors with the
710  * pmap active.
711  */
712 static void
713 pmap_delayed_invl_start_l(void)
714 {
715 	struct pmap_invl_gen *invl_gen;
716 	u_long currgen;
717 
718 	invl_gen = &curthread->td_md.md_invl_gen;
719 	PMAP_ASSERT_NOT_IN_DI();
720 	mtx_lock(&invl_gen_mtx);
721 	if (LIST_EMPTY(&pmap_invl_gen_tracker))
722 		currgen = pmap_invl_gen;
723 	else
724 		currgen = LIST_FIRST(&pmap_invl_gen_tracker)->gen;
725 	invl_gen->gen = currgen + 1;
726 	LIST_INSERT_HEAD(&pmap_invl_gen_tracker, invl_gen, link);
727 	mtx_unlock(&invl_gen_mtx);
728 }
729 
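/*
 * Roughly, a DI block is used as follows: pmap_delayed_invl_start();
 * destroy the page table and PV entries for one or more mappings,
 * calling pmap_delayed_invl_page(m) for each affected page; release
 * the PV list lock; issue the TLB shootdowns; and finally
 * pmap_delayed_invl_finish().  A thread in pmap_delayed_invl_wait()
 * blocks only until every DI block covering the page has finished.
 */
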
730 /*
731  * Finish the DI block, previously started by the current thread.  All
732  * required TLB flushes for the pages marked by
733  * pmap_delayed_invl_page() must be finished before this function is
734  * called.
735  *
736  * This function works by bumping the global DI generation number to
737  * the generation number of the current thread's DI, unless there is a
738  * pending DI that started earlier.  In the latter case, bumping the
739  * global DI generation number would incorrectly signal that the
740  * earlier DI had finished.  Instead, this function bumps the earlier
741  * DI's generation number to match the generation number of the
742  * current thread's DI.
743  */
744 static void
745 pmap_delayed_invl_finish_l(void)
746 {
747 	struct pmap_invl_gen *invl_gen, *next;
748 
749 	invl_gen = &curthread->td_md.md_invl_gen;
750 	KASSERT(invl_gen->gen != 0, ("missed invl_start"));
751 	mtx_lock(&invl_gen_mtx);
752 	next = LIST_NEXT(invl_gen, link);
753 	if (next == NULL)
754 		pmap_delayed_invl_finish_unblock(invl_gen->gen);
755 	else
756 		next->gen = invl_gen->gen;
757 	LIST_REMOVE(invl_gen, link);
758 	mtx_unlock(&invl_gen_mtx);
759 	invl_gen->gen = 0;
760 }
761 
762 static bool
763 pmap_not_in_di_u(void)
764 {
765 	struct pmap_invl_gen *invl_gen;
766 
767 	invl_gen = &curthread->td_md.md_invl_gen;
768 	return (((uintptr_t)invl_gen->next & PMAP_INVL_GEN_NEXT_INVALID) != 0);
769 }
770 
771 static void
772 pmap_thread_init_invl_gen_u(struct thread *td)
773 {
774 	struct pmap_invl_gen *invl_gen;
775 
776 	invl_gen = &td->td_md.md_invl_gen;
777 	invl_gen->gen = 0;
778 	invl_gen->next = (void *)PMAP_INVL_GEN_NEXT_INVALID;
779 }
780 
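/*
 * Helpers for the lockless DI list.  Both treat the {gen, next} pair
 * of a pmap_invl_gen as a single 16-byte unit manipulated with
 * cmpxchg16b.  pmap_di_load_invl() is effectively an atomic 128-bit
 * read (a compare-and-swap with zero old and new values); it fails if
 * the element's next pointer has PMAP_INVL_GEN_NEXT_INVALID set.
 */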
781 static bool
782 pmap_di_load_invl(struct pmap_invl_gen *ptr, struct pmap_invl_gen *out)
783 {
784 	uint64_t new_high, new_low, old_high, old_low;
785 	char res;
786 
787 	old_low = new_low = 0;
788 	old_high = new_high = (uintptr_t)0;
789 
790 	__asm volatile("lock;cmpxchg16b\t%1"
791 	    : "=@cce" (res), "+m" (*ptr), "+a" (old_low), "+d" (old_high)
792 	    : "b"(new_low), "c" (new_high)
793 	    : "memory", "cc");
794 	if (res == 0) {
795 		if ((old_high & PMAP_INVL_GEN_NEXT_INVALID) != 0)
796 			return (false);
797 		out->gen = old_low;
798 		out->next = (void *)old_high;
799 	} else {
800 		out->gen = new_low;
801 		out->next = (void *)new_high;
802 	}
803 	return (true);
804 }
805 
806 static bool
807 pmap_di_store_invl(struct pmap_invl_gen *ptr, struct pmap_invl_gen *old_val,
808     struct pmap_invl_gen *new_val)
809 {
810 	uint64_t new_high, new_low, old_high, old_low;
811 	char res;
812 
813 	new_low = new_val->gen;
814 	new_high = (uintptr_t)new_val->next;
815 	old_low = old_val->gen;
816 	old_high = (uintptr_t)old_val->next;
817 
818 	__asm volatile("lock;cmpxchg16b\t%1"
819 	    : "=@cce" (res), "+m" (*ptr), "+a" (old_low), "+d" (old_high)
820 	    : "b"(new_low), "c" (new_high)
821 	    : "memory", "cc");
822 	return (res);
823 }
824 
825 static COUNTER_U64_DEFINE_EARLY(pv_page_count);
826 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_page_count, CTLFLAG_RD,
827     &pv_page_count, "Current number of allocated pv pages");
828 
829 static COUNTER_U64_DEFINE_EARLY(user_pt_page_count);
830 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, user_pt_page_count, CTLFLAG_RD,
831     &user_pt_page_count,
832     "Current number of allocated page table pages for userspace");
833 
834 static COUNTER_U64_DEFINE_EARLY(kernel_pt_page_count);
835 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, kernel_pt_page_count, CTLFLAG_RD,
836     &kernel_pt_page_count,
837     "Current number of allocated page table pages for the kernel");
838 
839 #ifdef PV_STATS
840 
841 static COUNTER_U64_DEFINE_EARLY(invl_start_restart);
842 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, invl_start_restart,
843     CTLFLAG_RD, &invl_start_restart,
844     "Number of delayed TLB invalidation request restarts");
845 
846 static COUNTER_U64_DEFINE_EARLY(invl_finish_restart);
847 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, invl_finish_restart, CTLFLAG_RD,
848     &invl_finish_restart,
849     "Number of delayed TLB invalidation completion restarts");
850 
851 static int invl_max_qlen;
852 SYSCTL_INT(_vm_pmap, OID_AUTO, invl_max_qlen, CTLFLAG_RD,
853     &invl_max_qlen, 0,
854     "Maximum delayed TLB invalidation request queue length");
855 #endif
856 
857 #define di_delay	locks_delay
858 
859 static void
pmap_delayed_invl_start_u(void)860 pmap_delayed_invl_start_u(void)
861 {
862 	struct pmap_invl_gen *invl_gen, *p, prev, new_prev;
863 	struct thread *td;
864 	struct lock_delay_arg lda;
865 	uintptr_t prevl;
866 	u_char pri;
867 #ifdef PV_STATS
868 	int i, ii;
869 #endif
870 
871 	td = curthread;
872 	invl_gen = &td->td_md.md_invl_gen;
873 	PMAP_ASSERT_NOT_IN_DI();
874 	lock_delay_arg_init(&lda, &di_delay);
875 	invl_gen->saved_pri = 0;
876 	pri = td->td_base_pri;
877 	if (pri > PVM) {
878 		thread_lock(td);
879 		pri = td->td_base_pri;
880 		if (pri > PVM) {
881 			invl_gen->saved_pri = pri;
882 			sched_prio(td, PVM);
883 		}
884 		thread_unlock(td);
885 	}
886 again:
887 	PV_STAT(i = 0);
888 	for (p = &pmap_invl_gen_head;; p = prev.next) {
889 		PV_STAT(i++);
890 		prevl = (uintptr_t)atomic_load_ptr(&p->next);
891 		if ((prevl & PMAP_INVL_GEN_NEXT_INVALID) != 0) {
892 			PV_STAT(counter_u64_add(invl_start_restart, 1));
893 			lock_delay(&lda);
894 			goto again;
895 		}
896 		if (prevl == 0)
897 			break;
898 		prev.next = (void *)prevl;
899 	}
900 #ifdef PV_STATS
901 	if ((ii = invl_max_qlen) < i)
902 		atomic_cmpset_int(&invl_max_qlen, ii, i);
903 #endif
904 
905 	if (!pmap_di_load_invl(p, &prev) || prev.next != NULL) {
906 		PV_STAT(counter_u64_add(invl_start_restart, 1));
907 		lock_delay(&lda);
908 		goto again;
909 	}
910 
911 	new_prev.gen = prev.gen;
912 	new_prev.next = invl_gen;
913 	invl_gen->gen = prev.gen + 1;
914 
915 	/* Formal fence between store to invl->gen and updating *p. */
916 	atomic_thread_fence_rel();
917 
918 	/*
919 	 * After inserting an invl_gen element with invalid bit set,
920 	 * this thread blocks any other thread trying to enter the
921 	 * delayed invalidation block.  Do not allow ourselves to be
922 	 * preempted off the CPU, because that starves other threads.
923 	 */
924 	critical_enter();
925 
926 	/*
927 	 * ABA for *p is not possible here, since p->gen can only
928 	 * increase.  So if the *p thread finished its DI, then
929 	 * started a new one and got inserted into the list at the
930 	 * same place, its gen will appear greater than the previously
931 	 * read gen.
932 	 */
933 	if (!pmap_di_store_invl(p, &prev, &new_prev)) {
934 		critical_exit();
935 		PV_STAT(counter_u64_add(invl_start_restart, 1));
936 		lock_delay(&lda);
937 		goto again;
938 	}
939 
940 	/*
941 	 * Here we clear PMAP_INVL_GEN_NEXT_INVALID in
942 	 * invl_gen->next, allowing other threads to iterate past us.
943 	 * pmap_di_store_invl() provides fence between the generation
944 	 * write and the update of next.
945 	 */
946 	invl_gen->next = NULL;
947 	critical_exit();
948 }
949 
950 static bool
951 pmap_delayed_invl_finish_u_crit(struct pmap_invl_gen *invl_gen,
952     struct pmap_invl_gen *p)
953 {
954 	struct pmap_invl_gen prev, new_prev;
955 	u_long mygen;
956 
957 	/*
958 	 * Load invl_gen->gen after setting PMAP_INVL_GEN_NEXT_INVALID
959 	 * in invl_gen->next.  This prevents larger generations from
960 	 * propagating to our invl_gen->gen.  The lock prefix in
961 	 * atomic_set_ptr() works as a seq_cst fence.
962 	 */
963 	mygen = atomic_load_long(&invl_gen->gen);
964 
965 	if (!pmap_di_load_invl(p, &prev) || prev.next != invl_gen)
966 		return (false);
967 
968 	KASSERT(prev.gen < mygen,
969 	    ("invalid di gen sequence %lu %lu", prev.gen, mygen));
970 	new_prev.gen = mygen;
971 	new_prev.next = (void *)((uintptr_t)invl_gen->next &
972 	    ~PMAP_INVL_GEN_NEXT_INVALID);
973 
974 	/* Formal fence between load of prev and storing update to it. */
975 	atomic_thread_fence_rel();
976 
977 	return (pmap_di_store_invl(p, &prev, &new_prev));
978 }
979 
980 static void
981 pmap_delayed_invl_finish_u(void)
982 {
983 	struct pmap_invl_gen *invl_gen, *p;
984 	struct thread *td;
985 	struct lock_delay_arg lda;
986 	uintptr_t prevl;
987 
988 	td = curthread;
989 	invl_gen = &td->td_md.md_invl_gen;
990 	KASSERT(invl_gen->gen != 0, ("missed invl_start: gen 0"));
991 	KASSERT(((uintptr_t)invl_gen->next & PMAP_INVL_GEN_NEXT_INVALID) == 0,
992 	    ("missed invl_start: INVALID"));
993 	lock_delay_arg_init(&lda, &di_delay);
994 
995 again:
996 	for (p = &pmap_invl_gen_head; p != NULL; p = (void *)prevl) {
997 		prevl = (uintptr_t)atomic_load_ptr(&p->next);
998 		if ((prevl & PMAP_INVL_GEN_NEXT_INVALID) != 0) {
999 			PV_STAT(counter_u64_add(invl_finish_restart, 1));
1000 			lock_delay(&lda);
1001 			goto again;
1002 		}
1003 		if ((void *)prevl == invl_gen)
1004 			break;
1005 	}
1006 
1007 	/*
1008 	 * It is legitimate not to find ourselves on the list if a
1009 	 * thread before us finished its DI and started it again.
1010 	 */
1011 	if (__predict_false(p == NULL)) {
1012 		PV_STAT(counter_u64_add(invl_finish_restart, 1));
1013 		lock_delay(&lda);
1014 		goto again;
1015 	}
1016 
1017 	critical_enter();
1018 	atomic_set_ptr((uintptr_t *)&invl_gen->next,
1019 	    PMAP_INVL_GEN_NEXT_INVALID);
1020 	if (!pmap_delayed_invl_finish_u_crit(invl_gen, p)) {
1021 		atomic_clear_ptr((uintptr_t *)&invl_gen->next,
1022 		    PMAP_INVL_GEN_NEXT_INVALID);
1023 		critical_exit();
1024 		PV_STAT(counter_u64_add(invl_finish_restart, 1));
1025 		lock_delay(&lda);
1026 		goto again;
1027 	}
1028 	critical_exit();
1029 	if (atomic_load_int(&pmap_invl_waiters) > 0)
1030 		pmap_delayed_invl_finish_unblock(0);
1031 	if (invl_gen->saved_pri != 0) {
1032 		thread_lock(td);
1033 		sched_prio(td, invl_gen->saved_pri);
1034 		thread_unlock(td);
1035 	}
1036 }
1037 
1038 #ifdef DDB
1039 DB_SHOW_COMMAND(di_queue, pmap_di_queue)
1040 {
1041 	struct pmap_invl_gen *p, *pn;
1042 	struct thread *td;
1043 	uintptr_t nextl;
1044 	bool first;
1045 
1046 	for (p = &pmap_invl_gen_head, first = true; p != NULL; p = pn,
1047 	    first = false) {
1048 		nextl = (uintptr_t)atomic_load_ptr(&p->next);
1049 		pn = (void *)(nextl & ~PMAP_INVL_GEN_NEXT_INVALID);
1050 		td = first ? NULL : __containerof(p, struct thread,
1051 		    td_md.md_invl_gen);
1052 		db_printf("gen %lu inv %d td %p tid %d\n", p->gen,
1053 		    (nextl & PMAP_INVL_GEN_NEXT_INVALID) != 0, td,
1054 		    td != NULL ? td->td_tid : -1);
1055 	}
1056 }
1057 #endif
1058 
1059 #ifdef PV_STATS
1060 static COUNTER_U64_DEFINE_EARLY(invl_wait);
1061 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, invl_wait,
1062     CTLFLAG_RD, &invl_wait,
1063     "Number of times DI invalidation blocked pmap_remove_all/write");
1064 
1065 static COUNTER_U64_DEFINE_EARLY(invl_wait_slow);
1066 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, invl_wait_slow, CTLFLAG_RD,
1067      &invl_wait_slow, "Number of slow invalidation waits for lockless DI");
1068 
1069 #endif
1070 
1071 #ifdef NUMA
1072 static u_long *
1073 pmap_delayed_invl_genp(vm_page_t m)
1074 {
1075 	vm_paddr_t pa;
1076 	u_long *gen;
1077 
1078 	pa = VM_PAGE_TO_PHYS(m);
1079 	if (__predict_false((pa) > pmap_last_pa))
1080 		gen = &pv_dummy_large.pv_invl_gen;
1081 	else
1082 		gen = &(pa_to_pmdp(pa)->pv_invl_gen);
1083 
1084 	return (gen);
1085 }
1086 #else
1087 static u_long *
1088 pmap_delayed_invl_genp(vm_page_t m)
1089 {
1090 
1091 	return (&pv_invl_gen[pa_index(VM_PAGE_TO_PHYS(m)) % NPV_LIST_LOCKS]);
1092 }
1093 #endif
1094 
1095 static void
1096 pmap_delayed_invl_callout_func(void *arg __unused)
1097 {
1098 
1099 	if (atomic_load_int(&pmap_invl_waiters) == 0)
1100 		return;
1101 	pmap_delayed_invl_finish_unblock(0);
1102 }
1103 
1104 static void
1105 pmap_delayed_invl_callout_init(void *arg __unused)
1106 {
1107 
1108 	if (pmap_di_locked())
1109 		return;
1110 	callout_init(&pmap_invl_callout, 1);
1111 	pmap_invl_callout_inited = true;
1112 }
1113 SYSINIT(pmap_di_callout, SI_SUB_CPU + 1, SI_ORDER_ANY,
1114     pmap_delayed_invl_callout_init, NULL);
1115 
1116 /*
1117  * Ensure that all currently executing DI blocks that need to flush
1118  * the TLB for the given page m have actually flushed the TLB by the
1119  * time this function returns.  If the page m has an empty PV list and we call
1120  * pmap_delayed_invl_wait(), upon its return we know that no CPU has a
1121  * valid mapping for the page m in either its page table or TLB.
1122  *
1123  * This function works by blocking until the global DI generation
1124  * number catches up with the generation number associated with the
1125  * given page m and its PV list.  Since this function's callers
1126  * typically own an object lock and sometimes own a page lock, it
1127  * cannot sleep.  Instead, it blocks on a turnstile to relinquish the
1128  * processor.
1129  */
1130 static void
1131 pmap_delayed_invl_wait_l(vm_page_t m)
1132 {
1133 	u_long *m_gen;
1134 #ifdef PV_STATS
1135 	bool accounted = false;
1136 #endif
1137 
1138 	m_gen = pmap_delayed_invl_genp(m);
1139 	while (*m_gen > pmap_invl_gen) {
1140 #ifdef PV_STATS
1141 		if (!accounted) {
1142 			counter_u64_add(invl_wait, 1);
1143 			accounted = true;
1144 		}
1145 #endif
1146 		pmap_delayed_invl_wait_block(m_gen, &pmap_invl_gen);
1147 	}
1148 }
1149 
1150 static void
1151 pmap_delayed_invl_wait_u(vm_page_t m)
1152 {
1153 	u_long *m_gen;
1154 	struct lock_delay_arg lda;
1155 	bool fast;
1156 
1157 	fast = true;
1158 	m_gen = pmap_delayed_invl_genp(m);
1159 	lock_delay_arg_init(&lda, &di_delay);
1160 	while (*m_gen > atomic_load_long(&pmap_invl_gen_head.gen)) {
1161 		if (fast || !pmap_invl_callout_inited) {
1162 			PV_STAT(counter_u64_add(invl_wait, 1));
1163 			lock_delay(&lda);
1164 			fast = false;
1165 		} else {
1166 			/*
1167 			 * The page's invalidation generation number
1168 			 * is still below the current thread's number.
1169 			 * Prepare to block so that we do not waste
1170 			 * CPU cycles or worse, suffer livelock.
1171 			 *
1172 			 * Since it is impossible to block without
1173 			 * racing with pmap_delayed_invl_finish_u(),
1174 			 * prepare for the race by incrementing
1175 			 * pmap_invl_waiters and arming a 1-tick
1176 			 * callout which will unblock us if we lose
1177 			 * the race.
1178 			 */
1179 			atomic_add_int(&pmap_invl_waiters, 1);
1180 
1181 			/*
1182 			 * Re-check the current thread's invalidation
1183 			 * generation after incrementing
1184 			 * pmap_invl_waiters, so that there is no race
1185 			 * with pmap_delayed_invl_finish_u() setting
1186 			 * the page generation and checking
1187 			 * pmap_invl_waiters.  The only race allowed
1188 			 * is for a missed unblock, which is handled
1189 			 * by the callout.
1190 			 */
1191 			if (*m_gen >
1192 			    atomic_load_long(&pmap_invl_gen_head.gen)) {
1193 				callout_reset(&pmap_invl_callout, 1,
1194 				    pmap_delayed_invl_callout_func, NULL);
1195 				PV_STAT(counter_u64_add(invl_wait_slow, 1));
1196 				pmap_delayed_invl_wait_block(m_gen,
1197 				    &pmap_invl_gen_head.gen);
1198 			}
1199 			atomic_add_int(&pmap_invl_waiters, -1);
1200 		}
1201 	}
1202 }
1203 
1204 DEFINE_IFUNC(, void, pmap_thread_init_invl_gen, (struct thread *))
1205 {
1206 
1207 	return (pmap_di_locked() ? pmap_thread_init_invl_gen_l :
1208 	    pmap_thread_init_invl_gen_u);
1209 }
1210 
1211 DEFINE_IFUNC(static, void, pmap_delayed_invl_start, (void))
1212 {
1213 
1214 	return (pmap_di_locked() ? pmap_delayed_invl_start_l :
1215 	    pmap_delayed_invl_start_u);
1216 }
1217 
1218 DEFINE_IFUNC(static, void, pmap_delayed_invl_finish, (void))
1219 {
1220 
1221 	return (pmap_di_locked() ? pmap_delayed_invl_finish_l :
1222 	    pmap_delayed_invl_finish_u);
1223 }
1224 
1225 DEFINE_IFUNC(static, void, pmap_delayed_invl_wait, (vm_page_t))
1226 {
1227 
1228 	return (pmap_di_locked() ? pmap_delayed_invl_wait_l :
1229 	    pmap_delayed_invl_wait_u);
1230 }
1231 
1232 /*
1233  * Mark the page m's PV list as participating in the current thread's
1234  * DI block.  Any threads concurrently using m's PV list to remove or
1235  * restrict all mappings to m will wait for the current thread's DI
1236  * block to complete before proceeding.
1237  *
1238  * The function works by setting the DI generation number for m's PV
1239  * list to at least the DI generation number of the current thread.
1240  * This forces a caller of pmap_delayed_invl_wait() to block until
1241  * current thread calls pmap_delayed_invl_finish().
1242  */
1243 static void
1244 pmap_delayed_invl_page(vm_page_t m)
1245 {
1246 	u_long gen, *m_gen;
1247 
1248 	rw_assert(VM_PAGE_TO_PV_LIST_LOCK(m), RA_WLOCKED);
1249 	gen = curthread->td_md.md_invl_gen.gen;
1250 	if (gen == 0)
1251 		return;
1252 	m_gen = pmap_delayed_invl_genp(m);
1253 	if (*m_gen < gen)
1254 		*m_gen = gen;
1255 }
1256 
1257 /*
1258  * Crashdump maps.
1259  */
1260 static caddr_t crashdumpmap;
1261 
1262 /*
1263  * Internal flags for pmap_enter()'s helper functions.
1264  */
1265 #define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
1266 #define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */
1267 
1268 /*
1269  * Internal flags for pmap_mapdev_internal() and
1270  * pmap_change_props_locked().
1271  */
1272 #define	MAPDEV_FLUSHCACHE	0x00000001	/* Flush cache after mapping. */
1273 #define	MAPDEV_SETATTR		0x00000002	/* Modify existing attrs. */
1274 #define	MAPDEV_ASSERTVALID	0x00000004	/* Assert mapping validity. */
1275 
1276 TAILQ_HEAD(pv_chunklist, pv_chunk);
1277 
1278 static void	free_pv_chunk(struct pv_chunk *pc);
1279 static void	free_pv_chunk_batch(struct pv_chunklist *batch);
1280 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
1281 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
1282 static int	popcnt_pc_map_pq(uint64_t *map);
1283 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
1284 static void	reserve_pv_entries(pmap_t pmap, int needed,
1285 		    struct rwlock **lockp);
1286 static void	pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1287 		    struct rwlock **lockp);
1288 static bool	pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
1289 		    u_int flags, struct rwlock **lockp);
1290 #if VM_NRESERVLEVEL > 0
1291 static void	pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1292 		    struct rwlock **lockp);
1293 #endif
1294 static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
1295 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
1296 		    vm_offset_t va);
1297 
1298 static void	pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
1299 static int pmap_change_props_locked(vm_offset_t va, vm_size_t size,
1300     vm_prot_t prot, int mode, int flags);
1301 static bool	pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
1302 static bool	pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
1303     vm_offset_t va, struct rwlock **lockp);
1304 static bool	pmap_demote_pde_mpte(pmap_t pmap, pd_entry_t *pde,
1305     vm_offset_t va, struct rwlock **lockp, vm_page_t mpte);
1306 static bool	pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
1307     vm_offset_t va, vm_page_t m);
1308 static int	pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
1309 		    vm_prot_t prot, struct rwlock **lockp);
1310 static int	pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
1311 		    u_int flags, vm_page_t m, struct rwlock **lockp);
1312 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
1313     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
1314 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
1315 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted,
1316     bool allpte_PG_A_set);
1317 static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
1318     vm_offset_t eva);
1319 static void pmap_invalidate_cache_range_all(vm_offset_t sva,
1320     vm_offset_t eva);
1321 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
1322 		    pd_entry_t pde);
1323 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
1324 static vm_page_t pmap_large_map_getptp_unlocked(void);
1325 static vm_paddr_t pmap_large_map_kextract(vm_offset_t va);
1326 #if VM_NRESERVLEVEL > 0
1327 static bool pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
1328     vm_page_t mpte, struct rwlock **lockp);
1329 #endif
1330 static bool pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
1331     vm_prot_t prot);
1332 static void pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask);
1333 static void pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva,
1334     bool exec);
1335 static pdp_entry_t *pmap_pti_pdpe(vm_offset_t va);
1336 static pd_entry_t *pmap_pti_pde(vm_offset_t va);
1337 static void pmap_pti_wire_pte(void *pte);
1338 static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
1339     bool remove_pt, struct spglist *free, struct rwlock **lockp);
1340 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
1341     pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
1342 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
1343 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
1344     struct spglist *free);
1345 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1346 		    pd_entry_t *pde, struct spglist *free,
1347 		    struct rwlock **lockp);
1348 static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
1349     vm_page_t m, struct rwlock **lockp);
1350 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
1351     pd_entry_t newpde);
1352 static void pmap_update_pde_invalidate(pmap_t, vm_offset_t va, pd_entry_t pde);
1353 
1354 static pd_entry_t *pmap_alloc_pde(pmap_t pmap, vm_offset_t va, vm_page_t *pdpgp,
1355 		struct rwlock **lockp);
1356 static vm_page_t pmap_allocpte_alloc(pmap_t pmap, vm_pindex_t ptepindex,
1357 		struct rwlock **lockp, vm_offset_t va);
1358 static vm_page_t pmap_allocpte_nosleep(pmap_t pmap, vm_pindex_t ptepindex,
1359 		struct rwlock **lockp, vm_offset_t va);
1360 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
1361 		struct rwlock **lockp);
1362 
1363 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
1364     struct spglist *free);
1365 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
1366 
1367 static vm_page_t pmap_alloc_pt_page(pmap_t, vm_pindex_t, int);
1368 static void pmap_free_pt_page(pmap_t, vm_page_t, bool);
1369 
1370 /********************/
1371 /* Inline functions */
1372 /********************/
1373 
1374 /*
1375  * Return non-clipped indexes for a given VA; these are the page
1376  * table page indexes at the corresponding level.
1377  */
1378 static __inline vm_pindex_t
1379 pmap_pde_pindex(vm_offset_t va)
1380 {
1381 	return (va >> PDRSHIFT);
1382 }
1383 
1384 static __inline vm_pindex_t
1385 pmap_pdpe_pindex(vm_offset_t va)
1386 {
1387 	return (NUPDE + (va >> PDPSHIFT));
1388 }
1389 
1390 static __inline vm_pindex_t
1391 pmap_pml4e_pindex(vm_offset_t va)
1392 {
1393 	return (NUPDE + NUPDPE + (va >> PML4SHIFT));
1394 }
1395 
1396 static __inline vm_pindex_t
1397 pmap_pml5e_pindex(vm_offset_t va)
1398 {
1399 	return (NUPDE + NUPDPE + NUPML4E + (va >> PML5SHIFT));
1400 }
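/*
 * Taken together, these give every page table page a distinct pindex:
 * leaf page table pages fall in [0, NUPDE), page directory pages in
 * [NUPDE, NUPDE + NUPDPE), and the higher levels follow in order,
 * mirroring the shifts used above.
 */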
1401 
1402 static __inline pml4_entry_t *
1403 pmap_pml5e(pmap_t pmap, vm_offset_t va)
1404 {
1405 
1406 	MPASS(pmap_is_la57(pmap));
1407 	return (&pmap->pm_pmltop[pmap_pml5e_index(va)]);
1408 }
1409 
1410 static __inline pml4_entry_t *
1411 pmap_pml5e_u(pmap_t pmap, vm_offset_t va)
1412 {
1413 
1414 	MPASS(pmap_is_la57(pmap));
1415 	return (&pmap->pm_pmltopu[pmap_pml5e_index(va)]);
1416 }
1417 
1418 static __inline pml4_entry_t *
1419 pmap_pml5e_to_pml4e(pml5_entry_t *pml5e, vm_offset_t va)
1420 {
1421 	pml4_entry_t *pml4e;
1422 
1423 	/* XXX MPASS(pmap_is_la57(pmap); */
1424 	pml4e = (pml4_entry_t *)PHYS_TO_DMAP(*pml5e & PG_FRAME);
1425 	return (&pml4e[pmap_pml4e_index(va)]);
1426 }
1427 
1428 /* Return a pointer to the PML4 slot that corresponds to a VA */
1429 static __inline pml4_entry_t *
1430 pmap_pml4e(pmap_t pmap, vm_offset_t va)
1431 {
1432 	pml5_entry_t *pml5e;
1433 	pml4_entry_t *pml4e;
1434 	pt_entry_t PG_V;
1435 
1436 	if (pmap_is_la57(pmap)) {
1437 		pml5e = pmap_pml5e(pmap, va);
1438 		PG_V = pmap_valid_bit(pmap);
1439 		if ((*pml5e & PG_V) == 0)
1440 			return (NULL);
1441 		pml4e = (pml4_entry_t *)PHYS_TO_DMAP(*pml5e & PG_FRAME);
1442 	} else {
1443 		pml4e = pmap->pm_pmltop;
1444 	}
1445 	return (&pml4e[pmap_pml4e_index(va)]);
1446 }
1447 
1448 static __inline pml4_entry_t *
1449 pmap_pml4e_u(pmap_t pmap, vm_offset_t va)
1450 {
1451 	MPASS(!pmap_is_la57(pmap));
1452 	return (&pmap->pm_pmltopu[pmap_pml4e_index(va)]);
1453 }
1454 
1455 /* Return a pointer to the PDP slot that corresponds to a VA */
1456 static __inline pdp_entry_t *
1457 pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)
1458 {
1459 	pdp_entry_t *pdpe;
1460 
1461 	pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
1462 	return (&pdpe[pmap_pdpe_index(va)]);
1463 }
1464 
1465 /* Return a pointer to the PDP slot that corresponds to a VA */
1466 static __inline pdp_entry_t *
1467 pmap_pdpe(pmap_t pmap, vm_offset_t va)
1468 {
1469 	pml4_entry_t *pml4e;
1470 	pt_entry_t PG_V;
1471 
1472 	PG_V = pmap_valid_bit(pmap);
1473 	pml4e = pmap_pml4e(pmap, va);
1474 	if (pml4e == NULL || (*pml4e & PG_V) == 0)
1475 		return (NULL);
1476 	return (pmap_pml4e_to_pdpe(pml4e, va));
1477 }
1478 
1479 /* Return a pointer to the PD slot that corresponds to a VA */
1480 static __inline pd_entry_t *
1481 pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)
1482 {
1483 	pd_entry_t *pde;
1484 
1485 	KASSERT((*pdpe & PG_PS) == 0,
1486 	    ("%s: pdpe %#lx is a leaf", __func__, *pdpe));
1487 	pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
1488 	return (&pde[pmap_pde_index(va)]);
1489 }
1490 
1491 /* Return a pointer to the PD slot that corresponds to a VA */
1492 static __inline pd_entry_t *
1493 pmap_pde(pmap_t pmap, vm_offset_t va)
1494 {
1495 	pdp_entry_t *pdpe;
1496 	pt_entry_t PG_V;
1497 
1498 	PG_V = pmap_valid_bit(pmap);
1499 	pdpe = pmap_pdpe(pmap, va);
1500 	if (pdpe == NULL || (*pdpe & PG_V) == 0)
1501 		return (NULL);
1502 	KASSERT((*pdpe & PG_PS) == 0,
1503 	    ("pmap_pde for 1G page, pmap %p va %#lx", pmap, va));
1504 	return (pmap_pdpe_to_pde(pdpe, va));
1505 }
1506 
1507 /* Return a pointer to the PT slot that corresponds to a VA */
1508 static __inline pt_entry_t *
1509 pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
1510 {
1511 	pt_entry_t *pte;
1512 
1513 	KASSERT((*pde & PG_PS) == 0,
1514 	    ("%s: pde %#lx is a leaf", __func__, *pde));
1515 	pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
1516 	return (&pte[pmap_pte_index(va)]);
1517 }
1518 
1519 /* Return a pointer to the PT slot that corresponds to a VA */
1520 static __inline pt_entry_t *
1521 pmap_pte(pmap_t pmap, vm_offset_t va)
1522 {
1523 	pd_entry_t *pde;
1524 	pt_entry_t PG_V;
1525 
1526 	PG_V = pmap_valid_bit(pmap);
1527 	pde = pmap_pde(pmap, va);
1528 	if (pde == NULL || (*pde & PG_V) == 0)
1529 		return (NULL);
1530 	if ((*pde & PG_PS) != 0)	/* compat with i386 pmap_pte() */
1531 		return ((pt_entry_t *)pde);
1532 	return (pmap_pde_to_pte(pde, va));
1533 }
1534 
1535 static __inline void
1536 pmap_resident_count_adj(pmap_t pmap, int count)
1537 {
1538 
1539 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1540 	KASSERT(pmap->pm_stats.resident_count + count >= 0,
1541 	    ("pmap %p resident count underflow %ld %d", pmap,
1542 	    pmap->pm_stats.resident_count, count));
1543 	pmap->pm_stats.resident_count += count;
1544 }
1545 
1546 static __inline void
1547 pmap_pt_page_count_pinit(pmap_t pmap, int count)
1548 {
1549 	KASSERT(pmap->pm_stats.resident_count + count >= 0,
1550 	    ("pmap %p resident count underflow %ld %d", pmap,
1551 	    pmap->pm_stats.resident_count, count));
1552 	pmap->pm_stats.resident_count += count;
1553 }
1554 
1555 static __inline void
1556 pmap_pt_page_count_adj(pmap_t pmap, int count)
1557 {
1558 	if (pmap == kernel_pmap)
1559 		counter_u64_add(kernel_pt_page_count, count);
1560 	else {
1561 		if (pmap != NULL)
1562 			pmap_resident_count_adj(pmap, count);
1563 		counter_u64_add(user_pt_page_count, count);
1564 	}
1565 }
1566 
1567 pt_entry_t vtoptem __read_mostly = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
1568     NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1) << 3;
1569 vm_offset_t PTmap __read_mostly = (vm_offset_t)P4Tmap;
1570 
1571 pt_entry_t *
1572 vtopte(vm_offset_t va)
1573 {
1574 	KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopte on a uva/gpa 0x%0lx", va));
1575 
1576 	return ((pt_entry_t *)(PTmap + ((va >> (PAGE_SHIFT - 3)) & vtoptem)));
1577 }
1578 
1579 pd_entry_t vtopdem __read_mostly = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
1580     NPML4EPGSHIFT)) - 1) << 3;
1581 vm_offset_t PDmap __read_mostly = (vm_offset_t)P4Dmap;
1582 
1583 static __inline pd_entry_t *
1584 vtopde(vm_offset_t va)
1585 {
1586 	KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopde on a uva/gpa 0x%0lx", va));
1587 
1588 	return ((pt_entry_t *)(PDmap + ((va >> (PDRSHIFT - 3)) & vtopdem)));
1589 }
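/*
 * vtopte() and vtopde() resolve a kernel virtual address to its PTE or
 * PDE through the recursive page table mappings rooted at PTmap and
 * PDmap: the VA's page-table index bits, extracted by the shift and
 * the vtoptem/vtopdem masks, form a byte offset into the recursive
 * window.  Both are valid only for kernel addresses, as the KASSERTs
 * above enforce.
 */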
1590 
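/*
 * Bootstrap allocator used while constructing the initial page tables:
 * returns n zeroed, physically contiguous pages starting at *firstaddr
 * and advances *firstaddr past them.
 */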
1591 static u_int64_t
1592 allocpages(vm_paddr_t *firstaddr, int n)
1593 {
1594 	u_int64_t ret;
1595 
1596 	ret = *firstaddr;
1597 	bzero((void *)ret, n * PAGE_SIZE);
1598 	*firstaddr += n * PAGE_SIZE;
1599 	return (ret);
1600 }
1601 
1602 CTASSERT(powerof2(NDMPML4E));
1603 
1604 /* number of kernel PDP slots */
1605 #define	NKPDPE(ptpgs)		howmany(ptpgs, NPDEPG)
1606 
1607 static void
1608 nkpt_init(vm_paddr_t addr)
1609 {
1610 	int pt_pages;
1611 
1612 #ifdef NKPT
1613 	pt_pages = NKPT;
1614 #else
1615 	pt_pages = howmany(addr - kernphys, NBPDR) + 1; /* +1 for 2M hole @0 */
1616 	pt_pages += NKPDPE(pt_pages);
1617 
1618 	/*
1619 	 * Add some slop beyond the bare minimum required for bootstrapping
1620 	 * the kernel.
1621 	 *
1622 	 * This is quite important when allocating KVA for kernel modules.
1623 	 * The modules are required to be linked in the negative 2GB of
1624 	 * the address space.  If we run out of KVA in this region then
1625 	 * pmap_growkernel() will need to allocate page table pages to map
1626 	 * the entire 512GB of KVA space which is an unnecessary tax on
1627 	 * physical memory.
1628 	 *
1629 	 * Secondly, device memory mapped as part of setting up the low-
1630 	 * level console(s) is taken from KVA, starting at virtual_avail.
1631 	 * This is because cninit() is called after pmap_bootstrap() but
1632 	 * before vm_mem_init() and pmap_init(). 20MB for a frame buffer
1633 	 * is not uncommon.
1634 	 */
1635 	pt_pages += 32;		/* 64MB additional slop. */
1636 #endif
1637 	nkpt = pt_pages;
1638 }
1639 
1640 /*
1641  * Returns the proper write/execute permission for a physical page that is
1642  * part of the initial boot allocations.
1643  *
1644  * If the page has kernel text, it is marked as read-only. If the page has
1645  * kernel read-only data, it is marked as read-only/not-executable. If the
1646  * page has only read-write data, it is marked as read-write/not-executable.
1647  * If the page is below/above the kernel range, it is marked as read-write.
1648  *
1649  * This function operates on 2M pages, since we map the kernel space that
1650  * way.
1651  */
1652 static inline pt_entry_t
1653 bootaddr_rwx(vm_paddr_t pa)
1654 {
1655 	/*
1656 	 * The kernel is loaded at a 2MB-aligned address, and memory below that
1657 	 * need not be executable.  The .bss section is padded to a 2MB
1658 	 * boundary, so memory following the kernel need not be executable
1659 	 * either.  Preloaded kernel modules have their mapping permissions
1660 	 * fixed up by the linker.
1661 	 */
1662 	if (pa < trunc_2mpage(kernphys + btext - KERNSTART) ||
1663 	    pa >= trunc_2mpage(kernphys + _end - KERNSTART))
1664 		return (X86_PG_RW | pg_nx);
1665 
1666 	/*
1667 	 * The linker should ensure that the read-only and read-write
1668 	 * portions don't share the same 2M page, so this shouldn't
1669 	 * impact read-only data. However, in any case, any page with
1670 	 * read-write data needs to be read-write.
1671 	 */
1672 	if (pa >= trunc_2mpage(kernphys + brwsection - KERNSTART))
1673 		return (X86_PG_RW | pg_nx);
1674 
1675 	/*
1676 	 * Mark any 2M page containing kernel text as read-only. Mark
1677 	 * other pages with read-only data as read-only and not executable.
1678 	 * (It is likely a small portion of the read-only data section will
1679 	 * be marked as read-only, but executable. This should be acceptable
1680 	 * since the read-only protection will keep the data from changing.)
1681 	 * Note that fixups to the .text section will still work until we
1682 	 * set CR0.WP.
1683 	 */
1684 	if (pa < round_2mpage(kernphys + etext - KERNSTART))
1685 		return (0);
1686 	return (pg_nx);
1687 }
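/*
 * For illustration, the resulting 2M-page permissions look like this
 * (boundaries rounded to 2M exactly as in the comparisons above):
 *
 *	below the kernel text			RW + NX
 *	[btext, etext)				RO + X	(return 0)
 *	[etext, brwsection)			RO + NX	(return pg_nx)
 *	brwsection and everything above		RW + NX
 */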
1688 
1689 extern const char la57_trampoline[];
1690 
1691 static void
1692 pmap_bootstrap_la57(vm_paddr_t *firstaddr)
1693 {
1694 	void (*la57_tramp)(uint64_t pml5);
1695 	pml5_entry_t *pt;
1696 
1697 	if ((cpu_stdext_feature2 & CPUID_STDEXT2_LA57) == 0)
1698 		return;
1699 	la57 = 1;
1700 	TUNABLE_INT_FETCH("vm.pmap.la57", &la57);
1701 	if (!la57)
1702 		return;
1703 
1704 	KPML5phys = allocpages(firstaddr, 1);
1705 	KPML4phys = rcr3() & 0xfffff000; /* pml4 from loader must be < 4G */
1706 
1707 	pt = (pml5_entry_t *)KPML5phys;
1708 	pt[0] = KPML4phys | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
1709 	pt[NPML4EPG - 1] = KPML4phys | X86_PG_V | X86_PG_RW | X86_PG_A |
1710 	    X86_PG_M;
1711 
1712 	la57_tramp = (void (*)(uint64_t))((uintptr_t)la57_trampoline -
1713 	    KERNSTART + amd64_loadaddr());
1714 	printf("Calling la57 trampoline at %p, KPML5phys %#lx ...",
1715 	    la57_tramp, KPML5phys);
1716 	la57_tramp(KPML5phys);
1717 	printf(" alive in la57 mode\n");
1718 }
1719 
1720 static void
1721 create_pagetables(vm_paddr_t *firstaddr)
1722 {
1723 	pd_entry_t *pd_p;
1724 	pdp_entry_t *pdp_p;
1725 	pml4_entry_t *p4_p;
1726 	pml5_entry_t *p5_p;
1727 	uint64_t DMPDkernphys;
1728 	vm_paddr_t pax;
1729 #ifdef KASAN
1730 	pt_entry_t *pt_p;
1731 	uint64_t KASANPDphys, KASANPTphys, KASANphys;
1732 	vm_offset_t kasankernbase;
1733 	int kasankpdpi, kasankpdi, nkasanpte;
1734 #endif
1735 	int i, j, ndm1g, nkpdpe, nkdmpde;
1736 
1737 	TSENTER();
1738 	/* Allocate page table pages for the direct map */
1739 	ndmpdp = howmany(ptoa(Maxmem), NBPDP);
1740 	if (ndmpdp < 4)		/* Minimum 4GB of direct map */
1741 		ndmpdp = 4;
1742 	ndmpdpphys = howmany(ndmpdp, NPDPEPG);
1743 	if (ndmpdpphys > NDMPML4E) {
1744 		/*
1745 		 * Each NDMPML4E allows 512 GB, so limit to that,
1746 		 * and then readjust ndmpdp and ndmpdpphys.
1747 		 */
1748 		printf("NDMPML4E limits system to %d GB\n", NDMPML4E * 512);
1749 		Maxmem = atop(NDMPML4E * NBPML4);
1750 		ndmpdpphys = NDMPML4E;
1751 		ndmpdp = NDMPML4E * NPDEPG;
1752 	}
1753 	DMPDPphys = allocpages(firstaddr, ndmpdpphys);
1754 	ndm1g = 0;
1755 	if ((amd_feature & AMDID_PAGE1GB) != 0) {
1756 		/*
1757 		 * Calculate the number of 1G pages that will fully fit in
1758 		 * Maxmem.
1759 		 */
1760 		ndm1g = ptoa(Maxmem) >> PDPSHIFT;
1761 
1762 		/*
1763 		 * Allocate 2M pages for the kernel. These will be used in
1764 		 * place of the one or more 1G pages from ndm1g that map
1765 		 * kernel memory into DMAP.
1766 		 */
1767 		nkdmpde = howmany((vm_offset_t)brwsection - KERNSTART +
1768 		    kernphys - rounddown2(kernphys, NBPDP), NBPDP);
1769 		DMPDkernphys = allocpages(firstaddr, nkdmpde);
1770 	}
1771 	if (ndm1g < ndmpdp)
1772 		DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g);
1773 	dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
1774 
1775 	/* Allocate pages. */
1776 	KPML4phys = allocpages(firstaddr, 1);
1777 	KPDPphys = allocpages(firstaddr, NKPML4E);
1778 #ifdef KASAN
1779 	KASANPDPphys = allocpages(firstaddr, NKASANPML4E);
1780 	KASANPDphys = allocpages(firstaddr, 1);
1781 #endif
1782 #ifdef KMSAN
1783 	/*
1784 	 * The KMSAN shadow maps are initially left unpopulated, since there is
1785 	 * no need to shadow memory above KERNBASE.
1786 	 */
1787 	KMSANSHADPDPphys = allocpages(firstaddr, NKMSANSHADPML4E);
1788 	KMSANORIGPDPphys = allocpages(firstaddr, NKMSANORIGPML4E);
1789 #endif
1790 
1791 	/*
1792 	 * Allocate the initial number of kernel page table pages required to
1793 	 * bootstrap.  We defer this until after all memory-size dependent
1794 	 * allocations are done (e.g. direct map), so that we don't have to
1795 	 * build in too much slop in our estimate.
1796 	 *
1797 	 * Note that when NKPML4E > 1, we have an empty page underneath
1798 	 * all but the KPML4I'th one, so we need NKPML4E-1 extra (zeroed)
1799 	 * pages.  (pmap_enter requires a PD page to exist for each KPML4E.)
1800 	 */
1801 	nkpt_init(*firstaddr);
1802 	nkpdpe = NKPDPE(nkpt);
1803 
1804 	KPTphys = allocpages(firstaddr, nkpt);
1805 	KPDphys = allocpages(firstaddr, nkpdpe);
1806 
1807 #ifdef KASAN
1808 	nkasanpte = howmany(nkpt, KASAN_SHADOW_SCALE);
1809 	KASANPTphys = allocpages(firstaddr, nkasanpte);
1810 	KASANphys = allocpages(firstaddr, nkasanpte * NPTEPG);
1811 #endif
1812 
1813 	/*
1814 	 * Connect the zero-filled PT pages to their PD entries.  This
1815 	 * implicitly maps the PT pages at their correct locations within
1816 	 * the PTmap.
1817 	 */
1818 	pd_p = (pd_entry_t *)KPDphys;
1819 	for (i = 0; i < nkpt; i++)
1820 		pd_p[i] = (KPTphys + ptoa(i)) | X86_PG_RW | X86_PG_V;
1821 
1822 	/*
1823 	 * Map from start of the kernel in physical memory (staging
1824 	 * area) to the end of loader preallocated memory using 2MB
1825 	 * pages.  This replaces some of the PD entries created above.
1826 	 * For compatibility, identity map 2M at the start.
1827 	 */
1828 	pd_p[0] = X86_PG_V | PG_PS | pg_g | X86_PG_M | X86_PG_A |
1829 	    X86_PG_RW | pg_nx;
1830 	for (i = 1, pax = kernphys; pax < KERNend; i++, pax += NBPDR) {
1831 		/* Preset PG_M and PG_A because demotion expects it. */
1832 		pd_p[i] = pax | X86_PG_V | PG_PS | pg_g | X86_PG_M |
1833 		    X86_PG_A | bootaddr_rwx(pax);
1834 	}
1835 
1836 	/*
1837 	 * Because we map the physical blocks in 2M pages, adjust firstaddr
1838 	 * to record the physical blocks we've actually mapped into kernel
1839 	 * virtual address space.
1840 	 */
1841 	if (*firstaddr < round_2mpage(KERNend))
1842 		*firstaddr = round_2mpage(KERNend);
1843 
1844 	/* And connect up the PD to the PDP (leaving room for L4 pages) */
1845 	pdp_p = (pdp_entry_t *)(KPDPphys + ptoa(KPML4I - KPML4BASE));
1846 	for (i = 0; i < nkpdpe; i++)
1847 		pdp_p[i + KPDPI] = (KPDphys + ptoa(i)) | X86_PG_RW | X86_PG_V;
1848 
1849 #ifdef KASAN
1850 	kasankernbase = kasan_md_addr_to_shad(KERNBASE);
1851 	kasankpdpi = pmap_pdpe_index(kasankernbase);
1852 	kasankpdi = pmap_pde_index(kasankernbase);
1853 
1854 	pdp_p = (pdp_entry_t *)KASANPDPphys;
1855 	pdp_p[kasankpdpi] = (KASANPDphys | X86_PG_RW | X86_PG_V | pg_nx);
1856 
1857 	pd_p = (pd_entry_t *)KASANPDphys;
1858 	for (i = 0; i < nkasanpte; i++)
1859 		pd_p[i + kasankpdi] = (KASANPTphys + ptoa(i)) | X86_PG_RW |
1860 		    X86_PG_V | pg_nx;
1861 
1862 	pt_p = (pt_entry_t *)KASANPTphys;
1863 	for (i = 0; i < nkasanpte * NPTEPG; i++)
1864 		pt_p[i] = (KASANphys + ptoa(i)) | X86_PG_RW | X86_PG_V |
1865 		    X86_PG_M | X86_PG_A | pg_nx;
1866 #endif
1867 
1868 	/*
1869 	 * Now, set up the direct map region using 2MB and/or 1GB pages.  If
1870 	 * the end of physical memory is not aligned to a 1GB page boundary,
1871 	 * then the residual physical memory is mapped with 2MB pages.  Later,
1872 	 * if pmap_mapdev{_attr}() uses the direct map for non-write-back
1873 	 * memory, pmap_change_attr() will demote any 2MB or 1GB page mappings
1874 	 * that are partially used.
1875 	 */
1876 	pd_p = (pd_entry_t *)DMPDphys;
1877 	for (i = NPDEPG * ndm1g, j = 0; i < NPDEPG * ndmpdp; i++, j++) {
1878 		pd_p[j] = (vm_paddr_t)i << PDRSHIFT;
1879 		/* Preset PG_M and PG_A because demotion expects it. */
1880 		pd_p[j] |= X86_PG_RW | X86_PG_V | PG_PS | pg_g |
1881 		    X86_PG_M | X86_PG_A | pg_nx;
1882 	}
1883 	pdp_p = (pdp_entry_t *)DMPDPphys;
1884 	for (i = 0; i < ndm1g; i++) {
1885 		pdp_p[i] = (vm_paddr_t)i << PDPSHIFT;
1886 		/* Preset PG_M and PG_A because demotion expects it. */
1887 		pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_PS | pg_g |
1888 		    X86_PG_M | X86_PG_A | pg_nx;
1889 	}
1890 	for (j = 0; i < ndmpdp; i++, j++) {
1891 		pdp_p[i] = DMPDphys + ptoa(j);
1892 		pdp_p[i] |= X86_PG_RW | X86_PG_V | pg_nx;
1893 	}
1894 
1895 	/*
1896 	 * Instead of using a 1G page for the memory containing the kernel,
1897 	 * use 2M pages with read-only and no-execute permissions.  (If using 1G
1898 	 * pages, this will partially overwrite the PDPEs above.)
1899 	 */
1900 	if (ndm1g > 0) {
1901 		pd_p = (pd_entry_t *)DMPDkernphys;
1902 		for (i = 0, pax = rounddown2(kernphys, NBPDP);
1903 		    i < NPDEPG * nkdmpde; i++, pax += NBPDR) {
1904 			pd_p[i] = pax | X86_PG_V | PG_PS | pg_g | X86_PG_M |
1905 			    X86_PG_A | pg_nx | bootaddr_rwx(pax);
1906 		}
1907 		j = rounddown2(kernphys, NBPDP) >> PDPSHIFT;
1908 		for (i = 0; i < nkdmpde; i++) {
1909 			pdp_p[i + j] = (DMPDkernphys + ptoa(i)) |
1910 			    X86_PG_RW | X86_PG_V | pg_nx;
1911 		}
1912 	}
1913 
1914 	/* And recursively map PML4 to itself in order to get PTmap */
1915 	p4_p = (pml4_entry_t *)KPML4phys;
1916 	p4_p[PML4PML4I] = KPML4phys;
1917 	p4_p[PML4PML4I] |= X86_PG_RW | X86_PG_V | pg_nx;
1918 
1919 #ifdef KASAN
1920 	/* Connect the KASAN shadow map slots up to the PML4. */
1921 	for (i = 0; i < NKASANPML4E; i++) {
1922 		p4_p[KASANPML4I + i] = KASANPDPphys + ptoa(i);
1923 		p4_p[KASANPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
1924 	}
1925 #endif
1926 
1927 #ifdef KMSAN
1928 	/* Connect the KMSAN shadow map slots up to the PML4. */
1929 	for (i = 0; i < NKMSANSHADPML4E; i++) {
1930 		p4_p[KMSANSHADPML4I + i] = KMSANSHADPDPphys + ptoa(i);
1931 		p4_p[KMSANSHADPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
1932 	}
1933 
1934 	/* Connect the KMSAN origin map slots up to the PML4. */
1935 	for (i = 0; i < NKMSANORIGPML4E; i++) {
1936 		p4_p[KMSANORIGPML4I + i] = KMSANORIGPDPphys + ptoa(i);
1937 		p4_p[KMSANORIGPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
1938 	}
1939 #endif
1940 
1941 	/* Connect the Direct Map slots up to the PML4. */
1942 	for (i = 0; i < ndmpdpphys; i++) {
1943 		p4_p[DMPML4I + i] = DMPDPphys + ptoa(i);
1944 		p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
1945 	}
1946 
1947 	/* Connect the KVA slots up to the PML4 */
1948 	for (i = 0; i < NKPML4E; i++) {
1949 		p4_p[KPML4BASE + i] = KPDPphys + ptoa(i);
1950 		p4_p[KPML4BASE + i] |= X86_PG_RW | X86_PG_V;
1951 	}
1952 
1953 	kernel_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
1954 
1955 	if (la57) {
1956 		/* XXXKIB bootstrap KPML5phys page is lost */
1957 		KPML5phys = allocpages(firstaddr, 1);
1958 		for (i = 0, p5_p = (pml5_entry_t *)KPML5phys; i < NPML5EPG;
1959 		    i++) {
1960 			if (i == PML5PML5I) {
1961 				/*
1962 				 * Recursively map PML5 to itself in
1963 				 * order to get PTmap and PDmap.
1964 				 */
1965 				p5_p[i] = KPML5phys | X86_PG_RW | X86_PG_A |
1966 				    X86_PG_M | X86_PG_V | pg_nx;
1967 			} else if (i == pmap_pml5e_index(UPT_MAX_ADDRESS)) {
1968 				p5_p[i] = KPML4phys | X86_PG_RW | X86_PG_A |
1969 				    X86_PG_M | X86_PG_V;
1970 			} else {
1971 				p5_p[i] = 0;
1972 			}
1973 		}
1974 	}
1975 	TSEXIT();
1976 }
1977 
1978 /*
1979  *	Bootstrap the system enough to run with virtual memory.
1980  *
1981  *	On amd64 this is called after mapping has already been enabled
1982  *	and just syncs the pmap module with what has already been done.
1983  *	[We can't call it easily with mapping off since the kernel is not
1984  *	mapped with PA == VA, hence we would have to relocate every address
1985  *	from the linked base (virtual) address "KERNBASE" to the actual
1986  *	(physical) address starting relative to 0]
1987  */
1988 void
1989 pmap_bootstrap(vm_paddr_t *firstaddr)
1990 {
1991 	vm_offset_t va;
1992 	pt_entry_t *pte, *pcpu_pte;
1993 	struct region_descriptor r_gdt;
1994 	uint64_t cr4, pcpu0_phys;
1995 	u_long res;
1996 	int i;
1997 
1998 	TSENTER();
1999 	KERNend = *firstaddr;
2000 	res = atop(KERNend - (vm_paddr_t)kernphys);
2001 
2002 	if (!pti)
2003 		pg_g = X86_PG_G;
2004 
2005 	/*
2006 	 * Create an initial set of page tables to run the kernel in.
2007 	 */
2008 	pmap_bootstrap_la57(firstaddr);
2009 	create_pagetables(firstaddr);
2010 
2011 	pcpu0_phys = allocpages(firstaddr, 1);
2012 
2013 	/*
2014 	 * Add a physical memory segment (vm_phys_seg) corresponding to the
2015 	 * preallocated kernel page table pages so that vm_page structures
2016 	 * representing these pages will be created.  The vm_page structures
2017 	 * are required for promotion of the corresponding kernel virtual
2018 	 * addresses to superpage mappings.
2019 	 */
2020 	vm_phys_early_add_seg(KPTphys, KPTphys + ptoa(nkpt));
2021 
2022 	/*
2023 	 * Account for the virtual addresses mapped by create_pagetables().
2024 	 */
2025 	virtual_avail = (vm_offset_t)KERNSTART + round_2mpage(KERNend -
2026 	    (vm_paddr_t)kernphys);
2027 	virtual_end = VM_MAX_KERNEL_ADDRESS;
2028 
2029 	/*
2030 	 * Enable PG_G global pages, then switch to the kernel page
2031 	 * table from the bootstrap page table.  After the switch, it
2032 	 * is possible to enable SMEP and SMAP since PG_U bits are
2033 	 * correct now.
2034 	 */
2035 	cr4 = rcr4();
2036 	cr4 |= CR4_PGE;
2037 	load_cr4(cr4);
2038 	load_cr3(la57 ? KPML5phys : KPML4phys);
2039 	if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
2040 		cr4 |= CR4_SMEP;
2041 	if (cpu_stdext_feature & CPUID_STDEXT_SMAP)
2042 		cr4 |= CR4_SMAP;
2043 	load_cr4(cr4);
2044 
2045 	/*
2046 	 * Initialize the kernel pmap (which is statically allocated).
2047 	 * Count bootstrap data as being resident in case any of this data is
2048 	 * later unmapped (using pmap_remove()) and freed.
2049 	 */
2050 	PMAP_LOCK_INIT(kernel_pmap);
2051 	if (la57) {
2052 		vtoptem = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT +
2053 		    NPML4EPGSHIFT + NPML5EPGSHIFT)) - 1) << 3;
2054 		PTmap = (vm_offset_t)P5Tmap;
2055 		vtopdem = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
2056 		    NPML4EPGSHIFT + NPML5EPGSHIFT)) - 1) << 3;
2057 		PDmap = (vm_offset_t)P5Dmap;
2058 		kernel_pmap->pm_pmltop = (void *)PHYS_TO_DMAP(KPML5phys);
2059 		kernel_pmap->pm_cr3 = KPML5phys;
2060 		pmap_pt_page_count_adj(kernel_pmap, 1);	/* top-level page */
2061 	} else {
2062 		kernel_pmap->pm_pmltop = kernel_pml4;
2063 		kernel_pmap->pm_cr3 = KPML4phys;
2064 	}
2065 	kernel_pmap->pm_ucr3 = PMAP_NO_CR3;
2066 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
2067 	kernel_pmap->pm_stats.resident_count = res;
2068 	vm_radix_init(&kernel_pmap->pm_root);
2069 	kernel_pmap->pm_flags = pmap_flags;
2070 	if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
2071 		rangeset_init(&kernel_pmap->pm_pkru, pkru_dup_range,
2072 		    pkru_free_range, kernel_pmap, M_NOWAIT);
2073 	}
2074 
2075 	/*
2076 	 * The kernel pmap is always active on all CPUs.  Once CPUs are
2077 	 * enumerated, the mask will be set equal to all_cpus.
2078 	 */
2079 	CPU_FILL(&kernel_pmap->pm_active);
2080 
2081  	/*
2082 	 * Initialize the TLB invalidations generation number lock.
2083 	 */
2084 	mtx_init(&invl_gen_mtx, "invlgn", NULL, MTX_DEF);
2085 
2086 	/*
2087 	 * Reserve some special page table entries/VA space for temporary
2088 	 * mapping of pages.
2089 	 */
2090 #define	SYSMAP(c, p, v, n)	\
2091 	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
2092 
2093 	va = virtual_avail;
2094 	pte = vtopte(va);
2095 
2096 	/*
2097 	 * Crashdump maps.  The first page is reused as CMAP1 for the
2098 	 * memory test.
2099 	 */
2100 	SYSMAP(caddr_t, CMAP1, crashdumpmap, MAXDUMPPGS)
2101 	CADDR1 = crashdumpmap;
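	/*
	 * For illustration, the SYSMAP() invocation above expands roughly to
	 *
	 *	crashdumpmap = (caddr_t)va;
	 *	va += MAXDUMPPGS * PAGE_SIZE;
	 *	CMAP1 = pte;
	 *	pte += MAXDUMPPGS;
	 *
	 * i.e. it reserves n pages of KVA starting at va and remembers the
	 * first PTE so the pages can be mapped later on demand.
	 */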
2102 
2103 	SYSMAP(struct pcpu *, pcpu_pte, __pcpu, MAXCPU);
2104 	virtual_avail = va;
2105 
2106 	/*
2107 	 * Map the BSP PCPU now, the rest of the PCPUs are mapped by
2108 	 * amd64_mp_alloc_pcpu()/start_all_aps() when we know the
2109 	 * number of CPUs and NUMA affinity.
2110 	 */
2111 	pcpu_pte[0] = pcpu0_phys | X86_PG_V | X86_PG_RW | pg_g | pg_nx |
2112 	    X86_PG_M | X86_PG_A;
2113 	for (i = 1; i < MAXCPU; i++)
2114 		pcpu_pte[i] = 0;
2115 
2116 	/*
2117 	 * Re-initialize PCPU area for BSP after switching.
2118 	 * Make hardware use gdt and common_tss from the new PCPU.
2119 	 * Also clears the usage of temporary gdt during switch to
2120 	 * LA57 paging.
2121 	 */
2122 	STAILQ_INIT(&cpuhead);
2123 	wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[0]);
2124 	pcpu_init(&__pcpu[0], 0, sizeof(struct pcpu));
2125 	amd64_bsp_pcpu_init1(&__pcpu[0]);
2126 	amd64_bsp_ist_init(&__pcpu[0]);
2127 	__pcpu[0].pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
2128 	    IOPERM_BITMAP_SIZE;
2129 	memcpy(__pcpu[0].pc_gdt, temp_bsp_pcpu.pc_gdt, NGDT *
2130 	    sizeof(struct user_segment_descriptor));
2131 	gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&__pcpu[0].pc_common_tss;
2132 	ssdtosyssd(&gdt_segs[GPROC0_SEL],
2133 	    (struct system_segment_descriptor *)&__pcpu[0].pc_gdt[GPROC0_SEL]);
2134 	r_gdt.rd_limit = NGDT * sizeof(struct user_segment_descriptor) - 1;
2135 	r_gdt.rd_base = (long)__pcpu[0].pc_gdt;
2136 	lgdt(&r_gdt);
2137 	wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[0]);
2138 	ltr(GSEL(GPROC0_SEL, SEL_KPL));
2139 	__pcpu[0].pc_dynamic = temp_bsp_pcpu.pc_dynamic;
2140 	__pcpu[0].pc_acpi_id = temp_bsp_pcpu.pc_acpi_id;
2141 
2142 	/*
2143 	 * Initialize the PAT MSR.
2144 	 * pmap_init_pat() clears and sets CR4_PGE, which, as a
2145 	 * side-effect, invalidates stale PG_G TLB entries that might
2146 	 * have been created in our pre-boot environment.
2147 	 */
2148 	pmap_init_pat();
2149 
2150 	/* Initialize TLB Context Id. */
2151 	if (pmap_pcid_enabled) {
2152 		kernel_pmap->pm_pcidp = (void *)(uintptr_t)
2153 		    offsetof(struct pcpu, pc_kpmap_store);
2154 
2155 		PCPU_SET(kpmap_store.pm_pcid, PMAP_PCID_KERN);
2156 		PCPU_SET(kpmap_store.pm_gen, 1);
2157 
2158 		/*
2159 		 * PMAP_PCID_KERN + 1 is used for initialization of
2160 		 * proc0 pmap.  The pmap's pcid state might be used by
2161 		 * EFIRT entry before first context switch, so it
2162 		 * needs to be valid.
2163 		 */
2164 		PCPU_SET(pcid_next, PMAP_PCID_KERN + 2);
2165 		PCPU_SET(pcid_gen, 1);
2166 
2167 		/*
2168 		 * pcpu area for APs is zeroed during AP startup.
2169 		 * pc_pcid_next and pc_pcid_gen are initialized by AP
2170 		 * during pcpu setup.
2171 		 */
2172 		load_cr4(rcr4() | CR4_PCIDE);
2173 	}
2174 	TSEXIT();
2175 }
2176 
2177 /*
2178  * Setup the PAT MSR.
2179  */
2180 void
2181 pmap_init_pat(void)
2182 {
2183 	uint64_t pat_msr;
2184 	u_long cr0, cr4;
2185 	int i;
2186 
2187 	/* Bail if this CPU doesn't implement PAT. */
2188 	if ((cpu_feature & CPUID_PAT) == 0)
2189 		panic("no PAT??");
2190 
2191 	/* Set default PAT index table. */
2192 	for (i = 0; i < PAT_INDEX_SIZE; i++)
2193 		pat_index[i] = -1;
2194 	pat_index[PAT_WRITE_BACK] = 0;
2195 	pat_index[PAT_WRITE_THROUGH] = 1;
2196 	pat_index[PAT_UNCACHEABLE] = 3;
2197 	pat_index[PAT_WRITE_COMBINING] = 6;
2198 	pat_index[PAT_WRITE_PROTECTED] = 5;
2199 	pat_index[PAT_UNCACHED] = 2;
2200 
2201 	/*
2202 	 * Initialize default PAT entries.
2203 	 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
2204 	 * Program 5 and 6 as WP and WC.
2205 	 *
2206 	 * Leave 4 and 7 as WB and UC.  Note that a recursive page table
2207 	 * mapping for a 2M page uses a PAT value with the bit 3 set due
2208 	 * to its overload with PG_PS.
2209 	 */
2210 	pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
2211 	    PAT_VALUE(1, PAT_WRITE_THROUGH) |
2212 	    PAT_VALUE(2, PAT_UNCACHED) |
2213 	    PAT_VALUE(3, PAT_UNCACHEABLE) |
2214 	    PAT_VALUE(4, PAT_WRITE_BACK) |
2215 	    PAT_VALUE(5, PAT_WRITE_PROTECTED) |
2216 	    PAT_VALUE(6, PAT_WRITE_COMBINING) |
2217 	    PAT_VALUE(7, PAT_UNCACHEABLE);
2218 
2219 	/* Disable PGE. */
2220 	cr4 = rcr4();
2221 	load_cr4(cr4 & ~CR4_PGE);
2222 
2223 	/* Disable caches (CD = 1, NW = 0). */
2224 	cr0 = rcr0();
2225 	load_cr0((cr0 & ~CR0_NW) | CR0_CD);
2226 
2227 	/* Flushes caches and TLBs. */
2228 	wbinvd();
2229 	invltlb();
2230 
2231 	/* Update PAT and index table. */
2232 	wrmsr(MSR_PAT, pat_msr);
2233 
2234 	/* Flush caches and TLBs again. */
2235 	wbinvd();
2236 	invltlb();
2237 
2238 	/* Restore caches and PGE. */
2239 	load_cr0(cr0);
2240 	load_cr4(cr4);
2241 }
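/*
 * The net effect, for illustration, is the following translation from
 * cache modes to PAT entries, matching pat_index[] above:
 *
 *	PAT_WRITE_BACK		-> entry 0 (WB)
 *	PAT_WRITE_THROUGH	-> entry 1 (WT)
 *	PAT_UNCACHED		-> entry 2 (UC-)
 *	PAT_UNCACHEABLE		-> entry 3 (UC)
 *	PAT_WRITE_PROTECTED	-> entry 5 (WP)
 *	PAT_WRITE_COMBINING	-> entry 6 (WC)
 *
 * Entries 4 and 7 remain WB and UC so that, as noted above, an index with
 * bit 3 forced on by the PG_PS overload still resolves to a sensible type.
 */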
2242 
2243 vm_page_t
2244 pmap_page_alloc_below_4g(bool zeroed)
2245 {
2246 	return (vm_page_alloc_noobj_contig((zeroed ? VM_ALLOC_ZERO : 0),
2247 	    1, 0, (1ULL << 32), PAGE_SIZE, 0, VM_MEMATTR_DEFAULT));
2248 }
2249 
2250 /*
2251  *	Initialize a vm_page's machine-dependent fields.
2252  */
2253 void
2254 pmap_page_init(vm_page_t m)
2255 {
2256 
2257 	TAILQ_INIT(&m->md.pv_list);
2258 	m->md.pat_mode = PAT_WRITE_BACK;
2259 }
2260 
2261 static int pmap_allow_2m_x_ept;
2262 SYSCTL_INT(_vm_pmap, OID_AUTO, allow_2m_x_ept, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
2263     &pmap_allow_2m_x_ept, 0,
2264     "Allow executable superpage mappings in EPT");
2265 
2266 void
2267 pmap_allow_2m_x_ept_recalculate(void)
2268 {
2269 	/*
2270 	 * SKL002, SKL012S.  Since the EPT format is only used by
2271 	 * Intel CPUs, the vendor check is merely a formality.
2272 	 */
2273 	if (!(cpu_vendor_id != CPU_VENDOR_INTEL ||
2274 	    (cpu_ia32_arch_caps & IA32_ARCH_CAP_IF_PSCHANGE_MC_NO) != 0 ||
2275 	    (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
2276 	    (CPUID_TO_MODEL(cpu_id) == 0x26 ||	/* Atoms */
2277 	    CPUID_TO_MODEL(cpu_id) == 0x27 ||
2278 	    CPUID_TO_MODEL(cpu_id) == 0x35 ||
2279 	    CPUID_TO_MODEL(cpu_id) == 0x36 ||
2280 	    CPUID_TO_MODEL(cpu_id) == 0x37 ||
2281 	    CPUID_TO_MODEL(cpu_id) == 0x86 ||
2282 	    CPUID_TO_MODEL(cpu_id) == 0x1c ||
2283 	    CPUID_TO_MODEL(cpu_id) == 0x4a ||
2284 	    CPUID_TO_MODEL(cpu_id) == 0x4c ||
2285 	    CPUID_TO_MODEL(cpu_id) == 0x4d ||
2286 	    CPUID_TO_MODEL(cpu_id) == 0x5a ||
2287 	    CPUID_TO_MODEL(cpu_id) == 0x5c ||
2288 	    CPUID_TO_MODEL(cpu_id) == 0x5d ||
2289 	    CPUID_TO_MODEL(cpu_id) == 0x5f ||
2290 	    CPUID_TO_MODEL(cpu_id) == 0x6e ||
2291 	    CPUID_TO_MODEL(cpu_id) == 0x7a ||
2292 	    CPUID_TO_MODEL(cpu_id) == 0x57 ||	/* Knights */
2293 	    CPUID_TO_MODEL(cpu_id) == 0x85))))
2294 		pmap_allow_2m_x_ept = 1;
2295 #ifndef BURN_BRIDGES
2296 	TUNABLE_INT_FETCH("hw.allow_2m_x_ept", &pmap_allow_2m_x_ept);
2297 #endif
2298 	TUNABLE_INT_FETCH("vm.pmap.allow_2m_x_ept", &pmap_allow_2m_x_ept);
2299 }
2300 
2301 static bool
2302 pmap_allow_2m_x_page(pmap_t pmap, bool executable)
2303 {
2304 
2305 	return (pmap->pm_type != PT_EPT || !executable ||
2306 	    !pmap_allow_2m_x_ept);
2307 }
2308 
2309 #ifdef NUMA
2310 static void
2311 pmap_init_pv_table(void)
2312 {
2313 	struct pmap_large_md_page *pvd;
2314 	vm_size_t s;
2315 	long start, end, highest, pv_npg;
2316 	int domain, i, j, pages;
2317 
2318 	/*
2319 	 * For correctness we depend on the size being evenly divisible into a
2320 	 * page. As a tradeoff between performance and total memory use, the
2321 	 * entry is 64 bytes (aka one cacheline) in size. Not being smaller
2322 	 * avoids false sharing, but not being 128 bytes allows some avoidable
2323 	 * traffic from the adjacent-cacheline prefetcher.
2324 	 *
2325 	 * Assert the size so that accidental changes fail to compile.
2326 	 */
2327 	CTASSERT((sizeof(*pvd) == 64));
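	/*
	 * Sizing example (illustrative): on a machine whose highest physical
	 * segment ends at 64GB, pv_npg = howmany(64GB, NBPDR) = 32768, so the
	 * table needs 32768 * 64 bytes = 2MB of KVA, backed page by page from
	 * the owning domain in the loop below.
	 */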
2328 
2329 	/*
2330 	 * Calculate the size of the array.
2331 	 */
2332 	pmap_last_pa = vm_phys_segs[vm_phys_nsegs - 1].end;
2333 	pv_npg = howmany(pmap_last_pa, NBPDR);
2334 	s = (vm_size_t)pv_npg * sizeof(struct pmap_large_md_page);
2335 	s = round_page(s);
2336 	pv_table = (struct pmap_large_md_page *)kva_alloc(s);
2337 	if (pv_table == NULL)
2338 		panic("%s: kva_alloc failed\n", __func__);
2339 
2340 	/*
2341 	 * Iterate physical segments to allocate space for respective pages.
2342 	 */
2343 	highest = -1;
2344 	s = 0;
2345 	for (i = 0; i < vm_phys_nsegs; i++) {
2346 		end = vm_phys_segs[i].end / NBPDR;
2347 		domain = vm_phys_segs[i].domain;
2348 
2349 		if (highest >= end)
2350 			continue;
2351 
2352 		start = highest + 1;
2353 		pvd = &pv_table[start];
2354 
2355 		pages = end - start + 1;
2356 		s = round_page(pages * sizeof(*pvd));
2357 		highest = start + (s / sizeof(*pvd)) - 1;
2358 
2359 		for (j = 0; j < s; j += PAGE_SIZE) {
2360 			vm_page_t m = vm_page_alloc_noobj_domain(domain, 0);
2361 			if (m == NULL)
2362 				panic("failed to allocate PV table page");
2363 			pmap_qenter((vm_offset_t)pvd + j, &m, 1);
2364 		}
2365 
2366 		for (j = 0; j < s / sizeof(*pvd); j++) {
2367 			rw_init_flags(&pvd->pv_lock, "pmap pv list", RW_NEW);
2368 			TAILQ_INIT(&pvd->pv_page.pv_list);
2369 			pvd->pv_page.pv_gen = 0;
2370 			pvd->pv_page.pat_mode = 0;
2371 			pvd->pv_invl_gen = 0;
2372 			pvd++;
2373 		}
2374 	}
2375 	pvd = &pv_dummy_large;
2376 	rw_init_flags(&pvd->pv_lock, "pmap pv list dummy", RW_NEW);
2377 	TAILQ_INIT(&pvd->pv_page.pv_list);
2378 	pvd->pv_page.pv_gen = 0;
2379 	pvd->pv_page.pat_mode = 0;
2380 	pvd->pv_invl_gen = 0;
2381 }
2382 #else
2383 static void
2384 pmap_init_pv_table(void)
2385 {
2386 	vm_size_t s;
2387 	long i, pv_npg;
2388 
2389 	/*
2390 	 * Initialize the pool of pv list locks.
2391 	 */
2392 	for (i = 0; i < NPV_LIST_LOCKS; i++)
2393 		rw_init(&pv_list_locks[i], "pmap pv list");
2394 
2395 	/*
2396 	 * Calculate the size of the pv head table for superpages.
2397 	 */
2398 	pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR);
2399 
2400 	/*
2401 	 * Allocate memory for the pv head table for superpages.
2402 	 */
2403 	s = (vm_size_t)pv_npg * sizeof(struct md_page);
2404 	s = round_page(s);
2405 	pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
2406 	for (i = 0; i < pv_npg; i++)
2407 		TAILQ_INIT(&pv_table[i].pv_list);
2408 	TAILQ_INIT(&pv_dummy.pv_list);
2409 }
2410 #endif
2411 
2412 /*
2413  *	Initialize the pmap module.
2414  *
2415  *	Called by vm_mem_init(), to initialize any structures that the pmap
2416  *	system needs to map virtual memory.
2417  */
2418 void
2419 pmap_init(void)
2420 {
2421 	struct pmap_preinit_mapping *ppim;
2422 	vm_page_t m, mpte;
2423 	int error, i, ret, skz63;
2424 
2425 	/* L1TF, reserve page @0 unconditionally */
2426 	vm_page_blacklist_add(0, bootverbose);
2427 
2428 	/* Detect bare-metal Skylake Server and Skylake-X. */
2429 	if (vm_guest == VM_GUEST_NO && cpu_vendor_id == CPU_VENDOR_INTEL &&
2430 	    CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) == 0x55) {
2431 		/*
2432 		 * Skylake-X errata SKZ63. Processor May Hang When
2433 		 * Executing Code In an HLE Transaction Region between
2434 		 * 40000000H and 403FFFFFH.
2435 		 *
2436 		 * Mark the pages in the range as preallocated.  It
2437 		 * seems to be impossible to distinguish between
2438 		 * Skylake Server and Skylake X.
2439 		 */
2440 		skz63 = 1;
2441 		TUNABLE_INT_FETCH("hw.skz63_enable", &skz63);
2442 		if (skz63 != 0) {
2443 			if (bootverbose)
2444 				printf("SKZ63: skipping 4M RAM starting "
2445 				    "at physical 1G\n");
2446 			for (i = 0; i < atop(0x400000); i++) {
2447 				ret = vm_page_blacklist_add(0x40000000 +
2448 				    ptoa(i), false);
2449 				if (!ret && bootverbose)
2450 					printf("page at %#x already used\n",
2451 					    0x40000000 + ptoa(i));
2452 			}
2453 		}
2454 	}
2455 
2456 	/* IFU */
2457 	pmap_allow_2m_x_ept_recalculate();
2458 
2459 	/*
2460 	 * Initialize the vm page array entries for the kernel pmap's
2461 	 * page table pages.
2462 	 */
2463 	PMAP_LOCK(kernel_pmap);
2464 	for (i = 0; i < nkpt; i++) {
2465 		mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
2466 		KASSERT(mpte >= vm_page_array &&
2467 		    mpte < &vm_page_array[vm_page_array_size],
2468 		    ("pmap_init: page table page is out of range"));
2469 		mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
2470 		mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
2471 		mpte->ref_count = 1;
2472 
2473 		/*
2474 		 * Collect the page table pages that were replaced by a 2MB
2475 		 * page in create_pagetables().  They are zero filled.
2476 		 */
2477 		if ((i == 0 ||
2478 		    kernphys + ((vm_paddr_t)(i - 1) << PDRSHIFT) < KERNend) &&
2479 		    pmap_insert_pt_page(kernel_pmap, mpte, false, false))
2480 			panic("pmap_init: pmap_insert_pt_page failed");
2481 	}
2482 	PMAP_UNLOCK(kernel_pmap);
2483 	vm_wire_add(nkpt);
2484 
2485 	/*
2486 	 * If the kernel is running on a virtual machine, then it must assume
2487 	 * that MCA is enabled by the hypervisor.  Moreover, the kernel must
2488 	 * be prepared for the hypervisor changing the vendor and family that
2489 	 * are reported by CPUID.  Consequently, the workaround for AMD Family
2490 	 * 10h Erratum 383 is enabled if the processor's feature set does not
2491 	 * include at least one feature that is only supported by older Intel
2492 	 * or newer AMD processors.
2493 	 */
2494 	if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 &&
2495 	    (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI |
2496 	    CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP |
2497 	    AMDID2_FMA4)) == 0)
2498 		workaround_erratum383 = 1;
2499 
2500 	/*
2501 	 * Are large page mappings enabled?
2502 	 */
2503 	TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
2504 	if (pg_ps_enabled) {
2505 		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
2506 		    ("pmap_init: can't assign to pagesizes[1]"));
2507 		pagesizes[1] = NBPDR;
2508 		if ((amd_feature & AMDID_PAGE1GB) != 0) {
2509 			KASSERT(MAXPAGESIZES > 2 && pagesizes[2] == 0,
2510 			    ("pmap_init: can't assign to pagesizes[2]"));
2511 			pagesizes[2] = NBPDP;
2512 		}
2513 	}
2514 
2515 	/*
2516 	 * Initialize pv chunk lists.
2517 	 */
2518 	for (i = 0; i < PMAP_MEMDOM; i++) {
2519 		mtx_init(&pv_chunks[i].pvc_lock, "pmap pv chunk list", NULL, MTX_DEF);
2520 		TAILQ_INIT(&pv_chunks[i].pvc_list);
2521 	}
2522 	pmap_init_pv_table();
2523 
2524 	pmap_initialized = 1;
2525 	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
2526 		ppim = pmap_preinit_mapping + i;
2527 		if (ppim->va == 0)
2528 			continue;
2529 		/* Make the direct map consistent */
2530 		if (ppim->pa < dmaplimit && ppim->pa + ppim->sz <= dmaplimit) {
2531 			(void)pmap_change_attr(PHYS_TO_DMAP(ppim->pa),
2532 			    ppim->sz, ppim->mode);
2533 		}
2534 		if (!bootverbose)
2535 			continue;
2536 		printf("PPIM %u: PA=%#lx, VA=%#lx, size=%#lx, mode=%#x\n", i,
2537 		    ppim->pa, ppim->va, ppim->sz, ppim->mode);
2538 	}
2539 
2540 	mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
2541 	error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
2542 	    (vmem_addr_t *)&qframe);
2543 	if (error != 0)
2544 		panic("qframe allocation failed");
2545 
2546 	lm_ents = 8;
2547 	TUNABLE_INT_FETCH("vm.pmap.large_map_pml4_entries", &lm_ents);
2548 	if (lm_ents > LMEPML4I - LMSPML4I + 1)
2549 		lm_ents = LMEPML4I - LMSPML4I + 1;
2550 #ifdef KMSAN
2551 	if (lm_ents > KMSANORIGPML4I - LMSPML4I) {
2552 		printf(
2553 	    "pmap: shrinking large map for KMSAN (%d slots to %ld slots)\n",
2554 		    lm_ents, KMSANORIGPML4I - LMSPML4I);
2555 		lm_ents = KMSANORIGPML4I - LMSPML4I;
2556 	}
2557 #endif
2558 	if (bootverbose)
2559 		printf("pmap: large map %u PML4 slots (%lu GB)\n",
2560 		    lm_ents, (u_long)lm_ents * (NBPML4 / 1024 / 1024 / 1024));
2561 	if (lm_ents != 0) {
2562 		large_vmem = vmem_create("large", LARGEMAP_MIN_ADDRESS,
2563 		    (vmem_size_t)lm_ents * NBPML4, PAGE_SIZE, 0, M_WAITOK);
2564 		if (large_vmem == NULL) {
2565 			printf("pmap: cannot create large map\n");
2566 			lm_ents = 0;
2567 		}
2568 		for (i = 0; i < lm_ents; i++) {
2569 			m = pmap_large_map_getptp_unlocked();
2570 			/* XXXKIB la57 */
2571 			kernel_pml4[LMSPML4I + i] = X86_PG_V |
2572 			    X86_PG_RW | X86_PG_A | X86_PG_M | pg_nx |
2573 			    VM_PAGE_TO_PHYS(m);
2574 		}
2575 	}
2576 }
2577 
2578 SYSCTL_UINT(_vm_pmap, OID_AUTO, large_map_pml4_entries,
2579     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &lm_ents, 0,
2580     "Maximum number of PML4 entries for use by large map (tunable).  "
2581     "Each entry corresponds to 512GB of address space.");
2582 
2583 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
2584     "2MB page mapping counters");
2585 
2586 static COUNTER_U64_DEFINE_EARLY(pmap_pde_demotions);
2587 SYSCTL_COUNTER_U64(_vm_pmap_pde, OID_AUTO, demotions,
2588     CTLFLAG_RD, &pmap_pde_demotions, "2MB page demotions");
2589 
2590 static COUNTER_U64_DEFINE_EARLY(pmap_pde_mappings);
2591 SYSCTL_COUNTER_U64(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
2592     &pmap_pde_mappings, "2MB page mappings");
2593 
2594 static COUNTER_U64_DEFINE_EARLY(pmap_pde_p_failures);
2595 SYSCTL_COUNTER_U64(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
2596     &pmap_pde_p_failures, "2MB page promotion failures");
2597 
2598 static COUNTER_U64_DEFINE_EARLY(pmap_pde_promotions);
2599 SYSCTL_COUNTER_U64(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
2600     &pmap_pde_promotions, "2MB page promotions");
2601 
2602 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pdpe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
2603     "1GB page mapping counters");
2604 
2605 static COUNTER_U64_DEFINE_EARLY(pmap_pdpe_demotions);
2606 SYSCTL_COUNTER_U64(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD,
2607     &pmap_pdpe_demotions, "1GB page demotions");
2608 
2609 /***************************************************
2610  * Low level helper routines.....
2611  ***************************************************/
2612 
2613 static pt_entry_t
2614 pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
2615 {
2616 	int x86_pat_bits = X86_PG_PTE_PAT | X86_PG_PDE_PAT;
2617 
2618 	switch (pmap->pm_type) {
2619 	case PT_X86:
2620 	case PT_RVI:
2621 		/* Verify that both PAT bits are not set at the same time */
2622 		KASSERT((entry & x86_pat_bits) != x86_pat_bits,
2623 		    ("Invalid PAT bits in entry %#lx", entry));
2624 
2625 		/* Swap the PAT bits if one of them is set */
2626 		if ((entry & x86_pat_bits) != 0)
2627 			entry ^= x86_pat_bits;
2628 		break;
2629 	case PT_EPT:
2630 		/*
2631 		 * Nothing to do - the memory attributes are represented
2632 		 * the same way for regular pages and superpages.
2633 		 */
2634 		break;
2635 	default:
2636 		panic("pmap_switch_pat_bits: bad pm_type %d", pmap->pm_type);
2637 	}
2638 
2639 	return (entry);
2640 }
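/*
 * Example (illustrative): when a 4K PTE carrying X86_PG_PTE_PAT (bit 7) is
 * promoted into a 2M PDE, bit 7 becomes PG_PS, so the PAT selector must move
 * to X86_PG_PDE_PAT (bit 12).  The XOR above performs that swap in either
 * direction:
 *
 *	newpde = pmap_swap_pat(pmap, oldpte);
 */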
2641 
2642 bool
2643 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
2644 {
2645 
2646 	return (mode >= 0 && mode < PAT_INDEX_SIZE &&
2647 	    pat_index[(int)mode] >= 0);
2648 }
2649 
2650 /*
2651  * Determine the appropriate bits to set in a PTE or PDE for a specified
2652  * caching mode.
2653  */
2654 int
2655 pmap_cache_bits(pmap_t pmap, int mode, bool is_pde)
2656 {
2657 	int cache_bits, pat_flag, pat_idx;
2658 
2659 	if (!pmap_is_valid_memattr(pmap, mode))
2660 		panic("Unknown caching mode %d\n", mode);
2661 
2662 	switch (pmap->pm_type) {
2663 	case PT_X86:
2664 	case PT_RVI:
2665 		/* The PAT bit is different for PTE's and PDE's. */
2666 		pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;
2667 
2668 		/* Map the caching mode to a PAT index. */
2669 		pat_idx = pat_index[mode];
2670 
2671 		/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
2672 		cache_bits = 0;
2673 		if (pat_idx & 0x4)
2674 			cache_bits |= pat_flag;
2675 		if (pat_idx & 0x2)
2676 			cache_bits |= PG_NC_PCD;
2677 		if (pat_idx & 0x1)
2678 			cache_bits |= PG_NC_PWT;
2679 		break;
2680 
2681 	case PT_EPT:
2682 		cache_bits = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(mode);
2683 		break;
2684 
2685 	default:
2686 		panic("unsupported pmap type %d", pmap->pm_type);
2687 	}
2688 
2689 	return (cache_bits);
2690 }
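/*
 * Worked example (illustrative): for PT_X86 and mode == PAT_WRITE_COMBINING,
 * pat_index[] (see pmap_init_pat()) yields pat_idx = 6 = 0b110, so
 *
 *	cache_bits = (is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT) | PG_NC_PCD;
 *
 * i.e. the PAT bit and PCD are set while PWT stays clear, selecting PAT
 * entry 6 (WC) as programmed in pmap_init_pat().
 */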
2691 
2692 static int
2693 pmap_cache_mask(pmap_t pmap, bool is_pde)
2694 {
2695 	int mask;
2696 
2697 	switch (pmap->pm_type) {
2698 	case PT_X86:
2699 	case PT_RVI:
2700 		mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE;
2701 		break;
2702 	case PT_EPT:
2703 		mask = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(0x7);
2704 		break;
2705 	default:
2706 		panic("pmap_cache_mask: invalid pm_type %d", pmap->pm_type);
2707 	}
2708 
2709 	return (mask);
2710 }
2711 
2712 static int
2713 pmap_pat_index(pmap_t pmap, pt_entry_t pte, bool is_pde)
2714 {
2715 	int pat_flag, pat_idx;
2716 
2717 	pat_idx = 0;
2718 	switch (pmap->pm_type) {
2719 	case PT_X86:
2720 	case PT_RVI:
2721 		/* The PAT bit is different for PTE's and PDE's. */
2722 		pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;
2723 
2724 		if ((pte & pat_flag) != 0)
2725 			pat_idx |= 0x4;
2726 		if ((pte & PG_NC_PCD) != 0)
2727 			pat_idx |= 0x2;
2728 		if ((pte & PG_NC_PWT) != 0)
2729 			pat_idx |= 0x1;
2730 		break;
2731 	case PT_EPT:
2732 		if ((pte & EPT_PG_IGNORE_PAT) != 0)
2733 			panic("EPT PTE %#lx has no PAT memory type", pte);
2734 		pat_idx = (pte & EPT_PG_MEMORY_TYPE(0x7)) >> 3;
2735 		break;
2736 	}
2737 
2738 	/* See pmap_init_pat(). */
2739 	if (pat_idx == 4)
2740 		pat_idx = 0;
2741 	if (pat_idx == 7)
2742 		pat_idx = 3;
2743 
2744 	return (pat_idx);
2745 }
2746 
2747 bool
2748 pmap_ps_enabled(pmap_t pmap)
2749 {
2750 
2751 	return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
2752 }
2753 
2754 static void
2755 pmap_update_pde_store(pmap_t pmap, pd_entry_t *pde, pd_entry_t newpde)
2756 {
2757 
2758 	switch (pmap->pm_type) {
2759 	case PT_X86:
2760 		break;
2761 	case PT_RVI:
2762 	case PT_EPT:
2763 		/*
2764 		 * XXX
2765 		 * This is a little bogus since the generation number is
2766 		 * supposed to be bumped up when a region of the address
2767 		 * space is invalidated in the page tables.
2768 		 *
2769 		 * In this case the old PDE entry is valid but yet we want
2770 		 * to make sure that any mappings using the old entry are
2771 		 * invalidated in the TLB.
2772 		 *
2773 		 * The reason this works as expected is because we rendezvous
2774 		 * "all" host cpus and force any vcpu context to exit as a
2775 		 * side-effect.
2776 		 */
2777 		atomic_add_long(&pmap->pm_eptgen, 1);
2778 		break;
2779 	default:
2780 		panic("pmap_update_pde_store: bad pm_type %d", pmap->pm_type);
2781 	}
2782 	pde_store(pde, newpde);
2783 }
2784 
2785 /*
2786  * After changing the page size for the specified virtual address in the page
2787  * table, flush the corresponding entries from the processor's TLB.  Only the
2788  * calling processor's TLB is affected.
2789  *
2790  * The calling thread must be pinned to a processor.
2791  */
2792 static void
2793 pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde)
2794 {
2795 	pt_entry_t PG_G;
2796 
2797 	if (pmap_type_guest(pmap))
2798 		return;
2799 
2800 	KASSERT(pmap->pm_type == PT_X86,
2801 	    ("pmap_update_pde_invalidate: invalid type %d", pmap->pm_type));
2802 
2803 	PG_G = pmap_global_bit(pmap);
2804 
2805 	if ((newpde & PG_PS) == 0)
2806 		/* Demotion: flush a specific 2MB page mapping. */
2807 		pmap_invlpg(pmap, va);
2808 	else if ((newpde & PG_G) == 0)
2809 		/*
2810 		 * Promotion: flush every 4KB page mapping from the TLB
2811 		 * because there are too many to flush individually.
2812 		 */
2813 		invltlb();
2814 	else {
2815 		/*
2816 		 * Promotion: flush every 4KB page mapping from the TLB,
2817 		 * including any global (PG_G) mappings.
2818 		 */
2819 		invltlb_glob();
2820 	}
2821 }
2822 
2823 /*
2824  * The amd64 pmap uses different approaches to TLB invalidation
2825  * depending on the kernel configuration, available hardware features,
2826  * and known hardware errata.  The kernel configuration option that
2827  * has the greatest operational impact on TLB invalidation is PTI,
2828  * which is enabled automatically on affected Intel CPUs.  The most
2829  * impactful hardware features are first PCID, and then INVPCID
2830  * instruction presence.  PCID usage is quite different for PTI
2831  * vs. non-PTI.
2832  *
2833  * * Kernel Page Table Isolation (PTI or KPTI) is used to mitigate
2834  *   the Meltdown bug in some Intel CPUs.  Under PTI, each user address
2835  *   space is served by two page tables, user and kernel.  The user
2836  *   page table only maps user space and a kernel trampoline.  The
2837  *   kernel trampoline includes the entirety of the kernel text but
2838  *   only the kernel data that is needed to switch from user to kernel
2839  *   mode.  The kernel page table maps the user and kernel address
2840  *   spaces in their entirety.  It is identical to the per-process
2841  *   page table used in non-PTI mode.
2842  *
2843  *   User page tables are only used when the CPU is in user mode.
2844  *   Consequently, some TLB invalidations can be postponed until the
2845  *   switch from kernel to user mode.  In contrast, the user
2846  *   space part of the kernel page table is used for copyout(9), so
2847  *   TLB invalidations on this page table cannot be similarly postponed.
2848  *
2849  *   The existence of a user mode page table for the given pmap is
2850  *   indicated by a pm_ucr3 value that differs from PMAP_NO_CR3, in
2851  *   which case pm_ucr3 contains the %cr3 register value for the user
2852  *   mode page table's root.
2853  *
2854  * * The pm_active bitmask indicates which CPUs currently have the
2855  *   pmap active.  A CPU's bit is set on context switch to the pmap, and
2856  *   cleared on switching off this CPU.  For the kernel page table,
2857  *   the pm_active field is immutable and contains all CPUs.  The
2858  *   kernel page table is always logically active on every processor,
2859  *   but not necessarily in use by the hardware, e.g., in PTI mode.
2860  *
2861  *   When requesting invalidation of virtual addresses with
2862  *   pmap_invalidate_XXX() functions, the pmap sends shootdown IPIs to
2863  *   all CPUs recorded as active in pm_active.  Updates to and reads
2864  *   from pm_active are not synchronized, and so they may race with
2865  *   each other.  Shootdown handlers are prepared to handle the race.
2866  *
2867  * * PCID is an optional feature of the long mode x86 MMU where TLB
2868  *   entries are tagged with the 'Process ID' of the address space
2869  *   they belong to.  This feature provides a limited namespace for
2870  *   process identifiers, 12 bits, supporting 4095 simultaneous IDs
2871  *   total.
2872  *
2873  *   Allocation of a PCID to a pmap is done by an algorithm described
2874  *   in section 15.12, "Other TLB Consistency Algorithms", of
2875  *   Vahalia's book "Unix Internals".  A PCID cannot be allocated for
2876  *   the whole lifetime of a pmap in pmap_pinit() due to the limited
2877  *   namespace.  Instead, a per-CPU, per-pmap PCID is assigned when
2878  *   the CPU is about to start caching TLB entries from a pmap,
2879  *   i.e., on the context switch that activates the pmap on the CPU.
2880  *
2881  *   The PCID allocator maintains a per-CPU, per-pmap generation
2882  *   count, pm_gen, which is incremented each time a new PCID is
2883  *   allocated.  On TLB invalidation, the generation counters for the
2884  *   pmap are zeroed, which signals the context switch code that the
2885  *   previously allocated PCID is no longer valid.  Effectively,
2886  *   zeroing any of these counters triggers a TLB shootdown for the
2887  *   given CPU/address space, due to the allocation of a new PCID.
2888  *
2889  *   Zeroing can be performed remotely.  Consequently, if a pmap is
2890  *   inactive on a CPU, then a TLB shootdown for that pmap and CPU can
2891  *   be initiated by an ordinary memory access to reset the target
2892  *   CPU's generation count within the pmap.  The CPU initiating the
2893  *   TLB shootdown does not need to send an IPI to the target CPU.
2894  *
2895  * * PTI + PCID.  The available PCIDs are divided into two sets: PCIDs
2896  *   for complete (kernel) page tables, and PCIDs for user mode page
2897  *   tables.  A user PCID value is obtained from the kernel PCID value
2898  *   by setting the highest bit, 11, to 1 (0x800 == PMAP_PCID_USER_PT).
2899  *
2900  *   User space page tables are activated on return to user mode, by
2901  *   loading pm_ucr3 into %cr3.  If the PCPU(ucr3_load_mask) requests
2902  *   clearing bit 63 of the loaded ucr3, this effectively causes
2903  *   complete invalidation of the user mode TLB entries for the
2904  *   current pmap.  In that case, local invalidations of individual
2905  *   pages in the user page table are skipped.
2906  *
2907  * * Local invalidation, all modes.  If the requested invalidation is
2908  *   for a specific address or the total invalidation of a currently
2909  *   active pmap, then the TLB is flushed using INVLPG for a kernel
2910  *   page table, and INVPCID(INVPCID_CTXGLOB)/invltlb_glob() for
2911  *   user space page tables.
2912  *
2913  *   If the INVPCID instruction is available, it is used to flush user
2914  *   entries from the kernel page table.
2915  *
2916  *   When PCID is enabled, the INVLPG instruction invalidates all TLB
2917  *   entries for the given page that either match the current PCID or
2918  *   are global. Since TLB entries for the same page under different
2919  *   PCIDs are unaffected, kernel pages which reside in all address
2920  *   spaces could be problematic.  We avoid the problem by creating
2921  *   all kernel PTEs with the global flag (PG_G) set, when PTI is
2922  *   disabled.
2923  *
2924  * * mode: PTI disabled, PCID present.  The kernel reserves PCID 0 for its
2925  *   address space, all other 4095 PCIDs are used for user mode spaces
2926  *   as described above.  A context switch allocates a new PCID if
2927  *   the recorded PCID is zero or the recorded generation does not match
2928  *   the CPU's generation, effectively flushing the TLB for this address space.
2929  *   Total remote invalidation is performed by zeroing pm_gen for all CPUs.
2930  *	local user page: INVLPG
2931  *	local kernel page: INVLPG
2932  *	local user total: INVPCID(CTX)
2933  *	local kernel total: INVPCID(CTXGLOB) or invltlb_glob()
2934  *	remote user page, inactive pmap: zero pm_gen
2935  *	remote user page, active pmap: zero pm_gen + IPI:INVLPG
2936  *	(Both actions are required to handle the aforementioned pm_active races.)
2937  *	remote kernel page: IPI:INVLPG
2938  *	remote user total, inactive pmap: zero pm_gen
2939  *	remote user total, active pmap: zero pm_gen + IPI:(INVPCID(CTX) or
2940  *          reload %cr3)
2941  *	(See note above about pm_active races.)
2942  *	remote kernel total: IPI:(INVPCID(CTXGLOB) or invltlb_glob())
2943  *
2944  * PTI enabled, PCID present.
2945  *	local user page: INVLPG for kpt, INVPCID(ADDR) or (INVLPG for ucr3)
2946  *          for upt
2947  *	local kernel page: INVLPG
2948  *	local user total: INVPCID(CTX) or reload %cr3 for kpt, clear PCID_SAVE
2949  *          on loading UCR3 into %cr3 for upt
2950  *	local kernel total: INVPCID(CTXGLOB) or invltlb_glob()
2951  *	remote user page, inactive pmap: zero pm_gen
2952  *	remote user page, active pmap: zero pm_gen + IPI:(INVLPG for kpt,
2953  *          INVPCID(ADDR) for upt)
2954  *	remote kernel page: IPI:INVLPG
2955  *	remote user total, inactive pmap: zero pm_gen
2956  *	remote user total, active pmap: zero pm_gen + IPI:(INVPCID(CTX) for kpt,
2957  *          clear PCID_SAVE on loading UCR3 into %cr3 for upt)
2958  *	remote kernel total: IPI:(INVPCID(CTXGLOB) or invltlb_glob())
2959  *
2960  *  No PCID.
2961  *	local user page: INVLPG
2962  *	local kernel page: INVLPG
2963  *	local user total: reload %cr3
2964  *	local kernel total: invltlb_glob()
2965  *	remote user page, inactive pmap: -
2966  *	remote user page, active pmap: IPI:INVLPG
2967  *	remote kernel page: IPI:INVLPG
2968  *	remote user total, inactive pmap: -
2969  *	remote user total, active pmap: IPI:(reload %cr3)
2970  *	remote kernel total: IPI:invltlb_glob()
2971  *  Since on return to user mode, the reload of %cr3 with ucr3 causes
2972  *  TLB invalidation, no specific action is required for user page table.
2973  *
2974  * EPT.  EPT pmaps do not map KVA, all mappings are userspace.
2975  * XXX TODO
2976  */
2977 
2978 #ifdef SMP
2979 /*
2980  * Interrupt the cpus that are executing in the guest context.
2981  * This will force the vcpu to exit and the cached EPT mappings
2982  * will be invalidated by the host before the next vmresume.
2983  */
2984 static __inline void
2985 pmap_invalidate_ept(pmap_t pmap)
2986 {
2987 	smr_seq_t goal;
2988 	int ipinum;
2989 
2990 	sched_pin();
2991 	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
2992 	    ("pmap_invalidate_ept: absurd pm_active"));
2993 
2994 	/*
2995 	 * The TLB mappings associated with a vcpu context are not
2996 	 * flushed each time a different vcpu is chosen to execute.
2997 	 *
2998 	 * This is in contrast with a process's vtop mappings that
2999 	 * are flushed from the TLB on each context switch.
3000 	 *
3001 	 * Therefore we need to do more than just a TLB shootdown on
3002 	 * the active cpus in 'pmap->pm_active'. To do this we keep
3003 	 * track of the number of invalidations performed on this pmap.
3004 	 *
3005 	 * Each vcpu keeps a cache of this counter and compares it
3006 	 * just before a vmresume. If the counter is out-of-date an
3007 	 * invept will be done to flush stale mappings from the TLB.
3008 	 *
3009 	 * To ensure that all vCPU threads have observed the new counter
3010 	 * value before returning, we use SMR.  Ordering is important here:
3011 	 * the VMM enters an SMR read section before loading the counter
3012 	 * and after updating the pm_active bit set.  Thus, pm_active is
3013 	 * a superset of active readers, and any reader that has observed
3014 	 * the goal has observed the new counter value.
3015 	 */
3016 	atomic_add_long(&pmap->pm_eptgen, 1);
3017 
3018 	goal = smr_advance(pmap->pm_eptsmr);
3019 
3020 	/*
3021 	 * Force the vcpu to exit and trap back into the hypervisor.
3022 	 */
3023 	ipinum = pmap->pm_flags & PMAP_NESTED_IPIMASK;
3024 	ipi_selected(pmap->pm_active, ipinum);
3025 	sched_unpin();
3026 
3027 	/*
3028 	 * Ensure that all active vCPUs will observe the new generation counter
3029 	 * value before executing any more guest instructions.
3030 	 */
3031 	smr_wait(pmap->pm_eptsmr, goal);
3032 }
3033 
3034 static inline void
3035 pmap_invalidate_preipi_pcid(pmap_t pmap)
3036 {
3037 	struct pmap_pcid *pcidp;
3038 	u_int cpuid, i;
3039 
3040 	sched_pin();
3041 
3042 	cpuid = PCPU_GET(cpuid);
3043 	if (pmap != PCPU_GET(curpmap))
3044 		cpuid = 0xffffffff;	/* An impossible value */
3045 
3046 	CPU_FOREACH(i) {
3047 		if (cpuid != i) {
3048 			pcidp = zpcpu_get_cpu(pmap->pm_pcidp, i);
3049 			pcidp->pm_gen = 0;
3050 		}
3051 	}
3052 
3053 	/*
3054 	 * The fence is between stores to pm_gen and the read of the
3055 	 * pm_active mask.  We need to ensure that it is impossible
3056 	 * for us to miss the bit update in pm_active and
3057 	 * simultaneously observe a non-zero pm_gen in
3058 	 * pmap_activate_sw(), otherwise TLB update is missed.
3059 	 * Without the fence, IA32 allows such an outcome.  Note that
3060 	 * pm_active is updated by a locked operation, which provides
3061 	 * the reciprocal fence.
3062 	 */
3063 	atomic_thread_fence_seq_cst();
3064 }
3065 
3066 static void
3067 pmap_invalidate_preipi_nopcid(pmap_t pmap __unused)
3068 {
3069 	sched_pin();
3070 }
3071 
3072 DEFINE_IFUNC(static, void, pmap_invalidate_preipi, (pmap_t))
3073 {
3074 	return (pmap_pcid_enabled ? pmap_invalidate_preipi_pcid :
3075 	    pmap_invalidate_preipi_nopcid);
3076 }
3077 
3078 static inline void
3079 pmap_invalidate_page_pcid_cb(pmap_t pmap, vm_offset_t va,
3080     const bool invpcid_works1)
3081 {
3082 	struct invpcid_descr d;
3083 	uint64_t kcr3, ucr3;
3084 	uint32_t pcid;
3085 
3086 	/*
3087 	 * Because pm_pcid is recalculated on a context switch, we
3088 	 * must ensure there is no preemption, not just pinning.
3089 	 * Otherwise, we might use a stale value below.
3090 	 */
3091 	CRITICAL_ASSERT(curthread);
3092 
3093 	/*
3094 	 * No need to do anything with user page table invalidation
3095 	 * if there is no user page table, or invalidation is deferred
3096 	 * until the return to userspace.  ucr3_load_mask is stable
3097 	 * because we have preemption disabled.
3098 	 */
3099 	if (pmap->pm_ucr3 == PMAP_NO_CR3 ||
3100 	    PCPU_GET(ucr3_load_mask) != PMAP_UCR3_NOMASK)
3101 		return;
3102 
3103 	pcid = pmap_get_pcid(pmap);
3104 	if (invpcid_works1) {
3105 		d.pcid = pcid | PMAP_PCID_USER_PT;
3106 		d.pad = 0;
3107 		d.addr = va;
3108 		invpcid(&d, INVPCID_ADDR);
3109 	} else {
3110 		kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
3111 		ucr3 = pmap->pm_ucr3 | pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3112 		pmap_pti_pcid_invlpg(ucr3, kcr3, va);
3113 	}
3114 }
3115 
3116 static void
3117 pmap_invalidate_page_pcid_invpcid_cb(pmap_t pmap, vm_offset_t va)
3118 {
3119 	pmap_invalidate_page_pcid_cb(pmap, va, true);
3120 }
3121 
3122 static void
3123 pmap_invalidate_page_pcid_noinvpcid_cb(pmap_t pmap, vm_offset_t va)
3124 {
3125 	pmap_invalidate_page_pcid_cb(pmap, va, false);
3126 }
3127 
3128 static void
3129 pmap_invalidate_page_nopcid_cb(pmap_t pmap __unused, vm_offset_t va __unused)
3130 {
3131 }
3132 
3133 DEFINE_IFUNC(static, void, pmap_invalidate_page_cb, (pmap_t, vm_offset_t))
3134 {
3135 	if (pmap_pcid_enabled)
3136 		return (invpcid_works ? pmap_invalidate_page_pcid_invpcid_cb :
3137 		    pmap_invalidate_page_pcid_noinvpcid_cb);
3138 	return (pmap_invalidate_page_nopcid_cb);
3139 }
3140 
3141 static void
3142 pmap_invalidate_page_curcpu_cb(pmap_t pmap, vm_offset_t va,
3143     vm_offset_t addr2 __unused)
3144 {
3145 	if (pmap == kernel_pmap) {
3146 		pmap_invlpg(kernel_pmap, va);
3147 	} else if (pmap == PCPU_GET(curpmap)) {
3148 		invlpg(va);
3149 		pmap_invalidate_page_cb(pmap, va);
3150 	}
3151 }
3152 
3153 void
3154 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
3155 {
3156 	if (pmap_type_guest(pmap)) {
3157 		pmap_invalidate_ept(pmap);
3158 		return;
3159 	}
3160 
3161 	KASSERT(pmap->pm_type == PT_X86,
3162 	    ("pmap_invalidate_page: invalid type %d", pmap->pm_type));
3163 
3164 	pmap_invalidate_preipi(pmap);
3165 	smp_masked_invlpg(va, pmap, pmap_invalidate_page_curcpu_cb);
3166 }
3167 
3168 /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */
3169 #define	PMAP_INVLPG_THRESHOLD	(4 * 1024 * PAGE_SIZE)
3170 
3171 static void
3172 pmap_invalidate_range_pcid_cb(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
3173     const bool invpcid_works1)
3174 {
3175 	struct invpcid_descr d;
3176 	uint64_t kcr3, ucr3;
3177 	uint32_t pcid;
3178 
3179 	CRITICAL_ASSERT(curthread);
3180 
3181 	if (pmap != PCPU_GET(curpmap) ||
3182 	    pmap->pm_ucr3 == PMAP_NO_CR3 ||
3183 	    PCPU_GET(ucr3_load_mask) != PMAP_UCR3_NOMASK)
3184 		return;
3185 
3186 	pcid = pmap_get_pcid(pmap);
3187 	if (invpcid_works1) {
3188 		d.pcid = pcid | PMAP_PCID_USER_PT;
3189 		d.pad = 0;
3190 		for (d.addr = sva; d.addr < eva; d.addr += PAGE_SIZE)
3191 			invpcid(&d, INVPCID_ADDR);
3192 	} else {
3193 		kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
3194 		ucr3 = pmap->pm_ucr3 | pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3195 		pmap_pti_pcid_invlrng(ucr3, kcr3, sva, eva);
3196 	}
3197 }
3198 
3199 static void
3200 pmap_invalidate_range_pcid_invpcid_cb(pmap_t pmap, vm_offset_t sva,
3201     vm_offset_t eva)
3202 {
3203 	pmap_invalidate_range_pcid_cb(pmap, sva, eva, true);
3204 }
3205 
3206 static void
3207 pmap_invalidate_range_pcid_noinvpcid_cb(pmap_t pmap, vm_offset_t sva,
3208     vm_offset_t eva)
3209 {
3210 	pmap_invalidate_range_pcid_cb(pmap, sva, eva, false);
3211 }
3212 
3213 static void
3214 pmap_invalidate_range_nopcid_cb(pmap_t pmap __unused, vm_offset_t sva __unused,
3215     vm_offset_t eva __unused)
3216 {
3217 }
3218 
3219 DEFINE_IFUNC(static, void, pmap_invalidate_range_cb, (pmap_t, vm_offset_t,
3220     vm_offset_t))
3221 {
3222 	if (pmap_pcid_enabled)
3223 		return (invpcid_works ? pmap_invalidate_range_pcid_invpcid_cb :
3224 		    pmap_invalidate_range_pcid_noinvpcid_cb);
3225 	return (pmap_invalidate_range_nopcid_cb);
3226 }
3227 
3228 static void
3229 pmap_invalidate_range_curcpu_cb(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3230 {
3231 	vm_offset_t addr;
3232 
3233 	if (pmap == kernel_pmap) {
3234 		if (PCPU_GET(pcid_invlpg_workaround)) {
3235 			struct invpcid_descr d = { 0 };
3236 
3237 			invpcid(&d, INVPCID_CTXGLOB);
3238 		} else {
3239 			for (addr = sva; addr < eva; addr += PAGE_SIZE)
3240 				invlpg(addr);
3241 		}
3242 	} else if (pmap == PCPU_GET(curpmap)) {
3243 		for (addr = sva; addr < eva; addr += PAGE_SIZE)
3244 			invlpg(addr);
3245 		pmap_invalidate_range_cb(pmap, sva, eva);
3246 	}
3247 }
3248 
3249 void
3250 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3251 {
3252 	if (eva - sva >= PMAP_INVLPG_THRESHOLD) {
3253 		pmap_invalidate_all(pmap);
3254 		return;
3255 	}
3256 
3257 	if (pmap_type_guest(pmap)) {
3258 		pmap_invalidate_ept(pmap);
3259 		return;
3260 	}
3261 
3262 	KASSERT(pmap->pm_type == PT_X86,
3263 	    ("pmap_invalidate_range: invalid type %d", pmap->pm_type));
3264 
3265 	pmap_invalidate_preipi(pmap);
3266 	smp_masked_invlpg_range(sva, eva, pmap,
3267 	    pmap_invalidate_range_curcpu_cb);
3268 }
3269 
3270 static inline void
3271 pmap_invalidate_all_pcid_cb(pmap_t pmap, bool invpcid_works1)
3272 {
3273 	struct invpcid_descr d;
3274 	uint64_t kcr3;
3275 	uint32_t pcid;
3276 
3277 	if (pmap == kernel_pmap) {
3278 		if (invpcid_works1) {
3279 			bzero(&d, sizeof(d));
3280 			invpcid(&d, INVPCID_CTXGLOB);
3281 		} else {
3282 			invltlb_glob();
3283 		}
3284 	} else if (pmap == PCPU_GET(curpmap)) {
3285 		CRITICAL_ASSERT(curthread);
3286 
3287 		pcid = pmap_get_pcid(pmap);
3288 		if (invpcid_works1) {
3289 			d.pcid = pcid;
3290 			d.pad = 0;
3291 			d.addr = 0;
3292 			invpcid(&d, INVPCID_CTX);
3293 		} else {
3294 			kcr3 = pmap->pm_cr3 | pcid;
3295 			load_cr3(kcr3);
3296 		}
3297 		if (pmap->pm_ucr3 != PMAP_NO_CR3)
3298 			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
3299 	}
3300 }
3301 
3302 static void
3303 pmap_invalidate_all_pcid_invpcid_cb(pmap_t pmap)
3304 {
3305 	pmap_invalidate_all_pcid_cb(pmap, true);
3306 }
3307 
3308 static void
3309 pmap_invalidate_all_pcid_noinvpcid_cb(pmap_t pmap)
3310 {
3311 	pmap_invalidate_all_pcid_cb(pmap, false);
3312 }
3313 
3314 static void
3315 pmap_invalidate_all_nopcid_cb(pmap_t pmap)
3316 {
3317 	if (pmap == kernel_pmap)
3318 		invltlb_glob();
3319 	else if (pmap == PCPU_GET(curpmap))
3320 		invltlb();
3321 }
3322 
3323 DEFINE_IFUNC(static, void, pmap_invalidate_all_cb, (pmap_t))
3324 {
3325 	if (pmap_pcid_enabled)
3326 		return (invpcid_works ? pmap_invalidate_all_pcid_invpcid_cb :
3327 		    pmap_invalidate_all_pcid_noinvpcid_cb);
3328 	return (pmap_invalidate_all_nopcid_cb);
3329 }
3330 
3331 static void
3332 pmap_invalidate_all_curcpu_cb(pmap_t pmap, vm_offset_t addr1 __unused,
3333     vm_offset_t addr2 __unused)
3334 {
3335 	pmap_invalidate_all_cb(pmap);
3336 }
3337 
3338 void
3339 pmap_invalidate_all(pmap_t pmap)
3340 {
3341 	if (pmap_type_guest(pmap)) {
3342 		pmap_invalidate_ept(pmap);
3343 		return;
3344 	}
3345 
3346 	KASSERT(pmap->pm_type == PT_X86,
3347 	    ("pmap_invalidate_all: invalid type %d", pmap->pm_type));
3348 
3349 	pmap_invalidate_preipi(pmap);
3350 	smp_masked_invltlb(pmap, pmap_invalidate_all_curcpu_cb);
3351 }
3352 
3353 static void
3354 pmap_invalidate_cache_curcpu_cb(pmap_t pmap __unused, vm_offset_t va __unused,
3355     vm_offset_t addr2 __unused)
3356 {
3357 	wbinvd();
3358 }
3359 
3360 void
3361 pmap_invalidate_cache(void)
3362 {
3363 	sched_pin();
3364 	smp_cache_flush(pmap_invalidate_cache_curcpu_cb);
3365 }
3366 
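/*
 * Argument block for the pmap_update_pde() rendezvous: the CPU named
 * by 'store' writes the new PDE while every CPU in 'invalidate'
 * flushes the old mapping from its TLB.
 */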
3367 struct pde_action {
3368 	cpuset_t invalidate;	/* processors that invalidate their TLB */
3369 	pmap_t pmap;
3370 	vm_offset_t va;
3371 	pd_entry_t *pde;
3372 	pd_entry_t newpde;
3373 	u_int store;		/* processor that updates the PDE */
3374 };
3375 
3376 static void
3377 pmap_update_pde_action(void *arg)
3378 {
3379 	struct pde_action *act = arg;
3380 
3381 	if (act->store == PCPU_GET(cpuid))
3382 		pmap_update_pde_store(act->pmap, act->pde, act->newpde);
3383 }
3384 
3385 static void
3386 pmap_update_pde_teardown(void *arg)
3387 {
3388 	struct pde_action *act = arg;
3389 
3390 	if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
3391 		pmap_update_pde_invalidate(act->pmap, act->va, act->newpde);
3392 }
3393 
3394 /*
3395  * Change the page size for the specified virtual address in a way that
3396  * prevents any possibility of the TLB ever having two entries that map the
3397  * same virtual address using different page sizes.  This is the recommended
3398  * workaround for Erratum 383 on AMD Family 10h processors.  It prevents a
3399  * machine check exception for a TLB state that is improperly diagnosed as a
3400  * hardware error.
3401  */
3402 static void
3403 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
3404 {
3405 	struct pde_action act;
3406 	cpuset_t active, other_cpus;
3407 	u_int cpuid;
3408 
3409 	sched_pin();
3410 	cpuid = PCPU_GET(cpuid);
3411 	other_cpus = all_cpus;
3412 	CPU_CLR(cpuid, &other_cpus);
3413 	if (pmap == kernel_pmap || pmap_type_guest(pmap))
3414 		active = all_cpus;
3415 	else {
3416 		active = pmap->pm_active;
3417 	}
3418 	if (CPU_OVERLAP(&active, &other_cpus)) {
3419 		act.store = cpuid;
3420 		act.invalidate = active;
3421 		act.va = va;
3422 		act.pmap = pmap;
3423 		act.pde = pde;
3424 		act.newpde = newpde;
3425 		CPU_SET(cpuid, &active);
3426 		smp_rendezvous_cpus(active,
3427 		    smp_no_rendezvous_barrier, pmap_update_pde_action,
3428 		    pmap_update_pde_teardown, &act);
3429 	} else {
3430 		pmap_update_pde_store(pmap, pde, newpde);
3431 		if (CPU_ISSET(cpuid, &active))
3432 			pmap_update_pde_invalidate(pmap, va, newpde);
3433 	}
3434 	sched_unpin();
3435 }
3436 #else /* !SMP */
3437 /*
3438  * Normal, non-SMP, invalidation functions.
3439  */
3440 void
3441 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
3442 {
3443 	struct invpcid_descr d;
3444 	struct pmap_pcid *pcidp;
3445 	uint64_t kcr3, ucr3;
3446 	uint32_t pcid;
3447 
3448 	if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
3449 		pmap->pm_eptgen++;
3450 		return;
3451 	}
3452 	KASSERT(pmap->pm_type == PT_X86,
3453 	    ("pmap_invalidate_page: unknown type %d", pmap->pm_type));
3454 
3455 	if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) {
3456 		invlpg(va);
3457 		if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled &&
3458 		    pmap->pm_ucr3 != PMAP_NO_CR3) {
3459 			critical_enter();
3460 			pcid = pmap_get_pcid(pmap);
3461 			if (invpcid_works) {
3462 				d.pcid = pcid | PMAP_PCID_USER_PT;
3463 				d.pad = 0;
3464 				d.addr = va;
3465 				invpcid(&d, INVPCID_ADDR);
3466 			} else {
3467 				kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
3468 				ucr3 = pmap->pm_ucr3 | pcid |
3469 				    PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3470 				pmap_pti_pcid_invlpg(ucr3, kcr3, va);
3471 			}
3472 			critical_exit();
3473 		}
3474 	} else if (pmap_pcid_enabled) {
3475 		pcidp = zpcpu_get(pmap->pm_pcidp);
3476 		pcidp->pm_gen = 0;
3477 	}
3478 }
3479 
3480 void
3481 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3482 {
3483 	struct invpcid_descr d;
3484 	struct pmap_pcid *pcidp;
3485 	vm_offset_t addr;
3486 	uint64_t kcr3, ucr3;
3487 	uint32_t pcid;
3488 
3489 	if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
3490 		pmap->pm_eptgen++;
3491 		return;
3492 	}
3493 	KASSERT(pmap->pm_type == PT_X86,
3494 	    ("pmap_invalidate_range: unknown type %d", pmap->pm_type));
3495 
3496 	if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) {
3497 		for (addr = sva; addr < eva; addr += PAGE_SIZE)
3498 			invlpg(addr);
3499 		if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled &&
3500 		    pmap->pm_ucr3 != PMAP_NO_CR3) {
3501 			critical_enter();
3502 			pcid = pmap_get_pcid(pmap);
3503 			if (invpcid_works) {
3504 				d.pcid = pcid | PMAP_PCID_USER_PT;
3505 				d.pad = 0;
3506 				d.addr = sva;
3507 				for (; d.addr < eva; d.addr += PAGE_SIZE)
3508 					invpcid(&d, INVPCID_ADDR);
3509 			} else {
3510 				kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
3511 				ucr3 = pmap->pm_ucr3 | pcid |
3512 				    PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3513 				pmap_pti_pcid_invlrng(ucr3, kcr3, sva, eva);
3514 			}
3515 			critical_exit();
3516 		}
3517 	} else if (pmap_pcid_enabled) {
3518 		pcidp = zpcpu_get(pmap->pm_pcidp);
3519 		pcidp->pm_gen = 0;
3520 	}
3521 }
3522 
3523 void
3524 pmap_invalidate_all(pmap_t pmap)
3525 {
3526 	struct invpcid_descr d;
3527 	struct pmap_pcid *pcidp;
3528 	uint64_t kcr3, ucr3;
3529 	uint32_t pcid;
3530 
3531 	if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
3532 		pmap->pm_eptgen++;
3533 		return;
3534 	}
3535 	KASSERT(pmap->pm_type == PT_X86,
3536 	    ("pmap_invalidate_all: unknown type %d", pmap->pm_type));
3537 
3538 	if (pmap == kernel_pmap) {
3539 		if (pmap_pcid_enabled && invpcid_works) {
3540 			bzero(&d, sizeof(d));
3541 			invpcid(&d, INVPCID_CTXGLOB);
3542 		} else {
3543 			invltlb_glob();
3544 		}
3545 	} else if (pmap == PCPU_GET(curpmap)) {
3546 		if (pmap_pcid_enabled) {
3547 			critical_enter();
3548 			pcid = pmap_get_pcid(pmap);
3549 			if (invpcid_works) {
3550 				d.pcid = pcid;
3551 				d.pad = 0;
3552 				d.addr = 0;
3553 				invpcid(&d, INVPCID_CTX);
3554 				if (pmap->pm_ucr3 != PMAP_NO_CR3) {
3555 					d.pcid |= PMAP_PCID_USER_PT;
3556 					invpcid(&d, INVPCID_CTX);
3557 				}
3558 			} else {
3559 				kcr3 = pmap->pm_cr3 | pcid;
3560 				if (pmap->pm_ucr3 != PMAP_NO_CR3) {
3561 					ucr3 = pmap->pm_ucr3 | pcid |
3562 					    PMAP_PCID_USER_PT;
3563 					pmap_pti_pcid_invalidate(ucr3, kcr3);
3564 				} else
3565 					load_cr3(kcr3);
3566 			}
3567 			critical_exit();
3568 		} else {
3569 			invltlb();
3570 		}
3571 	} else if (pmap_pcid_enabled) {
3572 		pcidp = zpcpu_get(pmap->pm_pcidp);
3573 		pcidp->pm_gen = 0;
3574 	}
3575 }
3576 
3577 void
3578 pmap_invalidate_cache(void)
3579 {
3580 
3581 	wbinvd();
3582 }
3583 
3584 static void
3585 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
3586 {
3587 	struct pmap_pcid *pcidp;
3588 
3589 	pmap_update_pde_store(pmap, pde, newpde);
3590 	if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap))
3591 		pmap_update_pde_invalidate(pmap, va, newpde);
3592 	else {
3593 		pcidp = zpcpu_get(pmap->pm_pcidp);
3594 		pcidp->pm_gen = 0;
3595 	}
3596 }
3597 #endif /* !SMP */
3598 
3599 static void
3600 pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
3601 {
3602 
3603 	/*
3604 	 * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
3605 	 * by a promotion that did not invalidate the 512 4KB page mappings
3606 	 * that might exist in the TLB.  Consequently, at this point, the TLB
3607 	 * may hold both 4KB and 2MB page mappings for the address range [va,
3608 	 * va + NBPDR).  Therefore, the entire range must be invalidated here.
3609 	 * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
3610 	 * 4KB page mappings for the address range [va, va + NBPDR), and so a
3611 	 * single INVLPG suffices to invalidate the 2MB page mapping from the
3612 	 * TLB.
3613 	 */
3614 	if ((pde & PG_PROMOTED) != 0)
3615 		pmap_invalidate_range(pmap, va, va + NBPDR - 1);
3616 	else
3617 		pmap_invalidate_page(pmap, va);
3618 }
3619 
3620 DEFINE_IFUNC(, void, pmap_invalidate_cache_range,
3621     (vm_offset_t sva, vm_offset_t eva))
3622 {
3623 
3624 	if ((cpu_feature & CPUID_SS) != 0)
3625 		return (pmap_invalidate_cache_range_selfsnoop);
3626 	if ((cpu_feature & CPUID_CLFSH) != 0)
3627 		return (pmap_force_invalidate_cache_range);
3628 	return (pmap_invalidate_cache_range_all);
3629 }
3630 
3631 #define PMAP_CLFLUSH_THRESHOLD   (2 * 1024 * 1024)
3632 
3633 static void
3634 pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
3635 {
3636 
3637 	KASSERT((sva & PAGE_MASK) == 0,
3638 	    ("pmap_invalidate_cache_range: sva not page-aligned"));
3639 	KASSERT((eva & PAGE_MASK) == 0,
3640 	    ("pmap_invalidate_cache_range: eva not page-aligned"));
3641 }
3642 
3643 static void
3644 pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
3645 {
3646 
3647 	pmap_invalidate_cache_range_check_align(sva, eva);
3648 }
3649 
3650 void
3651 pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
3652 {
3653 
3654 	sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
3655 
3656 	/*
3657 	 * XXX: Some CPUs fault, hang, or trash the local APIC
3658 	 * registers if we use CLFLUSH on the local APIC range.  The
3659 	 * local APIC is always uncached, so we don't need to flush
3660 	 * for that range anyway.
3661 	 */
3662 	if (pmap_kextract(sva) == lapic_paddr)
3663 		return;
3664 
3665 	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
3666 		/*
3667 		 * Do a per-cache-line flush.  Use a locked
3668 		 * instruction to ensure that previous stores are
3669 		 * included in the write-back.  The processor
3670 		 * propagates the flush to other processors in the
3671 		 * cache coherence domain.
3672 		 */
3673 		atomic_thread_fence_seq_cst();
3674 		for (; sva < eva; sva += cpu_clflush_line_size)
3675 			clflushopt(sva);
3676 		atomic_thread_fence_seq_cst();
3677 	} else {
3678 		/*
3679 		 * Writes are ordered by CLFLUSH on Intel CPUs.
3680 		 */
3681 		if (cpu_vendor_id != CPU_VENDOR_INTEL)
3682 			mfence();
3683 		for (; sva < eva; sva += cpu_clflush_line_size)
3684 			clflush(sva);
3685 		if (cpu_vendor_id != CPU_VENDOR_INTEL)
3686 			mfence();
3687 	}
3688 }
3689 
3690 static void
3691 pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
3692 {
3693 
3694 	pmap_invalidate_cache_range_check_align(sva, eva);
3695 	pmap_invalidate_cache();
3696 }
3697 
3698 /*
3699  * Remove the specified set of pages from the data and instruction caches.
3700  *
3701  * In contrast to pmap_invalidate_cache_range(), this function does not
3702  * rely on the CPU's self-snoop feature, because it is intended for use
3703  * when moving pages into a different cache domain.
3704  */
3705 void
3706 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
3707 {
3708 	vm_offset_t daddr, eva;
3709 	int i;
3710 	bool useclflushopt;
3711 
3712 	useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0;
3713 	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
3714 	    ((cpu_feature & CPUID_CLFSH) == 0 && !useclflushopt))
3715 		pmap_invalidate_cache();
3716 	else {
3717 		if (useclflushopt)
3718 			atomic_thread_fence_seq_cst();
3719 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
3720 			mfence();
3721 		for (i = 0; i < count; i++) {
3722 			daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
3723 			eva = daddr + PAGE_SIZE;
3724 			for (; daddr < eva; daddr += cpu_clflush_line_size) {
3725 				if (useclflushopt)
3726 					clflushopt(daddr);
3727 				else
3728 					clflush(daddr);
3729 			}
3730 		}
3731 		if (useclflushopt)
3732 			atomic_thread_fence_seq_cst();
3733 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
3734 			mfence();
3735 	}
3736 }
3737 
3738 void
3739 pmap_flush_cache_range(vm_offset_t sva, vm_offset_t eva)
3740 {
3741 
3742 	pmap_invalidate_cache_range_check_align(sva, eva);
3743 
3744 	if ((cpu_stdext_feature & CPUID_STDEXT_CLWB) == 0) {
3745 		pmap_force_invalidate_cache_range(sva, eva);
3746 		return;
3747 	}
3748 
3749 	/* See comment in pmap_force_invalidate_cache_range(). */
3750 	if (pmap_kextract(sva) == lapic_paddr)
3751 		return;
3752 
3753 	atomic_thread_fence_seq_cst();
3754 	for (; sva < eva; sva += cpu_clflush_line_size)
3755 		clwb(sva);
3756 	atomic_thread_fence_seq_cst();
3757 }
3758 
3759 void
3760 pmap_flush_cache_phys_range(vm_paddr_t spa, vm_paddr_t epa, vm_memattr_t mattr)
3761 {
3762 	pt_entry_t *pte;
3763 	vm_offset_t vaddr;
3764 	int error __diagused;
3765 	int pte_bits;
3766 
3767 	KASSERT((spa & PAGE_MASK) == 0,
3768 	    ("pmap_flush_cache_phys_range: spa not page-aligned"));
3769 	KASSERT((epa & PAGE_MASK) == 0,
3770 	    ("pmap_flush_cache_phys_range: epa not page-aligned"));
3771 
3772 	if (spa < dmaplimit) {
3773 		pmap_flush_cache_range(PHYS_TO_DMAP(spa), PHYS_TO_DMAP(MIN(
3774 		    dmaplimit, epa)));
3775 		if (dmaplimit >= epa)
3776 			return;
3777 		spa = dmaplimit;
3778 	}
3779 
3780 	pte_bits = pmap_cache_bits(kernel_pmap, mattr, false) | X86_PG_RW |
3781 	    X86_PG_V;
3782 	error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
3783 	    &vaddr);
3784 	KASSERT(error == 0, ("vmem_alloc failed: %d", error));
3785 	pte = vtopte(vaddr);
3786 	for (; spa < epa; spa += PAGE_SIZE) {
3787 		sched_pin();
3788 		pte_store(pte, spa | pte_bits);
3789 		pmap_invlpg(kernel_pmap, vaddr);
3790 		/* XXXKIB atomic inside flush_cache_range are excessive */
3791 		pmap_flush_cache_range(vaddr, vaddr + PAGE_SIZE);
3792 		sched_unpin();
3793 	}
3794 	vmem_free(kernel_arena, vaddr, PAGE_SIZE);
3795 }
3796 
3797 /*
3798  *	Routine:	pmap_extract
3799  *	Function:
3800  *		Extract the physical page address associated
3801  *		with the given map/virtual_address pair.
3802  */
3803 vm_paddr_t
3804 pmap_extract(pmap_t pmap, vm_offset_t va)
3805 {
3806 	pdp_entry_t *pdpe;
3807 	pd_entry_t *pde;
3808 	pt_entry_t *pte, PG_V;
3809 	vm_paddr_t pa;
3810 
3811 	pa = 0;
3812 	PG_V = pmap_valid_bit(pmap);
3813 	PMAP_LOCK(pmap);
3814 	pdpe = pmap_pdpe(pmap, va);
3815 	if (pdpe != NULL && (*pdpe & PG_V) != 0) {
3816 		if ((*pdpe & PG_PS) != 0)
3817 			pa = (*pdpe & PG_PS_FRAME) | (va & PDPMASK);
3818 		else {
3819 			pde = pmap_pdpe_to_pde(pdpe, va);
3820 			if ((*pde & PG_V) != 0) {
3821 				if ((*pde & PG_PS) != 0) {
3822 					pa = (*pde & PG_PS_FRAME) |
3823 					    (va & PDRMASK);
3824 				} else {
3825 					pte = pmap_pde_to_pte(pde, va);
3826 					pa = (*pte & PG_FRAME) |
3827 					    (va & PAGE_MASK);
3828 				}
3829 			}
3830 		}
3831 	}
3832 	PMAP_UNLOCK(pmap);
3833 	return (pa);
3834 }
3835 
3836 /*
3837  *	Routine:	pmap_extract_and_hold
3838  *	Function:
3839  *		Atomically extract and hold the physical page
3840  *		with the given pmap and virtual address pair
3841  *		if that mapping permits the given protection.
3842  */
3843 vm_page_t
3844 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
3845 {
3846 	pdp_entry_t pdpe, *pdpep;
3847 	pd_entry_t pde, *pdep;
3848 	pt_entry_t pte, PG_RW, PG_V;
3849 	vm_page_t m;
3850 
3851 	m = NULL;
3852 	PG_RW = pmap_rw_bit(pmap);
3853 	PG_V = pmap_valid_bit(pmap);
3854 	PMAP_LOCK(pmap);
3855 
3856 	pdpep = pmap_pdpe(pmap, va);
3857 	if (pdpep == NULL || ((pdpe = *pdpep) & PG_V) == 0)
3858 		goto out;
3859 	if ((pdpe & PG_PS) != 0) {
3860 		if ((pdpe & PG_RW) == 0 && (prot & VM_PROT_WRITE) != 0)
3861 			goto out;
3862 		m = PHYS_TO_VM_PAGE((pdpe & PG_PS_FRAME) | (va & PDPMASK));
3863 		goto check_page;
3864 	}
3865 
3866 	pdep = pmap_pdpe_to_pde(pdpep, va);
3867 	if (pdep == NULL || ((pde = *pdep) & PG_V) == 0)
3868 		goto out;
3869 	if ((pde & PG_PS) != 0) {
3870 		if ((pde & PG_RW) == 0 && (prot & VM_PROT_WRITE) != 0)
3871 			goto out;
3872 		m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | (va & PDRMASK));
3873 		goto check_page;
3874 	}
3875 
3876 	pte = *pmap_pde_to_pte(pdep, va);
3877 	if ((pte & PG_V) == 0 ||
3878 	    ((pte & PG_RW) == 0 && (prot & VM_PROT_WRITE) != 0))
3879 		goto out;
3880 	m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
3881 
3882 check_page:
3883 	if (m != NULL && !vm_page_wire_mapped(m))
3884 		m = NULL;
3885 out:
3886 	PMAP_UNLOCK(pmap);
3887 	return (m);
3888 }
3889 
3890 /*
3891  *	Routine:	pmap_kextract
3892  *	Function:
3893  *		Extract the physical page address associated with the given kernel
3894  *		virtual address.
3895  */
3896 vm_paddr_t
3897 pmap_kextract(vm_offset_t va)
3898 {
3899 	pd_entry_t pde;
3900 	vm_paddr_t pa;
3901 
3902 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
3903 		pa = DMAP_TO_PHYS(va);
3904 	} else if (PMAP_ADDRESS_IN_LARGEMAP(va)) {
3905 		pa = pmap_large_map_kextract(va);
3906 	} else {
3907 		pde = *vtopde(va);
3908 		if (pde & PG_PS) {
3909 			pa = (pde & PG_PS_FRAME) | (va & PDRMASK);
3910 		} else {
3911 			/*
3912 			 * Beware of a concurrent promotion that changes the
3913 			 * PDE at this point!  For example, vtopte() must not
3914 			 * be used to access the PTE because it would use the
3915 			 * new PDE.  It is, however, safe to use the old PDE
3916 			 * because the page table page is preserved by the
3917 			 * promotion.
3918 			 */
3919 			pa = *pmap_pde_to_pte(&pde, va);
3920 			pa = (pa & PG_FRAME) | (va & PAGE_MASK);
3921 		}
3922 	}
3923 	return (pa);
3924 }
3925 
3926 /***************************************************
3927  * Low level mapping routines.....
3928  ***************************************************/
3929 
3930 /*
3931  * Add a wired page to the kva.
3932  * Note: not SMP coherent.
3933  */
3934 void
3935 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
3936 {
3937 	pt_entry_t *pte;
3938 
3939 	pte = vtopte(va);
3940 	pte_store(pte, pa | pg_g | pg_nx | X86_PG_A | X86_PG_M |
3941 	    X86_PG_RW | X86_PG_V);
3942 }
3943 
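/*
 * Variant of pmap_kenter() that lets the caller specify the memory
 * attribute (cache mode) for the mapping.
 */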
3944 static __inline void
3945 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
3946 {
3947 	pt_entry_t *pte;
3948 	int cache_bits;
3949 
3950 	pte = vtopte(va);
3951 	cache_bits = pmap_cache_bits(kernel_pmap, mode, false);
3952 	pte_store(pte, pa | pg_g | pg_nx | X86_PG_A | X86_PG_M |
3953 	    X86_PG_RW | X86_PG_V | cache_bits);
3954 }
3955 
3956 /*
3957  * Remove a page from the kernel pagetables.
3958  * Note: not SMP coherent.
3959  */
3960 void
3961 pmap_kremove(vm_offset_t va)
3962 {
3963 	pt_entry_t *pte;
3964 
3965 	pte = vtopte(va);
3966 	pte_clear(pte);
3967 }
3968 
3969 /*
3970  *	Used to map a range of physical addresses into kernel
3971  *	virtual address space.
3972  *
3973  *	The value passed in '*virt' is a suggested virtual address for
3974  *	the mapping. Architectures which can support a direct-mapped
3975  *	physical to virtual region can return the appropriate address
3976  *	within that region, leaving '*virt' unchanged. Other
3977  *	architectures should map the pages starting at '*virt' and
3978  *	update '*virt' with the first usable address after the mapped
3979  *	region.
3980  */
3981 vm_offset_t
3982 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
3983 {
3984 	return (PHYS_TO_DMAP(start));
3985 }
3986 
3987 /*
3988  * Add a list of wired pages to the kva
3989  * Add a list of wired pages to the KVA.
3990  * This routine is only used for temporary
3991  * kernel mappings that do not need to have
3992  * page modification or references recorded.
3993  * Note that old mappings are simply written
3994  * over.  The pages *must* be wired.
3995  */
3996 void
3997 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
3998 {
3999 	pt_entry_t *endpte, oldpte, pa, *pte;
4000 	vm_page_t m;
4001 	int cache_bits;
4002 
4003 	oldpte = 0;
4004 	pte = vtopte(sva);
4005 	endpte = pte + count;
4006 	while (pte < endpte) {
4007 		m = *ma++;
4008 		cache_bits = pmap_cache_bits(kernel_pmap, m->md.pat_mode, false);
4009 		pa = VM_PAGE_TO_PHYS(m) | cache_bits;
4010 		if ((*pte & (PG_FRAME | X86_PG_PTE_CACHE)) != pa) {
4011 			oldpte |= *pte;
4012 			pte_store(pte, pa | pg_g | pg_nx | X86_PG_A |
4013 			    X86_PG_M | X86_PG_RW | X86_PG_V);
4014 		}
4015 		pte++;
4016 	}
4017 	if (__predict_false((oldpte & X86_PG_V) != 0))
4018 		pmap_invalidate_range(kernel_pmap, sva, sva + count *
4019 		    PAGE_SIZE);
4020 }
4021 
4022 /*
4023  * This routine tears out page mappings from the
4024  * kernel -- it is meant only for temporary mappings.
4025  * Note: SMP coherent.  Uses a ranged shootdown IPI.
4026  */
4027 void
4028 pmap_qremove(vm_offset_t sva, int count)
4029 {
4030 	vm_offset_t va;
4031 
4032 	va = sva;
4033 	while (count-- > 0) {
4034 		/*
4035 		 * pmap_enter() calls within the kernel virtual
4036 		 * address space happen on virtual addresses from
4037 		 * subarenas that import superpage-sized and -aligned
4038 		 * address ranges.  So, the virtual address that we
4039 		 * allocate to use with pmap_qenter() can't be close
4040 		 * enough to one of those pmap_enter() calls for it to
4041 		 * be caught up in a promotion.
4042 		 */
4043 		KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va));
4044 		KASSERT((*vtopde(va) & X86_PG_PS) == 0,
4045 		    ("pmap_qremove on promoted va %#lx", va));
4046 
4047 		pmap_kremove(va);
4048 		va += PAGE_SIZE;
4049 	}
4050 	pmap_invalidate_range(kernel_pmap, sva, va);
4051 }
4052 
4053 /***************************************************
4054  * Page table page management routines.....
4055  ***************************************************/
4056 /*
4057  * Schedule the specified unused page table page to be freed.  Specifically,
4058  * add the page to the specified list of pages that will be released to the
4059  * physical memory manager after the TLB has been updated.
4060  */
4061 static __inline void
4062 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO)
4063 {
4064 
4065 	if (set_PG_ZERO)
4066 		m->flags |= PG_ZERO;
4067 	else
4068 		m->flags &= ~PG_ZERO;
4069 	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
4070 }
4071 
4072 /*
4073  * Inserts the specified page table page into the specified pmap's collection
4074  * of idle page table pages.  Each of a pmap's page table pages is responsible
4075  * for mapping a distinct range of virtual addresses.  The pmap's collection is
4076  * ordered by this virtual address range.
4077  *
4078  * If "promoted" is false, then the page table page "mpte" must be zero filled;
4079  * "mpte"'s valid field will be set to 0.
4080  *
4081  * If "promoted" is true and "allpte_PG_A_set" is false, then "mpte" must
4082  * contain valid mappings with identical attributes except for PG_A; "mpte"'s
4083  * valid field will be set to 1.
4084  *
4085  * If "promoted" and "allpte_PG_A_set" are both true, then "mpte" must contain
4086  * valid mappings with identical attributes including PG_A; "mpte"'s valid
4087  * field will be set to VM_PAGE_BITS_ALL.
4088  */
4089 static __inline int
4090 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted,
4091     bool allpte_PG_A_set)
4092 {
4093 
4094 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4095 	KASSERT(promoted || !allpte_PG_A_set,
4096 	    ("a zero-filled PTP can't have PG_A set in every PTE"));
4097 	mpte->valid = promoted ? (allpte_PG_A_set ? VM_PAGE_BITS_ALL : 1) : 0;
4098 	return (vm_radix_insert(&pmap->pm_root, mpte));
4099 }
4100 
4101 /*
4102  * Removes the page table page mapping the specified virtual address from the
4103  * specified pmap's collection of idle page table pages, and returns it.
4104  * Otherwise, returns NULL if there is no page table page corresponding to the
4105  * specified virtual address.
4106  */
4107 static __inline vm_page_t
4108 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
4109 {
4110 
4111 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4112 	return (vm_radix_remove(&pmap->pm_root, pmap_pde_pindex(va)));
4113 }
4114 
4115 /*
4116  * Decrements a page table page's reference count, which is used to record the
4117  * number of valid page table entries within the page.  If the reference count
4118  * drops to zero, then the page table page is unmapped.  Returns true if the
4119  * page table page was unmapped and false otherwise.
4120  */
4121 static inline bool
4122 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4123 {
4124 
4125 	--m->ref_count;
4126 	if (m->ref_count == 0) {
4127 		_pmap_unwire_ptp(pmap, va, m, free);
4128 		return (true);
4129 	} else
4130 		return (false);
4131 }
4132 
4133 static void
4134 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4135 {
4136 	pml5_entry_t *pml5;
4137 	pml4_entry_t *pml4;
4138 	pdp_entry_t *pdp;
4139 	pd_entry_t *pd;
4140 	vm_page_t pdpg, pdppg, pml4pg;
4141 
4142 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4143 
4144 	/*
4145 	 * unmap the page table page
4146 	 */
4147 	if (m->pindex >= NUPDE + NUPDPE + NUPML4E) {
4148 		/* PML4 page */
4149 		MPASS(pmap_is_la57(pmap));
4150 		pml5 = pmap_pml5e(pmap, va);
4151 		*pml5 = 0;
4152 		if (pmap->pm_pmltopu != NULL && va <= VM_MAXUSER_ADDRESS) {
4153 			pml5 = pmap_pml5e_u(pmap, va);
4154 			*pml5 = 0;
4155 		}
4156 	} else if (m->pindex >= NUPDE + NUPDPE) {
4157 		/* PDP page */
4158 		pml4 = pmap_pml4e(pmap, va);
4159 		*pml4 = 0;
4160 		if (!pmap_is_la57(pmap) && pmap->pm_pmltopu != NULL &&
4161 		    va <= VM_MAXUSER_ADDRESS) {
4162 			pml4 = pmap_pml4e_u(pmap, va);
4163 			*pml4 = 0;
4164 		}
4165 	} else if (m->pindex >= NUPDE) {
4166 		/* PD page */
4167 		pdp = pmap_pdpe(pmap, va);
4168 		*pdp = 0;
4169 	} else {
4170 		/* PTE page */
4171 		pd = pmap_pde(pmap, va);
4172 		*pd = 0;
4173 	}
4174 	if (m->pindex < NUPDE) {
4175 		/* We just released a PT, unhold the matching PD */
4176 		pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
4177 		pmap_unwire_ptp(pmap, va, pdpg, free);
4178 	} else if (m->pindex < NUPDE + NUPDPE) {
4179 		/* We just released a PD, unhold the matching PDP */
4180 		pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
4181 		pmap_unwire_ptp(pmap, va, pdppg, free);
4182 	} else if (m->pindex < NUPDE + NUPDPE + NUPML4E && pmap_is_la57(pmap)) {
4183 		/* We just released a PDP, unhold the matching PML4 */
4184 		pml4pg = PHYS_TO_VM_PAGE(*pmap_pml5e(pmap, va) & PG_FRAME);
4185 		pmap_unwire_ptp(pmap, va, pml4pg, free);
4186 	}
4187 
4188 	pmap_pt_page_count_adj(pmap, -1);
4189 
4190 	/*
4191 	 * Put page on a list so that it is released after
4192 	 * *ALL* TLB shootdown is done
4193 	 */
4194 	pmap_add_delayed_free_list(m, free, true);
4195 }
4196 
4197 /*
4198  * After removing a page table entry, this routine is used to
4199  * conditionally free the page, and manage the reference count.
4200  */
4201 static int
4202 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
4203     struct spglist *free)
4204 {
4205 	vm_page_t mpte;
4206 
4207 	if (va >= VM_MAXUSER_ADDRESS)
4208 		return (0);
4209 	KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
4210 	mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
4211 	return (pmap_unwire_ptp(pmap, va, mpte, free));
4212 }
4213 
4214 /*
4215  * Release a page table page reference after a failed attempt to create a
4216  * mapping.
4217  */
4218 static void
4219 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
4220 {
4221 	struct spglist free;
4222 
4223 	SLIST_INIT(&free);
4224 	if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
4225 		/*
4226 		 * Although "va" was never mapped, paging-structure caches
4227 		 * could nonetheless have entries that refer to the freed
4228 		 * page table pages.  Invalidate those entries.
4229 		 */
4230 		pmap_invalidate_page(pmap, va);
4231 		vm_page_free_pages_toq(&free, true);
4232 	}
4233 }
4234 
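/*
 * Seed every CPU's PCID slot for this pmap with the given PCID and
 * generation count.
 */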
4235 static void
4236 pmap_pinit_pcids(pmap_t pmap, uint32_t pcid, int gen)
4237 {
4238 	struct pmap_pcid *pcidp;
4239 	int i;
4240 
4241 	CPU_FOREACH(i) {
4242 		pcidp = zpcpu_get_cpu(pmap->pm_pcidp, i);
4243 		pcidp->pm_pcid = pcid;
4244 		pcidp->pm_gen = gen;
4245 	}
4246 }
4247 
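/*
 * Initialize the pmap for process 0.  It shares the kernel pmap's
 * top-level page table and %cr3 rather than allocating its own, and
 * it is activated on the boot CPU here.
 */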
4248 void
4249 pmap_pinit0(pmap_t pmap)
4250 {
4251 	struct proc *p;
4252 	struct thread *td;
4253 
4254 	PMAP_LOCK_INIT(pmap);
4255 	pmap->pm_pmltop = kernel_pmap->pm_pmltop;
4256 	pmap->pm_pmltopu = NULL;
4257 	pmap->pm_cr3 = kernel_pmap->pm_cr3;
4258 	/* hack to keep pmap_pti_pcid_invalidate() alive */
4259 	pmap->pm_ucr3 = PMAP_NO_CR3;
4260 	vm_radix_init(&pmap->pm_root);
4261 	CPU_ZERO(&pmap->pm_active);
4262 	TAILQ_INIT(&pmap->pm_pvchunk);
4263 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4264 	pmap->pm_flags = pmap_flags;
4265 	pmap->pm_pcidp = uma_zalloc_pcpu(pcpu_zone_8, M_WAITOK);
4266 	pmap_pinit_pcids(pmap, PMAP_PCID_KERN + 1, 1);
4267 	pmap_activate_boot(pmap);
4268 	td = curthread;
4269 	if (pti) {
4270 		p = td->td_proc;
4271 		PROC_LOCK(p);
4272 		p->p_md.md_flags |= P_MD_KPTI;
4273 		PROC_UNLOCK(p);
4274 	}
4275 	pmap_thread_init_invl_gen(td);
4276 
4277 	if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
4278 		pmap_pkru_ranges_zone = uma_zcreate("pkru ranges",
4279 		    sizeof(struct pmap_pkru_range), NULL, NULL, NULL, NULL,
4280 		    UMA_ALIGN_PTR, 0);
4281 	}
4282 }
4283 
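/*
 * Fill a new top-level (PML4) page with the entries that are shared
 * with the kernel pmap: KVA, optional sanitizer shadows, the direct
 * map, the self-referential slot, and the large map.
 */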
4284 void
4285 pmap_pinit_pml4(vm_page_t pml4pg)
4286 {
4287 	pml4_entry_t *pm_pml4;
4288 	int i;
4289 
4290 	pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
4291 
4292 	/* Wire in kernel global address entries. */
4293 	for (i = 0; i < NKPML4E; i++) {
4294 		pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | X86_PG_RW |
4295 		    X86_PG_V;
4296 	}
4297 #ifdef KASAN
4298 	for (i = 0; i < NKASANPML4E; i++) {
4299 		pm_pml4[KASANPML4I + i] = (KASANPDPphys + ptoa(i)) | X86_PG_RW |
4300 		    X86_PG_V | pg_nx;
4301 	}
4302 #endif
4303 #ifdef KMSAN
4304 	for (i = 0; i < NKMSANSHADPML4E; i++) {
4305 		pm_pml4[KMSANSHADPML4I + i] = (KMSANSHADPDPphys + ptoa(i)) |
4306 		    X86_PG_RW | X86_PG_V | pg_nx;
4307 	}
4308 	for (i = 0; i < NKMSANORIGPML4E; i++) {
4309 		pm_pml4[KMSANORIGPML4I + i] = (KMSANORIGPDPphys + ptoa(i)) |
4310 		    X86_PG_RW | X86_PG_V | pg_nx;
4311 	}
4312 #endif
4313 	for (i = 0; i < ndmpdpphys; i++) {
4314 		pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW |
4315 		    X86_PG_V;
4316 	}
4317 
4318 	/* install self-referential address mapping entry(s) */
4319 	pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW |
4320 	    X86_PG_A | X86_PG_M;
4321 
4322 	/* install large map entries if configured */
4323 	for (i = 0; i < lm_ents; i++)
4324 		pm_pml4[LMSPML4I + i] = kernel_pmap->pm_pmltop[LMSPML4I + i];
4325 }
4326 
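/*
 * Fill a new top-level (PML5) page: point one entry at the existing
 * kernel PML4 so that all kernel mappings are visible, and install
 * the self-referential slot.
 */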
4327 void
4328 pmap_pinit_pml5(vm_page_t pml5pg)
4329 {
4330 	pml5_entry_t *pm_pml5;
4331 
4332 	pm_pml5 = (pml5_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml5pg));
4333 
4334 	/*
4335 	 * Add pml5 entry at top of KVA pointing to existing pml4 table,
4336 	 * entering all existing kernel mappings into level 5 table.
4337 	 */
4338 	pm_pml5[pmap_pml5e_index(UPT_MAX_ADDRESS)] = KPML4phys | X86_PG_V |
4339 	    X86_PG_RW | X86_PG_A | X86_PG_M;
4340 
4341 	/*
4342 	 * Install self-referential address mapping entry.
4343 	 */
4344 	pm_pml5[PML5PML5I] = VM_PAGE_TO_PHYS(pml5pg) |
4345 	    X86_PG_RW | X86_PG_V | X86_PG_M | X86_PG_A;
4346 }
4347 
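/*
 * Initialize the user (PTI) copy of a PML4 page from the global
 * pti_pml4 template.
 */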
4348 static void
4349 pmap_pinit_pml4_pti(vm_page_t pml4pgu)
4350 {
4351 	pml4_entry_t *pm_pml4u;
4352 	int i;
4353 
4354 	pm_pml4u = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pgu));
4355 	for (i = 0; i < NPML4EPG; i++)
4356 		pm_pml4u[i] = pti_pml4[i];
4357 }
4358 
4359 static void
4360 pmap_pinit_pml5_pti(vm_page_t pml5pgu)
4361 {
4362 	pml5_entry_t *pm_pml5u;
4363 
4364 	pm_pml5u = (pml5_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml5pgu));
4365 	pagezero(pm_pml5u);
4366 
4367 	/*
4368 	 * Add pml5 entry at top of KVA pointing to existing pml4 pti
4369 	 * table, entering all kernel mappings needed for usermode
4370 	 * into level 5 table.
4371 	 */
4372 	pm_pml5u[pmap_pml5e_index(UPT_MAX_ADDRESS)] =
4373 	    pmap_kextract((vm_offset_t)pti_pml4) |
4374 	    X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
4375 }
4376 
4377 /* Allocate a page table page and do related bookkeeping */
4378 static vm_page_t
4379 pmap_alloc_pt_page(pmap_t pmap, vm_pindex_t pindex, int flags)
4380 {
4381 	vm_page_t m;
4382 
4383 	m = vm_page_alloc_noobj(flags);
4384 	if (__predict_false(m == NULL))
4385 		return (NULL);
4386 	m->pindex = pindex;
4387 	pmap_pt_page_count_adj(pmap, 1);
4388 	return (m);
4389 }
4390 
4391 static void
4392 pmap_free_pt_page(pmap_t pmap, vm_page_t m, bool zerofilled)
4393 {
4394 	/*
4395 	 * This function assumes the page will need to be unwired,
4396 	 * even though the counterpart allocation in pmap_alloc_pt_page()
4397 	 * doesn't enforce VM_ALLOC_WIRED.  However, all current uses
4398 	 * of pmap_free_pt_page() require unwiring.  The case in which
4399 	 * a PT page doesn't require unwiring because its ref_count has
4400 	 * naturally reached 0 is handled through _pmap_unwire_ptp().
4401 	 */
4402 	vm_page_unwire_noq(m);
4403 	if (zerofilled)
4404 		vm_page_free_zero(m);
4405 	else
4406 		vm_page_free(m);
4407 
4408 	pmap_pt_page_count_adj(pmap, -1);
4409 }
4410 
4411 _Static_assert(sizeof(struct pmap_pcid) == 8, "Fix pcpu zone for pm_pcidp");
4412 
4413 /*
4414  * Initialize a preallocated and zeroed pmap structure,
4415  * such as one in a vmspace structure.
4416  */
4417 int
4418 pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
4419 {
4420 	vm_page_t pmltop_pg, pmltop_pgu;
4421 	vm_paddr_t pmltop_phys;
4422 
4423 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4424 
4425 	/*
4426 	 * Allocate the page directory page.  Pass NULL instead of a
4427 	 * pointer to the pmap here to avoid calling
4428 	 * pmap_resident_count_adj() through pmap_pt_page_count_adj(),
4429 	 * since that requires pmap lock.  Instead do the accounting
4430 	 * manually.
4431 	 *
4432 	 * Note that the optimization in the final call to pmap_remove(),
4433 	 * which checks for a zero resident_count, is effectively disabled
4434 	 * by accounting for the top-level page.  But that optimization
4435 	 * has not been effective since we started using a non-managed
4436 	 * mapping of the shared page.
4437 	 */
4438 	pmltop_pg = pmap_alloc_pt_page(NULL, 0, VM_ALLOC_WIRED | VM_ALLOC_ZERO |
4439 	    VM_ALLOC_WAITOK);
4440 	pmap_pt_page_count_pinit(pmap, 1);
4441 
4442 	pmltop_phys = VM_PAGE_TO_PHYS(pmltop_pg);
4443 	pmap->pm_pmltop = (pml5_entry_t *)PHYS_TO_DMAP(pmltop_phys);
4444 
4445 	if (pmap_pcid_enabled) {
4446 		if (pmap->pm_pcidp == NULL)
4447 			pmap->pm_pcidp = uma_zalloc_pcpu(pcpu_zone_8,
4448 			    M_WAITOK);
4449 		pmap_pinit_pcids(pmap, PMAP_PCID_NONE, 0);
4450 	}
4451 	pmap->pm_cr3 = PMAP_NO_CR3;	/* initialize to an invalid value */
4452 	pmap->pm_ucr3 = PMAP_NO_CR3;
4453 	pmap->pm_pmltopu = NULL;
4454 
4455 	pmap->pm_type = pm_type;
4456 
4457 	/*
4458 	 * Do not install the host kernel mappings in the nested page
4459 	 * tables. These mappings are meaningless in the guest physical
4460 	 * address space.
4461 	 * Install minimal kernel mappings in PTI case.
4462 	 */
4463 	switch (pm_type) {
4464 	case PT_X86:
4465 		pmap->pm_cr3 = pmltop_phys;
4466 		if (pmap_is_la57(pmap))
4467 			pmap_pinit_pml5(pmltop_pg);
4468 		else
4469 			pmap_pinit_pml4(pmltop_pg);
4470 		if ((curproc->p_md.md_flags & P_MD_KPTI) != 0) {
4471 			/*
4472 			 * As with pmltop_pg, pass NULL instead of a
4473 			 * pointer to the pmap to ensure that the PTI
4474 			 * page is counted explicitly.
4475 			 */
4476 			pmltop_pgu = pmap_alloc_pt_page(NULL, 0,
4477 			    VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
4478 			pmap_pt_page_count_pinit(pmap, 1);
4479 			pmap->pm_pmltopu = (pml4_entry_t *)PHYS_TO_DMAP(
4480 			    VM_PAGE_TO_PHYS(pmltop_pgu));
4481 			if (pmap_is_la57(pmap))
4482 				pmap_pinit_pml5_pti(pmltop_pgu);
4483 			else
4484 				pmap_pinit_pml4_pti(pmltop_pgu);
4485 			pmap->pm_ucr3 = VM_PAGE_TO_PHYS(pmltop_pgu);
4486 		}
4487 		if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
4488 			rangeset_init(&pmap->pm_pkru, pkru_dup_range,
4489 			    pkru_free_range, pmap, M_NOWAIT);
4490 		}
4491 		break;
4492 	case PT_EPT:
4493 	case PT_RVI:
4494 		pmap->pm_eptsmr = smr_create("pmap", 0, 0);
4495 		break;
4496 	}
4497 
4498 	vm_radix_init(&pmap->pm_root);
4499 	CPU_ZERO(&pmap->pm_active);
4500 	TAILQ_INIT(&pmap->pm_pvchunk);
4501 	pmap->pm_flags = flags;
4502 	pmap->pm_eptgen = 0;
4503 
4504 	return (1);
4505 }
4506 
4507 int
4508 pmap_pinit(pmap_t pmap)
4509 {
4510 
4511 	return (pmap_pinit_type(pmap, PT_X86, pmap_flags));
4512 }
4513 
4514 static void
4515 pmap_allocpte_free_unref(pmap_t pmap, vm_offset_t va, pt_entry_t *pte)
4516 {
4517 	vm_page_t mpg;
4518 	struct spglist free;
4519 
4520 	mpg = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
4521 	if (mpg->ref_count != 0)
4522 		return;
4523 	SLIST_INIT(&free);
4524 	_pmap_unwire_ptp(pmap, va, mpg, &free);
4525 	pmap_invalidate_page(pmap, va);
4526 	vm_page_free_pages_toq(&free, true);
4527 }
4528 
4529 static pml4_entry_t *
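/*
 * Return the PML4 entry that covers va.  With 5-level paging, the
 * containing PML4 page is allocated on demand, and its reference
 * count is adjusted according to 'addref' when the target entry is
 * not yet valid.
 */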
4530 pmap_allocpte_getpml4(pmap_t pmap, struct rwlock **lockp, vm_offset_t va,
4531     bool addref)
4532 {
4533 	vm_pindex_t pml5index;
4534 	pml5_entry_t *pml5;
4535 	pml4_entry_t *pml4;
4536 	vm_page_t pml4pg;
4537 	pt_entry_t PG_V;
4538 	bool allocated;
4539 
4540 	if (!pmap_is_la57(pmap))
4541 		return (&pmap->pm_pmltop[pmap_pml4e_index(va)]);
4542 
4543 	PG_V = pmap_valid_bit(pmap);
4544 	pml5index = pmap_pml5e_index(va);
4545 	pml5 = &pmap->pm_pmltop[pml5index];
4546 	if ((*pml5 & PG_V) == 0) {
4547 		if (pmap_allocpte_nosleep(pmap, pmap_pml5e_pindex(va), lockp,
4548 		    va) == NULL)
4549 			return (NULL);
4550 		allocated = true;
4551 	} else {
4552 		allocated = false;
4553 	}
4554 	pml4 = (pml4_entry_t *)PHYS_TO_DMAP(*pml5 & PG_FRAME);
4555 	pml4 = &pml4[pmap_pml4e_index(va)];
4556 	if ((*pml4 & PG_V) == 0) {
4557 		pml4pg = PHYS_TO_VM_PAGE(*pml5 & PG_FRAME);
4558 		if (allocated && !addref)
4559 			pml4pg->ref_count--;
4560 		else if (!allocated && addref)
4561 			pml4pg->ref_count++;
4562 	}
4563 	return (pml4);
4564 }
4565 
4566 static pdp_entry_t *
4567 pmap_allocpte_getpdp(pmap_t pmap, struct rwlock **lockp, vm_offset_t va,
4568     bool addref)
4569 {
4570 	vm_page_t pdppg;
4571 	pml4_entry_t *pml4;
4572 	pdp_entry_t *pdp;
4573 	pt_entry_t PG_V;
4574 	bool allocated;
4575 
4576 	PG_V = pmap_valid_bit(pmap);
4577 
4578 	pml4 = pmap_allocpte_getpml4(pmap, lockp, va, false);
4579 	if (pml4 == NULL)
4580 		return (NULL);
4581 
4582 	if ((*pml4 & PG_V) == 0) {
4583 		/* Have to allocate a new pdp, recurse */
4584 		if (pmap_allocpte_nosleep(pmap, pmap_pml4e_pindex(va), lockp,
4585 		    va) == NULL) {
4586 			if (pmap_is_la57(pmap))
4587 				pmap_allocpte_free_unref(pmap, va,
4588 				    pmap_pml5e(pmap, va));
4589 			return (NULL);
4590 		}
4591 		allocated = true;
4592 	} else {
4593 		allocated = false;
4594 	}
4595 	pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
4596 	pdp = &pdp[pmap_pdpe_index(va)];
4597 	if ((*pdp & PG_V) == 0) {
4598 		pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
4599 		if (allocated && !addref)
4600 			pdppg->ref_count--;
4601 		else if (!allocated && addref)
4602 			pdppg->ref_count++;
4603 	}
4604 	return (pdp);
4605 }
4606 
4607 /*
4608  * The ptepindexes, i.e. page indices, of the page table pages encountered
4609  * while translating virtual address va are defined as follows:
4610  * - for the page table page (last level),
4611  *      ptepindex = pmap_pde_pindex(va) = va >> PDRSHIFT,
4612  *   in other words, it is just the index of the PDE that maps the page
4613  *   table page.
4614  * - for the page directory page,
4615  *      ptepindex = NUPDE (number of userland PD entries) +
4616  *          (pmap_pde_index(va) >> NPDEPGSHIFT)
4617  *   i.e. index of PDPE is put after the last index of PDE,
4618  * - for the page directory pointer page,
4619  *      ptepindex = NUPDE + NUPDPE + (pmap_pde_index(va) >> (NPDEPGSHIFT +
4620  *          NPML4EPGSHIFT)),
4621  *   i.e. index of pml4e is put after the last index of PDPE,
4622  * - for the PML4 page (if LA57 mode is enabled),
4623  *      ptepindex = NUPDE + NUPDPE + NUPML4E + (pmap_pde_index(va) >>
4624  *          (NPDEPGSHIFT + NPML4EPGSHIFT + NPML5EPGSHIFT)),
4625  *   i.e. index of pml5e is put after the last index of PML4E.
4626  *
4627  * Define an order on the paging entries, where all entries of the
4628  * same height are put together, then heights are put from deepest to
4629  * root.  Then ptepindex is the sequential number of the
4630  * corresponding paging entry in this order.
4631  *
4632  * The values of NUPDE, NUPDPE, and NUPML4E are determined by the size of
4633  * LA57 paging structures even in LA48 paging mode. Moreover, the
4634  * ptepindexes are calculated as if the paging structures were 5-level
4635  * regardless of the actual mode of operation.
4636  *
4637  * The root page at PML4/PML5 does not participate in this indexing scheme,
4638  * since it is statically allocated by pmap_pinit() and not by pmap_allocpte().
4639  */
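/*
 * For example (illustrative), for va == 0 the page table page has
 * ptepindex 0, its page directory page has ptepindex NUPDE, its page
 * directory pointer page has ptepindex NUPDE + NUPDPE, and, under
 * LA57, its PML4 page has ptepindex NUPDE + NUPDPE + NUPML4E.
 */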
4640 static vm_page_t
4641 pmap_allocpte_nosleep(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp,
4642     vm_offset_t va)
4643 {
4644 	vm_pindex_t pml5index, pml4index;
4645 	pml5_entry_t *pml5, *pml5u;
4646 	pml4_entry_t *pml4, *pml4u;
4647 	pdp_entry_t *pdp;
4648 	pd_entry_t *pd;
4649 	vm_page_t m, pdpg;
4650 	pt_entry_t PG_A, PG_M, PG_RW, PG_V;
4651 
4652 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4653 
4654 	PG_A = pmap_accessed_bit(pmap);
4655 	PG_M = pmap_modified_bit(pmap);
4656 	PG_V = pmap_valid_bit(pmap);
4657 	PG_RW = pmap_rw_bit(pmap);
4658 
4659 	/*
4660 	 * Allocate a page table page.
4661 	 */
4662 	m = pmap_alloc_pt_page(pmap, ptepindex,
4663 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
4664 	if (m == NULL)
4665 		return (NULL);
4666 
4667 	/*
4668 	 * Map the pagetable page into the process address space, if
4669 	 * it isn't already there.
4670 	 */
4671 	if (ptepindex >= NUPDE + NUPDPE + NUPML4E) {
4672 		MPASS(pmap_is_la57(pmap));
4673 
4674 		pml5index = pmap_pml5e_index(va);
4675 		pml5 = &pmap->pm_pmltop[pml5index];
4676 		KASSERT((*pml5 & PG_V) == 0,
4677 		    ("pmap %p va %#lx pml5 %#lx", pmap, va, *pml5));
4678 		*pml5 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4679 
4680 		if (pmap->pm_pmltopu != NULL && pml5index < NUPML5E) {
4681 			MPASS(pmap->pm_ucr3 != PMAP_NO_CR3);
4682 			*pml5 |= pg_nx;
4683 
4684 			pml5u = &pmap->pm_pmltopu[pml5index];
4685 			*pml5u = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V |
4686 			    PG_A | PG_M;
4687 		}
4688 	} else if (ptepindex >= NUPDE + NUPDPE) {
4689 		pml4index = pmap_pml4e_index(va);
4690 		/* Wire up a new PDPE page */
4691 		pml4 = pmap_allocpte_getpml4(pmap, lockp, va, true);
4692 		if (pml4 == NULL) {
4693 			pmap_free_pt_page(pmap, m, true);
4694 			return (NULL);
4695 		}
4696 		KASSERT((*pml4 & PG_V) == 0,
4697 		    ("pmap %p va %#lx pml4 %#lx", pmap, va, *pml4));
4698 		*pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4699 
4700 		if (!pmap_is_la57(pmap) && pmap->pm_pmltopu != NULL &&
4701 		    pml4index < NUPML4E) {
4702 			MPASS(pmap->pm_ucr3 != PMAP_NO_CR3);
4703 
4704 			/*
4705 			 * PTI: Make all user-space mappings in the
4706 			 * kernel-mode page table no-execute so that
4707 			 * we detect any programming errors that leave
4708 			 * the kernel-mode page table active on return
4709 			 * to user space.
4710 			 */
4711 			*pml4 |= pg_nx;
4712 
4713 			pml4u = &pmap->pm_pmltopu[pml4index];
4714 			*pml4u = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V |
4715 			    PG_A | PG_M;
4716 		}
4717 	} else if (ptepindex >= NUPDE) {
4718 		/* Wire up a new PDE page */
4719 		pdp = pmap_allocpte_getpdp(pmap, lockp, va, true);
4720 		if (pdp == NULL) {
4721 			pmap_free_pt_page(pmap, m, true);
4722 			return (NULL);
4723 		}
4724 		KASSERT((*pdp & PG_V) == 0,
4725 		    ("pmap %p va %#lx pdp %#lx", pmap, va, *pdp));
4726 		*pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4727 	} else {
4728 		/* Wire up a new PTE page */
4729 		pdp = pmap_allocpte_getpdp(pmap, lockp, va, false);
4730 		if (pdp == NULL) {
4731 			pmap_free_pt_page(pmap, m, true);
4732 			return (NULL);
4733 		}
4734 		if ((*pdp & PG_V) == 0) {
4735 			/* Have to allocate a new pd, recurse */
4736 			if (pmap_allocpte_nosleep(pmap, pmap_pdpe_pindex(va),
4737 			    lockp, va) == NULL) {
4738 				pmap_allocpte_free_unref(pmap, va,
4739 				    pmap_pml4e(pmap, va));
4740 				pmap_free_pt_page(pmap, m, true);
4741 				return (NULL);
4742 			}
4743 		} else {
4744 			/* Add reference to the pd page */
4745 			pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
4746 			pdpg->ref_count++;
4747 		}
4748 		pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
4749 
4750 		/* Now we know where the page directory page is */
4751 		pd = &pd[pmap_pde_index(va)];
4752 		KASSERT((*pd & PG_V) == 0,
4753 		    ("pmap %p va %#lx pd %#lx", pmap, va, *pd));
4754 		*pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4755 	}
4756 
4757 	return (m);
4758 }
4759 
4760 /*
4761  * This routine is called if the desired page table page does not exist.
4762  *
4763  * If page table page allocation fails, this routine may sleep before
4764  * returning NULL.  It sleeps only if a lock pointer was given.  Sleep
4765  * occurs right before returning to the caller. This way, we never
4766  * drop pmap lock to sleep while a page table page has ref_count == 0,
4767  * which prevents the page from being freed under us.
4768  */
4769 static vm_page_t
4770 pmap_allocpte_alloc(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp,
4771     vm_offset_t va)
4772 {
4773 	vm_page_t m;
4774 
4775 	m = pmap_allocpte_nosleep(pmap, ptepindex, lockp, va);
4776 	if (m == NULL && lockp != NULL) {
4777 		RELEASE_PV_LIST_LOCK(lockp);
4778 		PMAP_UNLOCK(pmap);
4779 		PMAP_ASSERT_NOT_IN_DI();
4780 		vm_wait(NULL);
4781 		PMAP_LOCK(pmap);
4782 	}
4783 	return (m);
4784 }
4785 
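/*
 * Return the PDE for va, allocating and wiring the containing page
 * directory page for user addresses if needed.  '*pdpgp' is set to
 * that page, or to NULL for kernel addresses.
 */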
4786 static pd_entry_t *
4787 pmap_alloc_pde(pmap_t pmap, vm_offset_t va, vm_page_t *pdpgp,
4788     struct rwlock **lockp)
4789 {
4790 	pdp_entry_t *pdpe, PG_V;
4791 	pd_entry_t *pde;
4792 	vm_page_t pdpg;
4793 	vm_pindex_t pdpindex;
4794 
4795 	PG_V = pmap_valid_bit(pmap);
4796 
4797 retry:
4798 	pdpe = pmap_pdpe(pmap, va);
4799 	if (pdpe != NULL && (*pdpe & PG_V) != 0) {
4800 		pde = pmap_pdpe_to_pde(pdpe, va);
4801 		if (va < VM_MAXUSER_ADDRESS) {
4802 			/* Add a reference to the pd page. */
4803 			pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
4804 			pdpg->ref_count++;
4805 		} else
4806 			pdpg = NULL;
4807 	} else if (va < VM_MAXUSER_ADDRESS) {
4808 		/* Allocate a pd page. */
4809 		pdpindex = pmap_pde_pindex(va) >> NPDPEPGSHIFT;
4810 		pdpg = pmap_allocpte_alloc(pmap, NUPDE + pdpindex, lockp, va);
4811 		if (pdpg == NULL) {
4812 			if (lockp != NULL)
4813 				goto retry;
4814 			else
4815 				return (NULL);
4816 		}
4817 		pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
4818 		pde = &pde[pmap_pde_index(va)];
4819 	} else
4820 		panic("pmap_alloc_pde: missing page table page for va %#lx",
4821 		    va);
4822 	*pdpgp = pdpg;
4823 	return (pde);
4824 }
4825 
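/*
 * Return the page table page backing the PTE for va, allocating it
 * (or demoting an existing 2MB mapping) as needed and accounting for
 * the caller's reference.
 */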
4826 static vm_page_t
4827 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4828 {
4829 	vm_pindex_t ptepindex;
4830 	pd_entry_t *pd, PG_V;
4831 	vm_page_t m;
4832 
4833 	PG_V = pmap_valid_bit(pmap);
4834 
4835 	/*
4836 	 * Calculate pagetable page index
4837 	 */
4838 	ptepindex = pmap_pde_pindex(va);
4839 retry:
4840 	/*
4841 	 * Get the page directory entry
4842 	 */
4843 	pd = pmap_pde(pmap, va);
4844 
4845 	/*
4846 	 * This supports switching from a 2MB page to a
4847 	 * normal 4K page.
4848 	 */
4849 	if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
4850 		if (!pmap_demote_pde_locked(pmap, pd, va, lockp)) {
4851 			/*
4852 			 * Invalidation of the 2MB page mapping may have caused
4853 			 * the deallocation of the underlying PD page.
4854 			 */
4855 			pd = NULL;
4856 		}
4857 	}
4858 
4859 	/*
4860 	 * If the page table page is mapped, we just increment the
4861 	 * hold count, and activate it.
4862 	 */
4863 	if (pd != NULL && (*pd & PG_V) != 0) {
4864 		m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
4865 		m->ref_count++;
4866 	} else {
4867 		/*
4868 		 * Here if the pte page isn't mapped, or if it has been
4869 		 * deallocated.
4870 		 */
4871 		m = pmap_allocpte_alloc(pmap, ptepindex, lockp, va);
4872 		if (m == NULL && lockp != NULL)
4873 			goto retry;
4874 	}
4875 	return (m);
4876 }
4877 
4878 /***************************************************
4879  * Pmap allocation/deallocation routines.
4880  ***************************************************/
4881 
4882 /*
4883  * Release any resources held by the given physical map.
4884  * Called when a pmap initialized by pmap_pinit is being released.
4885  * Should only be called if the map contains no valid mappings.
4886  */
4887 void
4888 pmap_release(pmap_t pmap)
4889 {
4890 	vm_page_t m;
4891 	int i;
4892 
4893 	KASSERT(vm_radix_is_empty(&pmap->pm_root),
4894 	    ("pmap_release: pmap %p has reserved page table page(s)",
4895 	    pmap));
4896 	KASSERT(CPU_EMPTY(&pmap->pm_active),
4897 	    ("releasing active pmap %p", pmap));
4898 
4899 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pmltop));
4900 
4901 	if (pmap_is_la57(pmap)) {
4902 		pmap->pm_pmltop[pmap_pml5e_index(UPT_MAX_ADDRESS)] = 0;
4903 		pmap->pm_pmltop[PML5PML5I] = 0;
4904 	} else {
4905 		for (i = 0; i < NKPML4E; i++)	/* KVA */
4906 			pmap->pm_pmltop[KPML4BASE + i] = 0;
4907 #ifdef KASAN
4908 		for (i = 0; i < NKASANPML4E; i++) /* KASAN shadow map */
4909 			pmap->pm_pmltop[KASANPML4I + i] = 0;
4910 #endif
4911 #ifdef KMSAN
4912 		for (i = 0; i < NKMSANSHADPML4E; i++) /* KMSAN shadow map */
4913 			pmap->pm_pmltop[KMSANSHADPML4I + i] = 0;
4914 		for (i = 0; i < NKMSANORIGPML4E; i++) /* KMSAN origin map */
4915 			pmap->pm_pmltop[KMSANORIGPML4I + i] = 0;
4916 #endif
4917 		for (i = 0; i < ndmpdpphys; i++)/* Direct Map */
4918 			pmap->pm_pmltop[DMPML4I + i] = 0;
4919 		pmap->pm_pmltop[PML4PML4I] = 0;	/* Recursive Mapping */
4920 		for (i = 0; i < lm_ents; i++)	/* Large Map */
4921 			pmap->pm_pmltop[LMSPML4I + i] = 0;
4922 	}
4923 
4924 	pmap_free_pt_page(NULL, m, true);
4925 	pmap_pt_page_count_pinit(pmap, -1);
4926 
4927 	if (pmap->pm_pmltopu != NULL) {
4928 		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->
4929 		    pm_pmltopu));
4930 		pmap_free_pt_page(NULL, m, false);
4931 		pmap_pt_page_count_pinit(pmap, -1);
4932 	}
4933 	if (pmap->pm_type == PT_X86 &&
4934 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0)
4935 		rangeset_fini(&pmap->pm_pkru);
4936 
4937 	KASSERT(pmap->pm_stats.resident_count == 0,
4938 	    ("pmap_release: pmap %p resident count %ld != 0",
4939 	    pmap, pmap->pm_stats.resident_count));
4940 }
4941 
4942 static int
4943 kvm_size(SYSCTL_HANDLER_ARGS)
4944 {
4945 	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
4946 
4947 	return sysctl_handle_long(oidp, &ksize, 0, req);
4948 }
4949 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
4950     0, 0, kvm_size, "LU",
4951     "Size of KVM");
4952 
4953 static int
4954 kvm_free(SYSCTL_HANDLER_ARGS)
4955 {
4956 	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
4957 
4958 	return sysctl_handle_long(oidp, &kfree, 0, req);
4959 }
4960 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
4961     0, 0, kvm_free, "LU",
4962     "Amount of KVM free");
4963 
4964 #ifdef KMSAN
4965 static void
4966 pmap_kmsan_shadow_map_page_array(vm_paddr_t pdppa, vm_size_t size)
4967 {
4968 	pdp_entry_t *pdpe;
4969 	pd_entry_t *pde;
4970 	pt_entry_t *pte;
4971 	vm_paddr_t dummypa, dummypd, dummypt;
4972 	int i, npde, npdpg;
4973 
4974 	npdpg = howmany(size, NBPDP);
4975 	npde = size / NBPDR;
4976 
4977 	dummypa = vm_phys_early_alloc(-1, PAGE_SIZE);
4978 	pagezero((void *)PHYS_TO_DMAP(dummypa));
4979 
4980 	dummypt = vm_phys_early_alloc(-1, PAGE_SIZE);
4981 	pagezero((void *)PHYS_TO_DMAP(dummypt));
4982 	dummypd = vm_phys_early_alloc(-1, PAGE_SIZE * npdpg);
4983 	for (i = 0; i < npdpg; i++)
4984 		pagezero((void *)PHYS_TO_DMAP(dummypd + ptoa(i)));
4985 
4986 	pte = (pt_entry_t *)PHYS_TO_DMAP(dummypt);
4987 	for (i = 0; i < NPTEPG; i++)
4988 		pte[i] = (pt_entry_t)(dummypa | X86_PG_V | X86_PG_RW |
4989 		    X86_PG_A | X86_PG_M | pg_nx);
4990 
4991 	pde = (pd_entry_t *)PHYS_TO_DMAP(dummypd);
4992 	for (i = 0; i < npde; i++)
4993 		pde[i] = (pd_entry_t)(dummypt | X86_PG_V | X86_PG_RW | pg_nx);
4994 
4995 	pdpe = (pdp_entry_t *)PHYS_TO_DMAP(pdppa);
4996 	for (i = 0; i < npdpg; i++)
4997 		pdpe[i] = (pdp_entry_t)(dummypd + ptoa(i) | X86_PG_V |
4998 		    X86_PG_RW | pg_nx);
4999 }
5000 
5001 static void
5002 pmap_kmsan_page_array_startup(vm_offset_t start, vm_offset_t end)
5003 {
5004 	vm_size_t size;
5005 
5006 	KASSERT(start % NBPDP == 0, ("unaligned page array start address"));
5007 
5008 	/*
5009 	 * The end of the page array's KVA region is 2MB aligned, see
5010 	 * kmem_init().
5011 	 */
5012 	size = round_2mpage(end) - start;
5013 	pmap_kmsan_shadow_map_page_array(KMSANSHADPDPphys, size);
5014 	pmap_kmsan_shadow_map_page_array(KMSANORIGPDPphys, size);
5015 }
5016 #endif
5017 
5018 /*
5019  * Allocate physical memory for the vm_page array and map it into KVA,
5020  * attempting to back the vm_pages with domain-local memory.
5021  */
5022 void
5023 pmap_page_array_startup(long pages)
5024 {
5025 	pdp_entry_t *pdpe;
5026 	pd_entry_t *pde, newpdir;
5027 	vm_offset_t va, start, end;
5028 	vm_paddr_t pa;
5029 	long pfn;
5030 	int domain, i;
5031 
5032 	vm_page_array_size = pages;
5033 
5034 	start = VM_MIN_KERNEL_ADDRESS;
5035 	end = start + pages * sizeof(struct vm_page);
5036 	for (va = start; va < end; va += NBPDR) {
5037 		pfn = first_page + (va - start) / sizeof(struct vm_page);
5038 		domain = vm_phys_domain(ptoa(pfn));
5039 		pdpe = pmap_pdpe(kernel_pmap, va);
5040 		if ((*pdpe & X86_PG_V) == 0) {
5041 			pa = vm_phys_early_alloc(domain, PAGE_SIZE);
5042 			dump_add_page(pa);
5043 			pagezero((void *)PHYS_TO_DMAP(pa));
5044 			*pdpe = (pdp_entry_t)(pa | X86_PG_V | X86_PG_RW |
5045 			    X86_PG_A | X86_PG_M);
5046 		}
5047 		pde = pmap_pdpe_to_pde(pdpe, va);
5048 		if ((*pde & X86_PG_V) != 0)
5049 			panic("Unexpected pde");
5050 		pa = vm_phys_early_alloc(domain, NBPDR);
5051 		for (i = 0; i < NPDEPG; i++)
5052 			dump_add_page(pa + i * PAGE_SIZE);
5053 		newpdir = (pd_entry_t)(pa | X86_PG_V | X86_PG_RW | X86_PG_A |
5054 		    X86_PG_M | PG_PS | pg_g | pg_nx);
5055 		pde_store(pde, newpdir);
5056 	}
5057 	vm_page_array = (vm_page_t)start;
5058 
5059 #ifdef KMSAN
5060 	pmap_kmsan_page_array_startup(start, end);
5061 #endif
5062 }
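
/*
 * Illustrative only (not part of pmap.c): the loop above backs each 2MB
 * chunk of the vm_page array with memory from the NUMA domain of the pages
 * that chunk will describe.  A standalone sketch of the index arithmetic,
 * using made-up values for the structure size and first_page purely for
 * demonstration.
 */
#if 0	/* example, not compiled */
#include <stdio.h>

#define	EX_NBPDR	(1UL << 21)	/* assumed NBPDR: 2MB */
#define	EX_VM_PAGE_SZ	104UL		/* hypothetical sizeof(struct vm_page) */

int
main(void)
{
	unsigned long start = 0, end = 4 * EX_NBPDR, va, first_page = 0x1000;

	for (va = start; va < end; va += EX_NBPDR) {
		/* First pfn described by the vm_page structures in this chunk. */
		unsigned long pfn = first_page + (va - start) / EX_VM_PAGE_SZ;
		printf("chunk at +0x%lx describes pages starting at pfn %#lx\n",
		    va - start, pfn);
	}
	return (0);
}
#endif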
5063 
5064 /*
5065  * grow the number of kernel page table entries, if needed
5066  */
5067 static int
5068 pmap_growkernel_nopanic(vm_offset_t addr)
5069 {
5070 	vm_paddr_t paddr;
5071 	vm_page_t nkpg;
5072 	pd_entry_t *pde, newpdir;
5073 	pdp_entry_t *pdpe;
5074 	vm_offset_t end;
5075 	int rv;
5076 
5077 	TSENTER();
5078 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
5079 	rv = KERN_SUCCESS;
5080 
5081 	/*
5082 	 * The kernel map covers two distinct regions of KVA: that used
5083 	 * for dynamic kernel memory allocations, and the uppermost 2GB
5084 	 * of the virtual address space.  The latter is used to map the
5085 	 * kernel and loadable kernel modules.  This scheme enables the
5086 	 * use of a special code generation model for kernel code which
5087 	 * takes advantage of compact addressing modes in machine code.
5088 	 *
5089 	 * Both regions grow upwards; to avoid wasting memory, the gap
5090 	 * in between is unmapped.  If "addr" is above "KERNBASE", the
5091 	 * kernel's region is grown, otherwise the kmem region is grown.
5092 	 *
5093 	 * The correctness of this action is based on the following
5094 	 * argument: vm_map_insert() allocates contiguous ranges of the
5095 	 * kernel virtual address space.  It calls this function if a range
5096 	 * ends after "kernel_vm_end".  If the kernel is mapped between
5097 	 * "kernel_vm_end" and "addr", then the range cannot begin at
5098 	 * "kernel_vm_end".  In fact, its beginning address cannot be less
5099 	 * than the kernel.  Thus, there is no immediate need to allocate
5100 	 * any new kernel page table pages between "kernel_vm_end" and
5101 	 * "KERNBASE".
5102 	 */
5103 	if (KERNBASE < addr) {
5104 		end = KERNBASE + nkpt * NBPDR;
5105 		if (end == 0) {
5106 			TSEXIT();
5107 			return (rv);
5108 		}
5109 	} else {
5110 		end = kernel_vm_end;
5111 	}
5112 
5113 	addr = roundup2(addr, NBPDR);
5114 	if (addr - 1 >= vm_map_max(kernel_map))
5115 		addr = vm_map_max(kernel_map);
5116 	if (addr <= end) {
5117 		/*
5118 		 * The grown region is already mapped, so there is
5119 		 * nothing to do.
5120 		 */
5121 		TSEXIT();
5122 		return (rv);
5123 	}
5124 
5125 	kasan_shadow_map(end, addr - end);
5126 	kmsan_shadow_map(end, addr - end);
5127 	while (end < addr) {
5128 		pdpe = pmap_pdpe(kernel_pmap, end);
5129 		if ((*pdpe & X86_PG_V) == 0) {
5130 			nkpg = pmap_alloc_pt_page(kernel_pmap,
5131 			    pmap_pdpe_pindex(end), VM_ALLOC_INTERRUPT |
5132 			        VM_ALLOC_NOFREE | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
5133 			if (nkpg == NULL) {
5134 				rv = KERN_RESOURCE_SHORTAGE;
5135 				break;
5136 			}
5137 			paddr = VM_PAGE_TO_PHYS(nkpg);
5138 			*pdpe = (pdp_entry_t)(paddr | X86_PG_V | X86_PG_RW |
5139 			    X86_PG_A | X86_PG_M);
5140 			continue; /* try again */
5141 		}
5142 		pde = pmap_pdpe_to_pde(pdpe, end);
5143 		if ((*pde & X86_PG_V) != 0) {
5144 			end = (end + NBPDR) & ~PDRMASK;
5145 			if (end - 1 >= vm_map_max(kernel_map)) {
5146 				end = vm_map_max(kernel_map);
5147 				break;
5148 			}
5149 			continue;
5150 		}
5151 
5152 		nkpg = pmap_alloc_pt_page(kernel_pmap, pmap_pde_pindex(end),
5153 		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOFREE | VM_ALLOC_WIRED |
5154 			VM_ALLOC_ZERO);
5155 		if (nkpg == NULL) {
5156 			rv = KERN_RESOURCE_SHORTAGE;
5157 			break;
5158 		}
5159 
5160 		paddr = VM_PAGE_TO_PHYS(nkpg);
5161 		newpdir = paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
5162 		pde_store(pde, newpdir);
5163 
5164 		end = (end + NBPDR) & ~PDRMASK;
5165 		if (end - 1 >= vm_map_max(kernel_map)) {
5166 			end = vm_map_max(kernel_map);
5167 			break;
5168 		}
5169 	}
5170 
5171 	if (end <= KERNBASE)
5172 		kernel_vm_end = end;
5173 	else
5174 		nkpt = howmany(end - KERNBASE, NBPDR);
5175 	TSEXIT();
5176 	return (rv);
5177 }
5178 
5179 int
5180 pmap_growkernel(vm_offset_t addr)
5181 {
5182 	int rv;
5183 
5184 	rv = pmap_growkernel_nopanic(addr);
5185 	if (rv != KERN_SUCCESS && pmap_growkernel_panic)
5186 		panic("pmap_growkernel: no memory to grow kernel");
5187 	return (rv);
5188 }
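
/*
 * Illustrative only (not part of pmap.c): pmap_growkernel_nopanic() rounds
 * the requested address up to a 2MB (NBPDR) boundary and compares it with
 * the current end of the mapped region to decide how many new page table
 * pages are needed.  A standalone sketch of that rounding, assuming
 * roundup2() is the usual power-of-two round-up macro.
 */
#if 0	/* example, not compiled */
#include <stdio.h>

#define	EX_NBPDR		(1UL << 21)	/* assumed NBPDR: 2MB */
#define	EX_ROUNDUP2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
	unsigned long end = 16 * EX_NBPDR;		/* current mapped end */
	unsigned long addr = end + 3 * EX_NBPDR / 2;	/* requested address */

	addr = EX_ROUNDUP2(addr, EX_NBPDR);
	if (addr <= end)
		printf("already mapped, nothing to grow\n");
	else
		printf("grow by %lu page table pages (2MB of KVA each)\n",
		    (addr - end) / EX_NBPDR);
	return (0);
}
#endif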
5189 
5190 /***************************************************
5191  * page management routines.
5192  ***************************************************/
5193 
5194 static const uint64_t pc_freemask[_NPCM] = {
5195 	[0 ... _NPCM - 2] = PC_FREEN,
5196 	[_NPCM - 1] = PC_FREEL
5197 };
5198 
5199 #ifdef PV_STATS
5200 
5201 static COUNTER_U64_DEFINE_EARLY(pc_chunk_count);
5202 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD,
5203     &pc_chunk_count, "Current number of pv entry chunks");
5204 
5205 static COUNTER_U64_DEFINE_EARLY(pc_chunk_allocs);
5206 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD,
5207     &pc_chunk_allocs, "Total number of pv entry chunks allocated");
5208 
5209 static COUNTER_U64_DEFINE_EARLY(pc_chunk_frees);
5210 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD,
5211     &pc_chunk_frees, "Total number of pv entry chunks freed");
5212 
5213 static COUNTER_U64_DEFINE_EARLY(pc_chunk_tryfail);
5214 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD,
5215     &pc_chunk_tryfail,
5216     "Number of failed attempts to get a pv entry chunk page");
5217 
5218 static COUNTER_U64_DEFINE_EARLY(pv_entry_frees);
5219 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD,
5220     &pv_entry_frees, "Total number of pv entries freed");
5221 
5222 static COUNTER_U64_DEFINE_EARLY(pv_entry_allocs);
5223 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD,
5224     &pv_entry_allocs, "Total number of pv entries allocated");
5225 
5226 static COUNTER_U64_DEFINE_EARLY(pv_entry_count);
5227 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD,
5228     &pv_entry_count, "Current number of pv entries");
5229 
5230 static COUNTER_U64_DEFINE_EARLY(pv_entry_spare);
5231 SYSCTL_COUNTER_U64(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD,
5232     &pv_entry_spare, "Current number of spare pv entries");
5233 #endif
5234 
5235 static void
5236 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap, bool start_di)
5237 {
5238 
5239 	if (pmap == NULL)
5240 		return;
5241 	pmap_invalidate_all(pmap);
5242 	if (pmap != locked_pmap)
5243 		PMAP_UNLOCK(pmap);
5244 	if (start_di)
5245 		pmap_delayed_invl_finish();
5246 }
5247 
5248 /*
5249  * We are in a serious low memory condition.  Resort to
5250  * drastic measures to free some pages so we can allocate
5251  * another pv entry chunk.
5252  *
5253  * Returns NULL if PV entries were reclaimed from the specified pmap.
5254  *
5255  * We do not, however, unmap 2mpages because subsequent accesses will
5256  * allocate per-page pv entries until repromotion occurs, thereby
5257  * exacerbating the shortage of free pv entries.
5258  */
5259 static vm_page_t
5260 reclaim_pv_chunk_domain(pmap_t locked_pmap, struct rwlock **lockp, int domain)
5261 {
5262 	struct pv_chunks_list *pvc;
5263 	struct pv_chunk *pc, *pc_marker, *pc_marker_end;
5264 	struct pv_chunk_header pc_marker_b, pc_marker_end_b;
5265 	struct md_page *pvh;
5266 	pd_entry_t *pde;
5267 	pmap_t next_pmap, pmap;
5268 	pt_entry_t *pte, tpte;
5269 	pt_entry_t PG_G, PG_A, PG_M, PG_RW;
5270 	pv_entry_t pv;
5271 	vm_offset_t va;
5272 	vm_page_t m, m_pc;
5273 	struct spglist free;
5274 	uint64_t inuse;
5275 	int bit, field, freed;
5276 	bool start_di, restart;
5277 
5278 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
5279 	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
5280 	pmap = NULL;
5281 	m_pc = NULL;
5282 	PG_G = PG_A = PG_M = PG_RW = 0;
5283 	SLIST_INIT(&free);
5284 	bzero(&pc_marker_b, sizeof(pc_marker_b));
5285 	bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
5286 	pc_marker = (struct pv_chunk *)&pc_marker_b;
5287 	pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
5288 
5289 	/*
5290 	 * A delayed invalidation block should already be active if
5291 	 * pmap_advise() or pmap_remove() called this function by way
5292 	 * of pmap_demote_pde_locked().
5293 	 */
5294 	start_di = pmap_not_in_di();
5295 
5296 	pvc = &pv_chunks[domain];
5297 	mtx_lock(&pvc->pvc_lock);
5298 	pvc->active_reclaims++;
5299 	TAILQ_INSERT_HEAD(&pvc->pvc_list, pc_marker, pc_lru);
5300 	TAILQ_INSERT_TAIL(&pvc->pvc_list, pc_marker_end, pc_lru);
5301 	while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
5302 	    SLIST_EMPTY(&free)) {
5303 		next_pmap = pc->pc_pmap;
5304 		if (next_pmap == NULL) {
5305 			/*
5306 			 * The next chunk is a marker.  However, it is
5307 			 * not our marker, so active_reclaims must be
5308 			 * > 1.  Consequently, the next_chunk code
5309 			 * will not rotate the pv_chunks list.
5310 			 */
5311 			goto next_chunk;
5312 		}
5313 		mtx_unlock(&pvc->pvc_lock);
5314 
5315 		/*
5316 		 * A pv_chunk can only be removed from the pc_lru list
5317 		 * when both pc_chunks_mutex is owned and the
5318 		 * when both the per-domain pv chunk list lock (pvc_lock)
5319 		 * is owned and the corresponding pmap is locked.
5320 		if (pmap != next_pmap) {
5321 			restart = false;
5322 			reclaim_pv_chunk_leave_pmap(pmap, locked_pmap,
5323 			    start_di);
5324 			pmap = next_pmap;
5325 			/* Avoid deadlock and lock recursion. */
5326 			if (pmap > locked_pmap) {
5327 				RELEASE_PV_LIST_LOCK(lockp);
5328 				PMAP_LOCK(pmap);
5329 				if (start_di)
5330 					pmap_delayed_invl_start();
5331 				mtx_lock(&pvc->pvc_lock);
5332 				restart = true;
5333 			} else if (pmap != locked_pmap) {
5334 				if (PMAP_TRYLOCK(pmap)) {
5335 					if (start_di)
5336 						pmap_delayed_invl_start();
5337 					mtx_lock(&pvc->pvc_lock);
5338 					restart = true;
5339 				} else {
5340 					pmap = NULL; /* pmap is not locked */
5341 					mtx_lock(&pvc->pvc_lock);
5342 					pc = TAILQ_NEXT(pc_marker, pc_lru);
5343 					if (pc == NULL ||
5344 					    pc->pc_pmap != next_pmap)
5345 						continue;
5346 					goto next_chunk;
5347 				}
5348 			} else if (start_di)
5349 				pmap_delayed_invl_start();
5350 			PG_G = pmap_global_bit(pmap);
5351 			PG_A = pmap_accessed_bit(pmap);
5352 			PG_M = pmap_modified_bit(pmap);
5353 			PG_RW = pmap_rw_bit(pmap);
5354 			if (restart)
5355 				continue;
5356 		}
5357 
5358 		/*
5359 		 * Destroy every non-wired, 4 KB page mapping in the chunk.
5360 		 */
5361 		freed = 0;
5362 		for (field = 0; field < _NPCM; field++) {
5363 			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
5364 			    inuse != 0; inuse &= ~(1UL << bit)) {
5365 				bit = bsfq(inuse);
5366 				pv = &pc->pc_pventry[field * 64 + bit];
5367 				va = pv->pv_va;
5368 				pde = pmap_pde(pmap, va);
5369 				if ((*pde & PG_PS) != 0)
5370 					continue;
5371 				pte = pmap_pde_to_pte(pde, va);
5372 				if ((*pte & PG_W) != 0)
5373 					continue;
5374 				tpte = pte_load_clear(pte);
5375 				if ((tpte & PG_G) != 0)
5376 					pmap_invalidate_page(pmap, va);
5377 				m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
5378 				if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5379 					vm_page_dirty(m);
5380 				if ((tpte & PG_A) != 0)
5381 					vm_page_aflag_set(m, PGA_REFERENCED);
5382 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5383 				TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
5384 				m->md.pv_gen++;
5385 				if (TAILQ_EMPTY(&m->md.pv_list) &&
5386 				    (m->flags & PG_FICTITIOUS) == 0) {
5387 					pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5388 					if (TAILQ_EMPTY(&pvh->pv_list)) {
5389 						vm_page_aflag_clear(m,
5390 						    PGA_WRITEABLE);
5391 					}
5392 				}
5393 				pmap_delayed_invl_page(m);
5394 				pc->pc_map[field] |= 1UL << bit;
5395 				pmap_unuse_pt(pmap, va, *pde, &free);
5396 				freed++;
5397 			}
5398 		}
5399 		if (freed == 0) {
5400 			mtx_lock(&pvc->pvc_lock);
5401 			goto next_chunk;
5402 		}
5403 		/* Every freed mapping is for a 4 KB page. */
5404 		pmap_resident_count_adj(pmap, -freed);
5405 		PV_STAT(counter_u64_add(pv_entry_frees, freed));
5406 		PV_STAT(counter_u64_add(pv_entry_spare, freed));
5407 		PV_STAT(counter_u64_add(pv_entry_count, -freed));
5408 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5409 		if (pc_is_free(pc)) {
5410 			PV_STAT(counter_u64_add(pv_entry_spare, -_NPCPV));
5411 			PV_STAT(counter_u64_add(pc_chunk_count, -1));
5412 			PV_STAT(counter_u64_add(pc_chunk_frees, 1));
5413 			/* Entire chunk is free; return it. */
5414 			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
5415 			dump_drop_page(m_pc->phys_addr);
5416 			mtx_lock(&pvc->pvc_lock);
5417 			TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5418 			break;
5419 		}
5420 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5421 		mtx_lock(&pvc->pvc_lock);
5422 		/* One freed pv entry in locked_pmap is sufficient. */
5423 		if (pmap == locked_pmap)
5424 			break;
5425 next_chunk:
5426 		TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru);
5427 		TAILQ_INSERT_AFTER(&pvc->pvc_list, pc, pc_marker, pc_lru);
5428 		if (pvc->active_reclaims == 1 && pmap != NULL) {
5429 			/*
5430 			 * Rotate the pv chunks list so that we do not
5431 			 * scan the same pv chunks that could not be
5432 			 * freed (because they contained a wired
5433 			 * and/or superpage mapping) on every
5434 			 * invocation of reclaim_pv_chunk().
5435 			 */
5436 			while ((pc = TAILQ_FIRST(&pvc->pvc_list)) != pc_marker) {
5437 				MPASS(pc->pc_pmap != NULL);
5438 				TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5439 				TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
5440 			}
5441 		}
5442 	}
5443 	TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru);
5444 	TAILQ_REMOVE(&pvc->pvc_list, pc_marker_end, pc_lru);
5445 	pvc->active_reclaims--;
5446 	mtx_unlock(&pvc->pvc_lock);
5447 	reclaim_pv_chunk_leave_pmap(pmap, locked_pmap, start_di);
5448 	if (m_pc == NULL && !SLIST_EMPTY(&free)) {
5449 		m_pc = SLIST_FIRST(&free);
5450 		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
5451 		/* Recycle a freed page table page. */
5452 		m_pc->ref_count = 1;
5453 	}
5454 	vm_page_free_pages_toq(&free, true);
5455 	return (m_pc);
5456 }
5457 
5458 static vm_page_t
5459 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
5460 {
5461 	vm_page_t m;
5462 	int i, domain;
5463 
5464 	domain = PCPU_GET(domain);
5465 	for (i = 0; i < vm_ndomains; i++) {
5466 		m = reclaim_pv_chunk_domain(locked_pmap, lockp, domain);
5467 		if (m != NULL)
5468 			break;
5469 		domain = (domain + 1) % vm_ndomains;
5470 	}
5471 
5472 	return (m);
5473 }
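
/*
 * Illustrative only (not part of pmap.c): reclaim_pv_chunk() above starts
 * with the current CPU's memory domain and then visits the remaining
 * domains in round-robin order.  A standalone sketch of the same iteration
 * order, with made-up values for the domain count and starting domain.
 */
#if 0	/* example, not compiled */
#include <stdio.h>

int
main(void)
{
	int ndomains = 4, start = 2, domain, i;

	domain = start;
	for (i = 0; i < ndomains; i++) {
		printf("try domain %d\n", domain);	/* prints 2, 3, 0, 1 */
		domain = (domain + 1) % ndomains;
	}
	return (0);
}
#endif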
5474 
5475 /*
5476  * free the pv_entry back to the free list
5477  */
5478 static void
5479 free_pv_entry(pmap_t pmap, pv_entry_t pv)
5480 {
5481 	struct pv_chunk *pc;
5482 	int idx, field, bit;
5483 
5484 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5485 	PV_STAT(counter_u64_add(pv_entry_frees, 1));
5486 	PV_STAT(counter_u64_add(pv_entry_spare, 1));
5487 	PV_STAT(counter_u64_add(pv_entry_count, -1));
5488 	pc = pv_to_chunk(pv);
5489 	idx = pv - &pc->pc_pventry[0];
5490 	field = idx / 64;
5491 	bit = idx % 64;
5492 	pc->pc_map[field] |= 1ul << bit;
5493 	if (!pc_is_free(pc)) {
5494 		/* 98% of the time, pc is already at the head of the list. */
5495 		if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
5496 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5497 			TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5498 		}
5499 		return;
5500 	}
5501 	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5502 	free_pv_chunk(pc);
5503 }
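
/*
 * Illustrative only (not part of pmap.c): free_pv_entry() converts the pv
 * entry's index within its chunk into a 64-bit word ("field") and a bit
 * position, then sets that bit in the chunk's free map.  A standalone
 * sketch of the same index arithmetic.
 */
#if 0	/* example, not compiled */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t map[3] = { 0, 0, 0 };
	int idx = 130;			/* e.g. the 131st pv entry in a chunk */
	int field = idx / 64;		/* word 2 */
	int bit = idx % 64;		/* bit 2 */

	map[field] |= 1ULL << bit;	/* mark the entry free */
	printf("idx %d -> field %d bit %d\n", idx, field, bit);
	return (0);
}
#endif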
5504 
5505 static void
5506 free_pv_chunk_dequeued(struct pv_chunk *pc)
5507 {
5508 	vm_page_t m;
5509 
5510 	PV_STAT(counter_u64_add(pv_entry_spare, -_NPCPV));
5511 	PV_STAT(counter_u64_add(pc_chunk_count, -1));
5512 	PV_STAT(counter_u64_add(pc_chunk_frees, 1));
5513 	counter_u64_add(pv_page_count, -1);
5514 	/* entire chunk is free, return it */
5515 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
5516 	dump_drop_page(m->phys_addr);
5517 	vm_page_unwire_noq(m);
5518 	vm_page_free(m);
5519 }
5520 
5521 static void
5522 free_pv_chunk(struct pv_chunk *pc)
5523 {
5524 	struct pv_chunks_list *pvc;
5525 
5526 	pvc = &pv_chunks[pc_to_domain(pc)];
5527 	mtx_lock(&pvc->pvc_lock);
5528 	TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5529 	mtx_unlock(&pvc->pvc_lock);
5530 	free_pv_chunk_dequeued(pc);
5531 }
5532 
5533 static void
5534 free_pv_chunk_batch(struct pv_chunklist *batch)
5535 {
5536 	struct pv_chunks_list *pvc;
5537 	struct pv_chunk *pc, *npc;
5538 	int i;
5539 
5540 	for (i = 0; i < vm_ndomains; i++) {
5541 		if (TAILQ_EMPTY(&batch[i]))
5542 			continue;
5543 		pvc = &pv_chunks[i];
5544 		mtx_lock(&pvc->pvc_lock);
5545 		TAILQ_FOREACH(pc, &batch[i], pc_list) {
5546 			TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5547 		}
5548 		mtx_unlock(&pvc->pvc_lock);
5549 	}
5550 
5551 	for (i = 0; i < vm_ndomains; i++) {
5552 		TAILQ_FOREACH_SAFE(pc, &batch[i], pc_list, npc) {
5553 			free_pv_chunk_dequeued(pc);
5554 		}
5555 	}
5556 }
5557 
5558 /*
5559  * Returns a new PV entry, allocating a new PV chunk from the system when
5560  * needed.  If this PV chunk allocation fails and a PV list lock pointer was
5561  * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
5562  * returned.
5563  *
5564  * The given PV list lock may be released.
5565  */
5566 static pv_entry_t
5567 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
5568 {
5569 	struct pv_chunks_list *pvc;
5570 	int bit, field;
5571 	pv_entry_t pv;
5572 	struct pv_chunk *pc;
5573 	vm_page_t m;
5574 
5575 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5576 	PV_STAT(counter_u64_add(pv_entry_allocs, 1));
5577 retry:
5578 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
5579 	if (pc != NULL) {
5580 		for (field = 0; field < _NPCM; field++) {
5581 			if (pc->pc_map[field]) {
5582 				bit = bsfq(pc->pc_map[field]);
5583 				break;
5584 			}
5585 		}
5586 		if (field < _NPCM) {
5587 			pv = &pc->pc_pventry[field * 64 + bit];
5588 			pc->pc_map[field] &= ~(1ul << bit);
5589 			/* If this was the last item, move it to tail */
5590 			if (pc_is_full(pc)) {
5591 				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5592 				TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
5593 				    pc_list);
5594 			}
5595 			PV_STAT(counter_u64_add(pv_entry_count, 1));
5596 			PV_STAT(counter_u64_add(pv_entry_spare, -1));
5597 			return (pv);
5598 		}
5599 	}
5600 	/* No free items, allocate another chunk */
5601 	m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
5602 	if (m == NULL) {
5603 		if (lockp == NULL) {
5604 			PV_STAT(counter_u64_add(pc_chunk_tryfail, 1));
5605 			return (NULL);
5606 		}
5607 		m = reclaim_pv_chunk(pmap, lockp);
5608 		if (m == NULL)
5609 			goto retry;
5610 	} else
5611 		counter_u64_add(pv_page_count, 1);
5612 	PV_STAT(counter_u64_add(pc_chunk_count, 1));
5613 	PV_STAT(counter_u64_add(pc_chunk_allocs, 1));
5614 	dump_add_page(m->phys_addr);
5615 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
5616 	pc->pc_pmap = pmap;
5617 	pc->pc_map[0] = PC_FREEN & ~1ul;	/* preallocated bit 0 */
5618 	pc->pc_map[1] = PC_FREEN;
5619 	pc->pc_map[2] = PC_FREEL;
5620 	pvc = &pv_chunks[vm_page_domain(m)];
5621 	mtx_lock(&pvc->pvc_lock);
5622 	TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
5623 	mtx_unlock(&pvc->pvc_lock);
5624 	pv = &pc->pc_pventry[0];
5625 	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5626 	PV_STAT(counter_u64_add(pv_entry_count, 1));
5627 	PV_STAT(counter_u64_add(pv_entry_spare, _NPCPV - 1));
5628 	return (pv);
5629 }
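
/*
 * Illustrative only (not part of pmap.c): get_pv_entry() above allocates by
 * finding the lowest set bit in a non-empty word of the chunk's free map
 * (bsfq) and clearing it.  A standalone sketch of the same allocation step,
 * using __builtin_ctzll() as a portable stand-in for bsfq; the first word's
 * low four bits are clear to represent entries already in use.
 */
#if 0	/* example, not compiled */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t map[3] = { 0xfffffffffffffff0ULL, ~0ULL, ~0ULL };
	int field, bit;

	for (field = 0; field < 3; field++)
		if (map[field] != 0)
			break;
	bit = __builtin_ctzll(map[field]);	/* lowest free slot */
	map[field] &= ~(1ULL << bit);		/* mark it allocated */
	printf("allocated entry %d\n", field * 64 + bit);	/* prints 4 */
	return (0);
}
#endif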
5630 
5631 /*
5632  * Returns the number of one bits within the given PV chunk map.
5633  *
5634  * The errata for Intel processors state that "POPCNT Instruction May
5635  * Take Longer to Execute Than Expected".  It is believed that the
5636  * issue is the spurious dependency on the destination register.
5637  * Provide a hint to the register rename logic that the destination
5638  * value is overwritten, by clearing it, as suggested in the
5639  * optimization manual.  It should be cheap for unaffected processors
5640  * as well.
5641  *
5642  * Reference numbers for the errata are
5643  * 4th Gen Core: HSD146
5644  * 5th Gen Core: BDM85
5645  * 6th Gen Core: SKL029
5646  */
5647 static int
5648 popcnt_pc_map_pq(uint64_t *map)
5649 {
5650 	u_long result, tmp;
5651 
5652 	__asm __volatile("xorl %k0,%k0;popcntq %2,%0;"
5653 	    "xorl %k1,%k1;popcntq %3,%1;addl %k1,%k0;"
5654 	    "xorl %k1,%k1;popcntq %4,%1;addl %k1,%k0"
5655 	    : "=&r" (result), "=&r" (tmp)
5656 	    : "m" (map[0]), "m" (map[1]), "m" (map[2]));
5657 	return (result);
5658 }
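
/*
 * Illustrative only (not part of pmap.c): the function above counts the
 * free entries in a chunk's 3-word map while working around the POPCNT
 * false-dependency erratum.  A plain-C sketch of the same count (without
 * the erratum workaround), using the compiler builtin instead of inline
 * assembly.
 */
#if 0	/* example, not compiled */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t map[3] = { ~0ULL, ~0ULL, 0x3ULL };
	int free = 0, i;

	for (i = 0; i < 3; i++)
		free += __builtin_popcountll(map[i]);
	printf("%d free pv entries in this chunk\n", free);	/* prints 130 */
	return (0);
}
#endif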
5659 
5660 /*
5661  * Ensure that the number of spare PV entries in the specified pmap meets or
5662  * exceeds the given count, "needed".
5663  *
5664  * The given PV list lock may be released.
5665  */
5666 static void
5667 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
5668 {
5669 	struct pv_chunks_list *pvc;
5670 	struct pch new_tail[PMAP_MEMDOM];
5671 	struct pv_chunk *pc;
5672 	vm_page_t m;
5673 	int avail, free, i;
5674 	bool reclaimed;
5675 
5676 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5677 	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
5678 
5679 	/*
5680 	 * Newly allocated PV chunks must be stored in a private list until
5681 	 * the required number of PV chunks have been allocated.  Otherwise,
5682 	 * reclaim_pv_chunk() could recycle one of these chunks.  In
5683 	 * contrast, these chunks must be added to the pmap upon allocation.
5684 	 */
5685 	for (i = 0; i < PMAP_MEMDOM; i++)
5686 		TAILQ_INIT(&new_tail[i]);
5687 retry:
5688 	avail = 0;
5689 	TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
5690 #ifndef __POPCNT__
5691 		if ((cpu_feature2 & CPUID2_POPCNT) == 0)
5692 			bit_count((bitstr_t *)pc->pc_map, 0,
5693 			    sizeof(pc->pc_map) * NBBY, &free);
5694 		else
5695 #endif
5696 		free = popcnt_pc_map_pq(pc->pc_map);
5697 		if (free == 0)
5698 			break;
5699 		avail += free;
5700 		if (avail >= needed)
5701 			break;
5702 	}
5703 	for (reclaimed = false; avail < needed; avail += _NPCPV) {
5704 		m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
5705 		if (m == NULL) {
5706 			m = reclaim_pv_chunk(pmap, lockp);
5707 			if (m == NULL)
5708 				goto retry;
5709 			reclaimed = true;
5710 		} else
5711 			counter_u64_add(pv_page_count, 1);
5712 		PV_STAT(counter_u64_add(pc_chunk_count, 1));
5713 		PV_STAT(counter_u64_add(pc_chunk_allocs, 1));
5714 		dump_add_page(m->phys_addr);
5715 		pc = (void *)PHYS_TO_DMAP(m->phys_addr);
5716 		pc->pc_pmap = pmap;
5717 		pc->pc_map[0] = PC_FREEN;
5718 		pc->pc_map[1] = PC_FREEN;
5719 		pc->pc_map[2] = PC_FREEL;
5720 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5721 		TAILQ_INSERT_TAIL(&new_tail[vm_page_domain(m)], pc, pc_lru);
5722 		PV_STAT(counter_u64_add(pv_entry_spare, _NPCPV));
5723 
5724 		/*
5725 		 * The reclaim might have freed a chunk from the current pmap.
5726 		 * If that chunk contained available entries, we need to
5727 		 * re-count the number of available entries.
5728 		 */
5729 		if (reclaimed)
5730 			goto retry;
5731 	}
5732 	for (i = 0; i < vm_ndomains; i++) {
5733 		if (TAILQ_EMPTY(&new_tail[i]))
5734 			continue;
5735 		pvc = &pv_chunks[i];
5736 		mtx_lock(&pvc->pvc_lock);
5737 		TAILQ_CONCAT(&pvc->pvc_list, &new_tail[i], pc_lru);
5738 		mtx_unlock(&pvc->pvc_lock);
5739 	}
5740 }
5741 
5742 /*
5743  * First find and then remove the pv entry for the specified pmap and virtual
5744  * address from the specified pv list.  Returns the pv entry if found and NULL
5745  * otherwise.  This operation can be performed on pv lists for either 4KB or
5746  * 2MB page mappings.
5747  */
5748 static __inline pv_entry_t
5749 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
5750 {
5751 	pv_entry_t pv;
5752 
5753 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5754 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
5755 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
5756 			pvh->pv_gen++;
5757 			break;
5758 		}
5759 	}
5760 	return (pv);
5761 }
5762 
5763 /*
5764  * After demotion from a 2MB page mapping to 512 4KB page mappings,
5765  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
5766  * entries for each of the 4KB page mappings.
5767  */
5768 static void
5769 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
5770     struct rwlock **lockp)
5771 {
5772 	struct md_page *pvh;
5773 	struct pv_chunk *pc;
5774 	pv_entry_t pv;
5775 	vm_offset_t va_last;
5776 	vm_page_t m;
5777 	int bit, field;
5778 
5779 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5780 	KASSERT((pa & PDRMASK) == 0,
5781 	    ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
5782 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
5783 
5784 	/*
5785 	 * Transfer the 2mpage's pv entry for this mapping to the first
5786 	 * page's pv list.  Once this transfer begins, the pv list lock
5787 	 * must not be released until the last pv entry is reinstantiated.
5788 	 */
5789 	pvh = pa_to_pvh(pa);
5790 	va = trunc_2mpage(va);
5791 	pv = pmap_pvh_remove(pvh, pmap, va);
5792 	KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
5793 	m = PHYS_TO_VM_PAGE(pa);
5794 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5795 	m->md.pv_gen++;
5796 	/* Instantiate the remaining NPTEPG - 1 pv entries. */
5797 	PV_STAT(counter_u64_add(pv_entry_allocs, NPTEPG - 1));
5798 	va_last = va + NBPDR - PAGE_SIZE;
5799 	for (;;) {
5800 		pc = TAILQ_FIRST(&pmap->pm_pvchunk);
5801 		KASSERT(!pc_is_full(pc), ("pmap_pv_demote_pde: missing spare"));
5802 		for (field = 0; field < _NPCM; field++) {
5803 			while (pc->pc_map[field]) {
5804 				bit = bsfq(pc->pc_map[field]);
5805 				pc->pc_map[field] &= ~(1ul << bit);
5806 				pv = &pc->pc_pventry[field * 64 + bit];
5807 				va += PAGE_SIZE;
5808 				pv->pv_va = va;
5809 				m++;
5810 				KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5811 			    ("pmap_pv_demote_pde: page %p is not managed", m));
5812 				TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5813 				m->md.pv_gen++;
5814 				if (va == va_last)
5815 					goto out;
5816 			}
5817 		}
5818 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5819 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
5820 	}
5821 out:
5822 	if (pc_is_full(pc)) {
5823 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5824 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
5825 	}
5826 	PV_STAT(counter_u64_add(pv_entry_count, NPTEPG - 1));
5827 	PV_STAT(counter_u64_add(pv_entry_spare, -(NPTEPG - 1)));
5828 }
5829 
5830 #if VM_NRESERVLEVEL > 0
5831 /*
5832  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
5833  * replace the many pv entries for the 4KB page mappings by a single pv entry
5834  * for the 2MB page mapping.
5835  */
5836 static void
5837 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
5838     struct rwlock **lockp)
5839 {
5840 	struct md_page *pvh;
5841 	pv_entry_t pv;
5842 	vm_offset_t va_last;
5843 	vm_page_t m;
5844 
5845 	KASSERT((pa & PDRMASK) == 0,
5846 	    ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
5847 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
5848 
5849 	/*
5850 	 * Transfer the first page's pv entry for this mapping to the 2mpage's
5851 	 * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
5852 	 * a transfer avoids the possibility that get_pv_entry() calls
5853 	 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
5854 	 * mappings that is being promoted.
5855 	 */
5856 	m = PHYS_TO_VM_PAGE(pa);
5857 	va = trunc_2mpage(va);
5858 	pv = pmap_pvh_remove(&m->md, pmap, va);
5859 	KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
5860 	pvh = pa_to_pvh(pa);
5861 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5862 	pvh->pv_gen++;
5863 	/* Free the remaining NPTEPG - 1 pv entries. */
5864 	va_last = va + NBPDR - PAGE_SIZE;
5865 	do {
5866 		m++;
5867 		va += PAGE_SIZE;
5868 		pmap_pvh_free(&m->md, pmap, va);
5869 	} while (va < va_last);
5870 }
5871 #endif /* VM_NRESERVLEVEL > 0 */
5872 
5873 /*
5874  * First find and then destroy the pv entry for the specified pmap and virtual
5875  * address.  This operation can be performed on pv lists for either 4KB or 2MB
5876  * page mappings.
5877  */
5878 static void
5879 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
5880 {
5881 	pv_entry_t pv;
5882 
5883 	pv = pmap_pvh_remove(pvh, pmap, va);
5884 	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
5885 	free_pv_entry(pmap, pv);
5886 }
5887 
5888 /*
5889  * Conditionally create the PV entry for a 4KB page mapping if the required
5890  * memory can be allocated without resorting to reclamation.
5891  */
5892 static bool
5893 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
5894     struct rwlock **lockp)
5895 {
5896 	pv_entry_t pv;
5897 
5898 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5899 	/* Pass NULL instead of the lock pointer to disable reclamation. */
5900 	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
5901 		pv->pv_va = va;
5902 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5903 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5904 		m->md.pv_gen++;
5905 		return (true);
5906 	} else
5907 		return (false);
5908 }
5909 
5910 /*
5911  * Create the PV entry for a 2MB page mapping.  Always returns true unless the
5912  * flag PMAP_ENTER_NORECLAIM is specified.  If that flag is specified, returns
5913  * false if the PV entry cannot be allocated without resorting to reclamation.
5914  */
5915 static bool
5916 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags,
5917     struct rwlock **lockp)
5918 {
5919 	struct md_page *pvh;
5920 	pv_entry_t pv;
5921 	vm_paddr_t pa;
5922 
5923 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5924 	/* Pass NULL instead of the lock pointer to disable reclamation. */
5925 	if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
5926 	    NULL : lockp)) == NULL)
5927 		return (false);
5928 	pv->pv_va = va;
5929 	pa = pde & PG_PS_FRAME;
5930 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
5931 	pvh = pa_to_pvh(pa);
5932 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5933 	pvh->pv_gen++;
5934 	return (true);
5935 }
5936 
5937 /*
5938  * Fills a page table page with mappings to consecutive physical pages.
5939  */
5940 static void
5941 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
5942 {
5943 	pt_entry_t *pte;
5944 
5945 	for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
5946 		*pte = newpte;
5947 		newpte += PAGE_SIZE;
5948 	}
5949 }
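
/*
 * Illustrative only (not part of pmap.c): pmap_fill_ptp() writes 512
 * entries, each mapping the next 4KB of physical memory.  A standalone
 * sketch of the same fill pattern on a plain array, with assumed values for
 * NPTEPG (512) and PAGE_SIZE (4096) and a made-up base entry.
 */
#if 0	/* example, not compiled */
#include <stdio.h>
#include <stdint.h>

#define	EX_NPTEPG	512
#define	EX_PAGE_SIZE	4096UL

int
main(void)
{
	static uint64_t ptp[EX_NPTEPG];
	uint64_t newpte = 0x200000UL | 0x1;	/* base pa | valid bit (illustrative) */
	int i;

	for (i = 0; i < EX_NPTEPG; i++) {
		ptp[i] = newpte;		/* consecutive 4KB physical pages */
		newpte += EX_PAGE_SIZE;
	}
	printf("pte[0]=%#lx pte[511]=%#lx\n",
	    (unsigned long)ptp[0], (unsigned long)ptp[EX_NPTEPG - 1]);
	return (0);
}
#endif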
5950 
5951 /*
5952  * Tries to demote a 2MB page mapping.  If demotion fails, the 2MB page
5953  * mapping is invalidated.
5954  */
5955 static bool
5956 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
5957 {
5958 	struct rwlock *lock;
5959 	bool rv;
5960 
5961 	lock = NULL;
5962 	rv = pmap_demote_pde_locked(pmap, pde, va, &lock);
5963 	if (lock != NULL)
5964 		rw_wunlock(lock);
5965 	return (rv);
5966 }
5967 
5968 static void
5969 pmap_demote_pde_check(pt_entry_t *firstpte __unused, pt_entry_t newpte __unused)
5970 {
5971 #ifdef INVARIANTS
5972 #ifdef DIAGNOSTIC
5973 	pt_entry_t *xpte, *ypte;
5974 
5975 	for (xpte = firstpte; xpte < firstpte + NPTEPG;
5976 	    xpte++, newpte += PAGE_SIZE) {
5977 		if ((*xpte & PG_FRAME) != (newpte & PG_FRAME)) {
5978 			printf("pmap_demote_pde: xpte %zd and newpte map "
5979 			    "different pages: found %#lx, expected %#lx\n",
5980 			    xpte - firstpte, *xpte, newpte);
5981 			printf("page table dump\n");
5982 			for (ypte = firstpte; ypte < firstpte + NPTEPG; ypte++)
5983 				printf("%zd %#lx\n", ypte - firstpte, *ypte);
5984 			panic("firstpte");
5985 		}
5986 	}
5987 #else
5988 	KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
5989 	    ("pmap_demote_pde: firstpte and newpte map different physical"
5990 	    " addresses"));
5991 #endif
5992 #endif
5993 }
5994 
5995 static void
5996 pmap_demote_pde_abort(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
5997     pd_entry_t oldpde, struct rwlock **lockp)
5998 {
5999 	struct spglist free;
6000 	vm_offset_t sva;
6001 
6002 	SLIST_INIT(&free);
6003 	sva = trunc_2mpage(va);
6004 	pmap_remove_pde(pmap, pde, sva, true, &free, lockp);
6005 	if ((oldpde & pmap_global_bit(pmap)) == 0)
6006 		pmap_invalidate_pde_page(pmap, sva, oldpde);
6007 	vm_page_free_pages_toq(&free, true);
6008 	CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx in pmap %p",
6009 	    va, pmap);
6010 }
6011 
6012 static bool
6013 pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
6014     struct rwlock **lockp)
6015 {
6016 	return (pmap_demote_pde_mpte(pmap, pde, va, lockp, NULL));
6017 }
6018 
6019 static bool
6020 pmap_demote_pde_mpte(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
6021     struct rwlock **lockp, vm_page_t mpte)
6022 {
6023 	pd_entry_t newpde, oldpde;
6024 	pt_entry_t *firstpte, newpte;
6025 	pt_entry_t PG_A, PG_G, PG_M, PG_PKU_MASK, PG_RW, PG_V;
6026 	vm_paddr_t mptepa;
6027 	int PG_PTE_CACHE;
6028 	bool in_kernel;
6029 
6030 	PG_A = pmap_accessed_bit(pmap);
6031 	PG_G = pmap_global_bit(pmap);
6032 	PG_M = pmap_modified_bit(pmap);
6033 	PG_RW = pmap_rw_bit(pmap);
6034 	PG_V = pmap_valid_bit(pmap);
6035 	PG_PTE_CACHE = pmap_cache_mask(pmap, false);
6036 	PG_PKU_MASK = pmap_pku_mask_bit(pmap);
6037 
6038 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6039 	oldpde = *pde;
6040 	KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
6041 	    ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
6042 	KASSERT((oldpde & PG_MANAGED) == 0 || lockp != NULL,
6043 	    ("pmap_demote_pde: lockp for a managed mapping is NULL"));
6044 	in_kernel = va >= VM_MAXUSER_ADDRESS;
6045 	if (mpte == NULL) {
6046 		/*
6047 		 * Invalidate the 2MB page mapping and return "failure" if the
6048 		 * mapping was never accessed.
6049 		 */
6050 		if ((oldpde & PG_A) == 0) {
6051 			KASSERT((oldpde & PG_W) == 0,
6052 		    ("pmap_demote_pde: a wired mapping is missing PG_A"));
6053 			pmap_demote_pde_abort(pmap, va, pde, oldpde, lockp);
6054 			return (false);
6055 		}
6056 
6057 		mpte = pmap_remove_pt_page(pmap, va);
6058 		if (mpte == NULL) {
6059 			KASSERT((oldpde & PG_W) == 0,
6060     ("pmap_demote_pde: page table page for a wired mapping is missing"));
6061 
6062 			/*
6063 			 * If the page table page is missing and the mapping
6064 			 * is for a kernel address, the mapping must belong to
6065 			 * the direct map.  Page table pages are preallocated
6066 			 * for every other part of the kernel address space,
6067 			 * so the direct map region is the only part of the
6068 			 * kernel address space that must be handled here.
6069 			 */
6070 			KASSERT(!in_kernel || (va >= DMAP_MIN_ADDRESS &&
6071 			    va < DMAP_MAX_ADDRESS),
6072 			    ("pmap_demote_pde: No saved mpte for va %#lx", va));
6073 
6074 			/*
6075 			 * If the 2MB page mapping belongs to the direct map
6076 			 * region of the kernel's address space, then the page
6077 			 * allocation request specifies the highest possible
6078 			 * priority (VM_ALLOC_INTERRUPT).  Otherwise, the
6079 			 * priority is normal.
6080 			 */
6081 			mpte = pmap_alloc_pt_page(pmap, pmap_pde_pindex(va),
6082 			    (in_kernel ? VM_ALLOC_INTERRUPT : 0) |
6083 			    VM_ALLOC_WIRED);
6084 
6085 			/*
6086 			 * If the allocation of the new page table page fails,
6087 			 * invalidate the 2MB page mapping and return "failure".
6088 			 */
6089 			if (mpte == NULL) {
6090 				pmap_demote_pde_abort(pmap, va, pde, oldpde,
6091 				    lockp);
6092 				return (false);
6093 			}
6094 
6095 			if (!in_kernel)
6096 				mpte->ref_count = NPTEPG;
6097 		}
6098 	}
6099 	mptepa = VM_PAGE_TO_PHYS(mpte);
6100 	firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
6101 	newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
6102 	KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
6103 	    ("pmap_demote_pde: oldpde is missing PG_M"));
6104 	newpte = oldpde & ~PG_PS;
6105 	newpte = pmap_swap_pat(pmap, newpte);
6106 
6107 	/*
6108 	 * If the PTP is not leftover from an earlier promotion or it does not
6109 	 * have PG_A set in every PTE, then fill it.  The new PTEs will all
6110 	 * have PG_A set.
6111 	 */
6112 	if (!vm_page_all_valid(mpte))
6113 		pmap_fill_ptp(firstpte, newpte);
6114 
6115 	pmap_demote_pde_check(firstpte, newpte);
6116 
6117 	/*
6118 	 * If the mapping has changed attributes, update the PTEs.
6119 	 */
6120 	if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
6121 		pmap_fill_ptp(firstpte, newpte);
6122 
6123 	/*
6124 	 * The spare PV entries must be reserved prior to demoting the
6125 	 * mapping, that is, prior to changing the PDE.  Otherwise, the state
6126 	 * of the PDE and the PV lists will be inconsistent, which can result
6127 	 * in reclaim_pv_chunk() attempting to remove a PV entry from the
6128 	 * wrong PV list and pmap_pv_demote_pde() failing to find the expected
6129 	 * PV entry for the 2MB page mapping that is being demoted.
6130 	 */
6131 	if ((oldpde & PG_MANAGED) != 0)
6132 		reserve_pv_entries(pmap, NPTEPG - 1, lockp);
6133 
6134 	/*
6135 	 * Demote the mapping.  This pmap is locked.  The old PDE has
6136 	 * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
6137 	 * set.  Thus, there is no danger of a race with another
6138 	 * processor changing the setting of PG_A and/or PG_M between
6139 	 * the read above and the store below.
6140 	 */
6141 	if (workaround_erratum383)
6142 		pmap_update_pde(pmap, va, pde, newpde);
6143 	else
6144 		pde_store(pde, newpde);
6145 
6146 	/*
6147 	 * Invalidate a stale recursive mapping of the page table page.
6148 	 */
6149 	if (in_kernel)
6150 		pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
6151 
6152 	/*
6153 	 * Demote the PV entry.
6154 	 */
6155 	if ((oldpde & PG_MANAGED) != 0)
6156 		pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME, lockp);
6157 
6158 	counter_u64_add(pmap_pde_demotions, 1);
6159 	CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx in pmap %p",
6160 	    va, pmap);
6161 	return (true);
6162 }
6163 
6164 /*
6165  * pmap_remove_kernel_pde: Remove a kernel superpage mapping.
6166  */
6167 static void
6168 pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
6169     bool remove_pt)
6170 {
6171 	pd_entry_t newpde;
6172 	vm_paddr_t mptepa;
6173 	vm_page_t mpte;
6174 
6175 	KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
6176 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6177 	if (remove_pt)
6178 		mpte = pmap_remove_pt_page(pmap, va);
6179 	else
6180 		mpte = vm_radix_lookup(&pmap->pm_root, pmap_pde_pindex(va));
6181 	if (mpte == NULL)
6182 		panic("pmap_remove_kernel_pde: Missing pt page.");
6183 
6184 	mptepa = VM_PAGE_TO_PHYS(mpte);
6185 	newpde = mptepa | X86_PG_M | X86_PG_A | X86_PG_RW | X86_PG_V;
6186 
6187 	/*
6188 	 * If this page table page was unmapped by a promotion, then it
6189 	 * contains valid mappings.  Zero it to invalidate those mappings.
6190 	 */
6191 	if (vm_page_any_valid(mpte))
6192 		pagezero((void *)PHYS_TO_DMAP(mptepa));
6193 
6194 	/*
6195 	 * Demote the mapping.
6196 	 */
6197 	if (workaround_erratum383)
6198 		pmap_update_pde(pmap, va, pde, newpde);
6199 	else
6200 		pde_store(pde, newpde);
6201 
6202 	/*
6203 	 * Invalidate a stale recursive mapping of the page table page.
6204 	 */
6205 	pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
6206 }
6207 
6208 /*
6209  * pmap_remove_pde: do the things to unmap a superpage in a process
6210  */
6211 static int
6212 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, bool remove_pt,
6213     struct spglist *free, struct rwlock **lockp)
6214 {
6215 	struct md_page *pvh;
6216 	pd_entry_t oldpde;
6217 	vm_offset_t eva, va;
6218 	vm_page_t m, mpte;
6219 	pt_entry_t PG_G, PG_A, PG_M, PG_RW;
6220 
6221 	PG_G = pmap_global_bit(pmap);
6222 	PG_A = pmap_accessed_bit(pmap);
6223 	PG_M = pmap_modified_bit(pmap);
6224 	PG_RW = pmap_rw_bit(pmap);
6225 
6226 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6227 	KASSERT((sva & PDRMASK) == 0,
6228 	    ("pmap_remove_pde: sva is not 2mpage aligned"));
6229 	oldpde = pte_load_clear(pdq);
6230 	if (oldpde & PG_W)
6231 		pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
6232 	if ((oldpde & PG_G) != 0)
6233 		pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
6234 	pmap_resident_count_adj(pmap, -NBPDR / PAGE_SIZE);
6235 	if (oldpde & PG_MANAGED) {
6236 		CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
6237 		pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
6238 		pmap_pvh_free(pvh, pmap, sva);
6239 		eva = sva + NBPDR;
6240 		for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
6241 		    va < eva; va += PAGE_SIZE, m++) {
6242 			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
6243 				vm_page_dirty(m);
6244 			if (oldpde & PG_A)
6245 				vm_page_aflag_set(m, PGA_REFERENCED);
6246 			if (TAILQ_EMPTY(&m->md.pv_list) &&
6247 			    TAILQ_EMPTY(&pvh->pv_list))
6248 				vm_page_aflag_clear(m, PGA_WRITEABLE);
6249 			pmap_delayed_invl_page(m);
6250 		}
6251 	}
6252 	if (pmap == kernel_pmap) {
6253 		pmap_remove_kernel_pde(pmap, pdq, sva, remove_pt);
6254 	} else {
6255 		mpte = pmap_remove_pt_page(pmap, sva);
6256 		if (mpte != NULL) {
6257 			KASSERT(vm_page_any_valid(mpte),
6258 			    ("pmap_remove_pde: pte page not promoted"));
6259 			pmap_pt_page_count_adj(pmap, -1);
6260 			KASSERT(mpte->ref_count == NPTEPG,
6261 			    ("pmap_remove_pde: pte page ref count error"));
6262 			mpte->ref_count = 0;
6263 			pmap_add_delayed_free_list(mpte, free, false);
6264 		}
6265 	}
6266 	return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
6267 }
6268 
6269 /*
6270  * pmap_remove_pte: do the things to unmap a page in a process
6271  */
6272 static int
6273 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
6274     pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
6275 {
6276 	struct md_page *pvh;
6277 	pt_entry_t oldpte, PG_A, PG_M, PG_RW;
6278 	vm_page_t m;
6279 
6280 	PG_A = pmap_accessed_bit(pmap);
6281 	PG_M = pmap_modified_bit(pmap);
6282 	PG_RW = pmap_rw_bit(pmap);
6283 
6284 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6285 	oldpte = pte_load_clear(ptq);
6286 	if (oldpte & PG_W)
6287 		pmap->pm_stats.wired_count -= 1;
6288 	pmap_resident_count_adj(pmap, -1);
6289 	if (oldpte & PG_MANAGED) {
6290 		m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
6291 		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
6292 			vm_page_dirty(m);
6293 		if (oldpte & PG_A)
6294 			vm_page_aflag_set(m, PGA_REFERENCED);
6295 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
6296 		pmap_pvh_free(&m->md, pmap, va);
6297 		if (TAILQ_EMPTY(&m->md.pv_list) &&
6298 		    (m->flags & PG_FICTITIOUS) == 0) {
6299 			pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
6300 			if (TAILQ_EMPTY(&pvh->pv_list))
6301 				vm_page_aflag_clear(m, PGA_WRITEABLE);
6302 		}
6303 		pmap_delayed_invl_page(m);
6304 	}
6305 	return (pmap_unuse_pt(pmap, va, ptepde, free));
6306 }
6307 
6308 /*
6309  * Remove a single page from a process address space
6310  */
6311 static void
6312 pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
6313     struct spglist *free)
6314 {
6315 	struct rwlock *lock;
6316 	pt_entry_t *pte, PG_V;
6317 
6318 	PG_V = pmap_valid_bit(pmap);
6319 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6320 	if ((*pde & PG_V) == 0)
6321 		return;
6322 	pte = pmap_pde_to_pte(pde, va);
6323 	if ((*pte & PG_V) == 0)
6324 		return;
6325 	lock = NULL;
6326 	pmap_remove_pte(pmap, pte, va, *pde, free, &lock);
6327 	if (lock != NULL)
6328 		rw_wunlock(lock);
6329 	pmap_invalidate_page(pmap, va);
6330 }
6331 
6332 /*
6333  * Removes the specified range of addresses from the page table page.
6334  */
6335 static bool
6336 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
6337     pd_entry_t *pde, struct spglist *free, struct rwlock **lockp)
6338 {
6339 	pt_entry_t PG_G, *pte;
6340 	vm_offset_t va;
6341 	bool anyvalid;
6342 
6343 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6344 	PG_G = pmap_global_bit(pmap);
6345 	anyvalid = false;
6346 	va = eva;
6347 	for (pte = pmap_pde_to_pte(pde, sva); sva != eva; pte++,
6348 	    sva += PAGE_SIZE) {
6349 		if (*pte == 0) {
6350 			if (va != eva) {
6351 				pmap_invalidate_range(pmap, va, sva);
6352 				va = eva;
6353 			}
6354 			continue;
6355 		}
6356 		if ((*pte & PG_G) == 0)
6357 			anyvalid = true;
6358 		else if (va == eva)
6359 			va = sva;
6360 		if (pmap_remove_pte(pmap, pte, sva, *pde, free, lockp)) {
6361 			sva += PAGE_SIZE;
6362 			break;
6363 		}
6364 	}
6365 	if (va != eva)
6366 		pmap_invalidate_range(pmap, va, sva);
6367 	return (anyvalid);
6368 }
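
/*
 * Illustrative only (not part of pmap.c): pmap_remove_ptes() above batches
 * TLB invalidations by remembering where the current run of removed
 * mappings began and flushing [va, sva) whenever the run ends.  A
 * standalone sketch of the same run-coalescing pattern over an array,
 * where a zero slot plays the role of an empty PTE.
 */
#if 0	/* example, not compiled */
#include <stdio.h>

static void
flush(int start, int end)
{
	printf("invalidate [%d, %d)\n", start, end);
}

int
main(void)
{
	int present[10] = { 1, 1, 0, 1, 0, 0, 1, 1, 1, 0 };
	int i, n = 10, run = -1;

	for (i = 0; i < n; i++) {
		if (!present[i]) {		/* an empty slot ends the run */
			if (run != -1) {
				flush(run, i);
				run = -1;
			}
			continue;
		}
		if (run == -1)			/* start a new run */
			run = i;
	}
	if (run != -1)
		flush(run, n);			/* flush the trailing run */
	return (0);
}
#endif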
6369 
6370 static void
6371 pmap_remove1(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool map_delete)
6372 {
6373 	struct rwlock *lock;
6374 	vm_page_t mt;
6375 	vm_offset_t va_next;
6376 	pml5_entry_t *pml5e;
6377 	pml4_entry_t *pml4e;
6378 	pdp_entry_t *pdpe;
6379 	pd_entry_t ptpaddr, *pde;
6380 	pt_entry_t PG_G, PG_V;
6381 	struct spglist free;
6382 	int anyvalid;
6383 
6384 	PG_G = pmap_global_bit(pmap);
6385 	PG_V = pmap_valid_bit(pmap);
6386 
6387 	/*
6388 	 * If there are no resident pages besides the top level page
6389 	 * table page(s), there is nothing to do.  Kernel pmap always
6390 	 * accounts whole preloaded area as resident, which makes its
6391 	 * resident count > 2.
6392 	 * Perform an unsynchronized read.  This is, however, safe.
6393 	 */
6394 	if (pmap->pm_stats.resident_count <= 1 + (pmap->pm_pmltopu != NULL ?
6395 	    1 : 0))
6396 		return;
6397 
6398 	anyvalid = 0;
6399 	SLIST_INIT(&free);
6400 
6401 	pmap_delayed_invl_start();
6402 	PMAP_LOCK(pmap);
6403 	if (map_delete)
6404 		pmap_pkru_on_remove(pmap, sva, eva);
6405 
6406 	/*
6407 	 * special handling of removing one page.  a very
6408 	 * common operation and easy to short circuit some
6409 	 * code.
6410 	 */
6411 	if (sva + PAGE_SIZE == eva) {
6412 		pde = pmap_pde(pmap, sva);
6413 		if (pde && (*pde & PG_PS) == 0) {
6414 			pmap_remove_page(pmap, sva, pde, &free);
6415 			goto out;
6416 		}
6417 	}
6418 
6419 	lock = NULL;
6420 	for (; sva < eva; sva = va_next) {
6421 		if (pmap->pm_stats.resident_count == 0)
6422 			break;
6423 
6424 		if (pmap_is_la57(pmap)) {
6425 			pml5e = pmap_pml5e(pmap, sva);
6426 			if ((*pml5e & PG_V) == 0) {
6427 				va_next = (sva + NBPML5) & ~PML5MASK;
6428 				if (va_next < sva)
6429 					va_next = eva;
6430 				continue;
6431 			}
6432 			pml4e = pmap_pml5e_to_pml4e(pml5e, sva);
6433 		} else {
6434 			pml4e = pmap_pml4e(pmap, sva);
6435 		}
6436 		if ((*pml4e & PG_V) == 0) {
6437 			va_next = (sva + NBPML4) & ~PML4MASK;
6438 			if (va_next < sva)
6439 				va_next = eva;
6440 			continue;
6441 		}
6442 
6443 		va_next = (sva + NBPDP) & ~PDPMASK;
6444 		if (va_next < sva)
6445 			va_next = eva;
6446 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
6447 		if ((*pdpe & PG_V) == 0)
6448 			continue;
6449 		if ((*pdpe & PG_PS) != 0) {
6450 			KASSERT(va_next <= eva,
6451 			    ("partial update of non-transparent 1G mapping "
6452 			    "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
6453 			    *pdpe, sva, eva, va_next));
6454 			MPASS(pmap != kernel_pmap); /* XXXKIB */
6455 			MPASS((*pdpe & (PG_MANAGED | PG_G)) == 0);
6456 			anyvalid = 1;
6457 			*pdpe = 0;
6458 			pmap_resident_count_adj(pmap, -NBPDP / PAGE_SIZE);
6459 			mt = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, sva) & PG_FRAME);
6460 			pmap_unwire_ptp(pmap, sva, mt, &free);
6461 			continue;
6462 		}
6463 
6464 		/*
6465 		 * Calculate index for next page table.
6466 		 */
6467 		va_next = (sva + NBPDR) & ~PDRMASK;
6468 		if (va_next < sva)
6469 			va_next = eva;
6470 
6471 		pde = pmap_pdpe_to_pde(pdpe, sva);
6472 		ptpaddr = *pde;
6473 
6474 		/*
6475 		 * Weed out invalid mappings.
6476 		 */
6477 		if (ptpaddr == 0)
6478 			continue;
6479 
6480 		/*
6481 		 * Check for large page.
6482 		 */
6483 		if ((ptpaddr & PG_PS) != 0) {
6484 			/*
6485 			 * Are we removing the entire large page?  If not,
6486 			 * demote the mapping and fall through.
6487 			 */
6488 			if (sva + NBPDR == va_next && eva >= va_next) {
6489 				/*
6490 				 * The TLB entry for a PG_G mapping is
6491 				 * invalidated by pmap_remove_pde().
6492 				 */
6493 				if ((ptpaddr & PG_G) == 0)
6494 					anyvalid = 1;
6495 				pmap_remove_pde(pmap, pde, sva, true, &free,
6496 				    &lock);
6497 				continue;
6498 			} else if (!pmap_demote_pde_locked(pmap, pde, sva,
6499 			    &lock)) {
6500 				/* The large page mapping was destroyed. */
6501 				continue;
6502 			} else
6503 				ptpaddr = *pde;
6504 		}
6505 
6506 		/*
6507 		 * Limit our scan to either the end of the va represented
6508 		 * by the current page table page, or to the end of the
6509 		 * range being removed.
6510 		 */
6511 		if (va_next > eva)
6512 			va_next = eva;
6513 
6514 		if (pmap_remove_ptes(pmap, sva, va_next, pde, &free, &lock))
6515 			anyvalid = 1;
6516 	}
6517 	if (lock != NULL)
6518 		rw_wunlock(lock);
6519 out:
6520 	if (anyvalid)
6521 		pmap_invalidate_all(pmap);
6522 	PMAP_UNLOCK(pmap);
6523 	pmap_delayed_invl_finish();
6524 	vm_page_free_pages_toq(&free, true);
6525 }
6526 
6527 /*
6528  *	Remove the given range of addresses from the specified map.
6529  *
6530  *	It is assumed that the start and end are properly
6531  *	rounded to the page size.
6532  */
6533 void
6534 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
6535 {
6536 	pmap_remove1(pmap, sva, eva, false);
6537 }
6538 
6539 /*
6540  *	Remove the given range of addresses as part of a logical unmap
6541  *	operation. This has the effect of calling pmap_remove(), but
6542  *	also clears any metadata that should persist for the lifetime
6543  *	of a logical mapping.
6544  */
6545 void
6546 pmap_map_delete(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
6547 {
6548 	pmap_remove1(pmap, sva, eva, true);
6549 }
6550 
6551 /*
6552  *	Routine:	pmap_remove_all
6553  *	Function:
6554  *		Removes this physical page from
6555  *		all physical maps in which it resides.
6556  *		Reflects back modify bits to the pager.
6557  *
6558  *	Notes:
6559  *		Original versions of this routine were very
6560  *		inefficient because they iteratively called
6561  *		pmap_remove (slow...)
6562  */
6563 
6564 void
6565 pmap_remove_all(vm_page_t m)
6566 {
6567 	struct md_page *pvh;
6568 	pv_entry_t pv;
6569 	pmap_t pmap;
6570 	struct rwlock *lock;
6571 	pt_entry_t *pte, tpte, PG_A, PG_M, PG_RW;
6572 	pd_entry_t *pde;
6573 	vm_offset_t va;
6574 	struct spglist free;
6575 	int pvh_gen, md_gen;
6576 
6577 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
6578 	    ("pmap_remove_all: page %p is not managed", m));
6579 	SLIST_INIT(&free);
6580 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
6581 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
6582 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
6583 	rw_wlock(lock);
6584 retry:
6585 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
6586 		pmap = PV_PMAP(pv);
6587 		if (!PMAP_TRYLOCK(pmap)) {
6588 			pvh_gen = pvh->pv_gen;
6589 			rw_wunlock(lock);
6590 			PMAP_LOCK(pmap);
6591 			rw_wlock(lock);
6592 			if (pvh_gen != pvh->pv_gen) {
6593 				PMAP_UNLOCK(pmap);
6594 				goto retry;
6595 			}
6596 		}
6597 		va = pv->pv_va;
6598 		pde = pmap_pde(pmap, va);
6599 		(void)pmap_demote_pde_locked(pmap, pde, va, &lock);
6600 		PMAP_UNLOCK(pmap);
6601 	}
6602 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
6603 		pmap = PV_PMAP(pv);
6604 		if (!PMAP_TRYLOCK(pmap)) {
6605 			pvh_gen = pvh->pv_gen;
6606 			md_gen = m->md.pv_gen;
6607 			rw_wunlock(lock);
6608 			PMAP_LOCK(pmap);
6609 			rw_wlock(lock);
6610 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
6611 				PMAP_UNLOCK(pmap);
6612 				goto retry;
6613 			}
6614 		}
6615 		PG_A = pmap_accessed_bit(pmap);
6616 		PG_M = pmap_modified_bit(pmap);
6617 		PG_RW = pmap_rw_bit(pmap);
6618 		pmap_resident_count_adj(pmap, -1);
6619 		pde = pmap_pde(pmap, pv->pv_va);
6620 		KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
6621 		    " a 2mpage in page %p's pv list", m));
6622 		pte = pmap_pde_to_pte(pde, pv->pv_va);
6623 		tpte = pte_load_clear(pte);
6624 		if (tpte & PG_W)
6625 			pmap->pm_stats.wired_count--;
6626 		if (tpte & PG_A)
6627 			vm_page_aflag_set(m, PGA_REFERENCED);
6628 
6629 		/*
6630 		 * Update the vm_page_t clean and reference bits.
6631 		 */
6632 		if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
6633 			vm_page_dirty(m);
6634 		pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
6635 		pmap_invalidate_page(pmap, pv->pv_va);
6636 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
6637 		m->md.pv_gen++;
6638 		free_pv_entry(pmap, pv);
6639 		PMAP_UNLOCK(pmap);
6640 	}
6641 	vm_page_aflag_clear(m, PGA_WRITEABLE);
6642 	rw_wunlock(lock);
6643 	pmap_delayed_invl_wait(m);
6644 	vm_page_free_pages_toq(&free, true);
6645 }
6646 
6647 /*
6648  * pmap_protect_pde: do the things to protect a 2mpage in a process
6649  */
6650 static bool
6651 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
6652 {
6653 	pd_entry_t newpde, oldpde;
6654 	vm_page_t m, mt;
6655 	bool anychanged;
6656 	pt_entry_t PG_G, PG_M, PG_RW;
6657 
6658 	PG_G = pmap_global_bit(pmap);
6659 	PG_M = pmap_modified_bit(pmap);
6660 	PG_RW = pmap_rw_bit(pmap);
6661 
6662 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6663 	KASSERT((sva & PDRMASK) == 0,
6664 	    ("pmap_protect_pde: sva is not 2mpage aligned"));
6665 	anychanged = false;
6666 retry:
6667 	oldpde = newpde = *pde;
6668 	if ((prot & VM_PROT_WRITE) == 0) {
6669 		if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
6670 		    (PG_MANAGED | PG_M | PG_RW)) {
6671 			m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
6672 			for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
6673 				vm_page_dirty(mt);
6674 		}
6675 		newpde &= ~(PG_RW | PG_M);
6676 	}
6677 	if ((prot & VM_PROT_EXECUTE) == 0)
6678 		newpde |= pg_nx;
6679 	if (newpde != oldpde) {
6680 		/*
6681 		 * As an optimization to future operations on this PDE, clear
6682 		 * PG_PROMOTED.  The impending invalidation will remove any
6683 		 * lingering 4KB page mappings from the TLB.
6684 		 */
6685 		if (!atomic_cmpset_long(pde, oldpde, newpde & ~PG_PROMOTED))
6686 			goto retry;
6687 		if ((oldpde & PG_G) != 0)
6688 			pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
6689 		else
6690 			anychanged = true;
6691 	}
6692 	return (anychanged);
6693 }
6694 
6695 /*
6696  *	Set the physical protection on the
6697  *	specified range of this map as requested.
6698  */
6699 void
6700 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
6701 {
6702 	vm_page_t m;
6703 	vm_offset_t va_next;
6704 	pml4_entry_t *pml4e;
6705 	pdp_entry_t *pdpe;
6706 	pd_entry_t ptpaddr, *pde;
6707 	pt_entry_t *pte, PG_G, PG_M, PG_RW, PG_V;
6708 	pt_entry_t obits, pbits;
6709 	bool anychanged;
6710 
6711 	KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
6712 	if (prot == VM_PROT_NONE) {
6713 		pmap_remove(pmap, sva, eva);
6714 		return;
6715 	}
6716 
6717 	if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
6718 	    (VM_PROT_WRITE|VM_PROT_EXECUTE))
6719 		return;
6720 
6721 	PG_G = pmap_global_bit(pmap);
6722 	PG_M = pmap_modified_bit(pmap);
6723 	PG_V = pmap_valid_bit(pmap);
6724 	PG_RW = pmap_rw_bit(pmap);
6725 	anychanged = false;
6726 
6727 	/*
6728 	 * Although this function delays and batches the invalidation
6729 	 * of stale TLB entries, it does not need to call
6730 	 * pmap_delayed_invl_start() and
6731 	 * pmap_delayed_invl_finish(), because it does not
6732 	 * ordinarily destroy mappings.  Stale TLB entries from
6733 	 * protection-only changes need only be invalidated before the
6734 	 * pmap lock is released, because protection-only changes do
6735 	 * not destroy PV entries.  Even operations that iterate over
6736 	 * a physical page's PV list of mappings, like
6737 	 * pmap_remove_write(), acquire the pmap lock for each
6738 	 * mapping.  Consequently, for protection-only changes, the
6739 	 * pmap lock suffices to synchronize both page table and TLB
6740 	 * updates.
6741 	 *
6742 	 * This function only destroys a mapping if pmap_demote_pde()
6743 	 * fails.  In that case, stale TLB entries are immediately
6744 	 * invalidated.
6745 	 */
6746 
6747 	PMAP_LOCK(pmap);
6748 	for (; sva < eva; sva = va_next) {
6749 		pml4e = pmap_pml4e(pmap, sva);
6750 		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
6751 			va_next = (sva + NBPML4) & ~PML4MASK;
6752 			if (va_next < sva)
6753 				va_next = eva;
6754 			continue;
6755 		}
6756 
6757 		va_next = (sva + NBPDP) & ~PDPMASK;
6758 		if (va_next < sva)
6759 			va_next = eva;
6760 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
6761 		if ((*pdpe & PG_V) == 0)
6762 			continue;
6763 		if ((*pdpe & PG_PS) != 0) {
6764 			KASSERT(va_next <= eva,
6765 			    ("partial update of non-transparent 1G mapping "
6766 			    "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
6767 			    *pdpe, sva, eva, va_next));
6768 retry_pdpe:
6769 			obits = pbits = *pdpe;
6770 			MPASS((pbits & (PG_MANAGED | PG_G)) == 0);
6771 			MPASS(pmap != kernel_pmap); /* XXXKIB */
6772 			if ((prot & VM_PROT_WRITE) == 0)
6773 				pbits &= ~(PG_RW | PG_M);
6774 			if ((prot & VM_PROT_EXECUTE) == 0)
6775 				pbits |= pg_nx;
6776 
6777 			if (pbits != obits) {
6778 				if (!atomic_cmpset_long(pdpe, obits, pbits))
6779 					/* PG_PS cannot be cleared under us, */
6780 					goto retry_pdpe;
6781 				anychanged = true;
6782 			}
6783 			continue;
6784 		}
6785 
6786 		va_next = (sva + NBPDR) & ~PDRMASK;
6787 		if (va_next < sva)
6788 			va_next = eva;
6789 
6790 		pde = pmap_pdpe_to_pde(pdpe, sva);
6791 		ptpaddr = *pde;
6792 
6793 		/*
6794 		 * Weed out invalid mappings.
6795 		 */
6796 		if (ptpaddr == 0)
6797 			continue;
6798 
6799 		/*
6800 		 * Check for large page.
6801 		 */
6802 		if ((ptpaddr & PG_PS) != 0) {
6803 			/*
6804 			 * Are we protecting the entire large page?
6805 			 */
6806 			if (sva + NBPDR == va_next && eva >= va_next) {
6807 				/*
6808 				 * The TLB entry for a PG_G mapping is
6809 				 * invalidated by pmap_protect_pde().
6810 				 */
6811 				if (pmap_protect_pde(pmap, pde, sva, prot))
6812 					anychanged = true;
6813 				continue;
6814 			}
6815 
6816 			/*
6817 			 * Does the large page mapping need to change?  If so,
6818 			 * demote it and fall through.
6819 			 */
6820 			pbits = ptpaddr;
6821 			if ((prot & VM_PROT_WRITE) == 0)
6822 				pbits &= ~(PG_RW | PG_M);
6823 			if ((prot & VM_PROT_EXECUTE) == 0)
6824 				pbits |= pg_nx;
6825 			if (ptpaddr == pbits || !pmap_demote_pde(pmap, pde,
6826 			    sva)) {
6827 				/*
6828 				 * Either the large page mapping doesn't need
6829 				 * to change, or it was destroyed during
6830 				 * demotion.
6831 				 */
6832 				continue;
6833 			}
6834 		}
6835 
6836 		if (va_next > eva)
6837 			va_next = eva;
6838 
6839 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
6840 		    sva += PAGE_SIZE) {
6841 retry:
6842 			obits = pbits = *pte;
6843 			if ((pbits & PG_V) == 0)
6844 				continue;
6845 
6846 			if ((prot & VM_PROT_WRITE) == 0) {
6847 				if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
6848 				    (PG_MANAGED | PG_M | PG_RW)) {
6849 					m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
6850 					vm_page_dirty(m);
6851 				}
6852 				pbits &= ~(PG_RW | PG_M);
6853 			}
6854 			if ((prot & VM_PROT_EXECUTE) == 0)
6855 				pbits |= pg_nx;
6856 
6857 			if (pbits != obits) {
6858 				if (!atomic_cmpset_long(pte, obits, pbits))
6859 					goto retry;
6860 				if (obits & PG_G)
6861 					pmap_invalidate_page(pmap, sva);
6862 				else
6863 					anychanged = true;
6864 			}
6865 		}
6866 	}
6867 	if (anychanged)
6868 		pmap_invalidate_all(pmap);
6869 	PMAP_UNLOCK(pmap);
6870 }
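/*
 * Illustrative sketch (not kernel code): the PTE updates in pmap_protect()
 * follow a read/modify/compare-and-swap retry pattern so that PG_A and PG_M
 * bits set concurrently by the MMU are never lost.  A minimal standalone
 * model of that pattern, using a plain atomic 64-bit word and made-up bit
 * values in place of a live PTE, might look like this:
 *
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *
 *	#define EX_RW	0x002UL		// stand-in for PG_RW (assumed)
 *	#define EX_M	0x040UL		// stand-in for PG_M (assumed)
 *
 *	static void
 *	ex_clear_write(_Atomic uint64_t *pte)
 *	{
 *		uint64_t obits, pbits;
 *
 *		do {
 *			obits = atomic_load(pte);
 *			pbits = obits & ~(EX_RW | EX_M);
 *		} while (pbits != obits &&
 *		    !atomic_compare_exchange_weak(pte, &obits, pbits));
 *	}
 *
 * The function above uses atomic_cmpset_long() and a "retry" label instead
 * of a loop, and must additionally decide between a per-page and a global
 * TLB invalidation, but the lock-free update is the same idea.
 */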
6871 
6872 static bool
6873 pmap_pde_ept_executable(pmap_t pmap, pd_entry_t pde)
6874 {
6875 
6876 	if (pmap->pm_type != PT_EPT)
6877 		return (false);
6878 	return ((pde & EPT_PG_EXECUTE) != 0);
6879 }
6880 
6881 #if VM_NRESERVLEVEL > 0
6882 /*
6883  * Tries to promote the 512, contiguous 4KB page mappings that are within a
6884  * single page table page (PTP) to a single 2MB page mapping.  For promotion
6885  * to occur, two conditions must be met: (1) the 4KB page mappings must map
6886  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
6887  * identical characteristics.
6888  */
6889 static bool
6890 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, vm_page_t mpte,
6891     struct rwlock **lockp)
6892 {
6893 	pd_entry_t newpde;
6894 	pt_entry_t *firstpte, oldpte, pa, *pte;
6895 	pt_entry_t allpte_PG_A, PG_A, PG_G, PG_M, PG_PKU_MASK, PG_RW, PG_V;
6896 	int PG_PTE_CACHE;
6897 
6898 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6899 	if (!pmap_ps_enabled(pmap))
6900 		return (false);
6901 
6902 	PG_A = pmap_accessed_bit(pmap);
6903 	PG_G = pmap_global_bit(pmap);
6904 	PG_M = pmap_modified_bit(pmap);
6905 	PG_V = pmap_valid_bit(pmap);
6906 	PG_RW = pmap_rw_bit(pmap);
6907 	PG_PKU_MASK = pmap_pku_mask_bit(pmap);
6908 	PG_PTE_CACHE = pmap_cache_mask(pmap, false);
6909 
6910 	/*
6911 	 * Examine the first PTE in the specified PTP.  Abort if this PTE is
6912 	 * ineligible for promotion due to hardware errata, invalid, or does
6913 	 * not map the first 4KB physical page within a 2MB page.
6914 	 */
6915 	firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
6916 	newpde = *firstpte;
6917 	if (!pmap_allow_2m_x_page(pmap, pmap_pde_ept_executable(pmap, newpde)))
6918 		return (false);
6919 	if ((newpde & ((PG_FRAME & PDRMASK) | PG_V)) != PG_V) {
6920 		counter_u64_add(pmap_pde_p_failures, 1);
6921 		CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
6922 		    " in pmap %p", va, pmap);
6923 		return (false);
6924 	}
6925 
6926 	/*
6927 	 * Both here and in the below "for" loop, to allow for repromotion
6928 	 * after MADV_FREE, conditionally write protect a clean PTE before
6929 	 * possibly aborting the promotion due to other PTE attributes.  Why?
6930 	 * Suppose that MADV_FREE is applied to a part of a superpage, the
6931 	 * address range [S, E).  pmap_advise() will demote the superpage
6932 	 * mapping, destroy the 4KB page mapping at the end of [S, E), and
6933 	 * clear PG_M and PG_A in the PTEs for the rest of [S, E).  Later,
6934 	 * imagine that the memory in [S, E) is recycled, but the last 4KB
6935 	 * page in [S, E) is not the last to be rewritten, or simply accessed.
6936 	 * In other words, there is still a 4KB page in [S, E), call it P,
6937 	 * that is writeable but PG_M and PG_A are clear in P's PTE.  Unless
6938 	 * we write protect P before aborting the promotion, if and when P is
6939 	 * finally rewritten, there won't be a page fault to trigger
6940 	 * repromotion.
6941 	 */
6942 setpde:
6943 	if ((newpde & (PG_M | PG_RW)) == PG_RW) {
6944 		/*
6945 		 * When PG_M is already clear, PG_RW can be cleared without
6946 		 * a TLB invalidation.
6947 		 */
6948 		if (!atomic_fcmpset_long(firstpte, &newpde, newpde & ~PG_RW))
6949 			goto setpde;
6950 		newpde &= ~PG_RW;
6951 		CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
6952 		    " in pmap %p", va & ~PDRMASK, pmap);
6953 	}
6954 
6955 	/*
6956 	 * Examine each of the other PTEs in the specified PTP.  Abort if this
6957 	 * PTE maps an unexpected 4KB physical page or does not have identical
6958 	 * characteristics to the first PTE.
6959 	 */
6960 	allpte_PG_A = newpde & PG_A;
6961 	pa = (newpde & (PG_PS_FRAME | PG_V)) + NBPDR - PAGE_SIZE;
6962 	for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
6963 		oldpte = *pte;
6964 		if ((oldpte & (PG_FRAME | PG_V)) != pa) {
6965 			counter_u64_add(pmap_pde_p_failures, 1);
6966 			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
6967 			    " in pmap %p", va, pmap);
6968 			return (false);
6969 		}
6970 setpte:
6971 		if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
6972 			/*
6973 			 * When PG_M is already clear, PG_RW can be cleared
6974 			 * without a TLB invalidation.
6975 			 */
6976 			if (!atomic_fcmpset_long(pte, &oldpte, oldpte & ~PG_RW))
6977 				goto setpte;
6978 			oldpte &= ~PG_RW;
6979 			CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
6980 			    " in pmap %p", (oldpte & PG_FRAME & PDRMASK) |
6981 			    (va & ~PDRMASK), pmap);
6982 		}
6983 		if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
6984 			counter_u64_add(pmap_pde_p_failures, 1);
6985 			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
6986 			    " in pmap %p", va, pmap);
6987 			return (false);
6988 		}
6989 		allpte_PG_A &= oldpte;
6990 		pa -= PAGE_SIZE;
6991 	}
6992 
6993 	/*
6994 	 * Unless all PTEs have PG_A set, clear it from the superpage mapping,
6995 	 * so that promotions triggered by speculative mappings, such as
6996 	 * pmap_enter_quick(), don't automatically mark the underlying pages
6997 	 * as referenced.
6998 	 */
6999 	newpde &= ~PG_A | allpte_PG_A;
7000 
7001 	/*
7002 	 * EPT PTEs with PG_M set and PG_A clear are not supported by early
7003 	 * MMUs supporting EPT.
7004 	 */
7005 	KASSERT((newpde & PG_A) != 0 || safe_to_clear_referenced(pmap, newpde),
7006 	    ("unsupported EPT PTE"));
7007 
7008 	/*
7009 	 * Save the PTP in its current state until the PDE mapping the
7010 	 * superpage is demoted by pmap_demote_pde() or destroyed by
7011 	 * pmap_remove_pde().  If PG_A is not set in every PTE, then request
7012 	 * that the PTP be refilled on demotion.
7013 	 */
7014 	if (mpte == NULL)
7015 		mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
7016 	KASSERT(mpte >= vm_page_array &&
7017 	    mpte < &vm_page_array[vm_page_array_size],
7018 	    ("pmap_promote_pde: page table page is out of range"));
7019 	KASSERT(mpte->pindex == pmap_pde_pindex(va),
7020 	    ("pmap_promote_pde: page table page's pindex is wrong "
7021 	    "mpte %p pidx %#lx va %#lx va pde pidx %#lx",
7022 	    mpte, mpte->pindex, va, pmap_pde_pindex(va)));
7023 	if (pmap_insert_pt_page(pmap, mpte, true, allpte_PG_A != 0)) {
7024 		counter_u64_add(pmap_pde_p_failures, 1);
7025 		CTR2(KTR_PMAP,
7026 		    "pmap_promote_pde: failure for va %#lx in pmap %p", va,
7027 		    pmap);
7028 		return (false);
7029 	}
7030 
7031 	/*
7032 	 * Promote the pv entries.
7033 	 */
7034 	if ((newpde & PG_MANAGED) != 0)
7035 		pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME, lockp);
7036 
7037 	/*
7038 	 * Propagate the PAT index to its proper position.
7039 	 */
7040 	newpde = pmap_swap_pat(pmap, newpde);
7041 
7042 	/*
7043 	 * Map the superpage.
7044 	 */
7045 	if (workaround_erratum383)
7046 		pmap_update_pde(pmap, va, pde, PG_PS | newpde);
7047 	else
7048 		pde_store(pde, PG_PROMOTED | PG_PS | newpde);
7049 
7050 	counter_u64_add(pmap_pde_promotions, 1);
7051 	CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
7052 	    " in pmap %p", va, pmap);
7053 	return (true);
7054 }
7055 #endif /* VM_NRESERVLEVEL > 0 */
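/*
 * Illustrative sketch (assumptions noted inline): the eligibility test in
 * pmap_promote_pde() boils down to requiring that PTE i map physical
 * address base + i * PAGE_SIZE, where "base" is 2MB-aligned, and that all
 * 512 PTEs agree on their attribute bits.  A simplified standalone model,
 * ignoring the live-update and PG_A/PG_RW subtleties handled above:
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	#define EX_PAGE_SIZE	4096UL			// 4KB
 *	#define EX_NPTEPG	512			// PTEs per PTP
 *	#define EX_FRAME	0x000ffffffffff000UL	// amd64 PG_FRAME
 *
 *	static bool
 *	ex_promotable(const uint64_t pte[EX_NPTEPG], uint64_t attr_mask)
 *	{
 *		uint64_t base, attrs;
 *		int i;
 *
 *		base = pte[0] & EX_FRAME;
 *		if ((base & (EX_NPTEPG * EX_PAGE_SIZE - 1)) != 0)
 *			return (false);		// not 2MB-aligned
 *		attrs = pte[0] & attr_mask;
 *		for (i = 1; i < EX_NPTEPG; i++) {
 *			if ((pte[i] & EX_FRAME) != base + i * EX_PAGE_SIZE)
 *				return (false);	// not physically contiguous
 *			if ((pte[i] & attr_mask) != attrs)
 *				return (false);	// differing characteristics
 *		}
 *		return (true);
 *	}
 *
 * The real function walks the PTP from the last PTE downward, folds the
 * PG_A accumulation and the write protection of clean PTEs into the same
 * pass, and uses atomics because the PTEs may change underneath it.
 */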
7056 
7057 static int
7058 pmap_enter_largepage(pmap_t pmap, vm_offset_t va, pt_entry_t newpte, int flags,
7059     int psind)
7060 {
7061 	vm_page_t mp;
7062 	pt_entry_t origpte, *pml4e, *pdpe, *pde, pten, PG_V;
7063 
7064 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
7065 	KASSERT(psind > 0 && psind < MAXPAGESIZES && pagesizes[psind] != 0,
7066 	    ("psind %d unexpected", psind));
7067 	KASSERT(((newpte & PG_FRAME) & (pagesizes[psind] - 1)) == 0,
7068 	    ("unaligned phys address %#lx newpte %#lx psind %d",
7069 	    newpte & PG_FRAME, newpte, psind));
7070 	KASSERT((va & (pagesizes[psind] - 1)) == 0,
7071 	    ("unaligned va %#lx psind %d", va, psind));
7072 	KASSERT(va < VM_MAXUSER_ADDRESS,
7073 	    ("kernel mode non-transparent superpage")); /* XXXKIB */
7074 	KASSERT(va + pagesizes[psind] < VM_MAXUSER_ADDRESS,
7075 	    ("overflowing user map va %#lx psind %d", va, psind)); /* XXXKIB */
7076 
7077 	PG_V = pmap_valid_bit(pmap);
7078 
7079 restart:
7080 	pten = newpte;
7081 	if (!pmap_pkru_same(pmap, va, va + pagesizes[psind], &pten))
7082 		return (KERN_PROTECTION_FAILURE);
7083 
7084 	if (psind == 2) {	/* 1G */
7085 		pml4e = pmap_pml4e(pmap, va);
7086 		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
7087 			mp = pmap_allocpte_alloc(pmap, pmap_pml4e_pindex(va),
7088 			    NULL, va);
7089 			if (mp == NULL)
7090 				goto allocf;
7091 			pdpe = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
7092 			pdpe = &pdpe[pmap_pdpe_index(va)];
7093 			origpte = *pdpe;
7094 			MPASS(origpte == 0);
7095 		} else {
7096 			pdpe = pmap_pml4e_to_pdpe(pml4e, va);
7097 			KASSERT(pdpe != NULL, ("va %#lx lost pdpe", va));
7098 			origpte = *pdpe;
7099 			if ((origpte & PG_V) == 0) {
7100 				mp = PHYS_TO_VM_PAGE(*pml4e & PG_FRAME);
7101 				mp->ref_count++;
7102 			}
7103 		}
7104 		*pdpe = pten;
7105 	} else /* (psind == 1) */ {	/* 2M */
7106 		pde = pmap_pde(pmap, va);
7107 		if (pde == NULL) {
7108 			mp = pmap_allocpte_alloc(pmap, pmap_pdpe_pindex(va),
7109 			    NULL, va);
7110 			if (mp == NULL)
7111 				goto allocf;
7112 			pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
7113 			pde = &pde[pmap_pde_index(va)];
7114 			origpte = *pde;
7115 			MPASS(origpte == 0);
7116 		} else {
7117 			origpte = *pde;
7118 			if ((origpte & PG_V) == 0) {
7119 				pdpe = pmap_pdpe(pmap, va);
7120 				MPASS(pdpe != NULL && (*pdpe & PG_V) != 0);
7121 				mp = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
7122 				mp->ref_count++;
7123 			}
7124 		}
7125 		*pde = pten;
7126 	}
7127 	KASSERT((origpte & PG_V) == 0 || ((origpte & PG_PS) != 0 &&
7128 	    (origpte & PG_PS_FRAME) == (pten & PG_PS_FRAME)),
7129 	    ("va %#lx changing %s phys page origpte %#lx pten %#lx",
7130 	    va, psind == 2 ? "1G" : "2M", origpte, pten));
7131 	if ((pten & PG_W) != 0 && (origpte & PG_W) == 0)
7132 		pmap->pm_stats.wired_count += pagesizes[psind] / PAGE_SIZE;
7133 	else if ((pten & PG_W) == 0 && (origpte & PG_W) != 0)
7134 		pmap->pm_stats.wired_count -= pagesizes[psind] / PAGE_SIZE;
7135 	if ((origpte & PG_V) == 0)
7136 		pmap_resident_count_adj(pmap, pagesizes[psind] / PAGE_SIZE);
7137 
7138 	return (KERN_SUCCESS);
7139 
7140 allocf:
7141 	if ((flags & PMAP_ENTER_NOSLEEP) != 0)
7142 		return (KERN_RESOURCE_SHORTAGE);
7143 	PMAP_UNLOCK(pmap);
7144 	vm_wait(NULL);
7145 	PMAP_LOCK(pmap);
7146 	goto restart;
7147 }
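/*
 * Illustrative note (assumed amd64 sizes): psind == 1 installs the mapping
 * at a PDE and covers 2MB, psind == 2 installs it at a PDPE and covers 1GB,
 * and the wired/resident counters are adjusted by the number of 4KB pages
 * covered, i.e. pagesizes[psind] / PAGE_SIZE.  As a standalone sketch:
 *
 *	static const unsigned long ex_pagesizes[] = {
 *		4096UL,				// psind 0: 4KB
 *		2UL * 1024 * 1024,		// psind 1: 2MB
 *		1UL * 1024 * 1024 * 1024,	// psind 2: 1GB
 *	};
 *
 *	// Number of 4KB pages accounted for a mapping with index psind.
 *	static unsigned long
 *	ex_npages(int psind)
 *	{
 *		return (ex_pagesizes[psind] / ex_pagesizes[0]);
 *	}
 *
 * so ex_npages(1) == 512 and ex_npages(2) == 262144.
 */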
7148 
7149 /*
7150  *	Insert the given physical page (p) at
7151  *	the specified virtual address (v) in the
7152  *	target physical map with the protection requested.
7153  *
7154  *	If specified, the page will be wired down, meaning
7155  *	that the related pte can not be reclaimed.
7156  *
7157  *	NB:  This is the only routine which MAY NOT lazy-evaluate
7158  *	or lose information.  That is, this routine must actually
7159  *	insert this page into the given map NOW.
7160  *
7161  *	When destroying both a page table and PV entry, this function
7162  *	performs the TLB invalidation before releasing the PV list
7163  *	lock, so we do not need pmap_delayed_invl_page() calls here.
7164  */
7165 int
7166 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
7167     u_int flags, int8_t psind)
7168 {
7169 	struct rwlock *lock;
7170 	pd_entry_t *pde;
7171 	pt_entry_t *pte, PG_G, PG_A, PG_M, PG_RW, PG_V;
7172 	pt_entry_t newpte, origpte;
7173 	pv_entry_t pv;
7174 	vm_paddr_t opa, pa;
7175 	vm_page_t mpte, om;
7176 	int rv;
7177 	bool nosleep;
7178 
7179 	PG_A = pmap_accessed_bit(pmap);
7180 	PG_G = pmap_global_bit(pmap);
7181 	PG_M = pmap_modified_bit(pmap);
7182 	PG_V = pmap_valid_bit(pmap);
7183 	PG_RW = pmap_rw_bit(pmap);
7184 
7185 	va = trunc_page(va);
7186 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
7187 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
7188 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
7189 	    va));
7190 	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
7191 	    ("pmap_enter: managed mapping within the clean submap"));
7192 	if ((m->oflags & VPO_UNMANAGED) == 0)
7193 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
7194 	KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
7195 	    ("pmap_enter: flags %u has reserved bits set", flags));
7196 	pa = VM_PAGE_TO_PHYS(m);
7197 	newpte = (pt_entry_t)(pa | PG_A | PG_V);
7198 	if ((flags & VM_PROT_WRITE) != 0)
7199 		newpte |= PG_M;
7200 	if ((prot & VM_PROT_WRITE) != 0)
7201 		newpte |= PG_RW;
7202 	KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
7203 	    ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
7204 	if ((prot & VM_PROT_EXECUTE) == 0)
7205 		newpte |= pg_nx;
7206 	if ((flags & PMAP_ENTER_WIRED) != 0)
7207 		newpte |= PG_W;
7208 	if (va < VM_MAXUSER_ADDRESS)
7209 		newpte |= PG_U;
7210 	if (pmap == kernel_pmap)
7211 		newpte |= PG_G;
7212 	newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0);
7213 
7214 	/*
7215 	 * Set modified bit gratuitously for writeable mappings if
7216 	 * the page is unmanaged. We do not want to take a fault
7217 	 * to do the dirty bit accounting for these mappings.
7218 	 */
7219 	if ((m->oflags & VPO_UNMANAGED) != 0) {
7220 		if ((newpte & PG_RW) != 0)
7221 			newpte |= PG_M;
7222 	} else
7223 		newpte |= PG_MANAGED;
7224 
7225 	lock = NULL;
7226 	PMAP_LOCK(pmap);
7227 	if ((flags & PMAP_ENTER_LARGEPAGE) != 0) {
7228 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
7229 		    ("managed largepage va %#lx flags %#x", va, flags));
7230 		rv = pmap_enter_largepage(pmap, va, newpte | PG_PS, flags,
7231 		    psind);
7232 		goto out;
7233 	}
7234 	if (psind == 1) {
7235 		/* Assert the required virtual and physical alignment. */
7236 		KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned"));
7237 		KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
7238 		rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m, &lock);
7239 		goto out;
7240 	}
7241 	mpte = NULL;
7242 
7243 	/*
7244 	 * In the case that a page table page is not
7245 	 * resident, we are creating it here.
7246 	 */
7247 retry:
7248 	pde = pmap_pde(pmap, va);
7249 	if (pde != NULL && (*pde & PG_V) != 0 && ((*pde & PG_PS) == 0 ||
7250 	    pmap_demote_pde_locked(pmap, pde, va, &lock))) {
7251 		pte = pmap_pde_to_pte(pde, va);
7252 		if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
7253 			mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
7254 			mpte->ref_count++;
7255 		}
7256 	} else if (va < VM_MAXUSER_ADDRESS) {
7257 		/*
7258 		 * Here if the pte page isn't mapped, or if it has been
7259 		 * deallocated.
7260 		 */
7261 		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
7262 		mpte = pmap_allocpte_alloc(pmap, pmap_pde_pindex(va),
7263 		    nosleep ? NULL : &lock, va);
7264 		if (mpte == NULL && nosleep) {
7265 			rv = KERN_RESOURCE_SHORTAGE;
7266 			goto out;
7267 		}
7268 		goto retry;
7269 	} else
7270 		panic("pmap_enter: invalid page directory va=%#lx", va);
7271 
7272 	origpte = *pte;
7273 	pv = NULL;
7274 	if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86)
7275 		newpte |= pmap_pkru_get(pmap, va);
7276 
7277 	/*
7278 	 * Is the specified virtual address already mapped?
7279 	 */
7280 	if ((origpte & PG_V) != 0) {
7281 		/*
7282 		 * Wiring change, just update stats. We don't worry about
7283 		 * wiring PT pages as they remain resident as long as there
7284 		 * are valid mappings in them. Hence, if a user page is wired,
7285 		 * the PT page will be also.
7286 		 */
7287 		if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
7288 			pmap->pm_stats.wired_count++;
7289 		else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
7290 			pmap->pm_stats.wired_count--;
7291 
7292 		/*
7293 		 * Remove the extra PT page reference.
7294 		 */
7295 		if (mpte != NULL) {
7296 			mpte->ref_count--;
7297 			KASSERT(mpte->ref_count > 0,
7298 			    ("pmap_enter: missing reference to page table page,"
7299 			     " va: 0x%lx", va));
7300 		}
7301 
7302 		/*
7303 		 * Has the physical page changed?
7304 		 */
7305 		opa = origpte & PG_FRAME;
7306 		if (opa == pa) {
7307 			/*
7308 			 * No, might be a protection or wiring change.
7309 			 */
7310 			if ((origpte & PG_MANAGED) != 0 &&
7311 			    (newpte & PG_RW) != 0)
7312 				vm_page_aflag_set(m, PGA_WRITEABLE);
7313 			if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
7314 				goto unchanged;
7315 			goto validate;
7316 		}
7317 
7318 		/*
7319 		 * The physical page has changed.  Temporarily invalidate
7320 		 * the mapping.  This ensures that all threads sharing the
7321 		 * pmap keep a consistent view of the mapping, which is
7322 		 * necessary for the correct handling of COW faults.  It
7323 		 * also permits reuse of the old mapping's PV entry,
7324 		 * avoiding an allocation.
7325 		 *
7326 		 * For consistency, handle unmanaged mappings the same way.
7327 		 */
7328 		origpte = pte_load_clear(pte);
7329 		KASSERT((origpte & PG_FRAME) == opa,
7330 		    ("pmap_enter: unexpected pa update for %#lx", va));
7331 		if ((origpte & PG_MANAGED) != 0) {
7332 			om = PHYS_TO_VM_PAGE(opa);
7333 
7334 			/*
7335 			 * The pmap lock is sufficient to synchronize with
7336 			 * concurrent calls to pmap_page_test_mappings() and
7337 			 * pmap_ts_referenced().
7338 			 */
7339 			if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
7340 				vm_page_dirty(om);
7341 			if ((origpte & PG_A) != 0) {
7342 				pmap_invalidate_page(pmap, va);
7343 				vm_page_aflag_set(om, PGA_REFERENCED);
7344 			}
7345 			CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
7346 			pv = pmap_pvh_remove(&om->md, pmap, va);
7347 			KASSERT(pv != NULL,
7348 			    ("pmap_enter: no PV entry for %#lx", va));
7349 			if ((newpte & PG_MANAGED) == 0)
7350 				free_pv_entry(pmap, pv);
7351 			if ((om->a.flags & PGA_WRITEABLE) != 0 &&
7352 			    TAILQ_EMPTY(&om->md.pv_list) &&
7353 			    ((om->flags & PG_FICTITIOUS) != 0 ||
7354 			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
7355 				vm_page_aflag_clear(om, PGA_WRITEABLE);
7356 		} else {
7357 			/*
7358 			 * Since this mapping is unmanaged, assume that PG_A
7359 			 * is set.
7360 			 */
7361 			pmap_invalidate_page(pmap, va);
7362 		}
7363 		origpte = 0;
7364 	} else {
7365 		/*
7366 		 * Increment the counters.
7367 		 */
7368 		if ((newpte & PG_W) != 0)
7369 			pmap->pm_stats.wired_count++;
7370 		pmap_resident_count_adj(pmap, 1);
7371 	}
7372 
7373 	/*
7374 	 * Enter on the PV list if part of our managed memory.
7375 	 */
7376 	if ((newpte & PG_MANAGED) != 0) {
7377 		if (pv == NULL) {
7378 			pv = get_pv_entry(pmap, &lock);
7379 			pv->pv_va = va;
7380 		}
7381 		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
7382 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
7383 		m->md.pv_gen++;
7384 		if ((newpte & PG_RW) != 0)
7385 			vm_page_aflag_set(m, PGA_WRITEABLE);
7386 	}
7387 
7388 	/*
7389 	 * Update the PTE.
7390 	 */
7391 	if ((origpte & PG_V) != 0) {
7392 validate:
7393 		origpte = pte_load_store(pte, newpte);
7394 		KASSERT((origpte & PG_FRAME) == pa,
7395 		    ("pmap_enter: unexpected pa update for %#lx", va));
7396 		if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) ==
7397 		    (PG_M | PG_RW)) {
7398 			if ((origpte & PG_MANAGED) != 0)
7399 				vm_page_dirty(m);
7400 
7401 			/*
7402 			 * Although the PTE may still have PG_RW set, TLB
7403 			 * invalidation may nonetheless be required because
7404 			 * the PTE no longer has PG_M set.
7405 			 */
7406 		} else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) {
7407 			/*
7408 			 * This PTE change does not require TLB invalidation.
7409 			 */
7410 			goto unchanged;
7411 		}
7412 		if ((origpte & PG_A) != 0)
7413 			pmap_invalidate_page(pmap, va);
7414 	} else
7415 		pte_store(pte, newpte);
7416 
7417 unchanged:
7418 
7419 #if VM_NRESERVLEVEL > 0
7420 	/*
7421 	 * If both the page table page and the reservation are fully
7422 	 * populated, then attempt promotion.
7423 	 */
7424 	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
7425 	    (m->flags & PG_FICTITIOUS) == 0 &&
7426 	    vm_reserv_level_iffullpop(m) == 0)
7427 		(void)pmap_promote_pde(pmap, pde, va, mpte, &lock);
7428 #endif
7429 
7430 	rv = KERN_SUCCESS;
7431 out:
7432 	if (lock != NULL)
7433 		rw_wunlock(lock);
7434 	PMAP_UNLOCK(pmap);
7435 	return (rv);
7436 }
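/*
 * Illustrative caller-side sketch (hedged; the real call sites live in the
 * machine-independent VM layer, e.g. the page fault handler): the
 * protection that the new mapping should carry goes in "prot", the access
 * type that triggered the operation goes in "flags" along with optional
 * PMAP_ENTER_* bits, and "psind" is the requested page size index:
 *
 *	rv = pmap_enter(map->pmap, vaddr, m, prot,
 *	    fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0);
 *
 * "map", "fault_type", and "wired" are placeholders for caller state.  For
 * a 4KB mapping as shown, KERN_RESOURCE_SHORTAGE is only expected when the
 * caller also passed PMAP_ENTER_NOSLEEP and a page table page could not be
 * allocated; otherwise the function sleeps until it can succeed.
 */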
7437 
7438 /*
7439  * Tries to create a read- and/or execute-only 2MB page mapping.  Returns
7440  * KERN_SUCCESS if the mapping was created.  Otherwise, returns an error
7441  * value.  See pmap_enter_pde() for the possible error values when "no sleep",
7442  * "no replace", and "no reclaim" are specified.
7443  */
7444 static int
7445 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
7446     struct rwlock **lockp)
7447 {
7448 	pd_entry_t newpde;
7449 	pt_entry_t PG_V;
7450 
7451 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
7452 	PG_V = pmap_valid_bit(pmap);
7453 	newpde = VM_PAGE_TO_PHYS(m) |
7454 	    pmap_cache_bits(pmap, m->md.pat_mode, true) | PG_PS | PG_V;
7455 	if ((m->oflags & VPO_UNMANAGED) == 0)
7456 		newpde |= PG_MANAGED;
7457 	if ((prot & VM_PROT_EXECUTE) == 0)
7458 		newpde |= pg_nx;
7459 	if (va < VM_MAXUSER_ADDRESS)
7460 		newpde |= PG_U;
7461 	return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
7462 	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp));
7463 }
7464 
7465 /*
7466  * Returns true if every page table entry in the specified page table page is
7467  * zero.
7468  */
7469 static bool
7470 pmap_every_pte_zero(vm_paddr_t pa)
7471 {
7472 	pt_entry_t *pt_end, *pte;
7473 
7474 	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
7475 	pte = (pt_entry_t *)PHYS_TO_DMAP(pa);
7476 	for (pt_end = pte + NPTEPG; pte < pt_end; pte++) {
7477 		if (*pte != 0)
7478 			return (false);
7479 	}
7480 	return (true);
7481 }
7482 
7483 /*
7484  * Tries to create the specified 2MB page mapping.  Returns KERN_SUCCESS if
7485  * the mapping was created, and one of KERN_FAILURE, KERN_NO_SPACE,
7486  * KERN_PROTECTION_FAILURE, or KERN_RESOURCE_SHORTAGE otherwise.  Returns
7487  * KERN_FAILURE if either (1) PMAP_ENTER_NOREPLACE was specified and a 4KB
7488  * page mapping already exists within the 2MB virtual address range starting
7489  * at the specified virtual address or (2) the requested 2MB page mapping is
7490  * not supported due to hardware errata.  Returns KERN_NO_SPACE if
7491  * PMAP_ENTER_NOREPLACE was specified and a 2MB page mapping already exists at
7492  * the specified virtual address.  Returns KERN_PROTECTION_FAILURE if the PKRU
7493  * settings are not the same across the 2MB virtual address range starting at
7494  * the specified virtual address.  Returns KERN_RESOURCE_SHORTAGE if either
7495  * (1) PMAP_ENTER_NOSLEEP was specified and a page table page allocation
7496  * failed or (2) PMAP_ENTER_NORECLAIM was specified and a PV entry allocation
7497  * failed.
7498  *
7499  * The parameter "m" is only used when creating a managed, writeable mapping.
7500  */
7501 static int
7502 pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
7503     vm_page_t m, struct rwlock **lockp)
7504 {
7505 	struct spglist free;
7506 	pd_entry_t oldpde, *pde;
7507 	pt_entry_t PG_G, PG_RW, PG_V;
7508 	vm_page_t mt, pdpg;
7509 	vm_page_t uwptpg;
7510 
7511 	PG_G = pmap_global_bit(pmap);
7512 	PG_RW = pmap_rw_bit(pmap);
7513 	KASSERT((newpde & (pmap_modified_bit(pmap) | PG_RW)) != PG_RW,
7514 	    ("pmap_enter_pde: newpde is missing PG_M"));
7515 	PG_V = pmap_valid_bit(pmap);
7516 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
7517 
7518 	if (!pmap_allow_2m_x_page(pmap, pmap_pde_ept_executable(pmap,
7519 	    newpde))) {
7520 		CTR2(KTR_PMAP, "pmap_enter_pde: 2m x blocked for va %#lx"
7521 		    " in pmap %p", va, pmap);
7522 		return (KERN_FAILURE);
7523 	}
7524 	if ((pde = pmap_alloc_pde(pmap, va, &pdpg, (flags &
7525 	    PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
7526 		CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
7527 		    " in pmap %p", va, pmap);
7528 		return (KERN_RESOURCE_SHORTAGE);
7529 	}
7530 
7531 	/*
7532 	 * If pkru is not same for the whole pde range, return failure
7533 	 * and let vm_fault() cope.  Check after pde allocation, since
7534 	 * it could sleep.
7535 	 */
7536 	if (!pmap_pkru_same(pmap, va, va + NBPDR, &newpde)) {
7537 		pmap_abort_ptp(pmap, va, pdpg);
7538 		return (KERN_PROTECTION_FAILURE);
7539 	}
7540 
7541 	/*
7542 	 * If there are existing mappings, either abort or remove them.
7543 	 */
7544 	oldpde = *pde;
7545 	if ((oldpde & PG_V) != 0) {
7546 		KASSERT(pdpg == NULL || pdpg->ref_count > 1,
7547 		    ("pmap_enter_pde: pdpg's reference count is too low"));
7548 		if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
7549 			if ((oldpde & PG_PS) != 0) {
7550 				if (pdpg != NULL)
7551 					pdpg->ref_count--;
7552 				CTR2(KTR_PMAP,
7553 				    "pmap_enter_pde: no space for va %#lx"
7554 				    " in pmap %p", va, pmap);
7555 				return (KERN_NO_SPACE);
7556 			} else if (va < VM_MAXUSER_ADDRESS ||
7557 			    !pmap_every_pte_zero(oldpde & PG_FRAME)) {
7558 				if (pdpg != NULL)
7559 					pdpg->ref_count--;
7560 				CTR2(KTR_PMAP,
7561 				    "pmap_enter_pde: failure for va %#lx"
7562 				    " in pmap %p", va, pmap);
7563 				return (KERN_FAILURE);
7564 			}
7565 		}
7566 		/* Break the existing mapping(s). */
7567 		SLIST_INIT(&free);
7568 		if ((oldpde & PG_PS) != 0) {
7569 			/*
7570 			 * The reference to the PD page that was acquired by
7571 			 * pmap_alloc_pde() ensures that it won't be freed.
7572 			 * However, if the PDE resulted from a promotion, and
7573 			 * the mapping is not from kernel_pmap, then
7574 			 * a reserved PT page could be freed.
7575 			 */
7576 			(void)pmap_remove_pde(pmap, pde, va,
7577 			    pmap != kernel_pmap, &free, lockp);
7578 			if ((oldpde & PG_G) == 0)
7579 				pmap_invalidate_pde_page(pmap, va, oldpde);
7580 		} else {
7581 			if (va >= VM_MAXUSER_ADDRESS) {
7582 				/*
7583 				 * Try to save the ptp in the trie
7584 				 * before any changes to mappings are
7585 				 * made.  Abort on failure.
7586 				 */
7587 				mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
7588 				if (pmap_insert_pt_page(pmap, mt, false, false)) {
7589 					if (pdpg != NULL)
7590 						pdpg->ref_count--;
7591 					CTR1(KTR_PMAP,
7592 			    "pmap_enter_pde: cannot ins kern ptp va %#lx",
7593 					    va);
7594 					return (KERN_RESOURCE_SHORTAGE);
7595 				}
7596 				/*
7597 				 * Both pmap_remove_pde() and
7598 				 * pmap_remove_ptes() will zero-fill
7599 				 * the kernel page table page.
7600 				 */
7601 			}
7602 			pmap_delayed_invl_start();
7603 			if (pmap_remove_ptes(pmap, va, va + NBPDR, pde, &free,
7604 			    lockp))
7605 				pmap_invalidate_all(pmap);
7606 			pmap_delayed_invl_finish();
7607 		}
7608 		if (va < VM_MAXUSER_ADDRESS) {
7609 			vm_page_free_pages_toq(&free, true);
7610 			KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p",
7611 			    pde));
7612 		} else {
7613 			KASSERT(SLIST_EMPTY(&free),
7614 			    ("pmap_enter_pde: freed kernel page table page"));
7615 		}
7616 	}
7617 
7618 	/*
7619 	 * Allocate leaf ptpage for wired userspace pages.
7620 	 */
7621 	uwptpg = NULL;
7622 	if ((newpde & PG_W) != 0 && pmap != kernel_pmap) {
7623 		uwptpg = pmap_alloc_pt_page(pmap, pmap_pde_pindex(va),
7624 		    VM_ALLOC_WIRED);
7625 		if (uwptpg == NULL) {
7626 			pmap_abort_ptp(pmap, va, pdpg);
7627 			return (KERN_RESOURCE_SHORTAGE);
7628 		}
7629 		if (pmap_insert_pt_page(pmap, uwptpg, true, false)) {
7630 			pmap_free_pt_page(pmap, uwptpg, false);
7631 			pmap_abort_ptp(pmap, va, pdpg);
7632 			return (KERN_RESOURCE_SHORTAGE);
7633 		}
7634 
7635 		uwptpg->ref_count = NPTEPG;
7636 	}
7637 	if ((newpde & PG_MANAGED) != 0) {
7638 		/*
7639 		 * Abort this mapping if its PV entry could not be created.
7640 		 */
7641 		if (!pmap_pv_insert_pde(pmap, va, newpde, flags, lockp)) {
7642 			if (pdpg != NULL)
7643 				pmap_abort_ptp(pmap, va, pdpg);
7644 			if (uwptpg != NULL) {
7645 				mt = pmap_remove_pt_page(pmap, va);
7646 				KASSERT(mt == uwptpg,
7647 				    ("removed pt page %p, expected %p", mt,
7648 				    uwptpg));
7649 				uwptpg->ref_count = 1;
7650 				pmap_free_pt_page(pmap, uwptpg, false);
7651 			}
7652 			CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
7653 			    " in pmap %p", va, pmap);
7654 			return (KERN_RESOURCE_SHORTAGE);
7655 		}
7656 		if ((newpde & PG_RW) != 0) {
7657 			for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
7658 				vm_page_aflag_set(mt, PGA_WRITEABLE);
7659 		}
7660 	}
7661 
7662 	/*
7663 	 * Increment counters.
7664 	 */
7665 	if ((newpde & PG_W) != 0)
7666 		pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE;
7667 	pmap_resident_count_adj(pmap, NBPDR / PAGE_SIZE);
7668 
7669 	/*
7670 	 * Map the superpage.  (This is not a promoted mapping; there will not
7671 	 * be any lingering 4KB page mappings in the TLB.)
7672 	 */
7673 	pde_store(pde, newpde);
7674 
7675 	counter_u64_add(pmap_pde_mappings, 1);
7676 	CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx in pmap %p",
7677 	    va, pmap);
7678 	return (KERN_SUCCESS);
7679 }
7680 
7681 /*
7682  * Maps a sequence of resident pages belonging to the same object.
7683  * The sequence begins with the given page m_start.  This page is
7684  * mapped at the given virtual address start.  Each subsequent page is
7685  * mapped at a virtual address that is offset from start by the same
7686  * amount as the page is offset from m_start within the object.  The
7687  * last page in the sequence is the page with the largest offset from
7688  * m_start that can be mapped at a virtual address less than the given
7689  * virtual address end.  Not every virtual page between start and end
7690  * is mapped; only those for which a resident page exists with the
7691  * corresponding offset from m_start are mapped.
7692  */
7693 void
7694 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
7695     vm_page_t m_start, vm_prot_t prot)
7696 {
7697 	struct pctrie_iter pages;
7698 	struct rwlock *lock;
7699 	vm_offset_t va;
7700 	vm_page_t m, mpte;
7701 	int rv;
7702 
7703 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
7704 
7705 	mpte = NULL;
7706 	vm_page_iter_limit_init(&pages, m_start->object,
7707 	    m_start->pindex + atop(end - start));
7708 	m = vm_radix_iter_lookup(&pages, m_start->pindex);
7709 	lock = NULL;
7710 	PMAP_LOCK(pmap);
7711 	while (m != NULL) {
7712 		va = start + ptoa(m->pindex - m_start->pindex);
7713 		if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
7714 		    m->psind == 1 && pmap_ps_enabled(pmap) &&
7715 		    ((rv = pmap_enter_2mpage(pmap, va, m, prot, &lock)) ==
7716 		    KERN_SUCCESS || rv == KERN_NO_SPACE))
7717 			m = vm_radix_iter_jump(&pages, NBPDR / PAGE_SIZE);
7718 		else {
7719 			mpte = pmap_enter_quick_locked(pmap, va, m, prot,
7720 			    mpte, &lock);
7721 			m = vm_radix_iter_step(&pages);
7722 		}
7723 	}
7724 	if (lock != NULL)
7725 		rw_wunlock(lock);
7726 	PMAP_UNLOCK(pmap);
7727 }
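/*
 * Worked example for the address calculation above (illustrative only):
 * with start = 0x200000, m_start->pindex = 10, and a resident page at
 * pindex 13, that page is entered at
 *
 *	va = start + ptoa(13 - 10) = 0x200000 + 3 * 4096 = 0x203000,
 *
 * and pages whose offset would place them at or beyond "end" are never
 * visited because the iterator is limited to pindex + atop(end - start).
 */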
7728 
7729 /*
7730  * this code makes some *MAJOR* assumptions:
7731  * 1. Current pmap & pmap exists.
7732  * 2. Not wired.
7733  * 3. Read access.
7734  * 4. No page table pages.
7735  * but is *MUCH* faster than pmap_enter...
7736  */
7737 
7738 void
7739 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
7740 {
7741 	struct rwlock *lock;
7742 
7743 	lock = NULL;
7744 	PMAP_LOCK(pmap);
7745 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
7746 	if (lock != NULL)
7747 		rw_wunlock(lock);
7748 	PMAP_UNLOCK(pmap);
7749 }
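/*
 * Illustrative note (hedged): pmap_enter_quick() is intended for
 * speculative mappings where failure is acceptable, such as prefaulting
 * pages near a fault address.  A caller might do, roughly,
 *
 *	if (vm_page_all_valid(m) && (m->flags & PG_FICTITIOUS) == 0)
 *		pmap_enter_quick(pmap, addr, m, entry->protection);
 *
 * never requesting a wired mapping and simply ignoring the case where the
 * mapping could not be created.
 */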
7750 
7751 static vm_page_t
7752 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
7753     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
7754 {
7755 	pd_entry_t *pde;
7756 	pt_entry_t newpte, *pte, PG_V;
7757 
7758 	KASSERT(!VA_IS_CLEANMAP(va) ||
7759 	    (m->oflags & VPO_UNMANAGED) != 0,
7760 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
7761 	PG_V = pmap_valid_bit(pmap);
7762 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
7763 	pde = NULL;
7764 
7765 	/*
7766 	 * In the case that a page table page is not
7767 	 * resident, we are creating it here.
7768 	 */
7769 	if (va < VM_MAXUSER_ADDRESS) {
7770 		pdp_entry_t *pdpe;
7771 		vm_pindex_t ptepindex;
7772 
7773 		/*
7774 		 * Calculate pagetable page index
7775 		 */
7776 		ptepindex = pmap_pde_pindex(va);
7777 		if (mpte && (mpte->pindex == ptepindex)) {
7778 			mpte->ref_count++;
7779 		} else {
7780 			/*
7781 			 * If the page table page is mapped, we just increment
7782 			 * the hold count, and activate it.  Otherwise, we
7783 			 * attempt to allocate a page table page, passing NULL
7784 			 * instead of the PV list lock pointer because we don't
7785 			 * intend to sleep.  If this attempt fails, we don't
7786 			 * retry.  Instead, we give up.
7787 			 */
7788 			pdpe = pmap_pdpe(pmap, va);
7789 			if (pdpe != NULL && (*pdpe & PG_V) != 0) {
7790 				if ((*pdpe & PG_PS) != 0)
7791 					return (NULL);
7792 				pde = pmap_pdpe_to_pde(pdpe, va);
7793 				if ((*pde & PG_V) != 0) {
7794 					if ((*pde & PG_PS) != 0)
7795 						return (NULL);
7796 					mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
7797 					mpte->ref_count++;
7798 				} else {
7799 					mpte = pmap_allocpte_alloc(pmap,
7800 					    ptepindex, NULL, va);
7801 					if (mpte == NULL)
7802 						return (NULL);
7803 				}
7804 			} else {
7805 				mpte = pmap_allocpte_alloc(pmap, ptepindex,
7806 				    NULL, va);
7807 				if (mpte == NULL)
7808 					return (NULL);
7809 			}
7810 		}
7811 		pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
7812 		pte = &pte[pmap_pte_index(va)];
7813 	} else {
7814 		mpte = NULL;
7815 		pte = vtopte(va);
7816 	}
7817 	if (*pte) {
7818 		if (mpte != NULL)
7819 			mpte->ref_count--;
7820 		return (NULL);
7821 	}
7822 
7823 	/*
7824 	 * Enter on the PV list if part of our managed memory.
7825 	 */
7826 	if ((m->oflags & VPO_UNMANAGED) == 0 &&
7827 	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
7828 		if (mpte != NULL)
7829 			pmap_abort_ptp(pmap, va, mpte);
7830 		return (NULL);
7831 	}
7832 
7833 	/*
7834 	 * Increment counters
7835 	 */
7836 	pmap_resident_count_adj(pmap, 1);
7837 
7838 	newpte = VM_PAGE_TO_PHYS(m) | PG_V |
7839 	    pmap_cache_bits(pmap, m->md.pat_mode, false);
7840 	if ((m->oflags & VPO_UNMANAGED) == 0)
7841 		newpte |= PG_MANAGED;
7842 	if ((prot & VM_PROT_EXECUTE) == 0)
7843 		newpte |= pg_nx;
7844 	if (va < VM_MAXUSER_ADDRESS)
7845 		newpte |= PG_U | pmap_pkru_get(pmap, va);
7846 	pte_store(pte, newpte);
7847 
7848 #if VM_NRESERVLEVEL > 0
7849 	/*
7850 	 * If both the PTP and the reservation are fully populated, then
7851 	 * attempt promotion.
7852 	 */
7853 	if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
7854 	    (mpte == NULL || mpte->ref_count == NPTEPG) &&
7855 	    (m->flags & PG_FICTITIOUS) == 0 &&
7856 	    vm_reserv_level_iffullpop(m) == 0) {
7857 		if (pde == NULL)
7858 			pde = pmap_pde(pmap, va);
7859 
7860 		/*
7861 		 * If promotion succeeds, then the next call to this function
7862 		 * should not be given the unmapped PTP as a hint.
7863 		 */
7864 		if (pmap_promote_pde(pmap, pde, va, mpte, lockp))
7865 			mpte = NULL;
7866 	}
7867 #endif
7868 
7869 	return (mpte);
7870 }
7871 
7872 /*
7873  * Make a temporary mapping for a physical address.  This is only intended
7874  * to be used for panic dumps.
7875  */
7876 void *
7877 pmap_kenter_temporary(vm_paddr_t pa, int i)
7878 {
7879 	vm_offset_t va;
7880 
7881 	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
7882 	pmap_kenter(va, pa);
7883 	pmap_invlpg(kernel_pmap, va);
7884 	return ((void *)crashdumpmap);
7885 }
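/*
 * Illustrative usage sketch (hedged): the dump code maps one physical page
 * at a time into the crashdumpmap window and writes it out, e.g.
 *
 *	char *va;
 *
 *	va = pmap_kenter_temporary(pa, 0);
 *	error = ex_write_dump_block(di, va, PAGE_SIZE);	// hypothetical writer
 *
 * Passing increasing values of "i" keeps several pages mapped at once; a
 * mapping remains valid only until its slot is reused.
 */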
7886 
7887 /*
7888  * This code maps large physical mmap regions into the
7889  * processor address space.  Note that some shortcuts
7890  * are taken, but the code works.
7891  */
7892 void
7893 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
7894     vm_pindex_t pindex, vm_size_t size)
7895 {
7896 	struct pctrie_iter pages;
7897 	pd_entry_t *pde;
7898 	pt_entry_t PG_A, PG_M, PG_RW, PG_V;
7899 	vm_paddr_t pa, ptepa;
7900 	vm_page_t p, pdpg;
7901 	int pat_mode;
7902 
7903 	PG_A = pmap_accessed_bit(pmap);
7904 	PG_M = pmap_modified_bit(pmap);
7905 	PG_V = pmap_valid_bit(pmap);
7906 	PG_RW = pmap_rw_bit(pmap);
7907 
7908 	VM_OBJECT_ASSERT_WLOCKED(object);
7909 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
7910 	    ("pmap_object_init_pt: non-device object"));
7911 	if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
7912 		if (!pmap_ps_enabled(pmap))
7913 			return;
7914 		if (!vm_object_populate(object, pindex, pindex + atop(size)))
7915 			return;
7916 		vm_page_iter_init(&pages, object);
7917 		p = vm_radix_iter_lookup(&pages, pindex);
7918 		KASSERT(vm_page_all_valid(p),
7919 		    ("pmap_object_init_pt: invalid page %p", p));
7920 		pat_mode = p->md.pat_mode;
7921 
7922 		/*
7923 		 * Abort the mapping if the first page is not physically
7924 		 * aligned to a 2MB page boundary.
7925 		 */
7926 		ptepa = VM_PAGE_TO_PHYS(p);
7927 		if (ptepa & (NBPDR - 1))
7928 			return;
7929 
7930 		/*
7931 		 * Skip the first page.  Abort the mapping if the rest of
7932 		 * the pages are not physically contiguous or have differing
7933 		 * memory attributes.
7934 		 */
7935 		for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
7936 		    pa += PAGE_SIZE) {
7937 			p = vm_radix_iter_next(&pages);
7938 			KASSERT(vm_page_all_valid(p),
7939 			    ("pmap_object_init_pt: invalid page %p", p));
7940 			if (pa != VM_PAGE_TO_PHYS(p) ||
7941 			    pat_mode != p->md.pat_mode)
7942 				return;
7943 		}
7944 
7945 		/*
7946 		 * Map using 2MB pages.  Since "ptepa" is 2M aligned and
7947 		 * "size" is a multiple of 2M, adding the PAT setting to "pa"
7948 		 * will not affect the termination of this loop.
7949 		 */
7950 		PMAP_LOCK(pmap);
7951 		for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, true);
7952 		    pa < ptepa + size; pa += NBPDR) {
7953 			pde = pmap_alloc_pde(pmap, addr, &pdpg, NULL);
7954 			if (pde == NULL) {
7955 				/*
7956 				 * The creation of mappings below is only an
7957 				 * optimization.  If a page directory page
7958 				 * cannot be allocated without blocking,
7959 				 * continue on to the next mapping rather than
7960 				 * blocking.
7961 				 */
7962 				addr += NBPDR;
7963 				continue;
7964 			}
7965 			if ((*pde & PG_V) == 0) {
7966 				pde_store(pde, pa | PG_PS | PG_M | PG_A |
7967 				    PG_U | PG_RW | PG_V);
7968 				pmap_resident_count_adj(pmap, NBPDR / PAGE_SIZE);
7969 				counter_u64_add(pmap_pde_mappings, 1);
7970 			} else {
7971 				/* Continue on if the PDE is already valid. */
7972 				pdpg->ref_count--;
7973 				KASSERT(pdpg->ref_count > 0,
7974 				    ("pmap_object_init_pt: missing reference "
7975 				    "to page directory page, va: 0x%lx", addr));
7976 			}
7977 			addr += NBPDR;
7978 		}
7979 		PMAP_UNLOCK(pmap);
7980 	}
7981 }
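/*
 * Illustrative sketch (assumed constants): the 2MB fast path above requires
 * the virtual address, the mapping size, and the starting physical address
 * to all be 2MB-aligned.  In isolation those checks reduce to:
 *
 *	#include <stdbool.h>
 *
 *	#define EX_NBPDR	(2UL * 1024 * 1024)	// 2MB, like NBPDR
 *
 *	static bool
 *	ex_superpage_ok(unsigned long addr, unsigned long size,
 *	    unsigned long ptepa)
 *	{
 *		return ((addr & (EX_NBPDR - 1)) == 0 &&
 *		    (size & (EX_NBPDR - 1)) == 0 &&
 *		    (ptepa & (EX_NBPDR - 1)) == 0);
 *	}
 *
 * with the additional requirements, enforced by the loops above, that the
 * backing pages be physically contiguous and share one memory attribute.
 */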
7982 
7983 /*
7984  *	Clear the wired attribute from the mappings for the specified range of
7985  *	addresses in the given pmap.  Every valid mapping within that range
7986  *	must have the wired attribute set.  In contrast, invalid mappings
7987  *	cannot have the wired attribute set, so they are ignored.
7988  *
7989  *	The wired attribute of the page table entry is not a hardware
7990  *	feature, so there is no need to invalidate any TLB entries.
7991  *	Since pmap_demote_pde() for the wired entry must never fail,
7992  *	pmap_delayed_invl_start()/finish() calls around the
7993  *	function are not needed.
7994  */
7995 void
7996 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
7997 {
7998 	vm_offset_t va_next;
7999 	pml4_entry_t *pml4e;
8000 	pdp_entry_t *pdpe;
8001 	pd_entry_t *pde;
8002 	pt_entry_t *pte, PG_V, PG_G __diagused;
8003 
8004 	PG_V = pmap_valid_bit(pmap);
8005 	PG_G = pmap_global_bit(pmap);
8006 	PMAP_LOCK(pmap);
8007 	for (; sva < eva; sva = va_next) {
8008 		pml4e = pmap_pml4e(pmap, sva);
8009 		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
8010 			va_next = (sva + NBPML4) & ~PML4MASK;
8011 			if (va_next < sva)
8012 				va_next = eva;
8013 			continue;
8014 		}
8015 
8016 		va_next = (sva + NBPDP) & ~PDPMASK;
8017 		if (va_next < sva)
8018 			va_next = eva;
8019 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
8020 		if ((*pdpe & PG_V) == 0)
8021 			continue;
8022 		if ((*pdpe & PG_PS) != 0) {
8023 			KASSERT(va_next <= eva,
8024 			    ("partial update of non-transparent 1G mapping "
8025 			    "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
8026 			    *pdpe, sva, eva, va_next));
8027 			MPASS(pmap != kernel_pmap); /* XXXKIB */
8028 			MPASS((*pdpe & (PG_MANAGED | PG_G)) == 0);
8029 			atomic_clear_long(pdpe, PG_W);
8030 			pmap->pm_stats.wired_count -= NBPDP / PAGE_SIZE;
8031 			continue;
8032 		}
8033 
8034 		va_next = (sva + NBPDR) & ~PDRMASK;
8035 		if (va_next < sva)
8036 			va_next = eva;
8037 		pde = pmap_pdpe_to_pde(pdpe, sva);
8038 		if ((*pde & PG_V) == 0)
8039 			continue;
8040 		if ((*pde & PG_PS) != 0) {
8041 			if ((*pde & PG_W) == 0)
8042 				panic("pmap_unwire: pde %#jx is missing PG_W",
8043 				    (uintmax_t)*pde);
8044 
8045 			/*
8046 			 * Are we unwiring the entire large page?  If not,
8047 			 * demote the mapping and fall through.
8048 			 */
8049 			if (sva + NBPDR == va_next && eva >= va_next) {
8050 				atomic_clear_long(pde, PG_W);
8051 				pmap->pm_stats.wired_count -= NBPDR /
8052 				    PAGE_SIZE;
8053 				continue;
8054 			} else if (!pmap_demote_pde(pmap, pde, sva))
8055 				panic("pmap_unwire: demotion failed");
8056 		}
8057 		if (va_next > eva)
8058 			va_next = eva;
8059 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
8060 		    sva += PAGE_SIZE) {
8061 			if ((*pte & PG_V) == 0)
8062 				continue;
8063 			if ((*pte & PG_W) == 0)
8064 				panic("pmap_unwire: pte %#jx is missing PG_W",
8065 				    (uintmax_t)*pte);
8066 
8067 			/*
8068 			 * PG_W must be cleared atomically.  Although the pmap
8069 			 * lock synchronizes access to PG_W, another processor
8070 			 * could be setting PG_M and/or PG_A concurrently.
8071 			 */
8072 			atomic_clear_long(pte, PG_W);
8073 			pmap->pm_stats.wired_count--;
8074 		}
8075 	}
8076 	PMAP_UNLOCK(pmap);
8077 }
8078 
8079 /*
8080  *	Copy the range specified by src_addr/len
8081  *	from the source map to the range dst_addr/len
8082  *	in the destination map.
8083  *
8084  *	This routine is only advisory and need not do anything.
8085  */
8086 void
8087 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
8088     vm_offset_t src_addr)
8089 {
8090 	struct rwlock *lock;
8091 	pml4_entry_t *pml4e;
8092 	pdp_entry_t *pdpe;
8093 	pd_entry_t *pde, srcptepaddr;
8094 	pt_entry_t *dst_pte, PG_A, PG_M, PG_V, ptetemp, *src_pte;
8095 	vm_offset_t addr, end_addr, va_next;
8096 	vm_page_t dst_pdpg, dstmpte, srcmpte;
8097 
8098 	if (dst_addr != src_addr)
8099 		return;
8100 
8101 	if (dst_pmap->pm_type != src_pmap->pm_type)
8102 		return;
8103 
8104 	/*
8105 	 * EPT page table entries that require emulation of A/D bits are
8106 	 * sensitive to clearing the PG_A bit (aka EPT_PG_READ). Although
8107 	 * we clear PG_M (aka EPT_PG_WRITE) concomitantly, the PG_U bit
8108 	 * (aka EPT_PG_EXECUTE) could still be set. Since some EPT
8109 	 * implementations flag an EPT misconfiguration for exec-only
8110 	 * mappings we skip this function entirely for emulated pmaps.
8111 	 */
8112 	if (pmap_emulate_ad_bits(dst_pmap))
8113 		return;
8114 
8115 	end_addr = src_addr + len;
8116 	lock = NULL;
8117 	if (dst_pmap < src_pmap) {
8118 		PMAP_LOCK(dst_pmap);
8119 		PMAP_LOCK(src_pmap);
8120 	} else {
8121 		PMAP_LOCK(src_pmap);
8122 		PMAP_LOCK(dst_pmap);
8123 	}
8124 
8125 	PG_A = pmap_accessed_bit(dst_pmap);
8126 	PG_M = pmap_modified_bit(dst_pmap);
8127 	PG_V = pmap_valid_bit(dst_pmap);
8128 
8129 	for (addr = src_addr; addr < end_addr; addr = va_next) {
8130 		KASSERT(addr < UPT_MIN_ADDRESS,
8131 		    ("pmap_copy: invalid to pmap_copy page tables"));
8132 
8133 		pml4e = pmap_pml4e(src_pmap, addr);
8134 		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
8135 			va_next = (addr + NBPML4) & ~PML4MASK;
8136 			if (va_next < addr)
8137 				va_next = end_addr;
8138 			continue;
8139 		}
8140 
8141 		va_next = (addr + NBPDP) & ~PDPMASK;
8142 		if (va_next < addr)
8143 			va_next = end_addr;
8144 		pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
8145 		if ((*pdpe & PG_V) == 0)
8146 			continue;
8147 		if ((*pdpe & PG_PS) != 0) {
8148 			KASSERT(va_next <= end_addr,
8149 			    ("partial update of non-transparent 1G mapping "
8150 			    "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
8151 			    *pdpe, addr, end_addr, va_next));
8152 			MPASS((addr & PDPMASK) == 0);
8153 			MPASS((*pdpe & PG_MANAGED) == 0);
8154 			srcptepaddr = *pdpe;
8155 			pdpe = pmap_pdpe(dst_pmap, addr);
8156 			if (pdpe == NULL) {
8157 				if (pmap_allocpte_alloc(dst_pmap,
8158 				    pmap_pml4e_pindex(addr), NULL, addr) ==
8159 				    NULL)
8160 					break;
8161 				pdpe = pmap_pdpe(dst_pmap, addr);
8162 			} else {
8163 				pml4e = pmap_pml4e(dst_pmap, addr);
8164 				dst_pdpg = PHYS_TO_VM_PAGE(*pml4e & PG_FRAME);
8165 				dst_pdpg->ref_count++;
8166 			}
8167 			KASSERT(*pdpe == 0,
8168 			    ("1G mapping present in dst pmap "
8169 			    "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
8170 			    *pdpe, addr, end_addr, va_next));
8171 			*pdpe = srcptepaddr & ~PG_W;
8172 			pmap_resident_count_adj(dst_pmap, NBPDP / PAGE_SIZE);
8173 			continue;
8174 		}
8175 
8176 		va_next = (addr + NBPDR) & ~PDRMASK;
8177 		if (va_next < addr)
8178 			va_next = end_addr;
8179 
8180 		pde = pmap_pdpe_to_pde(pdpe, addr);
8181 		srcptepaddr = *pde;
8182 		if (srcptepaddr == 0)
8183 			continue;
8184 
8185 		if (srcptepaddr & PG_PS) {
8186 			/*
8187 			 * We can only virtual copy whole superpages.
8188 			 */
8189 			if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
8190 				continue;
8191 			pde = pmap_alloc_pde(dst_pmap, addr, &dst_pdpg, NULL);
8192 			if (pde == NULL)
8193 				break;
8194 			if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
8195 			    pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr,
8196 			    PMAP_ENTER_NORECLAIM, &lock))) {
8197 				/*
8198 				 * We leave the dirty bit unchanged because
8199 				 * managed read/write superpage mappings are
8200 				 * required to be dirty.  However, managed
8201 				 * superpage mappings are not required to
8202 				 * have their accessed bit set, so we clear
8203 				 * it because we don't know if this mapping
8204 				 * will be used.
8205 				 */
8206 				srcptepaddr &= ~PG_W;
8207 				if ((srcptepaddr & PG_MANAGED) != 0)
8208 					srcptepaddr &= ~PG_A;
8209 				*pde = srcptepaddr;
8210 				pmap_resident_count_adj(dst_pmap, NBPDR /
8211 				    PAGE_SIZE);
8212 				counter_u64_add(pmap_pde_mappings, 1);
8213 			} else
8214 				pmap_abort_ptp(dst_pmap, addr, dst_pdpg);
8215 			continue;
8216 		}
8217 
8218 		srcptepaddr &= PG_FRAME;
8219 		srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
8220 		KASSERT(srcmpte->ref_count > 0,
8221 		    ("pmap_copy: source page table page is unused"));
8222 
8223 		if (va_next > end_addr)
8224 			va_next = end_addr;
8225 
8226 		src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
8227 		src_pte = &src_pte[pmap_pte_index(addr)];
8228 		dstmpte = NULL;
8229 		for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
8230 			ptetemp = *src_pte;
8231 
8232 			/*
8233 			 * We only make virtual copies of managed pages.
8234 			 */
8235 			if ((ptetemp & PG_MANAGED) == 0)
8236 				continue;
8237 
8238 			if (dstmpte != NULL) {
8239 				KASSERT(dstmpte->pindex ==
8240 				    pmap_pde_pindex(addr),
8241 				    ("dstmpte pindex/addr mismatch"));
8242 				dstmpte->ref_count++;
8243 			} else if ((dstmpte = pmap_allocpte(dst_pmap, addr,
8244 			    NULL)) == NULL)
8245 				goto out;
8246 			dst_pte = (pt_entry_t *)
8247 			    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
8248 			dst_pte = &dst_pte[pmap_pte_index(addr)];
8249 			if (*dst_pte == 0 &&
8250 			    pmap_try_insert_pv_entry(dst_pmap, addr,
8251 			    PHYS_TO_VM_PAGE(ptetemp & PG_FRAME), &lock)) {
8252 				/*
8253 				 * Clear the wired, modified, and accessed
8254 				 * (referenced) bits during the copy.
8255 				 */
8256 				*dst_pte = ptetemp & ~(PG_W | PG_M | PG_A);
8257 				pmap_resident_count_adj(dst_pmap, 1);
8258 			} else {
8259 				pmap_abort_ptp(dst_pmap, addr, dstmpte);
8260 				goto out;
8261 			}
8262 			/* Have we copied all of the valid mappings? */
8263 			if (dstmpte->ref_count >= srcmpte->ref_count)
8264 				break;
8265 		}
8266 	}
8267 out:
8268 	if (lock != NULL)
8269 		rw_wunlock(lock);
8270 	PMAP_UNLOCK(src_pmap);
8271 	PMAP_UNLOCK(dst_pmap);
8272 }
8273 
8274 int
8275 pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap)
8276 {
8277 	int error;
8278 
8279 	if (dst_pmap->pm_type != src_pmap->pm_type ||
8280 	    dst_pmap->pm_type != PT_X86 ||
8281 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0)
8282 		return (0);
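	/*
	 * Copy the PKRU ranges; if the copy fails for lack of memory, wait
	 * for free pages and retry.
	 */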
8283 	for (;;) {
8284 		if (dst_pmap < src_pmap) {
8285 			PMAP_LOCK(dst_pmap);
8286 			PMAP_LOCK(src_pmap);
8287 		} else {
8288 			PMAP_LOCK(src_pmap);
8289 			PMAP_LOCK(dst_pmap);
8290 		}
8291 		error = pmap_pkru_copy(dst_pmap, src_pmap);
8292 		/* Clean up partial copy on failure due to no memory. */
8293 		if (error == ENOMEM)
8294 			pmap_pkru_deassign_all(dst_pmap);
8295 		PMAP_UNLOCK(src_pmap);
8296 		PMAP_UNLOCK(dst_pmap);
8297 		if (error != ENOMEM)
8298 			break;
8299 		vm_wait(NULL);
8300 	}
8301 	return (error);
8302 }
8303 
8304 /*
8305  * Zero the specified hardware page.
8306  */
8307 void
8308 pmap_zero_page(vm_page_t m)
8309 {
8310 	vm_offset_t va;
8311 
8312 #ifdef TSLOG_PAGEZERO
8313 	TSENTER();
8314 #endif
8315 	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
8316 	pagezero((void *)va);
8317 #ifdef TSLOG_PAGEZERO
8318 	TSEXIT();
8319 #endif
8320 }
8321 
8322 /*
8323  * Zero an area within a single hardware page.  off and size must not
8324  * cover an area beyond a single hardware page.
8325  */
8326 void
8327 pmap_zero_page_area(vm_page_t m, int off, int size)
8328 {
8329 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
8330 
8331 	if (off == 0 && size == PAGE_SIZE)
8332 		pagezero((void *)va);
8333 	else
8334 		bzero((char *)va + off, size);
8335 }
8336 
8337 /*
8338  * Copy 1 specified hardware page to another.
8339  */
8340 void
8341 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
8342 {
8343 	vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
8344 	vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
8345 
8346 	pagecopy((void *)src, (void *)dst);
8347 }
8348 
8349 int unmapped_buf_allowed = 1;
8350 
8351 void
8352 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
8353     vm_offset_t b_offset, int xfersize)
8354 {
8355 	void *a_cp, *b_cp;
8356 	vm_page_t pages[2];
8357 	vm_offset_t vaddr[2], a_pg_offset, b_pg_offset;
8358 	int cnt;
8359 	bool mapped;
8360 
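	/*
	 * Copy in chunks that never cross a page boundary in either the
	 * source or the destination array.
	 */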
8361 	while (xfersize > 0) {
8362 		a_pg_offset = a_offset & PAGE_MASK;
8363 		pages[0] = ma[a_offset >> PAGE_SHIFT];
8364 		b_pg_offset = b_offset & PAGE_MASK;
8365 		pages[1] = mb[b_offset >> PAGE_SHIFT];
8366 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
8367 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
8368 		mapped = pmap_map_io_transient(pages, vaddr, 2, false);
8369 		a_cp = (char *)vaddr[0] + a_pg_offset;
8370 		b_cp = (char *)vaddr[1] + b_pg_offset;
8371 		bcopy(a_cp, b_cp, cnt);
8372 		if (__predict_false(mapped))
8373 			pmap_unmap_io_transient(pages, vaddr, 2, false);
8374 		a_offset += cnt;
8375 		b_offset += cnt;
8376 		xfersize -= cnt;
8377 	}
8378 }
8379 
8380 /*
8381  * Returns true if the pmap's pv is one of the first
8382  * 16 pvs linked to from this page.  This count may
8383  * be changed upwards or downwards in the future; it
8384  * is only necessary that true be returned for a small
8385  * subset of pmaps for proper page aging.
8386  */
8387 bool
8388 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
8389 {
8390 	struct md_page *pvh;
8391 	struct rwlock *lock;
8392 	pv_entry_t pv;
8393 	int loops = 0;
8394 	bool rv;
8395 
8396 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8397 	    ("pmap_page_exists_quick: page %p is not managed", m));
8398 	rv = false;
8399 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8400 	rw_rlock(lock);
8401 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8402 		if (PV_PMAP(pv) == pmap) {
8403 			rv = true;
8404 			break;
8405 		}
8406 		loops++;
8407 		if (loops >= 16)
8408 			break;
8409 	}
8410 	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
8411 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
8412 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
8413 			if (PV_PMAP(pv) == pmap) {
8414 				rv = true;
8415 				break;
8416 			}
8417 			loops++;
8418 			if (loops >= 16)
8419 				break;
8420 		}
8421 	}
8422 	rw_runlock(lock);
8423 	return (rv);
8424 }
8425 
8426 /*
8427  *	pmap_page_wired_mappings:
8428  *
8429  *	Return the number of managed mappings to the given physical page
8430  *	that are wired.
8431  */
8432 int
8433 pmap_page_wired_mappings(vm_page_t m)
8434 {
8435 	struct rwlock *lock;
8436 	struct md_page *pvh;
8437 	pmap_t pmap;
8438 	pt_entry_t *pte;
8439 	pv_entry_t pv;
8440 	int count, md_gen, pvh_gen;
8441 
8442 	if ((m->oflags & VPO_UNMANAGED) != 0)
8443 		return (0);
8444 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8445 	rw_rlock(lock);
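	/*
	 * If a pmap lock cannot be acquired without blocking, drop the PV
	 * list lock, block on the pmap lock, and restart if the page's PV
	 * list changed in the meantime (detected via the generation counts).
	 */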
8446 restart:
8447 	count = 0;
8448 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8449 		pmap = PV_PMAP(pv);
8450 		if (!PMAP_TRYLOCK(pmap)) {
8451 			md_gen = m->md.pv_gen;
8452 			rw_runlock(lock);
8453 			PMAP_LOCK(pmap);
8454 			rw_rlock(lock);
8455 			if (md_gen != m->md.pv_gen) {
8456 				PMAP_UNLOCK(pmap);
8457 				goto restart;
8458 			}
8459 		}
8460 		pte = pmap_pte(pmap, pv->pv_va);
8461 		if ((*pte & PG_W) != 0)
8462 			count++;
8463 		PMAP_UNLOCK(pmap);
8464 	}
8465 	if ((m->flags & PG_FICTITIOUS) == 0) {
8466 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
8467 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
8468 			pmap = PV_PMAP(pv);
8469 			if (!PMAP_TRYLOCK(pmap)) {
8470 				md_gen = m->md.pv_gen;
8471 				pvh_gen = pvh->pv_gen;
8472 				rw_runlock(lock);
8473 				PMAP_LOCK(pmap);
8474 				rw_rlock(lock);
8475 				if (md_gen != m->md.pv_gen ||
8476 				    pvh_gen != pvh->pv_gen) {
8477 					PMAP_UNLOCK(pmap);
8478 					goto restart;
8479 				}
8480 			}
8481 			pte = pmap_pde(pmap, pv->pv_va);
8482 			if ((*pte & PG_W) != 0)
8483 				count++;
8484 			PMAP_UNLOCK(pmap);
8485 		}
8486 	}
8487 	rw_runlock(lock);
8488 	return (count);
8489 }
8490 
8491 /*
8492  * Returns true if the given page is mapped individually or as part of
8493  * a 2mpage.  Otherwise, returns false.
8494  */
8495 bool
8496 pmap_page_is_mapped(vm_page_t m)
8497 {
8498 	struct rwlock *lock;
8499 	bool rv;
8500 
8501 	if ((m->oflags & VPO_UNMANAGED) != 0)
8502 		return (false);
8503 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8504 	rw_rlock(lock);
8505 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
8506 	    ((m->flags & PG_FICTITIOUS) == 0 &&
8507 	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
8508 	rw_runlock(lock);
8509 	return (rv);
8510 }
8511 
8512 /*
8513  * Destroy all managed, non-wired mappings in the given user-space
8514  * pmap.  This pmap cannot be active on any processor besides the
8515  * caller.
8516  *
8517  * This function cannot be applied to the kernel pmap.  Moreover, it
8518  * is not intended for general use.  It is only to be used during
8519  * process termination.  Consequently, it can be implemented in ways
8520  * that make it faster than pmap_remove().  First, it can more quickly
8521  * destroy mappings by iterating over the pmap's collection of PV
8522  * entries, rather than searching the page table.  Second, it doesn't
8523  * have to test and clear the page table entries atomically, because
8524  * no processor is currently accessing the user address space.  In
8525  * particular, a page table entry's dirty bit won't change state once
8526  * this function starts.
8527  *
8528  * Although this function destroys all of the pmap's managed,
8529  * non-wired mappings, it can delay and batch the invalidation of TLB
8530  * entries without calling pmap_delayed_invl_start() and
8531  * pmap_delayed_invl_finish().  Because the pmap is not active on
8532  * any other processor, none of these TLB entries will ever be used
8533  * before their eventual invalidation.  Consequently, there is no need
8534  * for either pmap_remove_all() or pmap_remove_write() to wait for
8535  * that eventual TLB invalidation.
8536  */
8537 void
8538 pmap_remove_pages(pmap_t pmap)
8539 {
8540 	pd_entry_t ptepde;
8541 	pt_entry_t *pte, tpte;
8542 	pt_entry_t PG_M, PG_RW, PG_V;
8543 	struct spglist free;
8544 	struct pv_chunklist free_chunks[PMAP_MEMDOM];
8545 	vm_page_t m, mpte, mt;
8546 	pv_entry_t pv;
8547 	struct md_page *pvh;
8548 	struct pv_chunk *pc, *npc;
8549 	struct rwlock *lock;
8550 	int64_t bit;
8551 	uint64_t inuse, bitmask;
8552 	int allfree, field, i, idx;
8553 #ifdef PV_STATS
8554 	int freed;
8555 #endif
8556 	bool superpage;
8557 	vm_paddr_t pa;
8558 
8559 	/*
8560 	 * Assert that the given pmap is only active on the current
8561 	 * CPU.  Unfortunately, we cannot block another CPU from
8562 	 * activating the pmap while this function is executing.
8563 	 */
8564 	KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap));
8565 #ifdef INVARIANTS
8566 	{
8567 		cpuset_t other_cpus;
8568 
8569 		other_cpus = all_cpus;
8570 		critical_enter();
8571 		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
8572 		CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
8573 		critical_exit();
8574 		KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap));
8575 	}
8576 #endif
8577 
8578 	lock = NULL;
8579 	PG_M = pmap_modified_bit(pmap);
8580 	PG_V = pmap_valid_bit(pmap);
8581 	PG_RW = pmap_rw_bit(pmap);
8582 
8583 	for (i = 0; i < PMAP_MEMDOM; i++)
8584 		TAILQ_INIT(&free_chunks[i]);
8585 	SLIST_INIT(&free);
8586 	PMAP_LOCK(pmap);
8587 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
8588 		allfree = 1;
8589 #ifdef PV_STATS
8590 		freed = 0;
8591 #endif
8592 		for (field = 0; field < _NPCM; field++) {
8593 			inuse = ~pc->pc_map[field] & pc_freemask[field];
8594 			while (inuse != 0) {
8595 				bit = bsfq(inuse);
8596 				bitmask = 1UL << bit;
8597 				idx = field * 64 + bit;
8598 				pv = &pc->pc_pventry[idx];
8599 				inuse &= ~bitmask;
8600 
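				/*
				 * Walk down the paging structures to the
				 * entry that maps this PV entry's address.
				 */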
8601 				pte = pmap_pdpe(pmap, pv->pv_va);
8602 				ptepde = *pte;
8603 				pte = pmap_pdpe_to_pde(pte, pv->pv_va);
8604 				tpte = *pte;
8605 				if ((tpte & (PG_PS | PG_V)) == PG_V) {
8606 					superpage = false;
8607 					ptepde = tpte;
8608 					pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
8609 					    PG_FRAME);
8610 					pte = &pte[pmap_pte_index(pv->pv_va)];
8611 					tpte = *pte;
8612 				} else {
8613 					/*
8614 					 * Keep track whether 'tpte' is a
8615 					 * superpage explicitly instead of
8616 					 * relying on PG_PS being set.
8617 					 *
8618 					 * This is because PG_PS is numerically
8619 					 * identical to PG_PTE_PAT and thus a
8620 					 * regular page could be mistaken for
8621 					 * a superpage.
8622 					 */
8623 					superpage = true;
8624 				}
8625 
8626 				if ((tpte & PG_V) == 0) {
8627 					panic("bad pte va %lx pte %lx",
8628 					    pv->pv_va, tpte);
8629 				}
8630 
8631 /*
8632  * We cannot remove wired pages from a process' mapping at this time
8633  */
8634 				if (tpte & PG_W) {
8635 					allfree = 0;
8636 					continue;
8637 				}
8638 
8639 				/* Mark free */
8640 				pc->pc_map[field] |= bitmask;
8641 
8642 				/*
8643 				 * Because this pmap is not active on other
8644 				 * processors, the dirty bit cannot have
8645 				 * changed state since we last loaded pte.
8646 				 */
8647 				pte_clear(pte);
8648 
8649 				if (superpage)
8650 					pa = tpte & PG_PS_FRAME;
8651 				else
8652 					pa = tpte & PG_FRAME;
8653 
8654 				m = PHYS_TO_VM_PAGE(pa);
8655 				KASSERT(m->phys_addr == pa,
8656 				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
8657 				    m, (uintmax_t)m->phys_addr,
8658 				    (uintmax_t)tpte));
8659 
8660 				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
8661 				    m < &vm_page_array[vm_page_array_size],
8662 				    ("pmap_remove_pages: bad tpte %#jx",
8663 				    (uintmax_t)tpte));
8664 
8665 				/*
8666 				 * Update the vm_page_t clean/reference bits.
8667 				 */
8668 				if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
8669 					if (superpage) {
8670 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
8671 							vm_page_dirty(mt);
8672 					} else
8673 						vm_page_dirty(m);
8674 				}
8675 
8676 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
8677 
8678 				if (superpage) {
8679 					pmap_resident_count_adj(pmap, -NBPDR / PAGE_SIZE);
8680 					pvh = pa_to_pvh(tpte & PG_PS_FRAME);
8681 					TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
8682 					pvh->pv_gen++;
8683 					if (TAILQ_EMPTY(&pvh->pv_list)) {
8684 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
8685 							if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
8686 							    TAILQ_EMPTY(&mt->md.pv_list))
8687 								vm_page_aflag_clear(mt, PGA_WRITEABLE);
8688 					}
8689 					mpte = pmap_remove_pt_page(pmap, pv->pv_va);
8690 					if (mpte != NULL) {
8691 						KASSERT(vm_page_any_valid(mpte),
8692 						    ("pmap_remove_pages: pte page not promoted"));
8693 						pmap_pt_page_count_adj(pmap, -1);
8694 						KASSERT(mpte->ref_count == NPTEPG,
8695 						    ("pmap_remove_pages: pte page reference count error"));
8696 						mpte->ref_count = 0;
8697 						pmap_add_delayed_free_list(mpte, &free, false);
8698 					}
8699 				} else {
8700 					pmap_resident_count_adj(pmap, -1);
8701 					TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
8702 					m->md.pv_gen++;
8703 					if ((m->a.flags & PGA_WRITEABLE) != 0 &&
8704 					    TAILQ_EMPTY(&m->md.pv_list) &&
8705 					    (m->flags & PG_FICTITIOUS) == 0) {
8706 						pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
8707 						if (TAILQ_EMPTY(&pvh->pv_list))
8708 							vm_page_aflag_clear(m, PGA_WRITEABLE);
8709 					}
8710 				}
8711 				pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
8712 #ifdef PV_STATS
8713 				freed++;
8714 #endif
8715 			}
8716 		}
8717 		PV_STAT(counter_u64_add(pv_entry_frees, freed));
8718 		PV_STAT(counter_u64_add(pv_entry_spare, freed));
8719 		PV_STAT(counter_u64_add(pv_entry_count, -freed));
8720 		if (allfree) {
8721 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
8722 			TAILQ_INSERT_TAIL(&free_chunks[pc_to_domain(pc)], pc, pc_list);
8723 		}
8724 	}
8725 	if (lock != NULL)
8726 		rw_wunlock(lock);
8727 	pmap_invalidate_all(pmap);
8728 	pmap_pkru_deassign_all(pmap);
8729 	free_pv_chunk_batch((struct pv_chunklist *)&free_chunks);
8730 	PMAP_UNLOCK(pmap);
8731 	vm_page_free_pages_toq(&free, true);
8732 }
8733 
8734 static bool
8735 pmap_page_test_mappings(vm_page_t m, bool accessed, bool modified)
8736 {
8737 	struct rwlock *lock;
8738 	pv_entry_t pv;
8739 	struct md_page *pvh;
8740 	pt_entry_t *pte, mask;
8741 	pt_entry_t PG_A, PG_M, PG_RW, PG_V;
8742 	pmap_t pmap;
8743 	int md_gen, pvh_gen;
8744 	bool rv;
8745 
8746 	rv = false;
8747 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8748 	rw_rlock(lock);
8749 restart:
8750 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8751 		pmap = PV_PMAP(pv);
8752 		if (!PMAP_TRYLOCK(pmap)) {
8753 			md_gen = m->md.pv_gen;
8754 			rw_runlock(lock);
8755 			PMAP_LOCK(pmap);
8756 			rw_rlock(lock);
8757 			if (md_gen != m->md.pv_gen) {
8758 				PMAP_UNLOCK(pmap);
8759 				goto restart;
8760 			}
8761 		}
8762 		pte = pmap_pte(pmap, pv->pv_va);
8763 		mask = 0;
8764 		if (modified) {
8765 			PG_M = pmap_modified_bit(pmap);
8766 			PG_RW = pmap_rw_bit(pmap);
8767 			mask |= PG_RW | PG_M;
8768 		}
8769 		if (accessed) {
8770 			PG_A = pmap_accessed_bit(pmap);
8771 			PG_V = pmap_valid_bit(pmap);
8772 			mask |= PG_V | PG_A;
8773 		}
8774 		rv = (*pte & mask) == mask;
8775 		PMAP_UNLOCK(pmap);
8776 		if (rv)
8777 			goto out;
8778 	}
8779 	if ((m->flags & PG_FICTITIOUS) == 0) {
8780 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
8781 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
8782 			pmap = PV_PMAP(pv);
8783 			if (!PMAP_TRYLOCK(pmap)) {
8784 				md_gen = m->md.pv_gen;
8785 				pvh_gen = pvh->pv_gen;
8786 				rw_runlock(lock);
8787 				PMAP_LOCK(pmap);
8788 				rw_rlock(lock);
8789 				if (md_gen != m->md.pv_gen ||
8790 				    pvh_gen != pvh->pv_gen) {
8791 					PMAP_UNLOCK(pmap);
8792 					goto restart;
8793 				}
8794 			}
8795 			pte = pmap_pde(pmap, pv->pv_va);
8796 			mask = 0;
8797 			if (modified) {
8798 				PG_M = pmap_modified_bit(pmap);
8799 				PG_RW = pmap_rw_bit(pmap);
8800 				mask |= PG_RW | PG_M;
8801 			}
8802 			if (accessed) {
8803 				PG_A = pmap_accessed_bit(pmap);
8804 				PG_V = pmap_valid_bit(pmap);
8805 				mask |= PG_V | PG_A;
8806 			}
8807 			rv = (*pte & mask) == mask;
8808 			PMAP_UNLOCK(pmap);
8809 			if (rv)
8810 				goto out;
8811 		}
8812 	}
8813 out:
8814 	rw_runlock(lock);
8815 	return (rv);
8816 }
8817 
8818 /*
8819  *	pmap_is_modified:
8820  *
8821  *	Return whether or not the specified physical page was modified
8822  *	in any physical maps.
8823  */
8824 bool
8825 pmap_is_modified(vm_page_t m)
8826 {
8827 
8828 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8829 	    ("pmap_is_modified: page %p is not managed", m));
8830 
8831 	/*
8832 	 * If the page is not busied then this check is racy.
8833 	 */
8834 	if (!pmap_page_is_write_mapped(m))
8835 		return (false);
8836 	return (pmap_page_test_mappings(m, false, true));
8837 }
8838 
8839 /*
8840  *	pmap_is_prefaultable:
8841  *
8842  *	Return whether or not the specified virtual address is eligible
8843  *	for prefault.
8844  */
8845 bool
8846 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
8847 {
8848 	pd_entry_t *pde;
8849 	pt_entry_t *pte, PG_V;
8850 	bool rv;
8851 
8852 	PG_V = pmap_valid_bit(pmap);
8853 
8854 	/*
8855 	 * Return true if and only if the PTE for the specified virtual
8856 	 * address is allocated but invalid.
8857 	 */
8858 	rv = false;
8859 	PMAP_LOCK(pmap);
8860 	pde = pmap_pde(pmap, addr);
8861 	if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) {
8862 		pte = pmap_pde_to_pte(pde, addr);
8863 		rv = (*pte & PG_V) == 0;
8864 	}
8865 	PMAP_UNLOCK(pmap);
8866 	return (rv);
8867 }
8868 
8869 /*
8870  *	pmap_is_referenced:
8871  *
8872  *	Return whether or not the specified physical page was referenced
8873  *	in any physical maps.
8874  */
8875 bool
8876 pmap_is_referenced(vm_page_t m)
8877 {
8878 
8879 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8880 	    ("pmap_is_referenced: page %p is not managed", m));
8881 	return (pmap_page_test_mappings(m, true, false));
8882 }
8883 
8884 /*
8885  * Clear the write and modified bits in each of the given page's mappings.
8886  */
8887 void
8888 pmap_remove_write(vm_page_t m)
8889 {
8890 	struct md_page *pvh;
8891 	pmap_t pmap;
8892 	struct rwlock *lock;
8893 	pv_entry_t next_pv, pv;
8894 	pd_entry_t *pde;
8895 	pt_entry_t oldpte, *pte, PG_M, PG_RW;
8896 	vm_offset_t va;
8897 	int pvh_gen, md_gen;
8898 
8899 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8900 	    ("pmap_remove_write: page %p is not managed", m));
8901 
8902 	vm_page_assert_busied(m);
8903 	if (!pmap_page_is_write_mapped(m))
8904 		return;
8905 
8906 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8907 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
8908 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
8909 	rw_wlock(lock);
8910 retry:
8911 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
8912 		pmap = PV_PMAP(pv);
8913 		if (!PMAP_TRYLOCK(pmap)) {
8914 			pvh_gen = pvh->pv_gen;
8915 			rw_wunlock(lock);
8916 			PMAP_LOCK(pmap);
8917 			rw_wlock(lock);
8918 			if (pvh_gen != pvh->pv_gen) {
8919 				PMAP_UNLOCK(pmap);
8920 				goto retry;
8921 			}
8922 		}
8923 		PG_RW = pmap_rw_bit(pmap);
8924 		va = pv->pv_va;
8925 		pde = pmap_pde(pmap, va);
8926 		if ((*pde & PG_RW) != 0)
8927 			(void)pmap_demote_pde_locked(pmap, pde, va, &lock);
8928 		KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
8929 		    ("inconsistent pv lock %p %p for page %p",
8930 		    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
8931 		PMAP_UNLOCK(pmap);
8932 	}
8933 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8934 		pmap = PV_PMAP(pv);
8935 		if (!PMAP_TRYLOCK(pmap)) {
8936 			pvh_gen = pvh->pv_gen;
8937 			md_gen = m->md.pv_gen;
8938 			rw_wunlock(lock);
8939 			PMAP_LOCK(pmap);
8940 			rw_wlock(lock);
8941 			if (pvh_gen != pvh->pv_gen ||
8942 			    md_gen != m->md.pv_gen) {
8943 				PMAP_UNLOCK(pmap);
8944 				goto retry;
8945 			}
8946 		}
8947 		PG_M = pmap_modified_bit(pmap);
8948 		PG_RW = pmap_rw_bit(pmap);
8949 		pde = pmap_pde(pmap, pv->pv_va);
8950 		KASSERT((*pde & PG_PS) == 0,
8951 		    ("pmap_remove_write: found a 2mpage in page %p's pv list",
8952 		    m));
8953 		pte = pmap_pde_to_pte(pde, pv->pv_va);
8954 		oldpte = *pte;
8955 		if (oldpte & PG_RW) {
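			/*
			 * Atomically clear the write and modified bits,
			 * retrying if the PTE changes underneath us.
			 */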
8956 			while (!atomic_fcmpset_long(pte, &oldpte, oldpte &
8957 			    ~(PG_RW | PG_M)))
8958 				cpu_spinwait();
8959 			if ((oldpte & PG_M) != 0)
8960 				vm_page_dirty(m);
8961 			pmap_invalidate_page(pmap, pv->pv_va);
8962 		}
8963 		PMAP_UNLOCK(pmap);
8964 	}
8965 	rw_wunlock(lock);
8966 	vm_page_aflag_clear(m, PGA_WRITEABLE);
8967 	pmap_delayed_invl_wait(m);
8968 }
8969 
8970 /*
8971  *	pmap_ts_referenced:
8972  *
8973  *	Return a count of reference bits for a page, clearing those bits.
8974  *	It is not necessary for every reference bit to be cleared, but it
8975  *	is necessary that 0 only be returned when there are truly no
8976  *	reference bits set.
8977  *
8978  *	As an optimization, update the page's dirty field if a modified bit is
8979  *	found while counting reference bits.  This opportunistic update can be
8980  *	performed at low cost and can eliminate the need for some future calls
8981  *	to pmap_is_modified().  However, since this function stops after
8982  *	finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
8983  *	dirty pages.  Those dirty pages will only be detected by a future call
8984  *	to pmap_is_modified().
8985  *
8986  *	A DI block is not needed within this function, because
8987  *	invalidations are performed before the PV list lock is
8988  *	released.
8989  */
8990 int
8991 pmap_ts_referenced(vm_page_t m)
8992 {
8993 	struct md_page *pvh;
8994 	pv_entry_t pv, pvf;
8995 	pmap_t pmap;
8996 	struct rwlock *lock;
8997 	pd_entry_t oldpde, *pde;
8998 	pt_entry_t *pte, PG_A, PG_M, PG_RW;
8999 	vm_offset_t va;
9000 	vm_paddr_t pa;
9001 	int cleared, md_gen, not_cleared, pvh_gen;
9002 	struct spglist free;
9003 	bool demoted;
9004 
9005 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
9006 	    ("pmap_ts_referenced: page %p is not managed", m));
9007 	SLIST_INIT(&free);
9008 	cleared = 0;
9009 	pa = VM_PAGE_TO_PHYS(m);
9010 	lock = PHYS_TO_PV_LIST_LOCK(pa);
9011 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
9012 	rw_wlock(lock);
9013 retry:
9014 	not_cleared = 0;
9015 	if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
9016 		goto small_mappings;
9017 	pv = pvf;
9018 	do {
9019 		if (pvf == NULL)
9020 			pvf = pv;
9021 		pmap = PV_PMAP(pv);
9022 		if (!PMAP_TRYLOCK(pmap)) {
9023 			pvh_gen = pvh->pv_gen;
9024 			rw_wunlock(lock);
9025 			PMAP_LOCK(pmap);
9026 			rw_wlock(lock);
9027 			if (pvh_gen != pvh->pv_gen) {
9028 				PMAP_UNLOCK(pmap);
9029 				goto retry;
9030 			}
9031 		}
9032 		PG_A = pmap_accessed_bit(pmap);
9033 		PG_M = pmap_modified_bit(pmap);
9034 		PG_RW = pmap_rw_bit(pmap);
9035 		va = pv->pv_va;
9036 		pde = pmap_pde(pmap, pv->pv_va);
9037 		oldpde = *pde;
9038 		if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
9039 			/*
9040 			 * Although "oldpde" is mapping a 2MB page, because
9041 			 * this function is called at a 4KB page granularity,
9042 			 * we only update the 4KB page under test.
9043 			 */
9044 			vm_page_dirty(m);
9045 		}
9046 		if ((oldpde & PG_A) != 0) {
9047 			/*
9048 			 * Since this reference bit is shared by 512 4KB
9049 			 * pages, it should not be cleared every time it is
9050 			 * tested.  Apply a simple "hash" function on the
9051 			 * physical page number, the virtual superpage number,
9052 			 * and the pmap address to select one 4KB page out of
9053 			 * the 512 on which testing the reference bit will
9054 			 * result in clearing that reference bit.  This
9055 			 * function is designed to avoid the selection of the
9056 			 * same 4KB page for every 2MB page mapping.
9057 			 *
9058 			 * On demotion, a mapping that hasn't been referenced
9059 			 * is simply destroyed.  To avoid the possibility of a
9060 			 * subsequent page fault on a demoted wired mapping,
9061 			 * always leave its reference bit set.  Moreover,
9062 			 * since the superpage is wired, the current state of
9063 			 * its reference bit won't affect page replacement.
9064 			 */
9065 			if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^
9066 			    (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
9067 			    (oldpde & PG_W) == 0) {
9068 				if (safe_to_clear_referenced(pmap, oldpde)) {
9069 					atomic_clear_long(pde, PG_A);
9070 					pmap_invalidate_page(pmap, pv->pv_va);
9071 					demoted = false;
9072 				} else if (pmap_demote_pde_locked(pmap, pde,
9073 				    pv->pv_va, &lock)) {
9074 					/*
9075 					 * Remove the mapping to a single page
9076 					 * so that a subsequent access may
9077 					 * repromote.  Since the underlying
9078 					 * page table page is fully populated,
9079 					 * this removal never frees a page
9080 					 * table page.
9081 					 */
9082 					demoted = true;
9083 					va += VM_PAGE_TO_PHYS(m) - (oldpde &
9084 					    PG_PS_FRAME);
9085 					pte = pmap_pde_to_pte(pde, va);
9086 					pmap_remove_pte(pmap, pte, va, *pde,
9087 					    NULL, &lock);
9088 					pmap_invalidate_page(pmap, va);
9089 				} else
9090 					demoted = true;
9091 
9092 				if (demoted) {
9093 					/*
9094 					 * The superpage mapping was removed
9095 					 * entirely and therefore 'pv' is no
9096 					 * longer valid.
9097 					 */
9098 					if (pvf == pv)
9099 						pvf = NULL;
9100 					pv = NULL;
9101 				}
9102 				cleared++;
9103 				KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
9104 				    ("inconsistent pv lock %p %p for page %p",
9105 				    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
9106 			} else
9107 				not_cleared++;
9108 		}
9109 		PMAP_UNLOCK(pmap);
9110 		/* Rotate the PV list if it has more than one entry. */
9111 		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
9112 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
9113 			TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
9114 			pvh->pv_gen++;
9115 		}
9116 		if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
9117 			goto out;
9118 	} while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
9119 small_mappings:
9120 	if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
9121 		goto out;
9122 	pv = pvf;
9123 	do {
9124 		if (pvf == NULL)
9125 			pvf = pv;
9126 		pmap = PV_PMAP(pv);
9127 		if (!PMAP_TRYLOCK(pmap)) {
9128 			pvh_gen = pvh->pv_gen;
9129 			md_gen = m->md.pv_gen;
9130 			rw_wunlock(lock);
9131 			PMAP_LOCK(pmap);
9132 			rw_wlock(lock);
9133 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
9134 				PMAP_UNLOCK(pmap);
9135 				goto retry;
9136 			}
9137 		}
9138 		PG_A = pmap_accessed_bit(pmap);
9139 		PG_M = pmap_modified_bit(pmap);
9140 		PG_RW = pmap_rw_bit(pmap);
9141 		pde = pmap_pde(pmap, pv->pv_va);
9142 		KASSERT((*pde & PG_PS) == 0,
9143 		    ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
9144 		    m));
9145 		pte = pmap_pde_to_pte(pde, pv->pv_va);
9146 		if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
9147 			vm_page_dirty(m);
9148 		if ((*pte & PG_A) != 0) {
9149 			if (safe_to_clear_referenced(pmap, *pte)) {
9150 				atomic_clear_long(pte, PG_A);
9151 				pmap_invalidate_page(pmap, pv->pv_va);
9152 				cleared++;
9153 			} else if ((*pte & PG_W) == 0) {
9154 				/*
9155 				 * Wired pages cannot be paged out so
9156 				 * doing accessed bit emulation for
9157 				 * them is wasted effort. We do the
9158 				 * hard work for unwired pages only.
9159 				 */
9160 				pmap_remove_pte(pmap, pte, pv->pv_va,
9161 				    *pde, &free, &lock);
9162 				pmap_invalidate_page(pmap, pv->pv_va);
9163 				cleared++;
9164 				if (pvf == pv)
9165 					pvf = NULL;
9166 				pv = NULL;
9167 				KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
9168 				    ("inconsistent pv lock %p %p for page %p",
9169 				    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
9170 			} else
9171 				not_cleared++;
9172 		}
9173 		PMAP_UNLOCK(pmap);
9174 		/* Rotate the PV list if it has more than one entry. */
9175 		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
9176 			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
9177 			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
9178 			m->md.pv_gen++;
9179 		}
9180 	} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
9181 	    not_cleared < PMAP_TS_REFERENCED_MAX);
9182 out:
9183 	rw_wunlock(lock);
9184 	vm_page_free_pages_toq(&free, true);
9185 	return (cleared + not_cleared);
9186 }
9187 
9188 /*
9189  *	Apply the given advice to the specified range of addresses within the
9190  *	given pmap.  Depending on the advice, clear the referenced and/or
9191  *	modified flags in each mapping and set the mapped page's dirty field.
9192  */
9193 void
9194 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
9195 {
9196 	struct rwlock *lock;
9197 	pml4_entry_t *pml4e;
9198 	pdp_entry_t *pdpe;
9199 	pd_entry_t oldpde, *pde;
9200 	pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V;
9201 	vm_offset_t va, va_next;
9202 	vm_page_t m;
9203 	bool anychanged;
9204 
9205 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
9206 		return;
9207 
9208 	/*
9209 	 * A/D bit emulation requires an alternate code path when clearing
9210 	 * the modified and accessed bits below. Since this function is
9211 	 * advisory in nature we skip it entirely for pmaps that require
9212 	 * A/D bit emulation.
9213 	 */
9214 	if (pmap_emulate_ad_bits(pmap))
9215 		return;
9216 
9217 	PG_A = pmap_accessed_bit(pmap);
9218 	PG_G = pmap_global_bit(pmap);
9219 	PG_M = pmap_modified_bit(pmap);
9220 	PG_V = pmap_valid_bit(pmap);
9221 	PG_RW = pmap_rw_bit(pmap);
9222 	anychanged = false;
9223 	pmap_delayed_invl_start();
9224 	PMAP_LOCK(pmap);
9225 	for (; sva < eva; sva = va_next) {
9226 		pml4e = pmap_pml4e(pmap, sva);
9227 		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
9228 			va_next = (sva + NBPML4) & ~PML4MASK;
9229 			if (va_next < sva)
9230 				va_next = eva;
9231 			continue;
9232 		}
9233 
9234 		va_next = (sva + NBPDP) & ~PDPMASK;
9235 		if (va_next < sva)
9236 			va_next = eva;
9237 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
9238 		if ((*pdpe & PG_V) == 0)
9239 			continue;
9240 		if ((*pdpe & PG_PS) != 0)
9241 			continue;
9242 
9243 		va_next = (sva + NBPDR) & ~PDRMASK;
9244 		if (va_next < sva)
9245 			va_next = eva;
9246 		pde = pmap_pdpe_to_pde(pdpe, sva);
9247 		oldpde = *pde;
9248 		if ((oldpde & PG_V) == 0)
9249 			continue;
9250 		else if ((oldpde & PG_PS) != 0) {
9251 			if ((oldpde & PG_MANAGED) == 0)
9252 				continue;
9253 			lock = NULL;
9254 			if (!pmap_demote_pde_locked(pmap, pde, sva, &lock)) {
9255 				if (lock != NULL)
9256 					rw_wunlock(lock);
9257 
9258 				/*
9259 				 * The large page mapping was destroyed.
9260 				 */
9261 				continue;
9262 			}
9263 
9264 			/*
9265 			 * Unless the page mappings are wired, remove the
9266 			 * mapping to a single page so that a subsequent
9267 			 * access may repromote.  Choosing the last page
9268 			 * within the address range [sva, min(va_next, eva))
9269 			 * generally results in more repromotions.  Since the
9270 			 * underlying page table page is fully populated, this
9271 			 * removal never frees a page table page.
9272 			 */
9273 			if ((oldpde & PG_W) == 0) {
9274 				va = eva;
9275 				if (va > va_next)
9276 					va = va_next;
9277 				va -= PAGE_SIZE;
9278 				KASSERT(va >= sva,
9279 				    ("pmap_advise: no address gap"));
9280 				pte = pmap_pde_to_pte(pde, va);
9281 				KASSERT((*pte & PG_V) != 0,
9282 				    ("pmap_advise: invalid PTE"));
9283 				pmap_remove_pte(pmap, pte, va, *pde, NULL,
9284 				    &lock);
9285 				anychanged = true;
9286 			}
9287 			if (lock != NULL)
9288 				rw_wunlock(lock);
9289 		}
9290 		if (va_next > eva)
9291 			va_next = eva;
9292 		va = va_next;
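		/*
		 * "va" records the start of a pending range invalidation of
		 * global mappings; it equals va_next while none is pending.
		 */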
9293 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
9294 		    sva += PAGE_SIZE) {
9295 			if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
9296 				goto maybe_invlrng;
9297 			else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
9298 				if (advice == MADV_DONTNEED) {
9299 					/*
9300 					 * Future calls to pmap_is_modified()
9301 					 * can be avoided by making the page
9302 					 * dirty now.
9303 					 */
9304 					m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
9305 					vm_page_dirty(m);
9306 				}
9307 				atomic_clear_long(pte, PG_M | PG_A);
9308 			} else if ((*pte & PG_A) != 0)
9309 				atomic_clear_long(pte, PG_A);
9310 			else
9311 				goto maybe_invlrng;
9312 
9313 			if ((*pte & PG_G) != 0) {
9314 				if (va == va_next)
9315 					va = sva;
9316 			} else
9317 				anychanged = true;
9318 			continue;
9319 maybe_invlrng:
9320 			if (va != va_next) {
9321 				pmap_invalidate_range(pmap, va, sva);
9322 				va = va_next;
9323 			}
9324 		}
9325 		if (va != va_next)
9326 			pmap_invalidate_range(pmap, va, sva);
9327 	}
9328 	if (anychanged)
9329 		pmap_invalidate_all(pmap);
9330 	PMAP_UNLOCK(pmap);
9331 	pmap_delayed_invl_finish();
9332 }
9333 
9334 /*
9335  *	Clear the modify bits on the specified physical page.
9336  */
9337 void
9338 pmap_clear_modify(vm_page_t m)
9339 {
9340 	struct md_page *pvh;
9341 	pmap_t pmap;
9342 	pv_entry_t next_pv, pv;
9343 	pd_entry_t oldpde, *pde;
9344 	pt_entry_t *pte, PG_M, PG_RW;
9345 	struct rwlock *lock;
9346 	vm_offset_t va;
9347 	int md_gen, pvh_gen;
9348 
9349 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
9350 	    ("pmap_clear_modify: page %p is not managed", m));
9351 	vm_page_assert_busied(m);
9352 
9353 	if (!pmap_page_is_write_mapped(m))
9354 		return;
9355 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
9356 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
9357 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
9358 	rw_wlock(lock);
9359 restart:
9360 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
9361 		pmap = PV_PMAP(pv);
9362 		if (!PMAP_TRYLOCK(pmap)) {
9363 			pvh_gen = pvh->pv_gen;
9364 			rw_wunlock(lock);
9365 			PMAP_LOCK(pmap);
9366 			rw_wlock(lock);
9367 			if (pvh_gen != pvh->pv_gen) {
9368 				PMAP_UNLOCK(pmap);
9369 				goto restart;
9370 			}
9371 		}
9372 		PG_M = pmap_modified_bit(pmap);
9373 		PG_RW = pmap_rw_bit(pmap);
9374 		va = pv->pv_va;
9375 		pde = pmap_pde(pmap, va);
9376 		oldpde = *pde;
9377 		/* If oldpde has PG_RW set, then it also has PG_M set. */
9378 		if ((oldpde & PG_RW) != 0 &&
9379 		    pmap_demote_pde_locked(pmap, pde, va, &lock) &&
9380 		    (oldpde & PG_W) == 0) {
9381 			/*
9382 			 * Write protect the mapping to a single page so that
9383 			 * a subsequent write access may repromote.
9384 			 */
9385 			va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME);
9386 			pte = pmap_pde_to_pte(pde, va);
9387 			atomic_clear_long(pte, PG_M | PG_RW);
9388 			vm_page_dirty(m);
9389 			pmap_invalidate_page(pmap, va);
9390 		}
9391 		PMAP_UNLOCK(pmap);
9392 	}
9393 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
9394 		pmap = PV_PMAP(pv);
9395 		if (!PMAP_TRYLOCK(pmap)) {
9396 			md_gen = m->md.pv_gen;
9397 			pvh_gen = pvh->pv_gen;
9398 			rw_wunlock(lock);
9399 			PMAP_LOCK(pmap);
9400 			rw_wlock(lock);
9401 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
9402 				PMAP_UNLOCK(pmap);
9403 				goto restart;
9404 			}
9405 		}
9406 		PG_M = pmap_modified_bit(pmap);
9407 		PG_RW = pmap_rw_bit(pmap);
9408 		pde = pmap_pde(pmap, pv->pv_va);
9409 		KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
9410 		    " a 2mpage in page %p's pv list", m));
9411 		pte = pmap_pde_to_pte(pde, pv->pv_va);
9412 		if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
9413 			atomic_clear_long(pte, PG_M);
9414 			pmap_invalidate_page(pmap, pv->pv_va);
9415 		}
9416 		PMAP_UNLOCK(pmap);
9417 	}
9418 	rw_wunlock(lock);
9419 }
9420 
9421 /*
9422  * Miscellaneous support routines follow
9423  */
9424 
9425 /* Adjust the properties for a leaf page table entry. */
9426 static __inline void
9427 pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask)
9428 {
9429 	u_long opte, npte;
9430 
9431 	opte = *(u_long *)pte;
9432 	do {
9433 		npte = opte & ~mask;
9434 		npte |= bits;
9435 	} while (npte != opte && !atomic_fcmpset_long((u_long *)pte, &opte,
9436 	    npte));
9437 }
9438 
9439 /*
9440  * Map a set of physical memory pages into the kernel virtual
9441  * address space. Return a pointer to where it is mapped. This
9442  * routine is intended to be used for mapping device memory,
9443  * NOT real memory.
9444  */
9445 static void *
9446 pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, int flags)
9447 {
9448 	struct pmap_preinit_mapping *ppim;
9449 	vm_offset_t va, offset;
9450 	vm_size_t tmpsize;
9451 	int i;
9452 
9453 	offset = pa & PAGE_MASK;
9454 	size = round_page(offset + size);
9455 	pa = trunc_page(pa);
9456 
9457 	if (!pmap_initialized) {
9458 		va = 0;
9459 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
9460 			ppim = pmap_preinit_mapping + i;
9461 			if (ppim->va == 0) {
9462 				ppim->pa = pa;
9463 				ppim->sz = size;
9464 				ppim->mode = mode;
9465 				ppim->va = virtual_avail;
9466 				virtual_avail += size;
9467 				va = ppim->va;
9468 				break;
9469 			}
9470 		}
9471 		if (va == 0)
9472 			panic("%s: too many preinit mappings", __func__);
9473 	} else {
9474 		/*
9475 		 * If we have a preinit mapping, reuse it.
9476 		 */
9477 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
9478 			ppim = pmap_preinit_mapping + i;
9479 			if (ppim->pa == pa && ppim->sz == size &&
9480 			    (ppim->mode == mode ||
9481 			    (flags & MAPDEV_SETATTR) == 0))
9482 				return ((void *)(ppim->va + offset));
9483 		}
9484 		/*
9485 		 * If the specified range of physical addresses fits within
9486 		 * the direct map window, use the direct map.
9487 		 */
9488 		if (pa < dmaplimit && pa + size <= dmaplimit) {
9489 			va = PHYS_TO_DMAP(pa);
9490 			if ((flags & MAPDEV_SETATTR) != 0) {
9491 				PMAP_LOCK(kernel_pmap);
9492 				i = pmap_change_props_locked(va, size,
9493 				    PROT_NONE, mode, flags);
9494 				PMAP_UNLOCK(kernel_pmap);
9495 			} else
9496 				i = 0;
9497 			if (!i)
9498 				return ((void *)(va + offset));
9499 		}
9500 		va = kva_alloc(size);
9501 		if (va == 0)
9502 			panic("%s: Couldn't allocate KVA", __func__);
9503 	}
9504 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
9505 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
9506 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
9507 	if ((flags & MAPDEV_FLUSHCACHE) != 0)
9508 		pmap_invalidate_cache_range(va, va + tmpsize);
9509 	return ((void *)(va + offset));
9510 }
9511 
9512 void *
9513 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
9514 {
9515 
9516 	return (pmap_mapdev_internal(pa, size, mode, MAPDEV_FLUSHCACHE |
9517 	    MAPDEV_SETATTR));
9518 }
9519 
9520 void *
9521 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
9522 {
9523 
9524 	return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
9525 }
9526 
9527 void *
9528 pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size)
9529 {
9530 
9531 	return (pmap_mapdev_internal(pa, size, PAT_UNCACHEABLE,
9532 	    MAPDEV_SETATTR));
9533 }
9534 
9535 void *
9536 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
9537 {
9538 
9539 	return (pmap_mapdev_internal(pa, size, PAT_WRITE_BACK,
9540 	    MAPDEV_FLUSHCACHE));
9541 }
9542 
9543 void
9544 pmap_unmapdev(void *p, vm_size_t size)
9545 {
9546 	struct pmap_preinit_mapping *ppim;
9547 	vm_offset_t offset, va;
9548 	int i;
9549 
9550 	va = (vm_offset_t)p;
9551 
9552 	/* If pmap_mapdev() returned a direct map address, do nothing. */
9553 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
9554 		return;
9555 	offset = va & PAGE_MASK;
9556 	size = round_page(offset + size);
9557 	va = trunc_page(va);
9558 	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
9559 		ppim = pmap_preinit_mapping + i;
9560 		if (ppim->va == va && ppim->sz == size) {
9561 			if (pmap_initialized)
9562 				return;
9563 			ppim->pa = 0;
9564 			ppim->va = 0;
9565 			ppim->sz = 0;
9566 			ppim->mode = 0;
9567 			if (va + size == virtual_avail)
9568 				virtual_avail = va;
9569 			return;
9570 		}
9571 	}
9572 	if (pmap_initialized) {
9573 		pmap_qremove(va, atop(size));
9574 		kva_free(va, size);
9575 	}
9576 }
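
/*
 * Purely illustrative sketch, not an interface defined in this file: a
 * hypothetical driver would pair the two calls above roughly as follows,
 * with "bar_pa" and "bar_size" standing in for a device BAR discovered at
 * attach time.
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev(bar_pa, bar_size);
 *	...
 *	pmap_unmapdev(regs, bar_size);
 */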
9577 
9578 /*
9579  * Tries to demote a 1GB page mapping.
9580  */
9581 static bool
9582 pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va, vm_page_t m)
9583 {
9584 	pdp_entry_t newpdpe, oldpdpe;
9585 	pd_entry_t *firstpde, newpde, *pde;
9586 	pt_entry_t PG_A, PG_M, PG_RW, PG_V;
9587 	vm_paddr_t pdpgpa;
9588 	vm_page_t pdpg;
9589 
9590 	PG_A = pmap_accessed_bit(pmap);
9591 	PG_M = pmap_modified_bit(pmap);
9592 	PG_V = pmap_valid_bit(pmap);
9593 	PG_RW = pmap_rw_bit(pmap);
9594 
9595 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9596 	oldpdpe = *pdpe;
9597 	KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V),
9598 	    ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
9599 	if (m == NULL) {
9600 		pdpg = pmap_alloc_pt_page(pmap, va >> PDPSHIFT,
9601 		    VM_ALLOC_WIRED);
9602 		if (pdpg  == NULL) {
9603 			CTR2(KTR_PMAP,
9604 			    "pmap_demote_pdpe: failure for va %#lx in pmap %p",
9605 			    va, pmap);
9606 			return (false);
9607 		}
9608 	} else {
9609 		pdpg = m;
9610 		pdpg->pindex = va >> PDPSHIFT;
9611 		pmap_pt_page_count_adj(pmap, 1);
9612 	}
9613 	pdpgpa = VM_PAGE_TO_PHYS(pdpg);
9614 	firstpde = (pd_entry_t *)PHYS_TO_DMAP(pdpgpa);
9615 	newpdpe = pdpgpa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V;
9616 	KASSERT((oldpdpe & PG_A) != 0,
9617 	    ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
9618 	KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
9619 	    ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
9620 	newpde = oldpdpe;
9621 
9622 	/*
9623 	 * Initialize the page directory page.
9624 	 */
9625 	for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
9626 		*pde = newpde;
9627 		newpde += NBPDR;
9628 	}
9629 
9630 	/*
9631 	 * Demote the mapping.
9632 	 */
9633 	*pdpe = newpdpe;
9634 
9635 	/*
9636 	 * Invalidate a stale recursive mapping of the page directory page.
9637 	 */
9638 	pmap_invalidate_page(pmap, (vm_offset_t)vtopde(va));
9639 
9640 	counter_u64_add(pmap_pdpe_demotions, 1);
9641 	CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
9642 	    " in pmap %p", va, pmap);
9643 	return (true);
9644 }
9645 
9646 /*
9647  * Sets the memory attribute for the specified page.
9648  */
9649 void
9650 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
9651 {
9652 
9653 	m->md.pat_mode = ma;
9654 
9655 	/*
9656 	 * If "m" is a normal page, update its direct mapping.  This update
9657 	 * can be relied upon to perform any cache operations that are
9658 	 * required for data coherence.
9659 	 */
9660 	if ((m->flags & PG_FICTITIOUS) == 0 &&
9661 	    pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
9662 	    m->md.pat_mode))
9663 		panic("memory attribute change on the direct map failed");
9664 }
9665 
9666 void
9667 pmap_page_set_memattr_noflush(vm_page_t m, vm_memattr_t ma)
9668 {
9669 	int error;
9670 
9671 	m->md.pat_mode = ma;
9672 
9673 	if ((m->flags & PG_FICTITIOUS) != 0)
9674 		return;
9675 	PMAP_LOCK(kernel_pmap);
9676 	error = pmap_change_props_locked(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
9677 	    PAGE_SIZE, PROT_NONE, m->md.pat_mode, 0);
9678 	PMAP_UNLOCK(kernel_pmap);
9679 	if (error != 0)
9680 		panic("memory attribute change on the direct map failed");
9681 }
9682 
9683 /*
9684  * Changes the specified virtual address range's memory type to that given by
9685  * the parameter "mode".  The specified virtual address range must be
9686  * completely contained within either the direct map or the kernel map.  If
9687  * the virtual address range is contained within the kernel map, then the
9688  * memory type for each of the corresponding ranges of the direct map is also
9689  * changed.  (The corresponding ranges of the direct map are those ranges that
9690  * map the same physical pages as the specified virtual address range.)  These
9691  * changes to the direct map are necessary because Intel describes the
9692  * behavior of their processors as "undefined" if two or more mappings to the
9693  * same physical page have different memory types.
9694  *
9695  * Returns zero if the change completed successfully, and either EINVAL or
9696  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
9697  * of the virtual address range was not mapped, and ENOMEM is returned if
9698  * there was insufficient memory available to complete the change.  In the
9699  * latter case, the memory type may have been changed on some part of the
9700  * virtual address range or the direct map.
9701  */
9702 int
9703 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
9704 {
9705 	int error;
9706 
9707 	PMAP_LOCK(kernel_pmap);
9708 	error = pmap_change_props_locked(va, size, PROT_NONE, mode,
9709 	    MAPDEV_FLUSHCACHE);
9710 	PMAP_UNLOCK(kernel_pmap);
9711 	return (error);
9712 }
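
/*
 * Illustrative sketch only: a hypothetical caller that has a frame buffer
 * mapped at "fb_va" for "fb_size" bytes (assumed names, not defined here)
 * could request write-combining for it as follows.
 *
 *	if (pmap_change_attr(fb_va, fb_size, PAT_WRITE_COMBINING) != 0)
 *		printf("could not set write-combining on the frame buffer\n");
 */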
9713 
9714 /*
9715  * Changes the specified virtual address range's protections to those
9716  * specified by "prot".  Like pmap_change_attr(), protections for aliases
9717  * in the direct map are updated as well.  Protections on aliasing mappings may
9718  * be a subset of the requested protections; for example, mappings in the direct
9719  * map are never executable.
9720  */
9721 int
9722 pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot)
9723 {
9724 	int error;
9725 
9726 	/* Only supported within the kernel map. */
9727 	if (va < VM_MIN_KERNEL_ADDRESS)
9728 		return (EINVAL);
9729 
9730 	PMAP_LOCK(kernel_pmap);
9731 	error = pmap_change_props_locked(va, size, prot, -1,
9732 	    MAPDEV_ASSERTVALID);
9733 	PMAP_UNLOCK(kernel_pmap);
9734 	return (error);
9735 }
9736 
9737 static int
9738 pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
9739     int mode, int flags)
9740 {
9741 	vm_offset_t base, offset, tmpva;
9742 	vm_paddr_t pa_start, pa_end, pa_end1;
9743 	pdp_entry_t *pdpe;
9744 	pd_entry_t *pde, pde_bits, pde_mask;
9745 	pt_entry_t *pte, pte_bits, pte_mask;
9746 	int error;
9747 	bool changed;
9748 
9749 	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
9750 	base = trunc_page(va);
9751 	offset = va & PAGE_MASK;
9752 	size = round_page(offset + size);
9753 
9754 	/*
9755 	 * Only supported on kernel virtual addresses, including the direct
9756 	 * map but excluding the recursive map.
9757 	 */
9758 	if (base < DMAP_MIN_ADDRESS)
9759 		return (EINVAL);
9760 
9761 	/*
9762 	 * Construct our flag sets and masks.  "bits" is the subset of
9763 	 * "mask" that will be set in each modified PTE.
9764 	 *
9765 	 * Mappings in the direct map are never allowed to be executable.
9766 	 */
9767 	pde_bits = pte_bits = 0;
9768 	pde_mask = pte_mask = 0;
9769 	if (mode != -1) {
9770 		pde_bits |= pmap_cache_bits(kernel_pmap, mode, true);
9771 		pde_mask |= X86_PG_PDE_CACHE;
9772 		pte_bits |= pmap_cache_bits(kernel_pmap, mode, false);
9773 		pte_mask |= X86_PG_PTE_CACHE;
9774 	}
9775 	if (prot != VM_PROT_NONE) {
9776 		if ((prot & VM_PROT_WRITE) != 0) {
9777 			pde_bits |= X86_PG_RW;
9778 			pte_bits |= X86_PG_RW;
9779 		}
9780 		if ((prot & VM_PROT_EXECUTE) == 0 ||
9781 		    va < VM_MIN_KERNEL_ADDRESS) {
9782 			pde_bits |= pg_nx;
9783 			pte_bits |= pg_nx;
9784 		}
9785 		pde_mask |= X86_PG_RW | pg_nx;
9786 		pte_mask |= X86_PG_RW | pg_nx;
9787 	}
9788 
9789 	/*
9790 	 * Pages that aren't mapped aren't supported.  Also break down 2MB pages
9791 	 * into 4KB pages if required.
9792 	 */
9793 	for (tmpva = base; tmpva < base + size; ) {
9794 		pdpe = pmap_pdpe(kernel_pmap, tmpva);
9795 		if (pdpe == NULL || *pdpe == 0) {
9796 			KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
9797 			    ("%s: addr %#lx is not mapped", __func__, tmpva));
9798 			return (EINVAL);
9799 		}
9800 		if (*pdpe & PG_PS) {
9801 			/*
9802 			 * If the current 1GB page already has the required
9803 			 * properties, then we need not demote this page.  Just
9804 			 * increment tmpva to the next 1GB page frame.
9805 			 */
9806 			if ((*pdpe & pde_mask) == pde_bits) {
9807 				tmpva = trunc_1gpage(tmpva) + NBPDP;
9808 				continue;
9809 			}
9810 
9811 			/*
9812 			 * If the current offset aligns with a 1GB page frame
9813 			 * and there is at least 1GB left within the range, then
9814 			 * we need not break down this page into 2MB pages.
9815 			 */
9816 			if ((tmpva & PDPMASK) == 0 &&
9817 			    tmpva + PDPMASK < base + size) {
9818 				tmpva += NBPDP;
9819 				continue;
9820 			}
9821 			if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva, NULL))
9822 				return (ENOMEM);
9823 		}
9824 		pde = pmap_pdpe_to_pde(pdpe, tmpva);
9825 		if (*pde == 0) {
9826 			KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
9827 			    ("%s: addr %#lx is not mapped", __func__, tmpva));
9828 			return (EINVAL);
9829 		}
9830 		if (*pde & PG_PS) {
9831 			/*
9832 			 * If the current 2MB page already has the required
9833 			 * properties, then we need not demote this page.  Just
9834 			 * increment tmpva to the next 2MB page frame.
9835 			 */
9836 			if ((*pde & pde_mask) == pde_bits) {
9837 				tmpva = trunc_2mpage(tmpva) + NBPDR;
9838 				continue;
9839 			}
9840 
9841 			/*
9842 			 * If the current offset aligns with a 2MB page frame
9843 			 * and there is at least 2MB left within the range, then
9844 			 * we need not break down this page into 4KB pages.
9845 			 */
9846 			if ((tmpva & PDRMASK) == 0 &&
9847 			    tmpva + PDRMASK < base + size) {
9848 				tmpva += NBPDR;
9849 				continue;
9850 			}
9851 			if (!pmap_demote_pde(kernel_pmap, pde, tmpva))
9852 				return (ENOMEM);
9853 		}
9854 		pte = pmap_pde_to_pte(pde, tmpva);
9855 		if (*pte == 0) {
9856 			KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
9857 			    ("%s: addr %#lx is not mapped", __func__, tmpva));
9858 			return (EINVAL);
9859 		}
9860 		tmpva += PAGE_SIZE;
9861 	}
9862 	error = 0;
9863 
9864 	/*
9865 	 * Ok, all the pages exist, so run through them updating their
9866 	 * properties if required.
9867 	 */
9868 	changed = false;
9869 	pa_start = pa_end = 0;
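	/*
	 * Accumulate runs of physically contiguous pages so that each run's
	 * direct map alias can be updated with a single recursive call.
	 */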
9870 	for (tmpva = base; tmpva < base + size; ) {
9871 		pdpe = pmap_pdpe(kernel_pmap, tmpva);
9872 		if (*pdpe & PG_PS) {
9873 			if ((*pdpe & pde_mask) != pde_bits) {
9874 				pmap_pte_props(pdpe, pde_bits, pde_mask);
9875 				changed = true;
9876 			}
9877 			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
9878 			    (*pdpe & PG_PS_FRAME) < dmaplimit) {
9879 				if (pa_start == pa_end) {
9880 					/* Start physical address run. */
9881 					pa_start = *pdpe & PG_PS_FRAME;
9882 					pa_end = pa_start + NBPDP;
9883 				} else if (pa_end == (*pdpe & PG_PS_FRAME))
9884 					pa_end += NBPDP;
9885 				else {
9886 					/* Run ended, update direct map. */
9887 					error = pmap_change_props_locked(
9888 					    PHYS_TO_DMAP(pa_start),
9889 					    pa_end - pa_start, prot, mode,
9890 					    flags);
9891 					if (error != 0)
9892 						break;
9893 					/* Start physical address run. */
9894 					pa_start = *pdpe & PG_PS_FRAME;
9895 					pa_end = pa_start + NBPDP;
9896 				}
9897 			}
9898 			tmpva = trunc_1gpage(tmpva) + NBPDP;
9899 			continue;
9900 		}
9901 		pde = pmap_pdpe_to_pde(pdpe, tmpva);
9902 		if (*pde & PG_PS) {
9903 			if ((*pde & pde_mask) != pde_bits) {
9904 				pmap_pte_props(pde, pde_bits, pde_mask);
9905 				changed = true;
9906 			}
9907 			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
9908 			    (*pde & PG_PS_FRAME) < dmaplimit) {
9909 				if (pa_start == pa_end) {
9910 					/* Start physical address run. */
9911 					pa_start = *pde & PG_PS_FRAME;
9912 					pa_end = pa_start + NBPDR;
9913 				} else if (pa_end == (*pde & PG_PS_FRAME))
9914 					pa_end += NBPDR;
9915 				else {
9916 					/* Run ended, update direct map. */
9917 					error = pmap_change_props_locked(
9918 					    PHYS_TO_DMAP(pa_start),
9919 					    pa_end - pa_start, prot, mode,
9920 					    flags);
9921 					if (error != 0)
9922 						break;
9923 					/* Start physical address run. */
9924 					pa_start = *pde & PG_PS_FRAME;
9925 					pa_end = pa_start + NBPDR;
9926 				}
9927 			}
9928 			tmpva = trunc_2mpage(tmpva) + NBPDR;
9929 		} else {
9930 			pte = pmap_pde_to_pte(pde, tmpva);
9931 			if ((*pte & pte_mask) != pte_bits) {
9932 				pmap_pte_props(pte, pte_bits, pte_mask);
9933 				changed = true;
9934 			}
9935 			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
9936 			    (*pte & PG_FRAME) < dmaplimit) {
9937 				if (pa_start == pa_end) {
9938 					/* Start physical address run. */
9939 					pa_start = *pte & PG_FRAME;
9940 					pa_end = pa_start + PAGE_SIZE;
9941 				} else if (pa_end == (*pte & PG_FRAME))
9942 					pa_end += PAGE_SIZE;
9943 				else {
9944 					/* Run ended, update direct map. */
9945 					error = pmap_change_props_locked(
9946 					    PHYS_TO_DMAP(pa_start),
9947 					    pa_end - pa_start, prot, mode,
9948 					    flags);
9949 					if (error != 0)
9950 						break;
9951 					/* Start physical address run. */
9952 					pa_start = *pte & PG_FRAME;
9953 					pa_end = pa_start + PAGE_SIZE;
9954 				}
9955 			}
9956 			tmpva += PAGE_SIZE;
9957 		}
9958 	}
9959 	if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
9960 		pa_end1 = MIN(pa_end, dmaplimit);
9961 		if (pa_start != pa_end1)
9962 			error = pmap_change_props_locked(PHYS_TO_DMAP(pa_start),
9963 			    pa_end1 - pa_start, prot, mode, flags);
9964 	}
9965 
9966 	/*
9967 	 * Flush CPU caches if required, to ensure that no stale data remains
9968 	 * cached with the old memory attributes.
9969 	 */
9970 	if (changed) {
9971 		pmap_invalidate_range(kernel_pmap, base, tmpva);
9972 		if ((flags & MAPDEV_FLUSHCACHE) != 0)
9973 			pmap_invalidate_cache_range(base, tmpva);
9974 	}
9975 	return (error);
9976 }
9977 
9978 /*
9979  * Demotes any mapping within the direct map region that covers more
9980  * than the specified range of physical addresses.  This range's size
9981  * must be a power of two and its starting address must be a multiple
9982  * of its size, which means that any pdp from the mapping is fully
9983  * covered by the range if len > NBPDP.  Since the demotion does not
9984  * change any attributes of the mapping, a TLB invalidation is not
9985  * mandatory.  The caller may, however, request a TLB invalidation.
9986  */
9987 void
9988 pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, bool invalidate)
9989 {
9990 	pdp_entry_t *pdpe;
9991 	pd_entry_t *pde;
9992 	vm_offset_t va;
9993 	vm_page_t m, mpte;
9994 	bool changed, rv __diagused;
9995 
9996 	if (len == 0)
9997 		return;
9998 	KASSERT(powerof2(len), ("pmap_demote_DMAP: len is not a power of 2"));
9999 	KASSERT((base & (len - 1)) == 0,
10000 	    ("pmap_demote_DMAP: base is not a multiple of len"));
10001 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "pmap_demote_DMAP");
10002 
10003 	if (len < NBPDP && base < dmaplimit) {
10004 		va = PHYS_TO_DMAP(base);
10005 		changed = false;
10006 
10007 		/*
10008 		 * Assume that it is fine to sleep here.
10009 		 * The only existing caller of pmap_demote_DMAP() is the
10010 		 * x86_mr_split_dmap() function.
10011 		 */
10012 		m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
10013 		if (len < NBPDR) {
10014 			mpte = vm_page_alloc_noobj(VM_ALLOC_WIRED |
10015 			    VM_ALLOC_WAITOK);
10016 		} else
10017 			mpte = NULL;
10018 
10019 		PMAP_LOCK(kernel_pmap);
10020 		pdpe = pmap_pdpe(kernel_pmap, va);
10021 		if ((*pdpe & X86_PG_V) == 0)
10022 			panic("pmap_demote_DMAP: invalid PDPE");
10023 		if ((*pdpe & PG_PS) != 0) {
10024 			rv = pmap_demote_pdpe(kernel_pmap, pdpe, va, m);
10025 			KASSERT(rv, ("pmap_demote_DMAP: PDPE failed"));
10026 			changed = true;
10027 			m = NULL;
10028 		}
10029 		if (len < NBPDR) {
10030 			pde = pmap_pdpe_to_pde(pdpe, va);
10031 			if ((*pde & X86_PG_V) == 0)
10032 				panic("pmap_demote_DMAP: invalid PDE");
10033 			if ((*pde & PG_PS) != 0) {
10034 				mpte->pindex = pmap_pde_pindex(va);
10035 				pmap_pt_page_count_adj(kernel_pmap, 1);
10036 				rv = pmap_demote_pde_mpte(kernel_pmap, pde, va,
10037 				    NULL, mpte);
10038 				KASSERT(rv, ("pmap_demote_DMAP: PDE failed"));
10039 				changed = true;
10040 				mpte = NULL;
10041 			}
10042 		}
10043 		if (changed && invalidate)
10044 			pmap_invalidate_page(kernel_pmap, va);
10045 		PMAP_UNLOCK(kernel_pmap);
10046 		if (m != NULL) {
10047 			vm_page_unwire_noq(m);
10048 			vm_page_free(m);
10049 		}
10050 		if (mpte != NULL) {
10051 			vm_page_unwire_noq(mpte);
10052 			vm_page_free(mpte);
10053 		}
10054 	}
10055 }
10056 
10057 /*
10058  * Perform the pmap work for mincore(2).  If the page is not both referenced
10059  * and modified by this pmap, return its physical address so that the caller
10060  * can find other mappings.
10061  */
10062 int
10063 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
10064 {
10065 	pdp_entry_t *pdpe;
10066 	pd_entry_t *pdep;
10067 	pt_entry_t pte, PG_A, PG_M, PG_RW, PG_V;
10068 	vm_paddr_t pa;
10069 	int val;
10070 
10071 	PG_A = pmap_accessed_bit(pmap);
10072 	PG_M = pmap_modified_bit(pmap);
10073 	PG_V = pmap_valid_bit(pmap);
10074 	PG_RW = pmap_rw_bit(pmap);
10075 
10076 	PMAP_LOCK(pmap);
10077 	pte = 0;
10078 	pa = 0;
10079 	val = 0;
10080 	pdpe = pmap_pdpe(pmap, addr);
10081 	if (pdpe == NULL)
10082 		goto out;
10083 	if ((*pdpe & PG_V) != 0) {
10084 		if ((*pdpe & PG_PS) != 0) {
10085 			pte = *pdpe;
10086 			pa = ((pte & PG_PS_PDP_FRAME) | (addr & PDPMASK)) &
10087 			    PG_FRAME;
10088 			val = MINCORE_PSIND(2);
10089 		} else {
10090 			pdep = pmap_pde(pmap, addr);
10091 			if (pdep != NULL && (*pdep & PG_V) != 0) {
10092 				if ((*pdep & PG_PS) != 0) {
10093 					pte = *pdep;
10094 			/* Compute the physical address of the 4KB page. */
10095 					pa = ((pte & PG_PS_FRAME) | (addr &
10096 					    PDRMASK)) & PG_FRAME;
10097 					val = MINCORE_PSIND(1);
10098 				} else {
10099 					pte = *pmap_pde_to_pte(pdep, addr);
10100 					pa = pte & PG_FRAME;
10101 					val = 0;
10102 				}
10103 			}
10104 		}
10105 	}
10106 	if ((pte & PG_V) != 0) {
10107 		val |= MINCORE_INCORE;
10108 		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
10109 			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
10110 		if ((pte & PG_A) != 0)
10111 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
10112 	}
10113 	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
10114 	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
10115 	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
10116 		*pap = pa;
10117 	}
10118 out:
10119 	PMAP_UNLOCK(pmap);
10120 	return (val);
10121 }
10122 
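/*
 * Allocate a PCID for the pmap on the current CPU.  Returns CR3_PCID_SAVE
 * when the previously cached PCID is still valid (same per-CPU generation),
 * so the TLB entries tagged with it may be reused; returns 0 when a fresh
 * PCID was assigned and the TLB cannot be assumed to hold valid entries for
 * this pmap.  When the PCID space wraps, the per-CPU generation counter is
 * bumped, which implicitly invalidates all cached pm_gen values.
 */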
10123 static uint64_t
10124 pmap_pcid_alloc(pmap_t pmap, struct pmap_pcid *pcidp)
10125 {
10126 	uint32_t gen, new_gen, pcid_next;
10127 
10128 	CRITICAL_ASSERT(curthread);
10129 	gen = PCPU_GET(pcid_gen);
10130 	if (pcidp->pm_pcid == PMAP_PCID_KERN)
10131 		return (pti ? 0 : CR3_PCID_SAVE);
10132 	if (pcidp->pm_gen == gen)
10133 		return (CR3_PCID_SAVE);
10134 	pcid_next = PCPU_GET(pcid_next);
10135 	KASSERT((!pti && pcid_next <= PMAP_PCID_OVERMAX) ||
10136 	    (pti && pcid_next <= PMAP_PCID_OVERMAX_KERN),
10137 	    ("cpu %d pcid_next %#x", PCPU_GET(cpuid), pcid_next));
10138 	if ((!pti && pcid_next == PMAP_PCID_OVERMAX) ||
10139 	    (pti && pcid_next == PMAP_PCID_OVERMAX_KERN)) {
10140 		new_gen = gen + 1;
10141 		if (new_gen == 0)
10142 			new_gen = 1;
10143 		PCPU_SET(pcid_gen, new_gen);
10144 		pcid_next = PMAP_PCID_KERN + 1;
10145 	} else {
10146 		new_gen = gen;
10147 	}
10148 	pcidp->pm_pcid = pcid_next;
10149 	pcidp->pm_gen = new_gen;
10150 	PCPU_SET(pcid_next, pcid_next + 1);
10151 	return (0);
10152 }
10153 
10154 static uint64_t
10155 pmap_pcid_alloc_checked(pmap_t pmap, struct pmap_pcid *pcidp)
10156 {
10157 	uint64_t cached;
10158 
10159 	cached = pmap_pcid_alloc(pmap, pcidp);
10160 	KASSERT(pcidp->pm_pcid < PMAP_PCID_OVERMAX,
10161 	    ("pmap %p cpu %d pcid %#x", pmap, PCPU_GET(cpuid), pcidp->pm_pcid));
10162 	KASSERT(pcidp->pm_pcid != PMAP_PCID_KERN || pmap == kernel_pmap,
10163 	    ("non-kernel pmap pmap %p cpu %d pcid %#x",
10164 	    pmap, PCPU_GET(cpuid), pcidp->pm_pcid));
10165 	return (cached);
10166 }
10167 
10168 static void
10169 pmap_activate_sw_pti_post(struct thread *td, pmap_t pmap)
10170 {
10171 
10172 	PCPU_GET(tssp)->tss_rsp0 = pmap->pm_ucr3 != PMAP_NO_CR3 ?
10173 	    PCPU_GET(pti_rsp0) : (uintptr_t)td->td_md.md_stack_base;
10174 }
10175 
10176 static void
10177 pmap_activate_sw_pcid_pti(struct thread *td, pmap_t pmap, u_int cpuid)
10178 {
10179 	pmap_t old_pmap;
10180 	struct pmap_pcid *pcidp, *old_pcidp;
10181 	uint64_t cached, cr3, kcr3, ucr3;
10182 
10183 	KASSERT((read_rflags() & PSL_I) == 0,
10184 	    ("PCID needs interrupts disabled in pmap_activate_sw()"));
10185 
10186 	/* See the comment in pmap_invalidate_page_pcid(). */
10187 	if (PCPU_GET(ucr3_load_mask) != PMAP_UCR3_NOMASK) {
10188 		PCPU_SET(ucr3_load_mask, PMAP_UCR3_NOMASK);
10189 		old_pmap = PCPU_GET(curpmap);
10190 		MPASS(old_pmap->pm_ucr3 != PMAP_NO_CR3);
10191 		old_pcidp = zpcpu_get_cpu(old_pmap->pm_pcidp, cpuid);
10192 		old_pcidp->pm_gen = 0;
10193 	}
10194 
10195 	pcidp = zpcpu_get_cpu(pmap->pm_pcidp, cpuid);
10196 	cached = pmap_pcid_alloc_checked(pmap, pcidp);
10197 	cr3 = rcr3();
10198 	if ((cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3)
10199 		load_cr3(pmap->pm_cr3 | pcidp->pm_pcid);
10200 	PCPU_SET(curpmap, pmap);
10201 	kcr3 = pmap->pm_cr3 | pcidp->pm_pcid;
10202 	ucr3 = pmap->pm_ucr3 | pcidp->pm_pcid | PMAP_PCID_USER_PT;
10203 
10204 	if (!cached && pmap->pm_ucr3 != PMAP_NO_CR3)
10205 		PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
10206 
10207 	PCPU_SET(kcr3, kcr3 | CR3_PCID_SAVE);
10208 	PCPU_SET(ucr3, ucr3 | CR3_PCID_SAVE);
10209 	if (cached)
10210 		counter_u64_add(pcid_save_cnt, 1);
10211 
10212 	pmap_activate_sw_pti_post(td, pmap);
10213 }
10214 
10215 static void
10216 pmap_activate_sw_pcid_nopti(struct thread *td __unused, pmap_t pmap,
10217     u_int cpuid)
10218 {
10219 	struct pmap_pcid *pcidp;
10220 	uint64_t cached, cr3;
10221 
10222 	KASSERT((read_rflags() & PSL_I) == 0,
10223 	    ("PCID needs interrupts disabled in pmap_activate_sw()"));
10224 
10225 	pcidp = zpcpu_get_cpu(pmap->pm_pcidp, cpuid);
10226 	cached = pmap_pcid_alloc_checked(pmap, pcidp);
10227 	cr3 = rcr3();
10228 	if (!cached || (cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3)
10229 		load_cr3(pmap->pm_cr3 | pcidp->pm_pcid | cached);
10230 	PCPU_SET(curpmap, pmap);
10231 	if (cached)
10232 		counter_u64_add(pcid_save_cnt, 1);
10233 }
10234 
10235 static void
10236 pmap_activate_sw_nopcid_nopti(struct thread *td __unused, pmap_t pmap,
10237     u_int cpuid __unused)
10238 {
10239 
10240 	load_cr3(pmap->pm_cr3);
10241 	PCPU_SET(curpmap, pmap);
10242 }
10243 
10244 static void
10245 pmap_activate_sw_nopcid_pti(struct thread *td, pmap_t pmap,
10246     u_int cpuid __unused)
10247 {
10248 
10249 	pmap_activate_sw_nopcid_nopti(td, pmap, cpuid);
10250 	PCPU_SET(kcr3, pmap->pm_cr3);
10251 	PCPU_SET(ucr3, pmap->pm_ucr3);
10252 	pmap_activate_sw_pti_post(td, pmap);
10253 }
10254 
10255 DEFINE_IFUNC(static, void, pmap_activate_sw_mode, (struct thread *, pmap_t,
10256     u_int))
10257 {
10258 
10259 	if (pmap_pcid_enabled && pti)
10260 		return (pmap_activate_sw_pcid_pti);
10261 	else if (pmap_pcid_enabled && !pti)
10262 		return (pmap_activate_sw_pcid_nopti);
10263 	else if (!pmap_pcid_enabled && pti)
10264 		return (pmap_activate_sw_nopcid_pti);
10265 	else /* if (!pmap_pcid_enabled && !pti) */
10266 		return (pmap_activate_sw_nopcid_nopti);
10267 }
10268 
10269 void
10270 pmap_activate_sw(struct thread *td)
10271 {
10272 	pmap_t oldpmap, pmap;
10273 	u_int cpuid;
10274 
10275 	oldpmap = PCPU_GET(curpmap);
10276 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
10277 	if (oldpmap == pmap) {
10278 		if (cpu_vendor_id != CPU_VENDOR_INTEL)
10279 			mfence();
10280 		return;
10281 	}
10282 	cpuid = PCPU_GET(cpuid);
10283 #ifdef SMP
10284 	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
10285 #else
10286 	CPU_SET(cpuid, &pmap->pm_active);
10287 #endif
10288 	pmap_activate_sw_mode(td, pmap, cpuid);
10289 #ifdef SMP
10290 	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
10291 #else
10292 	CPU_CLR(cpuid, &oldpmap->pm_active);
10293 #endif
10294 }
10295 
10296 void
10297 pmap_activate(struct thread *td)
10298 {
10299 	/*
10300 	 * invltlb_{invpcid,}_pcid_handler() is used to handle an
10301 	 * invalidate_all IPI, which checks for curpmap ==
10302 	 * smp_tlb_pmap.  The below sequence of operations has a
10303 	 * window where %CR3 is loaded with the new pmap's PML4
10304 	 * address, but the curpmap value has not yet been updated.
10305 	 * This causes the invltlb IPI handler, which is called
10306 	 * between the updates, to execute as a NOP, which leaves
10307 	 * stale TLB entries.
10308 	 *
10309 	 * Note that the most common use of pmap_activate_sw(), from
10310 	 * a context switch, is immune to this race, because
10311 	 * interrupts are disabled (while the thread lock is owned),
10312 	 * so the IPI is delayed until after curpmap is updated.  Protect
10313 	 * other callers in a similar way, by disabling interrupts
10314 	 * around the %cr3 register reload and curpmap assignment.
10315 	 */
10316 	spinlock_enter();
10317 	pmap_activate_sw(td);
10318 	spinlock_exit();
10319 }
10320 
10321 void
10322 pmap_activate_boot(pmap_t pmap)
10323 {
10324 	uint64_t kcr3;
10325 	u_int cpuid;
10326 
10327 	/*
10328 	 * The kernel_pmap must never be deactivated; we ensure that
10329 	 * by never activating it at all.
10330 	 */
10331 	MPASS(pmap != kernel_pmap);
10332 
10333 	cpuid = PCPU_GET(cpuid);
10334 #ifdef SMP
10335 	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
10336 #else
10337 	CPU_SET(cpuid, &pmap->pm_active);
10338 #endif
10339 	PCPU_SET(curpmap, pmap);
10340 	if (pti) {
10341 		kcr3 = pmap->pm_cr3;
10342 		if (pmap_pcid_enabled)
10343 			kcr3 |= pmap_get_pcid(pmap) | CR3_PCID_SAVE;
10344 	} else {
10345 		kcr3 = PMAP_NO_CR3;
10346 	}
10347 	PCPU_SET(kcr3, kcr3);
10348 	PCPU_SET(ucr3, PMAP_NO_CR3);
10349 }
10350 
10351 void
10352 pmap_active_cpus(pmap_t pmap, cpuset_t *res)
10353 {
10354 	*res = pmap->pm_active;
10355 }
10356 
10357 void
10358 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
10359 {
10360 }
10361 
10362 /*
10363  *	Increase the starting virtual address of the given mapping if a
10364  *	different alignment might result in more superpage mappings.
10365  */
10366 void
10367 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
10368     vm_offset_t *addr, vm_size_t size)
10369 {
10370 	vm_offset_t superpage_offset;
10371 
10372 	if (size < NBPDR)
10373 		return;
10374 	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
10375 		offset += ptoa(object->pg_color);
10376 	superpage_offset = offset & PDRMASK;
10377 	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
10378 	    (*addr & PDRMASK) == superpage_offset)
10379 		return;
10380 	if ((*addr & PDRMASK) < superpage_offset)
10381 		*addr = (*addr & ~PDRMASK) + superpage_offset;
10382 	else
10383 		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
10384 }
10385 
10386 #ifdef INVARIANTS
10387 static unsigned long num_dirty_emulations;
10388 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_dirty_emulations, CTLFLAG_RW,
10389 	     &num_dirty_emulations, 0, NULL);
10390 
10391 static unsigned long num_accessed_emulations;
10392 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_accessed_emulations, CTLFLAG_RW,
10393 	     &num_accessed_emulations, 0, NULL);
10394 
10395 static unsigned long num_superpage_accessed_emulations;
10396 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_superpage_accessed_emulations, CTLFLAG_RW,
10397 	     &num_superpage_accessed_emulations, 0, NULL);
10398 
10399 static unsigned long ad_emulation_superpage_promotions;
10400 SYSCTL_ULONG(_vm_pmap, OID_AUTO, ad_emulation_superpage_promotions, CTLFLAG_RW,
10401 	     &ad_emulation_superpage_promotions, 0, NULL);
10402 #endif	/* INVARIANTS */
10403 
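/*
 * Emulate hardware accessed/dirty bit updates for pmap types that lack them
 * (see pmap_emulate_ad_bits()).  Returns 0 if the fault was handled here by
 * setting PG_A/PG_M in the mapping, or -1 if the caller must fall back to
 * normal fault processing.
 */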
10404 int
10405 pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype)
10406 {
10407 	int rv;
10408 	struct rwlock *lock;
10409 #if VM_NRESERVLEVEL > 0
10410 	vm_page_t m, mpte;
10411 #endif
10412 	pd_entry_t *pde;
10413 	pt_entry_t *pte, PG_A, PG_M, PG_RW, PG_V;
10414 
10415 	KASSERT(ftype == VM_PROT_READ || ftype == VM_PROT_WRITE,
10416 	    ("pmap_emulate_accessed_dirty: invalid fault type %d", ftype));
10417 
10418 	if (!pmap_emulate_ad_bits(pmap))
10419 		return (-1);
10420 
10421 	PG_A = pmap_accessed_bit(pmap);
10422 	PG_M = pmap_modified_bit(pmap);
10423 	PG_V = pmap_valid_bit(pmap);
10424 	PG_RW = pmap_rw_bit(pmap);
10425 
10426 	rv = -1;
10427 	lock = NULL;
10428 	PMAP_LOCK(pmap);
10429 
10430 	pde = pmap_pde(pmap, va);
10431 	if (pde == NULL || (*pde & PG_V) == 0)
10432 		goto done;
10433 
10434 	if ((*pde & PG_PS) != 0) {
10435 		if (ftype == VM_PROT_READ) {
10436 #ifdef INVARIANTS
10437 			atomic_add_long(&num_superpage_accessed_emulations, 1);
10438 #endif
10439 			*pde |= PG_A;
10440 			rv = 0;
10441 		}
10442 		goto done;
10443 	}
10444 
10445 	pte = pmap_pde_to_pte(pde, va);
10446 	if ((*pte & PG_V) == 0)
10447 		goto done;
10448 
10449 	if (ftype == VM_PROT_WRITE) {
10450 		if ((*pte & PG_RW) == 0)
10451 			goto done;
10452 		/*
10453 		 * Set the modified and accessed bits simultaneously.
10454 		 *
10455 		 * Intel EPT PTEs that do software emulation of A/D bits map
10456 		 * PG_A and PG_M to EPT_PG_READ and EPT_PG_WRITE respectively.
10457 		 * An EPT misconfiguration is triggered if the PTE is writable
10458 		 * but not readable (WR=10). This is avoided by setting PG_A
10459 		 * and PG_M simultaneously.
10460 		 */
10461 		*pte |= PG_M | PG_A;
10462 	} else {
10463 		*pte |= PG_A;
10464 	}
10465 
10466 #if VM_NRESERVLEVEL > 0
10467 	/* try to promote the mapping */
10468 	if (va < VM_MAXUSER_ADDRESS)
10469 		mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
10470 	else
10471 		mpte = NULL;
10472 
10473 	m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
10474 
10475 	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
10476 	    (m->flags & PG_FICTITIOUS) == 0 &&
10477 	    vm_reserv_level_iffullpop(m) == 0 &&
10478 	    pmap_promote_pde(pmap, pde, va, mpte, &lock)) {
10479 #ifdef INVARIANTS
10480 		atomic_add_long(&ad_emulation_superpage_promotions, 1);
10481 #endif
10482 	}
10483 #endif
10484 
10485 #ifdef INVARIANTS
10486 	if (ftype == VM_PROT_WRITE)
10487 		atomic_add_long(&num_dirty_emulations, 1);
10488 	else
10489 		atomic_add_long(&num_accessed_emulations, 1);
10490 #endif
10491 	rv = 0;		/* success */
10492 done:
10493 	if (lock != NULL)
10494 		rw_wunlock(lock);
10495 	PMAP_UNLOCK(pmap);
10496 	return (rv);
10497 }
10498 
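/*
 * Copy the raw page table entries used to translate va into ptr[], one per
 * paging level, stopping after the first entry that is invalid or is a
 * superpage leaf.  *num is set to the number of entries recorded.
 */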
10499 void
10500 pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num)
10501 {
10502 	pml4_entry_t *pml4;
10503 	pdp_entry_t *pdp;
10504 	pd_entry_t *pde;
10505 	pt_entry_t *pte, PG_V;
10506 	int idx;
10507 
10508 	idx = 0;
10509 	PG_V = pmap_valid_bit(pmap);
10510 	PMAP_LOCK(pmap);
10511 
10512 	pml4 = pmap_pml4e(pmap, va);
10513 	if (pml4 == NULL)
10514 		goto done;
10515 	ptr[idx++] = *pml4;
10516 	if ((*pml4 & PG_V) == 0)
10517 		goto done;
10518 
10519 	pdp = pmap_pml4e_to_pdpe(pml4, va);
10520 	ptr[idx++] = *pdp;
10521 	if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0)
10522 		goto done;
10523 
10524 	pde = pmap_pdpe_to_pde(pdp, va);
10525 	ptr[idx++] = *pde;
10526 	if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0)
10527 		goto done;
10528 
10529 	pte = pmap_pde_to_pte(pde, va);
10530 	ptr[idx++] = *pte;
10531 
10532 done:
10533 	PMAP_UNLOCK(pmap);
10534 	*num = idx;
10535 }
10536 
10537 /**
10538  * Get the kernel virtual address of a set of physical pages.  If there are
10539  * physical addresses not covered by the DMAP, perform a transient mapping
10540  * that will be removed by calling pmap_unmap_io_transient().
10541  *
10542  * \param page        The pages for which the caller wishes to obtain
10543  *                    kernel virtual addresses.
10544  * \param vaddr       On return contains the kernel virtual memory address
10545  *                    of the pages passed in the page parameter.
10546  * \param count       Number of pages passed in.
10547  * \param can_fault   true if the thread using the mapped pages can take
10548  *                    page faults, false otherwise.
10549  *
10550  * \returns true if the caller must call pmap_unmap_io_transient when
10551  *          finished or false otherwise.
10552  *
10553  */
10554 bool
10555 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
10556     bool can_fault)
10557 {
10558 	vm_paddr_t paddr;
10559 	bool needs_mapping;
10560 	int error __unused, i;
10561 
10562 	/*
10563 	 * Allocate any KVA space that we need; this is done in a separate
10564 	 * loop to avoid calling vmem_alloc() while the thread is pinned.
10565 	 */
10566 	needs_mapping = false;
10567 	for (i = 0; i < count; i++) {
10568 		paddr = VM_PAGE_TO_PHYS(page[i]);
10569 		if (__predict_false(paddr >= dmaplimit)) {
10570 			error = vmem_alloc(kernel_arena, PAGE_SIZE,
10571 			    M_BESTFIT | M_WAITOK, &vaddr[i]);
10572 			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
10573 			needs_mapping = true;
10574 		} else {
10575 			vaddr[i] = PHYS_TO_DMAP(paddr);
10576 		}
10577 	}
10578 
10579 	/* Exit early if everything is covered by the DMAP */
10580 	if (!needs_mapping)
10581 		return (false);
10582 
10583 	/*
10584 	 * NB:  The sequence of updating a page table followed by accesses
10585 	 * to the corresponding pages used in the !DMAP case is subject to
10586 	 * the situation described in the "AMD64 Architecture Programmer's
10587 	 * Manual Volume 2: System Programming" rev. 3.23, "7.3.1 Special
10588 	 * Coherency Considerations".  Therefore, issuing the INVLPG right
10589 	 * after modifying the PTE bits is crucial.
10590 	 */
10591 	if (!can_fault)
10592 		sched_pin();
10593 	for (i = 0; i < count; i++) {
10594 		paddr = VM_PAGE_TO_PHYS(page[i]);
10595 		if (paddr >= dmaplimit) {
10596 			if (can_fault) {
10597 				/*
10598 				 * Slow path: since we can take page faults
10599 				 * while mappings are active, don't pin the
10600 				 * thread to the CPU; instead add a global
10601 				 * mapping visible to all CPUs.
10602 				 */
10603 				pmap_qenter(vaddr[i], &page[i], 1);
10604 			} else {
10605 				pmap_kenter_attr(vaddr[i], paddr,
10606 				    page[i]->md.pat_mode);
10607 				pmap_invlpg(kernel_pmap, vaddr[i]);
10608 			}
10609 		}
10610 	}
10611 
10612 	return (needs_mapping);
10613 }
10614 
10615 void
10616 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
10617     bool can_fault)
10618 {
10619 	vm_paddr_t paddr;
10620 	int i;
10621 
10622 	if (!can_fault)
10623 		sched_unpin();
10624 	for (i = 0; i < count; i++) {
10625 		paddr = VM_PAGE_TO_PHYS(page[i]);
10626 		if (paddr >= dmaplimit) {
10627 			if (can_fault)
10628 				pmap_qremove(vaddr[i], 1);
10629 			vmem_free(kernel_arena, vaddr[i], PAGE_SIZE);
10630 		}
10631 	}
10632 }
10633 
10634 vm_offset_t
10635 pmap_quick_enter_page(vm_page_t m)
10636 {
10637 	vm_paddr_t paddr;
10638 
10639 	paddr = VM_PAGE_TO_PHYS(m);
10640 	if (paddr < dmaplimit)
10641 		return (PHYS_TO_DMAP(paddr));
10642 	mtx_lock_spin(&qframe_mtx);
10643 	KASSERT(*vtopte(qframe) == 0, ("qframe busy"));
10644 
10645 	/*
10646 	 * Since qframe is exclusively mapped by us, and we do not set
10647 	 * PG_G, we can use INVLPG here.
10648 	 */
10649 	invlpg(qframe);
10650 
10651 	pte_store(vtopte(qframe), paddr | X86_PG_RW | X86_PG_V | X86_PG_A |
10652 	    X86_PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, false));
10653 	return (qframe);
10654 }
10655 
10656 void
10657 pmap_quick_remove_page(vm_offset_t addr)
10658 {
10659 
10660 	if (addr != qframe)
10661 		return;
10662 	pte_store(vtopte(qframe), 0);
10663 	mtx_unlock_spin(&qframe_mtx);
10664 }
10665 
10666 /*
10667  * Pdp pages from the large map are managed differently from either
10668  * kernel or user page table pages.  They are permanently allocated at
10669  * initialization time, and their reference count is permanently set to
10670  * zero.  The pml4 entries pointing to those pages are copied into
10671  * each allocated pmap.
10672  *
10673  * In contrast, pd and pt pages are managed like user page table
10674  * pages.  They are dynamically allocated, and their reference count
10675  * represents the number of valid entries within the page.
10676  */
10677 static vm_page_t
10678 pmap_large_map_getptp_unlocked(void)
10679 {
10680 	return (pmap_alloc_pt_page(kernel_pmap, 0, VM_ALLOC_ZERO));
10681 }
10682 
10683 static vm_page_t
10684 pmap_large_map_getptp(void)
10685 {
10686 	vm_page_t m;
10687 
10688 	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
10689 	m = pmap_large_map_getptp_unlocked();
10690 	if (m == NULL) {
10691 		PMAP_UNLOCK(kernel_pmap);
10692 		vm_wait(NULL);
10693 		PMAP_LOCK(kernel_pmap);
10694 		/* Callers retry. */
10695 	}
10696 	return (m);
10697 }
10698 
10699 static pdp_entry_t *
10700 pmap_large_map_pdpe(vm_offset_t va)
10701 {
10702 	vm_pindex_t pml4_idx;
10703 	vm_paddr_t mphys;
10704 
10705 	pml4_idx = pmap_pml4e_index(va);
10706 	KASSERT(LMSPML4I <= pml4_idx && pml4_idx < LMSPML4I + lm_ents,
10707 	    ("pmap_large_map_pdpe: va %#jx out of range idx %#jx LMSPML4I "
10708 	    "%#jx lm_ents %d",
10709 	    (uintmax_t)va, (uintmax_t)pml4_idx, LMSPML4I, lm_ents));
10710 	KASSERT((kernel_pml4[pml4_idx] & X86_PG_V) != 0,
10711 	    ("pmap_large_map_pdpe: invalid pml4 for va %#jx idx %#jx "
10712 	    "LMSPML4I %#jx lm_ents %d",
10713 	    (uintmax_t)va, (uintmax_t)pml4_idx, LMSPML4I, lm_ents));
10714 	mphys = kernel_pml4[pml4_idx] & PG_FRAME;
10715 	return ((pdp_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pdpe_index(va));
10716 }
10717 
10718 static pd_entry_t *
10719 pmap_large_map_pde(vm_offset_t va)
10720 {
10721 	pdp_entry_t *pdpe;
10722 	vm_page_t m;
10723 	vm_paddr_t mphys;
10724 
10725 retry:
10726 	pdpe = pmap_large_map_pdpe(va);
10727 	if (*pdpe == 0) {
10728 		m = pmap_large_map_getptp();
10729 		if (m == NULL)
10730 			goto retry;
10731 		mphys = VM_PAGE_TO_PHYS(m);
10732 		*pdpe = mphys | X86_PG_A | X86_PG_RW | X86_PG_V | pg_nx;
10733 	} else {
10734 		MPASS((*pdpe & X86_PG_PS) == 0);
10735 		mphys = *pdpe & PG_FRAME;
10736 	}
10737 	return ((pd_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pde_index(va));
10738 }
10739 
10740 static pt_entry_t *
10741 pmap_large_map_pte(vm_offset_t va)
10742 {
10743 	pd_entry_t *pde;
10744 	vm_page_t m;
10745 	vm_paddr_t mphys;
10746 
10747 retry:
10748 	pde = pmap_large_map_pde(va);
10749 	if (*pde == 0) {
10750 		m = pmap_large_map_getptp();
10751 		if (m == NULL)
10752 			goto retry;
10753 		mphys = VM_PAGE_TO_PHYS(m);
10754 		*pde = mphys | X86_PG_A | X86_PG_RW | X86_PG_V | pg_nx;
10755 		PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->ref_count++;
10756 	} else {
10757 		MPASS((*pde & X86_PG_PS) == 0);
10758 		mphys = *pde & PG_FRAME;
10759 	}
10760 	return ((pt_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pte_index(va));
10761 }
10762 
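/*
 * Translate a large map virtual address into a physical address, handling
 * 1GB, 2MB, and 4KB mappings.  The mapping must exist; this is asserted.
 */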
10763 static vm_paddr_t
10764 pmap_large_map_kextract(vm_offset_t va)
10765 {
10766 	pdp_entry_t *pdpe, pdp;
10767 	pd_entry_t *pde, pd;
10768 	pt_entry_t *pte, pt;
10769 
10770 	KASSERT(PMAP_ADDRESS_IN_LARGEMAP(va),
10771 	    ("not largemap range %#lx", (u_long)va));
10772 	pdpe = pmap_large_map_pdpe(va);
10773 	pdp = *pdpe;
10774 	KASSERT((pdp & X86_PG_V) != 0,
10775 	    ("invalid pdp va %#lx pdpe %#lx pdp %#lx", va,
10776 	    (u_long)pdpe, pdp));
10777 	if ((pdp & X86_PG_PS) != 0) {
10778 		KASSERT((amd_feature & AMDID_PAGE1GB) != 0,
10779 		    ("no 1G pages, va %#lx pdpe %#lx pdp %#lx", va,
10780 		    (u_long)pdpe, pdp));
10781 		return ((pdp & PG_PS_PDP_FRAME) | (va & PDPMASK));
10782 	}
10783 	pde = pmap_pdpe_to_pde(pdpe, va);
10784 	pd = *pde;
10785 	KASSERT((pd & X86_PG_V) != 0,
10786 	    ("invalid pd va %#lx pde %#lx pd %#lx", va, (u_long)pde, pd));
10787 	if ((pd & X86_PG_PS) != 0)
10788 		return ((pd & PG_PS_FRAME) | (va & PDRMASK));
10789 	pte = pmap_pde_to_pte(pde, va);
10790 	pt = *pte;
10791 	KASSERT((pt & X86_PG_V) != 0,
10792 	    ("invalid pte va %#lx pte %#lx pt %#lx", va, (u_long)pte, pt));
10793 	return ((pt & PG_FRAME) | (va & PAGE_MASK));
10794 }
10795 
10796 static int
10797 pmap_large_map_getva(vm_size_t len, vm_offset_t align, vm_offset_t phase,
10798     vmem_addr_t *vmem_res)
10799 {
10800 
10801 	/*
10802 	 * Large mappings are all but static.  Consequently, there
10803 	 * is no point in waiting for an earlier allocation to be
10804 	 * freed.
10805 	 */
10806 	return (vmem_xalloc(large_vmem, len, align, phase, 0, VMEM_ADDR_MIN,
10807 	    VMEM_ADDR_MAX, M_NOWAIT | M_BESTFIT, vmem_res));
10808 }
10809 
10810 int
10811 pmap_large_map(vm_paddr_t spa, vm_size_t len, void **addr,
10812     vm_memattr_t mattr)
10813 {
10814 	pdp_entry_t *pdpe;
10815 	pd_entry_t *pde;
10816 	pt_entry_t *pte;
10817 	vm_offset_t va, inc;
10818 	vmem_addr_t vmem_res;
10819 	vm_paddr_t pa;
10820 	int error;
10821 
10822 	if (len == 0 || spa + len < spa)
10823 		return (EINVAL);
10824 
10825 	/* See if DMAP can serve. */
10826 	if (spa + len <= dmaplimit) {
10827 		va = PHYS_TO_DMAP(spa);
10828 		*addr = (void *)va;
10829 		return (pmap_change_attr(va, len, mattr));
10830 	}
10831 
10832 	/*
10833 	 * No, allocate KVA.  Fit the address with the best possible
10834 	 * alignment for superpages.  Fall back to a lesser alignment
10835 	 * if that fails.
10836 	 */
10837 	error = ENOMEM;
10838 	if ((amd_feature & AMDID_PAGE1GB) != 0 && rounddown2(spa + len,
10839 	    NBPDP) >= roundup2(spa, NBPDP) + NBPDP)
10840 		error = pmap_large_map_getva(len, NBPDP, spa & PDPMASK,
10841 		    &vmem_res);
10842 	if (error != 0 && rounddown2(spa + len, NBPDR) >= roundup2(spa,
10843 	    NBPDR) + NBPDR)
10844 		error = pmap_large_map_getva(len, NBPDR, spa & PDRMASK,
10845 		    &vmem_res);
10846 	if (error != 0)
10847 		error = pmap_large_map_getva(len, PAGE_SIZE, 0, &vmem_res);
10848 	if (error != 0)
10849 		return (error);
10850 
10851 	/*
10852 	 * Fill the page table.  PG_M is not pre-set; we scan the modified
10853 	 * bits in the page table to minimize flushing.  No need to
10854 	 * invalidate TLB, since we only update invalid entries.
10855 	 */
10856 	PMAP_LOCK(kernel_pmap);
10857 	for (pa = spa, va = vmem_res; len > 0; pa += inc, va += inc,
10858 	    len -= inc) {
10859 		if ((amd_feature & AMDID_PAGE1GB) != 0 && len >= NBPDP &&
10860 		    (pa & PDPMASK) == 0 && (va & PDPMASK) == 0) {
10861 			pdpe = pmap_large_map_pdpe(va);
10862 			MPASS(*pdpe == 0);
10863 			*pdpe = pa | pg_g | X86_PG_PS | X86_PG_RW |
10864 			    X86_PG_V | X86_PG_A | pg_nx |
10865 			    pmap_cache_bits(kernel_pmap, mattr, true);
10866 			inc = NBPDP;
10867 		} else if (len >= NBPDR && (pa & PDRMASK) == 0 &&
10868 		    (va & PDRMASK) == 0) {
10869 			pde = pmap_large_map_pde(va);
10870 			MPASS(*pde == 0);
10871 			*pde = pa | pg_g | X86_PG_PS | X86_PG_RW |
10872 			    X86_PG_V | X86_PG_A | pg_nx |
10873 			    pmap_cache_bits(kernel_pmap, mattr, true);
10874 			PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->
10875 			    ref_count++;
10876 			inc = NBPDR;
10877 		} else {
10878 			pte = pmap_large_map_pte(va);
10879 			MPASS(*pte == 0);
10880 			*pte = pa | pg_g | X86_PG_RW | X86_PG_V |
10881 			    X86_PG_A | pg_nx | pmap_cache_bits(kernel_pmap,
10882 			    mattr, false);
10883 			PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte))->
10884 			    ref_count++;
10885 			inc = PAGE_SIZE;
10886 		}
10887 	}
10888 	PMAP_UNLOCK(kernel_pmap);
10889 	MPASS(len == 0);
10890 
10891 	*addr = (void *)vmem_res;
10892 	return (0);
10893 }
10894 
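/*
 * Tear down a mapping established by pmap_large_map(): clear the PTEs, PDEs,
 * or PDPEs covering the range, free page table pages whose reference counts
 * drop to zero, invalidate the TLB, and release the KVA back to large_vmem.
 * Addresses within the DMAP are ignored, since pmap_large_map() may have
 * satisfied the request from the DMAP without allocating KVA.
 */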
10895 void
10896 pmap_large_unmap(void *svaa, vm_size_t len)
10897 {
10898 	vm_offset_t sva, va;
10899 	vm_size_t inc;
10900 	pdp_entry_t *pdpe, pdp;
10901 	pd_entry_t *pde, pd;
10902 	pt_entry_t *pte;
10903 	vm_page_t m;
10904 	struct spglist spgf;
10905 
10906 	sva = (vm_offset_t)svaa;
10907 	if (len == 0 || sva + len < sva || (sva >= DMAP_MIN_ADDRESS &&
10908 	    sva + len <= DMAP_MIN_ADDRESS + dmaplimit))
10909 		return;
10910 
10911 	SLIST_INIT(&spgf);
10912 	KASSERT(PMAP_ADDRESS_IN_LARGEMAP(sva) &&
10913 	    PMAP_ADDRESS_IN_LARGEMAP(sva + len - 1),
10914 	    ("not largemap range %#lx %#lx", (u_long)svaa, (u_long)svaa + len));
10915 	PMAP_LOCK(kernel_pmap);
10916 	for (va = sva; va < sva + len; va += inc) {
10917 		pdpe = pmap_large_map_pdpe(va);
10918 		pdp = *pdpe;
10919 		KASSERT((pdp & X86_PG_V) != 0,
10920 		    ("invalid pdp va %#lx pdpe %#lx pdp %#lx", va,
10921 		    (u_long)pdpe, pdp));
10922 		if ((pdp & X86_PG_PS) != 0) {
10923 			KASSERT((amd_feature & AMDID_PAGE1GB) != 0,
10924 			    ("no 1G pages, va %#lx pdpe %#lx pdp %#lx", va,
10925 			    (u_long)pdpe, pdp));
10926 			KASSERT((va & PDPMASK) == 0,
10927 			    ("PDPMASK bit set, va %#lx pdpe %#lx pdp %#lx", va,
10928 			    (u_long)pdpe, pdp));
10929 			KASSERT(va + NBPDP <= sva + len,
10930 			    ("unmap covers partial 1GB page, sva %#lx va %#lx "
10931 			    "pdpe %#lx pdp %#lx len %#lx", sva, va,
10932 			    (u_long)pdpe, pdp, len));
10933 			*pdpe = 0;
10934 			inc = NBPDP;
10935 			continue;
10936 		}
10937 		pde = pmap_pdpe_to_pde(pdpe, va);
10938 		pd = *pde;
10939 		KASSERT((pd & X86_PG_V) != 0,
10940 		    ("invalid pd va %#lx pde %#lx pd %#lx", va,
10941 		    (u_long)pde, pd));
10942 		if ((pd & X86_PG_PS) != 0) {
10943 			KASSERT((va & PDRMASK) == 0,
10944 			    ("PDRMASK bit set, va %#lx pde %#lx pd %#lx", va,
10945 			    (u_long)pde, pd));
10946 			KASSERT(va + NBPDR <= sva + len,
10947 			    ("unmap covers partial 2MB page, sva %#lx va %#lx "
10948 			    "pde %#lx pd %#lx len %#lx", sva, va, (u_long)pde,
10949 			    pd, len));
10950 			pde_store(pde, 0);
10951 			inc = NBPDR;
10952 			m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
10953 			m->ref_count--;
10954 			if (m->ref_count == 0) {
10955 				*pdpe = 0;
10956 				SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
10957 			}
10958 			continue;
10959 		}
10960 		pte = pmap_pde_to_pte(pde, va);
10961 		KASSERT((*pte & X86_PG_V) != 0,
10962 		    ("invalid pte va %#lx pte %#lx pt %#lx", va,
10963 		    (u_long)pte, *pte));
10964 		pte_clear(pte);
10965 		inc = PAGE_SIZE;
10966 		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pte));
10967 		m->ref_count--;
10968 		if (m->ref_count == 0) {
10969 			*pde = 0;
10970 			SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
10971 			m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
10972 			m->ref_count--;
10973 			if (m->ref_count == 0) {
10974 				*pdpe = 0;
10975 				SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
10976 			}
10977 		}
10978 	}
10979 	pmap_invalidate_range(kernel_pmap, sva, sva + len);
10980 	PMAP_UNLOCK(kernel_pmap);
10981 	vm_page_free_pages_toq(&spgf, false);
10982 	vmem_free(large_vmem, sva, len);
10983 }
10984 
10985 static void
10986 pmap_large_map_wb_fence_mfence(void)
10987 {
10988 
10989 	mfence();
10990 }
10991 
10992 static void
10993 pmap_large_map_wb_fence_atomic(void)
10994 {
10995 
10996 	atomic_thread_fence_seq_cst();
10997 }
10998 
10999 static void
11000 pmap_large_map_wb_fence_nop(void)
11001 {
11002 }
11003 
11004 DEFINE_IFUNC(static, void, pmap_large_map_wb_fence, (void))
11005 {
11006 
11007 	if (cpu_vendor_id != CPU_VENDOR_INTEL)
11008 		return (pmap_large_map_wb_fence_mfence);
11009 	else if ((cpu_stdext_feature & (CPUID_STDEXT_CLWB |
11010 	    CPUID_STDEXT_CLFLUSHOPT)) == 0)
11011 		return (pmap_large_map_wb_fence_atomic);
11012 	else
11013 		/* clflush is ordered strongly enough */
11014 		return (pmap_large_map_wb_fence_nop);
11015 }
11016 
11017 static void
11018 pmap_large_map_flush_range_clwb(vm_offset_t va, vm_size_t len)
11019 {
11020 
11021 	for (; len > 0; len -= cpu_clflush_line_size,
11022 	    va += cpu_clflush_line_size)
11023 		clwb(va);
11024 }
11025 
11026 static void
11027 pmap_large_map_flush_range_clflushopt(vm_offset_t va, vm_size_t len)
11028 {
11029 
11030 	for (; len > 0; len -= cpu_clflush_line_size,
11031 	    va += cpu_clflush_line_size)
11032 		clflushopt(va);
11033 }
11034 
11035 static void
11036 pmap_large_map_flush_range_clflush(vm_offset_t va, vm_size_t len)
11037 {
11038 
11039 	for (; len > 0; len -= cpu_clflush_line_size,
11040 	    va += cpu_clflush_line_size)
11041 		clflush(va);
11042 }
11043 
11044 static void
11045 pmap_large_map_flush_range_nop(vm_offset_t sva __unused, vm_size_t len __unused)
11046 {
11047 }
11048 
11049 DEFINE_IFUNC(static, void, pmap_large_map_flush_range, (vm_offset_t, vm_size_t))
11050 {
11051 
11052 	if ((cpu_stdext_feature & CPUID_STDEXT_CLWB) != 0)
11053 		return (pmap_large_map_flush_range_clwb);
11054 	else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0)
11055 		return (pmap_large_map_flush_range_clflushopt);
11056 	else if ((cpu_feature & CPUID_CLFSH) != 0)
11057 		return (pmap_large_map_flush_range_clflush);
11058 	else
11059 		return (pmap_large_map_flush_range_nop);
11060 }
11061 
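/*
 * Write back modified cache lines for the given large map range.  PG_M in a
 * page table entry tracks whether the mapping may have dirty cache lines,
 * and the software-available bit X86_PG_AVAIL1 marks a write-back in
 * progress, allowing concurrent callers to coordinate: a caller that
 * observes another write-back either waits for it to finish or repeats the
 * flush itself.
 */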
11062 static void
11063 pmap_large_map_wb_large(vm_offset_t sva, vm_offset_t eva)
11064 {
11065 	volatile u_long *pe;
11066 	u_long p;
11067 	vm_offset_t va;
11068 	vm_size_t inc;
11069 	bool seen_other;
11070 
11071 	for (va = sva; va < eva; va += inc) {
11072 		inc = 0;
11073 		if ((amd_feature & AMDID_PAGE1GB) != 0) {
11074 			pe = (volatile u_long *)pmap_large_map_pdpe(va);
11075 			p = *pe;
11076 			if ((p & X86_PG_PS) != 0)
11077 				inc = NBPDP;
11078 		}
11079 		if (inc == 0) {
11080 			pe = (volatile u_long *)pmap_large_map_pde(va);
11081 			p = *pe;
11082 			if ((p & X86_PG_PS) != 0)
11083 				inc = NBPDR;
11084 		}
11085 		if (inc == 0) {
11086 			pe = (volatile u_long *)pmap_large_map_pte(va);
11087 			p = *pe;
11088 			inc = PAGE_SIZE;
11089 		}
11090 		seen_other = false;
11091 		for (;;) {
11092 			if ((p & X86_PG_AVAIL1) != 0) {
11093 				/*
11094 				 * Spin-wait for the end of a parallel
11095 				 * write-back.
11096 				 */
11097 				cpu_spinwait();
11098 				p = *pe;
11099 
11100 				/*
11101 				 * If we saw another write-back
11102 				 * occurring, we cannot rely on PG_M to
11103 				 * indicate the state of the cache.  The
11104 				 * PG_M bit is cleared before the
11105 				 * flush to avoid ignoring new writes,
11106 				 * and writes which are relevant for
11107 				 * us might happen after.
11108 				 */
11109 				seen_other = true;
11110 				continue;
11111 			}
11112 
11113 			if ((p & X86_PG_M) != 0 || seen_other) {
11114 				if (!atomic_fcmpset_long(pe, &p,
11115 				    (p & ~X86_PG_M) | X86_PG_AVAIL1))
11116 					/*
11117 					 * If we saw PG_M without
11118 					 * PG_AVAIL1, and then on the
11119 					 * next attempt we do not
11120 					 * observe either PG_M or
11121 					 * PG_AVAIL1, the other
11122 					 * write-back started after us
11123 					 * and finished before us.  We
11124 					 * can rely on it doing our
11125 					 * work.
11126 					 */
11127 					continue;
11128 				pmap_large_map_flush_range(va, inc);
11129 				atomic_clear_long(pe, X86_PG_AVAIL1);
11130 			}
11131 			break;
11132 		}
11133 		maybe_yield();
11134 	}
11135 }
11136 
11137 /*
11138  * Write-back cache lines for the given address range.
11139  *
11140  * Must be called only on the range or sub-range returned from
11141  * pmap_large_map().  Must not be called on the coalesced ranges.
11142  *
11143  * Does nothing on CPUs without CLWB, CLFLUSHOPT, or CLFLUSH
11144  * instruction support.
11145  */
11146 void
11147 pmap_large_map_wb(void *svap, vm_size_t len)
11148 {
11149 	vm_offset_t eva, sva;
11150 
11151 	sva = (vm_offset_t)svap;
11152 	eva = sva + len;
11153 	pmap_large_map_wb_fence();
11154 	if (sva >= DMAP_MIN_ADDRESS && eva <= DMAP_MIN_ADDRESS + dmaplimit) {
11155 		pmap_large_map_flush_range(sva, len);
11156 	} else {
11157 		KASSERT(sva >= LARGEMAP_MIN_ADDRESS &&
11158 		    eva <= LARGEMAP_MIN_ADDRESS + lm_ents * NBPML4,
11159 		    ("pmap_large_map_wb: not largemap %#lx %#lx", sva, len));
11160 		pmap_large_map_wb_large(sva, eva);
11161 	}
11162 	pmap_large_map_wb_fence();
11163 }
11164 
11165 static vm_page_t
11166 pmap_pti_alloc_page(void)
11167 {
11168 	vm_page_t m;
11169 
11170 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11171 	m = vm_page_grab(pti_obj, pti_pg_idx++, VM_ALLOC_WIRED | VM_ALLOC_ZERO);
11172 	return (m);
11173 }
11174 
11175 static bool
11176 pmap_pti_free_page(vm_page_t m)
11177 {
11178 	if (!vm_page_unwire_noq(m))
11179 		return (false);
11180 	vm_page_xbusy_claim(m);
11181 	vm_page_free_zero(m);
11182 	return (true);
11183 }
11184 
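/*
 * Build the PTI page table hierarchy used while executing in user mode: a
 * separate pml4 into which only the parts of the kernel that must remain
 * mapped are entered, namely the per-CPU data, the IDT, the special IST
 * stacks, and the kernel text.
 */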
11185 static void
11186 pmap_pti_init(void)
11187 {
11188 	vm_page_t pml4_pg;
11189 	pdp_entry_t *pdpe;
11190 	vm_offset_t va;
11191 	int i;
11192 
11193 	if (!pti)
11194 		return;
11195 	pti_obj = vm_pager_allocate(OBJT_PHYS, NULL, 0, VM_PROT_ALL, 0, NULL);
11196 	VM_OBJECT_WLOCK(pti_obj);
11197 	pml4_pg = pmap_pti_alloc_page();
11198 	pti_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4_pg));
11199 	for (va = VM_MIN_KERNEL_ADDRESS; va <= VM_MAX_KERNEL_ADDRESS &&
11200 	    va >= VM_MIN_KERNEL_ADDRESS && va > NBPML4; va += NBPML4) {
11201 		pdpe = pmap_pti_pdpe(va);
11202 		pmap_pti_wire_pte(pdpe);
11203 	}
11204 	pmap_pti_add_kva_locked((vm_offset_t)&__pcpu[0],
11205 	    (vm_offset_t)&__pcpu[0] + sizeof(__pcpu[0]) * MAXCPU, false);
11206 	pmap_pti_add_kva_locked((vm_offset_t)idt, (vm_offset_t)idt +
11207 	    sizeof(struct gate_descriptor) * NIDT, false);
11208 	CPU_FOREACH(i) {
11209 		/* Doublefault stack IST 1 */
11210 		va = __pcpu[i].pc_common_tss.tss_ist1 + sizeof(struct nmi_pcpu);
11211 		pmap_pti_add_kva_locked(va - DBLFAULT_STACK_SIZE, va, false);
11212 		/* NMI stack IST 2 */
11213 		va = __pcpu[i].pc_common_tss.tss_ist2 + sizeof(struct nmi_pcpu);
11214 		pmap_pti_add_kva_locked(va - NMI_STACK_SIZE, va, false);
11215 		/* MC# stack IST 3 */
11216 		va = __pcpu[i].pc_common_tss.tss_ist3 +
11217 		    sizeof(struct nmi_pcpu);
11218 		pmap_pti_add_kva_locked(va - MCE_STACK_SIZE, va, false);
11219 		/* DB# stack IST 4 */
11220 		va = __pcpu[i].pc_common_tss.tss_ist4 + sizeof(struct nmi_pcpu);
11221 		pmap_pti_add_kva_locked(va - DBG_STACK_SIZE, va, false);
11222 	}
11223 	pmap_pti_add_kva_locked((vm_offset_t)KERNSTART, (vm_offset_t)etext,
11224 	    true);
11225 	pti_finalized = true;
11226 	VM_OBJECT_WUNLOCK(pti_obj);
11227 }
11228 
11229 static void
11230 pmap_cpu_init(void *arg __unused)
11231 {
11232 	CPU_COPY(&all_cpus, &kernel_pmap->pm_active);
11233 	pmap_pti_init();
11234 }
11235 SYSINIT(pmap_cpu, SI_SUB_CPU + 1, SI_ORDER_ANY, pmap_cpu_init, NULL);
11236 
11237 static pdp_entry_t *
11238 pmap_pti_pdpe(vm_offset_t va)
11239 {
11240 	pml4_entry_t *pml4e;
11241 	pdp_entry_t *pdpe;
11242 	vm_page_t m;
11243 	vm_pindex_t pml4_idx;
11244 	vm_paddr_t mphys;
11245 
11246 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11247 
11248 	pml4_idx = pmap_pml4e_index(va);
11249 	pml4e = &pti_pml4[pml4_idx];
11250 	m = NULL;
11251 	if (*pml4e == 0) {
11252 		if (pti_finalized)
11253 			panic("pml4 alloc after finalization\n");
11254 		m = pmap_pti_alloc_page();
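		/*
		 * The allocation may sleep and drop the pti_obj lock, so
		 * re-check whether another thread installed the entry in the
		 * meantime; if so, free our page and use the existing entry.
		 */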
11255 		if (*pml4e != 0) {
11256 			pmap_pti_free_page(m);
11257 			mphys = *pml4e & ~PAGE_MASK;
11258 		} else {
11259 			mphys = VM_PAGE_TO_PHYS(m);
11260 			*pml4e = mphys | X86_PG_RW | X86_PG_V;
11261 		}
11262 	} else {
11263 		mphys = *pml4e & ~PAGE_MASK;
11264 	}
11265 	pdpe = (pdp_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pdpe_index(va);
11266 	return (pdpe);
11267 }
11268 
11269 static void
11270 pmap_pti_wire_pte(void *pte)
11271 {
11272 	vm_page_t m;
11273 
11274 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11275 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
11276 	m->ref_count++;
11277 }
11278 
11279 static void
11280 pmap_pti_unwire_pde(void *pde, bool only_ref)
11281 {
11282 	vm_page_t m;
11283 
11284 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11285 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde));
11286 	MPASS(only_ref || m->ref_count > 1);
11287 	pmap_pti_free_page(m);
11288 }
11289 
11290 static void
11291 pmap_pti_unwire_pte(void *pte, vm_offset_t va)
11292 {
11293 	vm_page_t m;
11294 	pd_entry_t *pde;
11295 
11296 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11297 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
11298 	if (pmap_pti_free_page(m)) {
11299 		pde = pmap_pti_pde(va);
11300 		MPASS((*pde & (X86_PG_PS | X86_PG_V)) == X86_PG_V);
11301 		*pde = 0;
11302 		pmap_pti_unwire_pde(pde, false);
11303 	}
11304 }
11305 
11306 static pd_entry_t *
11307 pmap_pti_pde(vm_offset_t va)
11308 {
11309 	pdp_entry_t *pdpe;
11310 	pd_entry_t *pde;
11311 	vm_page_t m;
11312 	vm_pindex_t pd_idx;
11313 	vm_paddr_t mphys;
11314 
11315 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11316 
11317 	pdpe = pmap_pti_pdpe(va);
11318 	if (*pdpe == 0) {
11319 		m = pmap_pti_alloc_page();
11320 		if (*pdpe != 0) {
11321 			pmap_pti_free_page(m);
11322 			MPASS((*pdpe & X86_PG_PS) == 0);
11323 			mphys = *pdpe & ~PAGE_MASK;
11324 		} else {
11325 			mphys = VM_PAGE_TO_PHYS(m);
11326 			*pdpe = mphys | X86_PG_RW | X86_PG_V;
11327 		}
11328 	} else {
11329 		MPASS((*pdpe & X86_PG_PS) == 0);
11330 		mphys = *pdpe & ~PAGE_MASK;
11331 	}
11332 
11333 	pde = (pd_entry_t *)PHYS_TO_DMAP(mphys);
11334 	pd_idx = pmap_pde_index(va);
11335 	pde += pd_idx;
11336 	return (pde);
11337 }
11338 
11339 static pt_entry_t *
11340 pmap_pti_pte(vm_offset_t va, bool *unwire_pde)
11341 {
11342 	pd_entry_t *pde;
11343 	pt_entry_t *pte;
11344 	vm_page_t m;
11345 	vm_paddr_t mphys;
11346 
11347 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11348 
11349 	pde = pmap_pti_pde(va);
11350 	if (unwire_pde != NULL) {
11351 		*unwire_pde = true;
11352 		pmap_pti_wire_pte(pde);
11353 	}
11354 	if (*pde == 0) {
11355 		m = pmap_pti_alloc_page();
11356 		if (*pde != 0) {
11357 			pmap_pti_free_page(m);
11358 			MPASS((*pde & X86_PG_PS) == 0);
11359 			mphys = *pde & ~(PAGE_MASK | pg_nx);
11360 		} else {
11361 			mphys = VM_PAGE_TO_PHYS(m);
11362 			*pde = mphys | X86_PG_RW | X86_PG_V;
11363 			if (unwire_pde != NULL)
11364 				*unwire_pde = false;
11365 		}
11366 	} else {
11367 		MPASS((*pde & X86_PG_PS) == 0);
11368 		mphys = *pde & ~(PAGE_MASK | pg_nx);
11369 	}
11370 
11371 	pte = (pt_entry_t *)PHYS_TO_DMAP(mphys);
11372 	pte += pmap_pte_index(va);
11373 
11374 	return (pte);
11375 }
11376 
11377 static void
11378 pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva, bool exec)
11379 {
11380 	vm_paddr_t pa;
11381 	pd_entry_t *pde;
11382 	pt_entry_t *pte, ptev;
11383 	bool unwire_pde;
11384 
11385 	VM_OBJECT_ASSERT_WLOCKED(pti_obj);
11386 
11387 	sva = trunc_page(sva);
11388 	MPASS(sva > VM_MAXUSER_ADDRESS);
11389 	eva = round_page(eva);
11390 	MPASS(sva < eva);
11391 	for (; sva < eva; sva += PAGE_SIZE) {
11392 		pte = pmap_pti_pte(sva, &unwire_pde);
11393 		pa = pmap_kextract(sva);
11394 		ptev = pa | X86_PG_RW | X86_PG_V | X86_PG_A | X86_PG_G |
11395 		    (exec ? 0 : pg_nx) | pmap_cache_bits(kernel_pmap,
11396 		    VM_MEMATTR_DEFAULT, false);
11397 		if (*pte == 0) {
11398 			pte_store(pte, ptev);
11399 			pmap_pti_wire_pte(pte);
11400 		} else {
11401 			KASSERT(!pti_finalized,
11402 			    ("pti overlap after fin %#lx %#lx %#lx",
11403 			    sva, *pte, ptev));
11404 			KASSERT(*pte == ptev,
11405 			    ("pti non-identical pte after fin %#lx %#lx %#lx",
11406 			    sva, *pte, ptev));
11407 		}
11408 		if (unwire_pde) {
11409 			pde = pmap_pti_pde(sva);
11410 			pmap_pti_unwire_pde(pde, true);
11411 		}
11412 	}
11413 }
11414 
11415 void
11416 pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec)
11417 {
11418 
11419 	if (!pti)
11420 		return;
11421 	VM_OBJECT_WLOCK(pti_obj);
11422 	pmap_pti_add_kva_locked(sva, eva, exec);
11423 	VM_OBJECT_WUNLOCK(pti_obj);
11424 }
11425 
11426 void
11427 pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva)
11428 {
11429 	pt_entry_t *pte;
11430 	vm_offset_t va;
11431 
11432 	if (!pti)
11433 		return;
11434 	sva = rounddown2(sva, PAGE_SIZE);
11435 	MPASS(sva > VM_MAXUSER_ADDRESS);
11436 	eva = roundup2(eva, PAGE_SIZE);
11437 	MPASS(sva < eva);
11438 	VM_OBJECT_WLOCK(pti_obj);
11439 	for (va = sva; va < eva; va += PAGE_SIZE) {
11440 		pte = pmap_pti_pte(va, NULL);
11441 		KASSERT((*pte & X86_PG_V) != 0,
11442 		    ("invalid pte va %#lx pte %#lx pt %#lx", va,
11443 		    (u_long)pte, *pte));
11444 		pte_clear(pte);
11445 		pmap_pti_unwire_pte(pte, va);
11446 	}
11447 	pmap_invalidate_range(kernel_pmap, sva, eva);
11448 	VM_OBJECT_WUNLOCK(pti_obj);
11449 }
11450 
11451 static void *
11452 pkru_dup_range(void *ctx __unused, void *data)
11453 {
11454 	struct pmap_pkru_range *node, *new_node;
11455 
11456 	new_node = uma_zalloc(pmap_pkru_ranges_zone, M_NOWAIT);
11457 	if (new_node == NULL)
11458 		return (NULL);
11459 	node = data;
11460 	memcpy(new_node, node, sizeof(*node));
11461 	return (new_node);
11462 }
11463 
11464 static void
11465 pkru_free_range(void *ctx __unused, void *node)
11466 {
11467 
11468 	uma_zfree(pmap_pkru_ranges_zone, node);
11469 }
11470 
11471 static int
11472 pmap_pkru_assign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, u_int keyidx,
11473     int flags)
11474 {
11475 	struct pmap_pkru_range *ppr;
11476 	int error;
11477 
11478 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11479 	MPASS(pmap->pm_type == PT_X86);
11480 	MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
11481 	if ((flags & AMD64_PKRU_EXCL) != 0 &&
11482 	    !rangeset_check_empty(&pmap->pm_pkru, sva, eva))
11483 		return (EBUSY);
11484 	ppr = uma_zalloc(pmap_pkru_ranges_zone, M_NOWAIT);
11485 	if (ppr == NULL)
11486 		return (ENOMEM);
11487 	ppr->pkru_keyidx = keyidx;
11488 	ppr->pkru_flags = flags & AMD64_PKRU_PERSIST;
11489 	error = rangeset_insert(&pmap->pm_pkru, sva, eva, ppr);
11490 	if (error != 0)
11491 		uma_zfree(pmap_pkru_ranges_zone, ppr);
11492 	return (error);
11493 }
11494 
11495 static int
11496 pmap_pkru_deassign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
11497 {
11498 
11499 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11500 	MPASS(pmap->pm_type == PT_X86);
11501 	MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
11502 	return (rangeset_remove(&pmap->pm_pkru, sva, eva));
11503 }
11504 
11505 static void
11506 pmap_pkru_deassign_all(pmap_t pmap)
11507 {
11508 
11509 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11510 	if (pmap->pm_type == PT_X86 &&
11511 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0)
11512 		rangeset_remove_all(&pmap->pm_pkru);
11513 }
11514 
11515 /*
11516  * Returns true if the PKU setting is the same across the specified address
11517  * range, and false otherwise.  When returning true, updates the referenced PTE
11518  * to reflect the PKU setting.
11519  */
11520 static bool
11521 pmap_pkru_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t *pte)
11522 {
11523 	struct pmap_pkru_range *ppr;
11524 	vm_offset_t va;
11525 	u_int keyidx;
11526 
11527 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11528 	KASSERT(pmap->pm_type != PT_X86 || (*pte & X86_PG_PKU_MASK) == 0,
11529 	    ("pte %p has unexpected PKU %ld", pte, *pte & X86_PG_PKU_MASK));
11530 	if (pmap->pm_type != PT_X86 ||
11531 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0 ||
11532 	    sva >= VM_MAXUSER_ADDRESS)
11533 		return (true);
11534 	MPASS(eva <= VM_MAXUSER_ADDRESS);
11535 	ppr = rangeset_containing(&pmap->pm_pkru, sva);
11536 	if (ppr == NULL)
11537 		return (rangeset_empty(&pmap->pm_pkru, sva, eva));
11538 	keyidx = ppr->pkru_keyidx;
11539 	while ((va = ppr->pkru_rs_el.re_end) < eva) {
11540 		if ((ppr = rangeset_beginning(&pmap->pm_pkru, va)) == NULL ||
11541 		    keyidx != ppr->pkru_keyidx)
11542 			return (false);
11543 	}
11544 	*pte |= X86_PG_PKU(keyidx);
11545 	return (true);
11546 }
11547 
11548 static pt_entry_t
11549 pmap_pkru_get(pmap_t pmap, vm_offset_t va)
11550 {
11551 	struct pmap_pkru_range *ppr;
11552 
11553 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11554 	if (pmap->pm_type != PT_X86 ||
11555 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0 ||
11556 	    va >= VM_MAXUSER_ADDRESS)
11557 		return (0);
11558 	ppr = rangeset_containing(&pmap->pm_pkru, va);
11559 	if (ppr != NULL)
11560 		return (X86_PG_PKU(ppr->pkru_keyidx));
11561 	return (0);
11562 }
11563 
11564 static bool
11565 pred_pkru_on_remove(void *ctx __unused, void *r)
11566 {
11567 	struct pmap_pkru_range *ppr;
11568 
11569 	ppr = r;
11570 	return ((ppr->pkru_flags & AMD64_PKRU_PERSIST) == 0);
11571 }
11572 
11573 static void
11574 pmap_pkru_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
11575 {
11576 
11577 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11578 	if (pmap->pm_type == PT_X86 &&
11579 	    (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
11580 		rangeset_remove_pred(&pmap->pm_pkru, sva, eva,
11581 		    pred_pkru_on_remove);
11582 	}
11583 }
11584 
11585 static int
11586 pmap_pkru_copy(pmap_t dst_pmap, pmap_t src_pmap)
11587 {
11588 
11589 	PMAP_LOCK_ASSERT(dst_pmap, MA_OWNED);
11590 	PMAP_LOCK_ASSERT(src_pmap, MA_OWNED);
11591 	MPASS(dst_pmap->pm_type == PT_X86);
11592 	MPASS(src_pmap->pm_type == PT_X86);
11593 	MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
11594 	if (src_pmap->pm_pkru.rs_data_ctx == NULL)
11595 		return (0);
11596 	return (rangeset_copy(&dst_pmap->pm_pkru, &src_pmap->pm_pkru));
11597 }
11598 
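/*
 * Rewrite the protection key index in every valid mapping within [sva, eva).
 * 2MB mappings that are fully covered by the range are updated in place;
 * partially covered ones are demoted first.  The TLB is invalidated only if
 * some entry actually changed.
 */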
11599 static void
11600 pmap_pkru_update_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
11601     u_int keyidx)
11602 {
11603 	pml4_entry_t *pml4e;
11604 	pdp_entry_t *pdpe;
11605 	pd_entry_t newpde, ptpaddr, *pde;
11606 	pt_entry_t newpte, *ptep, pte;
11607 	vm_offset_t va, va_next;
11608 	bool changed;
11609 
11610 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11611 	MPASS(pmap->pm_type == PT_X86);
11612 	MPASS(keyidx <= PMAP_MAX_PKRU_IDX);
11613 
11614 	for (changed = false, va = sva; va < eva; va = va_next) {
11615 		pml4e = pmap_pml4e(pmap, va);
11616 		if (pml4e == NULL || (*pml4e & X86_PG_V) == 0) {
11617 			va_next = (va + NBPML4) & ~PML4MASK;
11618 			if (va_next < va)
11619 				va_next = eva;
11620 			continue;
11621 		}
11622 
11623 		pdpe = pmap_pml4e_to_pdpe(pml4e, va);
11624 		if ((*pdpe & X86_PG_V) == 0) {
11625 			va_next = (va + NBPDP) & ~PDPMASK;
11626 			if (va_next < va)
11627 				va_next = eva;
11628 			continue;
11629 		}
11630 
11631 		va_next = (va + NBPDR) & ~PDRMASK;
11632 		if (va_next < va)
11633 			va_next = eva;
11634 
11635 		pde = pmap_pdpe_to_pde(pdpe, va);
11636 		ptpaddr = *pde;
11637 		if (ptpaddr == 0)
11638 			continue;
11639 
11640 		MPASS((ptpaddr & X86_PG_V) != 0);
11641 		if ((ptpaddr & PG_PS) != 0) {
11642 			if (va + NBPDR == va_next && eva >= va_next) {
11643 				newpde = (ptpaddr & ~X86_PG_PKU_MASK) |
11644 				    X86_PG_PKU(keyidx);
11645 				if (newpde != ptpaddr) {
11646 					*pde = newpde;
11647 					changed = true;
11648 				}
11649 				continue;
11650 			} else if (!pmap_demote_pde(pmap, pde, va)) {
11651 				continue;
11652 			}
11653 		}
11654 
11655 		if (va_next > eva)
11656 			va_next = eva;
11657 
11658 		for (ptep = pmap_pde_to_pte(pde, va); va != va_next;
11659 		    ptep++, va += PAGE_SIZE) {
11660 			pte = *ptep;
11661 			if ((pte & X86_PG_V) == 0)
11662 				continue;
11663 			newpte = (pte & ~X86_PG_PKU_MASK) | X86_PG_PKU(keyidx);
11664 			if (newpte != pte) {
11665 				*ptep = newpte;
11666 				changed = true;
11667 			}
11668 		}
11669 	}
11670 	if (changed)
11671 		pmap_invalidate_range(pmap, sva, eva);
11672 }
11673 
11674 static int
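/*
 * Validate the user-supplied range, key index, and flags for
 * pmap_pkru_set() and pmap_pkru_clear().
 */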
11675 pmap_pkru_check_uargs(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
11676     u_int keyidx, int flags)
11677 {
11678 
11679 	if (pmap->pm_type != PT_X86 || keyidx > PMAP_MAX_PKRU_IDX ||
11680 	    (flags & ~(AMD64_PKRU_PERSIST | AMD64_PKRU_EXCL)) != 0)
11681 		return (EINVAL);
11682 	if (eva <= sva || eva > VM_MAXUSER_ADDRESS)
11683 		return (EFAULT);
11684 	if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0)
11685 		return (ENOTSUP);
11686 	return (0);
11687 }
11688 
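/*
 * Assign protection key keyidx to the user address range [sva, eva) and
 * rewrite any existing mappings in the range to use it.  Retries after
 * waiting for free pages if the assignment fails with ENOMEM.
 */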
11689 int
11690 pmap_pkru_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, u_int keyidx,
11691     int flags)
11692 {
11693 	int error;
11694 
11695 	sva = trunc_page(sva);
11696 	eva = round_page(eva);
11697 	error = pmap_pkru_check_uargs(pmap, sva, eva, keyidx, flags);
11698 	if (error != 0)
11699 		return (error);
11700 	for (;;) {
11701 		PMAP_LOCK(pmap);
11702 		error = pmap_pkru_assign(pmap, sva, eva, keyidx, flags);
11703 		if (error == 0)
11704 			pmap_pkru_update_range(pmap, sva, eva, keyidx);
11705 		PMAP_UNLOCK(pmap);
11706 		if (error != ENOMEM)
11707 			break;
11708 		vm_wait(NULL);
11709 	}
11710 	return (error);
11711 }
11712 
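/*
 * Remove the protection key assignment from the user address range
 * [sva, eva) and reset the PKU bits of any existing mappings to key 0.
 */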
11713 int
11714 pmap_pkru_clear(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
11715 {
11716 	int error;
11717 
11718 	sva = trunc_page(sva);
11719 	eva = round_page(eva);
11720 	error = pmap_pkru_check_uargs(pmap, sva, eva, 0, 0);
11721 	if (error != 0)
11722 		return (error);
11723 	for (;;) {
11724 		PMAP_LOCK(pmap);
11725 		error = pmap_pkru_deassign(pmap, sva, eva);
11726 		if (error == 0)
11727 			pmap_pkru_update_range(pmap, sva, eva, 0);
11728 		PMAP_UNLOCK(pmap);
11729 		if (error != ENOMEM)
11730 			break;
11731 		vm_wait(NULL);
11732 	}
11733 	return (error);
11734 }
11735 
11736 #if defined(KASAN) || defined(KMSAN)
11737 
11738 /*
11739  * Reserve enough memory to:
11740  * 1) allocate PDP pages for the shadow map(s), and
11741  * 2) shadow the boot stack of KSTACK_PAGES pages,
11742  * assuming that the kernel stack does not cross a 1GB boundary, so we
11743  * need one or two PD pages, one or two PT pages, and KSTACK_PAGES shadow
11744  * pages per shadow map.
11745  */
11746 #ifdef KASAN
11747 #define	SAN_EARLY_PAGES	\
11748 	(NKASANPML4E + 2 + 2 + howmany(KSTACK_PAGES, KASAN_SHADOW_SCALE))
11749 #else
11750 #define	SAN_EARLY_PAGES	\
11751 	(NKMSANSHADPML4E + NKMSANORIGPML4E + 2 * (2 + 2 + KSTACK_PAGES))
11752 #endif
11753 
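/*
 * Bump allocator for the bootstrap shadow map: return the physical
 * address of the next free page in a static buffer of SAN_EARLY_PAGES
 * pages, panicking once the buffer is exhausted.
 */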
11754 static uint64_t __nosanitizeaddress __nosanitizememory
11755 pmap_san_enter_early_alloc_4k(uint64_t pabase)
11756 {
11757 	static uint8_t data[PAGE_SIZE * SAN_EARLY_PAGES] __aligned(PAGE_SIZE);
11758 	static size_t offset = 0;
11759 	uint64_t pa;
11760 
11761 	if (offset == sizeof(data)) {
11762 		panic("%s: ran out of memory for the bootstrap shadow map",
11763 		    __func__);
11764 	}
11765 
11766 	pa = pabase + ((vm_offset_t)&data[offset] - KERNSTART);
11767 	offset += PAGE_SIZE;
11768 	return (pa);
11769 }
11770 
11771 /*
11772  * Map a shadow page, before the kernel has bootstrapped its page tables.  This
11773  * is currently only used to shadow the temporary boot stack set up by locore.
11774  */
11775 static void __nosanitizeaddress __nosanitizememory
11776 pmap_san_enter_early(vm_offset_t va)
11777 {
11778 	static bool first = true;
11779 	pml4_entry_t *pml4e;
11780 	pdp_entry_t *pdpe;
11781 	pd_entry_t *pde;
11782 	pt_entry_t *pte;
11783 	uint64_t cr3, pa, base;
11784 	int i;
11785 
11786 	base = amd64_loadaddr();
11787 	cr3 = rcr3();
11788 
11789 	if (first) {
11790 		/*
11791 		 * If this is the first call, we need to allocate new PML4Es for
11792 		 * the bootstrap shadow map(s).  We don't know how the PML4 page
11793 		 * was initialized by the boot loader, so we can't simply test
11794 		 * whether the shadow map's PML4Es are zero.
11795 		 */
11796 		first = false;
11797 #ifdef KASAN
11798 		for (i = 0; i < NKASANPML4E; i++) {
11799 			pa = pmap_san_enter_early_alloc_4k(base);
11800 
11801 			pml4e = (pml4_entry_t *)cr3 +
11802 			    pmap_pml4e_index(KASAN_MIN_ADDRESS + i * NBPML4);
11803 			*pml4e = (pml4_entry_t)(pa | X86_PG_RW | X86_PG_V);
11804 		}
11805 #else
11806 		for (i = 0; i < NKMSANORIGPML4E; i++) {
11807 			pa = pmap_san_enter_early_alloc_4k(base);
11808 
11809 			pml4e = (pml4_entry_t *)cr3 +
11810 			    pmap_pml4e_index(KMSAN_ORIG_MIN_ADDRESS +
11811 			    i * NBPML4);
11812 			*pml4e = (pml4_entry_t)(pa | X86_PG_RW | X86_PG_V);
11813 		}
11814 		for (i = 0; i < NKMSANSHADPML4E; i++) {
11815 			pa = pmap_san_enter_early_alloc_4k(base);
11816 
11817 			pml4e = (pml4_entry_t *)cr3 +
11818 			    pmap_pml4e_index(KMSAN_SHAD_MIN_ADDRESS +
11819 			    i * NBPML4);
11820 			*pml4e = (pml4_entry_t)(pa | X86_PG_RW | X86_PG_V);
11821 		}
11822 #endif
11823 	}
11824 	pml4e = (pml4_entry_t *)cr3 + pmap_pml4e_index(va);
11825 	pdpe = (pdp_entry_t *)(*pml4e & PG_FRAME) + pmap_pdpe_index(va);
11826 	if (*pdpe == 0) {
11827 		pa = pmap_san_enter_early_alloc_4k(base);
11828 		*pdpe = (pdp_entry_t)(pa | X86_PG_RW | X86_PG_V);
11829 	}
11830 	pde = (pd_entry_t *)(*pdpe & PG_FRAME) + pmap_pde_index(va);
11831 	if (*pde == 0) {
11832 		pa = pmap_san_enter_early_alloc_4k(base);
11833 		*pde = (pd_entry_t)(pa | X86_PG_RW | X86_PG_V);
11834 	}
11835 	pte = (pt_entry_t *)(*pde & PG_FRAME) + pmap_pte_index(va);
11836 	if (*pte != 0)
11837 		panic("%s: PTE for %#lx is already initialized", __func__, va);
11838 	pa = pmap_san_enter_early_alloc_4k(base);
11839 	*pte = (pt_entry_t)(pa | X86_PG_A | X86_PG_M | X86_PG_RW | X86_PG_V);
11840 }
11841 
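/*
 * Allocate a wired, zero-filled page for the shadow map, panicking if no
 * memory is available.
 */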
11842 static vm_page_t
11843 pmap_san_enter_alloc_4k(void)
11844 {
11845 	vm_page_t m;
11846 
11847 	m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
11848 	    VM_ALLOC_ZERO);
11849 	if (m == NULL)
11850 		panic("%s: no memory to grow shadow map", __func__);
11851 	return (m);
11852 }
11853 
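/*
 * Try to allocate a wired, zero-filled 2MB run of pages for the shadow
 * map; return NULL if contiguous memory is unavailable.
 */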
11854 static vm_page_t
11855 pmap_san_enter_alloc_2m(void)
11856 {
11857 	return (vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO,
11858 	    NPTEPG, 0, ~0ul, NBPDR, 0, VM_MEMATTR_DEFAULT));
11859 }
11860 
11861 /*
11862  * Grow a shadow map by at least one 4KB page at the specified address.  Use 2MB
11863  * pages when possible.
11864  */
11865 void __nosanitizeaddress __nosanitizememory
11866 pmap_san_enter(vm_offset_t va)
11867 {
11868 	pdp_entry_t *pdpe;
11869 	pd_entry_t *pde;
11870 	pt_entry_t *pte;
11871 	vm_page_t m;
11872 
11873 	if (kernphys == 0) {
11874 		/*
11875 		 * We're creating a temporary shadow map for the boot stack.
11876 		 */
11877 		pmap_san_enter_early(va);
11878 		return;
11879 	}
11880 
11881 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
11882 
11883 	pdpe = pmap_pdpe(kernel_pmap, va);
11884 	if ((*pdpe & X86_PG_V) == 0) {
11885 		m = pmap_san_enter_alloc_4k();
11886 		*pdpe = (pdp_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
11887 		    X86_PG_V | pg_nx);
11888 	}
11889 	pde = pmap_pdpe_to_pde(pdpe, va);
11890 	if ((*pde & X86_PG_V) == 0) {
11891 		m = pmap_san_enter_alloc_2m();
11892 		if (m != NULL) {
11893 			*pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
11894 			    X86_PG_PS | X86_PG_V | X86_PG_A | X86_PG_M | pg_nx);
11895 		} else {
11896 			m = pmap_san_enter_alloc_4k();
11897 			*pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
11898 			    X86_PG_V | pg_nx);
11899 		}
11900 	}
11901 	if ((*pde & X86_PG_PS) != 0)
11902 		return;
11903 	pte = pmap_pde_to_pte(pde, va);
11904 	if ((*pte & X86_PG_V) != 0)
11905 		return;
11906 	m = pmap_san_enter_alloc_4k();
11907 	*pte = (pt_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW | X86_PG_V |
11908 	    X86_PG_M | X86_PG_A | pg_nx);
11909 }
11910 #endif
11911 
11912 /*
11913  * Track a contiguous range of the kernel's virtual address space whose
11914  * mapping attributes are identical.
11915  */
11916 struct pmap_kernel_map_range {
11917 	vm_offset_t sva;
11918 	pt_entry_t attrs;
11919 	int ptes;
11920 	int pdes;
11921 	int pdpes;
11922 };
11923 
11924 static void
11925 sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
11926     vm_offset_t eva)
11927 {
11928 	const char *mode;
11929 	int i, pat_idx;
11930 
11931 	if (eva <= range->sva)
11932 		return;
11933 
11934 	pat_idx = pmap_pat_index(kernel_pmap, range->attrs, true);
11935 	for (i = 0; i < PAT_INDEX_SIZE; i++)
11936 		if (pat_index[i] == pat_idx)
11937 			break;
11938 
11939 	switch (i) {
11940 	case PAT_WRITE_BACK:
11941 		mode = "WB";
11942 		break;
11943 	case PAT_WRITE_THROUGH:
11944 		mode = "WT";
11945 		break;
11946 	case PAT_UNCACHEABLE:
11947 		mode = "UC";
11948 		break;
11949 	case PAT_UNCACHED:
11950 		mode = "U-";
11951 		break;
11952 	case PAT_WRITE_PROTECTED:
11953 		mode = "WP";
11954 		break;
11955 	case PAT_WRITE_COMBINING:
11956 		mode = "WC";
11957 		break;
11958 	default:
11959 		printf("%s: unknown PAT mode %#x for range 0x%016lx-0x%016lx\n",
11960 		    __func__, pat_idx, range->sva, eva);
11961 		mode = "??";
11962 		break;
11963 	}
11964 
11965 	sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c %s %d %d %d\n",
11966 	    range->sva, eva,
11967 	    (range->attrs & X86_PG_RW) != 0 ? 'w' : '-',
11968 	    (range->attrs & pg_nx) != 0 ? '-' : 'x',
11969 	    (range->attrs & X86_PG_U) != 0 ? 'u' : 's',
11970 	    (range->attrs & X86_PG_G) != 0 ? 'g' : '-',
11971 	    mode, range->pdpes, range->pdes, range->ptes);
11972 
11973 	/* Reset to sentinel value. */
11974 	range->sva = la57 ? KV5ADDR(NPML5EPG - 1, NPML4EPG - 1, NPDPEPG - 1,
11975 	    NPDEPG - 1, NPTEPG - 1) : KV4ADDR(NPML4EPG - 1, NPDPEPG - 1,
11976 	    NPDEPG - 1, NPTEPG - 1);
11977 }
11978 
11979 /*
11980  * Determine whether the attributes specified by a page table entry match those
11981  * being tracked by the current range.  This is not quite as simple as a direct
11982  * flag comparison since some PAT modes have multiple representations.
11983  */
11984 static bool
11985 sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
11986 {
11987 	pt_entry_t diff, mask;
11988 
11989 	mask = X86_PG_G | X86_PG_RW | X86_PG_U | X86_PG_PDE_CACHE | pg_nx;
11990 	diff = (range->attrs ^ attrs) & mask;
11991 	if (diff == 0)
11992 		return (true);
11993 	if ((diff & ~X86_PG_PDE_PAT) == 0 &&
11994 	    pmap_pat_index(kernel_pmap, range->attrs, true) ==
11995 	    pmap_pat_index(kernel_pmap, attrs, true))
11996 		return (true);
11997 	return (false);
11998 }
11999 
12000 static void
12001 sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
12002     pt_entry_t attrs)
12003 {
12004 
12005 	memset(range, 0, sizeof(*range));
12006 	range->sva = va;
12007 	range->attrs = attrs;
12008 }
12009 
12010 /*
12011  * Given a leaf PTE, derive the mapping's attributes.  If they do not match
12012  * those of the current run, dump the address range and its attributes, and
12013  * begin a new run.
12014  */
12015 static void
12016 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
12017     vm_offset_t va, pml4_entry_t pml4e, pdp_entry_t pdpe, pd_entry_t pde,
12018     pt_entry_t pte)
12019 {
12020 	pt_entry_t attrs;
12021 
12022 	attrs = pml4e & (X86_PG_RW | X86_PG_U | pg_nx);
12023 
12024 	attrs |= pdpe & pg_nx;
12025 	attrs &= pg_nx | (pdpe & (X86_PG_RW | X86_PG_U));
12026 	if ((pdpe & PG_PS) != 0) {
12027 		attrs |= pdpe & (X86_PG_G | X86_PG_PDE_CACHE);
12028 	} else if (pde != 0) {
12029 		attrs |= pde & pg_nx;
12030 		attrs &= pg_nx | (pde & (X86_PG_RW | X86_PG_U));
12031 	}
12032 	if ((pde & PG_PS) != 0) {
12033 		attrs |= pde & (X86_PG_G | X86_PG_PDE_CACHE);
12034 	} else if (pte != 0) {
12035 		attrs |= pte & pg_nx;
12036 		attrs &= pg_nx | (pte & (X86_PG_RW | X86_PG_U));
12037 		attrs |= pte & (X86_PG_G | X86_PG_PTE_CACHE);
12038 
12039 		/* Canonicalize by always using the PDE PAT bit. */
12040 		if ((attrs & X86_PG_PTE_PAT) != 0)
12041 			attrs ^= X86_PG_PDE_PAT | X86_PG_PTE_PAT;
12042 	}
12043 
12044 	if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
12045 		sysctl_kmaps_dump(sb, range, va);
12046 		sysctl_kmaps_reinit(range, va, attrs);
12047 	}
12048 }
12049 
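/*
 * Handler for the vm.pmap.kernel_maps sysctl: dump the kernel address
 * space layout, emitting one line per run of mappings with identical
 * attributes.
 */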
12050 static int
12051 sysctl_kmaps(SYSCTL_HANDLER_ARGS)
12052 {
12053 	struct pmap_kernel_map_range range;
12054 	struct sbuf sbuf, *sb;
12055 	pml4_entry_t pml4e;
12056 	pdp_entry_t *pdp, pdpe;
12057 	pd_entry_t *pd, pde;
12058 	pt_entry_t *pt, pte;
12059 	vm_offset_t sva;
12060 	vm_paddr_t pa;
12061 	int error, i, j, k, l;
12062 
12063 	error = sysctl_wire_old_buffer(req, 0);
12064 	if (error != 0)
12065 		return (error);
12066 	sb = &sbuf;
12067 	sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);
12068 
12069 	/* Sentinel value. */
12070 	range.sva = la57 ? KV5ADDR(NPML5EPG - 1, NPML4EPG - 1, NPDPEPG - 1,
12071 	    NPDEPG - 1, NPTEPG - 1) : KV4ADDR(NPML4EPG - 1, NPDPEPG - 1,
12072 	    NPDEPG - 1, NPTEPG - 1);
12073 
12074 	/*
12075 	 * Iterate over the kernel page tables without holding the kernel pmap
12076 	 * lock.  Outside of the large map, kernel page table pages are never
12077 	 * freed, so at worst we will observe inconsistencies in the output.
12078 	 * Within the large map, ensure that PDP and PD page addresses are
12079 	 * valid before descending.
12080 	 */
12081 	for (sva = 0, i = pmap_pml4e_index(sva); i < NPML4EPG; i++) {
12082 		switch (i) {
12083 		case PML4PML4I:
12084 			sbuf_printf(sb, "\nRecursive map:\n");
12085 			break;
12086 		case DMPML4I:
12087 			sbuf_printf(sb, "\nDirect map:\n");
12088 			break;
12089 #ifdef KASAN
12090 		case KASANPML4I:
12091 			sbuf_printf(sb, "\nKASAN shadow map:\n");
12092 			break;
12093 #endif
12094 #ifdef KMSAN
12095 		case KMSANSHADPML4I:
12096 			sbuf_printf(sb, "\nKMSAN shadow map:\n");
12097 			break;
12098 		case KMSANORIGPML4I:
12099 			sbuf_printf(sb, "\nKMSAN origin map:\n");
12100 			break;
12101 #endif
12102 		case KPML4BASE:
12103 			sbuf_printf(sb, "\nKernel map:\n");
12104 			break;
12105 		case LMSPML4I:
12106 			sbuf_printf(sb, "\nLarge map:\n");
12107 			break;
12108 		}
12109 
12110 		/* Convert to canonical form. */
12111 		if (sva == 1ul << 47)
12112 			sva |= -1ul << 48;
12113 
12114 restart:
12115 		pml4e = kernel_pml4[i];
12116 		if ((pml4e & X86_PG_V) == 0) {
12117 			sva = rounddown2(sva, NBPML4);
12118 			sysctl_kmaps_dump(sb, &range, sva);
12119 			sva += NBPML4;
12120 			continue;
12121 		}
12122 		pa = pml4e & PG_FRAME;
12123 		pdp = (pdp_entry_t *)PHYS_TO_DMAP(pa);
12124 
12125 		for (j = pmap_pdpe_index(sva); j < NPDPEPG; j++) {
12126 			pdpe = pdp[j];
12127 			if ((pdpe & X86_PG_V) == 0) {
12128 				sva = rounddown2(sva, NBPDP);
12129 				sysctl_kmaps_dump(sb, &range, sva);
12130 				sva += NBPDP;
12131 				continue;
12132 			}
12133 			pa = pdpe & PG_FRAME;
12134 			if ((pdpe & PG_PS) != 0) {
12135 				sva = rounddown2(sva, NBPDP);
12136 				sysctl_kmaps_check(sb, &range, sva, pml4e, pdpe,
12137 				    0, 0);
12138 				range.pdpes++;
12139 				sva += NBPDP;
12140 				continue;
12141 			}
12142 			if (PMAP_ADDRESS_IN_LARGEMAP(sva) &&
12143 			    vm_phys_paddr_to_vm_page(pa) == NULL) {
12144 				/*
12145 				 * Page table pages for the large map may be
12146 				 * freed.  Validate the next-level address
12147 				 * before descending.
12148 				 */
12149 				goto restart;
12150 			}
12151 			pd = (pd_entry_t *)PHYS_TO_DMAP(pa);
12152 
12153 			for (k = pmap_pde_index(sva); k < NPDEPG; k++) {
12154 				pde = pd[k];
12155 				if ((pde & X86_PG_V) == 0) {
12156 					sva = rounddown2(sva, NBPDR);
12157 					sysctl_kmaps_dump(sb, &range, sva);
12158 					sva += NBPDR;
12159 					continue;
12160 				}
12161 				pa = pde & PG_FRAME;
12162 				if ((pde & PG_PS) != 0) {
12163 					sva = rounddown2(sva, NBPDR);
12164 					sysctl_kmaps_check(sb, &range, sva,
12165 					    pml4e, pdpe, pde, 0);
12166 					range.pdes++;
12167 					sva += NBPDR;
12168 					continue;
12169 				}
12170 				if (PMAP_ADDRESS_IN_LARGEMAP(sva) &&
12171 				    vm_phys_paddr_to_vm_page(pa) == NULL) {
12172 					/*
12173 					 * Page table pages for the large map
12174 					 * may be freed.  Validate the
12175 					 * next-level address before descending.
12176 					 */
12177 					goto restart;
12178 				}
12179 				pt = (pt_entry_t *)PHYS_TO_DMAP(pa);
12180 
12181 				for (l = pmap_pte_index(sva); l < NPTEPG; l++,
12182 				    sva += PAGE_SIZE) {
12183 					pte = pt[l];
12184 					if ((pte & X86_PG_V) == 0) {
12185 						sysctl_kmaps_dump(sb, &range,
12186 						    sva);
12187 						continue;
12188 					}
12189 					sysctl_kmaps_check(sb, &range, sva,
12190 					    pml4e, pdpe, pde, pte);
12191 					range.ptes++;
12192 				}
12193 			}
12194 		}
12195 	}
12196 
12197 	error = sbuf_finish(sb);
12198 	sbuf_delete(sb);
12199 	return (error);
12200 }
12201 SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
12202     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
12203     NULL, 0, sysctl_kmaps, "A",
12204     "Dump kernel address layout");
12205 
12206 #ifdef DDB
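/*
 * "show pte <va>": print the page table entries translating va in the
 * pmap of the thread being debugged, or in curpmap if no debugger thread
 * is set.
 */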
12207 DB_SHOW_COMMAND(pte, pmap_print_pte)
12208 {
12209 	pmap_t pmap;
12210 	pml5_entry_t *pml5;
12211 	pml4_entry_t *pml4;
12212 	pdp_entry_t *pdp;
12213 	pd_entry_t *pde;
12214 	pt_entry_t *pte, PG_V;
12215 	vm_offset_t va;
12216 
12217 	if (!have_addr) {
12218 		db_printf("show pte addr\n");
12219 		return;
12220 	}
12221 	va = (vm_offset_t)addr;
12222 
12223 	if (kdb_thread != NULL)
12224 		pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace);
12225 	else
12226 		pmap = PCPU_GET(curpmap);
12227 
12228 	PG_V = pmap_valid_bit(pmap);
12229 	db_printf("VA 0x%016lx", va);
12230 
12231 	if (pmap_is_la57(pmap)) {
12232 		pml5 = pmap_pml5e(pmap, va);
12233 		db_printf(" pml5e@0x%016lx 0x%016lx", (uint64_t)pml5, *pml5);
12234 		if ((*pml5 & PG_V) == 0) {
12235 			db_printf("\n");
12236 			return;
12237 		}
12238 		pml4 = pmap_pml5e_to_pml4e(pml5, va);
12239 	} else {
12240 		pml4 = pmap_pml4e(pmap, va);
12241 	}
12242 	db_printf(" pml4e@0x%016lx 0x%016lx", (uint64_t)pml4, *pml4);
12243 	if ((*pml4 & PG_V) == 0) {
12244 		db_printf("\n");
12245 		return;
12246 	}
12247 	pdp = pmap_pml4e_to_pdpe(pml4, va);
12248 	db_printf(" pdpe@0x%016lx 0x%016lx", (uint64_t)pdp, *pdp);
12249 	if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0) {
12250 		db_printf("\n");
12251 		return;
12252 	}
12253 	pde = pmap_pdpe_to_pde(pdp, va);
12254 	db_printf(" pde@0x%016lx 0x%016lx", (uint64_t)pde, *pde);
12255 	if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0) {
12256 		db_printf("\n");
12257 		return;
12258 	}
12259 	pte = pmap_pde_to_pte(pde, va);
12260 	db_printf(" pte@0x%016lx 0x%016lx\n", (uint64_t)pte, *pte);
12261 }
12262 
12263 DB_SHOW_COMMAND(phys2dmap, pmap_phys2dmap)
12264 {
12265 	vm_paddr_t a;
12266 
12267 	if (have_addr) {
12268 		a = (vm_paddr_t)addr;
12269 		db_printf("0x%jx\n", (uintmax_t)PHYS_TO_DMAP(a));
12270 	} else {
12271 		db_printf("show phys2dmap addr\n");
12272 	}
12273 }
12274 
12275 static void
12276 ptpages_show_page(int level, int idx, vm_page_t pg)
12277 {
12278 	db_printf("l %d i %d pg %p phys %#lx ref %x\n",
12279 	    level, idx, pg, VM_PAGE_TO_PHYS(pg), pg->ref_count);
12280 }
12281 
12282 static void
12283 ptpages_show_complain(int level, int idx, uint64_t pte)
12284 {
12285 	db_printf("l %d i %d pte %#lx\n", level, idx, pte);
12286 }
12287 
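/*
 * Print the page table pages reachable from the given PML4 page, walking
 * down to the PD level.
 */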
12288 static void
12289 ptpages_show_pml4(vm_page_t pg4, int num_entries, uint64_t PG_V)
12290 {
12291 	vm_page_t pg3, pg2, pg1;
12292 	pml4_entry_t *pml4;
12293 	pdp_entry_t *pdp;
12294 	pd_entry_t *pd;
12295 	int i4, i3, i2;
12296 
12297 	pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg4));
12298 	for (i4 = 0; i4 < num_entries; i4++) {
12299 		if ((pml4[i4] & PG_V) == 0)
12300 			continue;
12301 		pg3 = PHYS_TO_VM_PAGE(pml4[i4] & PG_FRAME);
12302 		if (pg3 == NULL) {
12303 			ptpages_show_complain(3, i4, pml4[i4]);
12304 			continue;
12305 		}
12306 		ptpages_show_page(3, i4, pg3);
12307 		pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg3));
12308 		for (i3 = 0; i3 < NPDPEPG; i3++) {
12309 			if ((pdp[i3] & PG_V) == 0)
12310 				continue;
12311 			pg2 = PHYS_TO_VM_PAGE(pdp[i3] & PG_FRAME);
12312 			if (pg2 == NULL) {
12313 				ptpages_show_complain(2, i3, pdp[i3]);
12314 				continue;
12315 			}
12316 			ptpages_show_page(2, i3, pg2);
12317 			pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg2));
12318 			for (i2 = 0; i2 < NPDEPG; i2++) {
12319 				if ((pd[i2] & PG_V) == 0)
12320 					continue;
12321 				pg1 = PHYS_TO_VM_PAGE(pd[i2] & PG_FRAME);
12322 				if (pg1 == NULL) {
12323 					ptpages_show_complain(1, i2, pd[i2]);
12324 					continue;
12325 				}
12326 				ptpages_show_page(1, i2, pg1);
12327 			}
12328 		}
12329 	}
12330 }
12331 
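/*
 * "show ptpages [pmap]": dump the page table pages of the pmap given by
 * the address argument, or of curpmap if no address is supplied.
 */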
12332 DB_SHOW_COMMAND(ptpages, pmap_ptpages)
12333 {
12334 	pmap_t pmap;
12335 	vm_page_t pg;
12336 	pml5_entry_t *pml5;
12337 	uint64_t PG_V;
12338 	int i5;
12339 
12340 	if (have_addr)
12341 		pmap = (pmap_t)addr;
12342 	else
12343 		pmap = PCPU_GET(curpmap);
12344 
12345 	PG_V = pmap_valid_bit(pmap);
12346 
12347 	if (pmap_is_la57(pmap)) {
12348 		pml5 = pmap->pm_pmltop;
12349 		for (i5 = 0; i5 < NUPML5E; i5++) {
12350 			if ((pml5[i5] & PG_V) == 0)
12351 				continue;
12352 			pg = PHYS_TO_VM_PAGE(pml5[i5] & PG_FRAME);
12353 			if (pg == NULL) {
12354 				ptpages_show_complain(4, i5, pml5[i5]);
12355 				continue;
12356 			}
12357 			ptpages_show_page(4, i5, pg);
12358 			ptpages_show_pml4(pg, NPML4EPG, PG_V);
12359 		}
12360 	} else {
12361 		ptpages_show_pml4(PHYS_TO_VM_PAGE(DMAP_TO_PHYS(
12362 		    (vm_offset_t)pmap->pm_pmltop)), NUP4ML4E, PG_V);
12363 	}
12364 }
12365 #endif
12366