xref: /freebsd/sys/arm64/arm64/pmap.c (revision ef9017aa174db96ee741b936b984f2b5d61dff9f)
1 /*-
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  * Copyright (c) 2003 Peter Wemm
9  * All rights reserved.
10  * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
11  * All rights reserved.
12  * Copyright (c) 2014 Andrew Turner
13  * All rights reserved.
14  * Copyright (c) 2014-2016 The FreeBSD Foundation
15  * All rights reserved.
16  *
17  * This code is derived from software contributed to Berkeley by
18  * the Systems Programming Group of the University of Utah Computer
19  * Science Department and William Jolitz of UUNET Technologies Inc.
20  *
21  * This software was developed by Andrew Turner under sponsorship from
22  * the FreeBSD Foundation.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted provided that the following conditions
26  * are met:
27  * 1. Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  * 3. All advertising materials mentioning features or use of this software
33  *    must display the following acknowledgement:
34  *	This product includes software developed by the University of
35  *	California, Berkeley and its contributors.
36  * 4. Neither the name of the University nor the names of its contributors
37  *    may be used to endorse or promote products derived from this software
38  *    without specific prior written permission.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  */
52 /*-
53  * Copyright (c) 2003 Networks Associates Technology, Inc.
54  * All rights reserved.
55  *
56  * This software was developed for the FreeBSD Project by Jake Burkholder,
57  * Safeport Network Services, and Network Associates Laboratories, the
58  * Security Research Division of Network Associates, Inc. under
59  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
60  * CHATS research program.
61  *
62  * Redistribution and use in source and binary forms, with or without
63  * modification, are permitted provided that the following conditions
64  * are met:
65  * 1. Redistributions of source code must retain the above copyright
66  *    notice, this list of conditions and the following disclaimer.
67  * 2. Redistributions in binary form must reproduce the above copyright
68  *    notice, this list of conditions and the following disclaimer in the
69  *    documentation and/or other materials provided with the distribution.
70  *
71  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
72  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
73  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
74  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
75  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
76  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
77  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
78  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
79  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
80  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
81  * SUCH DAMAGE.
82  */
83 
84 #include <sys/cdefs.h>
85 /*
86  *	Manages physical address maps.
87  *
88  *	Since the information managed by this module is
89  *	also stored by the logical address mapping module,
90  *	this module may throw away valid virtual-to-physical
91  *	mappings at almost any time.  However, invalidations
92  *	of virtual-to-physical mappings must be done as
93  *	requested.
94  *
95  *	In order to cope with hardware architectures which
 96  *	make virtual-to-physical map invalidations expensive,
 97  *	this module may delay invalidation or protection-
 98  *	reduction operations until they are actually
 99  *	necessary.  This module is given full information as
 100  *	to which processors are currently using which maps,
 101  *	and as to when physical maps must be made correct.
102  */
103 
104 #include "opt_vm.h"
105 
106 #include <sys/param.h>
107 #include <sys/asan.h>
108 #include <sys/bitstring.h>
109 #include <sys/bus.h>
110 #include <sys/systm.h>
111 #include <sys/kernel.h>
112 #include <sys/ktr.h>
113 #include <sys/limits.h>
114 #include <sys/lock.h>
115 #include <sys/malloc.h>
116 #include <sys/mman.h>
117 #include <sys/msan.h>
118 #include <sys/msgbuf.h>
119 #include <sys/mutex.h>
120 #include <sys/physmem.h>
121 #include <sys/proc.h>
122 #include <sys/rangeset.h>
123 #include <sys/rwlock.h>
124 #include <sys/sbuf.h>
125 #include <sys/sx.h>
126 #include <sys/vmem.h>
127 #include <sys/vmmeter.h>
128 #include <sys/sched.h>
129 #include <sys/sysctl.h>
130 #include <sys/_unrhdr.h>
131 #include <sys/smp.h>
132 
133 #include <vm/vm.h>
134 #include <vm/vm_param.h>
135 #include <vm/vm_kern.h>
136 #include <vm/vm_page.h>
137 #include <vm/vm_map.h>
138 #include <vm/vm_object.h>
139 #include <vm/vm_extern.h>
140 #include <vm/vm_pageout.h>
141 #include <vm/vm_pager.h>
142 #include <vm/vm_phys.h>
143 #include <vm/vm_radix.h>
144 #include <vm/vm_reserv.h>
145 #include <vm/vm_dumpset.h>
146 #include <vm/uma.h>
147 
148 #include <machine/asan.h>
149 #include <machine/cpu_feat.h>
150 #include <machine/machdep.h>
151 #include <machine/md_var.h>
152 #include <machine/pcb.h>
153 
154 #ifdef NUMA
155 #define	PMAP_MEMDOM	MAXMEMDOM
156 #else
157 #define	PMAP_MEMDOM	1
158 #endif
159 
160 #define	PMAP_ASSERT_STAGE1(pmap)	MPASS((pmap)->pm_stage == PM_STAGE1)
161 #define	PMAP_ASSERT_STAGE2(pmap)	MPASS((pmap)->pm_stage == PM_STAGE2)
162 
163 #define	NL0PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
164 #define	NL1PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
165 #define	NL2PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
166 #define	NL3PG		(PAGE_SIZE/(sizeof (pt_entry_t)))
167 
168 #define	NUL0E		L0_ENTRIES
169 #define	NUL1E		(NUL0E * NL1PG)
170 #define	NUL2E		(NUL1E * NL2PG)
171 
172 #ifdef PV_STATS
173 #define PV_STAT(x)	do { x ; } while (0)
174 #define __pvused
175 #else
176 #define PV_STAT(x)	do { } while (0)
177 #define __pvused	__unused
178 #endif
179 
180 #define	pmap_l0_pindex(v)	(NUL2E + NUL1E + ((v) >> L0_SHIFT))
181 #define	pmap_l1_pindex(v)	(NUL2E + ((v) >> L1_SHIFT))
182 #define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
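 
/*
 * As a rough illustration, with a 4 KB granule and 48-bit virtual addresses
 * each page-table page holds 512 eight-byte entries, so NUL1E is 512 * 512
 * and NUL2E is 512 * 512 * 512.  The pindex space used for page-table pages
 * is then laid out so that indices below NUL2E name L3 table pages,
 * [NUL2E, NUL2E + NUL1E) name L2 table pages, and higher indices name L1
 * table pages; the macros above compute the pindex covering a given va at
 * each level.
 */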
183 
184 #ifdef __ARM_FEATURE_BTI_DEFAULT
185 pt_entry_t __read_mostly pmap_gp_attr;
186 #define	ATTR_KERN_GP		pmap_gp_attr
187 #else
188 #define	ATTR_KERN_GP		0
189 #endif
190 #define	PMAP_SAN_PTE_BITS	(ATTR_AF | ATTR_S1_XN | pmap_sh_attr | \
191   ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
192 
193 struct pmap_large_md_page {
194 	struct rwlock   pv_lock;
195 	struct md_page  pv_page;
196 	/* Pad to a power of 2, see pmap_init_pv_table(). */
197 	int		pv_pad[2];
198 };
199 
200 __exclusive_cache_line static struct pmap_large_md_page pv_dummy_large;
201 #define pv_dummy pv_dummy_large.pv_page
202 __read_mostly static struct pmap_large_md_page *pv_table;
203 
204 static struct pmap_large_md_page *
205 _pa_to_pmdp(vm_paddr_t pa)
206 {
207 	struct vm_phys_seg *seg;
208 
209 	if ((seg = vm_phys_paddr_to_seg(pa)) != NULL)
210 		return ((struct pmap_large_md_page *)seg->md_first +
211 		    pmap_l2_pindex(pa) - pmap_l2_pindex(seg->start));
212 	return (NULL);
213 }
214 
215 static struct pmap_large_md_page *
216 pa_to_pmdp(vm_paddr_t pa)
217 {
218 	struct pmap_large_md_page *pvd;
219 
220 	pvd = _pa_to_pmdp(pa);
221 	if (pvd == NULL)
222 		panic("pa 0x%jx not within vm_phys_segs", (uintmax_t)pa);
223 	return (pvd);
224 }
225 
226 static struct pmap_large_md_page *
227 page_to_pmdp(vm_page_t m)
228 {
229 	struct vm_phys_seg *seg;
230 
231 	seg = &vm_phys_segs[m->segind];
232 	return ((struct pmap_large_md_page *)seg->md_first +
233 	    pmap_l2_pindex(VM_PAGE_TO_PHYS(m)) - pmap_l2_pindex(seg->start));
234 }
235 
236 #define	pa_to_pvh(pa)	(&(pa_to_pmdp(pa)->pv_page))
237 #define	page_to_pvh(m)	(&(page_to_pmdp(m)->pv_page))
238 
239 #define	PHYS_TO_PV_LIST_LOCK(pa)	({			\
240 	struct pmap_large_md_page *_pvd;			\
241 	struct rwlock *_lock;					\
242 	_pvd = _pa_to_pmdp(pa);					\
243 	if (__predict_false(_pvd == NULL))			\
244 		_lock = &pv_dummy_large.pv_lock;		\
245 	else							\
246 		_lock = &(_pvd->pv_lock);			\
247 	_lock;							\
248 })
249 
250 static struct rwlock *
251 VM_PAGE_TO_PV_LIST_LOCK(vm_page_t m)
252 {
253 	if ((m->flags & PG_FICTITIOUS) == 0)
254 		return (&page_to_pmdp(m)->pv_lock);
255 	else
256 		return (&pv_dummy_large.pv_lock);
257 }
258 
259 #define	CHANGE_PV_LIST_LOCK(lockp, new_lock)	do {	\
260 	struct rwlock **_lockp = (lockp);		\
261 	struct rwlock *_new_lock = (new_lock);		\
262 							\
263 	if (_new_lock != *_lockp) {			\
264 		if (*_lockp != NULL)			\
265 			rw_wunlock(*_lockp);		\
266 		*_lockp = _new_lock;			\
267 		rw_wlock(*_lockp);			\
268 	}						\
269 } while (0)
270 
271 #define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)		\
272 			CHANGE_PV_LIST_LOCK(lockp, PHYS_TO_PV_LIST_LOCK(pa))
273 
274 #define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
275 			CHANGE_PV_LIST_LOCK(lockp, VM_PAGE_TO_PV_LIST_LOCK(m))
276 
277 #define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
278 	struct rwlock **_lockp = (lockp);		\
279 							\
280 	if (*_lockp != NULL) {				\
281 		rw_wunlock(*_lockp);			\
282 		*_lockp = NULL;				\
283 	}						\
284 } while (0)
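 
/*
 * A typical (illustrative) use of these macros in the mapping paths:
 *
 *	struct rwlock *lock = NULL;
 *	...
 *	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
 *	(the PV list for m's superpage-sized region may now be modified)
 *	...
 *	RELEASE_PV_LIST_LOCK(&lock);
 *
 * The lock pointer starts out NULL, is switched whenever a PV list in a
 * different superpage-sized region is touched, and is dropped at the end.
 */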
285 
286 #define PTE_TO_VM_PAGE(pte) PHYS_TO_VM_PAGE(PTE_TO_PHYS(pte))
287 #define VM_PAGE_TO_PTE(m) PHYS_TO_PTE(VM_PAGE_TO_PHYS(m))
288 
289 /*
290  * The presence of this flag indicates that the mapping is writeable.
291  * If the ATTR_S1_AP_RO bit is also set, then the mapping is clean, otherwise
292  * it is dirty.  This flag may only be set on managed mappings.
293  *
294  * The DBM bit is reserved on ARMv8.0 but it seems we can safely treat it
295  * as a software managed bit.
296  */
297 #define	ATTR_SW_DBM	ATTR_DBM
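 
/*
 * Concretely, for a managed stage 1 mapping the states read as follows
 * (see pmap_pte_dirty()):
 *
 *	ATTR_SW_DBM set, AP field RW	-> writeable and dirty
 *	ATTR_SW_DBM set, AP field RO	-> writeable but still clean
 *	ATTR_SW_DBM clear		-> not writeable
 *
 * When hardware dirty-bit management is enabled (TCR_EL1.HD, see
 * pmap_dbm_enable()), the hardware switches the AP field from RO to RW on
 * the first write instead of raising a permission fault.
 */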
298 
299 struct pmap kernel_pmap_store;
300 
301 /* Used for mapping ACPI memory before VM is initialized */
302 #define	PMAP_PREINIT_MAPPING_COUNT	32
303 #define	PMAP_PREINIT_MAPPING_SIZE	(PMAP_PREINIT_MAPPING_COUNT * L2_SIZE)
304 static vm_offset_t preinit_map_va;	/* Start VA of pre-init mapping space */
305 static int vm_initialized = 0;		/* No need to use pre-init maps when set */
306 
307 /*
308  * Reserve a few L2 blocks starting from the 'preinit_map_va' pointer.
309  * Always map an entire L2 block for simplicity.
310  * VA of L2 block = preinit_map_va + i * L2_SIZE
311  */
312 static struct pmap_preinit_mapping {
313 	vm_paddr_t	pa;
314 	vm_offset_t	va;
315 	vm_size_t	size;
316 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
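 
/*
 * With a 4 KB granule L2_SIZE is 2 MB, so the pre-init window above covers
 * 32 * 2 MB = 64 MB of early ACPI/device mappings before the VM system is
 * initialized; larger granules scale the window accordingly.
 */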
317 
318 vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
319 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
320 vm_offset_t kernel_vm_end = 0;
321 
322 /*
323  * Data for the pv entry allocation mechanism.
324  */
325 #ifdef NUMA
326 static __inline int
327 pc_to_domain(struct pv_chunk *pc)
328 {
329 	return (vm_phys_domain(DMAP_TO_PHYS((vm_offset_t)pc)));
330 }
331 #else
332 static __inline int
333 pc_to_domain(struct pv_chunk *pc __unused)
334 {
335 	return (0);
336 }
337 #endif
338 
339 struct pv_chunks_list {
340 	struct mtx pvc_lock;
341 	TAILQ_HEAD(pch, pv_chunk) pvc_list;
342 	int active_reclaims;
343 } __aligned(CACHE_LINE_SIZE);
344 
345 struct pv_chunks_list __exclusive_cache_line pv_chunks[PMAP_MEMDOM];
346 
347 vm_paddr_t dmap_phys_base;	/* The start of the dmap region */
348 vm_paddr_t dmap_phys_max;	/* The limit of the dmap region */
349 vm_offset_t dmap_max_addr;	/* The virtual address limit of the dmap */
350 
351 extern pt_entry_t pagetable_l0_ttbr1[];
352 
353 #define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
354 static vm_paddr_t physmap[PHYSMAP_SIZE];
355 static u_int physmap_idx;
356 
357 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
358     "VM/pmap parameters");
359 
360 static int pmap_growkernel_panic = 0;
361 SYSCTL_INT(_vm_pmap, OID_AUTO, growkernel_panic, CTLFLAG_RDTUN,
362     &pmap_growkernel_panic, 0,
363     "panic on failure to allocate kernel page table page");
364 
365 bool pmap_lpa_enabled __read_mostly = false;
366 pt_entry_t pmap_sh_attr __read_mostly = ATTR_SH(ATTR_SH_IS);
367 
368 #if PAGE_SIZE == PAGE_SIZE_4K
369 #define	L1_BLOCKS_SUPPORTED	1
370 #else
371 #define	L1_BLOCKS_SUPPORTED	(pmap_lpa_enabled)
372 #endif
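 
/*
 * With a 4 KB granule an L1 block maps 1 GB and is always available.  With
 * the larger granules the pmap only uses L1 block mappings when
 * pmap_lpa_enabled was set from TCR_EL1.DS during bootstrap, i.e. when the
 * LPA2 translation format is in use.
 */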
373 
374 #define	PMAP_ASSERT_L1_BLOCKS_SUPPORTED	MPASS(L1_BLOCKS_SUPPORTED)
375 
376 static bool pmap_l1_supported __read_mostly = false;
377 
378 /*
379  * This ASID allocator uses a bit vector ("asid_set") to remember which ASIDs
380  * it has currently allocated to pmaps, a cursor ("asid_next") to
381  * optimize its search for a free ASID in the bit vector, and an epoch number
382  * ("asid_epoch") to indicate when it has reclaimed all previously allocated
383  * ASIDs that are not currently active on a processor.
384  *
385  * The current epoch number is always in the range [0, INT_MAX).  Negative
386  * numbers and INT_MAX are reserved for special cases that are described
387  * below.
388  */
389 struct asid_set {
390 	int asid_bits;
391 	bitstr_t *asid_set;
392 	int asid_set_size;
393 	int asid_next;
394 	int asid_epoch;
395 	struct mtx asid_set_mutex;
396 };
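 
/*
 * For example, on a CPU with 16 ASID bits asid_set_size is 65536.  When
 * pmap_alloc_asid() finds the bit vector exhausted, pmap_reset_asid_set()
 * bumps asid_epoch and rebuilds the vector keeping only the ASIDs that are
 * active on some CPU; any pmap whose cookie carries a stale epoch must
 * allocate a fresh ASID the next time it is activated.
 */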
397 
398 static struct asid_set asids;
399 static struct asid_set vmids;
400 
401 static SYSCTL_NODE(_vm_pmap, OID_AUTO, asid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
402     "ASID allocator");
403 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, bits, CTLFLAG_RD, &asids.asid_bits, 0,
404     "The number of bits in an ASID");
405 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, next, CTLFLAG_RD, &asids.asid_next, 0,
406     "The last allocated ASID plus one");
407 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, epoch, CTLFLAG_RD, &asids.asid_epoch, 0,
408     "The current epoch number");
409 
410 static SYSCTL_NODE(_vm_pmap, OID_AUTO, vmid, CTLFLAG_RD, 0, "VMID allocator");
411 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, bits, CTLFLAG_RD, &vmids.asid_bits, 0,
412     "The number of bits in an VMID");
413 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, next, CTLFLAG_RD, &vmids.asid_next, 0,
414     "The last allocated VMID plus one");
415 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, epoch, CTLFLAG_RD, &vmids.asid_epoch, 0,
416     "The current epoch number");
417 
418 void (*pmap_clean_stage2_tlbi)(void);
419 void (*pmap_stage2_invalidate_range)(uint64_t, vm_offset_t, vm_offset_t, bool);
420 void (*pmap_stage2_invalidate_all)(uint64_t);
421 
422 /*
423  * A pmap's cookie encodes an ASID and epoch number.  Cookies for reserved
424  * ASIDs have a negative epoch number, specifically, INT_MIN.  Cookies for
425  * dynamically allocated ASIDs have a non-negative epoch number.
426  *
427  * An invalid ASID is represented by -1.
428  *
429  * There are two special-case cookie values: (1) COOKIE_FROM(-1, INT_MIN),
430  * which indicates that an ASID should never be allocated to the pmap, and
431  * (2) COOKIE_FROM(-1, INT_MAX), which indicates that an ASID should be
432  * allocated when the pmap is next activated.
433  */
434 #define	COOKIE_FROM(asid, epoch)	((long)((u_int)(asid) |	\
435 					    ((u_long)(epoch) << 32)))
436 #define	COOKIE_TO_ASID(cookie)		((int)(cookie))
437 #define	COOKIE_TO_EPOCH(cookie)		((int)((u_long)(cookie) >> 32))
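 
/*
 * For example, COOKIE_FROM(5, 2) packs ASID 5 into the low 32 bits and
 * epoch 2 into the high 32 bits; COOKIE_TO_ASID() and COOKIE_TO_EPOCH()
 * simply truncate or shift to recover them.  For the reserved cookie
 * COOKIE_FROM(-1, INT_MIN) the low 32 bits are all ones, so
 * COOKIE_TO_ASID() reads back as -1, the invalid ASID.
 */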
438 
439 #define	TLBI_VA_SHIFT			12
440 #define	TLBI_VA_MASK			((1ul << 44) - 1)
441 #define	TLBI_VA(addr)			(((addr) >> TLBI_VA_SHIFT) & TLBI_VA_MASK)
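 
/*
 * The "tlbi vae1is"-style instructions take VA[55:12] in bits [43:0] of
 * their register operand (with the ASID, when present, in the upper bits).
 * TLBI_VA() produces exactly that: it drops the page offset and masks the
 * result to 44 bits.
 */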
442 
443 static int __read_frequently superpages_enabled = 1;
444 SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
445     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0,
446     "Are large page mappings enabled?");
447 
448 /*
449  * True when Branch Target Identification should be used by userspace. This
450  * allows pmap to mark pages as guarded with ATTR_S1_GP.
451  */
452 __read_mostly static bool pmap_bti_support = false;
453 
454 /*
455  * Internal flags for pmap_enter()'s helper functions.
456  */
457 #define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
458 #define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */
459 
460 TAILQ_HEAD(pv_chunklist, pv_chunk);
461 
462 static void	free_pv_chunk(struct pv_chunk *pc);
463 static void	free_pv_chunk_batch(struct pv_chunklist *batch);
464 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
465 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
466 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
467 static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
468 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
469 		    vm_offset_t va);
470 
471 static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
472 static bool pmap_activate_int(pmap_t pmap);
473 static void pmap_alloc_asid(pmap_t pmap);
474 static int pmap_change_props_locked(vm_offset_t va, vm_size_t size,
475     vm_prot_t prot, int mode, bool skip_unmapped);
476 static bool pmap_copy_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va,
477     pt_entry_t l3e, vm_page_t ml3, struct rwlock **lockp);
478 static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
479 static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2,
480     vm_offset_t va, struct rwlock **lockp);
481 static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
482 static bool pmap_demote_l2c(pmap_t pmap, pt_entry_t *l2p, vm_offset_t va);
483 static bool pmap_demote_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va);
484 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
485     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
486 static int pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2,
487     u_int flags, vm_page_t m, struct rwlock **lockp);
488 static int pmap_enter_l3c(pmap_t pmap, vm_offset_t va, pt_entry_t l3e, u_int flags,
489     vm_page_t m, vm_page_t *ml3p, struct rwlock **lockp);
490 static bool pmap_every_pte_zero(vm_paddr_t pa);
491 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted,
492     bool all_l3e_AF_set);
493 static pt_entry_t pmap_load_l3c(pt_entry_t *l3p);
494 static void pmap_mask_set_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va,
495     vm_offset_t *vap, vm_offset_t va_next, pt_entry_t mask, pt_entry_t nbits);
496 static bool pmap_pv_insert_l3c(pmap_t pmap, vm_offset_t va, vm_page_t m,
497     struct rwlock **lockp);
498 static void pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
499 static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
500     pd_entry_t l1e, struct spglist *free, struct rwlock **lockp);
501 static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
502     pd_entry_t l2e, struct spglist *free, struct rwlock **lockp);
503 static bool pmap_remove_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va,
504     vm_offset_t *vap, vm_offset_t va_next, vm_page_t ml3, struct spglist *free,
505     struct rwlock **lockp);
506 static void pmap_reset_asid_set(pmap_t pmap);
507 static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
508     vm_page_t m, struct rwlock **lockp);
509 
510 static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
511 		struct rwlock **lockp);
512 
513 static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
514     struct spglist *free);
515 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
516 static void pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
517     vm_offset_t va, vm_size_t size);
518 static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
519 
520 static uma_zone_t pmap_bti_ranges_zone;
521 static bool pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
522     pt_entry_t *pte);
523 static pt_entry_t pmap_pte_bti(pmap_t pmap, vm_offset_t va);
524 static void pmap_bti_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
525 static void *bti_dup_range(void *ctx, void *data);
526 static void bti_free_range(void *ctx, void *node);
527 static int pmap_bti_copy(pmap_t dst_pmap, pmap_t src_pmap);
528 static void pmap_bti_deassign_all(pmap_t pmap);
529 
530 /*
531  * These load the old table data and store the new value.
532  * They need to be atomic as the System MMU may write to the table at
533  * the same time as the CPU.
534  */
535 #define	pmap_clear(table)		atomic_store_64(table, 0)
536 #define	pmap_clear_bits(table, bits)	atomic_clear_64(table, bits)
537 #define	pmap_load(table)		(*table)
538 #define	pmap_load_clear(table)		atomic_swap_64(table, 0)
539 #define	pmap_load_store(table, entry)	atomic_swap_64(table, entry)
540 #define	pmap_set_bits(table, bits)	atomic_set_64(table, bits)
541 #define	pmap_store(table, entry)	atomic_store_64(table, entry)
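 
/*
 * For example, pmap_load_clear() atomically swaps in an invalid entry and
 * returns the previous one, so an access-flag or dirty-bit update made by
 * the hardware (or an SMMU) between the read and the write cannot be lost,
 * as it could be with a separate load followed by a store.
 */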
542 
543 /********************/
544 /* Inline functions */
545 /********************/
546 
547 static __inline void
548 pagecopy(void *s, void *d)
549 {
550 
551 	memcpy(d, s, PAGE_SIZE);
552 }
553 
554 static __inline pd_entry_t *
555 pmap_l0(pmap_t pmap, vm_offset_t va)
556 {
557 
558 	return (&pmap->pm_l0[pmap_l0_index(va)]);
559 }
560 
561 static __inline pd_entry_t *
562 pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
563 {
564 	pd_entry_t *l1;
565 
566 	l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l0)));
567 	return (&l1[pmap_l1_index(va)]);
568 }
569 
570 static __inline pd_entry_t *
571 pmap_l1(pmap_t pmap, vm_offset_t va)
572 {
573 	pd_entry_t *l0;
574 
575 	l0 = pmap_l0(pmap, va);
576 	if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE)
577 		return (NULL);
578 
579 	return (pmap_l0_to_l1(l0, va));
580 }
581 
582 static __inline pd_entry_t *
583 pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
584 {
585 	pd_entry_t l1, *l2p;
586 
587 	l1 = pmap_load(l1p);
588 
589 	KASSERT(ADDR_IS_CANONICAL(va),
590 	    ("%s: Address not in canonical form: %lx", __func__, va));
591 	/*
592 	 * The valid bit may be clear if pmap_update_entry() is concurrently
593 	 * modifying the entry, so for KVA only the entry type may be checked.
594 	 */
595 	KASSERT(ADDR_IS_KERNEL(va) || (l1 & ATTR_DESCR_VALID) != 0,
596 	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
597 	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
598 	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
599 	l2p = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(l1));
600 	return (&l2p[pmap_l2_index(va)]);
601 }
602 
603 static __inline pd_entry_t *
604 pmap_l2(pmap_t pmap, vm_offset_t va)
605 {
606 	pd_entry_t *l1;
607 
608 	l1 = pmap_l1(pmap, va);
609 	if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE)
610 		return (NULL);
611 
612 	return (pmap_l1_to_l2(l1, va));
613 }
614 
615 static __inline pt_entry_t *
616 pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
617 {
618 	pd_entry_t l2;
619 	pt_entry_t *l3p;
620 
621 	l2 = pmap_load(l2p);
622 
623 	KASSERT(ADDR_IS_CANONICAL(va),
624 	    ("%s: Address not in canonical form: %lx", __func__, va));
625 	/*
626 	 * The valid bit may be clear if pmap_update_entry() is concurrently
627 	 * modifying the entry, so for KVA only the entry type may be checked.
628 	 */
629 	KASSERT(ADDR_IS_KERNEL(va) || (l2 & ATTR_DESCR_VALID) != 0,
630 	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
631 	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
632 	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
633 	l3p = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(l2));
634 	return (&l3p[pmap_l3_index(va)]);
635 }
636 
637 /*
638  * Returns the lowest valid pde for a given virtual address.
639  * The next level may or may not point to a valid page or block.
640  */
641 static __inline pd_entry_t *
642 pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
643 {
644 	pd_entry_t *l0, *l1, *l2, desc;
645 
646 	l0 = pmap_l0(pmap, va);
647 	desc = pmap_load(l0) & ATTR_DESCR_MASK;
648 	if (desc != L0_TABLE) {
649 		*level = -1;
650 		return (NULL);
651 	}
652 
653 	l1 = pmap_l0_to_l1(l0, va);
654 	desc = pmap_load(l1) & ATTR_DESCR_MASK;
655 	if (desc != L1_TABLE) {
656 		*level = 0;
657 		return (l0);
658 	}
659 
660 	l2 = pmap_l1_to_l2(l1, va);
661 	desc = pmap_load(l2) & ATTR_DESCR_MASK;
662 	if (desc != L2_TABLE) {
663 		*level = 1;
664 		return (l1);
665 	}
666 
667 	*level = 2;
668 	return (l2);
669 }
670 
671 /*
672  * Returns the lowest valid pte block or table entry for a given virtual
673  * address.  If there are no valid entries, return NULL and set the level
674  * to the first invalid level.
675  */
676 static __inline pt_entry_t *
677 pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
678 {
679 	pd_entry_t *l1, *l2, desc;
680 	pt_entry_t *l3;
681 
682 	l1 = pmap_l1(pmap, va);
683 	if (l1 == NULL) {
684 		*level = 0;
685 		return (NULL);
686 	}
687 	desc = pmap_load(l1) & ATTR_DESCR_MASK;
688 	if (desc == L1_BLOCK) {
689 		PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
690 		*level = 1;
691 		return (l1);
692 	}
693 
694 	if (desc != L1_TABLE) {
695 		*level = 1;
696 		return (NULL);
697 	}
698 
699 	l2 = pmap_l1_to_l2(l1, va);
700 	desc = pmap_load(l2) & ATTR_DESCR_MASK;
701 	if (desc == L2_BLOCK) {
702 		*level = 2;
703 		return (l2);
704 	}
705 
706 	if (desc != L2_TABLE) {
707 		*level = 2;
708 		return (NULL);
709 	}
710 
711 	*level = 3;
712 	l3 = pmap_l2_to_l3(l2, va);
713 	if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
714 		return (NULL);
715 
716 	return (l3);
717 }
718 
719 /*
720  * If the given pmap has an L{1,2}_BLOCK or L3_PAGE entry at the specified
721  * level that maps the specified virtual address, then a pointer to that entry
722  * is returned.  Otherwise, NULL is returned, unless INVARIANTS are enabled
723  * and a diagnostic message is provided, in which case this function panics.
724  */
725 static __always_inline pt_entry_t *
726 pmap_pte_exists(pmap_t pmap, vm_offset_t va, int level, const char *diag)
727 {
728 	pd_entry_t *l0p, *l1p, *l2p;
729 	pt_entry_t desc, *l3p;
730 	int walk_level __diagused;
731 
732 	KASSERT(level >= 0 && level < 4,
733 	    ("%s: %s passed an out-of-range level (%d)", __func__, diag,
734 	    level));
735 	l0p = pmap_l0(pmap, va);
736 	desc = pmap_load(l0p) & ATTR_DESCR_MASK;
737 	if (desc == L0_TABLE && level > 0) {
738 		l1p = pmap_l0_to_l1(l0p, va);
739 		desc = pmap_load(l1p) & ATTR_DESCR_MASK;
740 		if (desc == L1_BLOCK && level == 1) {
741 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
742 			return (l1p);
743 		}
744 		if (desc == L1_TABLE && level > 1) {
745 			l2p = pmap_l1_to_l2(l1p, va);
746 			desc = pmap_load(l2p) & ATTR_DESCR_MASK;
747 			if (desc == L2_BLOCK && level == 2)
748 				return (l2p);
749 			else if (desc == L2_TABLE && level > 2) {
750 				l3p = pmap_l2_to_l3(l2p, va);
751 				desc = pmap_load(l3p) & ATTR_DESCR_MASK;
752 				if (desc == L3_PAGE && level == 3)
753 					return (l3p);
754 				else
755 					walk_level = 3;
756 			} else
757 				walk_level = 2;
758 		} else
759 			walk_level = 1;
760 	} else
761 		walk_level = 0;
762 	KASSERT(diag == NULL,
763 	    ("%s: va %#lx not mapped at level %d, desc %ld at level %d",
764 	    diag, va, level, desc, walk_level));
765 	return (NULL);
766 }
767 
768 bool
769 pmap_ps_enabled(pmap_t pmap)
770 {
771 	/*
772 	 * Promotion requires a hypervisor call when the kernel is running
773 	 * in EL1.  To avoid this, superpage support is disabled on
774 	 * non-stage 1 pmaps for now.
775 	 */
776 	if (pmap->pm_stage != PM_STAGE1)
777 		return (false);
778 
779 #ifdef KMSAN
780 	/*
781 	 * The break-before-make in pmap_update_entry() results in a situation
782 	 * where a CPU may call into the KMSAN runtime while the entry is
783 	 * invalid.  If the entry is used to map the current thread structure,
784 	 * then the runtime will attempt to access unmapped memory.  Avoid this
785 	 * by simply disabling superpage promotion for the kernel map.
786 	 */
787 	if (pmap == kernel_pmap)
788 		return (false);
789 #endif
790 
791 	return (superpages_enabled != 0);
792 }
793 
794 bool
795 pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
796     pd_entry_t **l2, pt_entry_t **l3)
797 {
798 	pd_entry_t *l0p, *l1p, *l2p;
799 
800 	if (pmap->pm_l0 == NULL)
801 		return (false);
802 
803 	l0p = pmap_l0(pmap, va);
804 	*l0 = l0p;
805 
806 	if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE)
807 		return (false);
808 
809 	l1p = pmap_l0_to_l1(l0p, va);
810 	*l1 = l1p;
811 
812 	if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) {
813 		PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
814 		*l2 = NULL;
815 		*l3 = NULL;
816 		return (true);
817 	}
818 
819 	if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE)
820 		return (false);
821 
822 	l2p = pmap_l1_to_l2(l1p, va);
823 	*l2 = l2p;
824 
825 	if ((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK) {
826 		*l3 = NULL;
827 		return (true);
828 	}
829 
830 	if ((pmap_load(l2p) & ATTR_DESCR_MASK) != L2_TABLE)
831 		return (false);
832 
833 	*l3 = pmap_l2_to_l3(l2p, va);
834 
835 	return (true);
836 }
837 
838 static __inline int
839 pmap_l3_valid(pt_entry_t l3)
840 {
841 
842 	return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
843 }
844 
845 CTASSERT(L1_BLOCK == L2_BLOCK);
846 
847 static pt_entry_t
848 pmap_pte_memattr(pmap_t pmap, vm_memattr_t memattr)
849 {
850 	pt_entry_t val;
851 
852 	if (pmap->pm_stage == PM_STAGE1) {
853 		val = ATTR_S1_IDX(memattr);
854 		if (memattr == VM_MEMATTR_DEVICE)
855 			val |= ATTR_S1_XN;
856 		return (val);
857 	}
858 
859 	val = 0;
860 
861 	switch (memattr) {
862 	case VM_MEMATTR_DEVICE:
863 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_DEVICE_nGnRnE) |
864 		    ATTR_S2_XN(ATTR_S2_XN_ALL));
865 	case VM_MEMATTR_UNCACHEABLE:
866 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_NC));
867 	case VM_MEMATTR_WRITE_BACK:
868 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_WB));
869 	case VM_MEMATTR_WRITE_THROUGH:
870 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_WT));
871 	default:
872 		panic("%s: invalid memory attribute %x", __func__, memattr);
873 	}
874 }
875 
876 static pt_entry_t
877 pmap_pte_prot(pmap_t pmap, vm_prot_t prot)
878 {
879 	pt_entry_t val;
880 
881 	val = 0;
882 	if (pmap->pm_stage == PM_STAGE1) {
883 		if ((prot & VM_PROT_EXECUTE) == 0)
884 			val |= ATTR_S1_XN;
885 		if ((prot & VM_PROT_WRITE) == 0)
886 			val |= ATTR_S1_AP(ATTR_S1_AP_RO);
887 	} else {
888 		if ((prot & VM_PROT_WRITE) != 0)
889 			val |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
890 		if ((prot & VM_PROT_READ) != 0)
891 			val |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
892 		if ((prot & VM_PROT_EXECUTE) == 0)
893 			val |= ATTR_S2_XN(ATTR_S2_XN_ALL);
894 	}
895 
896 	return (val);
897 }
898 
899 /*
900  * Checks if the PTE is dirty.
901  */
902 static inline int
903 pmap_pte_dirty(pmap_t pmap, pt_entry_t pte)
904 {
905 
906 	KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte));
907 
908 	if (pmap->pm_stage == PM_STAGE1) {
909 		KASSERT((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) != 0,
910 		    ("pte %#lx is writeable and missing ATTR_SW_DBM", pte));
911 
912 		return ((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
913 		    (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM));
914 	}
915 
916 	return ((pte & ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)) ==
917 	    ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE));
918 }
919 
920 static __inline void
921 pmap_resident_count_inc(pmap_t pmap, int count)
922 {
923 
924 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
925 	pmap->pm_stats.resident_count += count;
926 }
927 
928 static __inline void
929 pmap_resident_count_dec(pmap_t pmap, int count)
930 {
931 
932 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
933 	KASSERT(pmap->pm_stats.resident_count >= count,
934 	    ("pmap %p resident count underflow %ld %d", pmap,
935 	    pmap->pm_stats.resident_count, count));
936 	pmap->pm_stats.resident_count -= count;
937 }
938 
939 static vm_paddr_t
940 pmap_early_vtophys(vm_offset_t va)
941 {
942 	vm_paddr_t pa_page;
943 
944 	pa_page = arm64_address_translate_s1e1r(va) & PAR_PA_MASK;
945 	return (pa_page | (va & PAR_LOW_MASK));
946 }
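 
/*
 * pmap_early_vtophys() leans on the hardware "at s1e1r" translation (via
 * arm64_address_translate_s1e1r()), so it works against whatever page
 * tables are currently live, before the DMAP or the kernel pmap structures
 * exist; the page offset is reattached from the low bits of the VA.
 */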
947 
948 /* State of the bootstrapped DMAP page tables */
949 struct pmap_bootstrap_state {
950 	pt_entry_t	*l1;
951 	pt_entry_t	*l2;
952 	pt_entry_t	*l3;
953 	vm_offset_t	freemempos;
954 	vm_offset_t	va;
955 	vm_paddr_t	pa;
956 	pt_entry_t	table_attrs;
957 	u_int		l0_slot;
958 	u_int		l1_slot;
959 	u_int		l2_slot;
960 	bool		dmap_valid;
961 };
962 
963 /* The bootstrap state */
964 static struct pmap_bootstrap_state bs_state = {
965 	.l1 = NULL,
966 	.l2 = NULL,
967 	.l3 = NULL,
968 	.table_attrs = TATTR_PXN_TABLE,
969 	.l0_slot = L0_ENTRIES,
970 	.l1_slot = Ln_ENTRIES,
971 	.l2_slot = Ln_ENTRIES,
972 	.dmap_valid = false,
973 };
974 
975 static void
976 pmap_bootstrap_l0_table(struct pmap_bootstrap_state *state)
977 {
978 	vm_paddr_t l1_pa;
979 	pd_entry_t l0e;
980 	u_int l0_slot;
981 
982 	/* Link the level 0 table to a level 1 table */
983 	l0_slot = pmap_l0_index(state->va);
984 	if (l0_slot != state->l0_slot) {
985 		/*
986 		 * Make sure we move from a low address to high address
987 		 * before the DMAP region is ready. This ensures we never
988 		 * modify an existing mapping until we can map from a
989 		 * physical address to a virtual address.
990 		 */
991 		MPASS(state->l0_slot < l0_slot ||
992 		    state->l0_slot == L0_ENTRIES ||
993 		    state->dmap_valid);
994 
995 		/* Reset lower levels */
996 		state->l2 = NULL;
997 		state->l3 = NULL;
998 		state->l1_slot = Ln_ENTRIES;
999 		state->l2_slot = Ln_ENTRIES;
1000 
1001 		/* Check the existing L0 entry */
1002 		state->l0_slot = l0_slot;
1003 		if (state->dmap_valid) {
1004 			l0e = pagetable_l0_ttbr1[l0_slot];
1005 			if ((l0e & ATTR_DESCR_VALID) != 0) {
1006 				MPASS((l0e & ATTR_DESCR_MASK) == L0_TABLE);
1007 				l1_pa = PTE_TO_PHYS(l0e);
1008 				state->l1 = (pt_entry_t *)PHYS_TO_DMAP(l1_pa);
1009 				return;
1010 			}
1011 		}
1012 
1013 		/* Create a new L0 table entry */
1014 		state->l1 = (pt_entry_t *)state->freemempos;
1015 		memset(state->l1, 0, PAGE_SIZE);
1016 		state->freemempos += PAGE_SIZE;
1017 
1018 		l1_pa = pmap_early_vtophys((vm_offset_t)state->l1);
1019 		MPASS((l1_pa & Ln_TABLE_MASK) == 0);
1020 		MPASS(pagetable_l0_ttbr1[l0_slot] == 0);
1021 		pmap_store(&pagetable_l0_ttbr1[l0_slot], PHYS_TO_PTE(l1_pa) |
1022 		    TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0 | L0_TABLE);
1023 	}
1024 	KASSERT(state->l1 != NULL, ("%s: NULL l1", __func__));
1025 }
1026 
1027 static void
1028 pmap_bootstrap_l1_table(struct pmap_bootstrap_state *state)
1029 {
1030 	vm_paddr_t l2_pa;
1031 	pd_entry_t l1e;
1032 	u_int l1_slot;
1033 
1034 	/* Make sure there is a valid L0 -> L1 table */
1035 	pmap_bootstrap_l0_table(state);
1036 
1037 	/* Link the level 1 table to a level 2 table */
1038 	l1_slot = pmap_l1_index(state->va);
1039 	if (l1_slot != state->l1_slot) {
1040 		/* See pmap_bootstrap_l0_table for a description */
1041 		MPASS(state->l1_slot < l1_slot ||
1042 		    state->l1_slot == Ln_ENTRIES ||
1043 		    state->dmap_valid);
1044 
1045 		/* Reset lower levels */
1046 		state->l3 = NULL;
1047 		state->l2_slot = Ln_ENTRIES;
1048 
1049 		/* Check the existing L1 entry */
1050 		state->l1_slot = l1_slot;
1051 		if (state->dmap_valid) {
1052 			l1e = state->l1[l1_slot];
1053 			if ((l1e & ATTR_DESCR_VALID) != 0) {
1054 				MPASS((l1e & ATTR_DESCR_MASK) == L1_TABLE);
1055 				l2_pa = PTE_TO_PHYS(l1e);
1056 				state->l2 = (pt_entry_t *)PHYS_TO_DMAP(l2_pa);
1057 				return;
1058 			}
1059 		}
1060 
1061 		/* Create a new L1 table entry */
1062 		state->l2 = (pt_entry_t *)state->freemempos;
1063 		memset(state->l2, 0, PAGE_SIZE);
1064 		state->freemempos += PAGE_SIZE;
1065 
1066 		l2_pa = pmap_early_vtophys((vm_offset_t)state->l2);
1067 		MPASS((l2_pa & Ln_TABLE_MASK) == 0);
1068 		MPASS(state->l1[l1_slot] == 0);
1069 		pmap_store(&state->l1[l1_slot], PHYS_TO_PTE(l2_pa) |
1070 		    state->table_attrs | L1_TABLE);
1071 	}
1072 	KASSERT(state->l2 != NULL, ("%s: NULL l2", __func__));
1073 }
1074 
1075 static void
1076 pmap_bootstrap_l2_table(struct pmap_bootstrap_state *state)
1077 {
1078 	vm_paddr_t l3_pa;
1079 	pd_entry_t l2e;
1080 	u_int l2_slot;
1081 
1082 	/* Make sure there is a valid L1 -> L2 table */
1083 	pmap_bootstrap_l1_table(state);
1084 
1085 	/* Link the level 2 table to a level 3 table */
1086 	l2_slot = pmap_l2_index(state->va);
1087 	if (l2_slot != state->l2_slot) {
1088 		/* See pmap_bootstrap_l0_table for a description */
1089 		MPASS(state->l2_slot < l2_slot ||
1090 		    state->l2_slot == Ln_ENTRIES ||
1091 		    state->dmap_valid);
1092 
1093 		/* Check the existing L2 entry */
1094 		state->l2_slot = l2_slot;
1095 		if (state->dmap_valid) {
1096 			l2e = state->l2[l2_slot];
1097 			if ((l2e & ATTR_DESCR_VALID) != 0) {
1098 				MPASS((l2e & ATTR_DESCR_MASK) == L2_TABLE);
1099 				l3_pa = PTE_TO_PHYS(l2e);
1100 				state->l3 = (pt_entry_t *)PHYS_TO_DMAP(l3_pa);
1101 				return;
1102 			}
1103 		}
1104 
1105 		/* Create a new L2 table entry */
1106 		state->l3 = (pt_entry_t *)state->freemempos;
1107 		memset(state->l3, 0, PAGE_SIZE);
1108 		state->freemempos += PAGE_SIZE;
1109 
1110 		l3_pa = pmap_early_vtophys((vm_offset_t)state->l3);
1111 		MPASS((l3_pa & Ln_TABLE_MASK) == 0);
1112 		MPASS(state->l2[l2_slot] == 0);
1113 		pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(l3_pa) |
1114 		    state->table_attrs | L2_TABLE);
1115 	}
1116 	KASSERT(state->l3 != NULL, ("%s: NULL l3", __func__));
1117 }
1118 
1119 static void
1120 pmap_bootstrap_l2_block(struct pmap_bootstrap_state *state, int i)
1121 {
1122 	pt_entry_t contig;
1123 	u_int l2_slot;
1124 	bool first;
1125 
1126 	if ((physmap[i + 1] - state->pa) < L2_SIZE)
1127 		return;
1128 
1129 	/* Make sure there is a valid L1 table */
1130 	pmap_bootstrap_l1_table(state);
1131 
1132 	MPASS((state->va & L2_OFFSET) == 0);
1133 	for (first = true, contig = 0;
1134 	    state->va < DMAP_MAX_ADDRESS &&
1135 	    (physmap[i + 1] - state->pa) >= L2_SIZE;
1136 	    state->va += L2_SIZE, state->pa += L2_SIZE) {
1137 		/*
1138 		 * Stop if we are about to walk off the end of what the
1139 		 * current L1 slot can address.
1140 		 */
1141 		if (!first && (state->pa & L1_OFFSET) == 0)
1142 			break;
1143 
1144 		/*
1145 		 * If we have an aligned, contiguous chunk of L2C_ENTRIES
1146 		 * L2 blocks, set the contiguous bit within each PTE so that
1147 		 * the chunk can be cached using only one TLB entry.
1148 		 */
1149 		if ((state->pa & L2C_OFFSET) == 0) {
1150 			if (state->va + L2C_SIZE < DMAP_MAX_ADDRESS &&
1151 			    physmap[i + 1] - state->pa >= L2C_SIZE) {
1152 				contig = ATTR_CONTIGUOUS;
1153 			} else {
1154 				contig = 0;
1155 			}
1156 		}
1157 
1158 		first = false;
1159 		l2_slot = pmap_l2_index(state->va);
1160 		MPASS((state->pa & L2_OFFSET) == 0);
1161 		MPASS(state->l2[l2_slot] == 0);
1162 		pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(state->pa) |
1163 		    ATTR_AF | pmap_sh_attr | ATTR_S1_XN | ATTR_KERN_GP |
1164 		    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | contig | L2_BLOCK);
1165 	}
1166 	MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
1167 }
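 
/*
 * With a 4 KB granule the ATTR_CONTIGUOUS hint set above groups 16 L2
 * blocks, so an aligned 16 * 2 MB = 32 MB run of the DMAP can be held in a
 * single TLB entry; the L3 equivalent below groups 16 pages into a 64 KB
 * run.
 */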
1168 
1169 static void
1170 pmap_bootstrap_l3_page(struct pmap_bootstrap_state *state, int i)
1171 {
1172 	pt_entry_t contig;
1173 	u_int l3_slot;
1174 	bool first;
1175 
1176 	if (physmap[i + 1] - state->pa < L3_SIZE)
1177 		return;
1178 
1179 	/* Make sure there is a valid L2 table */
1180 	pmap_bootstrap_l2_table(state);
1181 
1182 	MPASS((state->va & L3_OFFSET) == 0);
1183 	for (first = true, contig = 0;
1184 	    state->va < DMAP_MAX_ADDRESS &&
1185 	    physmap[i + 1] - state->pa >= L3_SIZE;
1186 	    state->va += L3_SIZE, state->pa += L3_SIZE) {
1187 		/*
1188 		 * Stop if we are about to walk off the end of what the
1189 		 * current L2 slot can address.
1190 		 */
1191 		if (!first && (state->pa & L2_OFFSET) == 0)
1192 			break;
1193 
1194 		/*
1195 		 * If we have an aligned, contiguous chunk of L3C_ENTRIES
1196 		 * L3 pages, set the contiguous bit within each PTE so that
1197 		 * the chunk can be cached using only one TLB entry.
1198 		 */
1199 		if ((state->pa & L3C_OFFSET) == 0) {
1200 			if (state->va + L3C_SIZE < DMAP_MAX_ADDRESS &&
1201 			    physmap[i + 1] - state->pa >= L3C_SIZE) {
1202 				contig = ATTR_CONTIGUOUS;
1203 			} else {
1204 				contig = 0;
1205 			}
1206 		}
1207 
1208 		first = false;
1209 		l3_slot = pmap_l3_index(state->va);
1210 		MPASS((state->pa & L3_OFFSET) == 0);
1211 		MPASS(state->l3[l3_slot] == 0);
1212 		pmap_store(&state->l3[l3_slot], PHYS_TO_PTE(state->pa) |
1213 		    ATTR_AF | pmap_sh_attr | ATTR_S1_XN | ATTR_KERN_GP |
1214 		    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | contig | L3_PAGE);
1215 	}
1216 	MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
1217 }
1218 
1219 void
1220 pmap_bootstrap_dmap(vm_size_t kernlen)
1221 {
1222 	vm_paddr_t start_pa, pa;
1223 	uint64_t tcr;
1224 	int i;
1225 
1226 	tcr = READ_SPECIALREG(tcr_el1);
1227 
1228 	/* Verify that the ASID is set through TTBR0. */
1229 	KASSERT((tcr & TCR_A1) == 0, ("pmap_bootstrap: TCR_EL1.A1 != 0"));
1230 
1231 	if ((tcr & TCR_DS) != 0)
1232 		pmap_lpa_enabled = true;
1233 
1234 	pmap_l1_supported = L1_BLOCKS_SUPPORTED;
1235 
1236 	start_pa = pmap_early_vtophys(KERNBASE);
1237 
1238 	bs_state.freemempos = KERNBASE + kernlen;
1239 	bs_state.freemempos = roundup2(bs_state.freemempos, PAGE_SIZE);
1240 
1241 	/* Fill in physmap array. */
1242 	physmap_idx = physmem_avail(physmap, nitems(physmap));
1243 
1244 	dmap_phys_base = physmap[0] & ~L1_OFFSET;
1245 	dmap_phys_max = 0;
1246 	dmap_max_addr = 0;
1247 
1248 	for (i = 0; i < physmap_idx; i += 2) {
1249 		bs_state.pa = physmap[i] & ~L3_OFFSET;
1250 		bs_state.va = bs_state.pa - dmap_phys_base + DMAP_MIN_ADDRESS;
1251 
1252 		/* Create L3 mappings at the start of the region */
1253 		if ((bs_state.pa & L2_OFFSET) != 0)
1254 			pmap_bootstrap_l3_page(&bs_state, i);
1255 		MPASS(bs_state.pa <= physmap[i + 1]);
1256 
1257 		if (L1_BLOCKS_SUPPORTED) {
1258 			/* Create L2 mappings at the start of the region */
1259 			if ((bs_state.pa & L1_OFFSET) != 0)
1260 				pmap_bootstrap_l2_block(&bs_state, i);
1261 			MPASS(bs_state.pa <= physmap[i + 1]);
1262 
1263 			/* Create the main L1 block mappings */
1264 			for (; bs_state.va < DMAP_MAX_ADDRESS &&
1265 			    (physmap[i + 1] - bs_state.pa) >= L1_SIZE;
1266 			    bs_state.va += L1_SIZE, bs_state.pa += L1_SIZE) {
1267 				/* Make sure there is a valid L1 table */
1268 				pmap_bootstrap_l0_table(&bs_state);
1269 				MPASS((bs_state.pa & L1_OFFSET) == 0);
1270 				pmap_store(
1271 				    &bs_state.l1[pmap_l1_index(bs_state.va)],
1272 				    PHYS_TO_PTE(bs_state.pa) | ATTR_AF |
1273 				    pmap_sh_attr |
1274 				    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
1275 				    ATTR_S1_XN | ATTR_KERN_GP | L1_BLOCK);
1276 			}
1277 			MPASS(bs_state.pa <= physmap[i + 1]);
1278 
1279 			/* Create L2 mappings at the end of the region */
1280 			pmap_bootstrap_l2_block(&bs_state, i);
1281 		} else {
1282 			while (bs_state.va < DMAP_MAX_ADDRESS &&
1283 			    (physmap[i + 1] - bs_state.pa) >= L2_SIZE) {
1284 				pmap_bootstrap_l2_block(&bs_state, i);
1285 			}
1286 		}
1287 		MPASS(bs_state.pa <= physmap[i + 1]);
1288 
1289 		/* Create L3 mappings at the end of the region */
1290 		pmap_bootstrap_l3_page(&bs_state, i);
1291 		MPASS(bs_state.pa == physmap[i + 1]);
1292 
1293 		if (bs_state.pa > dmap_phys_max) {
1294 			dmap_phys_max = bs_state.pa;
1295 			dmap_max_addr = bs_state.va;
1296 		}
1297 	}
1298 
1299 	cpu_tlb_flushID();
1300 
1301 	bs_state.dmap_valid = true;
1302 
1303 	/* Exclude the kernel and DMAP region */
1304 	pa = pmap_early_vtophys(bs_state.freemempos);
1305 	physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
1306 }
1307 
1308 static void
1309 pmap_bootstrap_l2(vm_offset_t va)
1310 {
1311 	KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address"));
1312 
1313 	/* Leave bs_state.pa as it's only needed to bootstrap blocks and pages */
1314 	bs_state.va = va;
1315 
1316 	for (; bs_state.va < VM_MAX_KERNEL_ADDRESS; bs_state.va += L1_SIZE)
1317 		pmap_bootstrap_l1_table(&bs_state);
1318 }
1319 
1320 static void
1321 pmap_bootstrap_l3(vm_offset_t va)
1322 {
1323 	KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));
1324 
1325 	/* Leave bs_state.pa as it's only needed to bootstrap blocks and pages */
1326 	bs_state.va = va;
1327 
1328 	for (; bs_state.va < VM_MAX_KERNEL_ADDRESS; bs_state.va += L2_SIZE)
1329 		pmap_bootstrap_l2_table(&bs_state);
1330 }
1331 
1332 /*
1333  *	Bootstrap the system enough to run with virtual memory.
1334  */
1335 void
1336 pmap_bootstrap(void)
1337 {
1338 	vm_offset_t dpcpu, msgbufpv;
1339 	vm_paddr_t start_pa, pa;
1340 	size_t largest_phys_size;
1341 
1342 	/* Set this early so we can use the pagetable walking functions */
1343 	kernel_pmap_store.pm_l0 = pagetable_l0_ttbr1;
1344 	PMAP_LOCK_INIT(kernel_pmap);
1345 	kernel_pmap->pm_l0_paddr =
1346 	    pmap_early_vtophys((vm_offset_t)kernel_pmap_store.pm_l0);
1347 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
1348 	vm_radix_init(&kernel_pmap->pm_root);
1349 	kernel_pmap->pm_cookie = COOKIE_FROM(-1, INT_MIN);
1350 	kernel_pmap->pm_stage = PM_STAGE1;
1351 	kernel_pmap->pm_levels = 4;
1352 	kernel_pmap->pm_ttbr = kernel_pmap->pm_l0_paddr;
1353 	kernel_pmap->pm_asid_set = &asids;
1354 
1355 	/* Reserve some VA space for early BIOS/ACPI mapping */
1356 	preinit_map_va = roundup2(bs_state.freemempos, L2_SIZE);
1357 
1358 	virtual_avail = preinit_map_va + PMAP_PREINIT_MAPPING_SIZE;
1359 	virtual_avail = roundup2(virtual_avail, L1_SIZE);
1360 	virtual_end = VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE;
1361 	kernel_vm_end = virtual_avail;
1362 
1363 	/*
1364 	 * We only use PXN when we know nothing will be executed from it, e.g.
1365 	 * the DMAP region.
1366 	 */
1367 	bs_state.table_attrs &= ~TATTR_PXN_TABLE;
1368 
1369 	/*
1370 	 * Find the physical memory we could use. This needs to be after we
1371 	 * exclude any memory that is mapped into the DMAP region but should
1372 	 * not be used by the kernel, e.g. some UEFI memory types.
1373 	 */
1374 	physmap_idx = physmem_avail(physmap, nitems(physmap));
1375 
1376 	/*
1377 	 * Find space for early allocations. We search for the largest
1378 	 * region. This is because the user may choose a large msgbuf.
1379 	 * This could be smarter, e.g. to allow multiple regions to be
1380 	 * used & switch to the next when one is full.
1381 	 */
1382 	largest_phys_size = 0;
1383 	for (int i = 0; i < physmap_idx; i += 2) {
1384 		if ((physmap[i + 1] - physmap[i]) > largest_phys_size) {
1385 			largest_phys_size = physmap[i + 1] - physmap[i];
1386 			bs_state.freemempos = PHYS_TO_DMAP(physmap[i]);
1387 		}
1388 	}
1389 
1390 	start_pa = pmap_early_vtophys(bs_state.freemempos);
1391 
1392 	/*
1393 	 * Create the l2 tables up to VM_MAX_KERNEL_ADDRESS.  We assume that the
1394 	 * loader allocated the first and only l2 page table page used to map
1395 	 * the kernel, preloaded files and module metadata.
1396 	 */
1397 	pmap_bootstrap_l2(KERNBASE + L1_SIZE);
1398 	/* And the l3 tables for the early devmap */
1399 	pmap_bootstrap_l3(VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE));
1400 
1401 	cpu_tlb_flushID();
1402 
1403 #define alloc_pages(var, np)						\
1404 	(var) = bs_state.freemempos;					\
1405 	bs_state.freemempos += (np * PAGE_SIZE);			\
1406 	memset((char *)(var), 0, ((np) * PAGE_SIZE));
1407 
1408 	/* Allocate dynamic per-cpu area. */
1409 	alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
1410 	dpcpu_init((void *)dpcpu, 0);
1411 
1412 	/* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
1413 	alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
1414 	msgbufp = (void *)msgbufpv;
1415 
1416 	pa = pmap_early_vtophys(bs_state.freemempos);
1417 
1418 	physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
1419 }
1420 
1421 #if defined(KASAN) || defined(KMSAN)
1422 static void
1423 pmap_bootstrap_allocate_san_l2(vm_paddr_t start_pa, vm_paddr_t end_pa,
1424     vm_offset_t *vap, vm_offset_t eva)
1425 {
1426 	vm_paddr_t pa;
1427 	vm_offset_t va;
1428 	pd_entry_t *l2;
1429 
1430 	va = *vap;
1431 	pa = rounddown2(end_pa - L2_SIZE, L2_SIZE);
1432 	for (; pa >= start_pa && va < eva; va += L2_SIZE, pa -= L2_SIZE) {
1433 		l2 = pmap_l2(kernel_pmap, va);
1434 
1435 		/*
1436 		 * KASAN stack checking results in us having already allocated
1437 		 * part of our shadow map, so we can just skip those segments.
1438 		 */
1439 		if ((pmap_load(l2) & ATTR_DESCR_VALID) != 0) {
1440 			pa += L2_SIZE;
1441 			continue;
1442 		}
1443 
1444 		bzero((void *)PHYS_TO_DMAP(pa), L2_SIZE);
1445 		physmem_exclude_region(pa, L2_SIZE, EXFLAG_NOALLOC);
1446 		pmap_store(l2, PHYS_TO_PTE(pa) | PMAP_SAN_PTE_BITS | L2_BLOCK);
1447 	}
1448 	*vap = va;
1449 }
1450 
1451 /*
1452  * Finish constructing the initial shadow map:
1453  * - Count how many pages from KERNBASE to virtual_avail (scaled for
1454  *   shadow map)
1455  * - Map that entire range using L2 superpages.
1456  */
1457 static void
1458 pmap_bootstrap_san1(vm_offset_t va, int scale)
1459 {
1460 	vm_offset_t eva;
1461 	vm_paddr_t kernstart;
1462 	int i;
1463 
1464 	kernstart = pmap_early_vtophys(KERNBASE);
1465 
1466 	/*
1467 	 * Rebuild physmap one more time; we may have excluded more regions from
1468 	 * allocation since pmap_bootstrap().
1469 	 */
1470 	physmap_idx = physmem_avail(physmap, nitems(physmap));
1471 
1472 	eva = va + (virtual_avail - VM_MIN_KERNEL_ADDRESS) / scale;
1473 
1474 	/*
1475 	 * Find a slot in the physmap large enough for what we need.  We try to put
1476 	 * the shadow map as high up as we can to avoid depleting the lower 4GB in case
1477 	 * it's needed for, e.g., an xhci controller that can only do 32-bit DMA.
1478 	 */
1479 	for (i = physmap_idx - 2; i >= 0; i -= 2) {
1480 		vm_paddr_t plow, phigh;
1481 
1482 		/* L2 mappings must be backed by memory that is L2-aligned */
1483 		plow = roundup2(physmap[i], L2_SIZE);
1484 		phigh = physmap[i + 1];
1485 		if (plow >= phigh)
1486 			continue;
1487 		if (kernstart >= plow && kernstart < phigh)
1488 			phigh = kernstart;
1489 		if (phigh - plow >= L2_SIZE) {
1490 			pmap_bootstrap_allocate_san_l2(plow, phigh, &va, eva);
1491 			if (va >= eva)
1492 				break;
1493 		}
1494 	}
1495 	if (i < 0)
1496 		panic("Could not find phys region for shadow map");
1497 
1498 	/*
1499 	 * Done. We should now have a valid shadow address mapped for all KVA
1500 	 * that has been mapped so far, i.e., KERNBASE to virtual_avail. Thus,
1501 	 * shadow accesses by the sanitizer runtime will succeed for this range.
1502 	 * When the kernel virtual address range is later expanded, as will
1503 	 * happen in vm_mem_init(), the shadow map will be grown as well. This
1504 	 * is handled by pmap_san_enter().
1505 	 */
1506 }
1507 
1508 void
1509 pmap_bootstrap_san(void)
1510 {
1511 #ifdef KASAN
1512 	pmap_bootstrap_san1(KASAN_MIN_ADDRESS, KASAN_SHADOW_SCALE);
1513 #else
1514 	static uint8_t kmsan_shad_ptp[PAGE_SIZE * 2] __aligned(PAGE_SIZE);
1515 	static uint8_t kmsan_orig_ptp[PAGE_SIZE * 2] __aligned(PAGE_SIZE);
1516 	pd_entry_t *l0, *l1;
1517 
1518 	if (virtual_avail - VM_MIN_KERNEL_ADDRESS > L1_SIZE)
1519 		panic("initial kernel map is too large");
1520 
1521 	l0 = pmap_l0(kernel_pmap, KMSAN_SHAD_MIN_ADDRESS);
1522 	pmap_store(l0, L0_TABLE | PHYS_TO_PTE(
1523 	    pmap_early_vtophys((vm_offset_t)kmsan_shad_ptp)));
1524 	l1 = pmap_l0_to_l1(l0, KMSAN_SHAD_MIN_ADDRESS);
1525 	pmap_store(l1, L1_TABLE | PHYS_TO_PTE(
1526 	    pmap_early_vtophys((vm_offset_t)kmsan_shad_ptp + PAGE_SIZE)));
1527 	pmap_bootstrap_san1(KMSAN_SHAD_MIN_ADDRESS, 1);
1528 
1529 	l0 = pmap_l0(kernel_pmap, KMSAN_ORIG_MIN_ADDRESS);
1530 	pmap_store(l0, L0_TABLE | PHYS_TO_PTE(
1531 	    pmap_early_vtophys((vm_offset_t)kmsan_orig_ptp)));
1532 	l1 = pmap_l0_to_l1(l0, KMSAN_ORIG_MIN_ADDRESS);
1533 	pmap_store(l1, L1_TABLE | PHYS_TO_PTE(
1534 	    pmap_early_vtophys((vm_offset_t)kmsan_orig_ptp + PAGE_SIZE)));
1535 	pmap_bootstrap_san1(KMSAN_ORIG_MIN_ADDRESS, 1);
1536 #endif
1537 }
1538 #endif
1539 
1540 /*
1541  *	Initialize a vm_page's machine-dependent fields.
1542  */
1543 void
1544 pmap_page_init(vm_page_t m)
1545 {
1546 
1547 	TAILQ_INIT(&m->md.pv_list);
1548 	m->md.pv_memattr = VM_MEMATTR_WRITE_BACK;
1549 }
1550 
1551 static void
1552 pmap_init_asids(struct asid_set *set, int bits)
1553 {
1554 	int i;
1555 
1556 	set->asid_bits = bits;
1557 
1558 	/*
1559 	 * We may be too early in the overall initialization process to use
1560 	 * bit_alloc().
1561 	 */
1562 	set->asid_set_size = 1 << set->asid_bits;
1563 	set->asid_set = kmem_malloc(bitstr_size(set->asid_set_size),
1564 	    M_WAITOK | M_ZERO);
1565 	for (i = 0; i < ASID_FIRST_AVAILABLE; i++)
1566 		bit_set(set->asid_set, i);
1567 	set->asid_next = ASID_FIRST_AVAILABLE;
1568 	mtx_init(&set->asid_set_mutex, "asid set", NULL, MTX_SPIN);
1569 }
1570 
1571 static void
1572 pmap_init_pv_table(void)
1573 {
1574 	struct vm_phys_seg *seg, *next_seg;
1575 	struct pmap_large_md_page *pvd;
1576 	vm_size_t s;
1577 	int domain, i, j, pages;
1578 
1579 	/*
1580 	 * We depend on the size being evenly divisible into a page so
1581 	 * that the pv_table array can be indexed directly while
1582 	 * safely spanning multiple pages from different domains.
1583 	 */
1584 	CTASSERT(PAGE_SIZE % sizeof(*pvd) == 0);
1585 
1586 	/*
1587 	 * Calculate the size of the array.
1588 	 */
1589 	s = 0;
1590 	for (i = 0; i < vm_phys_nsegs; i++) {
1591 		seg = &vm_phys_segs[i];
1592 		pages = pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
1593 		    pmap_l2_pindex(seg->start);
1594 		s += round_page(pages * sizeof(*pvd));
1595 	}
1596 	pv_table = (struct pmap_large_md_page *)kva_alloc(s);
1597 	if (pv_table == NULL)
1598 		panic("%s: kva_alloc failed\n", __func__);
1599 
1600 	/*
1601 	 * Iterate physical segments to allocate domain-local memory for PV
1602 	 * list headers.
1603 	 */
1604 	pvd = pv_table;
1605 	for (i = 0; i < vm_phys_nsegs; i++) {
1606 		seg = &vm_phys_segs[i];
1607 		pages = pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
1608 		    pmap_l2_pindex(seg->start);
1609 		domain = seg->domain;
1610 
1611 		s = round_page(pages * sizeof(*pvd));
1612 
1613 		for (j = 0; j < s; j += PAGE_SIZE) {
1614 			vm_page_t m = vm_page_alloc_noobj_domain(domain,
1615 			    VM_ALLOC_ZERO);
1616 			if (m == NULL)
1617 				panic("failed to allocate PV table page");
1618 			pmap_qenter((vm_offset_t)pvd + j, &m, 1);
1619 		}
1620 
1621 		for (j = 0; j < s / sizeof(*pvd); j++) {
1622 			rw_init_flags(&pvd->pv_lock, "pmap pv list", RW_NEW);
1623 			TAILQ_INIT(&pvd->pv_page.pv_list);
1624 			pvd++;
1625 		}
1626 	}
1627 	pvd = &pv_dummy_large;
1628 	memset(pvd, 0, sizeof(*pvd));
1629 	rw_init_flags(&pvd->pv_lock, "pmap pv list dummy", RW_NEW);
1630 	TAILQ_INIT(&pvd->pv_page.pv_list);
1631 
1632 	/*
1633 	 * Set pointers from vm_phys_segs to pv_table.
1634 	 */
1635 	for (i = 0, pvd = pv_table; i < vm_phys_nsegs; i++) {
1636 		seg = &vm_phys_segs[i];
1637 		seg->md_first = pvd;
1638 		pvd += pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
1639 		    pmap_l2_pindex(seg->start);
1640 
1641 		/*
1642 		 * If there is a following segment, and the final
1643 		 * superpage of this segment and the initial superpage
1644 		 * of the next segment are the same then adjust the
1645 		 * pv_table entry for that next segment down by one so
1646 		 * that the pv_table entries will be shared.
1647 		 */
1648 		if (i + 1 < vm_phys_nsegs) {
1649 			next_seg = &vm_phys_segs[i + 1];
1650 			if (pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) - 1 ==
1651 			    pmap_l2_pindex(next_seg->start)) {
1652 				pvd--;
1653 			}
1654 		}
1655 	}
1656 }
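
/*
 * Editor's note (not part of pmap.c): the index arithmetic behind the
 * pv_table layout built above.  Each physical segment is given one PV list
 * header per L2 (2 MB) superpage, so a physical address selects its header
 * by the difference of L2 page indices from the segment start.  The shift
 * below assumes a 4 KB granule and is a local stand-in for the kernel's
 * pmap_l2_pindex().
 */
#include <stdint.h>
#include <stdio.h>

#define	DEMO_L2_SHIFT	21	/* 2 MB superpages with a 4 KB granule */

static uint64_t
demo_l2_pindex(uint64_t pa)
{
	return (pa >> DEMO_L2_SHIFT);
}

int
main(void)
{
	uint64_t seg_start = 0x80000000ULL;	/* assumed segment start */
	uint64_t pa = 0x80a12345ULL;		/* a page within that segment */
	uint64_t idx;

	/* Index of this page's superpage within the segment's header run. */
	idx = demo_l2_pindex(pa) - demo_l2_pindex(seg_start);
	printf("pv_table index within segment: %ju\n", (uintmax_t)idx);
	return (0);
}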
1657 
1658 static bool
1659 pmap_dbm_check(const struct cpu_feat *feat __unused, u_int midr __unused)
1660 {
1661 	uint64_t id_aa64mmfr1;
1662 
1663 	id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
1664 	return (ID_AA64MMFR1_HAFDBS_VAL(id_aa64mmfr1) >=
1665 	    ID_AA64MMFR1_HAFDBS_AF_DBS);
1666 }
1667 
1668 static bool
1669 pmap_dbm_has_errata(const struct cpu_feat *feat __unused, u_int midr,
1670     u_int **errata_list, u_int *errata_count)
1671 {
1672 	/* Disable on Cortex-A55 for erratum 1024718 - all revisions */
1673 	if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK, CPU_IMPL_ARM,
1674 	    CPU_PART_CORTEX_A55, 0, 0)) {
1675 		static u_int errata_id = 1024718;
1676 
1677 		*errata_list = &errata_id;
1678 		*errata_count = 1;
1679 		return (true);
1680 	}
1681 
1682 	/* Disable on Cortex-A510 for erratum 2051678 - r0p0 to r0p2 */
1683 	if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_VAR_MASK,
1684 	    CPU_IMPL_ARM, CPU_PART_CORTEX_A510, 0, 0)) {
1685 		if (CPU_REV(PCPU_GET(midr)) < 3) {
1686 			static u_int errata_id = 2051678;
1687 
1688 			*errata_list = &errata_id;
1689 			*errata_count = 1;
1690 			return (true);
1691 		}
1692 	}
1693 
1694 	return (false);
1695 }
1696 
1697 static void
1698 pmap_dbm_enable(const struct cpu_feat *feat __unused,
1699     cpu_feat_errata errata_status, u_int *errata_list __unused,
1700     u_int errata_count)
1701 {
1702 	uint64_t tcr;
1703 
1704 	/* Skip if there is an erratum affecting DBM */
1705 	if (errata_status != ERRATA_NONE)
1706 		return;
1707 
1708 	tcr = READ_SPECIALREG(tcr_el1) | TCR_HD;
1709 	WRITE_SPECIALREG(tcr_el1, tcr);
1710 	isb();
1711 	/* Flush the local TLB for the TCR_HD flag change */
1712 	dsb(nshst);
1713 	__asm __volatile("tlbi vmalle1");
1714 	dsb(nsh);
1715 	isb();
1716 }
1717 
1718 static struct cpu_feat feat_dbm = {
1719 	.feat_name		= "FEAT_HAFDBS (DBM)",
1720 	.feat_check		= pmap_dbm_check,
1721 	.feat_has_errata	= pmap_dbm_has_errata,
1722 	.feat_enable		= pmap_dbm_enable,
1723 	.feat_flags		= CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU,
1724 };
1725 DATA_SET(cpu_feat_set, feat_dbm);
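
/*
 * Editor's note (not part of pmap.c): a standalone sketch of the revision
 * test that gates the Cortex-A510 erratum above.  MIDR_EL1 encodes the
 * revision in bits [3:0] and the variant in bits [23:20]; the erratum
 * applies to r0p0 through r0p2.  The macros and sample value below are
 * local stand-ins, not the kernel's CPU_* definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define	DEMO_MIDR_REV(midr)	((midr) & 0xf)		/* Revision [3:0] */
#define	DEMO_MIDR_VAR(midr)	(((midr) >> 20) & 0xf)	/* Variant [23:20] */

int
main(void)
{
	uint32_t midr = 0x412fd460;	/* assumed example MIDR value */
	int affected;

	/* r<Variant>p<Revision>; affected when the CPU is r0p0..r0p2. */
	affected = DEMO_MIDR_VAR(midr) == 0 && DEMO_MIDR_REV(midr) < 3;
	printf("r%up%u affected: %s\n", DEMO_MIDR_VAR(midr),
	    DEMO_MIDR_REV(midr), affected ? "yes" : "no");
	return (0);
}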
1726 
1727 /*
1728  *	Initialize the pmap module.
1729  *
1730  *	Called by vm_mem_init(), to initialize any structures that the pmap
1731  *	system needs to map virtual memory.
1732  */
1733 void
1734 pmap_init(void)
1735 {
1736 	uint64_t mmfr1;
1737 	int i, vmid_bits;
1738 
1739 	/*
1740 	 * Are large page mappings enabled?
1741 	 */
1742 	TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
1743 	if (superpages_enabled) {
1744 		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
1745 		    ("pmap_init: can't assign to pagesizes[1]"));
1746 		pagesizes[1] = L3C_SIZE;
1747 		KASSERT(MAXPAGESIZES > 2 && pagesizes[2] == 0,
1748 		    ("pmap_init: can't assign to pagesizes[2]"));
1749 		pagesizes[2] = L2_SIZE;
1750 		if (L1_BLOCKS_SUPPORTED) {
1751 			KASSERT(MAXPAGESIZES > 3 && pagesizes[3] == 0,
1752 			    ("pmap_init: can't assign to pagesizes[3]"));
1753 			pagesizes[3] = L1_SIZE;
1754 		}
1755 	}
1756 
1757 	/*
1758 	 * Initialize the ASID allocator.
1759 	 */
1760 	pmap_init_asids(&asids,
1761 	    (READ_SPECIALREG(tcr_el1) & TCR_ASID_16) != 0 ? 16 : 8);
1762 
1763 	if (has_hyp()) {
1764 		mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
1765 		vmid_bits = 8;
1766 
1767 		if (ID_AA64MMFR1_VMIDBits_VAL(mmfr1) ==
1768 		    ID_AA64MMFR1_VMIDBits_16)
1769 			vmid_bits = 16;
1770 		pmap_init_asids(&vmids, vmid_bits);
1771 	}
1772 
1773 	/*
1774 	 * Initialize pv chunk lists.
1775 	 */
1776 	for (i = 0; i < PMAP_MEMDOM; i++) {
1777 		mtx_init(&pv_chunks[i].pvc_lock, "pmap pv chunk list", NULL,
1778 		    MTX_DEF);
1779 		TAILQ_INIT(&pv_chunks[i].pvc_list);
1780 	}
1781 	pmap_init_pv_table();
1782 
1783 	vm_initialized = 1;
1784 }
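
/*
 * Editor's note (not part of pmap.c): how the superpage sizes published in
 * pagesizes[] above follow from the translation granule.  The numbers below
 * assume a 4 KB granule (512-entry tables, 16-page contiguous L3C runs); a
 * 16 KB granule would give 2 MB / 32 MB / 64 GB instead.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint64_t page_shift = 12;			/* assumed 4 KB pages */
	const uint64_t ln_entries = UINT64_C(1) << (page_shift - 3);
	const uint64_t l3c_entries = 16;		/* contiguous-bit run */
	uint64_t l3c_size, l2_size, l1_size;

	l3c_size = (UINT64_C(1) << page_shift) * l3c_entries;
	l2_size = (UINT64_C(1) << page_shift) * ln_entries;
	l1_size = l2_size * ln_entries;

	printf("L3C %ju KB, L2 %ju MB, L1 %ju MB\n",
	    (uintmax_t)(l3c_size >> 10), (uintmax_t)(l2_size >> 20),
	    (uintmax_t)(l1_size >> 20));
	return (0);
}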
1785 
1786 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l1, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1787     "L1 (1GB/64GB) page mapping counters");
1788 
1789 static COUNTER_U64_DEFINE_EARLY(pmap_l1_demotions);
1790 SYSCTL_COUNTER_U64(_vm_pmap_l1, OID_AUTO, demotions, CTLFLAG_RD,
1791     &pmap_l1_demotions, "L1 (1GB/64GB) page demotions");
1792 
1793 SYSCTL_BOOL(_vm_pmap_l1, OID_AUTO, supported, CTLFLAG_RD, &pmap_l1_supported,
1794     0, "L1 blocks are supported");
1795 
1796 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2c, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1797     "L2C (32MB/1GB) page mapping counters");
1798 
1799 static COUNTER_U64_DEFINE_EARLY(pmap_l2c_demotions);
1800 SYSCTL_COUNTER_U64(_vm_pmap_l2c, OID_AUTO, demotions, CTLFLAG_RD,
1801     &pmap_l2c_demotions, "L2C (32MB/1GB) page demotions");
1802 
1803 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1804     "2MB page mapping counters");
1805 
1806 static COUNTER_U64_DEFINE_EARLY(pmap_l2_demotions);
1807 SYSCTL_COUNTER_U64(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
1808     &pmap_l2_demotions, "L2 (2MB/32MB) page demotions");
1809 
1810 static COUNTER_U64_DEFINE_EARLY(pmap_l2_mappings);
1811 SYSCTL_COUNTER_U64(_vm_pmap_l2, OID_AUTO, mappings, CTLFLAG_RD,
1812     &pmap_l2_mappings, "L2 (2MB/32MB) page mappings");
1813 
1814 static COUNTER_U64_DEFINE_EARLY(pmap_l2_p_failures);
1815 SYSCTL_COUNTER_U64(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
1816     &pmap_l2_p_failures, "L2 (2MB/32MB) page promotion failures");
1817 
1818 static COUNTER_U64_DEFINE_EARLY(pmap_l2_promotions);
1819 SYSCTL_COUNTER_U64(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
1820     &pmap_l2_promotions, "L2 (2MB/32MB) page promotions");
1821 
1822 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l3c, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1823     "L3C (64KB/2MB) page mapping counters");
1824 
1825 static COUNTER_U64_DEFINE_EARLY(pmap_l3c_demotions);
1826 SYSCTL_COUNTER_U64(_vm_pmap_l3c, OID_AUTO, demotions, CTLFLAG_RD,
1827     &pmap_l3c_demotions, "L3C (64KB/2MB) page demotions");
1828 
1829 static COUNTER_U64_DEFINE_EARLY(pmap_l3c_mappings);
1830 SYSCTL_COUNTER_U64(_vm_pmap_l3c, OID_AUTO, mappings, CTLFLAG_RD,
1831     &pmap_l3c_mappings, "L3C (64KB/2MB) page mappings");
1832 
1833 static COUNTER_U64_DEFINE_EARLY(pmap_l3c_p_failures);
1834 SYSCTL_COUNTER_U64(_vm_pmap_l3c, OID_AUTO, p_failures, CTLFLAG_RD,
1835     &pmap_l3c_p_failures, "L3C (64KB/2MB) page promotion failures");
1836 
1837 static COUNTER_U64_DEFINE_EARLY(pmap_l3c_promotions);
1838 SYSCTL_COUNTER_U64(_vm_pmap_l3c, OID_AUTO, promotions, CTLFLAG_RD,
1839     &pmap_l3c_promotions, "L3C (64KB/2MB) page promotions");
1840 
1841 /*
1842  * If the given value for "final_only" is false, then any cached intermediate-
1843  * level entries, i.e., L{0,1,2}_TABLE entries, are invalidated in addition to
1844  * any cached final-level entry, i.e., either an L{1,2}_BLOCK or L3_PAGE entry.
1845  * Otherwise, just the cached final-level entry is invalidated.
1846  */
1847 static __inline void
1848 pmap_s1_invalidate_kernel(uint64_t r, bool final_only)
1849 {
1850 	if (final_only)
1851 		__asm __volatile("tlbi vaale1is, %0" : : "r" (r));
1852 	else
1853 		__asm __volatile("tlbi vaae1is, %0" : : "r" (r));
1854 }
1855 
1856 static __inline void
1857 pmap_s1_invalidate_user(uint64_t r, bool final_only)
1858 {
1859 	if (final_only)
1860 		__asm __volatile("tlbi vale1is, %0" : : "r" (r));
1861 	else
1862 		__asm __volatile("tlbi vae1is, %0" : : "r" (r));
1863 }
1864 
1865 /*
1866  * Invalidates any cached final- and optionally intermediate-level TLB entries
1867  * for the specified virtual address in the given virtual address space.
1868  */
1869 static __inline void
1870 pmap_s1_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
1871 {
1872 	uint64_t r;
1873 
1874 	PMAP_ASSERT_STAGE1(pmap);
1875 
1876 	dsb(ishst);
1877 	r = TLBI_VA(va);
1878 	if (pmap == kernel_pmap) {
1879 		pmap_s1_invalidate_kernel(r, final_only);
1880 	} else {
1881 		r |= ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
1882 		pmap_s1_invalidate_user(r, final_only);
1883 	}
1884 	dsb(ish);
1885 	isb();
1886 }
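
/*
 * Editor's note (not part of pmap.c): the shape of the 64-bit operand built
 * for the TLBI instructions above.  For VA-based invalidation the operand
 * carries VA[55:12] in bits [43:0], and the user (ASID-tagged) forms add the
 * ASID in bits [63:48].  Macro names are local stand-ins for TLBI_VA() and
 * ASID_TO_OPERAND().
 */
#include <stdint.h>
#include <stdio.h>

#define	DEMO_TLBI_VA(va)	(((va) >> 12) & 0xfffffffffffULL) /* VA[55:12] */
#define	DEMO_ASID_TO_OPERAND(a)	((uint64_t)(a) << 48)

int
main(void)
{
	uint64_t va = 0x0000000812345000ULL;	/* assumed user VA */
	uint16_t asid = 42;
	uint64_t r;

	r = DEMO_TLBI_VA(va) | DEMO_ASID_TO_OPERAND(asid);
	printf("tlbi vae1is operand: %#jx\n", (uintmax_t)r);
	return (0);
}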
1887 
1888 static __inline void
1889 pmap_s2_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
1890 {
1891 	PMAP_ASSERT_STAGE2(pmap);
1892 	MPASS(pmap_stage2_invalidate_range != NULL);
1893 	pmap_stage2_invalidate_range(pmap_to_ttbr0(pmap), va, va + PAGE_SIZE,
1894 	    final_only);
1895 }
1896 
1897 static __inline void
1898 pmap_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
1899 {
1900 	if (pmap->pm_stage == PM_STAGE1)
1901 		pmap_s1_invalidate_page(pmap, va, final_only);
1902 	else
1903 		pmap_s2_invalidate_page(pmap, va, final_only);
1904 }
1905 
1906 /*
1907  * Use stride L{1,2}_SIZE when invalidating the TLB entries for L{1,2}_BLOCK
1908  * mappings.  Otherwise, use stride L3_SIZE.
1909  */
1910 static __inline void
1911 pmap_s1_invalidate_strided(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1912     vm_offset_t stride, bool final_only)
1913 {
1914 	uint64_t end, r, start;
1915 
1916 	PMAP_ASSERT_STAGE1(pmap);
1917 
1918 	dsb(ishst);
1919 	if (pmap == kernel_pmap) {
1920 		start = TLBI_VA(sva);
1921 		end = TLBI_VA(eva);
1922 		for (r = start; r < end; r += TLBI_VA(stride))
1923 			pmap_s1_invalidate_kernel(r, final_only);
1924 	} else {
1925 		start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
1926 		start |= TLBI_VA(sva);
1927 		end |= TLBI_VA(eva);
1928 		for (r = start; r < end; r += TLBI_VA(stride))
1929 			pmap_s1_invalidate_user(r, final_only);
1930 	}
1931 	dsb(ish);
1932 	isb();
1933 }
1934 
1935 /*
1936  * Invalidates any cached final- and optionally intermediate-level TLB entries
1937  * for the specified virtual address range in the given virtual address space.
1938  */
1939 static __inline void
1940 pmap_s1_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1941     bool final_only)
1942 {
1943 	pmap_s1_invalidate_strided(pmap, sva, eva, L3_SIZE, final_only);
1944 }
1945 
1946 static __inline void
1947 pmap_s2_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1948     bool final_only)
1949 {
1950 	PMAP_ASSERT_STAGE2(pmap);
1951 	MPASS(pmap_stage2_invalidate_range != NULL);
1952 	pmap_stage2_invalidate_range(pmap_to_ttbr0(pmap), sva, eva, final_only);
1953 }
1954 
1955 static __inline void
1956 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1957     bool final_only)
1958 {
1959 	if (pmap->pm_stage == PM_STAGE1)
1960 		pmap_s1_invalidate_range(pmap, sva, eva, final_only);
1961 	else
1962 		pmap_s2_invalidate_range(pmap, sva, eva, final_only);
1963 }
1964 
1965 /*
1966  * Invalidates all cached intermediate- and final-level TLB entries for the
1967  * given virtual address space.
1968  */
1969 static __inline void
1970 pmap_s1_invalidate_all(pmap_t pmap)
1971 {
1972 	uint64_t r;
1973 
1974 	PMAP_ASSERT_STAGE1(pmap);
1975 
1976 	dsb(ishst);
1977 	if (pmap == kernel_pmap) {
1978 		__asm __volatile("tlbi vmalle1is");
1979 	} else {
1980 		r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
1981 		__asm __volatile("tlbi aside1is, %0" : : "r" (r));
1982 	}
1983 	dsb(ish);
1984 	isb();
1985 }
1986 
1987 static __inline void
1988 pmap_s2_invalidate_all(pmap_t pmap)
1989 {
1990 	PMAP_ASSERT_STAGE2(pmap);
1991 	MPASS(pmap_stage2_invalidate_all != NULL);
1992 	pmap_stage2_invalidate_all(pmap_to_ttbr0(pmap));
1993 }
1994 
1995 static __inline void
1996 pmap_invalidate_all(pmap_t pmap)
1997 {
1998 	if (pmap->pm_stage == PM_STAGE1)
1999 		pmap_s1_invalidate_all(pmap);
2000 	else
2001 		pmap_s2_invalidate_all(pmap);
2002 }
2003 
2004 /*
2005  *	Routine:	pmap_extract
2006  *	Function:
2007  *		Extract the physical page address associated
2008  *		with the given map/virtual_address pair.
2009  */
2010 vm_paddr_t
2011 pmap_extract(pmap_t pmap, vm_offset_t va)
2012 {
2013 	pt_entry_t *pte, tpte;
2014 	vm_paddr_t pa;
2015 	int lvl;
2016 
2017 	pa = 0;
2018 	PMAP_LOCK(pmap);
2019 	/*
2020 	 * Find the block or page map for this virtual address. pmap_pte
2021 	 * will return either a valid block/page entry, or NULL.
2022 	 */
2023 	pte = pmap_pte(pmap, va, &lvl);
2024 	if (pte != NULL) {
2025 		tpte = pmap_load(pte);
2026 		pa = PTE_TO_PHYS(tpte);
2027 		switch(lvl) {
2028 		case 1:
2029 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
2030 			KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
2031 			    ("pmap_extract: Invalid L1 pte found: %lx",
2032 			    tpte & ATTR_DESCR_MASK));
2033 			pa |= (va & L1_OFFSET);
2034 			break;
2035 		case 2:
2036 			KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
2037 			    ("pmap_extract: Invalid L2 pte found: %lx",
2038 			    tpte & ATTR_DESCR_MASK));
2039 			pa |= (va & L2_OFFSET);
2040 			break;
2041 		case 3:
2042 			KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
2043 			    ("pmap_extract: Invalid L3 pte found: %lx",
2044 			    tpte & ATTR_DESCR_MASK));
2045 			pa |= (va & L3_OFFSET);
2046 			break;
2047 		}
2048 	}
2049 	PMAP_UNLOCK(pmap);
2050 	return (pa);
2051 }
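
/*
 * Editor's note (not part of pmap.c): reconstructing a physical address the
 * way pmap_extract() does.  A block or page descriptor supplies the aligned
 * output address; the low bits come from the VA, masked with that level's
 * offset mask.  The constants assume a 4 KB granule and a 48-bit output
 * address, and the sample values are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define	DEMO_L2_OFFSET	((UINT64_C(1) << 21) - 1)	/* within a 2 MB block */
#define	DEMO_PTE_ADDR	UINT64_C(0x0000fffffffff000)	/* OA bits [47:12] */

int
main(void)
{
	uint64_t l2_block_pte = 0x00000000c0000711ULL;	/* assumed descriptor */
	uint64_t va = 0xffff000000123456ULL;		/* assumed kernel VA */
	uint64_t pa;

	pa = (l2_block_pte & DEMO_PTE_ADDR) | (va & DEMO_L2_OFFSET);
	printf("pa = %#jx\n", (uintmax_t)pa);		/* 0xc0123456 */
	return (0);
}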
2052 
2053 /*
2054  *	Routine:	pmap_extract_and_hold
2055  *	Function:
2056  *		Atomically extract and hold the physical page
2057  *		with the given pmap and virtual address pair
2058  *		if that mapping permits the given protection.
2059  */
2060 vm_page_t
2061 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
2062 {
2063 	pt_entry_t *pte, tpte;
2064 	vm_offset_t off;
2065 	vm_page_t m;
2066 	int lvl;
2067 	bool use;
2068 
2069 	m = NULL;
2070 	PMAP_LOCK(pmap);
2071 	pte = pmap_pte(pmap, va, &lvl);
2072 	if (pte != NULL) {
2073 		tpte = pmap_load(pte);
2074 
2075 		KASSERT(lvl > 0 && lvl <= 3,
2076 		    ("pmap_extract_and_hold: Invalid level %d", lvl));
2077 		/*
2078 		 * Check that the pte is either an L3 page or an L1 or L2 block
2079 		 * entry. We can assume L1_BLOCK == L2_BLOCK.
2080 		 */
2081 		KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) ||
2082 		    (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
2083 		    ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
2084 		     tpte & ATTR_DESCR_MASK));
2085 
2086 		use = false;
2087 		if ((prot & VM_PROT_WRITE) == 0)
2088 			use = true;
2089 		else if (pmap->pm_stage == PM_STAGE1 &&
2090 		    (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW))
2091 			use = true;
2092 		else if (pmap->pm_stage == PM_STAGE2 &&
2093 		    ((tpte & ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)) ==
2094 		     ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)))
2095 			use = true;
2096 
2097 		if (use) {
2098 			switch (lvl) {
2099 			case 1:
2100 				off = va & L1_OFFSET;
2101 				break;
2102 			case 2:
2103 				off = va & L2_OFFSET;
2104 				break;
2105 			case 3:
2106 			default:
2107 				off = 0;
2108 			}
2109 			m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tpte) | off);
2110 			if (m != NULL && !vm_page_wire_mapped(m))
2111 				m = NULL;
2112 		}
2113 	}
2114 	PMAP_UNLOCK(pmap);
2115 	return (m);
2116 }
2117 
2118 /*
2119  * Returns true if the entire given kernel virtual address range is mapped
2120  */
2121 static bool
2122 pmap_kmapped_range(vm_offset_t sva, vm_size_t size)
2123 {
2124 	pt_entry_t *pte, tpte;
2125 	vm_offset_t eva;
2126 
2127 	KASSERT(sva >= VM_MIN_KERNEL_ADDRESS,
2128 	    ("%s: Invalid virtual address: %lx", __func__, sva));
2129 	MPASS(size != 0);
2130 	eva = sva + size - 1;
2131 	KASSERT(eva > sva, ("%s: Size too large: sva %lx, size %lx", __func__,
2132 	    sva, size));
2133 
2134 	while (sva <= eva) {
2135 		pte = pmap_l1(kernel_pmap, sva);
2136 		if (pte == NULL)
2137 			return (false);
2138 		tpte = pmap_load(pte);
2139 		if (tpte == 0)
2140 			return (false);
2141 		if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
2142 			sva = (sva & ~L1_OFFSET) + L1_SIZE;
2143 			continue;
2144 		}
2145 
2146 		pte = pmap_l1_to_l2(&tpte, sva);
2147 		tpte = pmap_load(pte);
2148 		if (tpte == 0)
2149 			return (false);
2150 		if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
2151 			sva = (sva & ~L2_OFFSET) + L2_SIZE;
2152 			continue;
2153 		}
2154 		pte = pmap_l2_to_l3(&tpte, sva);
2155 		tpte = pmap_load(pte);
2156 		if (tpte == 0)
2157 			return (false);
2158 		MPASS((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_PAGE);
2159 		if ((tpte & ATTR_CONTIGUOUS) == ATTR_CONTIGUOUS)
2160 			sva = (sva & ~L3C_OFFSET) + L3C_SIZE;
2161 		else
2162 			sva = (sva & ~L3_OFFSET) + L3_SIZE;
2163 	}
2164 
2165 	return (true);
2166 }
2167 
2168 /*
2169  * Walks the page tables to translate a kernel virtual address to a
2170  * physical address. Returns true if the kva is valid and stores the
2171  * physical address in pa if it is not NULL.
2172  *
2173  * See the comment above data_abort() for the rationale for specifying
2174  * NO_PERTHREAD_SSP here.
2175  */
2176 bool NO_PERTHREAD_SSP
2177 pmap_klookup(vm_offset_t va, vm_paddr_t *pa)
2178 {
2179 	pt_entry_t *pte, tpte;
2180 	register_t intr;
2181 	uint64_t par;
2182 
2183 	/*
2184 	 * Disable interrupts so we don't get interrupted between asking
2185 	 * for address translation, and getting the result back.
2186 	 */
2187 	intr = intr_disable();
2188 	par = arm64_address_translate_s1e1r(va);
2189 	intr_restore(intr);
2190 
2191 	if (PAR_SUCCESS(par)) {
2192 		if (pa != NULL)
2193 			*pa = (par & PAR_PA_MASK) | (va & PAR_LOW_MASK);
2194 		return (true);
2195 	}
2196 
2197 	/*
2198 	 * Fall back to walking the page table. The address translation
2199 	 * instruction may fail when the page is in a break-before-make
2200 	 * sequence. As we only clear the valid bit in said sequence we
2201 	 * can walk the page table to find the physical address.
2202 	 */
2203 
2204 	pte = pmap_l1(kernel_pmap, va);
2205 	if (pte == NULL)
2206 		return (false);
2207 
2208 	/*
2209 	 * A concurrent pmap_update_entry() will clear the entry's valid bit
2210 	 * but leave the rest of the entry unchanged.  Therefore, we treat a
2211 	 * non-zero entry as being valid, and we ignore the valid bit when
2212 	 * determining whether the entry maps a block, page, or table.
2213 	 */
2214 	tpte = pmap_load(pte);
2215 	if (tpte == 0)
2216 		return (false);
2217 	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
2218 		if (pa != NULL)
2219 			*pa = PTE_TO_PHYS(tpte) | (va & L1_OFFSET);
2220 		return (true);
2221 	}
2222 	pte = pmap_l1_to_l2(&tpte, va);
2223 	tpte = pmap_load(pte);
2224 	if (tpte == 0)
2225 		return (false);
2226 	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
2227 		if (pa != NULL)
2228 			*pa = PTE_TO_PHYS(tpte) | (va & L2_OFFSET);
2229 		return (true);
2230 	}
2231 	pte = pmap_l2_to_l3(&tpte, va);
2232 	tpte = pmap_load(pte);
2233 	if (tpte == 0)
2234 		return (false);
2235 	if (pa != NULL)
2236 		*pa = PTE_TO_PHYS(tpte) | (va & L3_OFFSET);
2237 	return (true);
2238 }
2239 
2240 /*
2241  *	Routine:	pmap_kextract
2242  *	Function:
2243  *		Extract the physical page address associated with the given kernel
2244  *		virtual address.
2245  */
2246 vm_paddr_t
2247 pmap_kextract(vm_offset_t va)
2248 {
2249 	vm_paddr_t pa;
2250 
2251 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
2252 		return (DMAP_TO_PHYS(va));
2253 
2254 	if (pmap_klookup(va, &pa) == false)
2255 		return (0);
2256 	return (pa);
2257 }
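
/*
 * Editor's note (not part of pmap.c): the direct-map fast path taken by
 * pmap_kextract().  A VA inside the DMAP window converts to a physical
 * address with simple offset arithmetic; only VAs outside the window need
 * the pmap_klookup() walk.  The window bounds and physical base below are
 * assumed demo values, not the kernel's constants.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint64_t dmap_min = 0xffffa00000000000ULL;	/* assumed */
	const uint64_t dmap_max = 0xffffa0ffffffffffULL;	/* assumed */
	const uint64_t dmap_phys_base = 0x80000000ULL;		/* assumed */
	uint64_t va = dmap_min + 0x1234000ULL;
	uint64_t pa;

	if (va >= dmap_min && va < dmap_max) {
		pa = (va - dmap_min) + dmap_phys_base;
		printf("direct map: pa = %#jx\n", (uintmax_t)pa);
	} else {
		printf("fall back to a page table walk (pmap_klookup)\n");
	}
	return (0);
}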
2258 
2259 /***************************************************
2260  * Low level mapping routines.....
2261  ***************************************************/
2262 
2263 void
2264 pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
2265 {
2266 	pd_entry_t *pde;
2267 	pt_entry_t attr, old_l3e, *pte;
2268 	vm_offset_t va;
2269 	vm_page_t mpte;
2270 	int error, lvl;
2271 
2272 	KASSERT((pa & L3_OFFSET) == 0,
2273 	    ("pmap_kenter: Invalid physical address"));
2274 	KASSERT((sva & L3_OFFSET) == 0,
2275 	    ("pmap_kenter: Invalid virtual address"));
2276 	KASSERT((size & PAGE_MASK) == 0,
2277 	    ("pmap_kenter: Mapping is not page-sized"));
2278 
2279 	attr = ATTR_AF | pmap_sh_attr | ATTR_S1_AP(ATTR_S1_AP_RW) |
2280 	    ATTR_S1_XN | ATTR_KERN_GP | ATTR_S1_IDX(mode);
2281 	old_l3e = 0;
2282 	va = sva;
2283 	while (size != 0) {
2284 		pde = pmap_pde(kernel_pmap, va, &lvl);
2285 		KASSERT(pde != NULL,
2286 		    ("pmap_kenter: Invalid page entry, va: 0x%lx", va));
2287 		KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
2288 
2289 		/*
2290 		 * If we have an aligned, contiguous chunk of L2_SIZE, try
2291 		 * to create an L2_BLOCK mapping.
2292 		 */
2293 		if ((va & L2_OFFSET) == 0 && size >= L2_SIZE &&
2294 		    (pa & L2_OFFSET) == 0 && vm_initialized) {
2295 			mpte = PTE_TO_VM_PAGE(pmap_load(pde));
2296 			KASSERT(pmap_every_pte_zero(VM_PAGE_TO_PHYS(mpte)),
2297 			    ("pmap_kenter: Unexpected mapping"));
2298 			PMAP_LOCK(kernel_pmap);
2299 			error = pmap_insert_pt_page(kernel_pmap, mpte, false,
2300 			    false);
2301 			if (error == 0) {
2302 				attr &= ~ATTR_CONTIGUOUS;
2303 
2304 				/*
2305 				 * Although the page table page "mpte" should
2306 				 * be devoid of mappings, the TLB might hold
2307 				 * intermediate entries that reference it, so
2308 				 * we perform a single-page invalidation.
2309 				 */
2310 				pmap_update_entry(kernel_pmap, pde,
2311 				    PHYS_TO_PTE(pa) | attr | L2_BLOCK, va,
2312 				    PAGE_SIZE);
2313 			}
2314 			PMAP_UNLOCK(kernel_pmap);
2315 			if (error == 0) {
2316 				va += L2_SIZE;
2317 				pa += L2_SIZE;
2318 				size -= L2_SIZE;
2319 				continue;
2320 			}
2321 		}
2322 
2323 		/*
2324 		 * If we have an aligned, contiguous chunk of L3C_ENTRIES
2325 		 * L3 pages, set the contiguous bit within each PTE so that
2326 		 * the chunk can be cached using only one TLB entry.
2327 		 */
2328 		if ((va & L3C_OFFSET) == 0 && (pa & L3C_OFFSET) == 0) {
2329 			if (size >= L3C_SIZE)
2330 				attr |= ATTR_CONTIGUOUS;
2331 			else
2332 				attr &= ~ATTR_CONTIGUOUS;
2333 		}
2334 
2335 		pte = pmap_l2_to_l3(pde, va);
2336 		old_l3e |= pmap_load_store(pte, PHYS_TO_PTE(pa) | attr |
2337 		    L3_PAGE);
2338 
2339 		va += PAGE_SIZE;
2340 		pa += PAGE_SIZE;
2341 		size -= PAGE_SIZE;
2342 	}
2343 	if ((old_l3e & ATTR_DESCR_VALID) != 0)
2344 		pmap_s1_invalidate_range(kernel_pmap, sva, va, true);
2345 	else {
2346 		/*
2347 		 * Because the old entries were invalid and the new mappings
2348 		 * are not executable, an isb is not required.
2349 		 */
2350 		dsb(ishst);
2351 	}
2352 }
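
/*
 * Editor's note (not part of pmap.c): the mapping-size selection used by
 * pmap_kenter() above.  An L2 block is only possible when VA, PA, and the
 * remaining size are all 2 MB aligned; failing that, 64 KB alignment still
 * allows an L3C contiguous run; otherwise plain 4 KB pages are used.  Sizes
 * assume a 4 KB granule.
 */
#include <stdint.h>
#include <stdio.h>

#define	DEMO_L2_SIZE	(UINT64_C(2) << 20)
#define	DEMO_L3C_SIZE	(UINT64_C(64) << 10)

static const char *
demo_pick_mapping(uint64_t va, uint64_t pa, uint64_t size)
{
	if ((va & (DEMO_L2_SIZE - 1)) == 0 && (pa & (DEMO_L2_SIZE - 1)) == 0 &&
	    size >= DEMO_L2_SIZE)
		return ("L2 block (2 MB)");
	if ((va & (DEMO_L3C_SIZE - 1)) == 0 &&
	    (pa & (DEMO_L3C_SIZE - 1)) == 0 && size >= DEMO_L3C_SIZE)
		return ("L3C contiguous run (64 KB)");
	return ("individual L3 pages (4 KB)");
}

int
main(void)
{
	printf("%s\n", demo_pick_mapping(0x200000, 0x40200000, 4 << 20));
	printf("%s\n", demo_pick_mapping(0x210000, 0x40210000, 256 << 10));
	printf("%s\n", demo_pick_mapping(0x211000, 0x40211000, 64 << 10));
	return (0);
}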
2353 
2354 void
2355 pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
2356 {
2357 
2358 	pmap_kenter(sva, size, pa, VM_MEMATTR_DEVICE);
2359 }
2360 
2361 /*
2362  * Remove a page from the kernel pagetables.
2363  */
2364 void
2365 pmap_kremove(vm_offset_t va)
2366 {
2367 	pt_entry_t *pte;
2368 
2369 	pte = pmap_pte_exists(kernel_pmap, va, 3, __func__);
2370 	KASSERT((pmap_load(pte) & ATTR_CONTIGUOUS) == 0,
2371 	    ("pmap_kremove: unexpected ATTR_CONTIGUOUS"));
2372 	pmap_clear(pte);
2373 	pmap_s1_invalidate_page(kernel_pmap, va, true);
2374 }
2375 
2376 /*
2377  * Remove the specified range of mappings from the kernel address space.
2378  *
2379  * Should only be applied to mappings that were created by pmap_kenter() or
2380  * pmap_kenter_device().  Nothing about this function is actually specific
2381  * to device mappings.
2382  */
2383 void
2384 pmap_kremove_device(vm_offset_t sva, vm_size_t size)
2385 {
2386 	pt_entry_t *ptep, *ptep_end;
2387 	vm_offset_t va;
2388 	int lvl;
2389 
2390 	KASSERT((sva & L3_OFFSET) == 0,
2391 	    ("pmap_kremove_device: Invalid virtual address"));
2392 	KASSERT((size & PAGE_MASK) == 0,
2393 	    ("pmap_kremove_device: Mapping is not page-sized"));
2394 
2395 	va = sva;
2396 	while (size != 0) {
2397 		ptep = pmap_pte(kernel_pmap, va, &lvl);
2398 		KASSERT(ptep != NULL, ("Invalid page table, va: 0x%lx", va));
2399 		switch (lvl) {
2400 		case 2:
2401 			KASSERT((va & L2_OFFSET) == 0,
2402 			    ("Unaligned virtual address"));
2403 			KASSERT(size >= L2_SIZE, ("Insufficient size"));
2404 
2405 			if (va != sva) {
2406 				pmap_s1_invalidate_range(kernel_pmap, sva, va,
2407 				    true);
2408 			}
2409 			pmap_clear(ptep);
2410 			pmap_s1_invalidate_page(kernel_pmap, va, true);
2411 			PMAP_LOCK(kernel_pmap);
2412 			pmap_remove_kernel_l2(kernel_pmap, ptep, va);
2413 			PMAP_UNLOCK(kernel_pmap);
2414 
2415 			va += L2_SIZE;
2416 			sva = va;
2417 			size -= L2_SIZE;
2418 			break;
2419 		case 3:
2420 			if ((pmap_load(ptep) & ATTR_CONTIGUOUS) != 0) {
2421 				KASSERT((va & L3C_OFFSET) == 0,
2422 				    ("Unaligned L3C virtual address"));
2423 				KASSERT(size >= L3C_SIZE,
2424 				    ("Insufficient L3C size"));
2425 
2426 				ptep_end = ptep + L3C_ENTRIES;
2427 				for (; ptep < ptep_end; ptep++)
2428 					pmap_clear(ptep);
2429 
2430 				va += L3C_SIZE;
2431 				size -= L3C_SIZE;
2432 				break;
2433 			}
2434 			pmap_clear(ptep);
2435 
2436 			va += PAGE_SIZE;
2437 			size -= PAGE_SIZE;
2438 			break;
2439 		default:
2440 			__assert_unreachable();
2441 			break;
2442 		}
2443 	}
2444 	if (va != sva)
2445 		pmap_s1_invalidate_range(kernel_pmap, sva, va, true);
2446 }
2447 
2448 /*
2449  *	Used to map a range of physical addresses into kernel
2450  *	virtual address space.
2451  *
2452  *	The value passed in '*virt' is a suggested virtual address for
2453  *	the mapping. Architectures which can support a direct-mapped
2454  *	physical to virtual region can return the appropriate address
2455  *	within that region, leaving '*virt' unchanged. Other
2456  *	architectures should map the pages starting at '*virt' and
2457  *	update '*virt' with the first usable address after the mapped
2458  *	region.
2459  */
2460 vm_offset_t
2461 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
2462 {
2463 	return PHYS_TO_DMAP(start);
2464 }
2465 
2466 /*
2467  * Add a list of wired pages to the kva.
2468  * This routine is only used for temporary
2469  * kernel mappings that do not need to have
2470  * page modification or references recorded.
2471  * Note that old mappings are simply written
2472  * over.  The page *must* be wired.
2473  * Note: SMP coherent.  Uses a ranged shootdown IPI.
2474  * Note: SMP coherent.  Uses ranged, broadcast TLB invalidation.
2475 void
2476 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
2477 {
2478 	pd_entry_t *pde;
2479 	pt_entry_t attr, old_l3e, *pte;
2480 	vm_offset_t va;
2481 	vm_page_t m;
2482 	int i, lvl;
2483 
2484 	old_l3e = 0;
2485 	va = sva;
2486 	for (i = 0; i < count; i++) {
2487 		pde = pmap_pde(kernel_pmap, va, &lvl);
2488 		KASSERT(pde != NULL,
2489 		    ("pmap_qenter: Invalid page entry, va: 0x%lx", va));
2490 		KASSERT(lvl == 2,
2491 		    ("pmap_qenter: Invalid level %d", lvl));
2492 
2493 		m = ma[i];
2494 		attr = ATTR_AF | pmap_sh_attr |
2495 		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
2496 		    ATTR_KERN_GP | ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
2497 		pte = pmap_l2_to_l3(pde, va);
2498 		old_l3e |= pmap_load_store(pte, VM_PAGE_TO_PTE(m) | attr);
2499 
2500 		va += L3_SIZE;
2501 	}
2502 	if ((old_l3e & ATTR_DESCR_VALID) != 0)
2503 		pmap_s1_invalidate_range(kernel_pmap, sva, va, true);
2504 	else {
2505 		/*
2506 		 * Because the old entries were invalid and the new mappings
2507 		 * are not executable, an isb is not required.
2508 		 */
2509 		dsb(ishst);
2510 	}
2511 }
2512 
2513 /*
2514  * This routine tears out page mappings from the
2515  * kernel -- it is meant only for temporary mappings.
2516  */
2517 void
2518 pmap_qremove(vm_offset_t sva, int count)
2519 {
2520 	pt_entry_t *pte;
2521 	vm_offset_t va;
2522 
2523 	KASSERT(ADDR_IS_CANONICAL(sva),
2524 	    ("%s: Address not in canonical form: %lx", __func__, sva));
2525 	KASSERT(ADDR_IS_KERNEL(sva), ("usermode va %lx", sva));
2526 
2527 	va = sva;
2528 	while (count-- > 0) {
2529 		pte = pmap_pte_exists(kernel_pmap, va, 3, NULL);
2530 		if (pte != NULL) {
2531 			pmap_clear(pte);
2532 		}
2533 
2534 		va += PAGE_SIZE;
2535 	}
2536 	pmap_s1_invalidate_range(kernel_pmap, sva, va, true);
2537 }
2538 
2539 /***************************************************
2540  * Page table page management routines.....
2541  ***************************************************/
2542 /*
2543  * Schedule the specified unused page table page to be freed.  Specifically,
2544  * add the page to the specified list of pages that will be released to the
2545  * physical memory manager after the TLB has been updated.
2546  */
2547 static __inline void
2548 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO)
2549 {
2550 
2551 	if (set_PG_ZERO)
2552 		m->flags |= PG_ZERO;
2553 	else
2554 		m->flags &= ~PG_ZERO;
2555 	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
2556 }
2557 
2558 /*
2559  * Decrements a page table page's reference count, which is used to record the
2560  * number of valid page table entries within the page.  If the reference count
2561  * drops to zero, then the page table page is unmapped.  Returns true if the
2562  * page table page was unmapped and false otherwise.
2563  */
2564 static inline bool
2565 pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
2566 {
2567 
2568 	--m->ref_count;
2569 	if (m->ref_count == 0) {
2570 		_pmap_unwire_l3(pmap, va, m, free);
2571 		return (true);
2572 	} else
2573 		return (false);
2574 }
2575 
2576 static void
2577 _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
2578 {
2579 
2580 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2581 	/*
2582 	 * unmap the page table page
2583 	 */
2584 	if (m->pindex >= (NUL2E + NUL1E)) {
2585 		/* l1 page */
2586 		pd_entry_t *l0;
2587 
2588 		l0 = pmap_l0(pmap, va);
2589 		pmap_clear(l0);
2590 	} else if (m->pindex >= NUL2E) {
2591 		/* l2 page */
2592 		pd_entry_t *l1;
2593 
2594 		l1 = pmap_l1(pmap, va);
2595 		pmap_clear(l1);
2596 	} else {
2597 		/* l3 page */
2598 		pd_entry_t *l2;
2599 
2600 		l2 = pmap_l2(pmap, va);
2601 		pmap_clear(l2);
2602 	}
2603 	pmap_resident_count_dec(pmap, 1);
2604 	if (m->pindex < NUL2E) {
2605 		/* We just released an l3, unhold the matching l2 */
2606 		pd_entry_t *l1, tl1;
2607 		vm_page_t l2pg;
2608 
2609 		l1 = pmap_l1(pmap, va);
2610 		tl1 = pmap_load(l1);
2611 		l2pg = PTE_TO_VM_PAGE(tl1);
2612 		pmap_unwire_l3(pmap, va, l2pg, free);
2613 	} else if (m->pindex < (NUL2E + NUL1E)) {
2614 		/* We just released an l2, unhold the matching l1 */
2615 		pd_entry_t *l0, tl0;
2616 		vm_page_t l1pg;
2617 
2618 		l0 = pmap_l0(pmap, va);
2619 		tl0 = pmap_load(l0);
2620 		l1pg = PTE_TO_VM_PAGE(tl0);
2621 		pmap_unwire_l3(pmap, va, l1pg, free);
2622 	}
2623 	pmap_invalidate_page(pmap, va, false);
2624 
2625 	/*
2626 	 * Put page on a list so that it is released after
2627 	 * *ALL* TLB shootdown is done
2628 	 */
2629 	pmap_add_delayed_free_list(m, free, true);
2630 }
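
/*
 * Editor's note (not part of pmap.c): how a page table page's pindex encodes
 * its level, as tested in _pmap_unwire_l3() above.  With a 4 KB granule and
 * 512-entry tables there is one potential L3 table page per L2 entry and one
 * potential L2 table page per L1 entry; the stand-in constants below are
 * derived accordingly rather than taken from the kernel headers.
 */
#include <stdint.h>
#include <stdio.h>

#define	DEMO_NUL1E	(UINT64_C(512) * 512)		/* total L1 entries */
#define	DEMO_NUL2E	(UINT64_C(512) * 512 * 512)	/* total L2 entries */

static const char *
demo_ptp_level(uint64_t pindex)
{
	if (pindex >= DEMO_NUL2E + DEMO_NUL1E)
		return ("L1 table page (referenced by an L0 entry)");
	if (pindex >= DEMO_NUL2E)
		return ("L2 table page (referenced by an L1 entry)");
	return ("L3 table page (referenced by an L2 entry)");
}

int
main(void)
{
	printf("%s\n", demo_ptp_level(7));
	printf("%s\n", demo_ptp_level(DEMO_NUL2E + 3));
	printf("%s\n", demo_ptp_level(DEMO_NUL2E + DEMO_NUL1E));
	return (0);
}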
2631 
2632 /*
2633  * After removing a page table entry, this routine is used to
2634  * conditionally free the page, and manage the reference count.
2635  */
2636 static int
2637 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
2638     struct spglist *free)
2639 {
2640 	vm_page_t mpte;
2641 
2642 	KASSERT(ADDR_IS_CANONICAL(va),
2643 	    ("%s: Address not in canonical form: %lx", __func__, va));
2644 	if (ADDR_IS_KERNEL(va))
2645 		return (0);
2646 	KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
2647 	mpte = PTE_TO_VM_PAGE(ptepde);
2648 	return (pmap_unwire_l3(pmap, va, mpte, free));
2649 }
2650 
2651 /*
2652  * Release a page table page reference after a failed attempt to create a
2653  * mapping.
2654  */
2655 static void
2656 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
2657 {
2658 	struct spglist free;
2659 
2660 	SLIST_INIT(&free);
2661 	if (pmap_unwire_l3(pmap, va, mpte, &free))
2662 		vm_page_free_pages_toq(&free, true);
2663 }
2664 
2665 void
2666 pmap_pinit0(pmap_t pmap)
2667 {
2668 
2669 	PMAP_LOCK_INIT(pmap);
2670 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
2671 	pmap->pm_l0_paddr = READ_SPECIALREG(ttbr0_el1);
2672 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
2673 	TAILQ_INIT(&pmap->pm_pvchunk);
2674 	vm_radix_init(&pmap->pm_root);
2675 	pmap->pm_cookie = COOKIE_FROM(ASID_RESERVED_FOR_PID_0, INT_MIN);
2676 	pmap->pm_stage = PM_STAGE1;
2677 	pmap->pm_levels = 4;
2678 	pmap->pm_ttbr = pmap->pm_l0_paddr;
2679 	pmap->pm_asid_set = &asids;
2680 	pmap->pm_bti = NULL;
2681 
2682 	PCPU_SET(curpmap, pmap);
2683 }
2684 
2685 int
2686 pmap_pinit_stage(pmap_t pmap, enum pmap_stage stage, int levels)
2687 {
2688 	vm_page_t m;
2689 
2690 	/*
2691 	 * allocate the l0 page
2692 	 */
2693 	m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
2694 	    VM_ALLOC_ZERO);
2695 	pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(m);
2696 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
2697 
2698 	TAILQ_INIT(&pmap->pm_pvchunk);
2699 	vm_radix_init(&pmap->pm_root);
2700 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
2701 	pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX);
2702 
2703 	MPASS(levels == 3 || levels == 4);
2704 	pmap->pm_levels = levels;
2705 	pmap->pm_stage = stage;
2706 	pmap->pm_bti = NULL;
2707 	switch (stage) {
2708 	case PM_STAGE1:
2709 		pmap->pm_asid_set = &asids;
2710 		if (pmap_bti_support) {
2711 			pmap->pm_bti = malloc(sizeof(struct rangeset), M_DEVBUF,
2712 			    M_ZERO | M_WAITOK);
2713 			rangeset_init(pmap->pm_bti, bti_dup_range,
2714 			    bti_free_range, pmap, M_NOWAIT);
2715 		}
2716 		break;
2717 	case PM_STAGE2:
2718 		pmap->pm_asid_set = &vmids;
2719 		break;
2720 	default:
2721 		panic("%s: Invalid pmap type %d", __func__, stage);
2722 		break;
2723 	}
2724 
2725 	/* XXX Temporarily disable deferred ASID allocation. */
2726 	pmap_alloc_asid(pmap);
2727 
2728 	/*
2729 	 * Allocate the level 1 entry to use as the root. This will increase
2730 	 * the refcount on the level 1 page so it won't be removed until
2731 	 * pmap_release() is called.
2732 	 */
2733 	if (pmap->pm_levels == 3) {
2734 		PMAP_LOCK(pmap);
2735 		m = _pmap_alloc_l3(pmap, NUL2E + NUL1E, NULL);
2736 		PMAP_UNLOCK(pmap);
2737 	}
2738 	pmap->pm_ttbr = VM_PAGE_TO_PHYS(m);
2739 
2740 	return (1);
2741 }
2742 
2743 int
2744 pmap_pinit(pmap_t pmap)
2745 {
2746 
2747 	return (pmap_pinit_stage(pmap, PM_STAGE1, 4));
2748 }
2749 
2750 /*
2751  * This routine is called if the desired page table page does not exist.
2752  *
2753  * If page table page allocation fails, this routine may sleep before
2754  * returning NULL.  It sleeps only if a lock pointer was given.
2755  *
2756  * Note: If a page allocation fails at page table level two or three,
2757  * one or two pages may be held during the wait, only to be released
2758  * afterwards.  This conservative approach is easily argued to avoid
2759  * race conditions.
2760  */
2761 static vm_page_t
2762 _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
2763 {
2764 	vm_page_t m, l1pg, l2pg;
2765 
2766 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2767 
2768 	/*
2769 	 * Allocate a page table page.
2770 	 */
2771 	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
2772 		if (lockp != NULL) {
2773 			RELEASE_PV_LIST_LOCK(lockp);
2774 			PMAP_UNLOCK(pmap);
2775 			vm_wait(NULL);
2776 			PMAP_LOCK(pmap);
2777 		}
2778 
2779 		/*
2780 		 * Indicate the need to retry.  While waiting, the page table
2781 		 * page may have been allocated.
2782 		 */
2783 		return (NULL);
2784 	}
2785 	m->pindex = ptepindex;
2786 
2787 	/*
2788 	 * Because of AArch64's weak memory consistency model, we must have a
2789 	 * barrier here to ensure that the stores for zeroing "m", whether by
2790 	 * pmap_zero_page() or an earlier function, are visible before adding
2791 	 * "m" to the page table.  Otherwise, a page table walk by another
2792 	 * processor's MMU could see the mapping to "m" and a stale, non-zero
2793 	 * PTE within "m".
2794 	 */
2795 	dmb(ishst);
2796 
2797 	/*
2798 	 * Map the pagetable page into the process address space, if
2799 	 * it isn't already there.
2800 	 */
2801 
2802 	if (ptepindex >= (NUL2E + NUL1E)) {
2803 		pd_entry_t *l0p, l0e;
2804 		vm_pindex_t l0index;
2805 
2806 		l0index = ptepindex - (NUL2E + NUL1E);
2807 		l0p = &pmap->pm_l0[l0index];
2808 		KASSERT((pmap_load(l0p) & ATTR_DESCR_VALID) == 0,
2809 		    ("%s: L0 entry %#lx is valid", __func__, pmap_load(l0p)));
2810 		l0e = VM_PAGE_TO_PTE(m) | L0_TABLE;
2811 
2812 		/*
2813 		 * Mark all kernel memory as not accessible from userspace
2814 		 * and userspace memory as not executable from the kernel.
2815 		 * This has been done for the bootstrap L0 entries in
2816 		 * locore.S.
2817 		 */
2818 		if (pmap == kernel_pmap)
2819 			l0e |= TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0;
2820 		else
2821 			l0e |= TATTR_PXN_TABLE;
2822 		pmap_store(l0p, l0e);
2823 	} else if (ptepindex >= NUL2E) {
2824 		vm_pindex_t l0index, l1index;
2825 		pd_entry_t *l0, *l1;
2826 		pd_entry_t tl0;
2827 
2828 		l1index = ptepindex - NUL2E;
2829 		l0index = l1index >> Ln_ENTRIES_SHIFT;
2830 
2831 		l0 = &pmap->pm_l0[l0index];
2832 		tl0 = pmap_load(l0);
2833 		if (tl0 == 0) {
2834 			/* recurse for allocating page dir */
2835 			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index,
2836 			    lockp) == NULL) {
2837 				vm_page_unwire_noq(m);
2838 				vm_page_free_zero(m);
2839 				return (NULL);
2840 			}
2841 		} else {
2842 			l1pg = PTE_TO_VM_PAGE(tl0);
2843 			l1pg->ref_count++;
2844 		}
2845 
2846 		l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l0)));
2847 		l1 = &l1[ptepindex & Ln_ADDR_MASK];
2848 		KASSERT((pmap_load(l1) & ATTR_DESCR_VALID) == 0,
2849 		    ("%s: L1 entry %#lx is valid", __func__, pmap_load(l1)));
2850 		pmap_store(l1, VM_PAGE_TO_PTE(m) | L1_TABLE);
2851 	} else {
2852 		vm_pindex_t l0index, l1index;
2853 		pd_entry_t *l0, *l1, *l2;
2854 		pd_entry_t tl0, tl1;
2855 
2856 		l1index = ptepindex >> Ln_ENTRIES_SHIFT;
2857 		l0index = l1index >> Ln_ENTRIES_SHIFT;
2858 
2859 		l0 = &pmap->pm_l0[l0index];
2860 		tl0 = pmap_load(l0);
2861 		if (tl0 == 0) {
2862 			/* recurse for allocating page dir */
2863 			if (_pmap_alloc_l3(pmap, NUL2E + l1index,
2864 			    lockp) == NULL) {
2865 				vm_page_unwire_noq(m);
2866 				vm_page_free_zero(m);
2867 				return (NULL);
2868 			}
2869 			tl0 = pmap_load(l0);
2870 			l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(tl0));
2871 			l1 = &l1[l1index & Ln_ADDR_MASK];
2872 		} else {
2873 			l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(tl0));
2874 			l1 = &l1[l1index & Ln_ADDR_MASK];
2875 			tl1 = pmap_load(l1);
2876 			if (tl1 == 0) {
2877 				/* recurse for allocating page dir */
2878 				if (_pmap_alloc_l3(pmap, NUL2E + l1index,
2879 				    lockp) == NULL) {
2880 					vm_page_unwire_noq(m);
2881 					vm_page_free_zero(m);
2882 					return (NULL);
2883 				}
2884 			} else {
2885 				l2pg = PTE_TO_VM_PAGE(tl1);
2886 				l2pg->ref_count++;
2887 			}
2888 		}
2889 
2890 		l2 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l1)));
2891 		l2 = &l2[ptepindex & Ln_ADDR_MASK];
2892 		KASSERT((pmap_load(l2) & ATTR_DESCR_VALID) == 0,
2893 		    ("%s: L2 entry %#lx is valid", __func__, pmap_load(l2)));
2894 		pmap_store(l2, VM_PAGE_TO_PTE(m) | L2_TABLE);
2895 	}
2896 
2897 	pmap_resident_count_inc(pmap, 1);
2898 
2899 	return (m);
2900 }
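
/*
 * Editor's note (not part of pmap.c): an analogy, in portable C11, for the
 * dmb(ishst) above.  Stores that initialize a page table page must be
 * ordered before the store that publishes it; release/acquire ordering gives
 * ordinary CPU observers the same guarantee the barrier gives the MMU table
 * walker here.  This is an illustration only, not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct demo_table { unsigned long slot[8]; };

static struct demo_table table_storage;
static _Atomic(struct demo_table *) published;

static void
demo_publish(void)
{
	memset(&table_storage, 0, sizeof(table_storage));	/* zero first */
	/*
	 * The release store plays the role of dmb(ishst): the zeroing above
	 * is visible before any acquire reader can observe the pointer.
	 */
	atomic_store_explicit(&published, &table_storage,
	    memory_order_release);
}

int
main(void)
{
	struct demo_table *t;

	demo_publish();
	t = atomic_load_explicit(&published, memory_order_acquire);
	printf("slot[0] = %lu\n", t->slot[0]);
	return (0);
}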
2901 
2902 static pd_entry_t *
2903 pmap_alloc_l2(pmap_t pmap, vm_offset_t va, vm_page_t *l2pgp,
2904     struct rwlock **lockp)
2905 {
2906 	pd_entry_t *l1, *l2;
2907 	vm_page_t l2pg;
2908 	vm_pindex_t l2pindex;
2909 
2910 	KASSERT(ADDR_IS_CANONICAL(va),
2911 	    ("%s: Address not in canonical form: %lx", __func__, va));
2912 
2913 retry:
2914 	l1 = pmap_l1(pmap, va);
2915 	if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) {
2916 		l2 = pmap_l1_to_l2(l1, va);
2917 		if (!ADDR_IS_KERNEL(va)) {
2918 			/* Add a reference to the L2 page. */
2919 			l2pg = PTE_TO_VM_PAGE(pmap_load(l1));
2920 			l2pg->ref_count++;
2921 		} else
2922 			l2pg = NULL;
2923 	} else if (!ADDR_IS_KERNEL(va)) {
2924 		/* Allocate a L2 page. */
2925 		l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
2926 		l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
2927 		if (l2pg == NULL) {
2928 			if (lockp != NULL)
2929 				goto retry;
2930 			else
2931 				return (NULL);
2932 		}
2933 		l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
2934 		l2 = &l2[pmap_l2_index(va)];
2935 	} else
2936 		panic("pmap_alloc_l2: missing page table page for va %#lx",
2937 		    va);
2938 	*l2pgp = l2pg;
2939 	return (l2);
2940 }
2941 
2942 static vm_page_t
2943 pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
2944 {
2945 	vm_pindex_t ptepindex;
2946 	pd_entry_t *pde, tpde;
2947 #ifdef INVARIANTS
2948 	pt_entry_t *pte;
2949 #endif
2950 	vm_page_t m;
2951 	int lvl;
2952 
2953 	/*
2954 	 * Calculate pagetable page index
2955 	 */
2956 	ptepindex = pmap_l2_pindex(va);
2957 retry:
2958 	/*
2959 	 * Get the page directory entry
2960 	 */
2961 	pde = pmap_pde(pmap, va, &lvl);
2962 
2963 	/*
2964 	 * If the page table page is mapped, we just increment the hold count,
2965 	 * and activate it. If we get a level 2 pde it will point to a level 3
2966 	 * table.
2967 	 */
2968 	switch (lvl) {
2969 	case -1:
2970 		break;
2971 	case 0:
2972 #ifdef INVARIANTS
2973 		pte = pmap_l0_to_l1(pde, va);
2974 		KASSERT(pmap_load(pte) == 0,
2975 		    ("pmap_alloc_l3: TODO: l0 superpages"));
2976 #endif
2977 		break;
2978 	case 1:
2979 #ifdef INVARIANTS
2980 		pte = pmap_l1_to_l2(pde, va);
2981 		KASSERT(pmap_load(pte) == 0,
2982 		    ("pmap_alloc_l3: TODO: l1 superpages"));
2983 #endif
2984 		break;
2985 	case 2:
2986 		tpde = pmap_load(pde);
2987 		if (tpde != 0) {
2988 			m = PTE_TO_VM_PAGE(tpde);
2989 			m->ref_count++;
2990 			return (m);
2991 		}
2992 		break;
2993 	default:
2994 		panic("pmap_alloc_l3: Invalid level %d", lvl);
2995 	}
2996 
2997 	/*
2998 	 * Here if the pte page isn't mapped, or if it has been deallocated.
2999 	 */
3000 	m = _pmap_alloc_l3(pmap, ptepindex, lockp);
3001 	if (m == NULL && lockp != NULL)
3002 		goto retry;
3003 
3004 	return (m);
3005 }
3006 
3007 /***************************************************
3008  * Pmap allocation/deallocation routines.
3009  ***************************************************/
3010 
3011 /*
3012  * Release any resources held by the given physical map.
3013  * Called when a pmap initialized by pmap_pinit is being released.
3014  * Should only be called if the map contains no valid mappings.
3015  */
3016 void
3017 pmap_release(pmap_t pmap)
3018 {
3019 	bool rv __diagused;
3020 	struct spglist freelist;
3021 	struct asid_set *set;
3022 	vm_page_t m;
3023 	int asid;
3024 
3025 	if (pmap->pm_levels != 4) {
3026 		PMAP_ASSERT_STAGE2(pmap);
3027 		KASSERT(pmap->pm_stats.resident_count == 1,
3028 		    ("pmap_release: pmap resident count %ld != 1",
3029 		    pmap->pm_stats.resident_count));
3030 		KASSERT((pmap->pm_l0[0] & ATTR_DESCR_VALID) == ATTR_DESCR_VALID,
3031 		    ("pmap_release: Invalid l0 entry: %lx", pmap->pm_l0[0]));
3032 
3033 		SLIST_INIT(&freelist);
3034 		m = PHYS_TO_VM_PAGE(pmap->pm_ttbr);
3035 		PMAP_LOCK(pmap);
3036 		rv = pmap_unwire_l3(pmap, 0, m, &freelist);
3037 		PMAP_UNLOCK(pmap);
3038 		MPASS(rv == true);
3039 		vm_page_free_pages_toq(&freelist, true);
3040 	}
3041 
3042 	KASSERT(pmap->pm_stats.resident_count == 0,
3043 	    ("pmap_release: pmap resident count %ld != 0",
3044 	    pmap->pm_stats.resident_count));
3045 	KASSERT(vm_radix_is_empty(&pmap->pm_root),
3046 	    ("pmap_release: pmap has reserved page table page(s)"));
3047 
3048 	set = pmap->pm_asid_set;
3049 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
3050 
3051 	/*
3052 	 * Allow the ASID to be reused.  For stage 2 pmaps we don't invalidate
3053 	 * the TLB entries when removing them, so we rely on a later TLB
3054 	 * invalidation, which happens when the VMID generation is updated.
3055 	 * Because of this we don't reuse VMIDs within a generation.
3056 	 */
3057 	if (pmap->pm_stage == PM_STAGE1) {
3058 		mtx_lock_spin(&set->asid_set_mutex);
3059 		if (COOKIE_TO_EPOCH(pmap->pm_cookie) == set->asid_epoch) {
3060 			asid = COOKIE_TO_ASID(pmap->pm_cookie);
3061 			KASSERT(asid >= ASID_FIRST_AVAILABLE &&
3062 			    asid < set->asid_set_size,
3063 			    ("pmap_release: pmap cookie has out-of-range asid"));
3064 			bit_clear(set->asid_set, asid);
3065 		}
3066 		mtx_unlock_spin(&set->asid_set_mutex);
3067 
3068 		if (pmap->pm_bti != NULL) {
3069 			rangeset_fini(pmap->pm_bti);
3070 			free(pmap->pm_bti, M_DEVBUF);
3071 		}
3072 	}
3073 
3074 	m = PHYS_TO_VM_PAGE(pmap->pm_l0_paddr);
3075 	vm_page_unwire_noq(m);
3076 	vm_page_free_zero(m);
3077 }
3078 
3079 static int
3080 kvm_size(SYSCTL_HANDLER_ARGS)
3081 {
3082 	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
3083 
3084 	return sysctl_handle_long(oidp, &ksize, 0, req);
3085 }
3086 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
3087     0, 0, kvm_size, "LU",
3088     "Size of KVM");
3089 
3090 static int
3091 kvm_free(SYSCTL_HANDLER_ARGS)
3092 {
3093 	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
3094 
3095 	return sysctl_handle_long(oidp, &kfree, 0, req);
3096 }
3097 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
3098     0, 0, kvm_free, "LU",
3099     "Amount of KVM free");
3100 
3101 /*
3102  * grow the number of kernel page table entries, if needed
3103  */
3104 static int
3105 pmap_growkernel_nopanic(vm_offset_t addr)
3106 {
3107 	vm_page_t nkpg;
3108 	pd_entry_t *l0, *l1, *l2;
3109 
3110 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
3111 
3112 	addr = roundup2(addr, L2_SIZE);
3113 	if (addr - 1 >= vm_map_max(kernel_map))
3114 		addr = vm_map_max(kernel_map);
3115 	if (kernel_vm_end < addr) {
3116 		kasan_shadow_map(kernel_vm_end, addr - kernel_vm_end);
3117 		kmsan_shadow_map(kernel_vm_end, addr - kernel_vm_end);
3118 	}
3119 	while (kernel_vm_end < addr) {
3120 		l0 = pmap_l0(kernel_pmap, kernel_vm_end);
3121 		KASSERT(pmap_load(l0) != 0,
3122 		    ("pmap_growkernel: No level 0 kernel entry"));
3123 
3124 		l1 = pmap_l0_to_l1(l0, kernel_vm_end);
3125 		if (pmap_load(l1) == 0) {
3126 			/* We need a new PDP entry */
3127 			nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
3128 			    VM_ALLOC_NOFREE | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
3129 			if (nkpg == NULL)
3130 				return (KERN_RESOURCE_SHORTAGE);
3131 			nkpg->pindex = pmap_l1_pindex(kernel_vm_end);
3132 			/* See the dmb() in _pmap_alloc_l3(). */
3133 			dmb(ishst);
3134 			pmap_store(l1, VM_PAGE_TO_PTE(nkpg) | L1_TABLE);
3135 			continue; /* try again */
3136 		}
3137 		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
3138 		if (pmap_load(l2) != 0) {
3139 			kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
3140 			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3141 				kernel_vm_end = vm_map_max(kernel_map);
3142 				break;
3143 			}
3144 			continue;
3145 		}
3146 
3147 		nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
3148 		    VM_ALLOC_NOFREE | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
3149 		if (nkpg == NULL)
3150 			return (KERN_RESOURCE_SHORTAGE);
3151 		nkpg->pindex = pmap_l2_pindex(kernel_vm_end);
3152 		/* See the dmb() in _pmap_alloc_l3(). */
3153 		dmb(ishst);
3154 		pmap_store(l2, VM_PAGE_TO_PTE(nkpg) | L2_TABLE);
3155 
3156 		kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
3157 		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3158 			kernel_vm_end = vm_map_max(kernel_map);
3159 			break;
3160 		}
3161 	}
3162 	return (KERN_SUCCESS);
3163 }
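
/*
 * Editor's note (not part of pmap.c): the rounding and clamping arithmetic
 * at the top of pmap_growkernel_nopanic().  Growth proceeds in whole 2 MB
 * (L2) steps and never past the end of the kernel map.  The addresses below
 * are assumed demo values.
 */
#include <stdint.h>
#include <stdio.h>

#define	DEMO_L2_SIZE	(UINT64_C(2) << 20)

int
main(void)
{
	uint64_t kernel_vm_end = 0xffff000040200000ULL;	/* assumed */
	uint64_t map_max = 0xffff000080000000ULL;	/* assumed */
	uint64_t addr = 0xffff000040312345ULL;		/* requested end */

	/* roundup2(addr, L2_SIZE): round up to the next 2 MB boundary. */
	addr = (addr + DEMO_L2_SIZE - 1) & ~(DEMO_L2_SIZE - 1);
	if (addr - 1 >= map_max)
		addr = map_max;

	printf("grow from %#jx to %#jx (%ju L2 step(s))\n",
	    (uintmax_t)kernel_vm_end, (uintmax_t)addr,
	    (uintmax_t)((addr - kernel_vm_end) / DEMO_L2_SIZE));
	return (0);
}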
3164 
3165 int
3166 pmap_growkernel(vm_offset_t addr)
3167 {
3168 	int rv;
3169 
3170 	rv = pmap_growkernel_nopanic(addr);
3171 	if (rv != KERN_SUCCESS && pmap_growkernel_panic)
3172 		panic("pmap_growkernel: no memory to grow kernel");
3173 	return (rv);
3174 }
3175 
3176 /***************************************************
3177  * page management routines.
3178  ***************************************************/
3179 
3180 static const uint64_t pc_freemask[_NPCM] = {
3181 	[0 ... _NPCM - 2] = PC_FREEN,
3182 	[_NPCM - 1] = PC_FREEL
3183 };
3184 
3185 #ifdef PV_STATS
3186 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
3187 
3188 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
3189 	"Current number of pv entry chunks");
3190 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
3191 	"Current number of pv entry chunks allocated");
3192 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
3193 	"Current number of pv entry chunks frees");
3194 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
3195 	"Number of times tried to get a chunk page but failed.");
3196 
3197 static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
3198 static int pv_entry_spare;
3199 
3200 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
3201 	"Current number of pv entry frees");
3202 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
3203 	"Current number of pv entry allocs");
3204 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
3205 	"Current number of pv entries");
3206 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
3207 	"Current number of spare pv entries");
3208 #endif
3209 
3210 /*
3211  * We are in a serious low memory condition.  Resort to
3212  * drastic measures to free some pages so we can allocate
3213  * another pv entry chunk.
3214  *
3215  * Returns NULL if PV entries were reclaimed from the specified pmap.
3216  *
3217  * We do not, however, unmap 2mpages because subsequent accesses will
3218  * allocate per-page pv entries until repromotion occurs, thereby
3219  * exacerbating the shortage of free pv entries.
3220  */
3221 static vm_page_t
3222 reclaim_pv_chunk_domain(pmap_t locked_pmap, struct rwlock **lockp, int domain)
3223 {
3224 	struct pv_chunks_list *pvc;
3225 	struct pv_chunk *pc, *pc_marker, *pc_marker_end;
3226 	struct pv_chunk_header pc_marker_b, pc_marker_end_b;
3227 	struct md_page *pvh;
3228 	pd_entry_t *pde;
3229 	pmap_t next_pmap, pmap;
3230 	pt_entry_t *pte, tpte;
3231 	pv_entry_t pv;
3232 	vm_offset_t va;
3233 	vm_page_t m, m_pc;
3234 	struct spglist free;
3235 	uint64_t inuse;
3236 	int bit, field, freed, lvl;
3237 
3238 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
3239 	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
3240 
3241 	pmap = NULL;
3242 	m_pc = NULL;
3243 	SLIST_INIT(&free);
3244 	bzero(&pc_marker_b, sizeof(pc_marker_b));
3245 	bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
3246 	pc_marker = (struct pv_chunk *)&pc_marker_b;
3247 	pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
3248 
3249 	pvc = &pv_chunks[domain];
3250 	mtx_lock(&pvc->pvc_lock);
3251 	pvc->active_reclaims++;
3252 	TAILQ_INSERT_HEAD(&pvc->pvc_list, pc_marker, pc_lru);
3253 	TAILQ_INSERT_TAIL(&pvc->pvc_list, pc_marker_end, pc_lru);
3254 	while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
3255 	    SLIST_EMPTY(&free)) {
3256 		next_pmap = pc->pc_pmap;
3257 		if (next_pmap == NULL) {
3258 			/*
3259 			 * The next chunk is a marker.  However, it is
3260 			 * not our marker, so active_reclaims must be
3261 			 * > 1.  Consequently, the next_chunk code
3262 			 * will not rotate the pv_chunks list.
3263 			 */
3264 			goto next_chunk;
3265 		}
3266 		mtx_unlock(&pvc->pvc_lock);
3267 
3268 		/*
3269 		 * A pv_chunk can only be removed from the pc_lru list
3270 		 * when both pvc->pvc_lock is owned and the
3271 		 * corresponding pmap is locked.
3272 		 */
3273 		if (pmap != next_pmap) {
3274 			if (pmap != NULL && pmap != locked_pmap)
3275 				PMAP_UNLOCK(pmap);
3276 			pmap = next_pmap;
3277 			/* Avoid deadlock and lock recursion. */
3278 			if (pmap > locked_pmap) {
3279 				RELEASE_PV_LIST_LOCK(lockp);
3280 				PMAP_LOCK(pmap);
3281 				mtx_lock(&pvc->pvc_lock);
3282 				continue;
3283 			} else if (pmap != locked_pmap) {
3284 				if (PMAP_TRYLOCK(pmap)) {
3285 					mtx_lock(&pvc->pvc_lock);
3286 					continue;
3287 				} else {
3288 					pmap = NULL; /* pmap is not locked */
3289 					mtx_lock(&pvc->pvc_lock);
3290 					pc = TAILQ_NEXT(pc_marker, pc_lru);
3291 					if (pc == NULL ||
3292 					    pc->pc_pmap != next_pmap)
3293 						continue;
3294 					goto next_chunk;
3295 				}
3296 			}
3297 		}
3298 
3299 		/*
3300 		 * Destroy every non-wired, 4 KB page mapping in the chunk.
3301 		 */
3302 		freed = 0;
3303 		for (field = 0; field < _NPCM; field++) {
3304 			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
3305 			    inuse != 0; inuse &= ~(1UL << bit)) {
3306 				bit = ffsl(inuse) - 1;
3307 				pv = &pc->pc_pventry[field * 64 + bit];
3308 				va = pv->pv_va;
3309 				pde = pmap_pde(pmap, va, &lvl);
3310 				if (lvl != 2)
3311 					continue;
3312 				pte = pmap_l2_to_l3(pde, va);
3313 				tpte = pmap_load(pte);
3314 				if ((tpte & ATTR_SW_WIRED) != 0)
3315 					continue;
3316 				if ((tpte & ATTR_CONTIGUOUS) != 0)
3317 					(void)pmap_demote_l3c(pmap, pte, va);
3318 				tpte = pmap_load_clear(pte);
3319 				m = PTE_TO_VM_PAGE(tpte);
3320 				if (pmap_pte_dirty(pmap, tpte))
3321 					vm_page_dirty(m);
3322 				if ((tpte & ATTR_AF) != 0) {
3323 					pmap_s1_invalidate_page(pmap, va, true);
3324 					vm_page_aflag_set(m, PGA_REFERENCED);
3325 				}
3326 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3327 				TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
3328 				m->md.pv_gen++;
3329 				if (TAILQ_EMPTY(&m->md.pv_list) &&
3330 				    (m->flags & PG_FICTITIOUS) == 0) {
3331 					pvh = page_to_pvh(m);
3332 					if (TAILQ_EMPTY(&pvh->pv_list)) {
3333 						vm_page_aflag_clear(m,
3334 						    PGA_WRITEABLE);
3335 					}
3336 				}
3337 				pc->pc_map[field] |= 1UL << bit;
3338 				pmap_unuse_pt(pmap, va, pmap_load(pde), &free);
3339 				freed++;
3340 			}
3341 		}
3342 		if (freed == 0) {
3343 			mtx_lock(&pvc->pvc_lock);
3344 			goto next_chunk;
3345 		}
3346 		/* Every freed mapping is for a 4 KB page. */
3347 		pmap_resident_count_dec(pmap, freed);
3348 		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
3349 		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
3350 		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
3351 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3352 		if (pc_is_free(pc)) {
3353 			PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
3354 			PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
3355 			PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
3356 			/* Entire chunk is free; return it. */
3357 			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
3358 			dump_drop_page(m_pc->phys_addr);
3359 			mtx_lock(&pvc->pvc_lock);
3360 			TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
3361 			break;
3362 		}
3363 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3364 		mtx_lock(&pvc->pvc_lock);
3365 		/* One freed pv entry in locked_pmap is sufficient. */
3366 		if (pmap == locked_pmap)
3367 			break;
3368 
3369 next_chunk:
3370 		TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru);
3371 		TAILQ_INSERT_AFTER(&pvc->pvc_list, pc, pc_marker, pc_lru);
3372 		if (pvc->active_reclaims == 1 && pmap != NULL) {
3373 			/*
3374 			 * Rotate the pv chunks list so that we do not
3375 			 * scan the same pv chunks that could not be
3376 			 * freed (because they contained a wired
3377 			 * and/or superpage mapping) on every
3378 			 * invocation of reclaim_pv_chunk().
3379 			 */
3380 			while ((pc = TAILQ_FIRST(&pvc->pvc_list)) != pc_marker){
3381 				MPASS(pc->pc_pmap != NULL);
3382 				TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
3383 				TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
3384 			}
3385 		}
3386 	}
3387 	TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru);
3388 	TAILQ_REMOVE(&pvc->pvc_list, pc_marker_end, pc_lru);
3389 	pvc->active_reclaims--;
3390 	mtx_unlock(&pvc->pvc_lock);
3391 	if (pmap != NULL && pmap != locked_pmap)
3392 		PMAP_UNLOCK(pmap);
3393 	if (m_pc == NULL && !SLIST_EMPTY(&free)) {
3394 		m_pc = SLIST_FIRST(&free);
3395 		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
3396 		/* Recycle a freed page table page. */
3397 		m_pc->ref_count = 1;
3398 	}
3399 	vm_page_free_pages_toq(&free, true);
3400 	return (m_pc);
3401 }
3402 
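/*
 * Reclaim a pv chunk, trying the current CPU's memory domain first and then
 * the remaining domains in round-robin order until a page is recovered or
 * every domain has been tried.
 */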
3403 static vm_page_t
3404 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
3405 {
3406 	vm_page_t m;
3407 	int i, domain;
3408 
3409 	domain = PCPU_GET(domain);
3410 	for (i = 0; i < vm_ndomains; i++) {
3411 		m = reclaim_pv_chunk_domain(locked_pmap, lockp, domain);
3412 		if (m != NULL)
3413 			break;
3414 		domain = (domain + 1) % vm_ndomains;
3415 	}
3416 
3417 	return (m);
3418 }
3419 
3420 /*
3421  * Free the pv_entry back to its pv chunk; release the chunk if it becomes empty.
3422  */
3423 static void
3424 free_pv_entry(pmap_t pmap, pv_entry_t pv)
3425 {
3426 	struct pv_chunk *pc;
3427 	int idx, field, bit;
3428 
3429 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3430 	PV_STAT(atomic_add_long(&pv_entry_frees, 1));
3431 	PV_STAT(atomic_add_int(&pv_entry_spare, 1));
3432 	PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
3433 	pc = pv_to_chunk(pv);
3434 	idx = pv - &pc->pc_pventry[0];
3435 	field = idx / 64;
3436 	bit = idx % 64;
3437 	pc->pc_map[field] |= 1ul << bit;
3438 	if (!pc_is_free(pc)) {
3439 		/* 98% of the time, pc is already at the head of the list. */
3440 		if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
3441 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3442 			TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3443 		}
3444 		return;
3445 	}
3446 	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3447 	free_pv_chunk(pc);
3448 }
3449 
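/*
 * Release the page backing a pv chunk that has already been removed from its
 * per-domain pc_lru list.
 */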
3450 static void
3451 free_pv_chunk_dequeued(struct pv_chunk *pc)
3452 {
3453 	vm_page_t m;
3454 
3455 	PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
3456 	PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
3457 	PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
3458 	/* The entire chunk is free; return it. */
3459 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
3460 	dump_drop_page(m->phys_addr);
3461 	vm_page_unwire_noq(m);
3462 	vm_page_free(m);
3463 }
3464 
3465 static void
3466 free_pv_chunk(struct pv_chunk *pc)
3467 {
3468 	struct pv_chunks_list *pvc;
3469 
3470 	pvc = &pv_chunks[pc_to_domain(pc)];
3471 	mtx_lock(&pvc->pvc_lock);
3472 	TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
3473 	mtx_unlock(&pvc->pvc_lock);
3474 	free_pv_chunk_dequeued(pc);
3475 }
3476 
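/*
 * Free a batch of pv chunks: first unlink every chunk from its per-domain
 * pc_lru list, taking each domain's lock only once, and then release the
 * backing pages in a second pass.
 */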
3477 static void
3478 free_pv_chunk_batch(struct pv_chunklist *batch)
3479 {
3480 	struct pv_chunks_list *pvc;
3481 	struct pv_chunk *pc, *npc;
3482 	int i;
3483 
3484 	for (i = 0; i < vm_ndomains; i++) {
3485 		if (TAILQ_EMPTY(&batch[i]))
3486 			continue;
3487 		pvc = &pv_chunks[i];
3488 		mtx_lock(&pvc->pvc_lock);
3489 		TAILQ_FOREACH(pc, &batch[i], pc_list) {
3490 			TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
3491 		}
3492 		mtx_unlock(&pvc->pvc_lock);
3493 	}
3494 
3495 	for (i = 0; i < vm_ndomains; i++) {
3496 		TAILQ_FOREACH_SAFE(pc, &batch[i], pc_list, npc) {
3497 			free_pv_chunk_dequeued(pc);
3498 		}
3499 	}
3500 }
3501 
3502 /*
3503  * Returns a new PV entry, allocating a new PV chunk from the system when
3504  * needed.  If this PV chunk allocation fails and a PV list lock pointer was
3505  * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
3506  * returned.
3507  *
3508  * The given PV list lock may be released.
3509  */
3510 static pv_entry_t
3511 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
3512 {
3513 	struct pv_chunks_list *pvc;
3514 	int bit, field;
3515 	pv_entry_t pv;
3516 	struct pv_chunk *pc;
3517 	vm_page_t m;
3518 
3519 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3520 	PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
3521 retry:
3522 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
3523 	if (pc != NULL) {
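		/*
		 * A set bit in pc_map[] identifies a free pv entry; find the
		 * first such bit.
		 */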
3524 		for (field = 0; field < _NPCM; field++) {
3525 			if (pc->pc_map[field]) {
3526 				bit = ffsl(pc->pc_map[field]) - 1;
3527 				break;
3528 			}
3529 		}
3530 		if (field < _NPCM) {
3531 			pv = &pc->pc_pventry[field * 64 + bit];
3532 			pc->pc_map[field] &= ~(1ul << bit);
3533 			/* If this was the last free entry, move the chunk to the tail. */
3534 			if (pc_is_full(pc)) {
3535 				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3536 				TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
3537 				    pc_list);
3538 			}
3539 			PV_STAT(atomic_add_long(&pv_entry_count, 1));
3540 			PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
3541 			return (pv);
3542 		}
3543 	}
3544 	/* No free items, allocate another chunk */
3545 	m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
3546 	if (m == NULL) {
3547 		if (lockp == NULL) {
3548 			PV_STAT(pc_chunk_tryfail++);
3549 			return (NULL);
3550 		}
3551 		m = reclaim_pv_chunk(pmap, lockp);
3552 		if (m == NULL)
3553 			goto retry;
3554 	}
3555 	PV_STAT(atomic_add_int(&pc_chunk_count, 1));
3556 	PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
3557 	dump_add_page(m->phys_addr);
3558 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
3559 	pc->pc_pmap = pmap;
3560 	memcpy(pc->pc_map, pc_freemask, sizeof(pc_freemask));
3561 	pc->pc_map[0] &= ~1ul;		/* preallocated bit 0 */
3562 	pvc = &pv_chunks[vm_page_domain(m)];
3563 	mtx_lock(&pvc->pvc_lock);
3564 	TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
3565 	mtx_unlock(&pvc->pvc_lock);
3566 	pv = &pc->pc_pventry[0];
3567 	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3568 	PV_STAT(atomic_add_long(&pv_entry_count, 1));
3569 	PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
3570 	return (pv);
3571 }
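
/*
 * Illustrative sketch (not part of this file): a typical caller allocates a
 * pv entry while holding the pmap lock, points it at the virtual address,
 * and links it onto the page's pv list, e.g.:
 *
 *	pv = get_pv_entry(pmap, &lock);
 *	pv->pv_va = va;
 *	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
 *	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 *	m->md.pv_gen++;
 *
 * The local variable names ("lock", "m", "va") are placeholders; see
 * pmap_try_insert_pv_entry() below for a real instance of this pattern.
 */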
3572 
3573 /*
3574  * Ensure that the number of spare PV entries in the specified pmap meets or
3575  * exceeds the given count, "needed".
3576  *
3577  * The given PV list lock may be released.
3578  */
3579 static void
3580 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
3581 {
3582 	struct pv_chunks_list *pvc;
3583 	struct pch new_tail[PMAP_MEMDOM];
3584 	struct pv_chunk *pc;
3585 	vm_page_t m;
3586 	int avail, free, i;
3587 	bool reclaimed;
3588 
3589 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3590 	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
3591 
3592 	/*
3593 	 * Newly allocated PV chunks must be stored in a private list until
3594 	 * the required number of PV chunks have been allocated.  Otherwise,
3595 	 * reclaim_pv_chunk() could recycle one of these chunks.  In
3596 	 * contrast, these chunks must be added to the pmap's pm_pvchunk list upon allocation.
3597 	 */
3598 	for (i = 0; i < PMAP_MEMDOM; i++)
3599 		TAILQ_INIT(&new_tail[i]);
3600 retry:
3601 	avail = 0;
3602 	TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
3603 		bit_count((bitstr_t *)pc->pc_map, 0,
3604 		    sizeof(pc->pc_map) * NBBY, &free);
3605 		if (free == 0)
3606 			break;
3607 		avail += free;
3608 		if (avail >= needed)
3609 			break;
3610 	}
3611 	for (reclaimed = false; avail < needed; avail += _NPCPV) {
3612 		m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
3613 		if (m == NULL) {
3614 			m = reclaim_pv_chunk(pmap, lockp);
3615 			if (m == NULL)
3616 				goto retry;
3617 			reclaimed = true;
3618 		}
3619 		PV_STAT(atomic_add_int(&pc_chunk_count, 1));
3620 		PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
3621 		dump_add_page(m->phys_addr);
3622 		pc = (void *)PHYS_TO_DMAP(m->phys_addr);
3623 		pc->pc_pmap = pmap;
3624 		memcpy(pc->pc_map, pc_freemask, sizeof(pc_freemask));
3625 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3626 		TAILQ_INSERT_TAIL(&new_tail[vm_page_domain(m)], pc, pc_lru);
3627 		PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
3628 
3629 		/*
3630 		 * The reclaim might have freed a chunk from the current pmap.
3631 		 * If that chunk contained available entries, we need to
3632 		 * re-count the number of available entries.
3633 		 */
3634 		if (reclaimed)
3635 			goto retry;
3636 	}
3637 	for (i = 0; i < vm_ndomains; i++) {
3638 		if (TAILQ_EMPTY(&new_tail[i]))
3639 			continue;
3640 		pvc = &pv_chunks[i];
3641 		mtx_lock(&pvc->pvc_lock);
3642 		TAILQ_CONCAT(&pvc->pvc_list, &new_tail[i], pc_lru);
3643 		mtx_unlock(&pvc->pvc_lock);
3644 	}
3645 }
3646 
3647 /*
3648  * First find and then remove the pv entry for the specified pmap and virtual
3649  * address from the specified pv list.  Returns the pv entry if found and NULL
3650  * otherwise.  This operation can be performed on pv lists for either 4KB or
3651  * 2MB page mappings.
3652  */
3653 static __inline pv_entry_t
3654 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
3655 {
3656 	pv_entry_t pv;
3657 
3658 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3659 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
3660 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
3661 			pvh->pv_gen++;
3662 			break;
3663 		}
3664 	}
3665 	return (pv);
3666 }
3667 
3668 /*
3669  * After demotion from a 2MB page mapping to 512 4KB page mappings,
3670  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
3671  * entries for each of the 4KB page mappings.
3672  */
3673 static void
3674 pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
3675     struct rwlock **lockp)
3676 {
3677 	struct md_page *pvh;
3678 	struct pv_chunk *pc;
3679 	pv_entry_t pv;
3680 	vm_offset_t va_last;
3681 	vm_page_t m;
3682 	int bit, field;
3683 
3684 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3685 	KASSERT((va & L2_OFFSET) == 0,
3686 	    ("pmap_pv_demote_l2: va is not 2mpage aligned"));
3687 	KASSERT((pa & L2_OFFSET) == 0,
3688 	    ("pmap_pv_demote_l2: pa is not 2mpage aligned"));
3689 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3690 
3691 	/*
3692 	 * Transfer the 2mpage's pv entry for this mapping to the first
3693 	 * page's pv list.  Once this transfer begins, the pv list lock
3694 	 * must not be released until the last pv entry is reinstantiated.
3695 	 */
3696 	pvh = pa_to_pvh(pa);
3697 	pv = pmap_pvh_remove(pvh, pmap, va);
3698 	KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
3699 	m = PHYS_TO_VM_PAGE(pa);
3700 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3701 	m->md.pv_gen++;
3702 	/* Instantiate the remaining Ln_ENTRIES - 1 pv entries. */
3703 	PV_STAT(atomic_add_long(&pv_entry_allocs, Ln_ENTRIES - 1));
3704 	va_last = va + L2_SIZE - PAGE_SIZE;
3705 	for (;;) {
3706 		pc = TAILQ_FIRST(&pmap->pm_pvchunk);
3707 		KASSERT(!pc_is_full(pc), ("pmap_pv_demote_l2: missing spare"));
3708 		for (field = 0; field < _NPCM; field++) {
3709 			while (pc->pc_map[field]) {
3710 				bit = ffsl(pc->pc_map[field]) - 1;
3711 				pc->pc_map[field] &= ~(1ul << bit);
3712 				pv = &pc->pc_pventry[field * 64 + bit];
3713 				va += PAGE_SIZE;
3714 				pv->pv_va = va;
3715 				m++;
3716 				KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3717 			    ("pmap_pv_demote_l2: page %p is not managed", m));
3718 				TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3719 				m->md.pv_gen++;
3720 				if (va == va_last)
3721 					goto out;
3722 			}
3723 		}
3724 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3725 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
3726 	}
3727 out:
3728 	if (pc_is_full(pc)) {
3729 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3730 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
3731 	}
3732 	PV_STAT(atomic_add_long(&pv_entry_count, Ln_ENTRIES - 1));
3733 	PV_STAT(atomic_subtract_int(&pv_entry_spare, Ln_ENTRIES - 1));
3734 }
3735 
3736 /*
3737  * First find and then destroy the pv entry for the specified pmap and virtual
3738  * address.  This operation can be performed on pv lists for either 4KB or 2MB
3739  * page mappings.
3740  */
3741 static void
3742 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
3743 {
3744 	pv_entry_t pv;
3745 
3746 	pv = pmap_pvh_remove(pvh, pmap, va);
3747 	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
3748 	free_pv_entry(pmap, pv);
3749 }
3750 
3751 /*
3752  * Conditionally create the PV entry for a 4KB page mapping if the required
3753  * memory can be allocated without resorting to reclamation.
3754  */
3755 static bool
3756 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
3757     struct rwlock **lockp)
3758 {
3759 	pv_entry_t pv;
3760 
3761 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3762 	/* Pass NULL instead of the lock pointer to disable reclamation. */
3763 	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
3764 		pv->pv_va = va;
3765 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3766 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3767 		m->md.pv_gen++;
3768 		return (true);
3769 	} else
3770 		return (false);
3771 }
3772 
3773 /*
3774  * Create the PV entry for a 2MB page mapping.  Always returns true unless the
3775  * flag PMAP_ENTER_NORECLAIM is specified.  If that flag is specified, returns
3776  * false if the PV entry cannot be allocated without resorting to reclamation.
3777  */
3778 static bool
3779 pmap_pv_insert_l2(pmap_t pmap, vm_offset_t va, pd_entry_t l2e, u_int flags,
3780     struct rwlock **lockp)
3781 {
3782 	struct md_page *pvh;
3783 	pv_entry_t pv;
3784 	vm_paddr_t pa;
3785 
3786 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3787 	/* Pass NULL instead of the lock pointer to disable reclamation. */
3788 	if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
3789 	    NULL : lockp)) == NULL)
3790 		return (false);
3791 	pv->pv_va = va;
3792 	pa = PTE_TO_PHYS(l2e);
3793 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3794 	pvh = pa_to_pvh(pa);
3795 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3796 	pvh->pv_gen++;
3797 	return (true);
3798 }
3799 
3800 /*
3801  * Conditionally creates the PV entries for an L3C superpage mapping if
3802  * the required memory can be allocated without resorting to reclamation.
3803  */
3804 static bool
3805 pmap_pv_insert_l3c(pmap_t pmap, vm_offset_t va, vm_page_t m,
3806     struct rwlock **lockp)
3807 {
3808 	pv_entry_t pv;
3809 	vm_offset_t tva;
3810 	vm_paddr_t pa __diagused;
3811 	vm_page_t mt;
3812 
3813 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3814 	KASSERT((va & L3C_OFFSET) == 0,
3815 	    ("pmap_pv_insert_l3c: va is not aligned"));
3816 	pa = VM_PAGE_TO_PHYS(m);
3817 	KASSERT((pa & L3C_OFFSET) == 0,
3818 	    ("pmap_pv_insert_l3c: pa is not aligned"));
3819 	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3820 	for (mt = m, tva = va; mt < &m[L3C_ENTRIES]; mt++, tva += L3_SIZE) {
3821 		/* Pass NULL instead of lockp to disable reclamation. */
3822 		pv = get_pv_entry(pmap, NULL);
3823 		if (__predict_false(pv == NULL)) {
3824 			while (tva > va) {
3825 				mt--;
3826 				tva -= L3_SIZE;
3827 				pmap_pvh_free(&mt->md, pmap, tva);
3828 			}
3829 			return (false);
3830 		}
3831 		pv->pv_va = tva;
3832 		TAILQ_INSERT_TAIL(&mt->md.pv_list, pv, pv_next);
3833 		mt->md.pv_gen++;
3834 	}
3835 	return (true);
3836 }
3837 
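/*
 * Reinstall the saved L3 page table page for a kernel 2MB mapping that the
 * caller has just removed, so that the kernel L2 entry again points at an
 * (empty) L3 table; unlike the user pmap case in pmap_remove_l2(), the page
 * table page is reinstalled rather than freed.
 */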
3838 static void
3839 pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
3840 {
3841 	pt_entry_t newl2, oldl2 __diagused;
3842 	vm_page_t ml3;
3843 	vm_paddr_t ml3pa;
3844 
3845 	KASSERT(!VIRT_IN_DMAP(va), ("removing direct mapping of %#lx", va));
3846 	KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
3847 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3848 
3849 	ml3 = pmap_remove_pt_page(pmap, va);
3850 	if (ml3 == NULL)
3851 		panic("pmap_remove_kernel_l2: Missing pt page");
3852 
3853 	ml3pa = VM_PAGE_TO_PHYS(ml3);
3854 	newl2 = PHYS_TO_PTE(ml3pa) | L2_TABLE;
3855 
3856 	/*
3857 	 * If this page table page was unmapped by a promotion, then it
3858 	 * contains valid mappings.  Zero it to invalidate those mappings.
3859 	 */
3860 	if (vm_page_any_valid(ml3))
3861 		pagezero((void *)PHYS_TO_DMAP(ml3pa));
3862 
3863 	/*
3864 	 * Demote the mapping.  The caller must have already invalidated the
3865 	 * mapping (i.e., the "break" in break-before-make).
3866 	 */
3867 	oldl2 = pmap_load_store(l2, newl2);
3868 	KASSERT(oldl2 == 0, ("%s: found existing mapping at %p: %#lx",
3869 	    __func__, l2, oldl2));
3870 }
3871 
3872 /*
3873  * pmap_remove_l2: Remove a level 2 (2MB) block mapping.
3874  */
3875 static int
3876 pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
3877     pd_entry_t l1e, struct spglist *free, struct rwlock **lockp)
3878 {
3879 	struct md_page *pvh;
3880 	pt_entry_t old_l2;
3881 	vm_page_t m, ml3, mt;
3882 
3883 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3884 	KASSERT((sva & L2_OFFSET) == 0, ("pmap_remove_l2: sva is not aligned"));
3885 	old_l2 = pmap_load_clear(l2);
3886 	KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
3887 	    ("pmap_remove_l2: L2e %lx is not a block mapping", old_l2));
3888 
3889 	/*
3890 	 * Since a promotion must break the 4KB page mappings before making
3891 	 * the 2MB page mapping, a pmap_s1_invalidate_page() suffices.
3892 	 */
3893 	pmap_s1_invalidate_page(pmap, sva, true);
3894 
3895 	if (old_l2 & ATTR_SW_WIRED)
3896 		pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
3897 	pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
3898 	if (old_l2 & ATTR_SW_MANAGED) {
3899 		m = PTE_TO_VM_PAGE(old_l2);
3900 		pvh = page_to_pvh(m);
3901 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3902 		pmap_pvh_free(pvh, pmap, sva);
3903 		for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) {
3904 			if (pmap_pte_dirty(pmap, old_l2))
3905 				vm_page_dirty(mt);
3906 			if (old_l2 & ATTR_AF)
3907 				vm_page_aflag_set(mt, PGA_REFERENCED);
3908 			if (TAILQ_EMPTY(&mt->md.pv_list) &&
3909 			    TAILQ_EMPTY(&pvh->pv_list))
3910 				vm_page_aflag_clear(mt, PGA_WRITEABLE);
3911 		}
3912 	}
3913 	if (pmap == kernel_pmap) {
3914 		pmap_remove_kernel_l2(pmap, l2, sva);
3915 	} else {
3916 		ml3 = pmap_remove_pt_page(pmap, sva);
3917 		if (ml3 != NULL) {
3918 			KASSERT(vm_page_any_valid(ml3),
3919 			    ("pmap_remove_l2: l3 page not promoted"));
3920 			pmap_resident_count_dec(pmap, 1);
3921 			KASSERT(ml3->ref_count == NL3PG,
3922 			    ("pmap_remove_l2: l3 page ref count error"));
3923 			ml3->ref_count = 0;
3924 			pmap_add_delayed_free_list(ml3, free, false);
3925 		}
3926 	}
3927 	return (pmap_unuse_pt(pmap, sva, l1e, free));
3928 }
3929 
3930 /*
3931  * pmap_remove_l3: Remove a single level 3 (4KB) page mapping.
3932  */
3933 static int
3934 pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
3935     pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
3936 {
3937 	struct md_page *pvh;
3938 	pt_entry_t old_l3;
3939 	vm_page_t m;
3940 
3941 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3942 	old_l3 = pmap_load(l3);
3943 	if ((old_l3 & ATTR_CONTIGUOUS) != 0)
3944 		(void)pmap_demote_l3c(pmap, l3, va);
3945 	old_l3 = pmap_load_clear(l3);
3946 	pmap_s1_invalidate_page(pmap, va, true);
3947 	if (old_l3 & ATTR_SW_WIRED)
3948 		pmap->pm_stats.wired_count -= 1;
3949 	pmap_resident_count_dec(pmap, 1);
3950 	if (old_l3 & ATTR_SW_MANAGED) {
3951 		m = PTE_TO_VM_PAGE(old_l3);
3952 		if (pmap_pte_dirty(pmap, old_l3))
3953 			vm_page_dirty(m);
3954 		if (old_l3 & ATTR_AF)
3955 			vm_page_aflag_set(m, PGA_REFERENCED);
3956 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3957 		pmap_pvh_free(&m->md, pmap, va);
3958 		if (TAILQ_EMPTY(&m->md.pv_list) &&
3959 		    (m->flags & PG_FICTITIOUS) == 0) {
3960 			pvh = page_to_pvh(m);
3961 			if (TAILQ_EMPTY(&pvh->pv_list))
3962 				vm_page_aflag_clear(m, PGA_WRITEABLE);
3963 		}
3964 	}
3965 	return (pmap_unuse_pt(pmap, va, l2e, free));
3966 }
3967 
3968 /*
3969  * Removes the specified L3C superpage mapping.  Requests TLB invalidations
3970  * to be performed by the caller through the returned "*vap". Returns true
3971  * if the level 3 table "ml3" was unmapped and added to the spglist "free".
3972  * Otherwise, returns false.
3973  */
3974 static bool
3975 pmap_remove_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va, vm_offset_t *vap,
3976     vm_offset_t va_next, vm_page_t ml3, struct spglist *free,
3977     struct rwlock **lockp)
3978 {
3979 	struct md_page *pvh;
3980 	struct rwlock *new_lock;
3981 	pt_entry_t first_l3e, l3e, *tl3p;
3982 	vm_offset_t tva;
3983 	vm_page_t m, mt;
3984 
3985 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3986 	KASSERT(((uintptr_t)l3p & ((L3C_ENTRIES * sizeof(pt_entry_t)) - 1)) ==
3987 	    0, ("pmap_remove_l3c: l3p is not aligned"));
3988 	KASSERT((va & L3C_OFFSET) == 0,
3989 	    ("pmap_remove_l3c: va is not aligned"));
3990 
3991 	/*
3992 	 * Hardware accessed and dirty bit maintenance might only update a
3993 	 * single L3 entry, so we must combine the accessed and dirty bits
3994 	 * from this entire set of contiguous L3 entries.
3995 	 */
3996 	first_l3e = pmap_load_clear(l3p);
3997 	for (tl3p = l3p + 1; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
3998 		l3e = pmap_load_clear(tl3p);
3999 		KASSERT((l3e & ATTR_CONTIGUOUS) != 0,
4000 		    ("pmap_remove_l3c: l3e is missing ATTR_CONTIGUOUS"));
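		/*
		 * Propagate the dirty state (ATTR_SW_DBM set and the entry
		 * writeable) from any constituent entry into "first_l3e".
		 */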
4001 		if ((l3e & (ATTR_SW_DBM | ATTR_S1_AP_RW_BIT)) ==
4002 		    (ATTR_SW_DBM | ATTR_S1_AP(ATTR_S1_AP_RW)))
4003 			first_l3e &= ~ATTR_S1_AP_RW_BIT;
4004 		first_l3e |= l3e & ATTR_AF;
4005 	}
4006 	if ((first_l3e & ATTR_SW_WIRED) != 0)
4007 		pmap->pm_stats.wired_count -= L3C_ENTRIES;
4008 	pmap_resident_count_dec(pmap, L3C_ENTRIES);
4009 	if ((first_l3e & ATTR_SW_MANAGED) != 0) {
4010 		m = PTE_TO_VM_PAGE(first_l3e);
4011 		new_lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4012 		if (new_lock != *lockp) {
4013 			if (*lockp != NULL) {
4014 				/*
4015 				 * Pending TLB invalidations must be
4016 				 * performed before the PV list lock is
4017 				 * released.  Otherwise, a concurrent
4018 				 * pmap_remove_all() on a physical page
4019 				 * could return while a stale TLB entry
4020 				 * still provides access to that page.
4021 				 */
4022 				if (*vap != va_next) {
4023 					pmap_invalidate_range(pmap, *vap, va,
4024 					    true);
4025 					*vap = va_next;
4026 				}
4027 				rw_wunlock(*lockp);
4028 			}
4029 			*lockp = new_lock;
4030 			rw_wlock(*lockp);
4031 		}
4032 		pvh = page_to_pvh(m);
4033 		for (mt = m, tva = va; mt < &m[L3C_ENTRIES]; mt++, tva +=
4034 		    L3_SIZE) {
4035 			if (pmap_pte_dirty(pmap, first_l3e))
4036 				vm_page_dirty(mt);
4037 			if ((first_l3e & ATTR_AF) != 0)
4038 				vm_page_aflag_set(mt, PGA_REFERENCED);
4039 			pmap_pvh_free(&mt->md, pmap, tva);
4040 			if (TAILQ_EMPTY(&mt->md.pv_list) &&
4041 			    TAILQ_EMPTY(&pvh->pv_list))
4042 				vm_page_aflag_clear(mt, PGA_WRITEABLE);
4043 		}
4044 	}
4045 	if (*vap == va_next)
4046 		*vap = va;
4047 	if (ml3 != NULL) {
4048 		ml3->ref_count -= L3C_ENTRIES;
4049 		if (ml3->ref_count == 0) {
4050 			_pmap_unwire_l3(pmap, va, ml3, free);
4051 			return (true);
4052 		}
4053 	}
4054 	return (false);
4055 }
4056 
4057 /*
4058  * Remove the specified range of addresses from the L3 page table that is
4059  * identified by the given L2 entry.
4060  */
4061 static void
4062 pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
4063     vm_offset_t eva, struct spglist *free, struct rwlock **lockp)
4064 {
4065 	struct md_page *pvh;
4066 	struct rwlock *new_lock;
4067 	pt_entry_t *l3, old_l3;
4068 	vm_offset_t va;
4069 	vm_page_t l3pg, m;
4070 
4071 	KASSERT(ADDR_IS_CANONICAL(sva),
4072 	    ("%s: Start address not in canonical form: %lx", __func__, sva));
4073 	KASSERT(ADDR_IS_CANONICAL(eva) || eva == VM_MAX_USER_ADDRESS,
4074 	    ("%s: End address not in canonical form: %lx", __func__, eva));
4075 
4076 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4077 	KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
4078 	    ("pmap_remove_l3_range: range crosses an L3 page table boundary"));
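	/*
	 * Reference counting of page table pages (and hence unwiring) is
	 * only performed for user addresses; kernel page table pages are
	 * never freed here.
	 */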
4079 	l3pg = !ADDR_IS_KERNEL(sva) ? PTE_TO_VM_PAGE(l2e) : NULL;
4080 	va = eva;
4081 	for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
4082 		old_l3 = pmap_load(l3);
4083 		if (!pmap_l3_valid(old_l3)) {
4084 			if (va != eva) {
4085 				pmap_invalidate_range(pmap, va, sva, true);
4086 				va = eva;
4087 			}
4088 			continue;
4089 		}
4090 		if ((old_l3 & ATTR_CONTIGUOUS) != 0) {
4091 			/*
4092 			 * Is this entire set of contiguous L3 entries being
4093 			 * removed?  Handle the possibility that "eva" is zero
4094 			 * because of address wraparound.
4095 			 */
4096 			if ((sva & L3C_OFFSET) == 0 &&
4097 			    sva + L3C_OFFSET <= eva - 1) {
4098 				if (pmap_remove_l3c(pmap, l3, sva, &va, eva,
4099 				    l3pg, free, lockp)) {
4100 					/* The L3 table was unmapped. */
4101 					sva += L3C_SIZE;
4102 					break;
4103 				}
4104 				l3 += L3C_ENTRIES - 1;
4105 				sva += L3C_SIZE - L3_SIZE;
4106 				continue;
4107 			}
4108 
4109 			(void)pmap_demote_l3c(pmap, l3, sva);
4110 		}
4111 		old_l3 = pmap_load_clear(l3);
4112 		if ((old_l3 & ATTR_SW_WIRED) != 0)
4113 			pmap->pm_stats.wired_count--;
4114 		pmap_resident_count_dec(pmap, 1);
4115 		if ((old_l3 & ATTR_SW_MANAGED) != 0) {
4116 			m = PTE_TO_VM_PAGE(old_l3);
4117 			if (pmap_pte_dirty(pmap, old_l3))
4118 				vm_page_dirty(m);
4119 			if ((old_l3 & ATTR_AF) != 0)
4120 				vm_page_aflag_set(m, PGA_REFERENCED);
4121 			new_lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4122 			if (new_lock != *lockp) {
4123 				if (*lockp != NULL) {
4124 					/*
4125 					 * Pending TLB invalidations must be
4126 					 * performed before the PV list lock is
4127 					 * released.  Otherwise, a concurrent
4128 					 * pmap_remove_all() on a physical page
4129 					 * could return while a stale TLB entry
4130 					 * still provides access to that page.
4131 					 */
4132 					if (va != eva) {
4133 						pmap_invalidate_range(pmap, va,
4134 						    sva, true);
4135 						va = eva;
4136 					}
4137 					rw_wunlock(*lockp);
4138 				}
4139 				*lockp = new_lock;
4140 				rw_wlock(*lockp);
4141 			}
4142 			pmap_pvh_free(&m->md, pmap, sva);
4143 			if (TAILQ_EMPTY(&m->md.pv_list) &&
4144 			    (m->flags & PG_FICTITIOUS) == 0) {
4145 				pvh = page_to_pvh(m);
4146 				if (TAILQ_EMPTY(&pvh->pv_list))
4147 					vm_page_aflag_clear(m, PGA_WRITEABLE);
4148 			}
4149 		}
4150 		if (l3pg != NULL && pmap_unwire_l3(pmap, sva, l3pg, free)) {
4151 			/*
4152 			 * _pmap_unwire_l3() has already invalidated the TLB
4153 			 * entries at all levels for "sva".  So, we need not
4154 			 * perform "sva += L3_SIZE;" here.  Moreover, we need
4155 			 * not perform "va = sva;" if "sva" is at the start
4156 			 * of a new valid range consisting of a single page.
4157 			 */
4158 			break;
4159 		}
4160 		if (va == eva)
4161 			va = sva;
4162 	}
4163 	if (va != eva)
4164 		pmap_invalidate_range(pmap, va, sva, true);
4165 }
4166 
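/*
 * Common worker for pmap_remove() and pmap_map_delete().  When "map_delete"
 * is true, any BTI metadata covering the range is removed as well.
 */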
4167 static void
4168 pmap_remove1(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool map_delete)
4169 {
4170 	struct rwlock *lock;
4171 	vm_offset_t va_next;
4172 	pd_entry_t *l0, *l1, *l2;
4173 	pt_entry_t l3_paddr;
4174 	struct spglist free;
4175 
4176 	/*
4177 	 * Perform an unsynchronized read.  This is, however, safe.
4178 	 */
4179 	if (pmap->pm_stats.resident_count == 0)
4180 		return;
4181 
4182 	SLIST_INIT(&free);
4183 
4184 	PMAP_LOCK(pmap);
4185 	if (map_delete)
4186 		pmap_bti_on_remove(pmap, sva, eva);
4187 
4188 	lock = NULL;
4189 	for (; sva < eva; sva = va_next) {
4190 		if (pmap->pm_stats.resident_count == 0)
4191 			break;
4192 
4193 		l0 = pmap_l0(pmap, sva);
4194 		if (pmap_load(l0) == 0) {
4195 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
4196 			if (va_next < sva)
4197 				va_next = eva;
4198 			continue;
4199 		}
4200 
4201 		va_next = (sva + L1_SIZE) & ~L1_OFFSET;
4202 		if (va_next < sva)
4203 			va_next = eva;
4204 		l1 = pmap_l0_to_l1(l0, sva);
4205 		if (pmap_load(l1) == 0)
4206 			continue;
4207 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
4208 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
4209 			KASSERT(va_next <= eva,
4210 			    ("partial update of non-transparent 1G page "
4211 			    "l1 %#lx sva %#lx eva %#lx va_next %#lx",
4212 			    pmap_load(l1), sva, eva, va_next));
4213 			MPASS(pmap != kernel_pmap);
4214 			MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0);
4215 			pmap_clear(l1);
4216 			pmap_s1_invalidate_page(pmap, sva, true);
4217 			pmap_resident_count_dec(pmap, L1_SIZE / PAGE_SIZE);
4218 			pmap_unuse_pt(pmap, sva, pmap_load(l0), &free);
4219 			continue;
4220 		}
4221 
4222 		/*
4223 		 * Calculate index for next page table.
4224 		 */
4225 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
4226 		if (va_next < sva)
4227 			va_next = eva;
4228 
4229 		l2 = pmap_l1_to_l2(l1, sva);
4230 		l3_paddr = pmap_load(l2);
4231 
4232 		if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
4233 			if (sva + L2_SIZE == va_next && eva >= va_next) {
4234 				pmap_remove_l2(pmap, l2, sva, pmap_load(l1),
4235 				    &free, &lock);
4236 				continue;
4237 			} else if (pmap_demote_l2_locked(pmap, l2, sva,
4238 			    &lock) == NULL)
4239 				continue;
4240 			l3_paddr = pmap_load(l2);
4241 		}
4242 
4243 		/*
4244 		 * Weed out invalid mappings.
4245 		 */
4246 		if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
4247 			continue;
4248 
4249 		/*
4250 		 * Limit our scan to either the end of the va represented
4251 		 * by the current page table page, or to the end of the
4252 		 * range being removed.
4253 		 */
4254 		if (va_next > eva)
4255 			va_next = eva;
4256 
4257 		pmap_remove_l3_range(pmap, l3_paddr, sva, va_next, &free,
4258 		    &lock);
4259 	}
4260 	if (lock != NULL)
4261 		rw_wunlock(lock);
4262 	PMAP_UNLOCK(pmap);
4263 	vm_page_free_pages_toq(&free, true);
4264 }
4265 
4266 /*
4267  *	Remove the given range of addresses from the specified map.
4268  *
4269  *	It is assumed that the start and end are properly
4270  *	rounded to the page size.
4271  */
4272 void
4273 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4274 {
4275 	pmap_remove1(pmap, sva, eva, false);
4276 }
4277 
4278 /*
4279  *	Remove the given range of addresses as part of a logical unmap
4280  *	operation. This has the effect of calling pmap_remove(), but
4281  *	also clears any metadata that should persist for the lifetime
4282  *	of a logical mapping.
4283  */
4284 void
4285 pmap_map_delete(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4286 {
4287 	pmap_remove1(pmap, sva, eva, true);
4288 }
4289 
4290 /*
4291  *	Routine:	pmap_remove_all
4292  *	Function:
4293  *		Removes this physical page from
4294  *		all physical maps in which it resides.
4295  *		Reflects back modify bits to the pager.
4296  *
4297  *	Notes:
4298  *		Original versions of this routine were very
4299  *		inefficient because they iteratively called
4300  *		pmap_remove (slow...)
4301  */
4302 
4303 void
4304 pmap_remove_all(vm_page_t m)
4305 {
4306 	struct md_page *pvh;
4307 	pv_entry_t pv;
4308 	pmap_t pmap;
4309 	struct rwlock *lock;
4310 	pd_entry_t *pde, tpde;
4311 	pt_entry_t *pte, tpte;
4312 	vm_offset_t va;
4313 	struct spglist free;
4314 	int lvl, pvh_gen, md_gen;
4315 
4316 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4317 	    ("pmap_remove_all: page %p is not managed", m));
4318 	SLIST_INIT(&free);
4319 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4320 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
4321 	rw_wlock(lock);
4322 retry:
4323 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
4324 		pmap = PV_PMAP(pv);
4325 		if (!PMAP_TRYLOCK(pmap)) {
4326 			pvh_gen = pvh->pv_gen;
4327 			rw_wunlock(lock);
4328 			PMAP_LOCK(pmap);
4329 			rw_wlock(lock);
4330 			if (pvh_gen != pvh->pv_gen) {
4331 				PMAP_UNLOCK(pmap);
4332 				goto retry;
4333 			}
4334 		}
4335 		va = pv->pv_va;
4336 		pte = pmap_pte_exists(pmap, va, 2, __func__);
4337 		pmap_demote_l2_locked(pmap, pte, va, &lock);
4338 		PMAP_UNLOCK(pmap);
4339 	}
4340 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
4341 		pmap = PV_PMAP(pv);
4342 		if (!PMAP_TRYLOCK(pmap)) {
4343 			pvh_gen = pvh->pv_gen;
4344 			md_gen = m->md.pv_gen;
4345 			rw_wunlock(lock);
4346 			PMAP_LOCK(pmap);
4347 			rw_wlock(lock);
4348 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
4349 				PMAP_UNLOCK(pmap);
4350 				goto retry;
4351 			}
4352 		}
4353 		pmap_resident_count_dec(pmap, 1);
4354 
4355 		pde = pmap_pde(pmap, pv->pv_va, &lvl);
4356 		KASSERT(pde != NULL,
4357 		    ("pmap_remove_all: no page directory entry found"));
4358 		KASSERT(lvl == 2,
4359 		    ("pmap_remove_all: invalid pde level %d", lvl));
4360 		tpde = pmap_load(pde);
4361 
4362 		pte = pmap_l2_to_l3(pde, pv->pv_va);
4363 		tpte = pmap_load(pte);
4364 		if ((tpte & ATTR_CONTIGUOUS) != 0)
4365 			(void)pmap_demote_l3c(pmap, pte, pv->pv_va);
4366 		tpte = pmap_load_clear(pte);
4367 		if (tpte & ATTR_SW_WIRED)
4368 			pmap->pm_stats.wired_count--;
4369 		if ((tpte & ATTR_AF) != 0) {
4370 			pmap_invalidate_page(pmap, pv->pv_va, true);
4371 			vm_page_aflag_set(m, PGA_REFERENCED);
4372 		}
4373 
4374 		/*
4375 		 * Update the vm_page_t clean and reference bits.
4376 		 */
4377 		if (pmap_pte_dirty(pmap, tpte))
4378 			vm_page_dirty(m);
4379 		pmap_unuse_pt(pmap, pv->pv_va, tpde, &free);
4380 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4381 		m->md.pv_gen++;
4382 		free_pv_entry(pmap, pv);
4383 		PMAP_UNLOCK(pmap);
4384 	}
4385 	vm_page_aflag_clear(m, PGA_WRITEABLE);
4386 	rw_wunlock(lock);
4387 	vm_page_free_pages_toq(&free, true);
4388 }
4389 
4390 /*
4391  * Masks and sets bits in a level 2 page table entry in the specified pmap.
4392  */
4393 static void
4394 pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask,
4395     pt_entry_t nbits)
4396 {
4397 	pd_entry_t old_l2;
4398 	vm_page_t m, mt;
4399 
4400 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4401 	PMAP_ASSERT_STAGE1(pmap);
4402 	KASSERT((sva & L2_OFFSET) == 0,
4403 	    ("pmap_protect_l2: sva is not 2mpage aligned"));
4404 	old_l2 = pmap_load(l2);
4405 	KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
4406 	    ("pmap_protect_l2: L2e %lx is not a block mapping", old_l2));
4407 
4408 	/*
4409 	 * Return if the L2 entry already has the desired access restrictions
4410 	 * in place.
4411 	 */
4412 	if ((old_l2 & mask) == nbits)
4413 		return;
4414 
4415 	while (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
4416 		cpu_spinwait();
4417 
4418 	/*
4419 	 * When a dirty read/write superpage mapping is write protected,
4420 	 * update the dirty field of each of the superpage's constituent 4KB
4421 	 * pages.
4422 	 */
4423 	if ((old_l2 & ATTR_SW_MANAGED) != 0 &&
4424 	    (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
4425 	    pmap_pte_dirty(pmap, old_l2)) {
4426 		m = PTE_TO_VM_PAGE(old_l2);
4427 		for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
4428 			vm_page_dirty(mt);
4429 	}
4430 
4431 	/*
4432 	 * Since a promotion must break the 4KB page mappings before making
4433 	 * the 2MB page mapping, a pmap_s1_invalidate_page() suffices.
4434 	 */
4435 	pmap_s1_invalidate_page(pmap, sva, true);
4436 }
4437 
4438 /*
4439  * Masks and sets bits in the specified L3C superpage mapping.
4440  *
4441  * Requests TLB invalidations to be performed by the caller through the
4442  * returned "*vap".
4443  */
4444 static void
4445 pmap_mask_set_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va,
4446     vm_offset_t *vap, vm_offset_t va_next, pt_entry_t mask, pt_entry_t nbits)
4447 {
4448 	pt_entry_t l3e, *tl3p;
4449 	vm_page_t m, mt;
4450 	bool dirty;
4451 
4452 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4453 	KASSERT(((uintptr_t)l3p & ((L3C_ENTRIES * sizeof(pt_entry_t)) - 1)) ==
4454 	    0, ("pmap_mask_set_l3c: l3p is not aligned"));
4455 	KASSERT((va & L3C_OFFSET) == 0,
4456 	    ("pmap_mask_set_l3c: va is not aligned"));
4457 	dirty = false;
4458 	for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
4459 		l3e = pmap_load(tl3p);
4460 		KASSERT((l3e & ATTR_CONTIGUOUS) != 0,
4461 		    ("pmap_mask_set_l3c: l3e is missing ATTR_CONTIGUOUS"));
4462 		while (!atomic_fcmpset_64(tl3p, &l3e, (l3e & ~mask) | nbits))
4463 			cpu_spinwait();
4464 		if ((l3e & (ATTR_SW_DBM | ATTR_S1_AP_RW_BIT)) ==
4465 		    (ATTR_SW_DBM | ATTR_S1_AP(ATTR_S1_AP_RW)))
4466 			dirty = true;
4467 	}
4468 
4469 	/*
4470 	 * When a dirty read/write superpage mapping is write protected,
4471 	 * update the dirty field of each of the superpage's constituent 4KB
4472 	 * pages.
4473 	 */
4474 	if ((l3e & ATTR_SW_MANAGED) != 0 &&
4475 	    (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
4476 	    dirty) {
4477 		m = PTE_TO_VM_PAGE(pmap_load(l3p));
4478 		for (mt = m; mt < &m[L3C_ENTRIES]; mt++)
4479 			vm_page_dirty(mt);
4480 	}
4481 
4482 	if (*vap == va_next)
4483 		*vap = va;
4484 }
4485 
4486 /*
4487  * Masks and sets bits in the last-level page table entries in the specified
4488  * pmap and range
4489  */
4490 static void
4491 pmap_mask_set_locked(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t mask,
4492     pt_entry_t nbits, bool invalidate)
4493 {
4494 	vm_offset_t va, va_next;
4495 	pd_entry_t *l0, *l1, *l2;
4496 	pt_entry_t *l3p, l3;
4497 
4498 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4499 	for (; sva < eva; sva = va_next) {
4500 		l0 = pmap_l0(pmap, sva);
4501 		if (pmap_load(l0) == 0) {
4502 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
4503 			if (va_next < sva)
4504 				va_next = eva;
4505 			continue;
4506 		}
4507 
4508 		va_next = (sva + L1_SIZE) & ~L1_OFFSET;
4509 		if (va_next < sva)
4510 			va_next = eva;
4511 		l1 = pmap_l0_to_l1(l0, sva);
4512 		if (pmap_load(l1) == 0)
4513 			continue;
4514 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
4515 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
4516 			KASSERT(va_next <= eva,
4517 			    ("partial update of non-transparent 1G page "
4518 			    "l1 %#lx sva %#lx eva %#lx va_next %#lx",
4519 			    pmap_load(l1), sva, eva, va_next));
4520 			MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0);
4521 			if ((pmap_load(l1) & mask) != nbits) {
4522 				pmap_store(l1, (pmap_load(l1) & ~mask) | nbits);
4523 				if (invalidate)
4524 					pmap_s1_invalidate_page(pmap, sva, true);
4525 			}
4526 			continue;
4527 		}
4528 
4529 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
4530 		if (va_next < sva)
4531 			va_next = eva;
4532 
4533 		l2 = pmap_l1_to_l2(l1, sva);
4534 		if (pmap_load(l2) == 0)
4535 			continue;
4536 
4537 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
4538 			if (sva + L2_SIZE == va_next && eva >= va_next) {
4539 				pmap_protect_l2(pmap, l2, sva, mask, nbits);
4540 				continue;
4541 			} else if ((pmap_load(l2) & mask) == nbits ||
4542 			    pmap_demote_l2(pmap, l2, sva) == NULL)
4543 				continue;
4544 		}
4545 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
4546 		    ("pmap_protect: Invalid L2 entry after demotion"));
4547 
4548 		if (va_next > eva)
4549 			va_next = eva;
4550 
4551 		va = va_next;
4552 		for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
4553 		    sva += L3_SIZE) {
4554 			l3 = pmap_load(l3p);
4555 
4556 			/*
4557 			 * Go to the next L3 entry if the current one is
4558 			 * invalid or already has the desired access
4559 			 * restrictions in place.  (The latter case occurs
4560 			 * frequently.  For example, in a "buildworld"
4561 			 * workload, almost 1 out of 4 L3 entries already
4562 			 * have the desired restrictions.)
4563 			 */
4564 			if (!pmap_l3_valid(l3) || (l3 & mask) == nbits) {
4565 				if (va != va_next) {
4566 					if (invalidate)
4567 						pmap_s1_invalidate_range(pmap,
4568 						    va, sva, true);
4569 					va = va_next;
4570 				}
4571 				if ((l3 & ATTR_CONTIGUOUS) != 0) {
4572 					/*
4573 					 * Does this L3C page extend beyond
4574 					 * the requested range?  Handle the
4575 					 * possibility that "va_next" is zero.
4576 					 */
4577 					if ((sva | L3C_OFFSET) > va_next - 1)
4578 						break;
4579 
4580 					/*
4581 					 * Skip ahead to the last L3_PAGE
4582 					 * within this L3C page.
4583 					 */
4584 					l3p = (pt_entry_t *)((uintptr_t)l3p |
4585 					    ((L3C_ENTRIES - 1) *
4586 					    sizeof(pt_entry_t)));
4587 					sva |= L3C_SIZE - L3_SIZE;
4588 				}
4589 				continue;
4590 			}
4591 
4592 			if ((l3 & ATTR_CONTIGUOUS) != 0) {
4593 				/*
4594 				 * Is this entire set of contiguous L3 entries
4595 				 * being protected?  Handle the possibility
4596 				 * that "va_next" is zero because of address
4597 				 * wraparound.
4598 				 */
4599 				if ((sva & L3C_OFFSET) == 0 &&
4600 				    sva + L3C_OFFSET <= va_next - 1) {
4601 					pmap_mask_set_l3c(pmap, l3p, sva, &va,
4602 					    va_next, mask, nbits);
4603 					l3p += L3C_ENTRIES - 1;
4604 					sva += L3C_SIZE - L3_SIZE;
4605 					continue;
4606 				}
4607 
4608 				(void)pmap_demote_l3c(pmap, l3p, sva);
4609 
4610 				/*
4611 				 * The L3 entry's accessed bit may have changed.
4612 				 */
4613 				l3 = pmap_load(l3p);
4614 			}
4615 			while (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) |
4616 			    nbits))
4617 				cpu_spinwait();
4618 
4619 			/*
4620 			 * When a dirty read/write mapping is write protected,
4621 			 * update the page's dirty field.
4622 			 */
4623 			if ((l3 & ATTR_SW_MANAGED) != 0 &&
4624 			    (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
4625 			    pmap_pte_dirty(pmap, l3))
4626 				vm_page_dirty(PTE_TO_VM_PAGE(l3));
4627 
4628 			if (va == va_next)
4629 				va = sva;
4630 		}
4631 		if (va != va_next && invalidate)
4632 			pmap_s1_invalidate_range(pmap, va, sva, true);
4633 	}
4634 }
4635 
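/*
 * Acquire the pmap lock and apply pmap_mask_set_locked() to the given range.
 */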
4636 static void
4637 pmap_mask_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t mask,
4638     pt_entry_t nbits, bool invalidate)
4639 {
4640 	PMAP_LOCK(pmap);
4641 	pmap_mask_set_locked(pmap, sva, eva, mask, nbits, invalidate);
4642 	PMAP_UNLOCK(pmap);
4643 }
4644 
4645 /*
4646  *	Set the physical protection on the
4647  *	specified range of this map as requested.
4648  */
4649 void
4650 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
4651 {
4652 	pt_entry_t mask, nbits;
4653 
4654 	PMAP_ASSERT_STAGE1(pmap);
4655 	KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
4656 	if (prot == VM_PROT_NONE) {
4657 		pmap_remove(pmap, sva, eva);
4658 		return;
4659 	}
4660 
4661 	mask = nbits = 0;
4662 	if ((prot & VM_PROT_WRITE) == 0) {
4663 		mask |= ATTR_S1_AP_RW_BIT | ATTR_SW_DBM;
4664 		nbits |= ATTR_S1_AP(ATTR_S1_AP_RO);
4665 	}
4666 	if ((prot & VM_PROT_EXECUTE) == 0) {
4667 		mask |= ATTR_S1_XN;
4668 		nbits |= ATTR_S1_XN;
4669 	}
4670 	if (pmap == kernel_pmap) {
4671 		mask |= ATTR_KERN_GP;
4672 		nbits |= ATTR_KERN_GP;
4673 	}
4674 	if (mask == 0)
4675 		return;
4676 
4677 	pmap_mask_set(pmap, sva, eva, mask, nbits, true);
4678 }
4679 
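/*
 * Mark the given page-aligned kernel address range with ATTR_SW_NO_PROMOTE so
 * that it is never promoted to a superpage mapping.
 */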
4680 void
4681 pmap_disable_promotion(vm_offset_t sva, vm_size_t size)
4682 {
4683 
4684 	MPASS((sva & L3_OFFSET) == 0);
4685 	MPASS(((sva + size) & L3_OFFSET) == 0);
4686 
4687 	pmap_mask_set(kernel_pmap, sva, sva + size, ATTR_SW_NO_PROMOTE,
4688 	    ATTR_SW_NO_PROMOTE, false);
4689 }
4690 
4691 /*
4692  * Inserts the specified page table page into the specified pmap's collection
4693  * of idle page table pages.  Each of a pmap's page table pages is responsible
4694  * for mapping a distinct range of virtual addresses.  The pmap's collection is
4695  * ordered by this virtual address range.
4696  *
4697  * If "promoted" is false, then the page table page "mpte" must be zero filled;
4698  * "mpte"'s valid field will be set to 0.
4699  *
4700  * If "promoted" is true and "all_l3e_AF_set" is false, then "mpte" must
4701  * contain valid mappings with identical attributes except for ATTR_AF;
4702  * "mpte"'s valid field will be set to 1.
4703  *
4704  * If "promoted" and "all_l3e_AF_set" are both true, then "mpte" must contain
4705  * valid mappings with identical attributes including ATTR_AF; "mpte"'s valid
4706  * field will be set to VM_PAGE_BITS_ALL.
4707  */
4708 static __inline int
4709 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted,
4710     bool all_l3e_AF_set)
4711 {
4712 
4713 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4714 	KASSERT(promoted || !all_l3e_AF_set,
4715 	    ("a zero-filled PTP can't have ATTR_AF set in every PTE"));
4716 	mpte->valid = promoted ? (all_l3e_AF_set ? VM_PAGE_BITS_ALL : 1) : 0;
4717 	return (vm_radix_insert(&pmap->pm_root, mpte));
4718 }
4719 
4720 /*
4721  * Removes the page table page mapping the specified virtual address from the
4722  * specified pmap's collection of idle page table pages, and returns it.
4723  * Otherwise, returns NULL if there is no page table page corresponding to the
4724  * specified virtual address.
4725  */
4726 static __inline vm_page_t
4727 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
4728 {
4729 
4730 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4731 	return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
4732 }
4733 
4734 /*
4735  * Performs a break-before-make update of a pmap entry. This is needed when
4736  * either promoting or demoting pages to ensure the TLB doesn't get into an
4737  * inconsistent state.
4738  */
4739 static void
4740 pmap_update_entry(pmap_t pmap, pd_entry_t *ptep, pd_entry_t newpte,
4741     vm_offset_t va, vm_size_t size)
4742 {
4743 	register_t intr;
4744 
4745 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4746 	KASSERT((newpte & ATTR_SW_NO_PROMOTE) == 0,
4747 	    ("%s: Updating non-promote pte", __func__));
4748 
4749 	/*
4750 	 * Ensure we don't get switched out with the page table in an
4751 	 * inconsistent state. We also need to ensure no interrupts fire
4752 	 * as they may make use of an address we are about to invalidate.
4753 	 */
4754 	intr = intr_disable();
4755 
4756 	/*
4757 	 * Clear the old mapping's valid bit, but leave the rest of the entry
4758 	 * unchanged, so that a lockless, concurrent pmap_kextract() can still
4759 	 * look up the physical address.
4760 	 */
4761 	pmap_clear_bits(ptep, ATTR_DESCR_VALID);
4762 
4763 	/*
4764 	 * When promoting, the L{1,2}_TABLE entry that is being replaced might
4765 	 * be cached, so we invalidate intermediate entries as well as final
4766 	 * entries.
4767 	 */
4768 	pmap_s1_invalidate_range(pmap, va, va + size, false);
4769 
4770 	/* Create the new mapping */
4771 	pmap_store(ptep, newpte);
4772 	dsb(ishst);
4773 
4774 	intr_restore(intr);
4775 }
4776 
4777 /*
4778  * Performs a break-before-make update of an ATTR_CONTIGUOUS mapping.
4779  */
4780 static void __nosanitizecoverage
4781 pmap_update_strided(pmap_t pmap, pd_entry_t *ptep, pd_entry_t *ptep_end,
4782     pd_entry_t newpte, vm_offset_t va, vm_offset_t stride, vm_size_t size)
4783 {
4784 	pd_entry_t *lip;
4785 	register_t intr;
4786 
4787 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4788 	KASSERT((newpte & ATTR_SW_NO_PROMOTE) == 0,
4789 	    ("%s: Updating non-promote pte", __func__));
4790 
4791 	/*
4792 	 * Ensure we don't get switched out with the page table in an
4793 	 * inconsistent state. We also need to ensure no interrupts fire
4794 	 * as they may make use of an address we are about to invalidate.
4795 	 */
4796 	intr = intr_disable();
4797 
4798 	/*
4799 	 * Clear the old mapping's valid bits, but leave the rest of each
4800 	 * entry unchanged, so that a lockless, concurrent pmap_kextract() can
4801 	 * still lookup the physical address.
4802 	 * still look up the physical address.
4803 	for (lip = ptep; lip < ptep_end; lip++)
4804 		pmap_clear_bits(lip, ATTR_DESCR_VALID);
4805 
4806 	/* Only final entries are changing. */
4807 	pmap_s1_invalidate_strided(pmap, va, va + size, stride, true);
4808 
4809 	/* Create the new mapping. */
4810 	for (lip = ptep; lip < ptep_end; lip++) {
4811 		pmap_store(lip, newpte);
4812 		newpte += stride;
4813 	}
4814 	dsb(ishst);
4815 
4816 	intr_restore(intr);
4817 }
4818 
4819 #if VM_NRESERVLEVEL > 0
4820 /*
4821  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
4822  * replace the many pv entries for the 4KB page mappings by a single pv entry
4823  * for the 2MB page mapping.
4824  */
4825 static void
4826 pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
4827     struct rwlock **lockp)
4828 {
4829 	struct md_page *pvh;
4830 	pv_entry_t pv;
4831 	vm_offset_t va_last;
4832 	vm_page_t m;
4833 
4834 	KASSERT((pa & L2_OFFSET) == 0,
4835 	    ("pmap_pv_promote_l2: pa is not 2mpage aligned"));
4836 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
4837 
4838 	/*
4839 	 * Transfer the first page's pv entry for this mapping to the 2mpage's
4840 	 * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
4841 	 * a transfer avoids the possibility that get_pv_entry() calls
4842 	 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
4843 	 * mappings that is being promoted.
4844 	 */
4845 	m = PHYS_TO_VM_PAGE(pa);
4846 	va = va & ~L2_OFFSET;
4847 	pv = pmap_pvh_remove(&m->md, pmap, va);
4848 	KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv not found"));
4849 	pvh = page_to_pvh(m);
4850 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
4851 	pvh->pv_gen++;
4852 	/* Free the remaining NPTEPG - 1 pv entries. */
4853 	va_last = va + L2_SIZE - PAGE_SIZE;
4854 	do {
4855 		m++;
4856 		va += PAGE_SIZE;
4857 		pmap_pvh_free(&m->md, pmap, va);
4858 	} while (va < va_last);
4859 }
4860 
4861 /*
4862  * Tries to promote the 512, contiguous 4KB page mappings that are within a
4863  * single level 2 table entry to a single 2MB page mapping.  For promotion
4864  * to occur, two conditions must be met: (1) the 4KB page mappings must map
4865  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
4866  * identical characteristics.
4867  */
4868 static bool
4869 pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va, vm_page_t mpte,
4870     struct rwlock **lockp)
4871 {
4872 	pt_entry_t all_l3e_AF, *firstl3, *l3, newl2, oldl3, pa;
4873 
4874 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4875 
4876 	/*
4877 	 * Currently, this function only supports promotion on stage 1 pmaps
4878 	 * because it tests stage 1 specific fields and performs a break-
4879 	 * before-make sequence that is incorrect for stage 2 pmaps.
4880 	 */
4881 	if (pmap->pm_stage != PM_STAGE1 || !pmap_ps_enabled(pmap))
4882 		return (false);
4883 
4884 	/*
4885 	 * Examine the first L3E in the specified PTP.  Abort if this L3E is
4886 	 * ineligible for promotion...
4887 	 */
4888 	firstl3 = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l2)));
4889 	newl2 = pmap_load(firstl3);
4890 	if ((newl2 & ATTR_SW_NO_PROMOTE) != 0)
4891 		return (false);
4892 	/* ... is not the first physical page within an L2 block */
4893 	if ((PTE_TO_PHYS(newl2) & L2_OFFSET) != 0 ||
4894 	    ((newl2 & ATTR_DESCR_MASK) != L3_PAGE)) { /* ... or is invalid */
4895 		counter_u64_add(pmap_l2_p_failures, 1);
4896 		CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
4897 		    " in pmap %p", va, pmap);
4898 		return (false);
4899 	}
4900 
4901 	/*
4902 	 * Both here and in the below "for" loop, to allow for repromotion
4903 	 * after MADV_FREE, conditionally write protect a clean L3E before
4904 	 * possibly aborting the promotion due to other L3E attributes.  Why?
4905 	 * Suppose that MADV_FREE is applied to a part of a superpage, the
4906 	 * address range [S, E).  pmap_advise() will demote the superpage
4907 	 * mapping, destroy the 4KB page mapping at the end of [S, E), and
4908 	 * set AP_RO and clear AF in the L3Es for the rest of [S, E).  Later,
4909 	 * imagine that the memory in [S, E) is recycled, but the last 4KB
4910 	 * page in [S, E) is not the last to be rewritten, or simply accessed.
4911 	 * In other words, there is still a 4KB page in [S, E), call it P,
4912 	 * that is writeable but AP_RO is set and AF is clear in P's L3E.
4913 	 * Unless we write protect P before aborting the promotion, if and
4914 	 * when P is finally rewritten, there won't be a page fault to trigger
4915 	 * repromotion.
4916 	 */
4917 setl2:
4918 	if ((newl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
4919 	    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
4920 		/*
4921 		 * When the mapping is clean, i.e., ATTR_S1_AP_RO is set,
4922 		 * ATTR_SW_DBM can be cleared without a TLB invalidation.
4923 		 */
4924 		if (!atomic_fcmpset_64(firstl3, &newl2, newl2 & ~ATTR_SW_DBM))
4925 			goto setl2;
4926 		newl2 &= ~ATTR_SW_DBM;
4927 		CTR2(KTR_PMAP, "pmap_promote_l2: protect for va %#lx"
4928 		    " in pmap %p", va & ~L2_OFFSET, pmap);
4929 	}
4930 
4931 	/*
4932 	 * Examine each of the other L3Es in the specified PTP.  Abort if this
4933 	 * L3E maps an unexpected 4KB physical page or does not have identical
4934 	 * characteristics to the first L3E.  If ATTR_AF is not set in every
4935 	 * PTE, then request that the PTP be refilled on demotion.
4936 	 */
4937 	all_l3e_AF = newl2 & ATTR_AF;
4938 	pa = (PTE_TO_PHYS(newl2) | (newl2 & ATTR_DESCR_MASK))
4939 	    + L2_SIZE - PAGE_SIZE;
4940 	for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
4941 		oldl3 = pmap_load(l3);
4942 		if ((PTE_TO_PHYS(oldl3) | (oldl3 & ATTR_DESCR_MASK)) != pa) {
4943 			counter_u64_add(pmap_l2_p_failures, 1);
4944 			CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
4945 			    " in pmap %p", va, pmap);
4946 			return (false);
4947 		}
4948 setl3:
4949 		if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
4950 		    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
4951 			/*
4952 			 * When the mapping is clean, i.e., ATTR_S1_AP_RO is
4953 			 * set, ATTR_SW_DBM can be cleared without a TLB
4954 			 * invalidation.
4955 			 */
4956 			if (!atomic_fcmpset_64(l3, &oldl3, oldl3 &
4957 			    ~ATTR_SW_DBM))
4958 				goto setl3;
4959 			oldl3 &= ~ATTR_SW_DBM;
4960 		}
4961 		if ((oldl3 & ATTR_PROMOTE) != (newl2 & ATTR_PROMOTE)) {
4962 			counter_u64_add(pmap_l2_p_failures, 1);
4963 			CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
4964 			    " in pmap %p", va, pmap);
4965 			return (false);
4966 		}
4967 		all_l3e_AF &= oldl3;
4968 		pa -= PAGE_SIZE;
4969 	}
4970 
4971 	/*
4972 	 * Unless all PTEs have ATTR_AF set, clear it from the superpage
4973 	 * mapping, so that promotions triggered by speculative mappings,
4974 	 * such as pmap_enter_quick(), don't automatically mark the
4975 	 * underlying pages as referenced.
4976 	 */
4977 	newl2 &= ~(ATTR_CONTIGUOUS | ATTR_AF | ATTR_DESCR_MASK) | all_l3e_AF;
4978 
4979 	/*
4980 	 * Save the page table page in its current state until the L2
4981 	 * mapping the superpage is demoted by pmap_demote_l2() or
4982 	 * destroyed by pmap_remove_l3().
4983 	 */
4984 	if (mpte == NULL)
4985 		mpte = PTE_TO_VM_PAGE(pmap_load(l2));
4986 	KASSERT(mpte >= vm_page_array &&
4987 	    mpte < &vm_page_array[vm_page_array_size],
4988 	    ("pmap_promote_l2: page table page is out of range"));
4989 	KASSERT(mpte->pindex == pmap_l2_pindex(va),
4990 	    ("pmap_promote_l2: page table page's pindex is wrong"));
4991 	if (pmap_insert_pt_page(pmap, mpte, true, all_l3e_AF != 0)) {
4992 		counter_u64_add(pmap_l2_p_failures, 1);
4993 		CTR2(KTR_PMAP,
4994 		    "pmap_promote_l2: failure for va %#lx in pmap %p", va,
4995 		    pmap);
4996 		return (false);
4997 	}
4998 
4999 	if ((newl2 & ATTR_SW_MANAGED) != 0)
5000 		pmap_pv_promote_l2(pmap, va, PTE_TO_PHYS(newl2), lockp);
5001 
5002 	pmap_update_entry(pmap, l2, newl2 | L2_BLOCK, va & ~L2_OFFSET, L2_SIZE);
5003 
5004 	counter_u64_add(pmap_l2_promotions, 1);
5005 	CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
5006 	    pmap);
5007 	return (true);
5008 }
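/*
 * Editorial illustration (not part of the original pmap.c): a plain-C model
 * of condition (1) above -- the 512 PTEs must map a 2MB-aligned, physically
 * contiguous range.  The EX_* constants assume the arm64 4KB translation
 * granule (4KB base pages, 512 L3 entries per page table page, 2MB L2
 * blocks), matching the sizes mentioned in the surrounding comments, and
 * ex_pte_to_phys() is a hypothetical stand-in for PTE_TO_PHYS().  The block
 * is compiled out so it cannot affect a build.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define	EX_PAGE_SIZE	4096UL				/* 4KB base page */
#define	EX_NL3PG	512UL				/* L3 entries per PTP */
#define	EX_L2_SIZE	(EX_NL3PG * EX_PAGE_SIZE)	/* 2MB */
#define	EX_L2_OFFSET	(EX_L2_SIZE - 1)

/* Hypothetical stand-in for PTE_TO_PHYS(): assume the PA sits in bits 47:12. */
static inline uint64_t
ex_pte_to_phys(uint64_t pte)
{
	return (pte & 0x0000fffffffff000UL);
}

/*
 * Returns true if the 512 PTEs describe a 2MB-aligned, physically contiguous
 * range.  pmap_promote_l2() additionally requires identical attributes in
 * every PTE, which this sketch does not model.
 */
static bool
ex_l2_promotable(const uint64_t ptes[EX_NL3PG])
{
	uint64_t pa;
	size_t i;

	pa = ex_pte_to_phys(ptes[0]);
	if ((pa & EX_L2_OFFSET) != 0)		/* first PA 2MB-aligned? */
		return (false);
	for (i = 1; i < EX_NL3PG; i++)		/* each PA follows the last? */
		if (ex_pte_to_phys(ptes[i]) != pa + i * EX_PAGE_SIZE)
			return (false);
	return (true);
}
#endif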
5009 
5010 /*
5011  * Tries to promote an aligned, contiguous set of base page mappings to a
5012  * single L3C page mapping.  For promotion to occur, two conditions must be
5013  * met: (1) the base page mappings must map aligned, contiguous physical
5014  * memory and (2) the base page mappings must have identical characteristics
5015  * except for the accessed flag.
5016  */
5017 static bool
5018 pmap_promote_l3c(pmap_t pmap, pd_entry_t *l3p, vm_offset_t va)
5019 {
5020 	pd_entry_t all_l3e_AF, firstl3c, *l3, oldl3, pa;
5021 
5022 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5023 
5024 	/*
5025 	 * Currently, this function only supports promotion on stage 1 pmaps
5026 	 * because it tests stage 1 specific fields and performs a break-
5027 	 * before-make sequence that is incorrect for stage 2 pmaps.
5028 	 */
5029 	if (pmap->pm_stage != PM_STAGE1 || !pmap_ps_enabled(pmap))
5030 		return (false);
5031 
5032 	/*
5033 	 * Compute the address of the first L3 entry in the superpage
5034 	 * candidate.
5035 	 */
5036 	l3p = (pt_entry_t *)((uintptr_t)l3p & ~((L3C_ENTRIES *
5037 	    sizeof(pt_entry_t)) - 1));
5038 
5039 	firstl3c = pmap_load(l3p);
5040 
5041 	/*
5042 	 * Examine the first L3 entry. Abort if this L3E is ineligible for
5043 	 * promotion...
5044 	 */
5045 	if ((firstl3c & ATTR_SW_NO_PROMOTE) != 0)
5046 		return (false);
5047 	/* ...is not properly aligned... */
5048 	if ((PTE_TO_PHYS(firstl3c) & L3C_OFFSET) != 0 ||
5049 	    (firstl3c & ATTR_DESCR_MASK) != L3_PAGE) { /* ...or is invalid. */
5050 		counter_u64_add(pmap_l3c_p_failures, 1);
5051 		CTR2(KTR_PMAP, "pmap_promote_l3c: failure for va %#lx"
5052 		    " in pmap %p", va, pmap);
5053 		return (false);
5054 	}
5055 
5056 	/*
5057 	 * If the first L3 entry is a clean read-write mapping, convert it
5058 	 * to a read-only mapping.  See pmap_promote_l2() for the rationale.
5059 	 */
5060 set_first:
5061 	if ((firstl3c & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
5062 	    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
5063 		/*
5064 		 * When the mapping is clean, i.e., ATTR_S1_AP_RO is set,
5065 		 * ATTR_SW_DBM can be cleared without a TLB invalidation.
5066 		 */
5067 		if (!atomic_fcmpset_64(l3p, &firstl3c, firstl3c & ~ATTR_SW_DBM))
5068 			goto set_first;
5069 		firstl3c &= ~ATTR_SW_DBM;
5070 		CTR2(KTR_PMAP, "pmap_promote_l3c: protect for va %#lx"
5071 		    " in pmap %p", va & ~L3C_OFFSET, pmap);
5072 	}
5073 
5074 	/*
5075 	 * Check that the rest of the L3 entries are compatible with the first,
5076 	 * and convert clean read-write mappings to read-only mappings.
5077 	 */
5078 	all_l3e_AF = firstl3c & ATTR_AF;
5079 	pa = (PTE_TO_PHYS(firstl3c) | (firstl3c & ATTR_DESCR_MASK)) +
5080 	    L3C_SIZE - PAGE_SIZE;
5081 	for (l3 = l3p + L3C_ENTRIES - 1; l3 > l3p; l3--) {
5082 		oldl3 = pmap_load(l3);
5083 		if ((PTE_TO_PHYS(oldl3) | (oldl3 & ATTR_DESCR_MASK)) != pa) {
5084 			counter_u64_add(pmap_l3c_p_failures, 1);
5085 			CTR2(KTR_PMAP, "pmap_promote_l3c: failure for va %#lx"
5086 			    " in pmap %p", va, pmap);
5087 			return (false);
5088 		}
5089 set_l3:
5090 		if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
5091 		    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
5092 			/*
5093 			 * When the mapping is clean, i.e., ATTR_S1_AP_RO is
5094 			 * set, ATTR_SW_DBM can be cleared without a TLB
5095 			 * invalidation.
5096 			 */
5097 			if (!atomic_fcmpset_64(l3, &oldl3, oldl3 &
5098 			    ~ATTR_SW_DBM))
5099 				goto set_l3;
5100 			oldl3 &= ~ATTR_SW_DBM;
5101 			CTR2(KTR_PMAP, "pmap_promote_l3c: protect for va %#lx"
5102 			    " in pmap %p", (oldl3 & ~ATTR_MASK & L3C_OFFSET) |
5103 			    (va & ~L3C_OFFSET), pmap);
5104 		}
5105 		if ((oldl3 & ATTR_PROMOTE) != (firstl3c & ATTR_PROMOTE)) {
5106 			counter_u64_add(pmap_l3c_p_failures, 1);
5107 			CTR2(KTR_PMAP, "pmap_promote_l3c: failure for va %#lx"
5108 			    " in pmap %p", va, pmap);
5109 			return (false);
5110 		}
5111 		all_l3e_AF &= oldl3;
5112 		pa -= PAGE_SIZE;
5113 	}
5114 
5115 	/*
5116 	 * Unless all PTEs have ATTR_AF set, clear it from the superpage
5117 	 * mapping, so that promotions triggered by speculative mappings,
5118 	 * such as pmap_enter_quick(), don't automatically mark the
5119 	 * underlying pages as referenced.
5120 	 */
5121 	firstl3c &= ~ATTR_AF | all_l3e_AF;
5122 
5123 	/*
5124 	 * Remake the mappings with the contiguous bit set.
5125 	 */
5126 	pmap_update_strided(pmap, l3p, l3p + L3C_ENTRIES, firstl3c |
5127 	    ATTR_CONTIGUOUS, va & ~L3C_OFFSET, L3_SIZE, L3C_SIZE);
5128 
5129 	counter_u64_add(pmap_l3c_promotions, 1);
5130 	CTR2(KTR_PMAP, "pmap_promote_l3c: success for va %#lx in pmap %p", va,
5131 	    pmap);
5132 	return (true);
5133 }
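/*
 * Editorial illustration (not part of the original pmap.c): pmap_promote_l3c()
 * finds the first PTE of the 64KB candidate by rounding the given PTE pointer
 * down to an L3C_ENTRIES boundary.  With the arm64 4KB granule an L3C mapping
 * is 16 contiguous base pages and a PTE is 8 bytes, so the mask below is 0x7f;
 * the ex_* names are hypothetical.  Compiled out so it cannot affect a build.
 */
#if 0
#include <stdint.h>

#define	EX_L3C_ENTRIES	16UL		/* PTEs per 64KB L3C mapping */

/* Round a PTE pointer down to the first entry of its 16-entry group. */
static inline uint64_t *
ex_l3c_first_pte(uint64_t *l3p)
{
	return ((uint64_t *)((uintptr_t)l3p &
	    ~((EX_L3C_ENTRIES * sizeof(uint64_t)) - 1)));
}

/*
 * Example: a PTE at DMAP offset 0x...2c8 rounds down to 0x...280, the start
 * of its 128-byte (16-entry) group.
 */
#endif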
5134 #endif /* VM_NRESERVLEVEL > 0 */
5135 
5136 static int
5137 pmap_enter_largepage(pmap_t pmap, vm_offset_t va, pt_entry_t pte, int flags,
5138     int psind)
5139 {
5140 	pd_entry_t *l0p, *l1p, *l2p, *l3p, newpte, origpte, *tl3p;
5141 	vm_page_t mp;
5142 
5143 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5144 	KASSERT(psind > 0 && psind < MAXPAGESIZES,
5145 	    ("psind %d unexpected", psind));
5146 	KASSERT((PTE_TO_PHYS(pte) & (pagesizes[psind] - 1)) == 0,
5147 	    ("unaligned phys address %#lx pte %#lx psind %d",
5148 	    PTE_TO_PHYS(pte), pte, psind));
5149 
5150 restart:
5151 	newpte = pte;
5152 	if (!pmap_bti_same(pmap, va, va + pagesizes[psind], &newpte))
5153 		return (KERN_PROTECTION_FAILURE);
5154 	if (psind == 3) {
5155 		PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
5156 
5157 		KASSERT(pagesizes[psind] == L1_SIZE,
5158 		    ("pagesizes[%d] != L1_SIZE", psind));
5159 		l0p = pmap_l0(pmap, va);
5160 		if ((pmap_load(l0p) & ATTR_DESCR_VALID) == 0) {
5161 			mp = _pmap_alloc_l3(pmap, pmap_l0_pindex(va), NULL);
5162 			if (mp == NULL) {
5163 				if ((flags & PMAP_ENTER_NOSLEEP) != 0)
5164 					return (KERN_RESOURCE_SHORTAGE);
5165 				PMAP_UNLOCK(pmap);
5166 				vm_wait(NULL);
5167 				PMAP_LOCK(pmap);
5168 				goto restart;
5169 			}
5170 			l1p = pmap_l0_to_l1(l0p, va);
5171 			KASSERT(l1p != NULL, ("va %#lx lost l1 entry", va));
5172 			origpte = pmap_load(l1p);
5173 		} else {
5174 			l1p = pmap_l0_to_l1(l0p, va);
5175 			KASSERT(l1p != NULL, ("va %#lx lost l1 entry", va));
5176 			origpte = pmap_load(l1p);
5177 			if ((origpte & ATTR_DESCR_VALID) == 0) {
5178 				mp = PTE_TO_VM_PAGE(pmap_load(l0p));
5179 				mp->ref_count++;
5180 			}
5181 		}
5182 		KASSERT((PTE_TO_PHYS(origpte) == PTE_TO_PHYS(newpte) &&
5183 		    (origpte & ATTR_DESCR_MASK) == L1_BLOCK) ||
5184 		    (origpte & ATTR_DESCR_VALID) == 0,
5185 		    ("va %#lx changing 1G phys page l1 %#lx newpte %#lx",
5186 		    va, origpte, newpte));
5187 		pmap_store(l1p, newpte);
5188 	} else if (psind == 2) {
5189 		KASSERT(pagesizes[psind] == L2_SIZE,
5190 		    ("pagesizes[%d] != L2_SIZE", psind));
5191 		l2p = pmap_l2(pmap, va);
5192 		if (l2p == NULL) {
5193 			mp = _pmap_alloc_l3(pmap, pmap_l1_pindex(va), NULL);
5194 			if (mp == NULL) {
5195 				if ((flags & PMAP_ENTER_NOSLEEP) != 0)
5196 					return (KERN_RESOURCE_SHORTAGE);
5197 				PMAP_UNLOCK(pmap);
5198 				vm_wait(NULL);
5199 				PMAP_LOCK(pmap);
5200 				goto restart;
5201 			}
5202 			l2p = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
5203 			l2p = &l2p[pmap_l2_index(va)];
5204 			origpte = pmap_load(l2p);
5205 		} else {
5206 			l1p = pmap_l1(pmap, va);
5207 			origpte = pmap_load(l2p);
5208 			if ((origpte & ATTR_DESCR_VALID) == 0) {
5209 				mp = PTE_TO_VM_PAGE(pmap_load(l1p));
5210 				mp->ref_count++;
5211 			}
5212 		}
5213 		KASSERT((origpte & ATTR_DESCR_VALID) == 0 ||
5214 		    ((origpte & ATTR_DESCR_MASK) == L2_BLOCK &&
5215 		    PTE_TO_PHYS(origpte) == PTE_TO_PHYS(newpte)),
5216 		    ("va %#lx changing 2M phys page l2 %#lx newpte %#lx",
5217 		    va, origpte, newpte));
5218 		pmap_store(l2p, newpte);
5219 	} else /* (psind == 1) */ {
5220 		KASSERT(pagesizes[psind] == L3C_SIZE,
5221 		    ("pagesizes[%d] != L3C_SIZE", psind));
5222 		l2p = pmap_l2(pmap, va);
5223 		if (l2p == NULL || (pmap_load(l2p) & ATTR_DESCR_VALID) == 0) {
5224 			mp = _pmap_alloc_l3(pmap, pmap_l2_pindex(va), NULL);
5225 			if (mp == NULL) {
5226 				if ((flags & PMAP_ENTER_NOSLEEP) != 0)
5227 					return (KERN_RESOURCE_SHORTAGE);
5228 				PMAP_UNLOCK(pmap);
5229 				vm_wait(NULL);
5230 				PMAP_LOCK(pmap);
5231 				goto restart;
5232 			}
5233 			mp->ref_count += L3C_ENTRIES - 1;
5234 			l3p = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
5235 			l3p = &l3p[pmap_l3_index(va)];
5236 		} else {
5237 			l3p = pmap_l2_to_l3(l2p, va);
5238 			if ((pmap_load(l3p) & ATTR_DESCR_VALID) == 0) {
5239 				mp = PTE_TO_VM_PAGE(pmap_load(l2p));
5240 				mp->ref_count += L3C_ENTRIES;
5241 			}
5242 		}
5243 		for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
5244 			origpte = pmap_load(tl3p);
5245 			KASSERT((origpte & ATTR_DESCR_VALID) == 0 ||
5246 			    ((origpte & ATTR_CONTIGUOUS) != 0 &&
5247 			    PTE_TO_PHYS(origpte) == PTE_TO_PHYS(newpte)),
5248 			    ("va %#lx changing 64K phys page l3 %#lx newpte %#lx",
5249 			    va, origpte, newpte));
5250 			pmap_store(tl3p, newpte);
5251 			newpte += L3_SIZE;
5252 		}
5253 	}
5254 	dsb(ishst);
5255 
5256 	if ((origpte & ATTR_DESCR_VALID) == 0)
5257 		pmap_resident_count_inc(pmap, pagesizes[psind] / PAGE_SIZE);
5258 	if ((newpte & ATTR_SW_WIRED) != 0 && (origpte & ATTR_SW_WIRED) == 0)
5259 		pmap->pm_stats.wired_count += pagesizes[psind] / PAGE_SIZE;
5260 	else if ((newpte & ATTR_SW_WIRED) == 0 &&
5261 	    (origpte & ATTR_SW_WIRED) != 0)
5262 		pmap->pm_stats.wired_count -= pagesizes[psind] / PAGE_SIZE;
5263 
5264 	return (KERN_SUCCESS);
5265 }
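/*
 * Editorial illustration (not part of the original pmap.c): the psind values
 * handled above select the mapping size (1 = L3C, 2 = L2 block, 3 = L1
 * block), and the KASSERT at the top of pmap_enter_largepage() requires the
 * physical address to be aligned to that size.  The sizes assume the arm64
 * 4KB granule (64KB, 2MB, 1GB); the ex_* names are hypothetical.  Compiled
 * out so it cannot affect a build.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static const uint64_t ex_pagesizes[] = {
	[1] = 64UL * 1024,		/* psind 1: L3C (contiguous L3s) */
	[2] = 2UL * 1024 * 1024,	/* psind 2: L2 block */
	[3] = 1UL * 1024 * 1024 * 1024,	/* psind 3: L1 block */
};

/* Mirrors the alignment precondition on the physical address. */
static inline bool
ex_largepage_pa_aligned(uint64_t pa, int psind)
{
	return (psind >= 1 && psind <= 3 &&
	    (pa & (ex_pagesizes[psind] - 1)) == 0);
}
#endif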
5266 
5267 /*
5268  *	Insert the given physical page (p) at
5269  *	the specified virtual address (v) in the
5270  *	target physical map with the protection requested.
5271  *
5272  *	If specified, the page will be wired down, meaning
5273  *	that the related pte can not be reclaimed.
5274  *
5275  *	NB:  This is the only routine which MAY NOT lazy-evaluate
5276  *	or lose information.  That is, this routine must actually
5277  *	insert this page into the given map NOW.
5278  */
5279 int
5280 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
5281     u_int flags, int8_t psind)
5282 {
5283 	struct rwlock *lock;
5284 	pd_entry_t *pde;
5285 	pt_entry_t new_l3, orig_l3;
5286 	pt_entry_t *l2, *l3;
5287 	pv_entry_t pv;
5288 	vm_paddr_t opa, pa;
5289 	vm_page_t mpte, om;
5290 	bool nosleep;
5291 	int full_lvl, lvl, rv;
5292 
5293 	KASSERT(ADDR_IS_CANONICAL(va),
5294 	    ("%s: Address not in canonical form: %lx", __func__, va));
5295 
5296 	va = trunc_page(va);
5297 	if ((m->oflags & VPO_UNMANAGED) == 0)
5298 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
5299 	pa = VM_PAGE_TO_PHYS(m);
5300 	new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_AF | pmap_sh_attr |
5301 	    L3_PAGE);
5302 	new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr);
5303 	new_l3 |= pmap_pte_prot(pmap, prot);
5304 	if ((flags & PMAP_ENTER_WIRED) != 0)
5305 		new_l3 |= ATTR_SW_WIRED;
5306 	if (pmap->pm_stage == PM_STAGE1) {
5307 		if (!ADDR_IS_KERNEL(va))
5308 			new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
5309 		else
5310 			new_l3 |= ATTR_S1_UXN;
5311 		if (pmap != kernel_pmap)
5312 			new_l3 |= ATTR_S1_nG;
5313 	} else {
5314 		/*
5315 		 * Clear the access flag on executable mappings, this will be
5316 		 * set later when the page is accessed. The fault handler is
5317 		 * required to invalidate the I-cache.
5318 		 *
5319 		 * TODO: Switch to the valid flag to allow hardware management
5320 		 * of the access flag. Much of the pmap code assumes the
5321 		 * valid flag is set and fails to destroy the old page tables
5322 		 * correctly if it is clear.
5323 		 */
5324 		if (prot & VM_PROT_EXECUTE)
5325 			new_l3 &= ~ATTR_AF;
5326 	}
5327 	if ((m->oflags & VPO_UNMANAGED) == 0) {
5328 		new_l3 |= ATTR_SW_MANAGED;
5329 		if ((prot & VM_PROT_WRITE) != 0) {
5330 			new_l3 |= ATTR_SW_DBM;
5331 			if ((flags & VM_PROT_WRITE) == 0) {
5332 				if (pmap->pm_stage == PM_STAGE1)
5333 					new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
5334 				else
5335 					new_l3 &=
5336 					    ~ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
5337 			}
5338 		}
5339 	}
5340 
5341 	CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
5342 
5343 	lock = NULL;
5344 	PMAP_LOCK(pmap);
5345 	if ((flags & PMAP_ENTER_LARGEPAGE) != 0) {
5346 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
5347 		    ("managed largepage va %#lx flags %#x", va, flags));
5348 		if (psind == 3) {
5349 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
5350 			new_l3 &= ~L3_PAGE;
5351 			new_l3 |= L1_BLOCK;
5352 		} else if (psind == 2) {
5353 			new_l3 &= ~L3_PAGE;
5354 			new_l3 |= L2_BLOCK;
5355 		} else /* (psind == 1) */
5356 			new_l3 |= ATTR_CONTIGUOUS;
5357 		rv = pmap_enter_largepage(pmap, va, new_l3, flags, psind);
5358 		goto out;
5359 	}
5360 	if (psind == 2) {
5361 		/* Assert the required virtual and physical alignment. */
5362 		KASSERT((va & L2_OFFSET) == 0, ("pmap_enter: va unaligned"));
5363 		KASSERT(m->psind > 1, ("pmap_enter: m->psind < psind"));
5364 		rv = pmap_enter_l2(pmap, va, (new_l3 & ~L3_PAGE) | L2_BLOCK,
5365 		    flags, m, &lock);
5366 		goto out;
5367 	}
5368 	mpte = NULL;
5369 	if (psind == 1) {
5370 		KASSERT((va & L3C_OFFSET) == 0, ("pmap_enter: va unaligned"));
5371 		KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
5372 		rv = pmap_enter_l3c(pmap, va, new_l3 | ATTR_CONTIGUOUS, flags,
5373 		    m, &mpte, &lock);
5374 #if VM_NRESERVLEVEL > 0
5375 		/*
5376 		 * Attempt L2 promotion, if both the PTP and a level 1
5377 		 * reservation are fully populated.
5378 		 */
5379 		if (rv == KERN_SUCCESS &&
5380 		    (mpte == NULL || mpte->ref_count == NL3PG) &&
5381 		    (m->flags & PG_FICTITIOUS) == 0 &&
5382 		    vm_reserv_level_iffullpop(m) == 1) {
5383 			pde = pmap_l2(pmap, va);
5384 			(void)pmap_promote_l2(pmap, pde, va, mpte, &lock);
5385 		}
5386 #endif
5387 		goto out;
5388 	}
5389 
5390 	/*
5391 	 * In the case that a page table page is not
5392 	 * resident, we are creating it here.
5393 	 */
5394 retry:
5395 	pde = pmap_pde(pmap, va, &lvl);
5396 	if (pde != NULL && lvl == 2) {
5397 		l3 = pmap_l2_to_l3(pde, va);
5398 		if (!ADDR_IS_KERNEL(va) && mpte == NULL) {
5399 			mpte = PTE_TO_VM_PAGE(pmap_load(pde));
5400 			mpte->ref_count++;
5401 		}
5402 		goto havel3;
5403 	} else if (pde != NULL && lvl == 1) {
5404 		l2 = pmap_l1_to_l2(pde, va);
5405 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
5406 		    (l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
5407 			l3 = &l3[pmap_l3_index(va)];
5408 			if (!ADDR_IS_KERNEL(va)) {
5409 				mpte = PTE_TO_VM_PAGE(pmap_load(l2));
5410 				mpte->ref_count++;
5411 			}
5412 			goto havel3;
5413 		}
5414 		/* We need to allocate an L3 table. */
5415 	}
5416 	if (!ADDR_IS_KERNEL(va)) {
5417 		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
5418 
5419 		/*
5420 		 * We use _pmap_alloc_l3() instead of pmap_alloc_l3() in order
5421 		 * to handle the possibility that a superpage mapping for "va"
5422 		 * was created while we slept.
5423 		 */
5424 		mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va),
5425 		    nosleep ? NULL : &lock);
5426 		if (mpte == NULL && nosleep) {
5427 			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
5428 			rv = KERN_RESOURCE_SHORTAGE;
5429 			goto out;
5430 		}
5431 		goto retry;
5432 	} else
5433 		panic("pmap_enter: missing L3 table for kernel va %#lx", va);
5434 
5435 havel3:
5436 	orig_l3 = pmap_load(l3);
5437 	opa = PTE_TO_PHYS(orig_l3);
5438 	pv = NULL;
5439 	new_l3 |= pmap_pte_bti(pmap, va);
5440 
5441 	/*
5442 	 * Is the specified virtual address already mapped?
5443 	 */
5444 	if (pmap_l3_valid(orig_l3)) {
5445 		/*
5446 		 * Wiring change, just update stats. We don't worry about
5447 		 * wiring PT pages as they remain resident as long as there
5448 		 * are valid mappings in them. Hence, if a user page is wired,
5449 		 * the PT page will be also.
5450 		 */
5451 		if ((flags & PMAP_ENTER_WIRED) != 0 &&
5452 		    (orig_l3 & ATTR_SW_WIRED) == 0)
5453 			pmap->pm_stats.wired_count++;
5454 		else if ((flags & PMAP_ENTER_WIRED) == 0 &&
5455 		    (orig_l3 & ATTR_SW_WIRED) != 0)
5456 			pmap->pm_stats.wired_count--;
5457 
5458 		/*
5459 		 * Remove the extra PT page reference.
5460 		 */
5461 		if (mpte != NULL) {
5462 			mpte->ref_count--;
5463 			KASSERT(mpte->ref_count > 0,
5464 			    ("pmap_enter: missing reference to page table page,"
5465 			     " va: 0x%lx", va));
5466 		}
5467 
5468 		/*
5469 		 * Has the physical page changed?
5470 		 */
5471 		if (opa == pa) {
5472 			/*
5473 			 * No, might be a protection or wiring change.
5474 			 */
5475 			if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
5476 			    (new_l3 & ATTR_SW_DBM) != 0)
5477 				vm_page_aflag_set(m, PGA_WRITEABLE);
5478 			goto validate;
5479 		}
5480 
5481 		/*
5482 		 * The physical page has changed.  Temporarily invalidate
5483 		 * the mapping.
5484 		 */
5485 		if ((orig_l3 & ATTR_CONTIGUOUS) != 0)
5486 			(void)pmap_demote_l3c(pmap, l3, va);
5487 		orig_l3 = pmap_load_clear(l3);
5488 		KASSERT(PTE_TO_PHYS(orig_l3) == opa,
5489 		    ("pmap_enter: unexpected pa update for %#lx", va));
5490 		if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
5491 			om = PHYS_TO_VM_PAGE(opa);
5492 
5493 			/*
5494 			 * The pmap lock is sufficient to synchronize with
5495 			 * concurrent calls to pmap_page_test_mappings() and
5496 			 * pmap_ts_referenced().
5497 			 */
5498 			if (pmap_pte_dirty(pmap, orig_l3))
5499 				vm_page_dirty(om);
5500 			if ((orig_l3 & ATTR_AF) != 0) {
5501 				pmap_invalidate_page(pmap, va, true);
5502 				vm_page_aflag_set(om, PGA_REFERENCED);
5503 			}
5504 			CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, om);
5505 			pv = pmap_pvh_remove(&om->md, pmap, va);
5506 			if ((m->oflags & VPO_UNMANAGED) != 0)
5507 				free_pv_entry(pmap, pv);
5508 			if ((om->a.flags & PGA_WRITEABLE) != 0 &&
5509 			    TAILQ_EMPTY(&om->md.pv_list) &&
5510 			    ((om->flags & PG_FICTITIOUS) != 0 ||
5511 			    TAILQ_EMPTY(&page_to_pvh(om)->pv_list)))
5512 				vm_page_aflag_clear(om, PGA_WRITEABLE);
5513 		} else {
5514 			KASSERT((orig_l3 & ATTR_AF) != 0,
5515 			    ("pmap_enter: unmanaged mapping lacks ATTR_AF"));
5516 			pmap_invalidate_page(pmap, va, true);
5517 		}
5518 		orig_l3 = 0;
5519 	} else {
5520 		/*
5521 		 * Increment the counters.
5522 		 */
5523 		if ((new_l3 & ATTR_SW_WIRED) != 0)
5524 			pmap->pm_stats.wired_count++;
5525 		pmap_resident_count_inc(pmap, 1);
5526 	}
5527 	/*
5528 	 * Enter on the PV list if part of our managed memory.
5529 	 */
5530 	if ((m->oflags & VPO_UNMANAGED) == 0) {
5531 		if (pv == NULL) {
5532 			pv = get_pv_entry(pmap, &lock);
5533 			pv->pv_va = va;
5534 		}
5535 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
5536 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5537 		m->md.pv_gen++;
5538 		if ((new_l3 & ATTR_SW_DBM) != 0)
5539 			vm_page_aflag_set(m, PGA_WRITEABLE);
5540 	}
5541 
5542 validate:
5543 	if (pmap->pm_stage == PM_STAGE1) {
5544 		/*
5545 		 * Sync the icache if exec permission is requested and the
5546 		 * VM_MEMATTR_WRITE_BACK attribute is set. Do it now, before the
5547 		 * mapping is stored and made valid for hardware table walks. If
5548 		 * done later, other CPUs could access this page before the
5549 		 * caches are properly synced. Don't do it for kernel memory,
5550 		 * which is mapped with exec permission even if the memory isn't
5551 		 * going to hold executable code. The only time an icache sync is
5552 		 * needed is after a kernel module is loaded and its relocation
5553 		 * info is processed, which is done in elf_cpu_load_file().
5554 		 */
5555 		if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
5556 		    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK &&
5557 		    (opa != pa || (orig_l3 & ATTR_S1_XN))) {
5558 			PMAP_ASSERT_STAGE1(pmap);
5559 			cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa),
5560 			    PAGE_SIZE);
5561 		}
5562 	} else {
5563 		cpu_dcache_wb_range((void *)PHYS_TO_DMAP(pa), PAGE_SIZE);
5564 	}
5565 
5566 	/*
5567 	 * Update the L3 entry
5568 	 */
5569 	if (pmap_l3_valid(orig_l3)) {
5570 		KASSERT(opa == pa, ("pmap_enter: invalid update"));
5571 		if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) {
5572 			/* same PA, different attributes */
5573 			if ((orig_l3 & ATTR_CONTIGUOUS) != 0)
5574 				(void)pmap_demote_l3c(pmap, l3, va);
5575 			orig_l3 = pmap_load_store(l3, new_l3);
5576 			pmap_invalidate_page(pmap, va, true);
5577 			if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
5578 			    pmap_pte_dirty(pmap, orig_l3))
5579 				vm_page_dirty(m);
5580 		} else {
5581 			/*
5582 			 * orig_l3 == new_l3
5583 			 * This can happen if multiple threads simultaneously
5584 			 * access a not-yet-mapped page. This is bad for
5585 			 * performance since it can cause a full
5586 			 * demotion-NOP-promotion cycle.
5587 			 * Other possible reasons are:
5588 			 * - the VM and pmap memory layouts have diverged
5589 			 * - a TLB flush is missing somewhere and the CPU
5590 			 *   doesn't see the actual mapping.
5591 			 */
5592 			CTR4(KTR_PMAP, "%s: already mapped page - "
5593 			    "pmap %p va %#lx pte %#lx",
5594 			    __func__, pmap, va, new_l3);
5595 		}
5596 	} else {
5597 		/* New mapping */
5598 		pmap_store(l3, new_l3);
5599 		dsb(ishst);
5600 	}
5601 
5602 #if VM_NRESERVLEVEL > 0
5603 	/*
5604 	 * First, attempt L3C promotion, if the virtual and physical addresses
5605 	 * are aligned with each other and an underlying reservation has the
5606 	 * neighboring L3 pages allocated.  The first condition is simply an
5607 	 * optimization that recognizes some eventual promotion failures early
5608 	 * at a lower run-time cost.  Then, if both a level 1 reservation and
5609 	 * the PTP are fully populated, attempt L2 promotion.
5610 	 */
5611 	if ((va & L3C_OFFSET) == (pa & L3C_OFFSET) &&
5612 	    (m->flags & PG_FICTITIOUS) == 0 &&
5613 	    (full_lvl = vm_reserv_level_iffullpop(m)) >= 0 &&
5614 	    pmap_promote_l3c(pmap, l3, va) &&
5615 	    full_lvl == 1 && (mpte == NULL || mpte->ref_count == NL3PG))
5616 		(void)pmap_promote_l2(pmap, pde, va, mpte, &lock);
5617 #endif
5618 
5619 	rv = KERN_SUCCESS;
5620 out:
5621 	if (lock != NULL)
5622 		rw_wunlock(lock);
5623 	PMAP_UNLOCK(pmap);
5624 	return (rv);
5625 }
5626 
5627 /*
5628  * Tries to create a read- and/or execute-only L2 page mapping.  Returns
5629  * KERN_SUCCESS if the mapping was created.  Otherwise, returns an error
5630  * value.  See pmap_enter_l2() for the possible error values when "no sleep",
5631  * "no replace", and "no reclaim" are specified.
5632  */
5633 static int
5634 pmap_enter_l2_rx(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
5635     struct rwlock **lockp)
5636 {
5637 	pd_entry_t new_l2;
5638 
5639 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5640 	PMAP_ASSERT_STAGE1(pmap);
5641 	KASSERT(ADDR_IS_CANONICAL(va),
5642 	    ("%s: Address not in canonical form: %lx", __func__, va));
5643 
5644 	new_l2 = (pd_entry_t)(VM_PAGE_TO_PTE(m) | pmap_sh_attr |
5645 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
5646 	    L2_BLOCK);
5647 	if ((m->oflags & VPO_UNMANAGED) == 0)
5648 		new_l2 |= ATTR_SW_MANAGED;
5649 	else
5650 		new_l2 |= ATTR_AF;
5651 	if ((prot & VM_PROT_EXECUTE) == 0 ||
5652 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
5653 		new_l2 |= ATTR_S1_XN;
5654 	if (!ADDR_IS_KERNEL(va))
5655 		new_l2 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
5656 	else
5657 		new_l2 |= ATTR_S1_UXN;
5658 	if (pmap != kernel_pmap)
5659 		new_l2 |= ATTR_S1_nG;
5660 	return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
5661 	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, m, lockp));
5662 }
5663 
5664 /*
5665  * Returns true if every page table entry in the specified page table is
5666  * zero.
5667  */
5668 static bool
5669 pmap_every_pte_zero(vm_paddr_t pa)
5670 {
5671 	pt_entry_t *pt_end, *pte;
5672 
5673 	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
5674 	pte = (pt_entry_t *)PHYS_TO_DMAP(pa);
5675 	for (pt_end = pte + Ln_ENTRIES; pte < pt_end; pte++) {
5676 		if (*pte != 0)
5677 			return (false);
5678 	}
5679 	return (true);
5680 }
5681 
5682 /*
5683  * Tries to create the specified L2 page mapping.  Returns KERN_SUCCESS if
5684  * the mapping was created, and one of KERN_FAILURE, KERN_NO_SPACE, or
5685  * KERN_RESOURCE_SHORTAGE otherwise.  Returns KERN_FAILURE if
5686  * PMAP_ENTER_NOREPLACE was specified and a base page mapping already exists
5687  * within the L2 virtual address range starting at the specified virtual
5688  * address.  Returns KERN_NO_SPACE if PMAP_ENTER_NOREPLACE was specified and
5689  * an L2 page mapping already exists at the specified virtual address.  Returns
5690  * KERN_RESOURCE_SHORTAGE if either (1) PMAP_ENTER_NOSLEEP was specified and a
5691  * page table page allocation failed or (2) PMAP_ENTER_NORECLAIM was specified
5692  * and a PV entry allocation failed.
5693  */
5694 static int
5695 pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
5696     vm_page_t m, struct rwlock **lockp)
5697 {
5698 	struct spglist free;
5699 	pd_entry_t *l2, old_l2;
5700 	vm_page_t l2pg, mt;
5701 	vm_page_t uwptpg;
5702 
5703 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5704 	KASSERT(ADDR_IS_CANONICAL(va),
5705 	    ("%s: Address not in canonical form: %lx", __func__, va));
5706 
5707 	if ((l2 = pmap_alloc_l2(pmap, va, &l2pg, (flags &
5708 	    PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
5709 		CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p",
5710 		    va, pmap);
5711 		return (KERN_RESOURCE_SHORTAGE);
5712 	}
5713 
5714 	/*
5715 	 * If bti is not the same for the whole l2 range, return failure
5716 	 * and let vm_fault() cope.  Check after l2 allocation, since
5717 	 * it could sleep.
5718 	 */
5719 	if (!pmap_bti_same(pmap, va, va + L2_SIZE, &new_l2)) {
5720 		KASSERT(l2pg != NULL, ("pmap_enter_l2: missing L2 PTP"));
5721 		pmap_abort_ptp(pmap, va, l2pg);
5722 		return (KERN_PROTECTION_FAILURE);
5723 	}
5724 
5725 	/*
5726 	 * If there are existing mappings, either abort or remove them.
5727 	 */
5728 	if ((old_l2 = pmap_load(l2)) != 0) {
5729 		KASSERT(l2pg == NULL || l2pg->ref_count > 1,
5730 		    ("pmap_enter_l2: l2pg's ref count is too low"));
5731 		if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
5732 			if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
5733 				if (l2pg != NULL)
5734 					l2pg->ref_count--;
5735 				CTR2(KTR_PMAP,
5736 				    "pmap_enter_l2: no space for va %#lx"
5737 				    " in pmap %p", va, pmap);
5738 				return (KERN_NO_SPACE);
5739 			} else if (!ADDR_IS_KERNEL(va) ||
5740 			    !pmap_every_pte_zero(PTE_TO_PHYS(old_l2))) {
5741 				if (l2pg != NULL)
5742 					l2pg->ref_count--;
5743 				CTR2(KTR_PMAP,
5744 				    "pmap_enter_l2: failure for va %#lx"
5745 				    " in pmap %p", va, pmap);
5746 				return (KERN_FAILURE);
5747 			}
5748 		}
5749 		SLIST_INIT(&free);
5750 		if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK)
5751 			(void)pmap_remove_l2(pmap, l2, va,
5752 			    pmap_load(pmap_l1(pmap, va)), &free, lockp);
5753 		else
5754 			pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
5755 			    &free, lockp);
5756 		if (!ADDR_IS_KERNEL(va)) {
5757 			vm_page_free_pages_toq(&free, true);
5758 			KASSERT(pmap_load(l2) == 0,
5759 			    ("pmap_enter_l2: non-zero L2 entry %p", l2));
5760 		} else {
5761 			KASSERT(SLIST_EMPTY(&free),
5762 			    ("pmap_enter_l2: freed kernel page table page"));
5763 
5764 			/*
5765 			 * Both pmap_remove_l2() and pmap_remove_l3_range()
5766 			 * will leave the kernel page table page zero filled.
5767 			 * Nonetheless, the TLB could have an intermediate
5768 			 * entry for the kernel page table page, so request
5769 			 * an invalidation at all levels after clearing
5770 			 * the L2_TABLE entry.
5771 			 */
5772 			mt = PTE_TO_VM_PAGE(pmap_load(l2));
5773 			if (pmap_insert_pt_page(pmap, mt, false, false))
5774 				panic("pmap_enter_l2: trie insert failed");
5775 			pmap_clear(l2);
5776 			pmap_s1_invalidate_page(pmap, va, false);
5777 		}
5778 	}
5779 
5780 	/*
5781 	 * Allocate leaf ptpage for wired userspace pages.
5782 	 */
5783 	uwptpg = NULL;
5784 	if ((new_l2 & ATTR_SW_WIRED) != 0 && pmap != kernel_pmap) {
5785 		uwptpg = vm_page_alloc_noobj(VM_ALLOC_WIRED);
5786 		if (uwptpg == NULL) {
5787 			pmap_abort_ptp(pmap, va, l2pg);
5788 			return (KERN_RESOURCE_SHORTAGE);
5789 		}
5790 		uwptpg->pindex = pmap_l2_pindex(va);
5791 		if (pmap_insert_pt_page(pmap, uwptpg, true, false)) {
5792 			vm_page_unwire_noq(uwptpg);
5793 			vm_page_free(uwptpg);
5794 			pmap_abort_ptp(pmap, va, l2pg);
5795 			return (KERN_RESOURCE_SHORTAGE);
5796 		}
5797 		pmap_resident_count_inc(pmap, 1);
5798 		uwptpg->ref_count = NL3PG;
5799 	}
5800 	if ((new_l2 & ATTR_SW_MANAGED) != 0) {
5801 		/*
5802 		 * Abort this mapping if its PV entry could not be created.
5803 		 */
5804 		if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
5805 			if (l2pg != NULL)
5806 				pmap_abort_ptp(pmap, va, l2pg);
5807 			if (uwptpg != NULL) {
5808 				mt = pmap_remove_pt_page(pmap, va);
5809 				KASSERT(mt == uwptpg,
5810 				    ("removed pt page %p, expected %p", mt,
5811 				    uwptpg));
5812 				pmap_resident_count_dec(pmap, 1);
5813 				uwptpg->ref_count = 1;
5814 				vm_page_unwire_noq(uwptpg);
5815 				vm_page_free(uwptpg);
5816 			}
5817 			CTR2(KTR_PMAP,
5818 			    "pmap_enter_l2: failure for va %#lx in pmap %p",
5819 			    va, pmap);
5820 			return (KERN_RESOURCE_SHORTAGE);
5821 		}
5822 		if ((new_l2 & ATTR_SW_DBM) != 0)
5823 			for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
5824 				vm_page_aflag_set(mt, PGA_WRITEABLE);
5825 	}
5826 
5827 	/*
5828 	 * Increment counters.
5829 	 */
5830 	if ((new_l2 & ATTR_SW_WIRED) != 0)
5831 		pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE;
5832 	pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE;
5833 
5834 	/*
5835 	 * Conditionally sync the icache.  See pmap_enter() for details.
5836 	 */
5837 	if ((new_l2 & ATTR_S1_XN) == 0 && (PTE_TO_PHYS(new_l2) !=
5838 	    PTE_TO_PHYS(old_l2) || (old_l2 & ATTR_S1_XN) != 0) &&
5839 	    pmap != kernel_pmap && m->md.pv_memattr == VM_MEMATTR_WRITE_BACK) {
5840 		cpu_icache_sync_range((void *)PHYS_TO_DMAP(PTE_TO_PHYS(new_l2)),
5841 		    L2_SIZE);
5842 	}
5843 
5844 	/*
5845 	 * Map the superpage.
5846 	 */
5847 	pmap_store(l2, new_l2);
5848 	dsb(ishst);
5849 
5850 	counter_u64_add(pmap_l2_mappings, 1);
5851 	CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p",
5852 	    va, pmap);
5853 
5854 	return (KERN_SUCCESS);
5855 }
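/*
 * Editorial illustration (not part of the original pmap.c): how a caller can
 * act on pmap_enter_l2()'s distinct return values.  This mirrors the policy
 * of pmap_enter_object() below, which advances past the 2MB range on
 * KERN_SUCCESS or KERN_NO_SPACE and otherwise falls back to smaller
 * mappings; the ex_* names and constants are hypothetical.  Compiled out so
 * it cannot affect a build.
 */
#if 0
#include <stdbool.h>

/* Hypothetical stand-ins for the kern_return codes used in this file. */
enum ex_rv { EX_SUCCESS, EX_FAILURE, EX_NO_SPACE, EX_RESOURCE_SHORTAGE };

/* Returns true if the caller should skip ahead by a full L2 range. */
static bool
ex_l2_range_is_mapped(enum ex_rv rv)
{
	switch (rv) {
	case EX_SUCCESS:		/* a 2MB mapping was just created */
	case EX_NO_SPACE:		/* a 2MB mapping already existed */
		return (true);
	case EX_FAILURE:		/* base page mappings exist (NOREPLACE) */
	case EX_RESOURCE_SHORTAGE:	/* PTP or PV entry allocation failed */
	default:
		return (false);		/* fall back to smaller mappings */
	}
}
#endif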
5856 
5857 /*
5858  * Tries to create a read- and/or execute-only L3C page mapping.  Returns
5859  * KERN_SUCCESS if the mapping was created.  Otherwise, returns an error
5860  * value.
5861  */
5862 static int
5863 pmap_enter_l3c_rx(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *ml3p,
5864     vm_prot_t prot, struct rwlock **lockp)
5865 {
5866 	pt_entry_t l3e;
5867 
5868 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5869 	PMAP_ASSERT_STAGE1(pmap);
5870 	KASSERT(ADDR_IS_CANONICAL(va),
5871 	    ("%s: Address not in canonical form: %lx", __func__, va));
5872 
5873 	l3e = VM_PAGE_TO_PTE(m) | pmap_sh_attr |
5874 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
5875 	    ATTR_CONTIGUOUS | L3_PAGE;
5876 	if ((m->oflags & VPO_UNMANAGED) == 0)
5877 		l3e |= ATTR_SW_MANAGED;
5878 	else
5879 		l3e |= ATTR_AF;
5880 	if ((prot & VM_PROT_EXECUTE) == 0 ||
5881 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
5882 		l3e |= ATTR_S1_XN;
5883 	if (!ADDR_IS_KERNEL(va))
5884 		l3e |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
5885 	else
5886 		l3e |= ATTR_S1_UXN;
5887 	if (pmap != kernel_pmap)
5888 		l3e |= ATTR_S1_nG;
5889 	return (pmap_enter_l3c(pmap, va, l3e, PMAP_ENTER_NOSLEEP |
5890 	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, m, ml3p, lockp));
5891 }
5892 
5893 static int
5894 pmap_enter_l3c(pmap_t pmap, vm_offset_t va, pt_entry_t l3e, u_int flags,
5895     vm_page_t m, vm_page_t *ml3p, struct rwlock **lockp)
5896 {
5897 	pd_entry_t *l2p, *pde;
5898 	pt_entry_t *l3p, *tl3p;
5899 	vm_page_t mt;
5900 	vm_paddr_t pa;
5901 	vm_pindex_t l2pindex;
5902 	int lvl;
5903 
5904 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5905 	KASSERT((va & L3C_OFFSET) == 0,
5906 	    ("pmap_enter_l3c: va is not aligned"));
5907 	KASSERT(!VA_IS_CLEANMAP(va) || (l3e & ATTR_SW_MANAGED) == 0,
5908 	    ("pmap_enter_l3c: managed mapping within the clean submap"));
5909 	KASSERT((l3e & ATTR_CONTIGUOUS) != 0,
5910 	    ("pmap_enter_l3c: l3e is missing ATTR_CONTIGUOUS"));
5911 
5912 	/*
5913 	 * If the L3 PTP is not resident, we attempt to create it here.
5914 	 */
5915 	if (!ADDR_IS_KERNEL(va)) {
5916 		/*
5917 		 * Were we given the correct L3 PTP?  If so, we can simply
5918 		 * increment its ref count.
5919 		 */
5920 		l2pindex = pmap_l2_pindex(va);
5921 		if (*ml3p != NULL && (*ml3p)->pindex == l2pindex) {
5922 			(*ml3p)->ref_count += L3C_ENTRIES;
5923 		} else {
5924 retry:
5925 			/*
5926 			 * Get the L2 entry.
5927 			 */
5928 			pde = pmap_pde(pmap, va, &lvl);
5929 
5930 			/*
5931 			 * If the L2 entry is a superpage, we either abort or
5932 			 * demote depending on the given flags.
5933 			 */
5934 			if (lvl == 1) {
5935 				l2p = pmap_l1_to_l2(pde, va);
5936 				if ((pmap_load(l2p) & ATTR_DESCR_MASK) ==
5937 				    L2_BLOCK) {
5938 					if ((flags & PMAP_ENTER_NOREPLACE) != 0)
5939 						return (KERN_FAILURE);
5940 					l3p = pmap_demote_l2_locked(pmap, l2p,
5941 					    va, lockp);
5942 					if (l3p != NULL) {
5943 						*ml3p = PTE_TO_VM_PAGE(
5944 						    pmap_load(l2p));
5945 						(*ml3p)->ref_count +=
5946 						    L3C_ENTRIES;
5947 						goto have_l3p;
5948 					}
5949 				}
5950 				/* We need to allocate an L3 PTP. */
5951 			}
5952 
5953 			/*
5954 			 * If the L3 PTP is mapped, we just increment its ref
5955 			 * count.  Otherwise, we attempt to allocate it.
5956 			 */
5957 			if (lvl == 2 && pmap_load(pde) != 0) {
5958 				*ml3p = PTE_TO_VM_PAGE(pmap_load(pde));
5959 				(*ml3p)->ref_count += L3C_ENTRIES;
5960 			} else {
5961 				*ml3p = _pmap_alloc_l3(pmap, l2pindex, (flags &
5962 				    PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp);
5963 				if (*ml3p == NULL) {
5964 					if ((flags & PMAP_ENTER_NOSLEEP) != 0)
5965 						return (KERN_FAILURE);
5966 
5967 					/*
5968 					 * The page table may have changed
5969 					 * while we slept.
5970 					 */
5971 					goto retry;
5972 				}
5973 				(*ml3p)->ref_count += L3C_ENTRIES - 1;
5974 			}
5975 		}
5976 		l3p = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*ml3p));
5977 	} else {
5978 		*ml3p = NULL;
5979 
5980 		/*
5981 		 * If the L2 entry is a superpage, we either abort or demote
5982 		 * depending on the given flags.
5983 		 */
5984 		pde = pmap_pde(kernel_pmap, va, &lvl);
5985 		if (lvl == 1) {
5986 			l2p = pmap_l1_to_l2(pde, va);
5987 			KASSERT((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK,
5988 			    ("pmap_enter_l3c: missing L2 block"));
5989 			if ((flags & PMAP_ENTER_NOREPLACE) != 0)
5990 				return (KERN_FAILURE);
5991 			l3p = pmap_demote_l2_locked(pmap, l2p, va, lockp);
5992 		} else {
5993 			KASSERT(lvl == 2,
5994 			    ("pmap_enter_l3c: Invalid level %d", lvl));
5995 			l3p = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(
5996 			    pmap_load(pde)));
5997 		}
5998 	}
5999 have_l3p:
6000 	l3p = &l3p[pmap_l3_index(va)];
6001 
6002 	/*
6003 	 * If bti is not the same for the whole L3C range, return failure
6004 	 * and let vm_fault() cope.  Check after L3 allocation, since
6005 	 * it could sleep.
6006 	 */
6007 	if (!pmap_bti_same(pmap, va, va + L3C_SIZE, &l3e)) {
6008 		KASSERT(*ml3p != NULL, ("pmap_enter_l3c: missing L3 PTP"));
6009 		(*ml3p)->ref_count -= L3C_ENTRIES - 1;
6010 		pmap_abort_ptp(pmap, va, *ml3p);
6011 		*ml3p = NULL;
6012 		return (KERN_PROTECTION_FAILURE);
6013 	}
6014 
6015 	/*
6016 	 * If there are existing mappings, either abort or remove them.
6017 	 */
6018 	if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
6019 		for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
6020 			if (pmap_load(tl3p) != 0) {
6021 				if (*ml3p != NULL)
6022 					(*ml3p)->ref_count -= L3C_ENTRIES;
6023 				return (KERN_FAILURE);
6024 			}
6025 		}
6026 	} else {
6027 		/*
6028 		 * Because we increment the L3 page's reference count above,
6029 		 * it is guaranteed not to be freed here and we can pass NULL
6030 		 * instead of a valid free list.
6031 		 */
6032 		pmap_remove_l3_range(pmap, pmap_load(pmap_l2(pmap, va)), va,
6033 		    va + L3C_SIZE, NULL, lockp);
6034 	}
6035 
6036 	/*
6037 	 * Enter on the PV list if part of our managed memory.
6038 	 */
6039 	if ((l3e & ATTR_SW_MANAGED) != 0) {
6040 		if (!pmap_pv_insert_l3c(pmap, va, m, lockp)) {
6041 			if (*ml3p != NULL) {
6042 				(*ml3p)->ref_count -= L3C_ENTRIES - 1;
6043 				pmap_abort_ptp(pmap, va, *ml3p);
6044 				*ml3p = NULL;
6045 			}
6046 			return (KERN_RESOURCE_SHORTAGE);
6047 		}
6048 		if ((l3e & ATTR_SW_DBM) != 0)
6049 			for (mt = m; mt < &m[L3C_ENTRIES]; mt++)
6050 				vm_page_aflag_set(mt, PGA_WRITEABLE);
6051 	}
6052 
6053 	/*
6054 	 * Increment counters.
6055 	 */
6056 	if ((l3e & ATTR_SW_WIRED) != 0)
6057 		pmap->pm_stats.wired_count += L3C_ENTRIES;
6058 	pmap_resident_count_inc(pmap, L3C_ENTRIES);
6059 
6060 	pa = VM_PAGE_TO_PHYS(m);
6061 	KASSERT((pa & L3C_OFFSET) == 0, ("pmap_enter_l3c: pa is not aligned"));
6062 
6063 	/*
6064 	 * Sync the icache before the mapping is stored.
6065 	 */
6066 	if ((l3e & ATTR_S1_XN) == 0 && pmap != kernel_pmap &&
6067 	    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
6068 		cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa), L3C_SIZE);
6069 
6070 	/*
6071 	 * Map the superpage.
6072 	 */
6073 	for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
6074 		pmap_store(tl3p, l3e);
6075 		l3e += L3_SIZE;
6076 	}
6077 	dsb(ishst);
6078 
6079 	counter_u64_add(pmap_l3c_mappings, 1);
6080 	CTR2(KTR_PMAP, "pmap_enter_l3c: success for va %#lx in pmap %p",
6081 	    va, pmap);
6082 	return (KERN_SUCCESS);
6083 }
6084 
6085 /*
6086  * Maps a sequence of resident pages belonging to the same object.
6087  * The sequence begins with the given page m_start.  This page is
6088  * mapped at the given virtual address start.  Each subsequent page is
6089  * mapped at a virtual address that is offset from start by the same
6090  * amount as the page is offset from m_start within the object.  The
6091  * last page in the sequence is the page with the largest offset from
6092  * m_start that can be mapped at a virtual address less than the given
6093  * virtual address end.  Not every virtual page between start and end
6094  * is mapped; only those for which a resident page exists with the
6095  * corresponding offset from m_start are mapped.
6096  */
6097 void
6098 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
6099     vm_page_t m_start, vm_prot_t prot)
6100 {
6101 	struct pctrie_iter pages;
6102 	struct rwlock *lock;
6103 	vm_offset_t va;
6104 	vm_page_t m, mpte;
6105 	int rv;
6106 
6107 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
6108 
6109 	mpte = NULL;
6110 	vm_page_iter_limit_init(&pages, m_start->object,
6111 	    m_start->pindex + atop(end - start));
6112 	m = vm_radix_iter_lookup(&pages, m_start->pindex);
6113 	lock = NULL;
6114 	PMAP_LOCK(pmap);
6115 	while (m != NULL) {
6116 		va = start + ptoa(m->pindex - m_start->pindex);
6117 		if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end &&
6118 		    m->psind == 2 && pmap_ps_enabled(pmap) &&
6119 		    ((rv = pmap_enter_l2_rx(pmap, va, m, prot, &lock)) ==
6120 		    KERN_SUCCESS || rv == KERN_NO_SPACE)) {
6121 			m = vm_radix_iter_jump(&pages, L2_SIZE / PAGE_SIZE);
6122 		} else if ((va & L3C_OFFSET) == 0 && va + L3C_SIZE <= end &&
6123 		    m->psind >= 1 && pmap_ps_enabled(pmap) &&
6124 		    ((rv = pmap_enter_l3c_rx(pmap, va, m, &mpte, prot,
6125 		    &lock)) == KERN_SUCCESS || rv == KERN_NO_SPACE)) {
6126 			m = vm_radix_iter_jump(&pages, L3C_ENTRIES);
6127 		} else {
6128 			/*
6129 			 * In general, if a superpage mapping were possible,
6130 			 * it would have been created above.  That said, if
6131 			 * start and end are not superpage aligned, then
6132 			 * promotion might be possible at the ends of [start,
6133 			 * end).  However, in practice, those promotion
6134 			 * attempts are so unlikely to succeed that they are
6135 			 * not worth trying.
6136 			 */
6137 			mpte = pmap_enter_quick_locked(pmap, va, m, prot |
6138 			    VM_PROT_NO_PROMOTE, mpte, &lock);
6139 			m = vm_radix_iter_step(&pages);
6140 		}
6141 	}
6142 	if (lock != NULL)
6143 		rw_wunlock(lock);
6144 	PMAP_UNLOCK(pmap);
6145 }
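/*
 * Editorial illustration (not part of the original pmap.c): the virtual
 * address chosen for each page in pmap_enter_object() depends only on the
 * page's offset within the object, and the iterator then jumps by the number
 * of base pages just mapped (512 after a 2MB L2 mapping, 16 after a 64KB L3C
 * mapping, 1 otherwise).  Assumes 4KB base pages; the ex_* names are
 * hypothetical.  Compiled out so it cannot affect a build.
 */
#if 0
#include <stdint.h>

#define	EX_PAGE_SHIFT	12	/* 4KB base pages */

/* va for the page at object index "pindex": start + ptoa(index delta). */
static inline uint64_t
ex_object_va(uint64_t start, uint64_t start_pindex, uint64_t pindex)
{
	return (start + ((pindex - start_pindex) << EX_PAGE_SHIFT));
}

/*
 * Example: start = 0x40000000 and start_pindex = 0 place the page at
 * pindex 3 at va 0x40003000; a successful 2MB mapping then advances the
 * iterator by 512 indices, a 64KB mapping by 16.
 */
#endif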
6146 
6147 /*
6148  * this code makes some *MAJOR* assumptions:
6149  * 1. Current pmap & pmap exists.
6150  * 2. Not wired.
6151  * 3. Read access.
6152  * 4. No page table pages.
6153  * but is *MUCH* faster than pmap_enter...
6154  */
6155 
6156 void
6157 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
6158 {
6159 	struct rwlock *lock;
6160 
6161 	lock = NULL;
6162 	PMAP_LOCK(pmap);
6163 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
6164 	if (lock != NULL)
6165 		rw_wunlock(lock);
6166 	PMAP_UNLOCK(pmap);
6167 }
6168 
6169 static vm_page_t
6170 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
6171     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
6172 {
6173 	pt_entry_t *l1, *l2, *l3, l3_val;
6174 	vm_paddr_t pa;
6175 	int full_lvl, lvl;
6176 
6177 	KASSERT(!VA_IS_CLEANMAP(va) ||
6178 	    (m->oflags & VPO_UNMANAGED) != 0,
6179 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
6180 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6181 	PMAP_ASSERT_STAGE1(pmap);
6182 	KASSERT(ADDR_IS_CANONICAL(va),
6183 	    ("%s: Address not in canonical form: %lx", __func__, va));
6184 	l2 = NULL;
6185 
6186 	CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
6187 	/*
6188 	 * In the case that a page table page is not
6189 	 * resident, we are creating it here.
6190 	 */
6191 	if (!ADDR_IS_KERNEL(va)) {
6192 		vm_pindex_t l2pindex;
6193 
6194 		/*
6195 		 * Calculate pagetable page index
6196 		 */
6197 		l2pindex = pmap_l2_pindex(va);
6198 		if (mpte && (mpte->pindex == l2pindex)) {
6199 			mpte->ref_count++;
6200 		} else {
6201 			/*
6202 			 * If the page table page is mapped, we just increment
6203 			 * the hold count, and activate it.  Otherwise, we
6204 			 * attempt to allocate a page table page, passing NULL
6205 			 * instead of the PV list lock pointer because we don't
6206 			 * intend to sleep.  If this attempt fails, we don't
6207 			 * retry.  Instead, we give up.
6208 			 */
6209 			l1 = pmap_l1(pmap, va);
6210 			if (l1 != NULL && pmap_load(l1) != 0) {
6211 				if ((pmap_load(l1) & ATTR_DESCR_MASK) ==
6212 				    L1_BLOCK)
6213 					return (NULL);
6214 				l2 = pmap_l1_to_l2(l1, va);
6215 				if (pmap_load(l2) != 0) {
6216 					if ((pmap_load(l2) & ATTR_DESCR_MASK) ==
6217 					    L2_BLOCK)
6218 						return (NULL);
6219 					mpte = PTE_TO_VM_PAGE(pmap_load(l2));
6220 					mpte->ref_count++;
6221 				} else {
6222 					mpte = _pmap_alloc_l3(pmap, l2pindex,
6223 					    NULL);
6224 					if (mpte == NULL)
6225 						return (mpte);
6226 				}
6227 			} else {
6228 				mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
6229 				if (mpte == NULL)
6230 					return (mpte);
6231 			}
6232 		}
6233 		l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
6234 		l3 = &l3[pmap_l3_index(va)];
6235 	} else {
6236 		mpte = NULL;
6237 		l2 = pmap_pde(kernel_pmap, va, &lvl);
6238 		KASSERT(l2 != NULL,
6239 		    ("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx",
6240 		     va));
6241 		KASSERT(lvl == 2,
6242 		    ("pmap_enter_quick_locked: Invalid level %d", lvl));
6243 		l3 = pmap_l2_to_l3(l2, va);
6244 	}
6245 
6246 	/*
6247 	 * Abort if a mapping already exists.
6248 	 */
6249 	if (pmap_load(l3) != 0) {
6250 		if (mpte != NULL)
6251 			mpte->ref_count--;
6252 		return (NULL);
6253 	}
6254 
6255 	/*
6256 	 * Enter on the PV list if part of our managed memory.
6257 	 */
6258 	if ((m->oflags & VPO_UNMANAGED) == 0 &&
6259 	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
6260 		if (mpte != NULL)
6261 			pmap_abort_ptp(pmap, va, mpte);
6262 		return (NULL);
6263 	}
6264 
6265 	/*
6266 	 * Increment counters
6267 	 */
6268 	pmap_resident_count_inc(pmap, 1);
6269 
6270 	pa = VM_PAGE_TO_PHYS(m);
6271 	l3_val = PHYS_TO_PTE(pa) | pmap_sh_attr |
6272 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
6273 	l3_val |= pmap_pte_bti(pmap, va);
6274 	if ((prot & VM_PROT_EXECUTE) == 0 ||
6275 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
6276 		l3_val |= ATTR_S1_XN;
6277 	if (!ADDR_IS_KERNEL(va))
6278 		l3_val |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
6279 	else
6280 		l3_val |= ATTR_S1_UXN;
6281 	if (pmap != kernel_pmap)
6282 		l3_val |= ATTR_S1_nG;
6283 
6284 	/*
6285 	 * Now validate mapping with RO protection
6286 	 */
6287 	if ((m->oflags & VPO_UNMANAGED) == 0)
6288 		l3_val |= ATTR_SW_MANAGED;
6289 	else
6290 		l3_val |= ATTR_AF;
6291 
6292 	/* Sync icache before the mapping is stored to PTE */
6293 	if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
6294 	    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
6295 		cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa), PAGE_SIZE);
6296 
6297 	pmap_store(l3, l3_val);
6298 	dsb(ishst);
6299 
6300 #if VM_NRESERVLEVEL > 0
6301 	/*
6302 	 * First, attempt L3C promotion, if the virtual and physical addresses
6303 	 * are aligned with each other and an underlying reservation has the
6304 	 * neighboring L3 pages allocated.  The first condition is simply an
6305 	 * optimization that recognizes some eventual promotion failures early
6306 	 * at a lower run-time cost.  Then, attempt L2 promotion, if both a
6307 	 * level 1 reservation and the PTP are fully populated.
6308 	 */
6309 	if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
6310 	    (va & L3C_OFFSET) == (pa & L3C_OFFSET) &&
6311 	    (m->flags & PG_FICTITIOUS) == 0 &&
6312 	    (full_lvl = vm_reserv_level_iffullpop(m)) >= 0 &&
6313 	    pmap_promote_l3c(pmap, l3, va) &&
6314 	    full_lvl == 1 && (mpte == NULL || mpte->ref_count == NL3PG)) {
6315 		if (l2 == NULL)
6316 			l2 = pmap_l2(pmap, va);
6317 
6318 		/*
6319 		 * If promotion succeeds, then the next call to this function
6320 		 * should not be given the unmapped PTP as a hint.
6321 		 */
6322 		if (pmap_promote_l2(pmap, l2, va, mpte, lockp))
6323 			mpte = NULL;
6324 	}
6325 #endif
6326 
6327 	return (mpte);
6328 }
6329 
6330 /*
6331  * This code maps large physical mmap regions into the
6332  * processor address space.  Note that some shortcuts
6333  * are taken, but the code works.
6334  */
6335 void
6336 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
6337     vm_pindex_t pindex, vm_size_t size)
6338 {
6339 
6340 	VM_OBJECT_ASSERT_WLOCKED(object);
6341 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
6342 	    ("pmap_object_init_pt: non-device object"));
6343 }
6344 
6345 /*
6346  *	Clear the wired attribute from the mappings for the specified range of
6347  *	addresses in the given pmap.  Every valid mapping within that range
6348  *	must have the wired attribute set.  In contrast, invalid mappings
6349  *	cannot have the wired attribute set, so they are ignored.
6350  *
6351  *	The wired attribute of the page table entry is not a hardware feature,
6352  *	so there is no need to invalidate any TLB entries.
6353  */
6354 void
6355 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
6356 {
6357 	vm_offset_t va_next;
6358 	pd_entry_t *l0, *l1, *l2;
6359 	pt_entry_t *l3;
6360 	bool partial_l3c;
6361 
6362 	PMAP_LOCK(pmap);
6363 	for (; sva < eva; sva = va_next) {
6364 		l0 = pmap_l0(pmap, sva);
6365 		if (pmap_load(l0) == 0) {
6366 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
6367 			if (va_next < sva)
6368 				va_next = eva;
6369 			continue;
6370 		}
6371 
6372 		l1 = pmap_l0_to_l1(l0, sva);
6373 		va_next = (sva + L1_SIZE) & ~L1_OFFSET;
6374 		if (va_next < sva)
6375 			va_next = eva;
6376 		if (pmap_load(l1) == 0)
6377 			continue;
6378 
6379 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
6380 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
6381 			KASSERT(va_next <= eva,
6382 			    ("partial update of non-transparent 1G page "
6383 			    "l1 %#lx sva %#lx eva %#lx va_next %#lx",
6384 			    pmap_load(l1), sva, eva, va_next));
6385 			MPASS(pmap != kernel_pmap);
6386 			MPASS((pmap_load(l1) & (ATTR_SW_MANAGED |
6387 			    ATTR_SW_WIRED)) == ATTR_SW_WIRED);
6388 			pmap_clear_bits(l1, ATTR_SW_WIRED);
6389 			pmap->pm_stats.wired_count -= L1_SIZE / PAGE_SIZE;
6390 			continue;
6391 		}
6392 
6393 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
6394 		if (va_next < sva)
6395 			va_next = eva;
6396 
6397 		l2 = pmap_l1_to_l2(l1, sva);
6398 		if (pmap_load(l2) == 0)
6399 			continue;
6400 
6401 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
6402 			if ((pmap_load(l2) & ATTR_SW_WIRED) == 0)
6403 				panic("pmap_unwire: l2 %#jx is missing "
6404 				    "ATTR_SW_WIRED", (uintmax_t)pmap_load(l2));
6405 
6406 			/*
6407 			 * Are we unwiring the entire large page?  If not,
6408 			 * demote the mapping and fall through.
6409 			 */
6410 			if (sva + L2_SIZE == va_next && eva >= va_next) {
6411 				pmap_clear_bits(l2, ATTR_SW_WIRED);
6412 				pmap->pm_stats.wired_count -= L2_SIZE /
6413 				    PAGE_SIZE;
6414 				continue;
6415 			} else if (pmap_demote_l2(pmap, l2, sva) == NULL)
6416 				panic("pmap_unwire: demotion failed");
6417 		}
6418 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
6419 		    ("pmap_unwire: Invalid l2 entry after demotion"));
6420 
6421 		if (va_next > eva)
6422 			va_next = eva;
6423 		for (partial_l3c = true, l3 = pmap_l2_to_l3(l2, sva);
6424 		    sva != va_next; l3++, sva += L3_SIZE) {
6425 			if (pmap_load(l3) == 0)
6426 				continue;
6427 			if ((pmap_load(l3) & ATTR_CONTIGUOUS) != 0) {
6428 				/*
6429 				 * Avoid demotion for whole-page unwiring.
6430 				 */
6431 				if ((sva & L3C_OFFSET) == 0) {
6432 					/*
6433 					 * Handle the possibility that
6434 					 * "va_next" is zero because of
6435 					 * address wraparound.
6436 					 */
6437 					partial_l3c = sva + L3C_OFFSET >
6438 					    va_next - 1;
6439 				}
6440 				if (partial_l3c)
6441 					(void)pmap_demote_l3c(pmap, l3, sva);
6442 			}
6443 			if ((pmap_load(l3) & ATTR_SW_WIRED) == 0)
6444 				panic("pmap_unwire: l3 %#jx is missing "
6445 				    "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3));
6446 
6447 			/*
6448 			 * ATTR_SW_WIRED must be cleared atomically.  Although
6449 			 * the pmap lock synchronizes access to ATTR_SW_WIRED,
6450 			 * the System MMU may write to the entry concurrently.
6451 			 */
6452 			pmap_clear_bits(l3, ATTR_SW_WIRED);
6453 			pmap->pm_stats.wired_count--;
6454 		}
6455 	}
6456 	PMAP_UNLOCK(pmap);
6457 }
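/*
 * Editorial illustration (not part of the original pmap.c): pmap_unwire()
 * only demotes an ATTR_CONTIGUOUS (64KB) mapping when the unwire range
 * covers just part of it.  The test "sva + L3C_OFFSET > va_next - 1"
 * compares the last byte of the 64KB region against the last byte of the
 * current pass, so it stays correct even when va_next has wrapped to zero,
 * as the in-line comment above notes.  Assumes a 64KB L3C size; the ex_*
 * names are hypothetical.  Compiled out so it cannot affect a build.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

#define	EX_L3C_SIZE	0x10000UL		/* 64KB */
#define	EX_L3C_OFFSET	(EX_L3C_SIZE - 1)

/* True if [sva, va_next) does not span the whole 64KB region at sva. */
static inline bool
ex_l3c_unwire_is_partial(uint64_t sva, uint64_t va_next)
{
	/* sva is assumed to be 64KB-aligned here, as in pmap_unwire(). */
	return (sva + EX_L3C_OFFSET > va_next - 1);
}

/*
 * Examples: sva = 0x10000, va_next = 0x18000 -> partial (demote);
 *           sva = 0x10000, va_next = 0x20000 -> whole region (no demotion);
 *           sva = 0xffffffffffff0000, va_next = 0 (wrapped) -> whole region.
 */
#endif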
6458 
6459 /*
6460  * This function requires that the caller has already added one to ml3's
6461  * ref_count in anticipation of creating a 4KB page mapping.
6462  */
6463 static bool
6464 pmap_copy_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va, pt_entry_t l3e,
6465     vm_page_t ml3, struct rwlock **lockp)
6466 {
6467 	pt_entry_t *tl3p;
6468 
6469 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6470 	KASSERT((va & L3C_OFFSET) == 0,
6471 	    ("pmap_copy_l3c: va is not aligned"));
6472 	KASSERT((l3e & ATTR_SW_MANAGED) != 0,
6473 	    ("pmap_copy_l3c: l3e is not managed"));
6474 
6475 	/*
6476 	 * Abort if a mapping already exists.
6477 	 */
6478 	for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++)
6479 		if (pmap_load(tl3p) != 0) {
6480 			if (ml3 != NULL)
6481 				ml3->ref_count--;
6482 			return (false);
6483 		}
6484 
6485 	if (!pmap_pv_insert_l3c(pmap, va, PTE_TO_VM_PAGE(l3e), lockp)) {
6486 		if (ml3 != NULL)
6487 			pmap_abort_ptp(pmap, va, ml3);
6488 		return (false);
6489 	}
6490 	ml3->ref_count += L3C_ENTRIES - 1;
6491 
6492 	/*
6493 	 * Clear the wired and accessed bits.  However, leave the dirty bit
6494 	 * unchanged because read/write superpage mappings are required to be
6495 	 * dirty.
6496 	 */
6497 	l3e &= ~(ATTR_SW_WIRED | ATTR_AF);
6498 
6499 	for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
6500 		pmap_store(tl3p, l3e);
6501 		l3e += L3_SIZE;
6502 	}
6503 	pmap_resident_count_inc(pmap, L3C_ENTRIES);
6504 	counter_u64_add(pmap_l3c_mappings, 1);
6505 	CTR2(KTR_PMAP, "pmap_copy_l3c: success for va %#lx in pmap %p",
6506 	    va, pmap);
6507 	return (true);
6508 }
6509 
6510 /*
6511  *	Copy the range specified by src_addr/len
6512  *	from the source map to the range dst_addr/len
6513  *	in the destination map.
6514  *
6515  *	This routine is only advisory and need not do anything.
6516  *
6517  *	Because the executable mappings created by this routine are copied,
6518  *	it should not have to flush the instruction cache.
6519  */
6520 void
6521 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
6522     vm_offset_t src_addr)
6523 {
6524 	struct rwlock *lock;
6525 	pd_entry_t *l0, *l1, *l2, srcptepaddr;
6526 	pt_entry_t *dst_pte, mask, nbits, ptetemp, *src_pte;
6527 	vm_offset_t addr, end_addr, va_next;
6528 	vm_page_t dst_m, dstmpte, srcmpte;
6529 
6530 	PMAP_ASSERT_STAGE1(dst_pmap);
6531 	PMAP_ASSERT_STAGE1(src_pmap);
6532 
6533 	if (dst_addr != src_addr)
6534 		return;
6535 	end_addr = src_addr + len;
6536 	lock = NULL;
6537 	if (dst_pmap < src_pmap) {
6538 		PMAP_LOCK(dst_pmap);
6539 		PMAP_LOCK(src_pmap);
6540 	} else {
6541 		PMAP_LOCK(src_pmap);
6542 		PMAP_LOCK(dst_pmap);
6543 	}
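	/*
	 * Taking the two pmap locks in a fixed (pointer) order here, as in
	 * pmap_vmspace_copy() below, prevents a lock-order deadlock when two
	 * copies run concurrently in opposite directions.
	 */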
6544 	for (addr = src_addr; addr < end_addr; addr = va_next) {
6545 		l0 = pmap_l0(src_pmap, addr);
6546 		if (pmap_load(l0) == 0) {
6547 			va_next = (addr + L0_SIZE) & ~L0_OFFSET;
6548 			if (va_next < addr)
6549 				va_next = end_addr;
6550 			continue;
6551 		}
6552 
6553 		va_next = (addr + L1_SIZE) & ~L1_OFFSET;
6554 		if (va_next < addr)
6555 			va_next = end_addr;
6556 		l1 = pmap_l0_to_l1(l0, addr);
6557 		if (pmap_load(l1) == 0)
6558 			continue;
6559 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
6560 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
6561 			KASSERT(va_next <= end_addr,
6562 			    ("partial update of non-transparent 1G page "
6563 			    "l1 %#lx addr %#lx end_addr %#lx va_next %#lx",
6564 			    pmap_load(l1), addr, end_addr, va_next));
6565 			srcptepaddr = pmap_load(l1);
6566 			l1 = pmap_l1(dst_pmap, addr);
6567 			if (l1 == NULL) {
6568 				if (_pmap_alloc_l3(dst_pmap,
6569 				    pmap_l0_pindex(addr), NULL) == NULL)
6570 					break;
6571 				l1 = pmap_l1(dst_pmap, addr);
6572 			} else {
6573 				l0 = pmap_l0(dst_pmap, addr);
6574 				dst_m = PTE_TO_VM_PAGE(pmap_load(l0));
6575 				dst_m->ref_count++;
6576 			}
6577 			KASSERT(pmap_load(l1) == 0,
6578 			    ("1G mapping present in dst pmap "
6579 			    "l1 %#lx addr %#lx end_addr %#lx va_next %#lx",
6580 			    pmap_load(l1), addr, end_addr, va_next));
6581 			pmap_store(l1, srcptepaddr & ~ATTR_SW_WIRED);
6582 			pmap_resident_count_inc(dst_pmap, L1_SIZE / PAGE_SIZE);
6583 			continue;
6584 		}
6585 
6586 		va_next = (addr + L2_SIZE) & ~L2_OFFSET;
6587 		if (va_next < addr)
6588 			va_next = end_addr;
6589 		l2 = pmap_l1_to_l2(l1, addr);
6590 		srcptepaddr = pmap_load(l2);
6591 		if (srcptepaddr == 0)
6592 			continue;
6593 		if ((srcptepaddr & ATTR_DESCR_MASK) == L2_BLOCK) {
6594 			/*
6595 			 * We can only virtual copy whole superpages.
6596 			 */
6597 			if ((addr & L2_OFFSET) != 0 ||
6598 			    addr + L2_SIZE > end_addr)
6599 				continue;
6600 			l2 = pmap_alloc_l2(dst_pmap, addr, &dst_m, NULL);
6601 			if (l2 == NULL)
6602 				break;
6603 			if (pmap_load(l2) == 0 &&
6604 			    ((srcptepaddr & ATTR_SW_MANAGED) == 0 ||
6605 			    pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr,
6606 			    PMAP_ENTER_NORECLAIM, &lock))) {
6607 				/*
6608 				 * We leave the dirty bit unchanged because
6609 				 * managed read/write superpage mappings are
6610 				 * required to be dirty.  However, managed
6611 				 * superpage mappings are not required to
6612 				 * have their accessed bit set, so we clear
6613 				 * it because we don't know if this mapping
6614 				 * will be used.
6615 				 */
6616 				srcptepaddr &= ~ATTR_SW_WIRED;
6617 				if ((srcptepaddr & ATTR_SW_MANAGED) != 0)
6618 					srcptepaddr &= ~ATTR_AF;
6619 				pmap_store(l2, srcptepaddr);
6620 				pmap_resident_count_inc(dst_pmap, L2_SIZE /
6621 				    PAGE_SIZE);
6622 				counter_u64_add(pmap_l2_mappings, 1);
6623 			} else
6624 				pmap_abort_ptp(dst_pmap, addr, dst_m);
6625 			continue;
6626 		}
6627 		KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE,
6628 		    ("pmap_copy: invalid L2 entry"));
6629 		srcmpte = PTE_TO_VM_PAGE(srcptepaddr);
6630 		KASSERT(srcmpte->ref_count > 0,
6631 		    ("pmap_copy: source page table page is unused"));
6632 		if (va_next > end_addr)
6633 			va_next = end_addr;
6634 		src_pte = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(srcptepaddr));
6635 		src_pte = &src_pte[pmap_l3_index(addr)];
6636 		dstmpte = NULL;
6637 		for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
6638 			ptetemp = pmap_load(src_pte);
6639 
6640 			/*
6641 			 * We only virtual copy managed pages.
6642 			 */
6643 			if ((ptetemp & ATTR_SW_MANAGED) == 0)
6644 				continue;
6645 
6646 			if (dstmpte != NULL) {
6647 				KASSERT(dstmpte->pindex == pmap_l2_pindex(addr),
6648 				    ("dstmpte pindex/addr mismatch"));
6649 				dstmpte->ref_count++;
6650 			} else if ((dstmpte = pmap_alloc_l3(dst_pmap, addr,
6651 			    NULL)) == NULL)
6652 				goto out;
6653 			dst_pte = (pt_entry_t *)
6654 			    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
6655 			dst_pte = &dst_pte[pmap_l3_index(addr)];
6656 			if ((ptetemp & ATTR_CONTIGUOUS) != 0 && (addr &
6657 			    L3C_OFFSET) == 0 && addr + L3C_OFFSET <=
6658 			    va_next - 1) {
6659 				if (!pmap_copy_l3c(dst_pmap, dst_pte, addr,
6660 				    ptetemp, dstmpte, &lock))
6661 					goto out;
6662 				addr += L3C_SIZE - PAGE_SIZE;
6663 				src_pte += L3C_ENTRIES - 1;
6664 			} else if (pmap_load(dst_pte) == 0 &&
6665 			    pmap_try_insert_pv_entry(dst_pmap, addr,
6666 			    PTE_TO_VM_PAGE(ptetemp), &lock)) {
6667 				/*
6668 				 * Clear the wired, contiguous, modified, and
6669 				 * accessed bits from the destination PTE.
6670 				 * The contiguous bit is cleared because we
6671 				 * are not copying the entire L3C superpage.
6672 				 */
6673 				mask = ATTR_SW_WIRED | ATTR_CONTIGUOUS |
6674 				    ATTR_AF;
6675 				nbits = 0;
6676 				if ((ptetemp & ATTR_SW_DBM) != 0)
6677 					nbits |= ATTR_S1_AP_RW_BIT;
6678 				pmap_store(dst_pte, (ptetemp & ~mask) | nbits);
6679 				pmap_resident_count_inc(dst_pmap, 1);
6680 			} else {
6681 				pmap_abort_ptp(dst_pmap, addr, dstmpte);
6682 				goto out;
6683 			}
6684 			/* Have we copied all of the valid mappings? */
6685 			if (dstmpte->ref_count >= srcmpte->ref_count)
6686 				break;
6687 		}
6688 	}
6689 out:
6690 	/*
6691 	 * XXX This barrier may not be needed because the destination pmap is
6692 	 * not active.
6693 	 */
6694 	dsb(ishst);
6695 
6696 	if (lock != NULL)
6697 		rw_wunlock(lock);
6698 	PMAP_UNLOCK(src_pmap);
6699 	PMAP_UNLOCK(dst_pmap);
6700 }
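/*
 * Illustrative sketch (the call site is an assumption, not taken from this
 * file): the MI VM code is expected to call this advisory routine with
 * matching addresses when duplicating an address space, roughly as
 *
 *	pmap_copy(dst_map->pmap, src_map->pmap, entry->start,
 *	    entry->end - entry->start, entry->start);
 *
 * Only the parameter order comes from the definition above.
 */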
6701 
6702 int
6703 pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap)
6704 {
6705 	int error;
6706 
6707 	if (dst_pmap->pm_stage != src_pmap->pm_stage)
6708 		return (EINVAL);
6709 
6710 	if (dst_pmap->pm_stage != PM_STAGE1 || src_pmap->pm_bti == NULL)
6711 		return (0);
6712 
6713 	for (;;) {
6714 		if (dst_pmap < src_pmap) {
6715 			PMAP_LOCK(dst_pmap);
6716 			PMAP_LOCK(src_pmap);
6717 		} else {
6718 			PMAP_LOCK(src_pmap);
6719 			PMAP_LOCK(dst_pmap);
6720 		}
6721 		error = pmap_bti_copy(dst_pmap, src_pmap);
6722 		/* Clean up partial copy on failure due to no memory. */
6723 		if (error == ENOMEM)
6724 			pmap_bti_deassign_all(dst_pmap);
6725 		PMAP_UNLOCK(src_pmap);
6726 		PMAP_UNLOCK(dst_pmap);
6727 		if (error != ENOMEM)
6728 			break;
6729 		vm_wait(NULL);
6730 	}
6731 	return (error);
6732 }
6733 
6734 /*
6735  *	pmap_zero_page zeros the specified hardware page through its
6736  *	direct map (DMAP) address.
6737  */
6738 void
6739 pmap_zero_page(vm_page_t m)
6740 {
6741 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
6742 
6743 	pagezero((void *)va);
6744 }
6745 
6746 /*
6747  *	pmap_zero_page_area zeros the specified portion of a hardware
6748  *	page through the page's direct map (DMAP) address.
6749  *
6750  *	off and size may not cover an area beyond a single hardware page.
6751  */
6752 void
6753 pmap_zero_page_area(vm_page_t m, int off, int size)
6754 {
6755 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
6756 
6757 	if (off == 0 && size == PAGE_SIZE)
6758 		pagezero((void *)va);
6759 	else
6760 		bzero((char *)va + off, size);
6761 }
6762 
6763 /*
6764  *	pmap_copy_page copies the specified (machine independent)
6765  *	page through the direct map (DMAP) addresses of the source
6766  *	and destination pages.
6768  */
6769 void
6770 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
6771 {
6772 	vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
6773 	vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
6774 
6775 	pagecopy((void *)src, (void *)dst);
6776 }
6777 
6778 int unmapped_buf_allowed = 1;
6779 
6780 void
6781 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
6782     vm_offset_t b_offset, int xfersize)
6783 {
6784 	void *a_cp, *b_cp;
6785 	vm_page_t m_a, m_b;
6786 	vm_paddr_t p_a, p_b;
6787 	vm_offset_t a_pg_offset, b_pg_offset;
6788 	int cnt;
6789 
6790 	while (xfersize > 0) {
6791 		a_pg_offset = a_offset & PAGE_MASK;
6792 		m_a = ma[a_offset >> PAGE_SHIFT];
6793 		p_a = m_a->phys_addr;
6794 		b_pg_offset = b_offset & PAGE_MASK;
6795 		m_b = mb[b_offset >> PAGE_SHIFT];
6796 		p_b = m_b->phys_addr;
6797 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
6798 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
6799 		if (__predict_false(!PHYS_IN_DMAP(p_a))) {
6800 			panic("!DMAP a %lx", p_a);
6801 		} else {
6802 			a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset;
6803 		}
6804 		if (__predict_false(!PHYS_IN_DMAP(p_b))) {
6805 			panic("!DMAP b %lx", p_b);
6806 		} else {
6807 			b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset;
6808 		}
6809 		bcopy(a_cp, b_cp, cnt);
6810 		a_offset += cnt;
6811 		b_offset += cnt;
6812 		xfersize -= cnt;
6813 	}
6814 }
6815 
6816 vm_offset_t
6817 pmap_quick_enter_page(vm_page_t m)
6818 {
6819 
6820 	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
6821 }
6822 
6823 void
6824 pmap_quick_remove_page(vm_offset_t addr)
6825 {
6826 }
6827 
6828 /*
6829  * Returns true if the pmap's pv is one of the first
6830  * 16 pvs linked to from this page.  This count may
6831  * be changed upwards or downwards in the future; it
6832  * is only necessary that true be returned for a small
6833  * subset of pmaps for proper page aging.
6834  */
6835 bool
6836 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
6837 {
6838 	struct md_page *pvh;
6839 	struct rwlock *lock;
6840 	pv_entry_t pv;
6841 	int loops = 0;
6842 	bool rv;
6843 
6844 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
6845 	    ("pmap_page_exists_quick: page %p is not managed", m));
6846 	rv = false;
6847 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
6848 	rw_rlock(lock);
6849 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
6850 		if (PV_PMAP(pv) == pmap) {
6851 			rv = true;
6852 			break;
6853 		}
6854 		loops++;
6855 		if (loops >= 16)
6856 			break;
6857 	}
6858 	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
6859 		pvh = page_to_pvh(m);
6860 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
6861 			if (PV_PMAP(pv) == pmap) {
6862 				rv = true;
6863 				break;
6864 			}
6865 			loops++;
6866 			if (loops >= 16)
6867 				break;
6868 		}
6869 	}
6870 	rw_runlock(lock);
6871 	return (rv);
6872 }
6873 
6874 /*
6875  *	pmap_page_wired_mappings:
6876  *
6877  *	Return the number of managed mappings to the given physical page
6878  *	that are wired.
6879  */
6880 int
6881 pmap_page_wired_mappings(vm_page_t m)
6882 {
6883 	struct rwlock *lock;
6884 	struct md_page *pvh;
6885 	pmap_t pmap;
6886 	pt_entry_t *pte;
6887 	pv_entry_t pv;
6888 	int count, md_gen, pvh_gen;
6889 
6890 	if ((m->oflags & VPO_UNMANAGED) != 0)
6891 		return (0);
6892 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
6893 	rw_rlock(lock);
6894 restart:
6895 	count = 0;
6896 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
6897 		pmap = PV_PMAP(pv);
6898 		if (!PMAP_TRYLOCK(pmap)) {
6899 			md_gen = m->md.pv_gen;
6900 			rw_runlock(lock);
6901 			PMAP_LOCK(pmap);
6902 			rw_rlock(lock);
6903 			if (md_gen != m->md.pv_gen) {
6904 				PMAP_UNLOCK(pmap);
6905 				goto restart;
6906 			}
6907 		}
6908 		pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__);
6909 		if ((pmap_load(pte) & ATTR_SW_WIRED) != 0)
6910 			count++;
6911 		PMAP_UNLOCK(pmap);
6912 	}
6913 	if ((m->flags & PG_FICTITIOUS) == 0) {
6914 		pvh = page_to_pvh(m);
6915 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
6916 			pmap = PV_PMAP(pv);
6917 			if (!PMAP_TRYLOCK(pmap)) {
6918 				md_gen = m->md.pv_gen;
6919 				pvh_gen = pvh->pv_gen;
6920 				rw_runlock(lock);
6921 				PMAP_LOCK(pmap);
6922 				rw_rlock(lock);
6923 				if (md_gen != m->md.pv_gen ||
6924 				    pvh_gen != pvh->pv_gen) {
6925 					PMAP_UNLOCK(pmap);
6926 					goto restart;
6927 				}
6928 			}
6929 			pte = pmap_pte_exists(pmap, pv->pv_va, 2, __func__);
6930 			if ((pmap_load(pte) & ATTR_SW_WIRED) != 0)
6931 				count++;
6932 			PMAP_UNLOCK(pmap);
6933 		}
6934 	}
6935 	rw_runlock(lock);
6936 	return (count);
6937 }
6938 
6939 /*
6940  * Returns true if the given page is mapped individually or as part of
6941  * a 2mpage.  Otherwise, returns false.
6942  */
6943 bool
6944 pmap_page_is_mapped(vm_page_t m)
6945 {
6946 	struct rwlock *lock;
6947 	bool rv;
6948 
6949 	if ((m->oflags & VPO_UNMANAGED) != 0)
6950 		return (false);
6951 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
6952 	rw_rlock(lock);
6953 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
6954 	    ((m->flags & PG_FICTITIOUS) == 0 &&
6955 	    !TAILQ_EMPTY(&page_to_pvh(m)->pv_list));
6956 	rw_runlock(lock);
6957 	return (rv);
6958 }
6959 
6960 /*
6961  * Destroy all managed, non-wired mappings in the given user-space
6962  * pmap.  This pmap cannot be active on any processor besides the
6963  * caller.
6964  *
6965  * This function cannot be applied to the kernel pmap.  Moreover, it
6966  * is not intended for general use.  It is only to be used during
6967  * process termination.  Consequently, it can be implemented in ways
6968  * that make it faster than pmap_remove().  First, it can more quickly
6969  * destroy mappings by iterating over the pmap's collection of PV
6970  * entries, rather than searching the page table.  Second, it doesn't
6971  * have to test and clear the page table entries atomically, because
6972  * no processor is currently accessing the user address space.  In
6973  * particular, a page table entry's dirty bit won't change state once
6974  * this function starts.
6975  */
6976 void
6977 pmap_remove_pages(pmap_t pmap)
6978 {
6979 	pd_entry_t *pde;
6980 	pt_entry_t *pte, tpte;
6981 	struct spglist free;
6982 	struct pv_chunklist free_chunks[PMAP_MEMDOM];
6983 	vm_page_t m, ml3, mt;
6984 	pv_entry_t pv;
6985 	struct md_page *pvh;
6986 	struct pv_chunk *pc, *npc;
6987 	struct rwlock *lock;
6988 	int64_t bit;
6989 	uint64_t inuse, bitmask;
6990 	int allfree, field, i, idx, lvl;
6991 	int freed __pvused;
6992 	vm_paddr_t pa;
6993 
6994 	lock = NULL;
6995 
6996 	for (i = 0; i < PMAP_MEMDOM; i++)
6997 		TAILQ_INIT(&free_chunks[i]);
6998 	SLIST_INIT(&free);
6999 	PMAP_LOCK(pmap);
7000 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
7001 		allfree = 1;
7002 		freed = 0;
7003 		for (field = 0; field < _NPCM; field++) {
7004 			inuse = ~pc->pc_map[field] & pc_freemask[field];
7005 			while (inuse != 0) {
7006 				bit = ffsl(inuse) - 1;
7007 				bitmask = 1UL << bit;
7008 				idx = field * 64 + bit;
7009 				pv = &pc->pc_pventry[idx];
7010 				inuse &= ~bitmask;
7011 
7012 				pde = pmap_pde(pmap, pv->pv_va, &lvl);
7013 				KASSERT(pde != NULL,
7014 				    ("Attempting to remove an unmapped page"));
7015 
7016 				switch (lvl) {
7017 				case 1:
7018 					pte = pmap_l1_to_l2(pde, pv->pv_va);
7019 					tpte = pmap_load(pte);
7020 					KASSERT((tpte & ATTR_DESCR_MASK) ==
7021 					    L2_BLOCK,
7022 					    ("Attempting to remove an invalid "
7023 					    "block: %lx", tpte));
7024 					break;
7025 				case 2:
7026 					pte = pmap_l2_to_l3(pde, pv->pv_va);
7027 					tpte = pmap_load(pte);
7028 					KASSERT((tpte & ATTR_DESCR_MASK) ==
7029 					    L3_PAGE,
7030 					    ("Attempting to remove an invalid "
7031 					     "page: %lx", tpte));
7032 					break;
7033 				default:
7034 					panic(
7035 					    "Invalid page directory level: %d",
7036 					    lvl);
7037 				}
7038 
7039 				/*
7040 				 * We cannot remove wired mappings at this time.
7041 				 *
7042 				 * For L3C superpages, all of the constituent PTEs
7043 				 * should have the wired bit set, so we don't
7044 				 * check for ATTR_CONTIGUOUS here.
7045 				 */
7046 				if (tpte & ATTR_SW_WIRED) {
7047 					allfree = 0;
7048 					continue;
7049 				}
7050 
7051 				/* Mark free */
7052 				pc->pc_map[field] |= bitmask;
7053 
7054 				/*
7055 				 * Because this pmap is not active on other
7056 				 * processors, the dirty bit cannot have
7057 				 * changed state since we last loaded pte.
7058 				 */
7059 				pmap_clear(pte);
7060 
7061 				pa = PTE_TO_PHYS(tpte);
7062 
7063 				m = PHYS_TO_VM_PAGE(pa);
7064 				KASSERT(m->phys_addr == pa,
7065 				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
7066 				    m, (uintmax_t)m->phys_addr,
7067 				    (uintmax_t)tpte));
7068 
7069 				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
7070 				    m < &vm_page_array[vm_page_array_size],
7071 				    ("pmap_remove_pages: bad pte %#jx",
7072 				    (uintmax_t)tpte));
7073 
7074 				/*
7075 				 * Update the vm_page_t clean/reference bits.
7076 				 *
7077 				 * We don't check for ATTR_CONTIGUOUS here
7078 				 * because writeable L3C superpages are expected
7079 				 * to be dirty, i.e., every constituent PTE
7080 				 * should be dirty.
7081 				 */
7082 				if (pmap_pte_dirty(pmap, tpte)) {
7083 					switch (lvl) {
7084 					case 1:
7085 						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
7086 							vm_page_dirty(mt);
7087 						break;
7088 					case 2:
7089 						vm_page_dirty(m);
7090 						break;
7091 					}
7092 				}
7093 
7094 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
7095 
7096 				switch (lvl) {
7097 				case 1:
7098 					pmap_resident_count_dec(pmap,
7099 					    L2_SIZE / PAGE_SIZE);
7100 					pvh = page_to_pvh(m);
7101 					TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
7102 					pvh->pv_gen++;
7103 					if (TAILQ_EMPTY(&pvh->pv_list)) {
7104 						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
7105 							if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
7106 							    TAILQ_EMPTY(&mt->md.pv_list))
7107 								vm_page_aflag_clear(mt, PGA_WRITEABLE);
7108 					}
7109 					ml3 = pmap_remove_pt_page(pmap,
7110 					    pv->pv_va);
7111 					if (ml3 != NULL) {
7112 						KASSERT(vm_page_any_valid(ml3),
7113 						    ("pmap_remove_pages: l3 page not promoted"));
7114 						pmap_resident_count_dec(pmap, 1);
7115 						KASSERT(ml3->ref_count == NL3PG,
7116 						    ("pmap_remove_pages: l3 page ref count error"));
7117 						ml3->ref_count = 0;
7118 						pmap_add_delayed_free_list(ml3,
7119 						    &free, false);
7120 					}
7121 					break;
7122 				case 2:
7123 					pmap_resident_count_dec(pmap, 1);
7124 					TAILQ_REMOVE(&m->md.pv_list, pv,
7125 					    pv_next);
7126 					m->md.pv_gen++;
7127 					if ((m->a.flags & PGA_WRITEABLE) != 0 &&
7128 					    TAILQ_EMPTY(&m->md.pv_list) &&
7129 					    (m->flags & PG_FICTITIOUS) == 0) {
7130 						pvh = page_to_pvh(m);
7131 						if (TAILQ_EMPTY(&pvh->pv_list))
7132 							vm_page_aflag_clear(m,
7133 							    PGA_WRITEABLE);
7134 					}
7135 					break;
7136 				}
7137 				pmap_unuse_pt(pmap, pv->pv_va, pmap_load(pde),
7138 				    &free);
7139 				freed++;
7140 			}
7141 		}
7142 		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
7143 		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
7144 		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
7145 		if (allfree) {
7146 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
7147 			TAILQ_INSERT_TAIL(&free_chunks[pc_to_domain(pc)], pc,
7148 			    pc_list);
7149 		}
7150 	}
7151 	if (lock != NULL)
7152 		rw_wunlock(lock);
7153 	pmap_invalidate_all(pmap);
7154 	pmap_bti_deassign_all(pmap);
7155 	free_pv_chunk_batch(free_chunks);
7156 	PMAP_UNLOCK(pmap);
7157 	vm_page_free_pages_toq(&free, true);
7158 }
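/*
 * Illustrative sketch (the call site is an assumption, not taken from this
 * file): process teardown is expected to reach this function roughly as
 *
 *	pmap_remove_pages(vmspace_pmap(vm));
 *
 * only after the address space can no longer be active on another CPU,
 * which is what makes the non-atomic PTE updates above safe.
 */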
7159 
7160 /*
7161  * This is used to check if a page has been accessed or modified.
7162  */
7163 static bool
7164 pmap_page_test_mappings(vm_page_t m, bool accessed, bool modified)
7165 {
7166 	struct rwlock *lock;
7167 	pv_entry_t pv;
7168 	struct md_page *pvh;
7169 	pt_entry_t l3e, mask, *pte, value;
7170 	pmap_t pmap;
7171 	int md_gen, pvh_gen;
7172 	bool rv;
7173 
7174 	rv = false;
7175 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7176 	rw_rlock(lock);
7177 restart:
7178 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
7179 		pmap = PV_PMAP(pv);
7180 		PMAP_ASSERT_STAGE1(pmap);
7181 		if (!PMAP_TRYLOCK(pmap)) {
7182 			md_gen = m->md.pv_gen;
7183 			rw_runlock(lock);
7184 			PMAP_LOCK(pmap);
7185 			rw_rlock(lock);
7186 			if (md_gen != m->md.pv_gen) {
7187 				PMAP_UNLOCK(pmap);
7188 				goto restart;
7189 			}
7190 		}
7191 		pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__);
7192 		mask = 0;
7193 		value = 0;
7194 		if (modified) {
7195 			mask |= ATTR_S1_AP_RW_BIT;
7196 			value |= ATTR_S1_AP(ATTR_S1_AP_RW);
7197 		}
7198 		if (accessed) {
7199 			mask |= ATTR_AF | ATTR_DESCR_MASK;
7200 			value |= ATTR_AF | L3_PAGE;
7201 		}
7202 		l3e = pmap_load(pte);
7203 		if ((l3e & ATTR_CONTIGUOUS) != 0)
7204 			l3e = pmap_load_l3c(pte);
7205 		PMAP_UNLOCK(pmap);
7206 		rv = (l3e & mask) == value;
7207 		if (rv)
7208 			goto out;
7209 	}
7210 	if ((m->flags & PG_FICTITIOUS) == 0) {
7211 		pvh = page_to_pvh(m);
7212 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
7213 			pmap = PV_PMAP(pv);
7214 			PMAP_ASSERT_STAGE1(pmap);
7215 			if (!PMAP_TRYLOCK(pmap)) {
7216 				md_gen = m->md.pv_gen;
7217 				pvh_gen = pvh->pv_gen;
7218 				rw_runlock(lock);
7219 				PMAP_LOCK(pmap);
7220 				rw_rlock(lock);
7221 				if (md_gen != m->md.pv_gen ||
7222 				    pvh_gen != pvh->pv_gen) {
7223 					PMAP_UNLOCK(pmap);
7224 					goto restart;
7225 				}
7226 			}
7227 			pte = pmap_pte_exists(pmap, pv->pv_va, 2, __func__);
7228 			mask = 0;
7229 			value = 0;
7230 			if (modified) {
7231 				mask |= ATTR_S1_AP_RW_BIT;
7232 				value |= ATTR_S1_AP(ATTR_S1_AP_RW);
7233 			}
7234 			if (accessed) {
7235 				mask |= ATTR_AF | ATTR_DESCR_MASK;
7236 				value |= ATTR_AF | L2_BLOCK;
7237 			}
7238 			rv = (pmap_load(pte) & mask) == value;
7239 			PMAP_UNLOCK(pmap);
7240 			if (rv)
7241 				goto out;
7242 		}
7243 	}
7244 out:
7245 	rw_runlock(lock);
7246 	return (rv);
7247 }
7248 
7249 /*
7250  *	pmap_is_modified:
7251  *
7252  *	Return whether or not the specified physical page was modified
7253  *	in any physical maps.
7254  */
7255 bool
7256 pmap_is_modified(vm_page_t m)
7257 {
7258 
7259 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7260 	    ("pmap_is_modified: page %p is not managed", m));
7261 
7262 	/*
7263 	 * If the page is not busied then this check is racy.
7264 	 */
7265 	if (!pmap_page_is_write_mapped(m))
7266 		return (false);
7267 	return (pmap_page_test_mappings(m, false, true));
7268 }
7269 
7270 /*
7271  *	pmap_is_prefaultable:
7272  *
7273  *	Return whether or not the specified virtual address is eligible
7274  *	for prefault.
7275  */
7276 bool
7277 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
7278 {
7279 	pd_entry_t *pde;
7280 	pt_entry_t *pte;
7281 	bool rv;
7282 	int lvl;
7283 
7284 	/*
7285 	 * Return true if and only if the L3 entry for the specified virtual
7286 	 * address is allocated but invalid.
7287 	 */
7288 	rv = false;
7289 	PMAP_LOCK(pmap);
7290 	pde = pmap_pde(pmap, addr, &lvl);
7291 	if (pde != NULL && lvl == 2) {
7292 		pte = pmap_l2_to_l3(pde, addr);
7293 		rv = pmap_load(pte) == 0;
7294 	}
7295 	PMAP_UNLOCK(pmap);
7296 	return (rv);
7297 }
7298 
7299 /*
7300  *	pmap_is_referenced:
7301  *
7302  *	Return whether or not the specified physical page was referenced
7303  *	in any physical maps.
7304  */
7305 bool
7306 pmap_is_referenced(vm_page_t m)
7307 {
7308 
7309 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7310 	    ("pmap_is_referenced: page %p is not managed", m));
7311 	return (pmap_page_test_mappings(m, true, false));
7312 }
7313 
7314 /*
7315  * Clear the write and modified bits in each of the given page's mappings.
7316  */
7317 void
7318 pmap_remove_write(vm_page_t m)
7319 {
7320 	struct md_page *pvh;
7321 	pmap_t pmap;
7322 	struct rwlock *lock;
7323 	pv_entry_t next_pv, pv;
7324 	pt_entry_t oldpte, *pte, set, clear, mask, val;
7325 	vm_offset_t va;
7326 	int md_gen, pvh_gen;
7327 
7328 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7329 	    ("pmap_remove_write: page %p is not managed", m));
7330 	vm_page_assert_busied(m);
7331 
7332 	if (!pmap_page_is_write_mapped(m))
7333 		return;
7334 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7335 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
7336 	rw_wlock(lock);
7337 retry:
7338 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
7339 		pmap = PV_PMAP(pv);
7340 		PMAP_ASSERT_STAGE1(pmap);
7341 		if (!PMAP_TRYLOCK(pmap)) {
7342 			pvh_gen = pvh->pv_gen;
7343 			rw_wunlock(lock);
7344 			PMAP_LOCK(pmap);
7345 			rw_wlock(lock);
7346 			if (pvh_gen != pvh->pv_gen) {
7347 				PMAP_UNLOCK(pmap);
7348 				goto retry;
7349 			}
7350 		}
7351 		va = pv->pv_va;
7352 		pte = pmap_pte_exists(pmap, va, 2, __func__);
7353 		if ((pmap_load(pte) & ATTR_SW_DBM) != 0)
7354 			(void)pmap_demote_l2_locked(pmap, pte, va, &lock);
7355 		KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
7356 		    ("inconsistent pv lock %p %p for page %p",
7357 		    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
7358 		PMAP_UNLOCK(pmap);
7359 	}
7360 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
7361 		pmap = PV_PMAP(pv);
7362 		if (!PMAP_TRYLOCK(pmap)) {
7363 			pvh_gen = pvh->pv_gen;
7364 			md_gen = m->md.pv_gen;
7365 			rw_wunlock(lock);
7366 			PMAP_LOCK(pmap);
7367 			rw_wlock(lock);
7368 			if (pvh_gen != pvh->pv_gen ||
7369 			    md_gen != m->md.pv_gen) {
7370 				PMAP_UNLOCK(pmap);
7371 				goto retry;
7372 			}
7373 		}
7374 		pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__);
7375 		oldpte = pmap_load(pte);
7376 		if ((oldpte & ATTR_SW_DBM) != 0) {
7377 			if ((oldpte & ATTR_CONTIGUOUS) != 0) {
7378 				(void)pmap_demote_l3c(pmap, pte, pv->pv_va);
7379 
7380 				/*
7381 				 * The L3 entry's accessed bit may have
7382 				 * changed.
7383 				 */
7384 				oldpte = pmap_load(pte);
7385 			}
7386 			if (pmap->pm_stage == PM_STAGE1) {
7387 				set = ATTR_S1_AP_RW_BIT;
7388 				clear = 0;
7389 				mask = ATTR_S1_AP_RW_BIT;
7390 				val = ATTR_S1_AP(ATTR_S1_AP_RW);
7391 			} else {
7392 				set = 0;
7393 				clear = ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
7394 				mask = ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
7395 				val = ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
7396 			}
7397 			clear |= ATTR_SW_DBM;
7398 			while (!atomic_fcmpset_64(pte, &oldpte,
7399 			    (oldpte | set) & ~clear))
7400 				cpu_spinwait();
7401 
7402 			if ((oldpte & mask) == val)
7403 				vm_page_dirty(m);
7404 			pmap_invalidate_page(pmap, pv->pv_va, true);
7405 		}
7406 		PMAP_UNLOCK(pmap);
7407 	}
7408 	rw_wunlock(lock);
7409 	vm_page_aflag_clear(m, PGA_WRITEABLE);
7410 }
7411 
7412 /*
7413  *	pmap_ts_referenced:
7414  *
7415  *	Return a count of reference bits for a page, clearing those bits.
7416  *	It is not necessary for every reference bit to be cleared, but it
7417  *	is necessary that 0 only be returned when there are truly no
7418  *	reference bits set.
7419  *
7420  *	As an optimization, update the page's dirty field if a modified bit is
7421  *	found while counting reference bits.  This opportunistic update can be
7422  *	performed at low cost and can eliminate the need for some future calls
7423  *	to pmap_is_modified().  However, since this function stops after
7424  *	finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
7425  *	dirty pages.  Those dirty pages will only be detected by a future call
7426  *	to pmap_is_modified().
7427  */
7428 int
7429 pmap_ts_referenced(vm_page_t m)
7430 {
7431 	struct md_page *pvh;
7432 	pv_entry_t pv, pvf;
7433 	pmap_t pmap;
7434 	struct rwlock *lock;
7435 	pt_entry_t *pte, tpte;
7436 	vm_offset_t va;
7437 	vm_paddr_t pa;
7438 	int cleared, md_gen, not_cleared, pvh_gen;
7439 	struct spglist free;
7440 
7441 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7442 	    ("pmap_ts_referenced: page %p is not managed", m));
7443 	SLIST_INIT(&free);
7444 	cleared = 0;
7445 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
7446 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7447 	rw_wlock(lock);
7448 retry:
7449 	not_cleared = 0;
7450 	if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
7451 		goto small_mappings;
7452 	pv = pvf;
7453 	do {
7454 		if (pvf == NULL)
7455 			pvf = pv;
7456 		pmap = PV_PMAP(pv);
7457 		if (!PMAP_TRYLOCK(pmap)) {
7458 			pvh_gen = pvh->pv_gen;
7459 			rw_wunlock(lock);
7460 			PMAP_LOCK(pmap);
7461 			rw_wlock(lock);
7462 			if (pvh_gen != pvh->pv_gen) {
7463 				PMAP_UNLOCK(pmap);
7464 				goto retry;
7465 			}
7466 		}
7467 		va = pv->pv_va;
7468 		pte = pmap_pte_exists(pmap, va, 2, __func__);
7469 		tpte = pmap_load(pte);
7470 		if (pmap_pte_dirty(pmap, tpte)) {
7471 			/*
7472 			 * Although "tpte" is mapping a 2MB page, because
7473 			 * this function is called at a 4KB page granularity,
7474 			 * we only update the 4KB page under test.
7475 			 */
7476 			vm_page_dirty(m);
7477 		}
7478 		if ((tpte & ATTR_AF) != 0) {
7479 			pa = VM_PAGE_TO_PHYS(m);
7480 
7481 			/*
7482 			 * Since this reference bit is shared by 512 4KB pages,
7483 			 * it should not be cleared every time it is tested.
7484 			 * Apply a simple "hash" function on the physical page
7485 			 * number, the virtual superpage number, and the pmap
7486 			 * address to select one 4KB page out of the 512 on
7487 			 * which testing the reference bit will result in
7488 			 * clearing that reference bit.  This function is
7489 			 * designed to avoid the selection of the same 4KB page
7490 			 * for every 2MB page mapping.
7491 			 *
7492 			 * On demotion, a mapping that hasn't been referenced
7493 			 * is simply destroyed.  To avoid the possibility of a
7494 			 * subsequent page fault on a demoted wired mapping,
7495 			 * always leave its reference bit set.  Moreover,
7496 			 * since the superpage is wired, the current state of
7497 			 * its reference bit won't affect page replacement.
7498 			 */
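			/*
			 * Worked example: with Ln_ENTRIES == 512, the test
			 * below passes for exactly one of the 512 4KB pages
			 * backing the 2MB mapping, namely the page whose
			 * physical page number has the same low 9 bits as
			 * ((va >> L2_SHIFT) ^ (uintptr_t)pmap).
			 */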
7499 			if ((((pa >> PAGE_SHIFT) ^ (va >> L2_SHIFT) ^
7500 			    (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
7501 			    (tpte & ATTR_SW_WIRED) == 0) {
7502 				pmap_clear_bits(pte, ATTR_AF);
7503 				pmap_invalidate_page(pmap, va, true);
7504 				cleared++;
7505 			} else
7506 				not_cleared++;
7507 		}
7508 		PMAP_UNLOCK(pmap);
7509 		/* Rotate the PV list if it has more than one entry. */
7510 		if (TAILQ_NEXT(pv, pv_next) != NULL) {
7511 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
7512 			TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
7513 			pvh->pv_gen++;
7514 		}
7515 		if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
7516 			goto out;
7517 	} while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
7518 small_mappings:
7519 	if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
7520 		goto out;
7521 	pv = pvf;
7522 	do {
7523 		if (pvf == NULL)
7524 			pvf = pv;
7525 		pmap = PV_PMAP(pv);
7526 		if (!PMAP_TRYLOCK(pmap)) {
7527 			pvh_gen = pvh->pv_gen;
7528 			md_gen = m->md.pv_gen;
7529 			rw_wunlock(lock);
7530 			PMAP_LOCK(pmap);
7531 			rw_wlock(lock);
7532 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
7533 				PMAP_UNLOCK(pmap);
7534 				goto retry;
7535 			}
7536 		}
7537 		pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__);
7538 		tpte = pmap_load(pte);
7539 		if (pmap_pte_dirty(pmap, tpte))
7540 			vm_page_dirty(m);
7541 		if ((tpte & ATTR_AF) != 0) {
7542 			if ((tpte & ATTR_SW_WIRED) == 0) {
7543 				/*
7544 				 * Clear the accessed bit in this L3 entry
7545 				 * regardless of the contiguous bit.
7546 				 */
7547 				pmap_clear_bits(pte, ATTR_AF);
7548 				pmap_invalidate_page(pmap, pv->pv_va, true);
7549 				cleared++;
7550 			} else
7551 				not_cleared++;
7552 		} else if ((tpte & ATTR_CONTIGUOUS) != 0 &&
7553 		    (pmap_load_l3c(pte) & ATTR_AF) != 0) {
7554 			/*
7555 			 * An L3C superpage mapping is regarded as accessed
7556 			 * until the accessed bit has been cleared in all
7557 			 * of its constituent entries.
7558 			 */
7559 			not_cleared++;
7560 		}
7561 		PMAP_UNLOCK(pmap);
7562 		/* Rotate the PV list if it has more than one entry. */
7563 		if (TAILQ_NEXT(pv, pv_next) != NULL) {
7564 			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
7565 			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
7566 			m->md.pv_gen++;
7567 		}
7568 	} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
7569 	    not_cleared < PMAP_TS_REFERENCED_MAX);
7570 out:
7571 	rw_wunlock(lock);
7572 	vm_page_free_pages_toq(&free, true);
7573 	return (cleared + not_cleared);
7574 }
7575 
7576 /*
7577  *	Apply the given advice to the specified range of addresses within the
7578  *	given pmap.  Depending on the advice, clear the referenced and/or
7579  *	modified flags in each mapping and set the mapped page's dirty field.
7580  */
7581 void
7582 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
7583 {
7584 	struct rwlock *lock;
7585 	vm_offset_t va, va_next, dva;
7586 	vm_page_t m;
7587 	pd_entry_t *l0, *l1, *l2, oldl2;
7588 	pt_entry_t *l3, *dl3, oldl3;
7589 
7590 	PMAP_ASSERT_STAGE1(pmap);
7591 
7592 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
7593 		return;
7594 
7595 	PMAP_LOCK(pmap);
7596 	for (; sva < eva; sva = va_next) {
7597 		l0 = pmap_l0(pmap, sva);
7598 		if (pmap_load(l0) == 0) {
7599 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
7600 			if (va_next < sva)
7601 				va_next = eva;
7602 			continue;
7603 		}
7604 
7605 		va_next = (sva + L1_SIZE) & ~L1_OFFSET;
7606 		if (va_next < sva)
7607 			va_next = eva;
7608 		l1 = pmap_l0_to_l1(l0, sva);
7609 		if (pmap_load(l1) == 0)
7610 			continue;
7611 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
7612 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
7613 			continue;
7614 		}
7615 
7616 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
7617 		if (va_next < sva)
7618 			va_next = eva;
7619 		l2 = pmap_l1_to_l2(l1, sva);
7620 		oldl2 = pmap_load(l2);
7621 		if (oldl2 == 0)
7622 			continue;
7623 		if ((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK) {
7624 			if ((oldl2 & ATTR_SW_MANAGED) == 0)
7625 				continue;
7626 			lock = NULL;
7627 			if (!pmap_demote_l2_locked(pmap, l2, sva, &lock)) {
7628 				if (lock != NULL)
7629 					rw_wunlock(lock);
7630 
7631 				/*
7632 				 * The 2MB page mapping was destroyed.
7633 				 */
7634 				continue;
7635 			}
7636 
7637 			/*
7638 			 * Unless the page mappings are wired, remove the
7639 			 * mapping to a single page so that a subsequent
7640 			 * access may repromote.  Choosing the last page
7641 			 * within the address range [sva, min(va_next, eva))
7642 			 * generally results in more repromotions.  Since the
7643 			 * underlying page table page is fully populated, this
7644 			 * removal never frees a page table page.
7645 			 */
7646 			if ((oldl2 & ATTR_SW_WIRED) == 0) {
7647 				va = eva;
7648 				if (va > va_next)
7649 					va = va_next;
7650 				va -= PAGE_SIZE;
7651 				KASSERT(va >= sva,
7652 				    ("pmap_advise: no address gap"));
7653 				l3 = pmap_l2_to_l3(l2, va);
7654 				KASSERT(pmap_load(l3) != 0,
7655 				    ("pmap_advise: invalid PTE"));
7656 				pmap_remove_l3(pmap, l3, va, pmap_load(l2),
7657 				    NULL, &lock);
7658 			}
7659 			if (lock != NULL)
7660 				rw_wunlock(lock);
7661 		}
7662 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
7663 		    ("pmap_advise: invalid L2 entry after demotion"));
7664 		if (va_next > eva)
7665 			va_next = eva;
7666 		va = va_next;
7667 		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
7668 		    sva += L3_SIZE) {
7669 			oldl3 = pmap_load(l3);
7670 			if ((oldl3 & (ATTR_SW_MANAGED | ATTR_DESCR_MASK)) !=
7671 			    (ATTR_SW_MANAGED | L3_PAGE))
7672 				goto maybe_invlrng;
7673 			else if (pmap_pte_dirty(pmap, oldl3)) {
7674 				if (advice == MADV_DONTNEED) {
7675 					/*
7676 					 * Future calls to pmap_is_modified()
7677 					 * can be avoided by making the page
7678 					 * dirty now.
7679 					 */
7680 					m = PTE_TO_VM_PAGE(oldl3);
7681 					vm_page_dirty(m);
7682 				}
7683 				if ((oldl3 & ATTR_CONTIGUOUS) != 0) {
7684 					/*
7685 					 * Unconditionally demote the L3C
7686 					 * superpage because we do not allow
7687 					 * writeable, clean superpages.
7688 					 */
7689 					(void)pmap_demote_l3c(pmap, l3, sva);
7690 
7691 					/*
7692 					 * Destroy the final mapping before the
7693 					 * next L3C boundary or va_next,
7694 					 * whichever comes first, so that a
7695 					 * subsequent access may act as a
7696 					 * repromotion trigger.
7697 					 */
7698 					if ((oldl3 & ATTR_SW_WIRED) == 0) {
7699 						dva = MIN((sva & ~L3C_OFFSET) +
7700 						    L3C_SIZE - PAGE_SIZE,
7701 						    va_next - PAGE_SIZE);
7702 						dl3 = pmap_l2_to_l3(l2, dva);
7703 						KASSERT(pmap_load(dl3) != 0,
7704 						    ("pmap_advise: invalid PTE"));
7705 						lock = NULL;
7706 						pmap_remove_l3(pmap, dl3, dva,
7707 						    pmap_load(l2), NULL, &lock);
7708 						if (lock != NULL)
7709 							rw_wunlock(lock);
7710 					}
7711 
7712 					/*
7713 					 * The L3 entry's accessed bit may have
7714 					 * changed.
7715 					 */
7716 					oldl3 = pmap_load(l3);
7717 				}
7718 
7719 				/*
7720 				 * Check that we did not just destroy this entry so
7721 				 * that we avoid corrupting the page table.
7722 				 */
7723 				if (oldl3 != 0) {
7724 					while (!atomic_fcmpset_long(l3, &oldl3,
7725 					    (oldl3 & ~ATTR_AF) |
7726 					    ATTR_S1_AP(ATTR_S1_AP_RO)))
7727 						cpu_spinwait();
7728 				}
7729 			} else if ((oldl3 & ATTR_AF) != 0) {
7730 				/*
7731 				 * Clear the accessed bit in this L3 entry
7732 				 * regardless of the contiguous bit.
7733 				 */
7734 				pmap_clear_bits(l3, ATTR_AF);
7735 			} else
7736 				goto maybe_invlrng;
7737 			if (va == va_next)
7738 				va = sva;
7739 			continue;
7740 maybe_invlrng:
7741 			if (va != va_next) {
7742 				pmap_s1_invalidate_range(pmap, va, sva, true);
7743 				va = va_next;
7744 			}
7745 		}
7746 		if (va != va_next)
7747 			pmap_s1_invalidate_range(pmap, va, sva, true);
7748 	}
7749 	PMAP_UNLOCK(pmap);
7750 }
7751 
7752 /*
7753  *	Clear the modify bits on the specified physical page.
7754  */
7755 void
7756 pmap_clear_modify(vm_page_t m)
7757 {
7758 	struct md_page *pvh;
7759 	struct rwlock *lock;
7760 	pmap_t pmap;
7761 	pv_entry_t next_pv, pv;
7762 	pd_entry_t *l2, oldl2;
7763 	pt_entry_t *l3, oldl3;
7764 	vm_offset_t va;
7765 	int md_gen, pvh_gen;
7766 
7767 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7768 	    ("pmap_clear_modify: page %p is not managed", m));
7769 	vm_page_assert_busied(m);
7770 
7771 	if (!pmap_page_is_write_mapped(m))
7772 		return;
7773 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
7774 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7775 	rw_wlock(lock);
7776 restart:
7777 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
7778 		pmap = PV_PMAP(pv);
7779 		PMAP_ASSERT_STAGE1(pmap);
7780 		if (!PMAP_TRYLOCK(pmap)) {
7781 			pvh_gen = pvh->pv_gen;
7782 			rw_wunlock(lock);
7783 			PMAP_LOCK(pmap);
7784 			rw_wlock(lock);
7785 			if (pvh_gen != pvh->pv_gen) {
7786 				PMAP_UNLOCK(pmap);
7787 				goto restart;
7788 			}
7789 		}
7790 		va = pv->pv_va;
7791 		l2 = pmap_l2(pmap, va);
7792 		oldl2 = pmap_load(l2);
7793 		/* If oldl2 has ATTR_SW_DBM set, then it is also dirty. */
7794 		if ((oldl2 & ATTR_SW_DBM) != 0 &&
7795 		    pmap_demote_l2_locked(pmap, l2, va, &lock) &&
7796 		    (oldl2 & ATTR_SW_WIRED) == 0) {
7797 			/*
7798 			 * Write protect the mapping to a single page so that
7799 			 * a subsequent write access may repromote.
7800 			 */
7801 			va += VM_PAGE_TO_PHYS(m) - PTE_TO_PHYS(oldl2);
7802 			l3 = pmap_l2_to_l3(l2, va);
7803 			oldl3 = pmap_load(l3);
7804 			while (!atomic_fcmpset_long(l3, &oldl3,
7805 			    (oldl3 & ~ATTR_SW_DBM) | ATTR_S1_AP(ATTR_S1_AP_RO)))
7806 				cpu_spinwait();
7807 			vm_page_dirty(m);
7808 			pmap_s1_invalidate_page(pmap, va, true);
7809 		}
7810 		PMAP_UNLOCK(pmap);
7811 	}
7812 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
7813 		pmap = PV_PMAP(pv);
7814 		PMAP_ASSERT_STAGE1(pmap);
7815 		if (!PMAP_TRYLOCK(pmap)) {
7816 			md_gen = m->md.pv_gen;
7817 			pvh_gen = pvh->pv_gen;
7818 			rw_wunlock(lock);
7819 			PMAP_LOCK(pmap);
7820 			rw_wlock(lock);
7821 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
7822 				PMAP_UNLOCK(pmap);
7823 				goto restart;
7824 			}
7825 		}
7826 		l2 = pmap_l2(pmap, pv->pv_va);
7827 		l3 = pmap_l2_to_l3(l2, pv->pv_va);
7828 		oldl3 = pmap_load(l3);
7829 		KASSERT((oldl3 & ATTR_CONTIGUOUS) == 0 ||
7830 		    (oldl3 & (ATTR_SW_DBM | ATTR_S1_AP_RW_BIT)) !=
7831 		    (ATTR_SW_DBM | ATTR_S1_AP(ATTR_S1_AP_RO)),
7832 		    ("writeable L3C superpage not dirty"));
7833 		if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM) {
7834 			if ((oldl3 & ATTR_CONTIGUOUS) != 0)
7835 				(void)pmap_demote_l3c(pmap, l3, pv->pv_va);
7836 			pmap_set_bits(l3, ATTR_S1_AP(ATTR_S1_AP_RO));
7837 			pmap_s1_invalidate_page(pmap, pv->pv_va, true);
7838 		}
7839 		PMAP_UNLOCK(pmap);
7840 	}
7841 	rw_wunlock(lock);
7842 }
7843 
7844 void *
7845 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
7846 {
7847 	struct pmap_preinit_mapping *ppim;
7848 	vm_offset_t va, offset;
7849 	pd_entry_t old_l2e, *pde;
7850 	pt_entry_t *l2;
7851 	int i, lvl, l2_blocks, free_l2_count, start_idx;
7852 
7853 	/* Use the DMAP region if we can */
7854 	if (PHYS_IN_DMAP(pa) && PHYS_IN_DMAP(pa + size - 1) &&
7855 	    pmap_kmapped_range(PHYS_TO_DMAP(pa), size))
7856 		return ((void *)PHYS_TO_DMAP(pa));
7857 
7858 	if (!vm_initialized) {
7859 		/*
7860 		 * No L3 ptables so map entire L2 blocks where start VA is:
7861 		 * 	preinit_map_va + start_idx * L2_SIZE
7862 		 * There may be duplicate mappings (multiple VA -> same PA) but
7863 		 * ARM64 dcache is always PIPT so that's acceptable.
7864 		 */
7865 		 if (size == 0)
7866 			 return (NULL);
7867 
7868 		 /* Calculate how many L2 blocks are needed for the mapping */
7869 		l2_blocks = (roundup2(pa + size, L2_SIZE) -
7870 		    rounddown2(pa, L2_SIZE)) >> L2_SHIFT;
7871 
7872 		offset = pa & L2_OFFSET;
7873 
7874 		if (preinit_map_va == 0)
7875 			return (NULL);
7876 
7877 		/* Map 2MiB L2 blocks from reserved VA space */
7878 
7879 		free_l2_count = 0;
7880 		start_idx = -1;
7881 		/* Find enough free contiguous VA space */
7882 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
7883 			ppim = pmap_preinit_mapping + i;
7884 			if (free_l2_count > 0 && ppim->pa != 0) {
7885 				/* Not enough space here */
7886 				free_l2_count = 0;
7887 				start_idx = -1;
7888 				continue;
7889 			}
7890 
7891 			if (ppim->pa == 0) {
7892 				/* Free L2 block */
7893 				if (start_idx == -1)
7894 					start_idx = i;
7895 				free_l2_count++;
7896 				if (free_l2_count == l2_blocks)
7897 					break;
7898 			}
7899 		}
7900 		if (free_l2_count != l2_blocks)
7901 			panic("%s: too many preinit mappings", __func__);
7902 
7903 		va = preinit_map_va + (start_idx * L2_SIZE);
7904 		for (i = start_idx; i < start_idx + l2_blocks; i++) {
7905 			/* Mark entries as allocated */
7906 			ppim = pmap_preinit_mapping + i;
7907 			ppim->pa = pa;
7908 			ppim->va = va + offset;
7909 			ppim->size = size;
7910 		}
7911 
7912 		/* Map L2 blocks */
7913 		pa = rounddown2(pa, L2_SIZE);
7914 		old_l2e = 0;
7915 		for (i = 0; i < l2_blocks; i++) {
7916 			pde = pmap_pde(kernel_pmap, va, &lvl);
7917 			KASSERT(pde != NULL,
7918 			    ("pmap_mapbios: Invalid page entry, va: 0x%lx",
7919 			    va));
7920 			KASSERT(lvl == 1,
7921 			    ("pmap_mapbios: Invalid level %d", lvl));
7922 
7923 			/* Insert L2_BLOCK */
7924 			l2 = pmap_l1_to_l2(pde, va);
7925 			old_l2e |= pmap_load_store(l2,
7926 			    PHYS_TO_PTE(pa) | ATTR_AF | pmap_sh_attr |
7927 			    ATTR_S1_XN | ATTR_KERN_GP |
7928 			    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
7929 
7930 			va += L2_SIZE;
7931 			pa += L2_SIZE;
7932 		}
7933 		if ((old_l2e & ATTR_DESCR_VALID) != 0)
7934 			pmap_s1_invalidate_all(kernel_pmap);
7935 		else {
7936 			/*
7937 			 * Because the old entries were invalid and the new
7938 			 * mappings are not executable, an isb is not required.
7939 			 */
7940 			dsb(ishst);
7941 		}
7942 
7943 		va = preinit_map_va + (start_idx * L2_SIZE);
7944 
7945 	} else {
7946 		/* kva_alloc may be used to map the pages */
7947 		offset = pa & PAGE_MASK;
7948 		size = round_page(offset + size);
7949 
7950 		va = kva_alloc(size);
7951 		if (va == 0)
7952 			panic("%s: Couldn't allocate KVA", __func__);
7953 
7954 		pde = pmap_pde(kernel_pmap, va, &lvl);
7955 		KASSERT(lvl == 2, ("pmap_mapbios: Invalid level %d", lvl));
7956 
7957 		/* L3 table is linked */
7958 		va = trunc_page(va);
7959 		pa = trunc_page(pa);
7960 		pmap_kenter(va, size, pa, memory_mapping_mode(pa));
7961 	}
7962 
7963 	return ((void *)(va + offset));
7964 }
7965 
7966 void
7967 pmap_unmapbios(void *p, vm_size_t size)
7968 {
7969 	struct pmap_preinit_mapping *ppim;
7970 	vm_offset_t offset, va, va_trunc;
7971 	pd_entry_t *pde;
7972 	pt_entry_t *l2;
7973 	int error __diagused, i, lvl, l2_blocks, block;
7974 	bool preinit_map;
7975 
7976 	va = (vm_offset_t)p;
7977 	if (VIRT_IN_DMAP(va)) {
7978 		KASSERT(VIRT_IN_DMAP(va + size - 1),
7979 		    ("%s: End address not in DMAP region: %lx", __func__,
7980 		    va + size - 1));
7981 		/* Ensure the attributes are as expected for the DMAP region */
7982 		PMAP_LOCK(kernel_pmap);
7983 		error = pmap_change_props_locked(va, size,
7984 		    PROT_READ | PROT_WRITE, VM_MEMATTR_DEFAULT, false);
7985 		PMAP_UNLOCK(kernel_pmap);
7986 		KASSERT(error == 0, ("%s: Failed to reset DMAP attributes: %d",
7987 		    __func__, error));
7988 
7989 		return;
7990 	}
7991 
7992 	l2_blocks =
7993 	   (roundup2(va + size, L2_SIZE) - rounddown2(va, L2_SIZE)) >> L2_SHIFT;
7994 	KASSERT(l2_blocks > 0, ("pmap_unmapbios: invalid size %lx", size));
7995 
7996 	/* Remove preinit mapping */
7997 	preinit_map = false;
7998 	block = 0;
7999 	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
8000 		ppim = pmap_preinit_mapping + i;
8001 		if (ppim->va == va) {
8002 			KASSERT(ppim->size == size,
8003 			    ("pmap_unmapbios: size mismatch"));
8004 			ppim->va = 0;
8005 			ppim->pa = 0;
8006 			ppim->size = 0;
8007 			preinit_map = true;
8008 			offset = block * L2_SIZE;
8009 			va_trunc = rounddown2(va, L2_SIZE) + offset;
8010 
8011 			/* Remove L2_BLOCK */
8012 			pde = pmap_pde(kernel_pmap, va_trunc, &lvl);
8013 			KASSERT(pde != NULL,
8014 			    ("pmap_unmapbios: Invalid page entry, va: 0x%lx",
8015 			    va_trunc));
8016 			l2 = pmap_l1_to_l2(pde, va_trunc);
8017 			pmap_clear(l2);
8018 
8019 			if (block == (l2_blocks - 1))
8020 				break;
8021 			block++;
8022 		}
8023 	}
8024 	if (preinit_map) {
8025 		pmap_s1_invalidate_all(kernel_pmap);
8026 		return;
8027 	}
8028 
8029 	/* Unmap the pages reserved with kva_alloc. */
8030 	if (vm_initialized) {
8031 		offset = va & PAGE_MASK;
8032 		size = round_page(offset + size);
8033 		va = trunc_page(va);
8034 
8035 		/* Unmap and invalidate the pages */
8036 		pmap_kremove_device(va, size);
8037 
8038 		kva_free(va, size);
8039 	}
8040 }
8041 
8042 /*
8043  * Sets the memory attribute for the specified page.
8044  */
8045 void
8046 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
8047 {
8048 
8049 	m->md.pv_memattr = ma;
8050 
8051 	/*
8052 	 * If "m" is a normal page, update its direct mapping.  This update
8053 	 * can be relied upon to perform any cache operations that are
8054 	 * required for data coherence.
8055 	 */
8056 	if ((m->flags & PG_FICTITIOUS) == 0 &&
8057 	    pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
8058 	    m->md.pv_memattr) != 0)
8059 		panic("memory attribute change on the direct map failed");
8060 }
8061 
8062 /*
8063  * Changes the specified virtual address range's memory type to that given by
8064  * the parameter "mode".  The specified virtual address range must be
8065  * completely contained within either the direct map or the kernel map.  If
8066  * the virtual address range is contained within the kernel map, then the
8067  * memory type for each of the corresponding ranges of the direct map is also
8068  * changed.  (The corresponding ranges of the direct map are those ranges that
8069  * map the same physical pages as the specified virtual address range.)  These
8070  * changes to the direct map are necessary because Intel describes the
8071  * behavior of their processors as "undefined" if two or more mappings to the
8072  * same physical page have different memory types.
8073  *
8074  * Returns zero if the change completed successfully, and either EINVAL or
8075  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
8076  * of the virtual address range was not mapped, and ENOMEM is returned if
8077  * there was insufficient memory available to complete the change.  In the
8078  * latter case, the memory type may have been changed on some part of the
8079  * virtual address range or the direct map.
8080  */
8081 int
8082 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
8083 {
8084 	int error;
8085 
8086 	PMAP_LOCK(kernel_pmap);
8087 	error = pmap_change_props_locked(va, size, PROT_NONE, mode, false);
8088 	PMAP_UNLOCK(kernel_pmap);
8089 	return (error);
8090 }
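/*
 * Illustrative example (the variables and error handling are assumptions,
 * not taken from this file): a caller that needs an uncacheable view of an
 * already mapped kernel range might do
 *
 *	error = pmap_change_attr(va, size, VM_MEMATTR_UNCACHEABLE);
 *	if (error != 0)
 *		return (error);
 *
 * where "va" and "size" describe a range within the kernel map or the DMAP.
 */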
8091 
8092 /*
8093  * Changes the specified virtual address range's protections to those
8094  * specified by "prot".  Like pmap_change_attr(), protections for aliases
8095  * in the direct map are updated as well.  Protections on aliasing mappings may
8096  * be a subset of the requested protections; for example, mappings in the direct
8097  * map are never executable.
8098  */
8099 int
8100 pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot)
8101 {
8102 	int error;
8103 
8104 	/* Only supported within the kernel map. */
8105 	if (va < VM_MIN_KERNEL_ADDRESS)
8106 		return (EINVAL);
8107 
8108 	PMAP_LOCK(kernel_pmap);
8109 	error = pmap_change_props_locked(va, size, prot, -1, false);
8110 	PMAP_UNLOCK(kernel_pmap);
8111 	return (error);
8112 }
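/*
 * Illustrative example (the variables are assumptions, not taken from this
 * file): write-protecting a finalized range of the kernel map might look
 * like
 *
 *	error = pmap_change_prot(addr, len, VM_PROT_READ | VM_PROT_EXECUTE);
 *
 * As noted above, the range must lie within the kernel map.
 */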
8113 
8114 static int
8115 pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
8116     int mode, bool skip_unmapped)
8117 {
8118 	vm_offset_t base, offset, tmpva;
8119 	vm_size_t pte_size;
8120 	vm_paddr_t pa;
8121 	pt_entry_t pte, *ptep, *newpte;
8122 	pt_entry_t bits, mask;
8123 	int lvl, rv;
8124 
8125 	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
8126 	base = trunc_page(va);
8127 	offset = va & PAGE_MASK;
8128 	size = round_page(offset + size);
8129 
8130 	if (!VIRT_IN_DMAP(base) &&
8131 	    !(base >= VM_MIN_KERNEL_ADDRESS && base < VM_MAX_KERNEL_ADDRESS))
8132 		return (EINVAL);
8133 
8134 	bits = 0;
8135 	mask = 0;
8136 	if (mode != -1) {
8137 		bits = ATTR_S1_IDX(mode);
8138 		mask = ATTR_S1_IDX_MASK;
8139 		if (mode == VM_MEMATTR_DEVICE) {
8140 			mask |= ATTR_S1_XN;
8141 			bits |= ATTR_S1_XN;
8142 		}
8143 	}
8144 	if (prot != VM_PROT_NONE) {
8145 		/* Don't mark the DMAP as executable. It never is on arm64. */
8146 		if (VIRT_IN_DMAP(base)) {
8147 			prot &= ~VM_PROT_EXECUTE;
8148 			/*
8149 			 * XXX Mark the DMAP as writable for now. We rely
8150 			 * on this in ddb & dtrace to insert breakpoint
8151 			 * instructions.
8152 			 */
8153 			prot |= VM_PROT_WRITE;
8154 		}
8155 
8156 		if ((prot & VM_PROT_WRITE) == 0) {
8157 			bits |= ATTR_S1_AP(ATTR_S1_AP_RO);
8158 		}
8159 		if ((prot & VM_PROT_EXECUTE) == 0) {
8160 			bits |= ATTR_S1_PXN;
8161 		}
8162 		bits |= ATTR_S1_UXN;
8163 		mask |= ATTR_S1_AP_MASK | ATTR_S1_XN;
8164 	}
8165 
8166 	for (tmpva = base; tmpva < base + size; ) {
8167 		ptep = pmap_pte(kernel_pmap, tmpva, &lvl);
8168 		if (ptep == NULL && !skip_unmapped) {
8169 			return (EINVAL);
8170 		} else if ((ptep == NULL && skip_unmapped) ||
8171 		    (pmap_load(ptep) & mask) == bits) {
8172 			/*
8173 			 * We already have the correct attribute or there
8174 			 * is no memory mapped at this address and we are
8175 			 * skipping unmapped memory.
8176 			 */
8177 			switch (lvl) {
8178 			default:
8179 				panic("Invalid DMAP table level: %d\n", lvl);
8180 			case 1:
8181 				tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
8182 				break;
8183 			case 2:
8184 				tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
8185 				break;
8186 			case 3:
8187 				tmpva += PAGE_SIZE;
8188 				break;
8189 			}
8190 		} else {
8191 			/* We can't demote/promote this entry */
8192 			MPASS((pmap_load(ptep) & ATTR_SW_NO_PROMOTE) == 0);
8193 
8194 			/*
8195 			 * Find the entry and demote it if the requested change
8196 			 * only applies to part of the address range mapped by
8197 			 * the entry.
8198 			 */
8199 			switch (lvl) {
8200 			default:
8201 				panic("Invalid DMAP table level: %d\n", lvl);
8202 			case 1:
8203 				PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
8204 				if ((tmpva & L1_OFFSET) == 0 &&
8205 				    (base + size - tmpva) >= L1_SIZE) {
8206 					pte_size = L1_SIZE;
8207 					break;
8208 				}
8209 				newpte = pmap_demote_l1(kernel_pmap, ptep,
8210 				    tmpva & ~L1_OFFSET);
8211 				if (newpte == NULL)
8212 					return (EINVAL);
8213 				ptep = pmap_l1_to_l2(ptep, tmpva);
8214 				/* FALLTHROUGH */
8215 			case 2:
8216 				if ((pmap_load(ptep) & ATTR_CONTIGUOUS) != 0) {
8217 					if ((tmpva & L2C_OFFSET) == 0 &&
8218 					    (base + size - tmpva) >= L2C_SIZE) {
8219 						pte_size = L2C_SIZE;
8220 						break;
8221 					}
8222 					if (!pmap_demote_l2c(kernel_pmap, ptep,
8223 					    tmpva))
8224 						return (EINVAL);
8225 				}
8226 				if ((tmpva & L2_OFFSET) == 0 &&
8227 				    (base + size - tmpva) >= L2_SIZE) {
8228 					pte_size = L2_SIZE;
8229 					break;
8230 				}
8231 				newpte = pmap_demote_l2(kernel_pmap, ptep,
8232 				    tmpva);
8233 				if (newpte == NULL)
8234 					return (EINVAL);
8235 				ptep = pmap_l2_to_l3(ptep, tmpva);
8236 				/* FALLTHROUGH */
8237 			case 3:
8238 				if ((pmap_load(ptep) & ATTR_CONTIGUOUS) != 0) {
8239 					if ((tmpva & L3C_OFFSET) == 0 &&
8240 					    (base + size - tmpva) >= L3C_SIZE) {
8241 						pte_size = L3C_SIZE;
8242 						break;
8243 					}
8244 					if (!pmap_demote_l3c(kernel_pmap, ptep,
8245 					    tmpva))
8246 						return (EINVAL);
8247 				}
8248 				pte_size = PAGE_SIZE;
8249 				break;
8250 			}
8251 
8252 			/* Update the entry */
8253 			pte = pmap_load(ptep);
8254 			pte &= ~mask;
8255 			pte |= bits;
8256 
8257 			switch (pte_size) {
8258 			case L2C_SIZE:
8259 				pmap_update_strided(kernel_pmap, ptep, ptep +
8260 				    L2C_ENTRIES, pte, tmpva, L2_SIZE, L2C_SIZE);
8261 				break;
8262 			case L3C_SIZE:
8263 				pmap_update_strided(kernel_pmap, ptep, ptep +
8264 				    L3C_ENTRIES, pte, tmpva, L3_SIZE, L3C_SIZE);
8265 				break;
8266 			default:
8267 				/*
8268 				 * We are updating a single block or page entry,
8269 				 * so regardless of pte_size pass PAGE_SIZE in
8270 				 * order that a single TLB invalidation is
8271 				 * performed.
8272 				 */
8273 				pmap_update_entry(kernel_pmap, ptep, pte, tmpva,
8274 				    PAGE_SIZE);
8275 				break;
8276 			}
8277 
8278 			pa = PTE_TO_PHYS(pte);
8279 			if (!VIRT_IN_DMAP(tmpva) && PHYS_IN_DMAP(pa)) {
8280 				/*
8281 				 * Keep the DMAP memory in sync.
8282 				 */
8283 				rv = pmap_change_props_locked(
8284 				    PHYS_TO_DMAP(pa), pte_size,
8285 				    prot, mode, true);
8286 				if (rv != 0)
8287 					return (rv);
8288 			}
8289 
8290 			/*
8291 			 * If moving to a non-cacheable entry flush
8292 			 * the cache.
8293 			 */
8294 			if (mode == VM_MEMATTR_UNCACHEABLE)
8295 				cpu_dcache_wbinv_range((void *)tmpva, pte_size);
8296 			tmpva += pte_size;
8297 		}
8298 	}
8299 
8300 	return (0);
8301 }
8302 
8303 /*
8304  * Create an L2 table to map all addresses within an L1 mapping.
8305  */
8306 static pt_entry_t *
8307 pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
8308 {
8309 	pt_entry_t *l2, newl2, oldl1;
8310 	vm_offset_t tmpl1;
8311 	vm_paddr_t l2phys, phys;
8312 	vm_page_t ml2;
8313 	int i;
8314 
8315 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
8316 	oldl1 = pmap_load(l1);
8317 	PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
8318 	KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK,
8319 	    ("pmap_demote_l1: Demoting a non-block entry"));
8320 	KASSERT((va & L1_OFFSET) == 0,
8321 	    ("pmap_demote_l1: Invalid virtual address %#lx", va));
8322 	KASSERT((oldl1 & ATTR_SW_MANAGED) == 0,
8323 	    ("pmap_demote_l1: Level 1 table shouldn't be managed"));
8324 	KASSERT((oldl1 & ATTR_SW_NO_PROMOTE) == 0,
8325 	    ("pmap_demote_l1: Demoting entry with no-demote flag set"));
8326 
8327 	tmpl1 = 0;
8328 	if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
8329 		tmpl1 = kva_alloc(PAGE_SIZE);
8330 		if (tmpl1 == 0)
8331 			return (NULL);
8332 	}
8333 
8334 	if ((ml2 = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED)) ==
8335 	    NULL) {
8336 		CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
8337 		    " in pmap %p", va, pmap);
8338 		l2 = NULL;
8339 		goto fail;
8340 	}
8341 
8342 	l2phys = VM_PAGE_TO_PHYS(ml2);
8343 	l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys);
8344 
8345 	/* The physical address that the old L1 mapping points at */
8346 	phys = PTE_TO_PHYS(oldl1);
8347 	/* The attributes from the old L1 entry to be copied */
8348 	newl2 = oldl1 & ATTR_MASK;
8349 
8350 	/* Create the new entries */
8351 	newl2 |= ATTR_CONTIGUOUS;
8352 	for (i = 0; i < Ln_ENTRIES; i++) {
8353 		l2[i] = newl2 | phys;
8354 		phys += L2_SIZE;
8355 	}
8356 	KASSERT(l2[0] == (ATTR_CONTIGUOUS | (oldl1 & ~ATTR_DESCR_MASK) |
8357 	    L2_BLOCK), ("Invalid l2 page (%lx != %lx)", l2[0],
8358 	    ATTR_CONTIGUOUS | (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));
8359 
8360 	if (tmpl1 != 0) {
8361 		pmap_kenter(tmpl1, PAGE_SIZE,
8362 		    DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET,
8363 		    VM_MEMATTR_WRITE_BACK);
8364 		l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
8365 	}
8366 
8367 	pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE);
8368 
8369 	counter_u64_add(pmap_l1_demotions, 1);
8370 fail:
8371 	if (tmpl1 != 0) {
8372 		pmap_kremove(tmpl1);
8373 		kva_free(tmpl1, PAGE_SIZE);
8374 	}
8375 
8376 	return (l2);
8377 }
8378 
8379 static void
8380 pmap_fill_l3(pt_entry_t *firstl3, pt_entry_t newl3)
8381 {
8382 	pt_entry_t *l3;
8383 
8384 	for (l3 = firstl3; l3 - firstl3 < Ln_ENTRIES; l3++) {
8385 		*l3 = newl3;
8386 		newl3 += L3_SIZE;
8387 	}
8388 }
8389 
8390 static void
8391 pmap_demote_l2_check(pt_entry_t *firstl3p __unused, pt_entry_t newl3e __unused)
8392 {
8393 #ifdef INVARIANTS
8394 #ifdef DIAGNOSTIC
8395 	pt_entry_t *xl3p, *yl3p;
8396 
8397 	for (xl3p = firstl3p; xl3p < firstl3p + Ln_ENTRIES;
8398 	    xl3p++, newl3e += PAGE_SIZE) {
8399 		if (PTE_TO_PHYS(pmap_load(xl3p)) != PTE_TO_PHYS(newl3e)) {
8400 			printf("pmap_demote_l2: xl3e %zd and newl3e map "
8401 			    "different pages: found %#lx, expected %#lx\n",
8402 			    xl3p - firstl3p, pmap_load(xl3p), newl3e);
8403 			printf("page table dump\n");
8404 			for (yl3p = firstl3p; yl3p < firstl3p + Ln_ENTRIES;
8405 			    yl3p++) {
8406 				printf("%zd %#lx\n", yl3p - firstl3p,
8407 				    pmap_load(yl3p));
8408 			}
8409 			panic("firstpte");
8410 		}
8411 	}
8412 #else
8413 	KASSERT(PTE_TO_PHYS(pmap_load(firstl3p)) == PTE_TO_PHYS(newl3e),
8414 	    ("pmap_demote_l2: firstl3 and newl3e map different physical"
8415 	    " addresses"));
8416 #endif
8417 #endif
8418 }
8419 
8420 static void
8421 pmap_demote_l2_abort(pmap_t pmap, vm_offset_t va, pt_entry_t *l2,
8422     struct rwlock **lockp)
8423 {
8424 	struct spglist free;
8425 
8426 	SLIST_INIT(&free);
8427 	(void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), &free,
8428 	    lockp);
8429 	vm_page_free_pages_toq(&free, true);
8430 }
8431 
8432 /*
8433  * Create an L3 table to map all addresses within an L2 mapping.
8434  */
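/*
 * In outline: an L2 block that was never accessed (ATTR_AF clear) is simply
 * removed rather than demoted; otherwise a page table page left over from an
 * earlier promotion is reused if one was saved, or a new one is allocated
 * and filled with L3 PTEs that inherit the old attributes and carry
 * ATTR_CONTIGUOUS.  Spare PV entries are reserved for managed mappings, the
 * L2 entry is switched from a block to a table descriptor by
 * pmap_update_entry() with a single TLB invalidation, and the PV entry is
 * demoted to match.
 */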
8435 static pt_entry_t *
8436 pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
8437     struct rwlock **lockp)
8438 {
8439 	pt_entry_t *l3, newl3, oldl2;
8440 	vm_offset_t tmpl2;
8441 	vm_paddr_t l3phys;
8442 	vm_page_t ml3;
8443 
8444 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
8445 	PMAP_ASSERT_STAGE1(pmap);
8446 	KASSERT(ADDR_IS_CANONICAL(va),
8447 	    ("%s: Address not in canonical form: %lx", __func__, va));
8448 
8449 	l3 = NULL;
8450 	oldl2 = pmap_load(l2);
8451 	KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
8452 	    ("pmap_demote_l2: Demoting a non-block entry"));
8453 	KASSERT((oldl2 & ATTR_SW_NO_PROMOTE) == 0,
8454 	    ("pmap_demote_l2: Demoting entry with no-demote flag set"));
8455 	va &= ~L2_OFFSET;
8456 
8457 	tmpl2 = 0;
8458 	if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
8459 		tmpl2 = kva_alloc(PAGE_SIZE);
8460 		if (tmpl2 == 0)
8461 			return (NULL);
8462 	}
8463 
8464 	/*
8465 	 * Invalidate the 2MB page mapping and return "failure" if the
8466 	 * mapping was never accessed.
8467 	 */
8468 	if ((oldl2 & ATTR_AF) == 0) {
8469 		KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
8470 		    ("pmap_demote_l2: a wired mapping is missing ATTR_AF"));
8471 		pmap_demote_l2_abort(pmap, va, l2, lockp);
8472 		CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx in pmap %p",
8473 		    va, pmap);
8474 		goto fail;
8475 	}
8476 
8477 	if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
8478 		KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
8479 		    ("pmap_demote_l2: page table page for a wired mapping"
8480 		    " is missing"));
8481 
8482 		/*
8483 		 * If the page table page is missing and the mapping
8484 		 * is for a kernel address, the mapping must belong to
8485 		 * either the direct map or the early kernel memory.
8486 		 * Page table pages are preallocated for every other
8487 		 * part of the kernel address space, so the direct map
8488 		 * region and early kernel memory are the only parts of the
8489 		 * kernel address space that must be handled here.
8490 		 */
8491 		KASSERT(!ADDR_IS_KERNEL(va) || VIRT_IN_DMAP(va) ||
8492 		    (va >= VM_MIN_KERNEL_ADDRESS && va < kernel_vm_end),
8493 		    ("pmap_demote_l2: No saved mpte for va %#lx", va));
8494 
8495 		/*
8496 		 * If the 2MB page mapping belongs to the direct map
8497 		 * region of the kernel's address space, then the page
8498 		 * allocation request specifies the highest possible
8499 		 * priority (VM_ALLOC_INTERRUPT).  Otherwise, the
8500 		 * priority is normal.
8501 		 */
8502 		ml3 = vm_page_alloc_noobj(
8503 		    (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : 0) |
8504 		    VM_ALLOC_WIRED);
8505 
8506 		/*
8507 		 * If the allocation of the new page table page fails,
8508 		 * invalidate the 2MB page mapping and return "failure".
8509 		 */
8510 		if (ml3 == NULL) {
8511 			pmap_demote_l2_abort(pmap, va, l2, lockp);
8512 			CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
8513 			    " in pmap %p", va, pmap);
8514 			goto fail;
8515 		}
8516 		ml3->pindex = pmap_l2_pindex(va);
8517 
8518 		if (!ADDR_IS_KERNEL(va)) {
8519 			ml3->ref_count = NL3PG;
8520 			pmap_resident_count_inc(pmap, 1);
8521 		}
8522 	}
8523 	l3phys = VM_PAGE_TO_PHYS(ml3);
8524 	l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
8525 	newl3 = ATTR_CONTIGUOUS | (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE;
8526 	KASSERT((oldl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) !=
8527 	    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM),
8528 	    ("pmap_demote_l2: L2 entry is writeable but not dirty"));
8529 
8530 	/*
8531 	 * If the PTP is not leftover from an earlier promotion or it does not
8532 	 * have ATTR_AF set in every L3E, then fill it.  The new L3Es will all
8533 	 * have ATTR_AF set.
8534 	 *
8535 	 * When pmap_update_entry() clears the old L2 mapping, it (indirectly)
8536 	 * performs a dsb().  That dsb() ensures that the stores for filling
8537 	 * "l3" are visible before "l3" is added to the page table.
8538 	 */
8539 	if (!vm_page_all_valid(ml3))
8540 		pmap_fill_l3(l3, newl3);
8541 
8542 	pmap_demote_l2_check(l3, newl3);
8543 
8544 	/*
8545 	 * If the mapping has changed attributes, update the L3Es.
8546 	 */
8547 	if ((pmap_load(l3) & ATTR_PROMOTE) != (newl3 & ATTR_PROMOTE))
8548 		pmap_fill_l3(l3, newl3);
8549 
8550 	/*
8551 	 * Map the temporary page so we don't lose access to the l2 table.
8552 	 */
8553 	if (tmpl2 != 0) {
8554 		pmap_kenter(tmpl2, PAGE_SIZE,
8555 		    DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET,
8556 		    VM_MEMATTR_WRITE_BACK);
8557 		l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
8558 	}
8559 
8560 	/*
8561 	 * The spare PV entries must be reserved prior to demoting the
8562 	 * mapping, that is, prior to changing the PDE.  Otherwise, the state
8563 	 * of the L2 and the PV lists will be inconsistent, which can result
8564 	 * in reclaim_pv_chunk() attempting to remove a PV entry from the
8565 	 * wrong PV list and pmap_pv_demote_l2() failing to find the expected
8566 	 * PV entry for the 2MB page mapping that is being demoted.
8567 	 */
8568 	if ((oldl2 & ATTR_SW_MANAGED) != 0)
8569 		reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
8570 
8571 	/*
8572 	 * Pass PAGE_SIZE so that a single TLB invalidation is performed on
8573 	 * the 2MB page mapping.
8574 	 */
8575 	pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);
8576 
8577 	/*
8578 	 * Demote the PV entry.
8579 	 */
8580 	if ((oldl2 & ATTR_SW_MANAGED) != 0)
8581 		pmap_pv_demote_l2(pmap, va, PTE_TO_PHYS(oldl2), lockp);
8582 
8583 	counter_u64_add(pmap_l2_demotions, 1);
8584 	CTR3(KTR_PMAP, "pmap_demote_l2: success for va %#lx"
8585 	    " in pmap %p %lx", va, pmap, l3[0]);
8586 
8587 fail:
8588 	if (tmpl2 != 0) {
8589 		pmap_kremove(tmpl2);
8590 		kva_free(tmpl2, PAGE_SIZE);
8591 	}
8592 
8593 	return (l3);
8594 
8595 }
8596 
8597 static pt_entry_t *
8598 pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
8599 {
8600 	struct rwlock *lock;
8601 	pt_entry_t *l3;
8602 
8603 	lock = NULL;
8604 	l3 = pmap_demote_l2_locked(pmap, l2, va, &lock);
8605 	if (lock != NULL)
8606 		rw_wunlock(lock);
8607 	return (l3);
8608 }
8609 
8610 /*
8611  * Demote an L2C superpage mapping to L2C_ENTRIES L2 block mappings.
8612  */
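/*
 * The demotion of a contiguous run (here and in pmap_demote_l3c() below)
 * follows a break-before-make pattern with interrupts disabled: each entry
 * first has ATTR_CONTIGUOUS and ATTR_DESCR_VALID cleared, leaving the
 * physical address intact for a lockless, concurrent pmap_kextract(); the
 * accessed and dirty bits are accumulated across the whole run; the TLB is
 * invalidated if any entry was ever accessed; and the entries are then
 * rewritten without the contiguous hint, carrying the accumulated bits.
 */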
8613 static bool
8614 pmap_demote_l2c(pmap_t pmap, pt_entry_t *l2p, vm_offset_t va)
8615 {
8616 	pd_entry_t *l2c_end, *l2c_start, l2e, mask, nbits, *tl2p;
8617 	vm_offset_t tmpl3;
8618 	register_t intr;
8619 
8620 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
8621 	PMAP_ASSERT_STAGE1(pmap);
8622 	l2c_start = (pd_entry_t *)((uintptr_t)l2p & ~((L2C_ENTRIES *
8623 	    sizeof(pd_entry_t)) - 1));
8624 	l2c_end = l2c_start + L2C_ENTRIES;
8625 	tmpl3 = 0;
8626 	if ((va & ~L2C_OFFSET) < (vm_offset_t)l2c_end &&
8627 	    (vm_offset_t)l2c_start < (va & ~L2C_OFFSET) + L2C_SIZE) {
8628 		tmpl3 = kva_alloc(PAGE_SIZE);
8629 		if (tmpl3 == 0)
8630 			return (false);
8631 		pmap_kenter(tmpl3, PAGE_SIZE,
8632 		    DMAP_TO_PHYS((vm_offset_t)l2c_start) & ~L3_OFFSET,
8633 		    VM_MEMATTR_WRITE_BACK);
8634 		l2c_start = (pd_entry_t *)(tmpl3 +
8635 		    ((vm_offset_t)l2c_start & PAGE_MASK));
8636 		l2c_end = (pd_entry_t *)(tmpl3 +
8637 		    ((vm_offset_t)l2c_end & PAGE_MASK));
8638 	}
8639 	mask = 0;
8640 	nbits = ATTR_DESCR_VALID;
8641 	intr = intr_disable();
8642 
8643 	/*
8644 	 * Break the mappings.
8645 	 */
8646 	for (tl2p = l2c_start; tl2p < l2c_end; tl2p++) {
8647 		/*
8648 		 * Clear the mapping's contiguous and valid bits, but leave
8649 		 * the rest of the entry unchanged, so that a lockless,
8650 		 * concurrent pmap_kextract() can still look up the physical
8651 		 * address.
8652 		 */
8653 		l2e = pmap_load(tl2p);
8654 		KASSERT((l2e & ATTR_CONTIGUOUS) != 0,
8655 		    ("pmap_demote_l2c: missing ATTR_CONTIGUOUS"));
8656 		KASSERT((l2e & (ATTR_SW_DBM | ATTR_S1_AP_RW_BIT)) !=
8657 		    (ATTR_SW_DBM | ATTR_S1_AP(ATTR_S1_AP_RO)),
8658 		    ("pmap_demote_l2c: missing ATTR_S1_AP_RW"));
8659 		while (!atomic_fcmpset_64(tl2p, &l2e, l2e & ~(ATTR_CONTIGUOUS |
8660 		    ATTR_DESCR_VALID)))
8661 			cpu_spinwait();
8662 
8663 		/*
8664 		 * Hardware accessed and dirty bit maintenance might only
8665 		 * update a single L2 entry, so we must combine the accessed
8666 		 * and dirty bits from this entire set of contiguous L2
8667 		 * entries.
8668 		 */
8669 		if ((l2e & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
8670 		    (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM))
8671 			mask = ATTR_S1_AP_RW_BIT;
8672 		nbits |= l2e & ATTR_AF;
8673 	}
8674 	if ((nbits & ATTR_AF) != 0) {
8675 		pmap_s1_invalidate_strided(pmap, va & ~L2C_OFFSET, (va +
8676 		    L2C_SIZE) & ~L2C_OFFSET, L2_SIZE, true);
8677 	}
8678 
8679 	/*
8680 	 * Remake the mappings, updating the accessed and dirty bits.
8681 	 */
8682 	l2e = (pmap_load(l2c_start) & ~mask) | nbits;
8683 	for (tl2p = l2c_start; tl2p < l2c_end; tl2p++) {
8684 		pmap_store(tl2p, l2e);
8685 		l2e += L2_SIZE;
8686 	}
8687 	dsb(ishst);
8688 
8689 	intr_restore(intr);
8690 	if (tmpl3 != 0) {
8691 		pmap_kremove(tmpl3);
8692 		kva_free(tmpl3, PAGE_SIZE);
8693 	}
8694 	counter_u64_add(pmap_l2c_demotions, 1);
8695 	CTR2(KTR_PMAP, "pmap_demote_l2c: success for va %#lx in pmap %p",
8696 	    va, pmap);
8697 	return (true);
8698 }
8699 
8700 /*
8701  * Demote an L3C superpage mapping to L3C_ENTRIES 4KB page mappings.
8702  */
8703 static bool
8704 pmap_demote_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va)
8705 {
8706 	pt_entry_t *l3c_end, *l3c_start, l3e, mask, nbits, *tl3p;
8707 	vm_offset_t tmpl3;
8708 	register_t intr;
8709 
8710 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
8711 	l3c_start = (pt_entry_t *)((uintptr_t)l3p & ~((L3C_ENTRIES *
8712 	    sizeof(pt_entry_t)) - 1));
8713 	l3c_end = l3c_start + L3C_ENTRIES;
8714 	tmpl3 = 0;
8715 	if ((va & ~L3C_OFFSET) < (vm_offset_t)l3c_end &&
8716 	    (vm_offset_t)l3c_start < (va & ~L3C_OFFSET) + L3C_SIZE) {
8717 		tmpl3 = kva_alloc(PAGE_SIZE);
8718 		if (tmpl3 == 0)
8719 			return (false);
8720 		pmap_kenter(tmpl3, PAGE_SIZE,
8721 		    DMAP_TO_PHYS((vm_offset_t)l3c_start) & ~L3_OFFSET,
8722 		    VM_MEMATTR_WRITE_BACK);
8723 		l3c_start = (pt_entry_t *)(tmpl3 +
8724 		    ((vm_offset_t)l3c_start & PAGE_MASK));
8725 		l3c_end = (pt_entry_t *)(tmpl3 +
8726 		    ((vm_offset_t)l3c_end & PAGE_MASK));
8727 	}
8728 	mask = 0;
8729 	nbits = ATTR_DESCR_VALID;
8730 	intr = intr_disable();
8731 
8732 	/*
8733 	 * Break the mappings.
8734 	 */
8735 	for (tl3p = l3c_start; tl3p < l3c_end; tl3p++) {
8736 		/*
8737 		 * Clear the mapping's contiguous and valid bits, but leave
8738 		 * the rest of the entry unchanged, so that a lockless,
8739 		 * concurrent pmap_kextract() can still look up the physical
8740 		 * address.
8741 		 */
8742 		l3e = pmap_load(tl3p);
8743 		KASSERT((l3e & ATTR_CONTIGUOUS) != 0,
8744 		    ("pmap_demote_l3c: missing ATTR_CONTIGUOUS"));
8745 		KASSERT((l3e & (ATTR_SW_DBM | ATTR_S1_AP_RW_BIT)) !=
8746 		    (ATTR_SW_DBM | ATTR_S1_AP(ATTR_S1_AP_RO)),
8747 		    ("pmap_demote_l3c: missing ATTR_S1_AP_RW"));
8748 		while (!atomic_fcmpset_64(tl3p, &l3e, l3e & ~(ATTR_CONTIGUOUS |
8749 		    ATTR_DESCR_VALID)))
8750 			cpu_spinwait();
8751 
8752 		/*
8753 		 * Hardware accessed and dirty bit maintenance might only
8754 		 * update a single L3 entry, so we must combine the accessed
8755 		 * and dirty bits from this entire set of contiguous L3
8756 		 * entries.
8757 		 */
8758 		if ((l3e & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
8759 		    (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM))
8760 			mask = ATTR_S1_AP_RW_BIT;
8761 		nbits |= l3e & ATTR_AF;
8762 	}
8763 	if ((nbits & ATTR_AF) != 0) {
8764 		pmap_invalidate_range(pmap, va & ~L3C_OFFSET, (va + L3C_SIZE) &
8765 		    ~L3C_OFFSET, true);
8766 	}
8767 
8768 	/*
8769 	 * Remake the mappings, updating the accessed and dirty bits.
8770 	 */
8771 	l3e = (pmap_load(l3c_start) & ~mask) | nbits;
8772 	for (tl3p = l3c_start; tl3p < l3c_end; tl3p++) {
8773 		pmap_store(tl3p, l3e);
8774 		l3e += L3_SIZE;
8775 	}
8776 	dsb(ishst);
8777 
8778 	intr_restore(intr);
8779 	if (tmpl3 != 0) {
8780 		pmap_kremove(tmpl3);
8781 		kva_free(tmpl3, PAGE_SIZE);
8782 	}
8783 	counter_u64_add(pmap_l3c_demotions, 1);
8784 	CTR2(KTR_PMAP, "pmap_demote_l3c: success for va %#lx in pmap %p",
8785 	    va, pmap);
8786 	return (true);
8787 }
8788 
8789 /*
8790  * Accumulate the accessed and dirty bits within an L3C superpage and
8791  * return the specified PTE with them applied correctly.
8792  */
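/*
 * For example, if any PTE in the run of L3C_ENTRIES contiguous PTEs is
 * writable and dirty (ATTR_S1_AP_RW together with ATTR_SW_DBM), the value
 * returned below reads as writable, and if any PTE in the run has ATTR_AF
 * set, the returned value has ATTR_AF set; otherwise the bits are taken
 * unchanged from the PTE that was passed in.
 */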
8793 static pt_entry_t
8794 pmap_load_l3c(pt_entry_t *l3p)
8795 {
8796 	pt_entry_t *l3c_end, *l3c_start, l3e, mask, nbits, *tl3p;
8797 
8798 	l3c_start = (pt_entry_t *)((uintptr_t)l3p & ~((L3C_ENTRIES *
8799 	    sizeof(pt_entry_t)) - 1));
8800 	l3c_end = l3c_start + L3C_ENTRIES;
8801 	mask = 0;
8802 	nbits = 0;
8803 	/* Iterate over each mapping in the superpage. */
8804 	for (tl3p = l3c_start; tl3p < l3c_end; tl3p++) {
8805 		l3e = pmap_load(tl3p);
8806 		KASSERT((l3e & ATTR_CONTIGUOUS) != 0,
8807 		    ("pmap_load_l3c: missing ATTR_CONTIGUOUS"));
8808 		/* Update mask if the current page has its dirty bit set. */
8809 		if ((l3e & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
8810 		    (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM))
8811 			mask = ATTR_S1_AP_RW_BIT;
8812 		/* Update nbits if the accessed bit is set. */
8813 		nbits |= l3e & ATTR_AF;
8814 	}
8815 	return ((pmap_load(l3p) & ~mask) | nbits);
8816 }
8817 
8818 /*
8819  * Perform the pmap work for mincore(2).  If the page is not both referenced and
8820  * modified by this pmap, returns its physical address so that the caller can
8821  * find other mappings.
8822  */
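/*
 * The MINCORE_PSIND() value reported below encodes the size of the mapping
 * that was found: 0 for an ordinary L3 page, 1 for an L3 page within an
 * ATTR_CONTIGUOUS (L3C) run, 2 for an L2 block, and 3 for an L1 block; with
 * a 4 KB granule these correspond to 4 KB, 64 KB, 2 MB, and 1 GB mappings.
 */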
8823 int
8824 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
8825 {
8826 	pt_entry_t *pte, tpte;
8827 	vm_paddr_t mask, pa;
8828 	int lvl, psind, val;
8829 	bool managed;
8830 
8831 	PMAP_ASSERT_STAGE1(pmap);
8832 	PMAP_LOCK(pmap);
8833 	pte = pmap_pte(pmap, addr, &lvl);
8834 	if (pte != NULL) {
8835 		tpte = pmap_load(pte);
8836 
8837 		switch (lvl) {
8838 		case 3:
8839 			mask = L3_OFFSET;
8840 			psind = (tpte & ATTR_CONTIGUOUS) != 0 ? 1 : 0;
8841 			break;
8842 		case 2:
8843 			mask = L2_OFFSET;
8844 			psind = 2;
8845 			break;
8846 		case 1:
8847 			mask = L1_OFFSET;
8848 			psind = 3;
8849 			break;
8850 		default:
8851 			panic("pmap_mincore: invalid level %d", lvl);
8852 		}
8853 
8854 		managed = (tpte & ATTR_SW_MANAGED) != 0;
8855 		val = MINCORE_INCORE | MINCORE_PSIND(psind);
8856 		if ((managed && pmap_pte_dirty(pmap, tpte)) || (!managed &&
8857 		    (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)))
8858 			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
8859 		if ((tpte & ATTR_AF) == ATTR_AF)
8860 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
8861 
8862 		pa = PTE_TO_PHYS(tpte) | (addr & mask);
8863 	} else {
8864 		managed = false;
8865 		val = 0;
8866 	}
8867 
8868 	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
8869 	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
8870 		*pap = pa;
8871 	}
8872 	PMAP_UNLOCK(pmap);
8873 	return (val);
8874 }
8875 
8876 /*
8877  * Garbage collect every ASID that is neither active on a processor nor
8878  * reserved.
8879  */
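/*
 * The reset works by advancing the set's epoch: every pmap's pm_cookie packs
 * an (ASID, epoch) pair, so bumping asid_epoch implicitly invalidates any
 * cookie that is not re-stamped here.  After a global TLB invalidation, only
 * the ASIDs that are currently active on some CPU are marked as allocated
 * again; every other pmap will notice its stale epoch in pmap_activate_int()
 * and obtain a fresh ASID from pmap_alloc_asid().
 */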
8880 static void
8881 pmap_reset_asid_set(pmap_t pmap)
8882 {
8883 	pmap_t curpmap;
8884 	int asid, cpuid, epoch;
8885 	struct asid_set *set;
8886 	enum pmap_stage stage;
8887 
8888 	set = pmap->pm_asid_set;
8889 	stage = pmap->pm_stage;
8890 
8892 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
8893 	mtx_assert(&set->asid_set_mutex, MA_OWNED);
8894 
8895 	/*
8896 	 * Ensure that the store to asid_epoch is globally visible before the
8897 	 * loads from pc_curpmap are performed.
8898 	 */
8899 	epoch = set->asid_epoch + 1;
8900 	if (epoch == INT_MAX)
8901 		epoch = 0;
8902 	set->asid_epoch = epoch;
8903 	dsb(ishst);
8904 	if (stage == PM_STAGE1) {
8905 		__asm __volatile("tlbi vmalle1is");
8906 	} else {
8907 		KASSERT(pmap_clean_stage2_tlbi != NULL,
8908 		    ("%s: Unset stage 2 tlb invalidation callback\n",
8909 		    __func__));
8910 		pmap_clean_stage2_tlbi();
8911 	}
8912 	dsb(ish);
8913 	bit_nclear(set->asid_set, ASID_FIRST_AVAILABLE,
8914 	    set->asid_set_size - 1);
8915 	CPU_FOREACH(cpuid) {
8916 		if (cpuid == curcpu)
8917 			continue;
8918 		if (stage == PM_STAGE1) {
8919 			curpmap = pcpu_find(cpuid)->pc_curpmap;
8920 			PMAP_ASSERT_STAGE1(pmap);
8921 		} else {
8922 			curpmap = pcpu_find(cpuid)->pc_curvmpmap;
8923 			if (curpmap == NULL)
8924 				continue;
8925 			PMAP_ASSERT_STAGE2(pmap);
8926 		}
8927 		KASSERT(curpmap->pm_asid_set == set, ("Incorrect set"));
8928 		asid = COOKIE_TO_ASID(curpmap->pm_cookie);
8929 		if (asid == -1)
8930 			continue;
8931 		bit_set(set->asid_set, asid);
8932 		curpmap->pm_cookie = COOKIE_FROM(asid, epoch);
8933 	}
8934 }
8935 
8936 /*
8937  * Allocate a new ASID for the specified pmap.
8938  */
8939 static void
8940 pmap_alloc_asid(pmap_t pmap)
8941 {
8942 	struct asid_set *set;
8943 	int new_asid;
8944 
8945 	set = pmap->pm_asid_set;
8946 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
8947 
8948 	mtx_lock_spin(&set->asid_set_mutex);
8949 
8950 	/*
8951 	 * While this processor was waiting to acquire the asid set mutex,
8952 	 * pmap_reset_asid_set() running on another processor might have
8953 	 * updated this pmap's cookie to the current epoch.  In which case, we
8954 	 * don't need to allocate a new ASID.
8955 	 */
8956 	if (COOKIE_TO_EPOCH(pmap->pm_cookie) == set->asid_epoch)
8957 		goto out;
8958 
8959 	bit_ffc_at(set->asid_set, set->asid_next, set->asid_set_size,
8960 	    &new_asid);
8961 	if (new_asid == -1) {
8962 		bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE,
8963 		    set->asid_next, &new_asid);
8964 		if (new_asid == -1) {
8965 			pmap_reset_asid_set(pmap);
8966 			bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE,
8967 			    set->asid_set_size, &new_asid);
8968 			KASSERT(new_asid != -1, ("ASID allocation failure"));
8969 		}
8970 	}
8971 	bit_set(set->asid_set, new_asid);
8972 	set->asid_next = new_asid + 1;
8973 	pmap->pm_cookie = COOKIE_FROM(new_asid, set->asid_epoch);
8974 out:
8975 	mtx_unlock_spin(&set->asid_set_mutex);
8976 }
8977 
8978 static uint64_t __read_mostly ttbr_flags;
8979 
8980 /*
8981  * Compute the value that should be stored in ttbr0 to activate the specified
8982  * pmap.  This value may change from time to time.
8983  */
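/*
 * The returned value combines three pieces: the physical address of the
 * pmap's root translation table (pm_ttbr), the pmap's ASID placed in the
 * TTBR ASID field (bits 63:48) by ASID_TO_OPERAND(), and ttbr_flags, which
 * in this file only ever carries TTBR_CnP (the Common-not-Private bit) once
 * pmap_init_cnp() has enabled it.
 */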
8984 uint64_t
8985 pmap_to_ttbr0(pmap_t pmap)
8986 {
8987 	uint64_t ttbr;
8988 
8989 	ttbr = pmap->pm_ttbr;
8990 	ttbr |= ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
8991 	ttbr |= ttbr_flags;
8992 
8993 	return (ttbr);
8994 }
8995 
8996 static void
8997 pmap_set_cnp(void *arg)
8998 {
8999 	uint64_t ttbr0, ttbr1;
9000 	u_int cpuid;
9001 
9002 	cpuid = *(u_int *)arg;
9003 	if (cpuid == curcpu) {
9004 		/*
9005 		 * Set the flags while all CPUs are handling the
9006 		 * smp_rendezvous, so they will not call pmap_to_ttbr0. Any calls
9007 		 * to pmap_to_ttbr0 after this will have the CnP flag set.
9008 		 * The dsb after invalidating the TLB will act as a barrier
9009 		 * to ensure all CPUs can observe this change.
9010 		 */
9011 		ttbr_flags |= TTBR_CnP;
9012 	}
9013 
9014 	ttbr0 = READ_SPECIALREG(ttbr0_el1);
9015 	ttbr0 |= TTBR_CnP;
9016 
9017 	ttbr1 = READ_SPECIALREG(ttbr1_el1);
9018 	ttbr1 |= TTBR_CnP;
9019 
9020 	/* Update ttbr{0,1}_el1 with the CnP flag */
9021 	WRITE_SPECIALREG(ttbr0_el1, ttbr0);
9022 	WRITE_SPECIALREG(ttbr1_el1, ttbr1);
9023 	isb();
9024 	__asm __volatile("tlbi vmalle1is");
9025 	dsb(ish);
9026 	isb();
9027 }
9028 
9029 /*
9030  * Defer enabling some features until we have read the ID registers to know
9031  * if they are supported on all CPUs.
9032  */
9033 static void
9034 pmap_init_mp(void *dummy __unused)
9035 {
9036 	uint64_t reg;
9037 
9038 	if (get_kernel_reg(ID_AA64PFR1_EL1, &reg)) {
9039 		if (ID_AA64PFR1_BT_VAL(reg) != ID_AA64PFR1_BT_NONE) {
9040 			if (bootverbose)
9041 				printf("Enabling BTI\n");
9042 			pmap_bti_support = true;
9043 
9044 			pmap_bti_ranges_zone = uma_zcreate("BTI ranges",
9045 			    sizeof(struct rs_el), NULL, NULL, NULL, NULL,
9046 			    UMA_ALIGN_PTR, 0);
9047 		}
9048 	}
9049 }
9050 SYSINIT(pmap_init_mp, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_mp, NULL);
9051 
9052 /*
9053  * Defer enabling CnP until we have read the ID registers to know if it's
9054  * supported on all CPUs.
9055  */
9056 static void
9057 pmap_init_cnp(void *dummy __unused)
9058 {
9059 	uint64_t reg;
9060 	u_int cpuid;
9061 
9062 	if (!get_kernel_reg(ID_AA64MMFR2_EL1, &reg))
9063 		return;
9064 
9065 	if (ID_AA64MMFR2_CnP_VAL(reg) != ID_AA64MMFR2_CnP_NONE) {
9066 		if (bootverbose)
9067 			printf("Enabling CnP\n");
9068 		cpuid = curcpu;
9069 		smp_rendezvous(NULL, pmap_set_cnp, NULL, &cpuid);
9070 	}
9071 
9072 }
9073 SYSINIT(pmap_init_cnp, SI_SUB_SMP, SI_ORDER_ANY, pmap_init_cnp, NULL);
9074 
9075 static bool
9076 pmap_activate_int(pmap_t pmap)
9077 {
9078 	struct asid_set *set;
9079 	int epoch;
9080 
9081 	KASSERT(PCPU_GET(curpmap) != NULL, ("no active pmap"));
9082 	KASSERT(pmap != kernel_pmap, ("kernel pmap activation"));
9083 
9084 	if ((pmap->pm_stage == PM_STAGE1 && pmap == PCPU_GET(curpmap)) ||
9085 	    (pmap->pm_stage == PM_STAGE2 && pmap == PCPU_GET(curvmpmap))) {
9086 		/*
9087 		 * Handle the possibility that the old thread was preempted
9088 		 * after an "ic" or "tlbi" instruction but before it performed
9089 		 * a "dsb" instruction.  If the old thread migrates to a new
9090 		 * processor, its completion of a "dsb" instruction on that
9091 		 * new processor does not guarantee that the "ic" or "tlbi"
9092 		 * instructions performed on the old processor have completed.
9093 		 */
9094 		dsb(ish);
9095 		return (false);
9096 	}
9097 
9098 	set = pmap->pm_asid_set;
9099 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
9100 
9101 	/*
9102 	 * Ensure that the store to curpmap is globally visible before the
9103 	 * load from asid_epoch is performed.
9104 	 */
9105 	if (pmap->pm_stage == PM_STAGE1)
9106 		PCPU_SET(curpmap, pmap);
9107 	else
9108 		PCPU_SET(curvmpmap, pmap);
9109 	dsb(ish);
9110 	epoch = COOKIE_TO_EPOCH(pmap->pm_cookie);
9111 	if (epoch >= 0 && epoch != set->asid_epoch)
9112 		pmap_alloc_asid(pmap);
9113 
9114 	if (pmap->pm_stage == PM_STAGE1) {
9115 		set_ttbr0(pmap_to_ttbr0(pmap));
9116 		if (PCPU_GET(bcast_tlbi_workaround) != 0)
9117 			invalidate_local_icache();
9118 	}
9119 	return (true);
9120 }
9121 
9122 void
9123 pmap_activate_vm(pmap_t pmap)
9124 {
9125 
9126 	PMAP_ASSERT_STAGE2(pmap);
9127 
9128 	(void)pmap_activate_int(pmap);
9129 }
9130 
9131 void
9132 pmap_activate(struct thread *td)
9133 {
9134 	pmap_t	pmap;
9135 
9136 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
9137 	PMAP_ASSERT_STAGE1(pmap);
9138 	critical_enter();
9139 	(void)pmap_activate_int(pmap);
9140 	critical_exit();
9141 }
9142 
9143 /*
9144  * Activate the thread we are switching to.
9145  * To simplify the assembly in cpu_throw, return the new thread's pcb.
9146  */
9147 struct pcb *
9148 pmap_switch(struct thread *new)
9149 {
9150 	pcpu_bp_harden bp_harden;
9151 	struct pcb *pcb;
9152 
9153 	/* Store the new curthread */
9154 	PCPU_SET(curthread, new);
9155 
9156 	/* And the new pcb */
9157 	pcb = new->td_pcb;
9158 	PCPU_SET(curpcb, pcb);
9159 
9160 	/*
9161 	 * TODO: We may need to flush the cache here if switching
9162 	 * to a user process.
9163 	 */
9164 
9165 	if (pmap_activate_int(vmspace_pmap(new->td_proc->p_vmspace))) {
9166 		/*
9167 		 * Stop userspace from training the branch predictor against
9168 		 * other processes. This will call into a CPU specific
9169 		 * function that clears the branch predictor state.
9170 		 */
9171 		bp_harden = PCPU_GET(bp_harden);
9172 		if (bp_harden != NULL)
9173 			bp_harden();
9174 	}
9175 
9176 	return (pcb);
9177 }
9178 
9179 void
9180 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
9181 {
9182 
9183 	PMAP_ASSERT_STAGE1(pmap);
9184 	KASSERT(ADDR_IS_CANONICAL(va),
9185 	    ("%s: Address not in canonical form: %lx", __func__, va));
9186 
9187 	if (ADDR_IS_KERNEL(va)) {
9188 		cpu_icache_sync_range((void *)va, sz);
9189 	} else {
9190 		u_int len, offset;
9191 		vm_paddr_t pa;
9192 
9193 		/* Find the length of data in this page to flush */
9194 		offset = va & PAGE_MASK;
9195 		len = imin(PAGE_SIZE - offset, sz);
9196 
9197 		while (sz != 0) {
9198 			/* Extract the physical address & find it in the DMAP */
9199 			pa = pmap_extract(pmap, va);
9200 			if (pa != 0)
9201 				cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa),
9202 				    len);
9203 
9204 			/* Move to the next page */
9205 			sz -= len;
9206 			va += len;
9207 			/* Set the length for the next iteration */
9208 			len = imin(PAGE_SIZE, sz);
9209 		}
9210 	}
9211 }
9212 
9213 static int
9214 pmap_stage2_fault(pmap_t pmap, uint64_t esr, uint64_t far)
9215 {
9216 	pd_entry_t *pdep;
9217 	pt_entry_t *ptep, pte;
9218 	int rv, lvl, dfsc;
9219 
9220 	PMAP_ASSERT_STAGE2(pmap);
9221 	rv = KERN_FAILURE;
9222 
9223 	/* Data and insn aborts use same encoding for FSC field. */
9224 	dfsc = esr & ISS_DATA_DFSC_MASK;
9225 	switch (dfsc) {
9226 	case ISS_DATA_DFSC_TF_L0:
9227 	case ISS_DATA_DFSC_TF_L1:
9228 	case ISS_DATA_DFSC_TF_L2:
9229 	case ISS_DATA_DFSC_TF_L3:
9230 		PMAP_LOCK(pmap);
9231 		pdep = pmap_pde(pmap, far, &lvl);
9232 		if (pdep == NULL || lvl != (dfsc - ISS_DATA_DFSC_TF_L1)) {
9233 			PMAP_UNLOCK(pmap);
9234 			break;
9235 		}
9236 
9237 		switch (lvl) {
9238 		case 0:
9239 			ptep = pmap_l0_to_l1(pdep, far);
9240 			break;
9241 		case 1:
9242 			ptep = pmap_l1_to_l2(pdep, far);
9243 			break;
9244 		case 2:
9245 			ptep = pmap_l2_to_l3(pdep, far);
9246 			break;
9247 		default:
9248 			panic("%s: Invalid pde level %d", __func__, lvl);
9249 		}
9250 		goto fault_exec;
9251 
9252 	case ISS_DATA_DFSC_AFF_L1:
9253 	case ISS_DATA_DFSC_AFF_L2:
9254 	case ISS_DATA_DFSC_AFF_L3:
9255 		PMAP_LOCK(pmap);
9256 		ptep = pmap_pte(pmap, far, &lvl);
9257 fault_exec:
9258 		if (ptep != NULL && (pte = pmap_load(ptep)) != 0) {
9259 			/*
9260 			 * If accessing an executable page invalidate
9261 			 * the I-cache so it will be valid when we
9262 			 * continue execution in the guest. The D-cache
9263 			 * is assumed to already be clean to the Point
9264 			 * of Coherency.
9265 			 */
9266 			if ((pte & ATTR_S2_XN_MASK) !=
9267 			    ATTR_S2_XN(ATTR_S2_XN_NONE)) {
9268 				invalidate_icache();
9269 			}
9270 			pmap_set_bits(ptep, ATTR_AF | ATTR_DESCR_VALID);
9271 			rv = KERN_SUCCESS;
9272 		}
9273 		PMAP_UNLOCK(pmap);
9274 		break;
9275 	}
9276 
9277 	return (rv);
9278 }
9279 
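/*
 * Try to resolve a page table related fault here so that the trap handler
 * can avoid a full vm_fault() when possible: access flag faults are resolved
 * by setting ATTR_AF, write permission faults on entries carrying
 * ATTR_SW_DBM are resolved by making the entry writable (this is how the
 * dirty bit is emulated when the hardware does not manage it), and
 * translation faults are retried because a break-before-make update
 * elsewhere can produce a transient fault.  A KERN_SUCCESS return means the
 * fault has been handled.
 */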
9280 int
9281 pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
9282 {
9283 	pt_entry_t pte, *ptep;
9284 	register_t intr;
9285 	uint64_t ec, par;
9286 	int lvl, rv;
9287 
9288 	rv = KERN_FAILURE;
9289 
9290 	ec = ESR_ELx_EXCEPTION(esr);
9291 	switch (ec) {
9292 	case EXCP_INSN_ABORT_L:
9293 	case EXCP_INSN_ABORT:
9294 	case EXCP_DATA_ABORT_L:
9295 	case EXCP_DATA_ABORT:
9296 		break;
9297 	default:
9298 		return (rv);
9299 	}
9300 
9301 	if (pmap->pm_stage == PM_STAGE2)
9302 		return (pmap_stage2_fault(pmap, esr, far));
9303 
9304 	/* Data and insn aborts use same encoding for FSC field. */
9305 	switch (esr & ISS_DATA_DFSC_MASK) {
9306 	case ISS_DATA_DFSC_AFF_L1:
9307 	case ISS_DATA_DFSC_AFF_L2:
9308 	case ISS_DATA_DFSC_AFF_L3:
9309 		PMAP_LOCK(pmap);
9310 		ptep = pmap_pte(pmap, far, &lvl);
9311 		if (ptep != NULL) {
9312 			pmap_set_bits(ptep, ATTR_AF);
9313 			rv = KERN_SUCCESS;
9314 			/*
9315 			 * XXXMJ as an optimization we could mark the entry
9316 			 * dirty if this is a write fault.
9317 			 */
9318 		}
9319 		PMAP_UNLOCK(pmap);
9320 		break;
9321 	case ISS_DATA_DFSC_PF_L1:
9322 	case ISS_DATA_DFSC_PF_L2:
9323 	case ISS_DATA_DFSC_PF_L3:
9324 		if ((ec != EXCP_DATA_ABORT_L && ec != EXCP_DATA_ABORT) ||
9325 		    (esr & ISS_DATA_WnR) == 0)
9326 			return (rv);
9327 		PMAP_LOCK(pmap);
9328 		ptep = pmap_pte(pmap, far, &lvl);
9329 		if (ptep != NULL &&
9330 		    ((pte = pmap_load(ptep)) & ATTR_SW_DBM) != 0) {
9331 			if ((pte & ATTR_S1_AP_RW_BIT) ==
9332 			    ATTR_S1_AP(ATTR_S1_AP_RO)) {
9333 				pmap_clear_bits(ptep, ATTR_S1_AP_RW_BIT);
9334 				pmap_s1_invalidate_page(pmap, far, true);
9335 			}
9336 			rv = KERN_SUCCESS;
9337 		}
9338 		PMAP_UNLOCK(pmap);
9339 		break;
9340 	case ISS_DATA_DFSC_TF_L0:
9341 	case ISS_DATA_DFSC_TF_L1:
9342 	case ISS_DATA_DFSC_TF_L2:
9343 	case ISS_DATA_DFSC_TF_L3:
9344 		/*
9345 		 * Retry the translation.  A break-before-make sequence can
9346 		 * produce a transient fault.
9347 		 */
9348 		if (pmap == kernel_pmap) {
9349 			/*
9350 			 * The translation fault may have occurred within a
9351 			 * critical section.  Therefore, we must check the
9352 			 * address without acquiring the kernel pmap's lock.
9353 			 */
9354 			if (pmap_klookup(far, NULL))
9355 				rv = KERN_SUCCESS;
9356 		} else {
9357 			bool owned;
9358 
9359 			/*
9360 			 * In the EFIRT driver we lock the pmap before
9361 			 * calling into the runtime service. As the lock
9362 			 * is already owned by the current thread skip
9363 			 * locking it again.
9364 			 */
9365 			owned = PMAP_OWNED(pmap);
9366 			if (!owned)
9367 				PMAP_LOCK(pmap);
9368 			/* Ask the MMU to check the address. */
9369 			intr = intr_disable();
9370 			par = arm64_address_translate_s1e0r(far);
9371 			intr_restore(intr);
9372 			if (!owned)
9373 				PMAP_UNLOCK(pmap);
9374 
9375 			/*
9376 			 * If the translation was successful, then we can
9377 			 * return success to the trap handler.
9378 			 */
9379 			if (PAR_SUCCESS(par))
9380 				rv = KERN_SUCCESS;
9381 		}
9382 		break;
9383 	}
9384 
9385 	return (rv);
9386 }
9387 
9388 /*
9389  *	Increase the starting virtual address of the given mapping if a
9390  *	different alignment might result in more superpage mappings.
9391  */
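/*
 * For example, with a 4 KB granule (L2_SIZE is 2 MB): if the object offset
 * is 2 MB-aligned plus 0x5000 while the proposed *addr is 2 MB-aligned plus
 * 0x3000, *addr is advanced by 0x2000 so that both are congruent modulo
 * 2 MB, allowing the middle of the mapping to be promoted to L2 blocks.  If
 * the mapping is too small for L2 blocks, the same adjustment is instead
 * applied at the smaller L3C granularity.
 */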
9392 void
9393 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
9394     vm_offset_t *addr, vm_size_t size)
9395 {
9396 	vm_offset_t superpage_offset;
9397 
9398 	if (size < L3C_SIZE)
9399 		return;
9400 	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
9401 		offset += ptoa(object->pg_color);
9402 
9403 	/*
9404 	 * Considering the object's physical alignment, is the mapping large
9405 	 * enough to encompass an L2 (2MB/32MB) superpage ...
9406 	 */
9407 	superpage_offset = offset & L2_OFFSET;
9408 	if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) >= L2_SIZE) {
9409 		/*
9410 		 * If the virtual and physical alignments differ, then
9411 		 * increase the virtual address so that the alignments match.
9412 		 */
9413 		if ((*addr & L2_OFFSET) < superpage_offset)
9414 			*addr = (*addr & ~L2_OFFSET) + superpage_offset;
9415 		else if ((*addr & L2_OFFSET) > superpage_offset)
9416 			*addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) +
9417 			    superpage_offset;
9418 		return;
9419 	}
9420 	/* ... or an L3C (64KB/2MB) superpage? */
9421 	superpage_offset = offset & L3C_OFFSET;
9422 	if (size - ((L3C_SIZE - superpage_offset) & L3C_OFFSET) >= L3C_SIZE) {
9423 		if ((*addr & L3C_OFFSET) < superpage_offset)
9424 			*addr = (*addr & ~L3C_OFFSET) + superpage_offset;
9425 		else if ((*addr & L3C_OFFSET) > superpage_offset)
9426 			*addr = ((*addr + L3C_OFFSET) & ~L3C_OFFSET) +
9427 			    superpage_offset;
9428 	}
9429 }
9430 
9431 /**
9432  * Get the kernel virtual address of a set of physical pages. If there are
9433  * physical addresses not covered by the DMAP perform a transient mapping
9434  * that will be removed when calling pmap_unmap_io_transient.
9435  *
9436  * \param page        The pages whose kernel virtual addresses the caller
9437  *                    wishes to obtain.
9438  * \param vaddr       On return contains the kernel virtual memory address
9439  *                    of the pages passed in the page parameter.
9440  * \param count       Number of pages passed in.
9441  * \param can_fault   true if the thread using the mapped pages can take
9442  *                    page faults, false otherwise.
9443  *
9444  * \returns true if the caller must call pmap_unmap_io_transient when
9445  *          finished or false otherwise.
9446  *
9447  */
9448 bool
9449 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
9450     bool can_fault)
9451 {
9452 	vm_paddr_t paddr;
9453 	bool needs_mapping;
9454 	int error __diagused, i;
9455 
9456 	/*
9457 	 * Allocate any KVA space that we need, this is done in a separate
9458 	 * loop to prevent calling vmem_alloc while pinned.
9459 	 */
9460 	needs_mapping = false;
9461 	for (i = 0; i < count; i++) {
9462 		paddr = VM_PAGE_TO_PHYS(page[i]);
9463 		if (__predict_false(!PHYS_IN_DMAP(paddr))) {
9464 			error = vmem_alloc(kernel_arena, PAGE_SIZE,
9465 			    M_BESTFIT | M_WAITOK, &vaddr[i]);
9466 			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
9467 			needs_mapping = true;
9468 		} else {
9469 			vaddr[i] = PHYS_TO_DMAP(paddr);
9470 		}
9471 	}
9472 
9473 	/* Exit early if everything is covered by the DMAP */
9474 	if (!needs_mapping)
9475 		return (false);
9476 
9477 	if (!can_fault)
9478 		sched_pin();
9479 	for (i = 0; i < count; i++) {
9480 		paddr = VM_PAGE_TO_PHYS(page[i]);
9481 		if (!PHYS_IN_DMAP(paddr)) {
9482 			panic(
9483 			   "pmap_map_io_transient: TODO: Map out of DMAP data");
9484 		}
9485 	}
9486 
9487 	return (needs_mapping);
9488 }
9489 
9490 void
9491 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
9492     bool can_fault)
9493 {
9494 	vm_paddr_t paddr;
9495 	int i;
9496 
9497 	if (!can_fault)
9498 		sched_unpin();
9499 	for (i = 0; i < count; i++) {
9500 		paddr = VM_PAGE_TO_PHYS(page[i]);
9501 		if (!PHYS_IN_DMAP(paddr)) {
9502 			panic("ARM64TODO: pmap_unmap_io_transient: Unmap data");
9503 		}
9504 	}
9505 }
9506 
9507 bool
9508 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
9509 {
9510 
9511 	return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH);
9512 }
9513 
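/*
 * The BTI ranges of a pmap are tracked in a rangeset (pm_bti) whose nodes
 * are struct rs_el allocations from pmap_bti_ranges_zone.  bti_dup_range()
 * and bti_free_range() are the callbacks that the rangeset code uses to
 * clone and release those nodes, presumably registered where pm_bti is
 * initialized (not shown here), for example when pmap_bti_copy() duplicates
 * a parent pmap's ranges into a child.
 */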
9514 static void *
9515 bti_dup_range(void *ctx __unused, void *data)
9516 {
9517 	struct rs_el *node, *new_node;
9518 
9519 	new_node = uma_zalloc(pmap_bti_ranges_zone, M_NOWAIT);
9520 	if (new_node == NULL)
9521 		return (NULL);
9522 	node = data;
9523 	memcpy(new_node, node, sizeof(*node));
9524 	return (new_node);
9525 }
9526 
9527 static void
9528 bti_free_range(void *ctx __unused, void *node)
9529 {
9530 
9531 	uma_zfree(pmap_bti_ranges_zone, node);
9532 }
9533 
9534 static int
9535 pmap_bti_assign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
9536 {
9537 	struct rs_el *rs;
9538 	int error;
9539 
9540 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9541 	PMAP_ASSERT_STAGE1(pmap);
9542 	MPASS(pmap->pm_bti != NULL);
9543 	rs = uma_zalloc(pmap_bti_ranges_zone, M_NOWAIT);
9544 	if (rs == NULL)
9545 		return (ENOMEM);
9546 	error = rangeset_insert(pmap->pm_bti, sva, eva, rs);
9547 	if (error != 0)
9548 		uma_zfree(pmap_bti_ranges_zone, rs);
9549 	return (error);
9550 }
9551 
9552 static void
9553 pmap_bti_deassign_all(pmap_t pmap)
9554 {
9555 
9556 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9557 	if (pmap->pm_bti != NULL)
9558 		rangeset_remove_all(pmap->pm_bti);
9559 }
9560 
9561 /*
9562  * Returns true if the BTI setting is the same across the specified address
9563  * range, and false otherwise.  When returning true, updates the referenced PTE
9564  * to reflect the BTI setting.
9565  *
9566  * Only stage 1 pmaps support BTI.  The kernel pmap is always a stage 1 pmap
9567  * that has the same BTI setting implicitly across its entire address range.
9568  */
9569 static bool
9570 pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t *pte)
9571 {
9572 	struct rs_el *rs;
9573 	vm_offset_t va;
9574 
9575 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9576 	KASSERT(ADDR_IS_CANONICAL(sva),
9577 	    ("%s: Start address not in canonical form: %lx", __func__, sva));
9578 	KASSERT(ADDR_IS_CANONICAL(eva),
9579 	    ("%s: End address not in canonical form: %lx", __func__, eva));
9580 	KASSERT((*pte & ATTR_S1_GP) == 0,
9581 	    ("%s: pte %lx has ATTR_S1_GP preset", __func__, *pte));
9582 
9583 	if (pmap == kernel_pmap) {
9584 		*pte |= ATTR_KERN_GP;
9585 		return (true);
9586 	}
9587 	if (pmap->pm_bti == NULL)
9588 		return (true);
9589 	PMAP_ASSERT_STAGE1(pmap);
9590 	rs = rangeset_containing(pmap->pm_bti, sva);
9591 	if (rs == NULL)
9592 		return (rangeset_empty(pmap->pm_bti, sva, eva));
9593 	while ((va = rs->re_end) < eva) {
9594 		if ((rs = rangeset_beginning(pmap->pm_bti, va)) == NULL)
9595 			return (false);
9596 	}
9597 	*pte |= ATTR_S1_GP;
9598 	return (true);
9599 }
9600 
9601 static pt_entry_t
9602 pmap_pte_bti(pmap_t pmap, vm_offset_t va)
9603 {
9604 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9605 	MPASS(ADDR_IS_CANONICAL(va));
9606 
9607 	if (pmap->pm_stage != PM_STAGE1)
9608 		return (0);
9609 	if (pmap == kernel_pmap)
9610 		return (ATTR_KERN_GP);
9611 	if (pmap->pm_bti != NULL &&
9612 	    rangeset_containing(pmap->pm_bti, va) != NULL)
9613 		return (ATTR_S1_GP);
9614 	return (0);
9615 }
9616 
9617 static void
9618 pmap_bti_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
9619 {
9620 
9621 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9622 	if (pmap->pm_bti != NULL)
9623 		rangeset_remove(pmap->pm_bti, sva, eva);
9624 }
9625 
9626 static int
9627 pmap_bti_copy(pmap_t dst_pmap, pmap_t src_pmap)
9628 {
9629 
9630 	PMAP_LOCK_ASSERT(dst_pmap, MA_OWNED);
9631 	PMAP_LOCK_ASSERT(src_pmap, MA_OWNED);
9632 	MPASS(src_pmap->pm_stage == dst_pmap->pm_stage);
9633 	MPASS(src_pmap->pm_bti != NULL);
9634 	MPASS(dst_pmap->pm_bti != NULL);
9635 	if (src_pmap->pm_bti->rs_data_ctx == NULL)
9636 		return (0);
9637 	return (rangeset_copy(dst_pmap->pm_bti, src_pmap->pm_bti));
9638 }
9639 
9640 static void
9641 pmap_bti_update_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool set)
9642 {
9643 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9644 	PMAP_ASSERT_STAGE1(pmap);
9645 
9646 	pmap_mask_set_locked(pmap, sva, eva, ATTR_S1_GP, set ? ATTR_S1_GP : 0,
9647 	    true);
9648 }
9649 
9650 int
9651 pmap_bti_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
9652 {
9653 	int error;
9654 
9655 	if (pmap->pm_bti == NULL)
9656 		return (0);
9657 	if (!ADDR_IS_CANONICAL(sva) || !ADDR_IS_CANONICAL(eva))
9658 		return (EINVAL);
9659 	if (pmap->pm_stage != PM_STAGE1)
9660 		return (EINVAL);
9661 	if (eva <= sva || ADDR_IS_KERNEL(eva))
9662 		return (EFAULT);
9663 
9664 	sva = trunc_page(sva);
9665 	eva = round_page(eva);
9666 	for (;;) {
9667 		PMAP_LOCK(pmap);
9668 		error = pmap_bti_assign(pmap, sva, eva);
9669 		if (error == 0)
9670 			pmap_bti_update_range(pmap, sva, eva, true);
9671 		PMAP_UNLOCK(pmap);
9672 		if (error != ENOMEM)
9673 			break;
9674 		vm_wait(NULL);
9675 	}
9676 	return (error);
9677 }
9678 
9679 #if defined(KASAN) || defined(KMSAN)
9680 static pd_entry_t	*pmap_san_early_l2;
9681 
9682 #define	SAN_BOOTSTRAP_L2_SIZE	(1 * L2_SIZE)
9683 #define	SAN_BOOTSTRAP_SIZE	(2 * PAGE_SIZE)
9684 static vm_offset_t __nosanitizeaddress
9685 pmap_san_enter_bootstrap_alloc_l2(void)
9686 {
9687 	static uint8_t bootstrap_data[SAN_BOOTSTRAP_L2_SIZE] __aligned(L2_SIZE);
9688 	static size_t offset = 0;
9689 	vm_offset_t addr;
9690 
9691 	if (offset + L2_SIZE > sizeof(bootstrap_data)) {
9692 		panic("%s: out of memory for the bootstrap shadow map L2 entries",
9693 		    __func__);
9694 	}
9695 
9696 	addr = (uintptr_t)&bootstrap_data[offset];
9697 	offset += L2_SIZE;
9698 	return (addr);
9699 }
9700 
9701 /*
9702  * SAN L1 + L2 pages, maybe L3 entries later?
9703  */
9704 static vm_offset_t __nosanitizeaddress
9705 pmap_san_enter_bootstrap_alloc_pages(int npages)
9706 {
9707 	static uint8_t bootstrap_data[SAN_BOOTSTRAP_SIZE] __aligned(PAGE_SIZE);
9708 	static size_t offset = 0;
9709 	vm_offset_t addr;
9710 
9711 	if (offset + (npages * PAGE_SIZE) > sizeof(bootstrap_data)) {
9712 		panic("%s: out of memory for the bootstrap shadow map",
9713 		    __func__);
9714 	}
9715 
9716 	addr = (uintptr_t)&bootstrap_data[offset];
9717 	offset += (npages * PAGE_SIZE);
9718 	return (addr);
9719 }
9720 
9721 static void __nosanitizeaddress
9722 pmap_san_enter_bootstrap(void)
9723 {
9724 	vm_offset_t freemempos;
9725 
9726 	/* L1, L2 */
9727 	freemempos = pmap_san_enter_bootstrap_alloc_pages(2);
9728 	bs_state.freemempos = freemempos;
9729 	bs_state.va = KASAN_MIN_ADDRESS;
9730 	pmap_bootstrap_l1_table(&bs_state);
9731 	pmap_san_early_l2 = bs_state.l2;
9732 }
9733 
9734 static vm_page_t
9735 pmap_san_enter_alloc_l3(void)
9736 {
9737 	vm_page_t m;
9738 
9739 	m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
9740 	    VM_ALLOC_ZERO);
9741 	if (m == NULL)
9742 		panic("%s: no memory to grow shadow map", __func__);
9743 	return (m);
9744 }
9745 
9746 static vm_page_t
9747 pmap_san_enter_alloc_l2(void)
9748 {
9749 	return (vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO,
9750 	    Ln_ENTRIES, 0, ~0ul, L2_SIZE, 0, VM_MEMATTR_DEFAULT));
9751 }
9752 
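/*
 * Grow the KASAN/KMSAN shadow map so that it covers "va".  Before
 * pmap_bootstrap() has run, page table pages and shadow memory come from the
 * static bootstrap arrays above; afterwards they are allocated from the VM
 * system, preferring an L2 block (2 MB with a 4 KB granule) for the shadow
 * itself and falling back to individual L3 pages when a physically
 * contiguous run cannot be found.
 */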
9753 void __nosanitizeaddress __nosanitizememory
9754 pmap_san_enter(vm_offset_t va)
9755 {
9756 	pd_entry_t *l1, *l2;
9757 	pt_entry_t *l3;
9758 	vm_page_t m;
9759 
9760 	if (virtual_avail == 0) {
9761 		vm_offset_t block;
9762 		int slot;
9763 		bool first;
9764 
9765 		/* Temporary shadow map prior to pmap_bootstrap(). */
9766 		first = pmap_san_early_l2 == NULL;
9767 		if (first)
9768 			pmap_san_enter_bootstrap();
9769 
9770 		l2 = pmap_san_early_l2;
9771 		slot = pmap_l2_index(va);
9772 
9773 		if ((pmap_load(&l2[slot]) & ATTR_DESCR_VALID) == 0) {
9774 			MPASS(first);
9775 			block = pmap_san_enter_bootstrap_alloc_l2();
9776 			pmap_store(&l2[slot],
9777 			    PHYS_TO_PTE(pmap_early_vtophys(block)) |
9778 			    PMAP_SAN_PTE_BITS | L2_BLOCK);
9779 			dmb(ishst);
9780 		}
9781 
9782 		return;
9783 	}
9784 
9785 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
9786 	l1 = pmap_l1(kernel_pmap, va);
9787 	MPASS(l1 != NULL);
9788 	if ((pmap_load(l1) & ATTR_DESCR_VALID) == 0) {
9789 		m = pmap_san_enter_alloc_l3();
9790 		pmap_store(l1, VM_PAGE_TO_PTE(m) | L1_TABLE);
9791 	}
9792 	l2 = pmap_l1_to_l2(l1, va);
9793 	if ((pmap_load(l2) & ATTR_DESCR_VALID) == 0) {
9794 		m = pmap_san_enter_alloc_l2();
9795 		if (m != NULL) {
9796 			pmap_store(l2, VM_PAGE_TO_PTE(m) |
9797 			    PMAP_SAN_PTE_BITS | L2_BLOCK);
9798 		} else {
9799 			m = pmap_san_enter_alloc_l3();
9800 			pmap_store(l2, VM_PAGE_TO_PTE(m) | L2_TABLE);
9801 		}
9802 		dmb(ishst);
9803 	}
9804 	if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK)
9805 		return;
9806 	l3 = pmap_l2_to_l3(l2, va);
9807 	if ((pmap_load(l3) & ATTR_DESCR_VALID) != 0)
9808 		return;
9809 	m = pmap_san_enter_alloc_l3();
9810 	pmap_store(l3, VM_PAGE_TO_PTE(m) | PMAP_SAN_PTE_BITS | L3_PAGE);
9811 	dmb(ishst);
9812 }
9813 #endif /* KASAN || KMSAN */
9814 
9815 /*
9816  * Track a range of the kernel's virtual address space that is contiguous
9817  * in various mapping attributes.
9818  */
9819 struct pmap_kernel_map_range {
9820 	vm_offset_t sva;
9821 	pt_entry_t attrs;
9822 	int l3pages;
9823 	int l3contig;
9824 	int l2blocks;
9825 	int l2contig;
9826 	int l1blocks;
9827 };
9828 
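/*
 * Each line written by the vm.pmap.kernel_maps sysctl describes one run of
 * mappings with identical attributes.  Purely as an illustration, a line has
 * the shape
 *
 *   0xffff000000000000-0xffff000001000000 r-x-s-     WB 0 0 8 0 0
 *
 * where the flag characters are writable, kernel-executable,
 * user-executable, user/supervisor, and guarded (BTI), followed by the
 * memory type and the counts of L1 blocks, L2C runs, L2 blocks, L3C runs,
 * and L3 pages in the range.
 */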
9829 static void
9830 sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
9831     vm_offset_t eva)
9832 {
9833 	const char *mode;
9834 	int index;
9835 
9836 	if (eva <= range->sva)
9837 		return;
9838 
9839 	index = range->attrs & ATTR_S1_IDX_MASK;
9840 	switch (index) {
9841 	case ATTR_S1_IDX(VM_MEMATTR_DEVICE_NP):
9842 		mode = "DEV-NP";
9843 		break;
9844 	case ATTR_S1_IDX(VM_MEMATTR_DEVICE):
9845 		mode = "DEV";
9846 		break;
9847 	case ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE):
9848 		mode = "UC";
9849 		break;
9850 	case ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK):
9851 		mode = "WB";
9852 		break;
9853 	case ATTR_S1_IDX(VM_MEMATTR_WRITE_THROUGH):
9854 		mode = "WT";
9855 		break;
9856 	default:
9857 		printf(
9858 		    "%s: unknown memory type %x for range 0x%016lx-0x%016lx\n",
9859 		    __func__, index, range->sva, eva);
9860 		mode = "??";
9861 		break;
9862 	}
9863 
9864 	sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c%c %6s %d %d %d %d %d\n",
9865 	    range->sva, eva,
9866 	    (range->attrs & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP_RW ? 'w' : '-',
9867 	    (range->attrs & ATTR_S1_PXN) != 0 ? '-' : 'x',
9868 	    (range->attrs & ATTR_S1_UXN) != 0 ? '-' : 'X',
9869 	    (range->attrs & ATTR_S1_AP(ATTR_S1_AP_USER)) != 0 ? 'u' : 's',
9870 	    (range->attrs & ATTR_S1_GP) != 0 ? 'g' : '-',
9871 	    mode, range->l1blocks, range->l2contig, range->l2blocks,
9872 	    range->l3contig, range->l3pages);
9873 
9874 	/* Reset to sentinel value. */
9875 	range->sva = 0xfffffffffffffffful;
9876 }
9877 
9878 /*
9879  * Determine whether the attributes specified by a page table entry match those
9880  * being tracked by the current range.
9881  */
9882 static bool
9883 sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
9884 {
9885 
9886 	return (range->attrs == attrs);
9887 }
9888 
9889 static void
9890 sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
9891     pt_entry_t attrs)
9892 {
9893 
9894 	memset(range, 0, sizeof(*range));
9895 	range->sva = va;
9896 	range->attrs = attrs;
9897 }
9898 
9899 /* Get the block/page attributes that correspond to the table attributes */
9900 static pt_entry_t
9901 sysctl_kmaps_table_attrs(pd_entry_t table)
9902 {
9903 	pt_entry_t attrs;
9904 
9905 	attrs = 0;
9906 	if ((table & TATTR_UXN_TABLE) != 0)
9907 		attrs |= ATTR_S1_UXN;
9908 	if ((table & TATTR_PXN_TABLE) != 0)
9909 		attrs |= ATTR_S1_PXN;
9910 	if ((table & TATTR_AP_TABLE_RO) != 0)
9911 		attrs |= ATTR_S1_AP(ATTR_S1_AP_RO);
9912 
9913 	return (attrs);
9914 }
9915 
9916 /* Read the block/page attributes we care about */
9917 static pt_entry_t
9918 sysctl_kmaps_block_attrs(pt_entry_t block)
9919 {
9920 	return (block & (ATTR_S1_AP_MASK | ATTR_S1_XN | ATTR_S1_IDX_MASK |
9921 	    ATTR_S1_GP));
9922 }
9923 
9924 /*
9925  * Given a leaf PTE, derive the mapping's attributes.  If they do not match
9926  * those of the current run, dump the address range and its attributes, and
9927  * begin a new run.
9928  */
9929 static void
9930 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
9931     vm_offset_t va, pd_entry_t l0e, pd_entry_t l1e, pd_entry_t l2e,
9932     pt_entry_t l3e)
9933 {
9934 	pt_entry_t attrs;
9935 
9936 	attrs = sysctl_kmaps_table_attrs(l0e);
9937 
9938 	if ((l1e & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
9939 		attrs |= sysctl_kmaps_block_attrs(l1e);
9940 		goto done;
9941 	}
9942 	attrs |= sysctl_kmaps_table_attrs(l1e);
9943 
9944 	if ((l2e & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
9945 		attrs |= sysctl_kmaps_block_attrs(l2e);
9946 		goto done;
9947 	}
9948 	attrs |= sysctl_kmaps_table_attrs(l2e);
9949 	attrs |= sysctl_kmaps_block_attrs(l3e);
9950 
9951 done:
9952 	if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
9953 		sysctl_kmaps_dump(sb, range, va);
9954 		sysctl_kmaps_reinit(range, va, attrs);
9955 	}
9956 }
9957 
9958 static int
9959 sysctl_kmaps(SYSCTL_HANDLER_ARGS)
9960 {
9961 	struct pmap_kernel_map_range range;
9962 	struct sbuf sbuf, *sb;
9963 	pd_entry_t l0e, *l1, l1e, *l2, l2e;
9964 	pt_entry_t *l3, l3e;
9965 	vm_offset_t sva;
9966 	vm_paddr_t pa;
9967 	int error, i, j, k, l;
9968 
9969 	error = sysctl_wire_old_buffer(req, 0);
9970 	if (error != 0)
9971 		return (error);
9972 	sb = &sbuf;
9973 	sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);
9974 
9975 	/* Sentinel value. */
9976 	range.sva = 0xfffffffffffffffful;
9977 
9978 	/*
9979 	 * Iterate over the kernel page tables without holding the kernel pmap
9980 	 * lock.  Kernel page table pages are never freed, so at worst we will
9981 	 * observe inconsistencies in the output.
9982 	 */
9983 	for (sva = 0xffff000000000000ul, i = pmap_l0_index(sva); i < Ln_ENTRIES;
9984 	    i++) {
9985 		if (i == pmap_l0_index(DMAP_MIN_ADDRESS))
9986 			sbuf_printf(sb, "\nDirect map:\n");
9987 		else if (i == pmap_l0_index(VM_MIN_KERNEL_ADDRESS))
9988 			sbuf_printf(sb, "\nKernel map:\n");
9989 #ifdef KASAN
9990 		else if (i == pmap_l0_index(KASAN_MIN_ADDRESS))
9991 			sbuf_printf(sb, "\nKASAN shadow map:\n");
9992 #endif
9993 #ifdef KMSAN
9994 		else if (i == pmap_l0_index(KMSAN_SHAD_MIN_ADDRESS))
9995 			sbuf_printf(sb, "\nKMSAN shadow map:\n");
9996 		else if (i == pmap_l0_index(KMSAN_ORIG_MIN_ADDRESS))
9997 			sbuf_printf(sb, "\nKMSAN origin map:\n");
9998 #endif
9999 
10000 		l0e = kernel_pmap->pm_l0[i];
10001 		if ((l0e & ATTR_DESCR_VALID) == 0) {
10002 			sysctl_kmaps_dump(sb, &range, sva);
10003 			sva += L0_SIZE;
10004 			continue;
10005 		}
10006 		pa = PTE_TO_PHYS(l0e);
10007 		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa);
10008 
10009 		for (j = pmap_l1_index(sva); j < Ln_ENTRIES; j++) {
10010 			l1e = l1[j];
10011 			if ((l1e & ATTR_DESCR_VALID) == 0) {
10012 				sysctl_kmaps_dump(sb, &range, sva);
10013 				sva += L1_SIZE;
10014 				continue;
10015 			}
10016 			if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) {
10017 				PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
10018 				sysctl_kmaps_check(sb, &range, sva, l0e, l1e,
10019 				    0, 0);
10020 				range.l1blocks++;
10021 				sva += L1_SIZE;
10022 				continue;
10023 			}
10024 			pa = PTE_TO_PHYS(l1e);
10025 			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa);
10026 
10027 			for (k = pmap_l2_index(sva); k < Ln_ENTRIES; k++) {
10028 				l2e = l2[k];
10029 				if ((l2e & ATTR_DESCR_VALID) == 0) {
10030 					sysctl_kmaps_dump(sb, &range, sva);
10031 					sva += L2_SIZE;
10032 					continue;
10033 				}
10034 				if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) {
10035 					sysctl_kmaps_check(sb, &range, sva,
10036 					    l0e, l1e, l2e, 0);
10037 					if ((l2e & ATTR_CONTIGUOUS) != 0)
10038 						range.l2contig +=
10039 						    k % L2C_ENTRIES == 0 ?
10040 						    1 : 0;
10041 					else
10042 						range.l2blocks++;
10043 					sva += L2_SIZE;
10044 					continue;
10045 				}
10046 				pa = PTE_TO_PHYS(l2e);
10047 				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);
10048 
10049 				for (l = pmap_l3_index(sva); l < Ln_ENTRIES;
10050 				    l++, sva += L3_SIZE) {
10051 					l3e = l3[l];
10052 					if ((l3e & ATTR_DESCR_VALID) == 0) {
10053 						sysctl_kmaps_dump(sb, &range,
10054 						    sva);
10055 						continue;
10056 					}
10057 					sysctl_kmaps_check(sb, &range, sva,
10058 					    l0e, l1e, l2e, l3e);
10059 					if ((l3e & ATTR_CONTIGUOUS) != 0)
10060 						range.l3contig +=
10061 						    l % L3C_ENTRIES == 0 ?
10062 						    1 : 0;
10063 					else
10064 						range.l3pages++;
10065 				}
10066 			}
10067 		}
10068 	}
10069 
10070 	error = sbuf_finish(sb);
10071 	sbuf_delete(sb);
10072 	return (error);
10073 }
10074 SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
10075     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
10076     NULL, 0, sysctl_kmaps, "A",
10077     "Dump kernel address layout");
10078