xref: /freebsd/sys/arm64/arm64/pmap.c (revision a884f699e4bfc1be4e721d3ec4fa93915be18a86)
1 /*-
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  * Copyright (c) 2003 Peter Wemm
9  * All rights reserved.
10  * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
11  * All rights reserved.
12  * Copyright (c) 2014 Andrew Turner
13  * All rights reserved.
14  * Copyright (c) 2014-2016 The FreeBSD Foundation
15  * All rights reserved.
16  *
17  * This code is derived from software contributed to Berkeley by
18  * the Systems Programming Group of the University of Utah Computer
19  * Science Department and William Jolitz of UUNET Technologies Inc.
20  *
21  * This software was developed by Andrew Turner under sponsorship from
22  * the FreeBSD Foundation.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted provided that the following conditions
26  * are met:
27  * 1. Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  * 3. All advertising materials mentioning features or use of this software
33  *    must display the following acknowledgement:
34  *	This product includes software developed by the University of
35  *	California, Berkeley and its contributors.
36  * 4. Neither the name of the University nor the names of its contributors
37  *    may be used to endorse or promote products derived from this software
38  *    without specific prior written permission.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  */
52 /*-
53  * Copyright (c) 2003 Networks Associates Technology, Inc.
54  * All rights reserved.
55  *
56  * This software was developed for the FreeBSD Project by Jake Burkholder,
57  * Safeport Network Services, and Network Associates Laboratories, the
58  * Security Research Division of Network Associates, Inc. under
59  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
60  * CHATS research program.
61  *
62  * Redistribution and use in source and binary forms, with or without
63  * modification, are permitted provided that the following conditions
64  * are met:
65  * 1. Redistributions of source code must retain the above copyright
66  *    notice, this list of conditions and the following disclaimer.
67  * 2. Redistributions in binary form must reproduce the above copyright
68  *    notice, this list of conditions and the following disclaimer in the
69  *    documentation and/or other materials provided with the distribution.
70  *
71  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
72  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
73  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
74  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
75  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
76  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
77  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
78  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
79  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
80  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
81  * SUCH DAMAGE.
82  */
83 
84 #include <sys/cdefs.h>
85 /*
86  *	Manages physical address maps.
87  *
88  *	Since the information managed by this module is
89  *	also stored by the logical address mapping module,
90  *	this module may throw away valid virtual-to-physical
91  *	mappings at almost any time.  However, invalidations
92  *	of virtual-to-physical mappings must be done as
93  *	requested.
94  *
95  *	In order to cope with hardware architectures which
96  *	make virtual-to-physical map invalidates expensive,
97  *	this module may delay invalidate or reduced protection
98  *	operations until such time as they are actually
99  *	necessary.  This module is given full information as
100  *	to which processors are currently using which maps,
101  *	and to when physical maps must be made correct.
102  */
103 
104 #include "opt_vm.h"
105 
106 #include <sys/param.h>
107 #include <sys/asan.h>
108 #include <sys/bitstring.h>
109 #include <sys/bus.h>
110 #include <sys/systm.h>
111 #include <sys/kernel.h>
112 #include <sys/ktr.h>
113 #include <sys/limits.h>
114 #include <sys/lock.h>
115 #include <sys/malloc.h>
116 #include <sys/mman.h>
117 #include <sys/msan.h>
118 #include <sys/msgbuf.h>
119 #include <sys/mutex.h>
120 #include <sys/physmem.h>
121 #include <sys/proc.h>
122 #include <sys/rangeset.h>
123 #include <sys/rwlock.h>
124 #include <sys/sbuf.h>
125 #include <sys/sx.h>
126 #include <sys/vmem.h>
127 #include <sys/vmmeter.h>
128 #include <sys/sched.h>
129 #include <sys/sysctl.h>
130 #include <sys/_unrhdr.h>
131 #include <sys/smp.h>
132 
133 #include <vm/vm.h>
134 #include <vm/vm_param.h>
135 #include <vm/vm_kern.h>
136 #include <vm/vm_page.h>
137 #include <vm/vm_map.h>
138 #include <vm/vm_object.h>
139 #include <vm/vm_extern.h>
140 #include <vm/vm_pageout.h>
141 #include <vm/vm_pager.h>
142 #include <vm/vm_phys.h>
143 #include <vm/vm_radix.h>
144 #include <vm/vm_reserv.h>
145 #include <vm/vm_dumpset.h>
146 #include <vm/uma.h>
147 
148 #include <machine/asan.h>
149 #include <machine/cpu_feat.h>
150 #include <machine/machdep.h>
151 #include <machine/md_var.h>
152 #include <machine/pcb.h>
153 
154 #ifdef NUMA
155 #define	PMAP_MEMDOM	MAXMEMDOM
156 #else
157 #define	PMAP_MEMDOM	1
158 #endif
159 
160 #define	PMAP_ASSERT_STAGE1(pmap)	MPASS((pmap)->pm_stage == PM_STAGE1)
161 #define	PMAP_ASSERT_STAGE2(pmap)	MPASS((pmap)->pm_stage == PM_STAGE2)
162 
163 #define	NL0PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
164 #define	NL1PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
165 #define	NL2PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
166 #define	NL3PG		(PAGE_SIZE/(sizeof (pt_entry_t)))
167 
168 #define	NUL0E		L0_ENTRIES
169 #define	NUL1E		(NUL0E * NL1PG)
170 #define	NUL2E		(NUL1E * NL2PG)
171 
172 #ifdef PV_STATS
173 #define PV_STAT(x)	do { x ; } while (0)
174 #define __pvused
175 #else
176 #define PV_STAT(x)	do { } while (0)
177 #define __pvused	__unused
178 #endif
179 
180 #define	pmap_l0_pindex(v)	(NUL2E + NUL1E + ((v) >> L0_SHIFT))
181 #define	pmap_l1_pindex(v)	(NUL2E + ((v) >> L1_SHIFT))
182 #define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
183 
184 #ifdef __ARM_FEATURE_BTI_DEFAULT
185 pt_entry_t __read_mostly pmap_gp_attr;
186 #define	ATTR_KERN_GP		pmap_gp_attr
187 #else
188 #define	ATTR_KERN_GP		0
189 #endif
190 #define	PMAP_SAN_PTE_BITS	(ATTR_AF | ATTR_S1_XN | pmap_sh_attr | \
191   ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
192 
193 static bool __read_mostly pmap_multiple_tlbi = false;
194 
195 struct pmap_large_md_page {
196 	struct rwlock   pv_lock;
197 	struct md_page  pv_page;
198 	/* Pad to a power of 2, see pmap_init_pv_table(). */
199 	int		pv_pad[2];
200 };
201 
202 __exclusive_cache_line static struct pmap_large_md_page pv_dummy_large;
203 #define pv_dummy pv_dummy_large.pv_page
204 __read_mostly static struct pmap_large_md_page *pv_table;
205 
206 static struct pmap_large_md_page *
_pa_to_pmdp(vm_paddr_t pa)207 _pa_to_pmdp(vm_paddr_t pa)
208 {
209 	struct vm_phys_seg *seg;
210 
211 	if ((seg = vm_phys_paddr_to_seg(pa)) != NULL)
212 		return ((struct pmap_large_md_page *)seg->md_first +
213 		    pmap_l2_pindex(pa) - pmap_l2_pindex(seg->start));
214 	return (NULL);
215 }
216 
217 static struct pmap_large_md_page *
pa_to_pmdp(vm_paddr_t pa)218 pa_to_pmdp(vm_paddr_t pa)
219 {
220 	struct pmap_large_md_page *pvd;
221 
222 	pvd = _pa_to_pmdp(pa);
223 	if (pvd == NULL)
224 		panic("pa 0x%jx not within vm_phys_segs", (uintmax_t)pa);
225 	return (pvd);
226 }
227 
228 static struct pmap_large_md_page *
page_to_pmdp(vm_page_t m)229 page_to_pmdp(vm_page_t m)
230 {
231 	struct vm_phys_seg *seg;
232 
233 	seg = &vm_phys_segs[m->segind];
234 	return ((struct pmap_large_md_page *)seg->md_first +
235 	    pmap_l2_pindex(VM_PAGE_TO_PHYS(m)) - pmap_l2_pindex(seg->start));
236 }
237 
238 #define	pa_to_pvh(pa)	(&(pa_to_pmdp(pa)->pv_page))
239 #define	page_to_pvh(m)	(&(page_to_pmdp(m)->pv_page))
240 
241 #define	PHYS_TO_PV_LIST_LOCK(pa)	({			\
242 	struct pmap_large_md_page *_pvd;			\
243 	struct rwlock *_lock;					\
244 	_pvd = _pa_to_pmdp(pa);					\
245 	if (__predict_false(_pvd == NULL))			\
246 		_lock = &pv_dummy_large.pv_lock;		\
247 	else							\
248 		_lock = &(_pvd->pv_lock);			\
249 	_lock;							\
250 })
251 
252 static struct rwlock *
VM_PAGE_TO_PV_LIST_LOCK(vm_page_t m)253 VM_PAGE_TO_PV_LIST_LOCK(vm_page_t m)
254 {
255 	if ((m->flags & PG_FICTITIOUS) == 0)
256 		return (&page_to_pmdp(m)->pv_lock);
257 	else
258 		return (&pv_dummy_large.pv_lock);
259 }
260 
261 #define	CHANGE_PV_LIST_LOCK(lockp, new_lock)	do {	\
262 	struct rwlock **_lockp = (lockp);		\
263 	struct rwlock *_new_lock = (new_lock);		\
264 							\
265 	if (_new_lock != *_lockp) {			\
266 		if (*_lockp != NULL)			\
267 			rw_wunlock(*_lockp);		\
268 		*_lockp = _new_lock;			\
269 		rw_wlock(*_lockp);			\
270 	}						\
271 } while (0)
272 
273 #define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)		\
274 			CHANGE_PV_LIST_LOCK(lockp, PHYS_TO_PV_LIST_LOCK(pa))
275 
276 #define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
277 			CHANGE_PV_LIST_LOCK(lockp, VM_PAGE_TO_PV_LIST_LOCK(m))
278 
279 #define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
280 	struct rwlock **_lockp = (lockp);		\
281 							\
282 	if (*_lockp != NULL) {				\
283 		rw_wunlock(*_lockp);			\
284 		*_lockp = NULL;				\
285 	}						\
286 } while (0)
287 
288 #define PTE_TO_VM_PAGE(pte) PHYS_TO_VM_PAGE(PTE_TO_PHYS(pte))
289 #define VM_PAGE_TO_PTE(m) PHYS_TO_PTE(VM_PAGE_TO_PHYS(m))
290 
291 /*
292  * The presence of this flag indicates that the mapping is writeable.
293  * If the ATTR_S1_AP_RO bit is also set, then the mapping is clean, otherwise
294  * it is dirty.  This flag may only be set on managed mappings.
295  *
296  * The DBM bit is reserved on ARMv8.0 but it seems we can safely treat it
297  * as a software managed bit.
298  */
299 #define	ATTR_SW_DBM	ATTR_DBM
300 
301 struct pmap kernel_pmap_store;
302 
303 /* Used for mapping ACPI memory before VM is initialized */
304 #define	PMAP_PREINIT_MAPPING_COUNT	32
305 #define	PMAP_PREINIT_MAPPING_SIZE	(PMAP_PREINIT_MAPPING_COUNT * L2_SIZE)
306 static vm_offset_t preinit_map_va;	/* Start VA of pre-init mapping space */
307 static int vm_initialized = 0;		/* No need to use pre-init maps when set */
308 
309 /*
310  * Reserve a few L2 blocks starting from 'preinit_map_va' pointer.
311  * Always map entire L2 block for simplicity.
312  * VA of L2 block = preinit_map_va + i * L2_SIZE
313  */
314 static struct pmap_preinit_mapping {
315 	vm_paddr_t	pa;
316 	vm_offset_t	va;
317 	vm_size_t	size;
318 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
319 
320 vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
321 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
322 vm_offset_t kernel_vm_end = 0;
323 
324 /*
325  * Data for the pv entry allocation mechanism.
326  */
327 #ifdef NUMA
328 static __inline int
pc_to_domain(struct pv_chunk * pc)329 pc_to_domain(struct pv_chunk *pc)
330 {
331 	return (vm_phys_domain(DMAP_TO_PHYS((vm_offset_t)pc)));
332 }
333 #else
334 static __inline int
pc_to_domain(struct pv_chunk * pc __unused)335 pc_to_domain(struct pv_chunk *pc __unused)
336 {
337 	return (0);
338 }
339 #endif
340 
341 struct pv_chunks_list {
342 	struct mtx pvc_lock;
343 	TAILQ_HEAD(pch, pv_chunk) pvc_list;
344 	int active_reclaims;
345 } __aligned(CACHE_LINE_SIZE);
346 
347 struct pv_chunks_list __exclusive_cache_line pv_chunks[PMAP_MEMDOM];
348 
349 vm_paddr_t dmap_phys_base;	/* The start of the dmap region */
350 vm_paddr_t dmap_phys_max;	/* The limit of the dmap region */
351 vm_offset_t dmap_max_addr;	/* The virtual address limit of the dmap */
352 
353 extern pt_entry_t pagetable_l0_ttbr1[];
354 
355 #define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
356 static vm_paddr_t physmap[PHYSMAP_SIZE];
357 static u_int physmap_idx;
358 
359 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
360     "VM/pmap parameters");
361 
362 static int pmap_growkernel_panic = 0;
363 SYSCTL_INT(_vm_pmap, OID_AUTO, growkernel_panic, CTLFLAG_RDTUN,
364     &pmap_growkernel_panic, 0,
365     "panic on failure to allocate kernel page table page");
366 
367 bool pmap_lpa_enabled __read_mostly = false;
368 pt_entry_t pmap_sh_attr __read_mostly = ATTR_SH(ATTR_SH_IS);
369 
370 #if PAGE_SIZE == PAGE_SIZE_4K
371 #define	L1_BLOCKS_SUPPORTED	1
372 #else
373 #define	L1_BLOCKS_SUPPORTED	(pmap_lpa_enabled)
374 #endif
375 
376 #define	PMAP_ASSERT_L1_BLOCKS_SUPPORTED	MPASS(L1_BLOCKS_SUPPORTED)
377 
378 static bool pmap_l1_supported __read_mostly = false;
379 
380 /*
381  * This ASID allocator uses a bit vector ("asid_set") to remember which ASIDs
382  * that it has currently allocated to a pmap, a cursor ("asid_next") to
383  * optimize its search for a free ASID in the bit vector, and an epoch number
384  * ("asid_epoch") to indicate when it has reclaimed all previously allocated
385  * ASIDs that are not currently active on a processor.
386  *
387  * The current epoch number is always in the range [0, INT_MAX).  Negative
388  * numbers and INT_MAX are reserved for special cases that are described
389  * below.
390  */
391 struct asid_set {
392 	int asid_bits;
393 	bitstr_t *asid_set;
394 	int asid_set_size;
395 	int asid_next;
396 	int asid_epoch;
397 	struct mtx asid_set_mutex;
398 };
399 
400 static struct asid_set asids;
401 static struct asid_set vmids;
402 
403 static SYSCTL_NODE(_vm_pmap, OID_AUTO, asid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
404     "ASID allocator");
405 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, bits, CTLFLAG_RD, &asids.asid_bits, 0,
406     "The number of bits in an ASID");
407 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, next, CTLFLAG_RD, &asids.asid_next, 0,
408     "The last allocated ASID plus one");
409 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, epoch, CTLFLAG_RD, &asids.asid_epoch, 0,
410     "The current epoch number");
411 
412 static SYSCTL_NODE(_vm_pmap, OID_AUTO, vmid, CTLFLAG_RD, 0, "VMID allocator");
413 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, bits, CTLFLAG_RD, &vmids.asid_bits, 0,
414     "The number of bits in an VMID");
415 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, next, CTLFLAG_RD, &vmids.asid_next, 0,
416     "The last allocated VMID plus one");
417 SYSCTL_INT(_vm_pmap_vmid, OID_AUTO, epoch, CTLFLAG_RD, &vmids.asid_epoch, 0,
418     "The current epoch number");
419 
420 void (*pmap_clean_stage2_tlbi)(void);
421 void (*pmap_stage2_invalidate_range)(uint64_t, vm_offset_t, vm_offset_t, bool);
422 void (*pmap_stage2_invalidate_all)(uint64_t);
423 
424 /*
425  * A pmap's cookie encodes an ASID and epoch number.  Cookies for reserved
426  * ASIDs have a negative epoch number, specifically, INT_MIN.  Cookies for
427  * dynamically allocated ASIDs have a non-negative epoch number.
428  *
429  * An invalid ASID is represented by -1.
430  *
431  * There are two special-case cookie values: (1) COOKIE_FROM(-1, INT_MIN),
432  * which indicates that an ASID should never be allocated to the pmap, and
433  * (2) COOKIE_FROM(-1, INT_MAX), which indicates that an ASID should be
434  * allocated when the pmap is next activated.
435  */
436 #define	COOKIE_FROM(asid, epoch)	((long)((u_int)(asid) |	\
437 					    ((u_long)(epoch) << 32)))
438 #define	COOKIE_TO_ASID(cookie)		((int)(cookie))
439 #define	COOKIE_TO_EPOCH(cookie)		((int)((u_long)(cookie) >> 32))
440 
441 #define	TLBI_VA_SHIFT			12
442 #define	TLBI_VA_MASK			((1ul << 44) - 1)
443 #define	TLBI_VA(addr)			(((addr) >> TLBI_VA_SHIFT) & TLBI_VA_MASK)
444 
445 static int __read_frequently superpages_enabled = 1;
446 SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
447     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0,
448     "Are large page mappings enabled?");
449 
450 /*
451  * True when Branch Target Identification should be used by userspace. This
452  * allows pmap to mark pages as guarded with ATTR_S1_GP.
453  */
454 __read_mostly static bool pmap_bti_support = false;
455 
456 /*
457  * Internal flags for pmap_enter()'s helper functions.
458  */
459 #define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
460 #define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */
461 
462 TAILQ_HEAD(pv_chunklist, pv_chunk);
463 
464 static void	free_pv_chunk(struct pv_chunk *pc);
465 static void	free_pv_chunk_batch(struct pv_chunklist *batch);
466 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
467 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
468 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
469 static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
470 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
471 		    vm_offset_t va);
472 
473 static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
474 static bool pmap_activate_int(struct thread *td, pmap_t pmap);
475 static void pmap_alloc_asid(pmap_t pmap);
476 static int pmap_change_props_locked(vm_offset_t va, vm_size_t size,
477     vm_prot_t prot, int mode, bool skip_unmapped);
478 static bool pmap_copy_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va,
479     pt_entry_t l3e, vm_page_t ml3, struct rwlock **lockp);
480 static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
481 static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2,
482     vm_offset_t va, struct rwlock **lockp);
483 static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
484 static bool pmap_demote_l2c(pmap_t pmap, pt_entry_t *l2p, vm_offset_t va);
485 static bool pmap_demote_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va);
486 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
487     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
488 static int pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2,
489     u_int flags, vm_page_t m, struct rwlock **lockp);
490 static int pmap_enter_l3c(pmap_t pmap, vm_offset_t va, pt_entry_t l3e, u_int flags,
491     vm_page_t m, vm_page_t *ml3p, struct rwlock **lockp);
492 static bool pmap_every_pte_zero(vm_paddr_t pa);
493 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted,
494     bool all_l3e_AF_set);
495 static pt_entry_t pmap_load_l3c(pt_entry_t *l3p);
496 static void pmap_mask_set_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va,
497     vm_offset_t *vap, vm_offset_t va_next, pt_entry_t mask, pt_entry_t nbits);
498 static bool pmap_pv_insert_l3c(pmap_t pmap, vm_offset_t va, vm_page_t m,
499     struct rwlock **lockp);
500 static void pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
501 static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
502     pd_entry_t l1e, bool demote_kl2e, struct spglist *free,
503     struct rwlock **lockp);
504 static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
505     pd_entry_t l2e, struct spglist *free, struct rwlock **lockp);
506 static bool pmap_remove_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va,
507     vm_offset_t *vap, vm_offset_t va_next, vm_page_t ml3, struct spglist *free,
508     struct rwlock **lockp);
509 static void pmap_reset_asid_set(pmap_t pmap);
510 static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
511     vm_page_t m, struct rwlock **lockp);
512 
513 static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
514 		struct rwlock **lockp);
515 
516 static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
517     struct spglist *free);
518 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
519 static void pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
520     vm_offset_t va, vm_size_t size);
521 static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
522 
523 static uma_zone_t pmap_bti_ranges_zone;
524 static bool pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
525     pt_entry_t *pte);
526 static pt_entry_t pmap_pte_bti(pmap_t pmap, vm_offset_t va);
527 static void pmap_bti_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
528 static void *bti_dup_range(void *ctx, void *data);
529 static void bti_free_range(void *ctx, void *node);
530 static int pmap_bti_copy(pmap_t dst_pmap, pmap_t src_pmap);
531 static void pmap_bti_deassign_all(pmap_t pmap);
532 
533 /*
534  * These load the old table data and store the new value.
535  * They need to be atomic as the System MMU may write to the table at
536  * the same time as the CPU.
537  */
538 #define	pmap_clear(table)		atomic_store_64(table, 0)
539 #define	pmap_clear_bits(table, bits)	atomic_clear_64(table, bits)
540 #define	pmap_load(table)		(*table)
541 #define	pmap_load_clear(table)		atomic_swap_64(table, 0)
542 #define	pmap_load_store(table, entry)	atomic_swap_64(table, entry)
543 #define	pmap_set_bits(table, bits)	atomic_set_64(table, bits)
544 #define	pmap_store(table, entry)	atomic_store_64(table, entry)
545 
546 /********************/
547 /* Inline functions */
548 /********************/
549 
550 static __inline void
pagecopy(void * s,void * d)551 pagecopy(void *s, void *d)
552 {
553 
554 	memcpy(d, s, PAGE_SIZE);
555 }
556 
557 static __inline pd_entry_t *
pmap_l0(pmap_t pmap,vm_offset_t va)558 pmap_l0(pmap_t pmap, vm_offset_t va)
559 {
560 
561 	return (&pmap->pm_l0[pmap_l0_index(va)]);
562 }
563 
564 static __inline pd_entry_t *
pmap_l0_to_l1(pd_entry_t * l0,vm_offset_t va)565 pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
566 {
567 	pd_entry_t *l1;
568 
569 	l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l0)));
570 	return (&l1[pmap_l1_index(va)]);
571 }
572 
573 static __inline pd_entry_t *
pmap_l1(pmap_t pmap,vm_offset_t va)574 pmap_l1(pmap_t pmap, vm_offset_t va)
575 {
576 	pd_entry_t *l0;
577 
578 	l0 = pmap_l0(pmap, va);
579 	if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE)
580 		return (NULL);
581 
582 	return (pmap_l0_to_l1(l0, va));
583 }
584 
585 static __inline pd_entry_t *
pmap_l1_to_l2(pd_entry_t * l1p,vm_offset_t va)586 pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
587 {
588 	pd_entry_t l1, *l2p;
589 
590 	l1 = pmap_load(l1p);
591 
592 	KASSERT(ADDR_IS_CANONICAL(va),
593 	    ("%s: Address not in canonical form: %lx", __func__, va));
594 	/*
595 	 * The valid bit may be clear if pmap_update_entry() is concurrently
596 	 * modifying the entry, so for KVA only the entry type may be checked.
597 	 */
598 	KASSERT(ADDR_IS_KERNEL(va) || (l1 & ATTR_DESCR_VALID) != 0,
599 	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
600 	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
601 	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
602 	l2p = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(l1));
603 	return (&l2p[pmap_l2_index(va)]);
604 }
605 
606 static __inline pd_entry_t *
pmap_l2(pmap_t pmap,vm_offset_t va)607 pmap_l2(pmap_t pmap, vm_offset_t va)
608 {
609 	pd_entry_t *l1;
610 
611 	l1 = pmap_l1(pmap, va);
612 	if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE)
613 		return (NULL);
614 
615 	return (pmap_l1_to_l2(l1, va));
616 }
617 
618 static __inline pt_entry_t *
pmap_l2_to_l3(pd_entry_t * l2p,vm_offset_t va)619 pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
620 {
621 	pd_entry_t l2;
622 	pt_entry_t *l3p;
623 
624 	l2 = pmap_load(l2p);
625 
626 	KASSERT(ADDR_IS_CANONICAL(va),
627 	    ("%s: Address not in canonical form: %lx", __func__, va));
628 	/*
629 	 * The valid bit may be clear if pmap_update_entry() is concurrently
630 	 * modifying the entry, so for KVA only the entry type may be checked.
631 	 */
632 	KASSERT(ADDR_IS_KERNEL(va) || (l2 & ATTR_DESCR_VALID) != 0,
633 	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
634 	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
635 	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
636 	l3p = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(l2));
637 	return (&l3p[pmap_l3_index(va)]);
638 }
639 
640 /*
641  * Returns the lowest valid pde for a given virtual address.
642  * The next level may or may not point to a valid page or block.
643  */
644 static __inline pd_entry_t *
pmap_pde(pmap_t pmap,vm_offset_t va,int * level)645 pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
646 {
647 	pd_entry_t *l0, *l1, *l2, desc;
648 
649 	l0 = pmap_l0(pmap, va);
650 	desc = pmap_load(l0) & ATTR_DESCR_MASK;
651 	if (desc != L0_TABLE) {
652 		*level = -1;
653 		return (NULL);
654 	}
655 
656 	l1 = pmap_l0_to_l1(l0, va);
657 	desc = pmap_load(l1) & ATTR_DESCR_MASK;
658 	if (desc != L1_TABLE) {
659 		*level = 0;
660 		return (l0);
661 	}
662 
663 	l2 = pmap_l1_to_l2(l1, va);
664 	desc = pmap_load(l2) & ATTR_DESCR_MASK;
665 	if (desc != L2_TABLE) {
666 		*level = 1;
667 		return (l1);
668 	}
669 
670 	*level = 2;
671 	return (l2);
672 }
673 
674 /*
675  * Returns the lowest valid pte block or table entry for a given virtual
676  * address. If there are no valid entries return NULL and set the level to
677  * the first invalid level.
678  */
679 static __inline pt_entry_t *
pmap_pte(pmap_t pmap,vm_offset_t va,int * level)680 pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
681 {
682 	pd_entry_t *l1, *l2, desc;
683 	pt_entry_t *l3;
684 
685 	l1 = pmap_l1(pmap, va);
686 	if (l1 == NULL) {
687 		*level = 0;
688 		return (NULL);
689 	}
690 	desc = pmap_load(l1) & ATTR_DESCR_MASK;
691 	if (desc == L1_BLOCK) {
692 		PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
693 		*level = 1;
694 		return (l1);
695 	}
696 
697 	if (desc != L1_TABLE) {
698 		*level = 1;
699 		return (NULL);
700 	}
701 
702 	l2 = pmap_l1_to_l2(l1, va);
703 	desc = pmap_load(l2) & ATTR_DESCR_MASK;
704 	if (desc == L2_BLOCK) {
705 		*level = 2;
706 		return (l2);
707 	}
708 
709 	if (desc != L2_TABLE) {
710 		*level = 2;
711 		return (NULL);
712 	}
713 
714 	*level = 3;
715 	l3 = pmap_l2_to_l3(l2, va);
716 	if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
717 		return (NULL);
718 
719 	return (l3);
720 }
721 
722 /*
723  * If the given pmap has an L{1,2}_BLOCK or L3_PAGE entry at the specified
724  * level that maps the specified virtual address, then a pointer to that entry
725  * is returned.  Otherwise, NULL is returned, unless INVARIANTS are enabled
726  * and a diagnostic message is provided, in which case this function panics.
727  */
728 static __always_inline pt_entry_t *
pmap_pte_exists(pmap_t pmap,vm_offset_t va,int level,const char * diag)729 pmap_pte_exists(pmap_t pmap, vm_offset_t va, int level, const char *diag)
730 {
731 	pd_entry_t *l0p, *l1p, *l2p;
732 	pt_entry_t desc, *l3p;
733 	int walk_level __diagused;
734 
735 	KASSERT(level >= 0 && level < 4,
736 	    ("%s: %s passed an out-of-range level (%d)", __func__, diag,
737 	    level));
738 	l0p = pmap_l0(pmap, va);
739 	desc = pmap_load(l0p) & ATTR_DESCR_MASK;
740 	if (desc == L0_TABLE && level > 0) {
741 		l1p = pmap_l0_to_l1(l0p, va);
742 		desc = pmap_load(l1p) & ATTR_DESCR_MASK;
743 		if (desc == L1_BLOCK && level == 1) {
744 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
745 			return (l1p);
746 		}
747 		if (desc == L1_TABLE && level > 1) {
748 			l2p = pmap_l1_to_l2(l1p, va);
749 			desc = pmap_load(l2p) & ATTR_DESCR_MASK;
750 			if (desc == L2_BLOCK && level == 2)
751 				return (l2p);
752 			else if (desc == L2_TABLE && level > 2) {
753 				l3p = pmap_l2_to_l3(l2p, va);
754 				desc = pmap_load(l3p) & ATTR_DESCR_MASK;
755 				if (desc == L3_PAGE && level == 3)
756 					return (l3p);
757 				else
758 					walk_level = 3;
759 			} else
760 				walk_level = 2;
761 		} else
762 			walk_level = 1;
763 	} else
764 		walk_level = 0;
765 	KASSERT(diag == NULL,
766 	    ("%s: va %#lx not mapped at level %d, desc %ld at level %d",
767 	    diag, va, level, desc, walk_level));
768 	return (NULL);
769 }
770 
771 bool
pmap_ps_enabled(pmap_t pmap)772 pmap_ps_enabled(pmap_t pmap)
773 {
774 	/*
775 	 * Promotion requires a hypervisor call when the kernel is running
776 	 * in EL1. To stop this disable superpage support on non-stage 1
777 	 * pmaps for now.
778 	 */
779 	if (pmap->pm_stage != PM_STAGE1)
780 		return (false);
781 
782 #ifdef KMSAN
783 	/*
784 	 * The break-before-make in pmap_update_entry() results in a situation
785 	 * where a CPU may call into the KMSAN runtime while the entry is
786 	 * invalid.  If the entry is used to map the current thread structure,
787 	 * then the runtime will attempt to access unmapped memory.  Avoid this
788 	 * by simply disabling superpage promotion for the kernel map.
789 	 */
790 	if (pmap == kernel_pmap)
791 		return (false);
792 #endif
793 
794 	return (superpages_enabled != 0);
795 }
796 
797 bool
pmap_get_tables(pmap_t pmap,vm_offset_t va,pd_entry_t ** l0,pd_entry_t ** l1,pd_entry_t ** l2,pt_entry_t ** l3)798 pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
799     pd_entry_t **l2, pt_entry_t **l3)
800 {
801 	pd_entry_t *l0p, *l1p, *l2p;
802 
803 	if (pmap->pm_l0 == NULL)
804 		return (false);
805 
806 	l0p = pmap_l0(pmap, va);
807 	*l0 = l0p;
808 
809 	if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE)
810 		return (false);
811 
812 	l1p = pmap_l0_to_l1(l0p, va);
813 	*l1 = l1p;
814 
815 	if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) {
816 		PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
817 		*l2 = NULL;
818 		*l3 = NULL;
819 		return (true);
820 	}
821 
822 	if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE)
823 		return (false);
824 
825 	l2p = pmap_l1_to_l2(l1p, va);
826 	*l2 = l2p;
827 
828 	if ((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK) {
829 		*l3 = NULL;
830 		return (true);
831 	}
832 
833 	if ((pmap_load(l2p) & ATTR_DESCR_MASK) != L2_TABLE)
834 		return (false);
835 
836 	*l3 = pmap_l2_to_l3(l2p, va);
837 
838 	return (true);
839 }
840 
841 static __inline int
pmap_l3_valid(pt_entry_t l3)842 pmap_l3_valid(pt_entry_t l3)
843 {
844 
845 	return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
846 }
847 
848 CTASSERT(L1_BLOCK == L2_BLOCK);
849 
850 static pt_entry_t
pmap_pte_memattr(pmap_t pmap,vm_memattr_t memattr)851 pmap_pte_memattr(pmap_t pmap, vm_memattr_t memattr)
852 {
853 	pt_entry_t val;
854 
855 	if (pmap->pm_stage == PM_STAGE1) {
856 		val = ATTR_S1_IDX(memattr);
857 		if (memattr == VM_MEMATTR_DEVICE)
858 			val |= ATTR_S1_XN;
859 		return (val);
860 	}
861 
862 	val = 0;
863 
864 	switch (memattr) {
865 	case VM_MEMATTR_DEVICE:
866 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_DEVICE_nGnRnE) |
867 		    ATTR_S2_XN(ATTR_S2_XN_ALL));
868 	case VM_MEMATTR_UNCACHEABLE:
869 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_NC));
870 	case VM_MEMATTR_WRITE_BACK:
871 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_WB));
872 	case VM_MEMATTR_WRITE_THROUGH:
873 		return (ATTR_S2_MEMATTR(ATTR_S2_MEMATTR_WT));
874 	default:
875 		panic("%s: invalid memory attribute %x", __func__, memattr);
876 	}
877 }
878 
879 static pt_entry_t
pmap_pte_prot(pmap_t pmap,vm_prot_t prot)880 pmap_pte_prot(pmap_t pmap, vm_prot_t prot)
881 {
882 	pt_entry_t val;
883 
884 	val = 0;
885 	if (pmap->pm_stage == PM_STAGE1) {
886 		if ((prot & VM_PROT_EXECUTE) == 0)
887 			val |= ATTR_S1_XN;
888 		if ((prot & VM_PROT_WRITE) == 0)
889 			val |= ATTR_S1_AP(ATTR_S1_AP_RO);
890 	} else {
891 		if ((prot & VM_PROT_WRITE) != 0)
892 			val |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
893 		if ((prot & VM_PROT_READ) != 0)
894 			val |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
895 		if ((prot & VM_PROT_EXECUTE) == 0)
896 			val |= ATTR_S2_XN(ATTR_S2_XN_ALL);
897 	}
898 
899 	return (val);
900 }
901 
902 /*
903  * Checks if the PTE is dirty.
904  */
905 static inline int
pmap_pte_dirty(pmap_t pmap,pt_entry_t pte)906 pmap_pte_dirty(pmap_t pmap, pt_entry_t pte)
907 {
908 
909 	KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte));
910 
911 	if (pmap->pm_stage == PM_STAGE1) {
912 		KASSERT((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) != 0,
913 		    ("pte %#lx is writeable and missing ATTR_SW_DBM", pte));
914 
915 		return ((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
916 		    (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM));
917 	}
918 
919 	return ((pte & ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)) ==
920 	    ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE));
921 }
922 
923 static __inline void
pmap_resident_count_inc(pmap_t pmap,int count)924 pmap_resident_count_inc(pmap_t pmap, int count)
925 {
926 
927 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
928 	pmap->pm_stats.resident_count += count;
929 }
930 
931 static __inline void
pmap_resident_count_dec(pmap_t pmap,int count)932 pmap_resident_count_dec(pmap_t pmap, int count)
933 {
934 
935 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
936 	KASSERT(pmap->pm_stats.resident_count >= count,
937 	    ("pmap %p resident count underflow %ld %d", pmap,
938 	    pmap->pm_stats.resident_count, count));
939 	pmap->pm_stats.resident_count -= count;
940 }
941 
942 static vm_paddr_t
pmap_early_vtophys(vm_offset_t va)943 pmap_early_vtophys(vm_offset_t va)
944 {
945 	vm_paddr_t pa_page;
946 
947 	pa_page = arm64_address_translate_s1e1r(va) & PAR_PA_MASK;
948 	return (pa_page | (va & PAR_LOW_MASK));
949 }
950 
951 /* State of the bootstrapped DMAP page tables */
952 struct pmap_bootstrap_state {
953 	pt_entry_t	*l1;
954 	pt_entry_t	*l2;
955 	pt_entry_t	*l3;
956 	vm_offset_t	freemempos;
957 	vm_offset_t	va;
958 	vm_paddr_t	pa;
959 	pt_entry_t	table_attrs;
960 	u_int		l0_slot;
961 	u_int		l1_slot;
962 	u_int		l2_slot;
963 	bool		dmap_valid;
964 };
965 
966 /* The bootstrap state */
967 static struct pmap_bootstrap_state bs_state = {
968 	.l1 = NULL,
969 	.l2 = NULL,
970 	.l3 = NULL,
971 	.table_attrs = TATTR_PXN_TABLE,
972 	.l0_slot = L0_ENTRIES,
973 	.l1_slot = Ln_ENTRIES,
974 	.l2_slot = Ln_ENTRIES,
975 	.dmap_valid = false,
976 };
977 
978 static void
pmap_bootstrap_l0_table(struct pmap_bootstrap_state * state)979 pmap_bootstrap_l0_table(struct pmap_bootstrap_state *state)
980 {
981 	vm_paddr_t l1_pa;
982 	pd_entry_t l0e;
983 	u_int l0_slot;
984 
985 	/* Link the level 0 table to a level 1 table */
986 	l0_slot = pmap_l0_index(state->va);
987 	if (l0_slot != state->l0_slot) {
988 		/*
989 		 * Make sure we move from a low address to high address
990 		 * before the DMAP region is ready. This ensures we never
991 		 * modify an existing mapping until we can map from a
992 		 * physical address to a virtual address.
993 		 */
994 		MPASS(state->l0_slot < l0_slot ||
995 		    state->l0_slot == L0_ENTRIES ||
996 		    state->dmap_valid);
997 
998 		/* Reset lower levels */
999 		state->l2 = NULL;
1000 		state->l3 = NULL;
1001 		state->l1_slot = Ln_ENTRIES;
1002 		state->l2_slot = Ln_ENTRIES;
1003 
1004 		/* Check the existing L0 entry */
1005 		state->l0_slot = l0_slot;
1006 		if (state->dmap_valid) {
1007 			l0e = pagetable_l0_ttbr1[l0_slot];
1008 			if ((l0e & ATTR_DESCR_VALID) != 0) {
1009 				MPASS((l0e & ATTR_DESCR_MASK) == L0_TABLE);
1010 				l1_pa = PTE_TO_PHYS(l0e);
1011 				state->l1 = (pt_entry_t *)PHYS_TO_DMAP(l1_pa);
1012 				return;
1013 			}
1014 		}
1015 
1016 		/* Create a new L0 table entry */
1017 		state->l1 = (pt_entry_t *)state->freemempos;
1018 		memset(state->l1, 0, PAGE_SIZE);
1019 		state->freemempos += PAGE_SIZE;
1020 
1021 		l1_pa = pmap_early_vtophys((vm_offset_t)state->l1);
1022 		MPASS((l1_pa & Ln_TABLE_MASK) == 0);
1023 		MPASS(pagetable_l0_ttbr1[l0_slot] == 0);
1024 		pmap_store(&pagetable_l0_ttbr1[l0_slot], PHYS_TO_PTE(l1_pa) |
1025 		    TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0 | L0_TABLE);
1026 	}
1027 	KASSERT(state->l1 != NULL, ("%s: NULL l1", __func__));
1028 }
1029 
1030 static void
pmap_bootstrap_l1_table(struct pmap_bootstrap_state * state)1031 pmap_bootstrap_l1_table(struct pmap_bootstrap_state *state)
1032 {
1033 	vm_paddr_t l2_pa;
1034 	pd_entry_t l1e;
1035 	u_int l1_slot;
1036 
1037 	/* Make sure there is a valid L0 -> L1 table */
1038 	pmap_bootstrap_l0_table(state);
1039 
1040 	/* Link the level 1 table to a level 2 table */
1041 	l1_slot = pmap_l1_index(state->va);
1042 	if (l1_slot != state->l1_slot) {
1043 		/* See pmap_bootstrap_l0_table for a description */
1044 		MPASS(state->l1_slot < l1_slot ||
1045 		    state->l1_slot == Ln_ENTRIES ||
1046 		    state->dmap_valid);
1047 
1048 		/* Reset lower levels */
1049 		state->l3 = NULL;
1050 		state->l2_slot = Ln_ENTRIES;
1051 
1052 		/* Check the existing L1 entry */
1053 		state->l1_slot = l1_slot;
1054 		if (state->dmap_valid) {
1055 			l1e = state->l1[l1_slot];
1056 			if ((l1e & ATTR_DESCR_VALID) != 0) {
1057 				MPASS((l1e & ATTR_DESCR_MASK) == L1_TABLE);
1058 				l2_pa = PTE_TO_PHYS(l1e);
1059 				state->l2 = (pt_entry_t *)PHYS_TO_DMAP(l2_pa);
1060 				return;
1061 			}
1062 		}
1063 
1064 		/* Create a new L1 table entry */
1065 		state->l2 = (pt_entry_t *)state->freemempos;
1066 		memset(state->l2, 0, PAGE_SIZE);
1067 		state->freemempos += PAGE_SIZE;
1068 
1069 		l2_pa = pmap_early_vtophys((vm_offset_t)state->l2);
1070 		MPASS((l2_pa & Ln_TABLE_MASK) == 0);
1071 		MPASS(state->l1[l1_slot] == 0);
1072 		pmap_store(&state->l1[l1_slot], PHYS_TO_PTE(l2_pa) |
1073 		    state->table_attrs | L1_TABLE);
1074 	}
1075 	KASSERT(state->l2 != NULL, ("%s: NULL l2", __func__));
1076 }
1077 
1078 static void
pmap_bootstrap_l2_table(struct pmap_bootstrap_state * state)1079 pmap_bootstrap_l2_table(struct pmap_bootstrap_state *state)
1080 {
1081 	vm_paddr_t l3_pa;
1082 	pd_entry_t l2e;
1083 	u_int l2_slot;
1084 
1085 	/* Make sure there is a valid L1 -> L2 table */
1086 	pmap_bootstrap_l1_table(state);
1087 
1088 	/* Link the level 2 table to a level 3 table */
1089 	l2_slot = pmap_l2_index(state->va);
1090 	if (l2_slot != state->l2_slot) {
1091 		/* See pmap_bootstrap_l0_table for a description */
1092 		MPASS(state->l2_slot < l2_slot ||
1093 		    state->l2_slot == Ln_ENTRIES ||
1094 		    state->dmap_valid);
1095 
1096 		/* Check the existing L2 entry */
1097 		state->l2_slot = l2_slot;
1098 		if (state->dmap_valid) {
1099 			l2e = state->l2[l2_slot];
1100 			if ((l2e & ATTR_DESCR_VALID) != 0) {
1101 				MPASS((l2e & ATTR_DESCR_MASK) == L2_TABLE);
1102 				l3_pa = PTE_TO_PHYS(l2e);
1103 				state->l3 = (pt_entry_t *)PHYS_TO_DMAP(l3_pa);
1104 				return;
1105 			}
1106 		}
1107 
1108 		/* Create a new L2 table entry */
1109 		state->l3 = (pt_entry_t *)state->freemempos;
1110 		memset(state->l3, 0, PAGE_SIZE);
1111 		state->freemempos += PAGE_SIZE;
1112 
1113 		l3_pa = pmap_early_vtophys((vm_offset_t)state->l3);
1114 		MPASS((l3_pa & Ln_TABLE_MASK) == 0);
1115 		MPASS(state->l2[l2_slot] == 0);
1116 		pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(l3_pa) |
1117 		    state->table_attrs | L2_TABLE);
1118 	}
1119 	KASSERT(state->l3 != NULL, ("%s: NULL l3", __func__));
1120 }
1121 
1122 static void
pmap_bootstrap_l2_block(struct pmap_bootstrap_state * state,int i)1123 pmap_bootstrap_l2_block(struct pmap_bootstrap_state *state, int i)
1124 {
1125 	pt_entry_t contig;
1126 	u_int l2_slot;
1127 	bool first;
1128 
1129 	if ((physmap[i + 1] - state->pa) < L2_SIZE)
1130 		return;
1131 
1132 	/* Make sure there is a valid L1 table */
1133 	pmap_bootstrap_l1_table(state);
1134 
1135 	MPASS((state->va & L2_OFFSET) == 0);
1136 	for (first = true, contig = 0;
1137 	    state->va < DMAP_MAX_ADDRESS &&
1138 	    (physmap[i + 1] - state->pa) >= L2_SIZE;
1139 	    state->va += L2_SIZE, state->pa += L2_SIZE) {
1140 		/*
1141 		 * Stop if we are about to walk off the end of what the
1142 		 * current L1 slot can address.
1143 		 */
1144 		if (!first && (state->pa & L1_OFFSET) == 0)
1145 			break;
1146 
1147 		/*
1148 		 * If we have an aligned, contiguous chunk of L2C_ENTRIES
1149 		 * L2 blocks, set the contiguous bit within each PTE so that
1150 		 * the chunk can be cached using only one TLB entry.
1151 		 */
1152 		if ((state->pa & L2C_OFFSET) == 0) {
1153 			if (state->va + L2C_SIZE < DMAP_MAX_ADDRESS &&
1154 			    physmap[i + 1] - state->pa >= L2C_SIZE) {
1155 				contig = ATTR_CONTIGUOUS;
1156 			} else {
1157 				contig = 0;
1158 			}
1159 		}
1160 
1161 		first = false;
1162 		l2_slot = pmap_l2_index(state->va);
1163 		MPASS((state->pa & L2_OFFSET) == 0);
1164 		MPASS(state->l2[l2_slot] == 0);
1165 		pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(state->pa) |
1166 		    ATTR_AF | pmap_sh_attr | ATTR_S1_XN | ATTR_KERN_GP |
1167 		    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | contig | L2_BLOCK);
1168 	}
1169 	MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
1170 }
1171 
1172 static void
pmap_bootstrap_l3_page(struct pmap_bootstrap_state * state,int i)1173 pmap_bootstrap_l3_page(struct pmap_bootstrap_state *state, int i)
1174 {
1175 	pt_entry_t contig;
1176 	u_int l3_slot;
1177 	bool first;
1178 
1179 	if (physmap[i + 1] - state->pa < L3_SIZE)
1180 		return;
1181 
1182 	/* Make sure there is a valid L2 table */
1183 	pmap_bootstrap_l2_table(state);
1184 
1185 	MPASS((state->va & L3_OFFSET) == 0);
1186 	for (first = true, contig = 0;
1187 	    state->va < DMAP_MAX_ADDRESS &&
1188 	    physmap[i + 1] - state->pa >= L3_SIZE;
1189 	    state->va += L3_SIZE, state->pa += L3_SIZE) {
1190 		/*
1191 		 * Stop if we are about to walk off the end of what the
1192 		 * current L2 slot can address.
1193 		 */
1194 		if (!first && (state->pa & L2_OFFSET) == 0)
1195 			break;
1196 
1197 		/*
1198 		 * If we have an aligned, contiguous chunk of L3C_ENTRIES
1199 		 * L3 pages, set the contiguous bit within each PTE so that
1200 		 * the chunk can be cached using only one TLB entry.
1201 		 */
1202 		if ((state->pa & L3C_OFFSET) == 0) {
1203 			if (state->va + L3C_SIZE < DMAP_MAX_ADDRESS &&
1204 			    physmap[i + 1] - state->pa >= L3C_SIZE) {
1205 				contig = ATTR_CONTIGUOUS;
1206 			} else {
1207 				contig = 0;
1208 			}
1209 		}
1210 
1211 		first = false;
1212 		l3_slot = pmap_l3_index(state->va);
1213 		MPASS((state->pa & L3_OFFSET) == 0);
1214 		MPASS(state->l3[l3_slot] == 0);
1215 		pmap_store(&state->l3[l3_slot], PHYS_TO_PTE(state->pa) |
1216 		    ATTR_AF | pmap_sh_attr | ATTR_S1_XN | ATTR_KERN_GP |
1217 		    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | contig | L3_PAGE);
1218 	}
1219 	MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
1220 }
1221 
1222 void
pmap_bootstrap_dmap(vm_size_t kernlen)1223 pmap_bootstrap_dmap(vm_size_t kernlen)
1224 {
1225 	vm_paddr_t start_pa, pa;
1226 	uint64_t tcr;
1227 	int i;
1228 
1229 	tcr = READ_SPECIALREG(tcr_el1);
1230 
1231 	/* Verify that the ASID is set through TTBR0. */
1232 	KASSERT((tcr & TCR_A1) == 0, ("pmap_bootstrap: TCR_EL1.A1 != 0"));
1233 
1234 	if ((tcr & TCR_DS) != 0)
1235 		pmap_lpa_enabled = true;
1236 
1237 	pmap_l1_supported = L1_BLOCKS_SUPPORTED;
1238 
1239 	start_pa = pmap_early_vtophys(KERNBASE);
1240 
1241 	bs_state.freemempos = KERNBASE + kernlen;
1242 	bs_state.freemempos = roundup2(bs_state.freemempos, PAGE_SIZE);
1243 
1244 	/* Fill in physmap array. */
1245 	physmap_idx = physmem_avail(physmap, nitems(physmap));
1246 
1247 	dmap_phys_base = physmap[0] & ~L1_OFFSET;
1248 	dmap_phys_max = 0;
1249 	dmap_max_addr = 0;
1250 
1251 	for (i = 0; i < physmap_idx; i += 2) {
1252 		bs_state.pa = physmap[i] & ~L3_OFFSET;
1253 		bs_state.va = bs_state.pa - dmap_phys_base + DMAP_MIN_ADDRESS;
1254 
1255 		/* Create L3 mappings at the start of the region */
1256 		if ((bs_state.pa & L2_OFFSET) != 0)
1257 			pmap_bootstrap_l3_page(&bs_state, i);
1258 		MPASS(bs_state.pa <= physmap[i + 1]);
1259 
1260 		if (L1_BLOCKS_SUPPORTED) {
1261 			/* Create L2 mappings at the start of the region */
1262 			if ((bs_state.pa & L1_OFFSET) != 0)
1263 				pmap_bootstrap_l2_block(&bs_state, i);
1264 			MPASS(bs_state.pa <= physmap[i + 1]);
1265 
1266 			/* Create the main L1 block mappings */
1267 			for (; bs_state.va < DMAP_MAX_ADDRESS &&
1268 			    (physmap[i + 1] - bs_state.pa) >= L1_SIZE;
1269 			    bs_state.va += L1_SIZE, bs_state.pa += L1_SIZE) {
1270 				/* Make sure there is a valid L1 table */
1271 				pmap_bootstrap_l0_table(&bs_state);
1272 				MPASS((bs_state.pa & L1_OFFSET) == 0);
1273 				pmap_store(
1274 				    &bs_state.l1[pmap_l1_index(bs_state.va)],
1275 				    PHYS_TO_PTE(bs_state.pa) | ATTR_AF |
1276 				    pmap_sh_attr |
1277 				    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
1278 				    ATTR_S1_XN | ATTR_KERN_GP | L1_BLOCK);
1279 			}
1280 			MPASS(bs_state.pa <= physmap[i + 1]);
1281 
1282 			/* Create L2 mappings at the end of the region */
1283 			pmap_bootstrap_l2_block(&bs_state, i);
1284 		} else {
1285 			while (bs_state.va < DMAP_MAX_ADDRESS &&
1286 			    (physmap[i + 1] - bs_state.pa) >= L2_SIZE) {
1287 				pmap_bootstrap_l2_block(&bs_state, i);
1288 			}
1289 		}
1290 		MPASS(bs_state.pa <= physmap[i + 1]);
1291 
1292 		/* Create L3 mappings at the end of the region */
1293 		pmap_bootstrap_l3_page(&bs_state, i);
1294 		MPASS(bs_state.pa == physmap[i + 1]);
1295 
1296 		if (bs_state.pa > dmap_phys_max) {
1297 			dmap_phys_max = bs_state.pa;
1298 			dmap_max_addr = bs_state.va;
1299 		}
1300 	}
1301 
1302 	pmap_s1_invalidate_all_kernel();
1303 
1304 	bs_state.dmap_valid = true;
1305 
1306 	/* Exclude the kernel and DMAP region */
1307 	pa = pmap_early_vtophys(bs_state.freemempos);
1308 	physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
1309 }
1310 
1311 static void
pmap_bootstrap_l2(vm_offset_t va)1312 pmap_bootstrap_l2(vm_offset_t va)
1313 {
1314 	KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address"));
1315 
1316 	/* Leave bs_state.pa as it's only needed to bootstrap blocks and pages*/
1317 	bs_state.va = va;
1318 
1319 	for (; bs_state.va < VM_MAX_KERNEL_ADDRESS; bs_state.va += L1_SIZE)
1320 		pmap_bootstrap_l1_table(&bs_state);
1321 }
1322 
1323 static void
pmap_bootstrap_l3(vm_offset_t va)1324 pmap_bootstrap_l3(vm_offset_t va)
1325 {
1326 	KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));
1327 
1328 	/* Leave bs_state.pa as it's only needed to bootstrap blocks and pages*/
1329 	bs_state.va = va;
1330 
1331 	for (; bs_state.va < VM_MAX_KERNEL_ADDRESS; bs_state.va += L2_SIZE)
1332 		pmap_bootstrap_l2_table(&bs_state);
1333 }
1334 
1335 /*
1336  *	Bootstrap the system enough to run with virtual memory.
1337  */
1338 void
pmap_bootstrap(void)1339 pmap_bootstrap(void)
1340 {
1341 	vm_offset_t dpcpu, msgbufpv;
1342 	vm_paddr_t start_pa, pa;
1343 	size_t largest_phys_size;
1344 
1345 	/* Set this early so we can use the pagetable walking functions */
1346 	kernel_pmap_store.pm_l0 = pagetable_l0_ttbr1;
1347 	PMAP_LOCK_INIT(kernel_pmap);
1348 	kernel_pmap->pm_l0_paddr =
1349 	    pmap_early_vtophys((vm_offset_t)kernel_pmap_store.pm_l0);
1350 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
1351 	vm_radix_init(&kernel_pmap->pm_root);
1352 	kernel_pmap->pm_cookie = COOKIE_FROM(-1, INT_MIN);
1353 	kernel_pmap->pm_stage = PM_STAGE1;
1354 	kernel_pmap->pm_levels = 4;
1355 	kernel_pmap->pm_ttbr = kernel_pmap->pm_l0_paddr;
1356 	kernel_pmap->pm_asid_set = &asids;
1357 
1358 	/* Reserve some VA space for early BIOS/ACPI mapping */
1359 	preinit_map_va = roundup2(bs_state.freemempos, L2_SIZE);
1360 
1361 	virtual_avail = preinit_map_va + PMAP_PREINIT_MAPPING_SIZE;
1362 	virtual_avail = roundup2(virtual_avail, L1_SIZE);
1363 	virtual_end = VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE;
1364 	kernel_vm_end = virtual_avail;
1365 
1366 	/*
1367 	 * We only use PXN when we know nothing will be executed from it, e.g.
1368 	 * the DMAP region.
1369 	 */
1370 	bs_state.table_attrs &= ~TATTR_PXN_TABLE;
1371 
1372 	/*
1373 	 * Find the physical memory we could use. This needs to be after we
1374 	 * exclude any memory that is mapped into the DMAP region but should
1375 	 * not be used by the kernel, e.g. some UEFI memory types.
1376 	 */
1377 	physmap_idx = physmem_avail(physmap, nitems(physmap));
1378 
1379 	/*
1380 	 * Find space for early allocations. We search for the largest
1381 	 * region. This is because the user may choose a large msgbuf.
1382 	 * This could be smarter, e.g. to allow multiple regions to be
1383 	 * used & switch to the next when one is full.
1384 	 */
1385 	largest_phys_size = 0;
1386 	for (int i = 0; i < physmap_idx; i += 2) {
1387 		if ((physmap[i + 1] - physmap[i]) > largest_phys_size) {
1388 			largest_phys_size = physmap[i + 1] - physmap[i];
1389 			bs_state.freemempos = PHYS_TO_DMAP(physmap[i]);
1390 		}
1391 	}
1392 
1393 	start_pa = pmap_early_vtophys(bs_state.freemempos);
1394 
1395 	/*
1396 	 * Create the l2 tables up to VM_MAX_KERNEL_ADDRESS.  We assume that the
1397 	 * loader allocated the first and only l2 page table page used to map
1398 	 * the kernel, preloaded files and module metadata.
1399 	 */
1400 	pmap_bootstrap_l2(KERNBASE + L1_SIZE);
1401 	/* And the l3 tables for the early devmap */
1402 	pmap_bootstrap_l3(VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE));
1403 
1404 	pmap_s1_invalidate_all_kernel();
1405 
1406 #define alloc_pages(var, np)						\
1407 	(var) = bs_state.freemempos;					\
1408 	bs_state.freemempos += (np * PAGE_SIZE);			\
1409 	memset((char *)(var), 0, ((np) * PAGE_SIZE));
1410 
1411 	/* Allocate dynamic per-cpu area. */
1412 	alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
1413 	dpcpu_init((void *)dpcpu, 0);
1414 
1415 	/* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
1416 	alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
1417 	msgbufp = (void *)msgbufpv;
1418 
1419 	pa = pmap_early_vtophys(bs_state.freemempos);
1420 
1421 	physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
1422 }
1423 
1424 #if defined(KASAN) || defined(KMSAN)
1425 static void
pmap_bootstrap_allocate_san_l2(vm_paddr_t start_pa,vm_paddr_t end_pa,vm_offset_t * vap,vm_offset_t eva)1426 pmap_bootstrap_allocate_san_l2(vm_paddr_t start_pa, vm_paddr_t end_pa,
1427     vm_offset_t *vap, vm_offset_t eva)
1428 {
1429 	vm_paddr_t pa;
1430 	vm_offset_t va;
1431 	pd_entry_t *l2;
1432 
1433 	va = *vap;
1434 	pa = rounddown2(end_pa - L2_SIZE, L2_SIZE);
1435 	for (; pa >= start_pa && va < eva; va += L2_SIZE, pa -= L2_SIZE) {
1436 		l2 = pmap_l2(kernel_pmap, va);
1437 
1438 		/*
1439 		 * KASAN stack checking results in us having already allocated
1440 		 * part of our shadow map, so we can just skip those segments.
1441 		 */
1442 		if ((pmap_load(l2) & ATTR_DESCR_VALID) != 0) {
1443 			pa += L2_SIZE;
1444 			continue;
1445 		}
1446 
1447 		bzero((void *)PHYS_TO_DMAP(pa), L2_SIZE);
1448 		physmem_exclude_region(pa, L2_SIZE, EXFLAG_NOALLOC);
1449 		pmap_store(l2, PHYS_TO_PTE(pa) | PMAP_SAN_PTE_BITS | L2_BLOCK);
1450 	}
1451 	*vap = va;
1452 }
1453 
1454 /*
1455  * Finish constructing the initial shadow map:
1456  * - Count how many pages from KERNBASE to virtual_avail (scaled for
1457  *   shadow map)
1458  * - Map that entire range using L2 superpages.
1459  */
1460 static void
pmap_bootstrap_san1(vm_offset_t va,int scale)1461 pmap_bootstrap_san1(vm_offset_t va, int scale)
1462 {
1463 	vm_offset_t eva;
1464 	vm_paddr_t kernstart;
1465 	int i;
1466 
1467 	kernstart = pmap_early_vtophys(KERNBASE);
1468 
1469 	/*
1470 	 * Rebuild physmap one more time, we may have excluded more regions from
1471 	 * allocation since pmap_bootstrap().
1472 	 */
1473 	physmap_idx = physmem_avail(physmap, nitems(physmap));
1474 
1475 	eva = va + (virtual_avail - VM_MIN_KERNEL_ADDRESS) / scale;
1476 
1477 	/*
1478 	 * Find a slot in the physmap large enough for what we needed.  We try to put
1479 	 * the shadow map as high up as we can to avoid depleting the lower 4GB in case
1480 	 * it's needed for, e.g., an xhci controller that can only do 32-bit DMA.
1481 	 */
1482 	for (i = physmap_idx - 2; i >= 0; i -= 2) {
1483 		vm_paddr_t plow, phigh;
1484 
1485 		/* L2 mappings must be backed by memory that is L2-aligned */
1486 		plow = roundup2(physmap[i], L2_SIZE);
1487 		phigh = physmap[i + 1];
1488 		if (plow >= phigh)
1489 			continue;
1490 		if (kernstart >= plow && kernstart < phigh)
1491 			phigh = kernstart;
1492 		if (phigh - plow >= L2_SIZE) {
1493 			pmap_bootstrap_allocate_san_l2(plow, phigh, &va, eva);
1494 			if (va >= eva)
1495 				break;
1496 		}
1497 	}
1498 	if (i < 0)
1499 		panic("Could not find phys region for shadow map");
1500 
1501 	/*
1502 	 * Done. We should now have a valid shadow address mapped for all KVA
1503 	 * that has been mapped so far, i.e., KERNBASE to virtual_avail. Thus,
1504 	 * shadow accesses by the sanitizer runtime will succeed for this range.
1505 	 * When the kernel virtual address range is later expanded, as will
1506 	 * happen in vm_mem_init(), the shadow map will be grown as well. This
1507 	 * is handled by pmap_san_enter().
1508 	 */
1509 }
1510 
1511 void
pmap_bootstrap_san(void)1512 pmap_bootstrap_san(void)
1513 {
1514 #ifdef KASAN
1515 	pmap_bootstrap_san1(KASAN_MIN_ADDRESS, KASAN_SHADOW_SCALE);
1516 #else
1517 	static uint8_t kmsan_shad_ptp[PAGE_SIZE * 2] __aligned(PAGE_SIZE);
1518 	static uint8_t kmsan_orig_ptp[PAGE_SIZE * 2] __aligned(PAGE_SIZE);
1519 	pd_entry_t *l0, *l1;
1520 
1521 	if (virtual_avail - VM_MIN_KERNEL_ADDRESS > L1_SIZE)
1522 		panic("initial kernel map is too large");
1523 
1524 	l0 = pmap_l0(kernel_pmap, KMSAN_SHAD_MIN_ADDRESS);
1525 	pmap_store(l0, L0_TABLE | PHYS_TO_PTE(
1526 	    pmap_early_vtophys((vm_offset_t)kmsan_shad_ptp)));
1527 	l1 = pmap_l0_to_l1(l0, KMSAN_SHAD_MIN_ADDRESS);
1528 	pmap_store(l1, L1_TABLE | PHYS_TO_PTE(
1529 	    pmap_early_vtophys((vm_offset_t)kmsan_shad_ptp + PAGE_SIZE)));
1530 	pmap_bootstrap_san1(KMSAN_SHAD_MIN_ADDRESS, 1);
1531 
1532 	l0 = pmap_l0(kernel_pmap, KMSAN_ORIG_MIN_ADDRESS);
1533 	pmap_store(l0, L0_TABLE | PHYS_TO_PTE(
1534 	    pmap_early_vtophys((vm_offset_t)kmsan_orig_ptp)));
1535 	l1 = pmap_l0_to_l1(l0, KMSAN_ORIG_MIN_ADDRESS);
1536 	pmap_store(l1, L1_TABLE | PHYS_TO_PTE(
1537 	    pmap_early_vtophys((vm_offset_t)kmsan_orig_ptp + PAGE_SIZE)));
1538 	pmap_bootstrap_san1(KMSAN_ORIG_MIN_ADDRESS, 1);
1539 #endif
1540 }
1541 #endif
1542 
1543 /*
1544  *	Initialize a vm_page's machine-dependent fields.
1545  */
1546 void
pmap_page_init(vm_page_t m)1547 pmap_page_init(vm_page_t m)
1548 {
1549 
1550 	TAILQ_INIT(&m->md.pv_list);
1551 	m->md.pv_memattr = VM_MEMATTR_WRITE_BACK;
1552 }
1553 
1554 static void
pmap_init_asids(struct asid_set * set,int bits)1555 pmap_init_asids(struct asid_set *set, int bits)
1556 {
1557 	int i;
1558 
1559 	set->asid_bits = bits;
1560 
1561 	/*
1562 	 * We may be too early in the overall initialization process to use
1563 	 * bit_alloc().
1564 	 */
1565 	set->asid_set_size = 1 << set->asid_bits;
1566 	set->asid_set = kmem_malloc(bitstr_size(set->asid_set_size),
1567 	    M_WAITOK | M_ZERO);
1568 	for (i = 0; i < ASID_FIRST_AVAILABLE; i++)
1569 		bit_set(set->asid_set, i);
1570 	set->asid_next = ASID_FIRST_AVAILABLE;
1571 	mtx_init(&set->asid_set_mutex, "asid set", NULL, MTX_SPIN);
1572 }
1573 
1574 static void
pmap_init_pv_table(void)1575 pmap_init_pv_table(void)
1576 {
1577 	struct vm_phys_seg *seg, *next_seg;
1578 	struct pmap_large_md_page *pvd;
1579 	vm_size_t s;
1580 	int domain, i, j, pages;
1581 
1582 	/*
1583 	 * We depend on the size being evenly divisible into a page so
1584 	 * that the pv_table array can be indexed directly while
1585 	 * safely spanning multiple pages from different domains.
1586 	 */
1587 	CTASSERT(PAGE_SIZE % sizeof(*pvd) == 0);
1588 
1589 	/*
1590 	 * Calculate the size of the array.
1591 	 */
1592 	s = 0;
1593 	for (i = 0; i < vm_phys_nsegs; i++) {
1594 		seg = &vm_phys_segs[i];
1595 		pages = pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
1596 		    pmap_l2_pindex(seg->start);
1597 		s += round_page(pages * sizeof(*pvd));
1598 	}
1599 	pv_table = (struct pmap_large_md_page *)kva_alloc(s);
1600 	if (pv_table == NULL)
1601 		panic("%s: kva_alloc failed\n", __func__);
1602 
1603 	/*
1604 	 * Iterate physical segments to allocate domain-local memory for PV
1605 	 * list headers.
1606 	 */
1607 	pvd = pv_table;
1608 	for (i = 0; i < vm_phys_nsegs; i++) {
1609 		seg = &vm_phys_segs[i];
1610 		pages = pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
1611 		    pmap_l2_pindex(seg->start);
1612 		domain = seg->domain;
1613 
1614 		s = round_page(pages * sizeof(*pvd));
1615 
1616 		for (j = 0; j < s; j += PAGE_SIZE) {
1617 			vm_page_t m = vm_page_alloc_noobj_domain(domain,
1618 			    VM_ALLOC_ZERO);
1619 			if (m == NULL)
1620 				panic("failed to allocate PV table page");
1621 			pmap_qenter((vm_offset_t)pvd + j, &m, 1);
1622 		}
1623 
1624 		for (j = 0; j < s / sizeof(*pvd); j++) {
1625 			rw_init_flags(&pvd->pv_lock, "pmap pv list", RW_NEW);
1626 			TAILQ_INIT(&pvd->pv_page.pv_list);
1627 			pvd++;
1628 		}
1629 	}
1630 	pvd = &pv_dummy_large;
1631 	memset(pvd, 0, sizeof(*pvd));
1632 	rw_init_flags(&pvd->pv_lock, "pmap pv list dummy", RW_NEW);
1633 	TAILQ_INIT(&pvd->pv_page.pv_list);
1634 
1635 	/*
1636 	 * Set pointers from vm_phys_segs to pv_table.
1637 	 */
1638 	for (i = 0, pvd = pv_table; i < vm_phys_nsegs; i++) {
1639 		seg = &vm_phys_segs[i];
1640 		seg->md_first = pvd;
1641 		pvd += pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
1642 		    pmap_l2_pindex(seg->start);
1643 
1644 		/*
1645 		 * If there is a following segment, and the final
1646 		 * superpage of this segment and the initial superpage
1647 		 * of the next segment are the same then adjust the
1648 		 * pv_table entry for that next segment down by one so
1649 		 * that the pv_table entries will be shared.
1650 		 */
1651 		if (i + 1 < vm_phys_nsegs) {
1652 			next_seg = &vm_phys_segs[i + 1];
1653 			if (pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) - 1 ==
1654 			    pmap_l2_pindex(next_seg->start)) {
1655 				pvd--;
1656 			}
1657 		}
1658 	}
1659 }
1660 
1661 static cpu_feat_en
pmap_dbm_check(const struct cpu_feat * feat __unused,u_int midr __unused)1662 pmap_dbm_check(const struct cpu_feat *feat __unused, u_int midr __unused)
1663 {
1664 	uint64_t id_aa64mmfr1;
1665 
1666 	id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
1667 	if (ID_AA64MMFR1_HAFDBS_VAL(id_aa64mmfr1) >=
1668 	    ID_AA64MMFR1_HAFDBS_AF_DBS)
1669 		return (FEAT_DEFAULT_ENABLE);
1670 
1671 	return (FEAT_ALWAYS_DISABLE);
1672 }
1673 
1674 static bool
pmap_dbm_has_errata(const struct cpu_feat * feat __unused,u_int midr,u_int ** errata_list,u_int * errata_count)1675 pmap_dbm_has_errata(const struct cpu_feat *feat __unused, u_int midr,
1676     u_int **errata_list, u_int *errata_count)
1677 {
1678 	/* Disable on Cortex-A55 for erratum 1024718 - all revisions */
1679 	if (CPU_IMPL(midr) == CPU_IMPL_ARM &&
1680 	    CPU_PART(midr) == CPU_PART_CORTEX_A55) {
1681 		static u_int errata_id = 1024718;
1682 
1683 		*errata_list = &errata_id;
1684 		*errata_count = 1;
1685 		return (true);
1686 	}
1687 
1688 	/* Disable on Cortex-A510 for erratum 2051678 - r0p0 to r0p2 */
1689 	if (midr_check_var_part_range(midr, CPU_IMPL_ARM, CPU_PART_CORTEX_A510,
1690 	    0, 0, 0, 2)) {
1691 		static u_int errata_id = 2051678;
1692 
1693 		*errata_list = &errata_id;
1694 		*errata_count = 1;
1695 		return (true);
1696 	}
1697 
1698 	return (false);
1699 }
1700 
1701 static bool
pmap_dbm_enable(const struct cpu_feat * feat __unused,cpu_feat_errata errata_status,u_int * errata_list __unused,u_int errata_count)1702 pmap_dbm_enable(const struct cpu_feat *feat __unused,
1703     cpu_feat_errata errata_status, u_int *errata_list __unused,
1704     u_int errata_count)
1705 {
1706 	uint64_t tcr;
1707 
1708 	/* Skip if there is an erratum affecting DBM */
1709 	if (errata_status != ERRATA_NONE)
1710 		return (false);
1711 
1712 	tcr = READ_SPECIALREG(tcr_el1) | TCR_HD;
1713 	WRITE_SPECIALREG(tcr_el1, tcr);
1714 	isb();
1715 	/* Flush the local TLB for the TCR_HD flag change */
1716 	dsb(nshst);
1717 	__asm __volatile("tlbi vmalle1");
1718 	dsb(nsh);
1719 	isb();
1720 
1721 	return (true);
1722 }
1723 
1724 CPU_FEAT(feat_hafdbs, "Hardware management of the Access flag and dirty state",
1725     pmap_dbm_check, pmap_dbm_has_errata, pmap_dbm_enable,
1726     CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
1727 
1728 static cpu_feat_en
pmap_multiple_tlbi_check(const struct cpu_feat * feat __unused,u_int midr)1729 pmap_multiple_tlbi_check(const struct cpu_feat *feat __unused, u_int midr)
1730 {
1731 	/*
1732 	 * Cortex-A55 erratum 2441007 (Cat B rare)
1733 	 * Present in all revisions
1734 	 */
1735 	if (CPU_IMPL(midr) == CPU_IMPL_ARM &&
1736 	    CPU_PART(midr) == CPU_PART_CORTEX_A55)
1737 		return (FEAT_DEFAULT_DISABLE);
1738 
1739 	/*
1740 	 * Cortex-A76 erratum 1286807 (Cat B rare)
1741 	 * Present in r0p0 - r3p0
1742 	 * Fixed in r3p1
1743 	 */
1744 	if (midr_check_var_part_range(midr, CPU_IMPL_ARM, CPU_PART_CORTEX_A76,
1745 	    0, 0, 3, 0))
1746 		return (FEAT_DEFAULT_DISABLE);
1747 
1748 	/*
1749 	 * Cortex-A510 erratum 2441009 (Cat B rare)
1750 	 * Present in r0p0 - r1p1
1751 	 * Fixed in r1p2
1752 	 */
1753 	if (midr_check_var_part_range(midr, CPU_IMPL_ARM, CPU_PART_CORTEX_A510,
1754 	    0, 0, 1, 1))
1755 		return (FEAT_DEFAULT_DISABLE);
1756 
1757 	return (FEAT_ALWAYS_DISABLE);
1758 }
1759 
1760 static bool
pmap_multiple_tlbi_enable(const struct cpu_feat * feat __unused,cpu_feat_errata errata_status,u_int * errata_list __unused,u_int errata_count __unused)1761 pmap_multiple_tlbi_enable(const struct cpu_feat *feat __unused,
1762     cpu_feat_errata errata_status, u_int *errata_list __unused,
1763     u_int errata_count __unused)
1764 {
1765 	pmap_multiple_tlbi = true;
1766 	return (true);
1767 }
1768 
1769 CPU_FEAT(errata_multi_tlbi, "Multiple TLBI errata",
1770     pmap_multiple_tlbi_check, NULL, pmap_multiple_tlbi_enable,
1771     CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU);
1772 
1773 /*
1774  *	Initialize the pmap module.
1775  *
1776  *	Called by vm_mem_init(), to initialize any structures that the pmap
1777  *	system needs to map virtual memory.
1778  */
1779 void
pmap_init(void)1780 pmap_init(void)
1781 {
1782 	uint64_t mmfr1;
1783 	int i, vmid_bits;
1784 
1785 	/*
1786 	 * Are large page mappings enabled?
1787 	 */
1788 	TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
1789 	if (superpages_enabled) {
1790 		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
1791 		    ("pmap_init: can't assign to pagesizes[1]"));
1792 		pagesizes[1] = L3C_SIZE;
1793 		KASSERT(MAXPAGESIZES > 2 && pagesizes[2] == 0,
1794 		    ("pmap_init: can't assign to pagesizes[2]"));
1795 		pagesizes[2] = L2_SIZE;
1796 		if (L1_BLOCKS_SUPPORTED) {
1797 			KASSERT(MAXPAGESIZES > 3 && pagesizes[3] == 0,
1798 			    ("pmap_init: can't assign to pagesizes[3]"));
1799 			pagesizes[3] = L1_SIZE;
1800 		}
1801 	}
1802 
1803 	/*
1804 	 * Initialize the ASID allocator.
1805 	 */
1806 	pmap_init_asids(&asids,
1807 	    (READ_SPECIALREG(tcr_el1) & TCR_ASID_16) != 0 ? 16 : 8);
1808 
1809 	if (has_hyp()) {
1810 		mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
1811 		vmid_bits = 8;
1812 
1813 		if (ID_AA64MMFR1_VMIDBits_VAL(mmfr1) ==
1814 		    ID_AA64MMFR1_VMIDBits_16)
1815 			vmid_bits = 16;
1816 		pmap_init_asids(&vmids, vmid_bits);
1817 	}
1818 
1819 	/*
1820 	 * Initialize pv chunk lists.
1821 	 */
1822 	for (i = 0; i < PMAP_MEMDOM; i++) {
1823 		mtx_init(&pv_chunks[i].pvc_lock, "pmap pv chunk list", NULL,
1824 		    MTX_DEF);
1825 		TAILQ_INIT(&pv_chunks[i].pvc_list);
1826 	}
1827 	pmap_init_pv_table();
1828 
1829 	vm_initialized = 1;
1830 }
1831 
1832 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l1, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1833     "L1 (1GB/64GB) page mapping counters");
1834 
1835 static COUNTER_U64_DEFINE_EARLY(pmap_l1_demotions);
1836 SYSCTL_COUNTER_U64(_vm_pmap_l1, OID_AUTO, demotions, CTLFLAG_RD,
1837     &pmap_l1_demotions, "L1 (1GB/64GB) page demotions");
1838 
1839 SYSCTL_BOOL(_vm_pmap_l1, OID_AUTO, supported, CTLFLAG_RD, &pmap_l1_supported,
1840     0, "L1 blocks are supported");
1841 
1842 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2c, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1843     "L2C (32MB/1GB) page mapping counters");
1844 
1845 static COUNTER_U64_DEFINE_EARLY(pmap_l2c_demotions);
1846 SYSCTL_COUNTER_U64(_vm_pmap_l2c, OID_AUTO, demotions, CTLFLAG_RD,
1847     &pmap_l2c_demotions, "L2C (32MB/1GB) page demotions");
1848 
1849 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1850     "2MB page mapping counters");
1851 
1852 static COUNTER_U64_DEFINE_EARLY(pmap_l2_demotions);
1853 SYSCTL_COUNTER_U64(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
1854     &pmap_l2_demotions, "L2 (2MB/32MB) page demotions");
1855 
1856 static COUNTER_U64_DEFINE_EARLY(pmap_l2_mappings);
1857 SYSCTL_COUNTER_U64(_vm_pmap_l2, OID_AUTO, mappings, CTLFLAG_RD,
1858     &pmap_l2_mappings, "L2 (2MB/32MB) page mappings");
1859 
1860 static COUNTER_U64_DEFINE_EARLY(pmap_l2_p_failures);
1861 SYSCTL_COUNTER_U64(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
1862     &pmap_l2_p_failures, "L2 (2MB/32MB) page promotion failures");
1863 
1864 static COUNTER_U64_DEFINE_EARLY(pmap_l2_promotions);
1865 SYSCTL_COUNTER_U64(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
1866     &pmap_l2_promotions, "L2 (2MB/32MB) page promotions");
1867 
1868 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l3c, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1869     "L3C (64KB/2MB) page mapping counters");
1870 
1871 static COUNTER_U64_DEFINE_EARLY(pmap_l3c_demotions);
1872 SYSCTL_COUNTER_U64(_vm_pmap_l3c, OID_AUTO, demotions, CTLFLAG_RD,
1873     &pmap_l3c_demotions, "L3C (64KB/2MB) page demotions");
1874 
1875 static COUNTER_U64_DEFINE_EARLY(pmap_l3c_mappings);
1876 SYSCTL_COUNTER_U64(_vm_pmap_l3c, OID_AUTO, mappings, CTLFLAG_RD,
1877     &pmap_l3c_mappings, "L3C (64KB/2MB) page mappings");
1878 
1879 static COUNTER_U64_DEFINE_EARLY(pmap_l3c_p_failures);
1880 SYSCTL_COUNTER_U64(_vm_pmap_l3c, OID_AUTO, p_failures, CTLFLAG_RD,
1881     &pmap_l3c_p_failures, "L3C (64KB/2MB) page promotion failures");
1882 
1883 static COUNTER_U64_DEFINE_EARLY(pmap_l3c_promotions);
1884 SYSCTL_COUNTER_U64(_vm_pmap_l3c, OID_AUTO, promotions, CTLFLAG_RD,
1885     &pmap_l3c_promotions, "L3C (64KB/2MB) page promotions");
1886 
1887 /*
1888  * If the given value for "final_only" is false, then any cached intermediate-
1889  * level entries, i.e., L{0,1,2}_TABLE entries, are invalidated in addition to
1890  * any cached final-level entry, i.e., either an L{1,2}_BLOCK or L3_PAGE entry.
1891  * Otherwise, just the cached final-level entry is invalidated.
1892  */
1893 static __inline void
pmap_s1_invalidate_kernel(uint64_t r,bool final_only)1894 pmap_s1_invalidate_kernel(uint64_t r, bool final_only)
1895 {
1896 	if (final_only)
1897 		__asm __volatile("tlbi vaale1is, %0" : : "r" (r));
1898 	else
1899 		__asm __volatile("tlbi vaae1is, %0" : : "r" (r));
1900 }
1901 
1902 static __inline void
pmap_s1_invalidate_user(uint64_t r,bool final_only)1903 pmap_s1_invalidate_user(uint64_t r, bool final_only)
1904 {
1905 	if (final_only)
1906 		__asm __volatile("tlbi vale1is, %0" : : "r" (r));
1907 	else
1908 		__asm __volatile("tlbi vae1is, %0" : : "r" (r));
1909 }
1910 
1911 /*
1912  * Invalidates any cached final- and optionally intermediate-level TLB entries
1913  * for the specified virtual address in the given virtual address space.
1914  */
1915 static __inline void
pmap_s1_invalidate_page(pmap_t pmap,vm_offset_t va,bool final_only)1916 pmap_s1_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
1917 {
1918 	uint64_t r;
1919 
1920 	PMAP_ASSERT_STAGE1(pmap);
1921 
1922 	dsb(ishst);
1923 	r = TLBI_VA(va);
1924 	if (pmap == kernel_pmap) {
1925 		pmap_s1_invalidate_kernel(r, final_only);
1926 		if (pmap_multiple_tlbi) {
1927 			dsb(ish);
1928 			pmap_s1_invalidate_kernel(r, final_only);
1929 		}
1930 	} else {
1931 		r |= ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
1932 		pmap_s1_invalidate_user(r, final_only);
1933 		if (pmap_multiple_tlbi) {
1934 			dsb(ish);
1935 			pmap_s1_invalidate_user(r, final_only);
1936 		}
1937 	}
1938 	dsb(ish);
1939 	isb();
1940 }
1941 
1942 static __inline void
pmap_s2_invalidate_page(pmap_t pmap,vm_offset_t va,bool final_only)1943 pmap_s2_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
1944 {
1945 	PMAP_ASSERT_STAGE2(pmap);
1946 	MPASS(pmap_stage2_invalidate_range != NULL);
1947 	pmap_stage2_invalidate_range(pmap_to_ttbr0(pmap), va, va + PAGE_SIZE,
1948 	    final_only);
1949 }
1950 
1951 static __inline void
pmap_invalidate_page(pmap_t pmap,vm_offset_t va,bool final_only)1952 pmap_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
1953 {
1954 	if (pmap->pm_stage == PM_STAGE1)
1955 		pmap_s1_invalidate_page(pmap, va, final_only);
1956 	else
1957 		pmap_s2_invalidate_page(pmap, va, final_only);
1958 }
1959 
1960 /*
1961  * Use stride L{1,2}_SIZE when invalidating the TLB entries for L{1,2}_BLOCK
1962  * mappings.  Otherwise, use stride L3_SIZE.
1963  */
1964 static __inline void
pmap_s1_invalidate_strided(pmap_t pmap,vm_offset_t sva,vm_offset_t eva,vm_offset_t stride,bool final_only)1965 pmap_s1_invalidate_strided(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1966     vm_offset_t stride, bool final_only)
1967 {
1968 	uint64_t end, r, start;
1969 
1970 	PMAP_ASSERT_STAGE1(pmap);
1971 
1972 	dsb(ishst);
1973 	if (pmap == kernel_pmap) {
1974 		start = TLBI_VA(sva);
1975 		end = TLBI_VA(eva);
1976 		for (r = start; r < end; r += TLBI_VA(stride))
1977 			pmap_s1_invalidate_kernel(r, final_only);
1978 
1979 		if (pmap_multiple_tlbi) {
1980 			dsb(ish);
1981 			for (r = start; r < end; r += TLBI_VA(stride))
1982 				pmap_s1_invalidate_kernel(r, final_only);
1983 		}
1984 	} else {
1985 		start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
1986 		start |= TLBI_VA(sva);
1987 		end |= TLBI_VA(eva);
1988 		for (r = start; r < end; r += TLBI_VA(stride))
1989 			pmap_s1_invalidate_user(r, final_only);
1990 
1991 		if (pmap_multiple_tlbi) {
1992 			dsb(ish);
1993 			for (r = start; r < end; r += TLBI_VA(stride))
1994 				pmap_s1_invalidate_user(r, final_only);
1995 		}
1996 	}
1997 	dsb(ish);
1998 	isb();
1999 }
2000 
2001 /*
2002  * Invalidates any cached final- and optionally intermediate-level TLB entries
2003  * for the specified virtual address range in the given virtual address space.
2004  */
2005 static __inline void
pmap_s1_invalidate_range(pmap_t pmap,vm_offset_t sva,vm_offset_t eva,bool final_only)2006 pmap_s1_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2007     bool final_only)
2008 {
2009 	pmap_s1_invalidate_strided(pmap, sva, eva, L3_SIZE, final_only);
2010 }
2011 
2012 static __inline void
pmap_s2_invalidate_range(pmap_t pmap,vm_offset_t sva,vm_offset_t eva,bool final_only)2013 pmap_s2_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2014     bool final_only)
2015 {
2016 	PMAP_ASSERT_STAGE2(pmap);
2017 	MPASS(pmap_stage2_invalidate_range != NULL);
2018 	pmap_stage2_invalidate_range(pmap_to_ttbr0(pmap), sva, eva, final_only);
2019 }
2020 
2021 static __inline void
pmap_invalidate_range(pmap_t pmap,vm_offset_t sva,vm_offset_t eva,bool final_only)2022 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2023     bool final_only)
2024 {
2025 	if (pmap->pm_stage == PM_STAGE1)
2026 		pmap_s1_invalidate_range(pmap, sva, eva, final_only);
2027 	else
2028 		pmap_s2_invalidate_range(pmap, sva, eva, final_only);
2029 }
2030 
2031 void
pmap_s1_invalidate_all_kernel(void)2032 pmap_s1_invalidate_all_kernel(void)
2033 {
2034 	dsb(ishst);
2035 	__asm __volatile("tlbi vmalle1is");
2036 	dsb(ish);
2037 	if (pmap_multiple_tlbi) {
2038 		__asm __volatile("tlbi vmalle1is");
2039 		dsb(ish);
2040 	}
2041 	isb();
2042 }
2043 
2044 /*
2045  * Invalidates all cached intermediate- and final-level TLB entries for the
2046  * given virtual address space.
2047  */
2048 static __inline void
pmap_s1_invalidate_all(pmap_t pmap)2049 pmap_s1_invalidate_all(pmap_t pmap)
2050 {
2051 	uint64_t r;
2052 
2053 	PMAP_ASSERT_STAGE1(pmap);
2054 
2055 	dsb(ishst);
2056 	if (pmap == kernel_pmap) {
2057 		__asm __volatile("tlbi vmalle1is");
2058 		if (pmap_multiple_tlbi) {
2059 			dsb(ish);
2060 			__asm __volatile("tlbi vmalle1is");
2061 		}
2062 	} else {
2063 		r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
2064 		__asm __volatile("tlbi aside1is, %0" : : "r" (r));
2065 		if (pmap_multiple_tlbi) {
2066 			dsb(ish);
2067 			__asm __volatile("tlbi aside1is, %0" : : "r" (r));
2068 		}
2069 	}
2070 	dsb(ish);
2071 	isb();
2072 }
2073 
2074 static __inline void
pmap_s2_invalidate_all(pmap_t pmap)2075 pmap_s2_invalidate_all(pmap_t pmap)
2076 {
2077 	PMAP_ASSERT_STAGE2(pmap);
2078 	MPASS(pmap_stage2_invalidate_all != NULL);
2079 	pmap_stage2_invalidate_all(pmap_to_ttbr0(pmap));
2080 }
2081 
2082 static __inline void
pmap_invalidate_all(pmap_t pmap)2083 pmap_invalidate_all(pmap_t pmap)
2084 {
2085 	if (pmap->pm_stage == PM_STAGE1)
2086 		pmap_s1_invalidate_all(pmap);
2087 	else
2088 		pmap_s2_invalidate_all(pmap);
2089 }
2090 
2091 /*
2092  *	Routine:	pmap_extract
2093  *	Function:
2094  *		Extract the physical page address associated
2095  *		with the given map/virtual_address pair.
2096  */
2097 vm_paddr_t
pmap_extract(pmap_t pmap,vm_offset_t va)2098 pmap_extract(pmap_t pmap, vm_offset_t va)
2099 {
2100 	pt_entry_t *pte, tpte;
2101 	vm_paddr_t pa;
2102 	int lvl;
2103 
2104 	pa = 0;
2105 	PMAP_LOCK(pmap);
2106 	/*
2107 	 * Find the block or page map for this virtual address. pmap_pte
2108 	 * will return either a valid block/page entry, or NULL.
2109 	 */
2110 	pte = pmap_pte(pmap, va, &lvl);
2111 	if (pte != NULL) {
2112 		tpte = pmap_load(pte);
2113 		pa = PTE_TO_PHYS(tpte);
2114 		switch(lvl) {
2115 		case 1:
2116 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
2117 			KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
2118 			    ("pmap_extract: Invalid L1 pte found: %lx",
2119 			    tpte & ATTR_DESCR_MASK));
2120 			pa |= (va & L1_OFFSET);
2121 			break;
2122 		case 2:
2123 			KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
2124 			    ("pmap_extract: Invalid L2 pte found: %lx",
2125 			    tpte & ATTR_DESCR_MASK));
2126 			pa |= (va & L2_OFFSET);
2127 			break;
2128 		case 3:
2129 			KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
2130 			    ("pmap_extract: Invalid L3 pte found: %lx",
2131 			    tpte & ATTR_DESCR_MASK));
2132 			pa |= (va & L3_OFFSET);
2133 			break;
2134 		}
2135 	}
2136 	PMAP_UNLOCK(pmap);
2137 	return (pa);
2138 }
2139 
2140 /*
2141  *	Routine:	pmap_extract_and_hold
2142  *	Function:
2143  *		Atomically extract and hold the physical page
2144  *		with the given pmap and virtual address pair
2145  *		if that mapping permits the given protection.
2146  */
2147 vm_page_t
pmap_extract_and_hold(pmap_t pmap,vm_offset_t va,vm_prot_t prot)2148 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
2149 {
2150 	pt_entry_t *pte, tpte;
2151 	vm_offset_t off;
2152 	vm_page_t m;
2153 	int lvl;
2154 	bool use;
2155 
2156 	m = NULL;
2157 	PMAP_LOCK(pmap);
2158 	pte = pmap_pte(pmap, va, &lvl);
2159 	if (pte != NULL) {
2160 		tpte = pmap_load(pte);
2161 
2162 		KASSERT(lvl > 0 && lvl <= 3,
2163 		    ("pmap_extract_and_hold: Invalid level %d", lvl));
2164 		/*
2165 		 * Check that the pte is either a L3 page, or a L1 or L2 block
2166 		 * entry. We can assume L1_BLOCK == L2_BLOCK.
2167 		 */
2168 		KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) ||
2169 		    (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
2170 		    ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
2171 		     tpte & ATTR_DESCR_MASK));
2172 
2173 		use = false;
2174 		if ((prot & VM_PROT_WRITE) == 0)
2175 			use = true;
2176 		else if (pmap->pm_stage == PM_STAGE1 &&
2177 		    (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW))
2178 			use = true;
2179 		else if (pmap->pm_stage == PM_STAGE2 &&
2180 		    ((tpte & ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)) ==
2181 		     ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE)))
2182 			use = true;
2183 
2184 		if (use) {
2185 			switch (lvl) {
2186 			case 1:
2187 				off = va & L1_OFFSET;
2188 				break;
2189 			case 2:
2190 				off = va & L2_OFFSET;
2191 				break;
2192 			case 3:
2193 			default:
2194 				off = 0;
2195 			}
2196 			m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tpte) | off);
2197 			if (m != NULL && !vm_page_wire_mapped(m))
2198 				m = NULL;
2199 		}
2200 	}
2201 	PMAP_UNLOCK(pmap);
2202 	return (m);
2203 }
2204 
2205 /*
2206  * Returns true if the entire kernel virtual address range is mapped
2207  */
2208 static bool
pmap_kmapped_range(vm_offset_t sva,vm_size_t size)2209 pmap_kmapped_range(vm_offset_t sva, vm_size_t size)
2210 {
2211 	pt_entry_t *pte, tpte;
2212 	vm_offset_t eva;
2213 
2214 	KASSERT(sva >= VM_MIN_KERNEL_ADDRESS,
2215 	    ("%s: Invalid virtual address: %lx", __func__, sva));
2216 	MPASS(size != 0);
2217 	eva = sva + size - 1;
2218 	KASSERT(eva > sva, ("%s: Size too large: sva %lx, size %lx", __func__,
2219 	    sva, size));
2220 
2221 	while (sva <= eva) {
2222 		pte = pmap_l1(kernel_pmap, sva);
2223 		if (pte == NULL)
2224 			return (false);
2225 		tpte = pmap_load(pte);
2226 		if (tpte == 0)
2227 			return (false);
2228 		if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
2229 			sva = (sva & ~L1_OFFSET) + L1_SIZE;
2230 			continue;
2231 		}
2232 
2233 		pte = pmap_l1_to_l2(&tpte, sva);
2234 		tpte = pmap_load(pte);
2235 		if (tpte == 0)
2236 			return (false);
2237 		if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
2238 			sva = (sva & ~L2_OFFSET) + L2_SIZE;
2239 			continue;
2240 		}
2241 		pte = pmap_l2_to_l3(&tpte, sva);
2242 		tpte = pmap_load(pte);
2243 		if (tpte == 0)
2244 			return (false);
2245 		MPASS((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_PAGE);
2246 		if ((tpte & ATTR_CONTIGUOUS) == ATTR_CONTIGUOUS)
2247 			sva = (sva & ~L3C_OFFSET) + L3C_SIZE;
2248 		else
2249 			sva = (sva & ~L3_OFFSET) + L3_SIZE;
2250 	}
2251 
2252 	return (true);
2253 }
2254 
2255 /*
2256  * Walks the page tables to translate a kernel virtual address to a
2257  * physical address. Returns true if the kva is valid and stores the
2258  * physical address in pa if it is not NULL.
2259  *
2260  * See the comment above data_abort() for the rationale for specifying
2261  * NO_PERTHREAD_SSP here.
2262  */
2263 bool NO_PERTHREAD_SSP
pmap_klookup(vm_offset_t va,vm_paddr_t * pa)2264 pmap_klookup(vm_offset_t va, vm_paddr_t *pa)
2265 {
2266 	pt_entry_t *pte, tpte;
2267 	register_t intr;
2268 	uint64_t par;
2269 
2270 	/*
2271 	 * Disable interrupts so we don't get interrupted between asking
2272 	 * for address translation, and getting the result back.
2273 	 */
2274 	intr = intr_disable();
2275 	par = arm64_address_translate_s1e1r(va);
2276 	intr_restore(intr);
2277 
2278 	if (PAR_SUCCESS(par)) {
2279 		if (pa != NULL)
2280 			*pa = (par & PAR_PA_MASK) | (va & PAR_LOW_MASK);
2281 		return (true);
2282 	}
2283 
2284 	/*
2285 	 * Fall back to walking the page table. The address translation
2286 	 * instruction may fail when the page is in a break-before-make
2287 	 * sequence. As we only clear the valid bit in said sequence we
2288 	 * can walk the page table to find the physical address.
2289 	 */
2290 
2291 	pte = pmap_l1(kernel_pmap, va);
2292 	if (pte == NULL)
2293 		return (false);
2294 
2295 	/*
2296 	 * A concurrent pmap_update_entry() will clear the entry's valid bit
2297 	 * but leave the rest of the entry unchanged.  Therefore, we treat a
2298 	 * non-zero entry as being valid, and we ignore the valid bit when
2299 	 * determining whether the entry maps a block, page, or table.
2300 	 */
2301 	tpte = pmap_load(pte);
2302 	if (tpte == 0)
2303 		return (false);
2304 	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
2305 		if (pa != NULL)
2306 			*pa = PTE_TO_PHYS(tpte) | (va & L1_OFFSET);
2307 		return (true);
2308 	}
2309 	pte = pmap_l1_to_l2(&tpte, va);
2310 	tpte = pmap_load(pte);
2311 	if (tpte == 0)
2312 		return (false);
2313 	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
2314 		if (pa != NULL)
2315 			*pa = PTE_TO_PHYS(tpte) | (va & L2_OFFSET);
2316 		return (true);
2317 	}
2318 	pte = pmap_l2_to_l3(&tpte, va);
2319 	tpte = pmap_load(pte);
2320 	if (tpte == 0)
2321 		return (false);
2322 	if (pa != NULL)
2323 		*pa = PTE_TO_PHYS(tpte) | (va & L3_OFFSET);
2324 	return (true);
2325 }
2326 
2327 /*
2328  *	Routine:	pmap_kextract
2329  *	Function:
2330  *		Extract the physical page address associated with the given kernel
2331  *		virtual address.
2332  */
2333 vm_paddr_t
pmap_kextract(vm_offset_t va)2334 pmap_kextract(vm_offset_t va)
2335 {
2336 	vm_paddr_t pa;
2337 
2338 	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
2339 		return (DMAP_TO_PHYS(va));
2340 
2341 	if (pmap_klookup(va, &pa) == false)
2342 		return (0);
2343 	return (pa);
2344 }
2345 
2346 /***************************************************
2347  * Low level mapping routines.....
2348  ***************************************************/
2349 
2350 void
pmap_kenter(vm_offset_t sva,vm_size_t size,vm_paddr_t pa,int mode)2351 pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
2352 {
2353 	pd_entry_t *pde;
2354 	pt_entry_t attr, old_l3e, *pte;
2355 	vm_offset_t va;
2356 	vm_page_t mpte;
2357 	int error, lvl;
2358 
2359 	KASSERT((pa & L3_OFFSET) == 0,
2360 	    ("pmap_kenter: Invalid physical address"));
2361 	KASSERT((sva & L3_OFFSET) == 0,
2362 	    ("pmap_kenter: Invalid virtual address"));
2363 	KASSERT((size & PAGE_MASK) == 0,
2364 	    ("pmap_kenter: Mapping is not page-sized"));
2365 
2366 	attr = ATTR_AF | pmap_sh_attr | ATTR_S1_AP(ATTR_S1_AP_RW) |
2367 	    ATTR_S1_XN | ATTR_KERN_GP | ATTR_S1_IDX(mode);
2368 	old_l3e = 0;
2369 	va = sva;
2370 	while (size != 0) {
2371 		pde = pmap_pde(kernel_pmap, va, &lvl);
2372 		KASSERT(pde != NULL,
2373 		    ("pmap_kenter: Invalid page entry, va: 0x%lx", va));
2374 		KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
2375 
2376 		/*
2377 		 * If we have an aligned, contiguous chunk of L2_SIZE, try
2378 		 * to create an L2_BLOCK mapping.
2379 		 */
2380 		if ((va & L2_OFFSET) == 0 && size >= L2_SIZE &&
2381 		    (pa & L2_OFFSET) == 0 && vm_initialized) {
2382 			mpte = PTE_TO_VM_PAGE(pmap_load(pde));
2383 			KASSERT(pmap_every_pte_zero(VM_PAGE_TO_PHYS(mpte)),
2384 			    ("pmap_kenter: Unexpected mapping"));
2385 			PMAP_LOCK(kernel_pmap);
2386 			error = pmap_insert_pt_page(kernel_pmap, mpte, false,
2387 			    false);
2388 			if (error == 0) {
2389 				attr &= ~ATTR_CONTIGUOUS;
2390 
2391 				/*
2392 				 * Although the page table page "mpte" should
2393 				 * be devoid of mappings, the TLB might hold
2394 				 * intermediate entries that reference it, so
2395 				 * we perform a single-page invalidation.
2396 				 */
2397 				pmap_update_entry(kernel_pmap, pde,
2398 				    PHYS_TO_PTE(pa) | attr | L2_BLOCK, va,
2399 				    PAGE_SIZE);
2400 			}
2401 			PMAP_UNLOCK(kernel_pmap);
2402 			if (error == 0) {
2403 				va += L2_SIZE;
2404 				pa += L2_SIZE;
2405 				size -= L2_SIZE;
2406 				continue;
2407 			}
2408 		}
2409 
2410 		/*
2411 		 * If we have an aligned, contiguous chunk of L3C_ENTRIES
2412 		 * L3 pages, set the contiguous bit within each PTE so that
2413 		 * the chunk can be cached using only one TLB entry.
2414 		 */
2415 		if ((va & L3C_OFFSET) == 0 && (pa & L3C_OFFSET) == 0) {
2416 			if (size >= L3C_SIZE)
2417 				attr |= ATTR_CONTIGUOUS;
2418 			else
2419 				attr &= ~ATTR_CONTIGUOUS;
2420 		}
2421 
2422 		pte = pmap_l2_to_l3(pde, va);
2423 		old_l3e |= pmap_load_store(pte, PHYS_TO_PTE(pa) | attr |
2424 		    L3_PAGE);
2425 
2426 		va += PAGE_SIZE;
2427 		pa += PAGE_SIZE;
2428 		size -= PAGE_SIZE;
2429 	}
2430 	if ((old_l3e & ATTR_DESCR_VALID) != 0)
2431 		pmap_s1_invalidate_range(kernel_pmap, sva, va, true);
2432 	else {
2433 		/*
2434 		 * Because the old entries were invalid and the new mappings
2435 		 * are not executable, an isb is not required.
2436 		 */
2437 		dsb(ishst);
2438 	}
2439 }
2440 
2441 void
pmap_kenter_device(vm_offset_t sva,vm_size_t size,vm_paddr_t pa)2442 pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
2443 {
2444 
2445 	pmap_kenter(sva, size, pa, VM_MEMATTR_DEVICE);
2446 }
2447 
2448 /*
2449  * Remove a page from the kernel pagetables.
2450  */
2451 void
pmap_kremove(vm_offset_t va)2452 pmap_kremove(vm_offset_t va)
2453 {
2454 	pt_entry_t *pte;
2455 
2456 	pte = pmap_pte_exists(kernel_pmap, va, 3, __func__);
2457 	KASSERT((pmap_load(pte) & ATTR_CONTIGUOUS) == 0,
2458 	    ("pmap_kremove: unexpected ATTR_CONTIGUOUS"));
2459 	pmap_clear(pte);
2460 	pmap_s1_invalidate_page(kernel_pmap, va, true);
2461 }
2462 
2463 /*
2464  * Remove the specified range of mappings from the kernel address space.
2465  *
2466  * Should only be applied to mappings that were created by pmap_kenter() or
2467  * pmap_kenter_device().  Nothing about this function is actually specific
2468  * to device mappings.
2469  */
2470 void
pmap_kremove_device(vm_offset_t sva,vm_size_t size)2471 pmap_kremove_device(vm_offset_t sva, vm_size_t size)
2472 {
2473 	pt_entry_t *ptep, *ptep_end;
2474 	vm_offset_t va;
2475 	int lvl;
2476 
2477 	KASSERT((sva & L3_OFFSET) == 0,
2478 	    ("pmap_kremove_device: Invalid virtual address"));
2479 	KASSERT((size & PAGE_MASK) == 0,
2480 	    ("pmap_kremove_device: Mapping is not page-sized"));
2481 
2482 	va = sva;
2483 	while (size != 0) {
2484 		ptep = pmap_pte(kernel_pmap, va, &lvl);
2485 		KASSERT(ptep != NULL, ("Invalid page table, va: 0x%lx", va));
2486 		switch (lvl) {
2487 		case 2:
2488 			KASSERT((va & L2_OFFSET) == 0,
2489 			    ("Unaligned virtual address"));
2490 			KASSERT(size >= L2_SIZE, ("Insufficient size"));
2491 
2492 			if (va != sva) {
2493 				pmap_s1_invalidate_range(kernel_pmap, sva, va,
2494 				    true);
2495 			}
2496 			pmap_clear(ptep);
2497 			pmap_s1_invalidate_page(kernel_pmap, va, true);
2498 			PMAP_LOCK(kernel_pmap);
2499 			pmap_remove_kernel_l2(kernel_pmap, ptep, va);
2500 			PMAP_UNLOCK(kernel_pmap);
2501 
2502 			va += L2_SIZE;
2503 			sva = va;
2504 			size -= L2_SIZE;
2505 			break;
2506 		case 3:
2507 			if ((pmap_load(ptep) & ATTR_CONTIGUOUS) != 0) {
2508 				KASSERT((va & L3C_OFFSET) == 0,
2509 				    ("Unaligned L3C virtual address"));
2510 				KASSERT(size >= L3C_SIZE,
2511 				    ("Insufficient L3C size"));
2512 
2513 				ptep_end = ptep + L3C_ENTRIES;
2514 				for (; ptep < ptep_end; ptep++)
2515 					pmap_clear(ptep);
2516 
2517 				va += L3C_SIZE;
2518 				size -= L3C_SIZE;
2519 				break;
2520 			}
2521 			pmap_clear(ptep);
2522 
2523 			va += PAGE_SIZE;
2524 			size -= PAGE_SIZE;
2525 			break;
2526 		default:
2527 			__assert_unreachable();
2528 			break;
2529 		}
2530 	}
2531 	if (va != sva)
2532 		pmap_s1_invalidate_range(kernel_pmap, sva, va, true);
2533 }
2534 
2535 /*
2536  *	Used to map a range of physical addresses into kernel
2537  *	virtual address space.
2538  *
2539  *	The value passed in '*virt' is a suggested virtual address for
2540  *	the mapping. Architectures which can support a direct-mapped
2541  *	physical to virtual region can return the appropriate address
2542  *	within that region, leaving '*virt' unchanged. Other
2543  *	architectures should map the pages starting at '*virt' and
2544  *	update '*virt' with the first usable address after the mapped
2545  *	region.
2546  */
2547 vm_offset_t
pmap_map(vm_offset_t * virt,vm_paddr_t start,vm_paddr_t end,int prot)2548 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
2549 {
2550 	return PHYS_TO_DMAP(start);
2551 }
2552 
2553 /*
2554  * Add a list of wired pages to the kva
2555  * this routine is only used for temporary
2556  * kernel mappings that do not need to have
2557  * page modification or references recorded.
2558  * Note that old mappings are simply written
2559  * over.  The page *must* be wired.
2560  * Note: SMP coherent.  Uses a ranged shootdown IPI.
2561  */
2562 void
pmap_qenter(vm_offset_t sva,vm_page_t * ma,int count)2563 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
2564 {
2565 	pd_entry_t *pde;
2566 	pt_entry_t attr, old_l3e, *pte;
2567 	vm_offset_t va;
2568 	vm_page_t m;
2569 	int i, lvl;
2570 
2571 	old_l3e = 0;
2572 	va = sva;
2573 	for (i = 0; i < count; i++) {
2574 		pde = pmap_pde(kernel_pmap, va, &lvl);
2575 		KASSERT(pde != NULL,
2576 		    ("pmap_qenter: Invalid page entry, va: 0x%lx", va));
2577 		KASSERT(lvl == 2,
2578 		    ("pmap_qenter: Invalid level %d", lvl));
2579 
2580 		m = ma[i];
2581 		attr = ATTR_AF | pmap_sh_attr |
2582 		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
2583 		    ATTR_KERN_GP | ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
2584 		pte = pmap_l2_to_l3(pde, va);
2585 		old_l3e |= pmap_load_store(pte, VM_PAGE_TO_PTE(m) | attr);
2586 
2587 		va += L3_SIZE;
2588 	}
2589 	if ((old_l3e & ATTR_DESCR_VALID) != 0)
2590 		pmap_s1_invalidate_range(kernel_pmap, sva, va, true);
2591 	else {
2592 		/*
2593 		 * Because the old entries were invalid and the new mappings
2594 		 * are not executable, an isb is not required.
2595 		 */
2596 		dsb(ishst);
2597 	}
2598 }
2599 
2600 /*
2601  * This routine tears out page mappings from the
2602  * kernel -- it is meant only for temporary mappings.
2603  */
2604 void
pmap_qremove(vm_offset_t sva,int count)2605 pmap_qremove(vm_offset_t sva, int count)
2606 {
2607 	pt_entry_t *pte;
2608 	vm_offset_t va;
2609 
2610 	KASSERT(ADDR_IS_CANONICAL(sva),
2611 	    ("%s: Address not in canonical form: %lx", __func__, sva));
2612 	KASSERT(ADDR_IS_KERNEL(sva), ("usermode va %lx", sva));
2613 
2614 	va = sva;
2615 	while (count-- > 0) {
2616 		pte = pmap_pte_exists(kernel_pmap, va, 3, NULL);
2617 		if (pte != NULL) {
2618 			pmap_clear(pte);
2619 		}
2620 
2621 		va += PAGE_SIZE;
2622 	}
2623 	pmap_s1_invalidate_range(kernel_pmap, sva, va, true);
2624 }
2625 
2626 /***************************************************
2627  * Page table page management routines.....
2628  ***************************************************/
2629 /*
2630  * Schedule the specified unused page table page to be freed.  Specifically,
2631  * add the page to the specified list of pages that will be released to the
2632  * physical memory manager after the TLB has been updated.
2633  */
2634 static __inline void
pmap_add_delayed_free_list(vm_page_t m,struct spglist * free,bool set_PG_ZERO)2635 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO)
2636 {
2637 
2638 	if (set_PG_ZERO)
2639 		m->flags |= PG_ZERO;
2640 	else
2641 		m->flags &= ~PG_ZERO;
2642 	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
2643 }
2644 
2645 /*
2646  * Decrements a page table page's reference count, which is used to record the
2647  * number of valid page table entries within the page.  If the reference count
2648  * drops to zero, then the page table page is unmapped.  Returns true if the
2649  * page table page was unmapped and false otherwise.
2650  */
2651 static inline bool
pmap_unwire_l3(pmap_t pmap,vm_offset_t va,vm_page_t m,struct spglist * free)2652 pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
2653 {
2654 
2655 	--m->ref_count;
2656 	if (m->ref_count == 0) {
2657 		_pmap_unwire_l3(pmap, va, m, free);
2658 		return (true);
2659 	} else
2660 		return (false);
2661 }
2662 
2663 static void
_pmap_unwire_l3(pmap_t pmap,vm_offset_t va,vm_page_t m,struct spglist * free)2664 _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
2665 {
2666 
2667 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2668 	/*
2669 	 * unmap the page table page
2670 	 */
2671 	if (m->pindex >= (NUL2E + NUL1E)) {
2672 		/* l1 page */
2673 		pd_entry_t *l0;
2674 
2675 		l0 = pmap_l0(pmap, va);
2676 		pmap_clear(l0);
2677 	} else if (m->pindex >= NUL2E) {
2678 		/* l2 page */
2679 		pd_entry_t *l1;
2680 
2681 		l1 = pmap_l1(pmap, va);
2682 		pmap_clear(l1);
2683 	} else {
2684 		/* l3 page */
2685 		pd_entry_t *l2;
2686 
2687 		l2 = pmap_l2(pmap, va);
2688 		pmap_clear(l2);
2689 	}
2690 	pmap_resident_count_dec(pmap, 1);
2691 	if (m->pindex < NUL2E) {
2692 		/* We just released an l3, unhold the matching l2 */
2693 		pd_entry_t *l1, tl1;
2694 		vm_page_t l2pg;
2695 
2696 		l1 = pmap_l1(pmap, va);
2697 		tl1 = pmap_load(l1);
2698 		l2pg = PTE_TO_VM_PAGE(tl1);
2699 		pmap_unwire_l3(pmap, va, l2pg, free);
2700 	} else if (m->pindex < (NUL2E + NUL1E)) {
2701 		/* We just released an l2, unhold the matching l1 */
2702 		pd_entry_t *l0, tl0;
2703 		vm_page_t l1pg;
2704 
2705 		l0 = pmap_l0(pmap, va);
2706 		tl0 = pmap_load(l0);
2707 		l1pg = PTE_TO_VM_PAGE(tl0);
2708 		pmap_unwire_l3(pmap, va, l1pg, free);
2709 	}
2710 	pmap_invalidate_page(pmap, va, false);
2711 
2712 	/*
2713 	 * Put page on a list so that it is released after
2714 	 * *ALL* TLB shootdown is done
2715 	 */
2716 	pmap_add_delayed_free_list(m, free, true);
2717 }
2718 
2719 /*
2720  * After removing a page table entry, this routine is used to
2721  * conditionally free the page, and manage the reference count.
2722  */
2723 static int
pmap_unuse_pt(pmap_t pmap,vm_offset_t va,pd_entry_t ptepde,struct spglist * free)2724 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
2725     struct spglist *free)
2726 {
2727 	vm_page_t mpte;
2728 
2729 	KASSERT(ADDR_IS_CANONICAL(va),
2730 	    ("%s: Address not in canonical form: %lx", __func__, va));
2731 	if (ADDR_IS_KERNEL(va))
2732 		return (0);
2733 	KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
2734 	mpte = PTE_TO_VM_PAGE(ptepde);
2735 	return (pmap_unwire_l3(pmap, va, mpte, free));
2736 }
2737 
2738 /*
2739  * Release a page table page reference after a failed attempt to create a
2740  * mapping.
2741  */
2742 static void
pmap_abort_ptp(pmap_t pmap,vm_offset_t va,vm_page_t mpte)2743 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
2744 {
2745 	struct spglist free;
2746 
2747 	SLIST_INIT(&free);
2748 	if (pmap_unwire_l3(pmap, va, mpte, &free))
2749 		vm_page_free_pages_toq(&free, true);
2750 }
2751 
2752 void
pmap_pinit0(pmap_t pmap)2753 pmap_pinit0(pmap_t pmap)
2754 {
2755 
2756 	PMAP_LOCK_INIT(pmap);
2757 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
2758 	pmap->pm_l0_paddr = READ_SPECIALREG(ttbr0_el1);
2759 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
2760 	TAILQ_INIT(&pmap->pm_pvchunk);
2761 	vm_radix_init(&pmap->pm_root);
2762 	pmap->pm_cookie = COOKIE_FROM(ASID_RESERVED_FOR_PID_0, INT_MIN);
2763 	pmap->pm_stage = PM_STAGE1;
2764 	pmap->pm_levels = 4;
2765 	pmap->pm_ttbr = pmap->pm_l0_paddr;
2766 	pmap->pm_asid_set = &asids;
2767 	pmap->pm_bti = NULL;
2768 
2769 	PCPU_SET(curpmap, pmap);
2770 }
2771 
2772 int
pmap_pinit_stage(pmap_t pmap,enum pmap_stage stage,int levels)2773 pmap_pinit_stage(pmap_t pmap, enum pmap_stage stage, int levels)
2774 {
2775 	vm_page_t m;
2776 
2777 	/*
2778 	 * allocate the l0 page
2779 	 */
2780 	m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
2781 	    VM_ALLOC_ZERO);
2782 	pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(m);
2783 	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
2784 
2785 	TAILQ_INIT(&pmap->pm_pvchunk);
2786 	vm_radix_init(&pmap->pm_root);
2787 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
2788 	pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX);
2789 
2790 	MPASS(levels == 3 || levels == 4);
2791 	pmap->pm_levels = levels;
2792 	pmap->pm_stage = stage;
2793 	pmap->pm_bti = NULL;
2794 	switch (stage) {
2795 	case PM_STAGE1:
2796 		pmap->pm_asid_set = &asids;
2797 		if (pmap_bti_support) {
2798 			pmap->pm_bti = malloc(sizeof(struct rangeset), M_DEVBUF,
2799 			    M_ZERO | M_WAITOK);
2800 			rangeset_init(pmap->pm_bti, bti_dup_range,
2801 			    bti_free_range, pmap, M_NOWAIT);
2802 		}
2803 		break;
2804 	case PM_STAGE2:
2805 		pmap->pm_asid_set = &vmids;
2806 		break;
2807 	default:
2808 		panic("%s: Invalid pmap type %d", __func__, stage);
2809 		break;
2810 	}
2811 
2812 	/* XXX Temporarily disable deferred ASID allocation. */
2813 	pmap_alloc_asid(pmap);
2814 
2815 	/*
2816 	 * Allocate the level 1 entry to use as the root. This will increase
2817 	 * the refcount on the level 1 page so it won't be removed until
2818 	 * pmap_release() is called.
2819 	 */
2820 	if (pmap->pm_levels == 3) {
2821 		PMAP_LOCK(pmap);
2822 		m = _pmap_alloc_l3(pmap, NUL2E + NUL1E, NULL);
2823 		PMAP_UNLOCK(pmap);
2824 	}
2825 	pmap->pm_ttbr = VM_PAGE_TO_PHYS(m);
2826 
2827 	return (1);
2828 }
2829 
2830 int
pmap_pinit(pmap_t pmap)2831 pmap_pinit(pmap_t pmap)
2832 {
2833 
2834 	return (pmap_pinit_stage(pmap, PM_STAGE1, 4));
2835 }
2836 
2837 /*
2838  * This routine is called if the desired page table page does not exist.
2839  *
2840  * If page table page allocation fails, this routine may sleep before
2841  * returning NULL.  It sleeps only if a lock pointer was given.
2842  *
2843  * Note: If a page allocation fails at page table level two or three,
2844  * one or two pages may be held during the wait, only to be released
2845  * afterwards.  This conservative approach is easily argued to avoid
2846  * race conditions.
2847  */
2848 static vm_page_t
_pmap_alloc_l3(pmap_t pmap,vm_pindex_t ptepindex,struct rwlock ** lockp)2849 _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
2850 {
2851 	vm_page_t m, l1pg, l2pg;
2852 
2853 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2854 
2855 	/*
2856 	 * Allocate a page table page.
2857 	 */
2858 	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
2859 		if (lockp != NULL) {
2860 			RELEASE_PV_LIST_LOCK(lockp);
2861 			PMAP_UNLOCK(pmap);
2862 			vm_wait(NULL);
2863 			PMAP_LOCK(pmap);
2864 		}
2865 
2866 		/*
2867 		 * Indicate the need to retry.  While waiting, the page table
2868 		 * page may have been allocated.
2869 		 */
2870 		return (NULL);
2871 	}
2872 	m->pindex = ptepindex;
2873 
2874 	/*
2875 	 * Because of AArch64's weak memory consistency model, we must have a
2876 	 * barrier here to ensure that the stores for zeroing "m", whether by
2877 	 * pmap_zero_page() or an earlier function, are visible before adding
2878 	 * "m" to the page table.  Otherwise, a page table walk by another
2879 	 * processor's MMU could see the mapping to "m" and a stale, non-zero
2880 	 * PTE within "m".
2881 	 */
2882 	dmb(ishst);
2883 
2884 	/*
2885 	 * Map the pagetable page into the process address space, if
2886 	 * it isn't already there.
2887 	 */
2888 
2889 	if (ptepindex >= (NUL2E + NUL1E)) {
2890 		pd_entry_t *l0p, l0e;
2891 		vm_pindex_t l0index;
2892 
2893 		l0index = ptepindex - (NUL2E + NUL1E);
2894 		l0p = &pmap->pm_l0[l0index];
2895 		KASSERT((pmap_load(l0p) & ATTR_DESCR_VALID) == 0,
2896 		    ("%s: L0 entry %#lx is valid", __func__, pmap_load(l0p)));
2897 		l0e = VM_PAGE_TO_PTE(m) | L0_TABLE;
2898 
2899 		/*
2900 		 * Mark all kernel memory as not accessible from userspace
2901 		 * and userspace memory as not executable from the kernel.
2902 		 * This has been done for the bootstrap L0 entries in
2903 		 * locore.S.
2904 		 */
2905 		if (pmap == kernel_pmap)
2906 			l0e |= TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0;
2907 		else
2908 			l0e |= TATTR_PXN_TABLE;
2909 		pmap_store(l0p, l0e);
2910 	} else if (ptepindex >= NUL2E) {
2911 		vm_pindex_t l0index, l1index;
2912 		pd_entry_t *l0, *l1;
2913 		pd_entry_t tl0;
2914 
2915 		l1index = ptepindex - NUL2E;
2916 		l0index = l1index >> Ln_ENTRIES_SHIFT;
2917 
2918 		l0 = &pmap->pm_l0[l0index];
2919 		tl0 = pmap_load(l0);
2920 		if (tl0 == 0) {
2921 			/* recurse for allocating page dir */
2922 			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index,
2923 			    lockp) == NULL) {
2924 				vm_page_unwire_noq(m);
2925 				vm_page_free_zero(m);
2926 				return (NULL);
2927 			}
2928 		} else {
2929 			l1pg = PTE_TO_VM_PAGE(tl0);
2930 			l1pg->ref_count++;
2931 		}
2932 
2933 		l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l0)));
2934 		l1 = &l1[ptepindex & Ln_ADDR_MASK];
2935 		KASSERT((pmap_load(l1) & ATTR_DESCR_VALID) == 0,
2936 		    ("%s: L1 entry %#lx is valid", __func__, pmap_load(l1)));
2937 		pmap_store(l1, VM_PAGE_TO_PTE(m) | L1_TABLE);
2938 	} else {
2939 		vm_pindex_t l0index, l1index;
2940 		pd_entry_t *l0, *l1, *l2;
2941 		pd_entry_t tl0, tl1;
2942 
2943 		l1index = ptepindex >> Ln_ENTRIES_SHIFT;
2944 		l0index = l1index >> Ln_ENTRIES_SHIFT;
2945 
2946 		l0 = &pmap->pm_l0[l0index];
2947 		tl0 = pmap_load(l0);
2948 		if (tl0 == 0) {
2949 			/* recurse for allocating page dir */
2950 			if (_pmap_alloc_l3(pmap, NUL2E + l1index,
2951 			    lockp) == NULL) {
2952 				vm_page_unwire_noq(m);
2953 				vm_page_free_zero(m);
2954 				return (NULL);
2955 			}
2956 			tl0 = pmap_load(l0);
2957 			l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(tl0));
2958 			l1 = &l1[l1index & Ln_ADDR_MASK];
2959 		} else {
2960 			l1 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(tl0));
2961 			l1 = &l1[l1index & Ln_ADDR_MASK];
2962 			tl1 = pmap_load(l1);
2963 			if (tl1 == 0) {
2964 				/* recurse for allocating page dir */
2965 				if (_pmap_alloc_l3(pmap, NUL2E + l1index,
2966 				    lockp) == NULL) {
2967 					vm_page_unwire_noq(m);
2968 					vm_page_free_zero(m);
2969 					return (NULL);
2970 				}
2971 			} else {
2972 				l2pg = PTE_TO_VM_PAGE(tl1);
2973 				l2pg->ref_count++;
2974 			}
2975 		}
2976 
2977 		l2 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l1)));
2978 		l2 = &l2[ptepindex & Ln_ADDR_MASK];
2979 		KASSERT((pmap_load(l2) & ATTR_DESCR_VALID) == 0,
2980 		    ("%s: L2 entry %#lx is valid", __func__, pmap_load(l2)));
2981 		pmap_store(l2, VM_PAGE_TO_PTE(m) | L2_TABLE);
2982 	}
2983 
2984 	pmap_resident_count_inc(pmap, 1);
2985 
2986 	return (m);
2987 }
2988 
2989 static pd_entry_t *
pmap_alloc_l2(pmap_t pmap,vm_offset_t va,vm_page_t * l2pgp,struct rwlock ** lockp)2990 pmap_alloc_l2(pmap_t pmap, vm_offset_t va, vm_page_t *l2pgp,
2991     struct rwlock **lockp)
2992 {
2993 	pd_entry_t *l1, *l2;
2994 	vm_page_t l2pg;
2995 	vm_pindex_t l2pindex;
2996 
2997 	KASSERT(ADDR_IS_CANONICAL(va),
2998 	    ("%s: Address not in canonical form: %lx", __func__, va));
2999 
3000 retry:
3001 	l1 = pmap_l1(pmap, va);
3002 	if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) {
3003 		l2 = pmap_l1_to_l2(l1, va);
3004 		if (ADDR_IS_USER(va)) {
3005 			/* Add a reference to the L2 page. */
3006 			l2pg = PTE_TO_VM_PAGE(pmap_load(l1));
3007 			l2pg->ref_count++;
3008 		} else
3009 			l2pg = NULL;
3010 	} else if (ADDR_IS_USER(va)) {
3011 		/* Allocate a L2 page. */
3012 		l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
3013 		l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
3014 		if (l2pg == NULL) {
3015 			if (lockp != NULL)
3016 				goto retry;
3017 			else
3018 				return (NULL);
3019 		}
3020 		l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
3021 		l2 = &l2[pmap_l2_index(va)];
3022 	} else
3023 		panic("pmap_alloc_l2: missing page table page for va %#lx",
3024 		    va);
3025 	*l2pgp = l2pg;
3026 	return (l2);
3027 }
3028 
3029 static vm_page_t
pmap_alloc_l3(pmap_t pmap,vm_offset_t va,struct rwlock ** lockp)3030 pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
3031 {
3032 	vm_pindex_t ptepindex;
3033 	pd_entry_t *pde, tpde;
3034 #ifdef INVARIANTS
3035 	pt_entry_t *pte;
3036 #endif
3037 	vm_page_t m;
3038 	int lvl;
3039 
3040 	/*
3041 	 * Calculate pagetable page index
3042 	 */
3043 	ptepindex = pmap_l2_pindex(va);
3044 retry:
3045 	/*
3046 	 * Get the page directory entry
3047 	 */
3048 	pde = pmap_pde(pmap, va, &lvl);
3049 
3050 	/*
3051 	 * If the page table page is mapped, we just increment the hold count,
3052 	 * and activate it. If we get a level 2 pde it will point to a level 3
3053 	 * table.
3054 	 */
3055 	switch (lvl) {
3056 	case -1:
3057 		break;
3058 	case 0:
3059 #ifdef INVARIANTS
3060 		pte = pmap_l0_to_l1(pde, va);
3061 		KASSERT(pmap_load(pte) == 0,
3062 		    ("pmap_alloc_l3: TODO: l0 superpages"));
3063 #endif
3064 		break;
3065 	case 1:
3066 #ifdef INVARIANTS
3067 		pte = pmap_l1_to_l2(pde, va);
3068 		KASSERT(pmap_load(pte) == 0,
3069 		    ("pmap_alloc_l3: TODO: l1 superpages"));
3070 #endif
3071 		break;
3072 	case 2:
3073 		tpde = pmap_load(pde);
3074 		if (tpde != 0) {
3075 			m = PTE_TO_VM_PAGE(tpde);
3076 			m->ref_count++;
3077 			return (m);
3078 		}
3079 		break;
3080 	default:
3081 		panic("pmap_alloc_l3: Invalid level %d", lvl);
3082 	}
3083 
3084 	/*
3085 	 * Here if the pte page isn't mapped, or if it has been deallocated.
3086 	 */
3087 	m = _pmap_alloc_l3(pmap, ptepindex, lockp);
3088 	if (m == NULL && lockp != NULL)
3089 		goto retry;
3090 
3091 	return (m);
3092 }
3093 
3094 /***************************************************
3095  * Pmap allocation/deallocation routines.
3096  ***************************************************/
3097 
3098 /*
3099  * Release any resources held by the given physical map.
3100  * Called when a pmap initialized by pmap_pinit is being released.
3101  * Should only be called if the map contains no valid mappings.
3102  */
3103 void
pmap_release(pmap_t pmap)3104 pmap_release(pmap_t pmap)
3105 {
3106 	bool rv __diagused;
3107 	struct spglist freelist;
3108 	struct asid_set *set;
3109 	vm_page_t m;
3110 	int asid;
3111 
3112 	if (pmap->pm_levels != 4) {
3113 		PMAP_ASSERT_STAGE2(pmap);
3114 		KASSERT(pmap->pm_stats.resident_count == 1,
3115 		    ("pmap_release: pmap resident count %ld != 0",
3116 		    pmap->pm_stats.resident_count));
3117 		KASSERT((pmap->pm_l0[0] & ATTR_DESCR_VALID) == ATTR_DESCR_VALID,
3118 		    ("pmap_release: Invalid l0 entry: %lx", pmap->pm_l0[0]));
3119 
3120 		SLIST_INIT(&freelist);
3121 		m = PHYS_TO_VM_PAGE(pmap->pm_ttbr);
3122 		PMAP_LOCK(pmap);
3123 		rv = pmap_unwire_l3(pmap, 0, m, &freelist);
3124 		PMAP_UNLOCK(pmap);
3125 		MPASS(rv == true);
3126 		vm_page_free_pages_toq(&freelist, true);
3127 	}
3128 
3129 	KASSERT(pmap->pm_stats.resident_count == 0,
3130 	    ("pmap_release: pmap resident count %ld != 0",
3131 	    pmap->pm_stats.resident_count));
3132 	KASSERT(vm_radix_is_empty(&pmap->pm_root),
3133 	    ("pmap_release: pmap has reserved page table page(s)"));
3134 
3135 	set = pmap->pm_asid_set;
3136 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
3137 
3138 	/*
3139 	 * Allow the ASID to be reused. In stage 2 VMIDs we don't invalidate
3140 	 * the entries when removing them so rely on a later tlb invalidation.
3141 	 * this will happen when updating the VMID generation. Because of this
3142 	 * we don't reuse VMIDs within a generation.
3143 	 */
3144 	if (pmap->pm_stage == PM_STAGE1) {
3145 		mtx_lock_spin(&set->asid_set_mutex);
3146 		if (COOKIE_TO_EPOCH(pmap->pm_cookie) == set->asid_epoch) {
3147 			asid = COOKIE_TO_ASID(pmap->pm_cookie);
3148 			KASSERT(asid >= ASID_FIRST_AVAILABLE &&
3149 			    asid < set->asid_set_size,
3150 			    ("pmap_release: pmap cookie has out-of-range asid"));
3151 			bit_clear(set->asid_set, asid);
3152 		}
3153 		mtx_unlock_spin(&set->asid_set_mutex);
3154 
3155 		if (pmap->pm_bti != NULL) {
3156 			rangeset_fini(pmap->pm_bti);
3157 			free(pmap->pm_bti, M_DEVBUF);
3158 		}
3159 	}
3160 
3161 	m = PHYS_TO_VM_PAGE(pmap->pm_l0_paddr);
3162 	vm_page_unwire_noq(m);
3163 	vm_page_free_zero(m);
3164 }
3165 
3166 static int
kvm_size(SYSCTL_HANDLER_ARGS)3167 kvm_size(SYSCTL_HANDLER_ARGS)
3168 {
3169 	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
3170 
3171 	return sysctl_handle_long(oidp, &ksize, 0, req);
3172 }
3173 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
3174     0, 0, kvm_size, "LU",
3175     "Size of KVM");
3176 
3177 static int
kvm_free(SYSCTL_HANDLER_ARGS)3178 kvm_free(SYSCTL_HANDLER_ARGS)
3179 {
3180 	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
3181 
3182 	return sysctl_handle_long(oidp, &kfree, 0, req);
3183 }
3184 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
3185     0, 0, kvm_free, "LU",
3186     "Amount of KVM free");
3187 
3188 /*
3189  * grow the number of kernel page table entries, if needed
3190  */
3191 static int
pmap_growkernel_nopanic(vm_offset_t addr)3192 pmap_growkernel_nopanic(vm_offset_t addr)
3193 {
3194 	vm_page_t nkpg;
3195 	pd_entry_t *l0, *l1, *l2;
3196 
3197 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
3198 
3199 	addr = roundup2(addr, L2_SIZE);
3200 	if (addr - 1 >= vm_map_max(kernel_map))
3201 		addr = vm_map_max(kernel_map);
3202 	if (kernel_vm_end < addr) {
3203 		kasan_shadow_map(kernel_vm_end, addr - kernel_vm_end);
3204 		kmsan_shadow_map(kernel_vm_end, addr - kernel_vm_end);
3205 	}
3206 	while (kernel_vm_end < addr) {
3207 		l0 = pmap_l0(kernel_pmap, kernel_vm_end);
3208 		KASSERT(pmap_load(l0) != 0,
3209 		    ("pmap_growkernel: No level 0 kernel entry"));
3210 
3211 		l1 = pmap_l0_to_l1(l0, kernel_vm_end);
3212 		if (pmap_load(l1) == 0) {
3213 			/* We need a new PDP entry */
3214 			nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
3215 			    VM_ALLOC_NOFREE | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
3216 			if (nkpg == NULL)
3217 				return (KERN_RESOURCE_SHORTAGE);
3218 			nkpg->pindex = pmap_l1_pindex(kernel_vm_end);
3219 			/* See the dmb() in _pmap_alloc_l3(). */
3220 			dmb(ishst);
3221 			pmap_store(l1, VM_PAGE_TO_PTE(nkpg) | L1_TABLE);
3222 			continue; /* try again */
3223 		}
3224 		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
3225 		if (pmap_load(l2) != 0) {
3226 			kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
3227 			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3228 				kernel_vm_end = vm_map_max(kernel_map);
3229 				break;
3230 			}
3231 			continue;
3232 		}
3233 
3234 		nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
3235 		    VM_ALLOC_NOFREE | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
3236 		if (nkpg == NULL)
3237 			return (KERN_RESOURCE_SHORTAGE);
3238 		nkpg->pindex = pmap_l2_pindex(kernel_vm_end);
3239 		/* See the dmb() in _pmap_alloc_l3(). */
3240 		dmb(ishst);
3241 		pmap_store(l2, VM_PAGE_TO_PTE(nkpg) | L2_TABLE);
3242 
3243 		kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
3244 		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3245 			kernel_vm_end = vm_map_max(kernel_map);
3246 			break;
3247 		}
3248 	}
3249 	return (KERN_SUCCESS);
3250 }
3251 
3252 int
pmap_growkernel(vm_offset_t addr)3253 pmap_growkernel(vm_offset_t addr)
3254 {
3255 	int rv;
3256 
3257 	rv = pmap_growkernel_nopanic(addr);
3258 	if (rv != KERN_SUCCESS && pmap_growkernel_panic)
3259 		panic("pmap_growkernel: no memory to grow kernel");
3260 	return (rv);
3261 }
3262 
3263 /***************************************************
3264  * page management routines.
3265  ***************************************************/
3266 
3267 static const uint64_t pc_freemask[_NPCM] = {
3268 	[0 ... _NPCM - 2] = PC_FREEN,
3269 	[_NPCM - 1] = PC_FREEL
3270 };
3271 
3272 #ifdef PV_STATS
3273 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
3274 
3275 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
3276 	"Current number of pv entry chunks");
3277 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
3278 	"Current number of pv entry chunks allocated");
3279 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
3280 	"Current number of pv entry chunks frees");
3281 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
3282 	"Number of times tried to get a chunk page but failed.");
3283 
3284 static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
3285 static int pv_entry_spare;
3286 
3287 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
3288 	"Current number of pv entry frees");
3289 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
3290 	"Current number of pv entry allocs");
3291 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
3292 	"Current number of pv entries");
3293 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
3294 	"Current number of spare pv entries");
3295 #endif
3296 
3297 /*
3298  * We are in a serious low memory condition.  Resort to
3299  * drastic measures to free some pages so we can allocate
3300  * another pv entry chunk.
3301  *
3302  * Returns NULL if PV entries were reclaimed from the specified pmap.
3303  *
3304  * We do not, however, unmap 2mpages because subsequent accesses will
3305  * allocate per-page pv entries until repromotion occurs, thereby
3306  * exacerbating the shortage of free pv entries.
3307  */
3308 static vm_page_t
reclaim_pv_chunk_domain(pmap_t locked_pmap,struct rwlock ** lockp,int domain)3309 reclaim_pv_chunk_domain(pmap_t locked_pmap, struct rwlock **lockp, int domain)
3310 {
3311 	struct pv_chunks_list *pvc;
3312 	struct pv_chunk *pc, *pc_marker, *pc_marker_end;
3313 	struct pv_chunk_header pc_marker_b, pc_marker_end_b;
3314 	struct md_page *pvh;
3315 	pd_entry_t *pde;
3316 	pmap_t next_pmap, pmap;
3317 	pt_entry_t *pte, tpte;
3318 	pv_entry_t pv;
3319 	vm_offset_t va;
3320 	vm_page_t m, m_pc;
3321 	struct spglist free;
3322 	uint64_t inuse;
3323 	int bit, field, freed, lvl;
3324 
3325 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
3326 	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
3327 
3328 	pmap = NULL;
3329 	m_pc = NULL;
3330 	SLIST_INIT(&free);
3331 	bzero(&pc_marker_b, sizeof(pc_marker_b));
3332 	bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
3333 	pc_marker = (struct pv_chunk *)&pc_marker_b;
3334 	pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
3335 
3336 	pvc = &pv_chunks[domain];
3337 	mtx_lock(&pvc->pvc_lock);
3338 	pvc->active_reclaims++;
3339 	TAILQ_INSERT_HEAD(&pvc->pvc_list, pc_marker, pc_lru);
3340 	TAILQ_INSERT_TAIL(&pvc->pvc_list, pc_marker_end, pc_lru);
3341 	while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
3342 	    SLIST_EMPTY(&free)) {
3343 		next_pmap = pc->pc_pmap;
3344 		if (next_pmap == NULL) {
3345 			/*
3346 			 * The next chunk is a marker.  However, it is
3347 			 * not our marker, so active_reclaims must be
3348 			 * > 1.  Consequently, the next_chunk code
3349 			 * will not rotate the pv_chunks list.
3350 			 */
3351 			goto next_chunk;
3352 		}
3353 		mtx_unlock(&pvc->pvc_lock);
3354 
3355 		/*
3356 		 * A pv_chunk can only be removed from the pc_lru list
3357 		 * when both pvc->pvc_lock is owned and the
3358 		 * corresponding pmap is locked.
3359 		 */
3360 		if (pmap != next_pmap) {
3361 			if (pmap != NULL && pmap != locked_pmap)
3362 				PMAP_UNLOCK(pmap);
3363 			pmap = next_pmap;
3364 			/* Avoid deadlock and lock recursion. */
3365 			if (pmap > locked_pmap) {
3366 				RELEASE_PV_LIST_LOCK(lockp);
3367 				PMAP_LOCK(pmap);
3368 				mtx_lock(&pvc->pvc_lock);
3369 				continue;
3370 			} else if (pmap != locked_pmap) {
3371 				if (PMAP_TRYLOCK(pmap)) {
3372 					mtx_lock(&pvc->pvc_lock);
3373 					continue;
3374 				} else {
3375 					pmap = NULL; /* pmap is not locked */
3376 					mtx_lock(&pvc->pvc_lock);
3377 					pc = TAILQ_NEXT(pc_marker, pc_lru);
3378 					if (pc == NULL ||
3379 					    pc->pc_pmap != next_pmap)
3380 						continue;
3381 					goto next_chunk;
3382 				}
3383 			}
3384 		}
3385 
3386 		/*
3387 		 * Destroy every non-wired, 4 KB page mapping in the chunk.
3388 		 */
3389 		freed = 0;
3390 		for (field = 0; field < _NPCM; field++) {
3391 			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
3392 			    inuse != 0; inuse &= ~(1UL << bit)) {
3393 				bit = ffsl(inuse) - 1;
3394 				pv = &pc->pc_pventry[field * 64 + bit];
3395 				va = pv->pv_va;
3396 				pde = pmap_pde(pmap, va, &lvl);
3397 				if (lvl != 2)
3398 					continue;
3399 				pte = pmap_l2_to_l3(pde, va);
3400 				tpte = pmap_load(pte);
3401 				if ((tpte & ATTR_SW_WIRED) != 0)
3402 					continue;
3403 				if ((tpte & ATTR_CONTIGUOUS) != 0)
3404 					(void)pmap_demote_l3c(pmap, pte, va);
3405 				tpte = pmap_load_clear(pte);
3406 				m = PTE_TO_VM_PAGE(tpte);
3407 				if (pmap_pte_dirty(pmap, tpte))
3408 					vm_page_dirty(m);
3409 				if ((tpte & ATTR_AF) != 0) {
3410 					pmap_s1_invalidate_page(pmap, va, true);
3411 					vm_page_aflag_set(m, PGA_REFERENCED);
3412 				}
3413 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3414 				TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
3415 				m->md.pv_gen++;
3416 				if (TAILQ_EMPTY(&m->md.pv_list) &&
3417 				    (m->flags & PG_FICTITIOUS) == 0) {
3418 					pvh = page_to_pvh(m);
3419 					if (TAILQ_EMPTY(&pvh->pv_list)) {
3420 						vm_page_aflag_clear(m,
3421 						    PGA_WRITEABLE);
3422 					}
3423 				}
3424 				pc->pc_map[field] |= 1UL << bit;
3425 				pmap_unuse_pt(pmap, va, pmap_load(pde), &free);
3426 				freed++;
3427 			}
3428 		}
3429 		if (freed == 0) {
3430 			mtx_lock(&pvc->pvc_lock);
3431 			goto next_chunk;
3432 		}
3433 		/* Every freed mapping is for a 4 KB page. */
3434 		pmap_resident_count_dec(pmap, freed);
3435 		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
3436 		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
3437 		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
3438 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3439 		if (pc_is_free(pc)) {
3440 			PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
3441 			PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
3442 			PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
3443 			/* Entire chunk is free; return it. */
3444 			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
3445 			dump_drop_page(m_pc->phys_addr);
3446 			mtx_lock(&pvc->pvc_lock);
3447 			TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
3448 			break;
3449 		}
3450 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3451 		mtx_lock(&pvc->pvc_lock);
3452 		/* One freed pv entry in locked_pmap is sufficient. */
3453 		if (pmap == locked_pmap)
3454 			break;
3455 
3456 next_chunk:
3457 		TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru);
3458 		TAILQ_INSERT_AFTER(&pvc->pvc_list, pc, pc_marker, pc_lru);
3459 		if (pvc->active_reclaims == 1 && pmap != NULL) {
3460 			/*
3461 			 * Rotate the pv chunks list so that we do not
3462 			 * scan the same pv chunks that could not be
3463 			 * freed (because they contained a wired
3464 			 * and/or superpage mapping) on every
3465 			 * invocation of reclaim_pv_chunk().
3466 			 */
3467 			while ((pc = TAILQ_FIRST(&pvc->pvc_list)) != pc_marker){
3468 				MPASS(pc->pc_pmap != NULL);
3469 				TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
3470 				TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
3471 			}
3472 		}
3473 	}
3474 	TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru);
3475 	TAILQ_REMOVE(&pvc->pvc_list, pc_marker_end, pc_lru);
3476 	pvc->active_reclaims--;
3477 	mtx_unlock(&pvc->pvc_lock);
3478 	if (pmap != NULL && pmap != locked_pmap)
3479 		PMAP_UNLOCK(pmap);
3480 	if (m_pc == NULL && !SLIST_EMPTY(&free)) {
3481 		m_pc = SLIST_FIRST(&free);
3482 		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
3483 		/* Recycle a freed page table page. */
3484 		m_pc->ref_count = 1;
3485 	}
3486 	vm_page_free_pages_toq(&free, true);
3487 	return (m_pc);
3488 }
3489 
3490 static vm_page_t
reclaim_pv_chunk(pmap_t locked_pmap,struct rwlock ** lockp)3491 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
3492 {
3493 	vm_page_t m;
3494 	int i, domain;
3495 
3496 	domain = PCPU_GET(domain);
3497 	for (i = 0; i < vm_ndomains; i++) {
3498 		m = reclaim_pv_chunk_domain(locked_pmap, lockp, domain);
3499 		if (m != NULL)
3500 			break;
3501 		domain = (domain + 1) % vm_ndomains;
3502 	}
3503 
3504 	return (m);
3505 }
3506 
3507 /*
3508  * free the pv_entry back to the free list
3509  */
3510 static void
free_pv_entry(pmap_t pmap,pv_entry_t pv)3511 free_pv_entry(pmap_t pmap, pv_entry_t pv)
3512 {
3513 	struct pv_chunk *pc;
3514 	int idx, field, bit;
3515 
3516 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3517 	PV_STAT(atomic_add_long(&pv_entry_frees, 1));
3518 	PV_STAT(atomic_add_int(&pv_entry_spare, 1));
3519 	PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
3520 	pc = pv_to_chunk(pv);
3521 	idx = pv - &pc->pc_pventry[0];
3522 	field = idx / 64;
3523 	bit = idx % 64;
3524 	pc->pc_map[field] |= 1ul << bit;
3525 	if (!pc_is_free(pc)) {
3526 		/* 98% of the time, pc is already at the head of the list. */
3527 		if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
3528 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3529 			TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3530 		}
3531 		return;
3532 	}
3533 	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3534 	free_pv_chunk(pc);
3535 }
3536 
3537 static void
free_pv_chunk_dequeued(struct pv_chunk * pc)3538 free_pv_chunk_dequeued(struct pv_chunk *pc)
3539 {
3540 	vm_page_t m;
3541 
3542 	PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
3543 	PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
3544 	PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
3545 	/* entire chunk is free, return it */
3546 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
3547 	dump_drop_page(m->phys_addr);
3548 	vm_page_unwire_noq(m);
3549 	vm_page_free(m);
3550 }
3551 
3552 static void
free_pv_chunk(struct pv_chunk * pc)3553 free_pv_chunk(struct pv_chunk *pc)
3554 {
3555 	struct pv_chunks_list *pvc;
3556 
3557 	pvc = &pv_chunks[pc_to_domain(pc)];
3558 	mtx_lock(&pvc->pvc_lock);
3559 	TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
3560 	mtx_unlock(&pvc->pvc_lock);
3561 	free_pv_chunk_dequeued(pc);
3562 }
3563 
3564 static void
free_pv_chunk_batch(struct pv_chunklist * batch)3565 free_pv_chunk_batch(struct pv_chunklist *batch)
3566 {
3567 	struct pv_chunks_list *pvc;
3568 	struct pv_chunk *pc, *npc;
3569 	int i;
3570 
3571 	for (i = 0; i < vm_ndomains; i++) {
3572 		if (TAILQ_EMPTY(&batch[i]))
3573 			continue;
3574 		pvc = &pv_chunks[i];
3575 		mtx_lock(&pvc->pvc_lock);
3576 		TAILQ_FOREACH(pc, &batch[i], pc_list) {
3577 			TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
3578 		}
3579 		mtx_unlock(&pvc->pvc_lock);
3580 	}
3581 
3582 	for (i = 0; i < vm_ndomains; i++) {
3583 		TAILQ_FOREACH_SAFE(pc, &batch[i], pc_list, npc) {
3584 			free_pv_chunk_dequeued(pc);
3585 		}
3586 	}
3587 }
3588 
3589 /*
3590  * Returns a new PV entry, allocating a new PV chunk from the system when
3591  * needed.  If this PV chunk allocation fails and a PV list lock pointer was
3592  * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
3593  * returned.
3594  *
3595  * The given PV list lock may be released.
3596  */
3597 static pv_entry_t
get_pv_entry(pmap_t pmap,struct rwlock ** lockp)3598 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
3599 {
3600 	struct pv_chunks_list *pvc;
3601 	int bit, field;
3602 	pv_entry_t pv;
3603 	struct pv_chunk *pc;
3604 	vm_page_t m;
3605 
3606 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3607 	PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
3608 retry:
3609 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
3610 	if (pc != NULL) {
3611 		for (field = 0; field < _NPCM; field++) {
3612 			if (pc->pc_map[field]) {
3613 				bit = ffsl(pc->pc_map[field]) - 1;
3614 				break;
3615 			}
3616 		}
3617 		if (field < _NPCM) {
3618 			pv = &pc->pc_pventry[field * 64 + bit];
3619 			pc->pc_map[field] &= ~(1ul << bit);
3620 			/* If this was the last item, move it to tail */
3621 			if (pc_is_full(pc)) {
3622 				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3623 				TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
3624 				    pc_list);
3625 			}
3626 			PV_STAT(atomic_add_long(&pv_entry_count, 1));
3627 			PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
3628 			return (pv);
3629 		}
3630 	}
3631 	/* No free items, allocate another chunk */
3632 	m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
3633 	if (m == NULL) {
3634 		if (lockp == NULL) {
3635 			PV_STAT(pc_chunk_tryfail++);
3636 			return (NULL);
3637 		}
3638 		m = reclaim_pv_chunk(pmap, lockp);
3639 		if (m == NULL)
3640 			goto retry;
3641 	}
3642 	PV_STAT(atomic_add_int(&pc_chunk_count, 1));
3643 	PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
3644 	dump_add_page(m->phys_addr);
3645 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
3646 	pc->pc_pmap = pmap;
3647 	memcpy(pc->pc_map, pc_freemask, sizeof(pc_freemask));
3648 	pc->pc_map[0] &= ~1ul;		/* preallocated bit 0 */
3649 	pvc = &pv_chunks[vm_page_domain(m)];
3650 	mtx_lock(&pvc->pvc_lock);
3651 	TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
3652 	mtx_unlock(&pvc->pvc_lock);
3653 	pv = &pc->pc_pventry[0];
3654 	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3655 	PV_STAT(atomic_add_long(&pv_entry_count, 1));
3656 	PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
3657 	return (pv);
3658 }
3659 
3660 /*
3661  * Ensure that the number of spare PV entries in the specified pmap meets or
3662  * exceeds the given count, "needed".
3663  *
3664  * The given PV list lock may be released.
3665  */
3666 static void
reserve_pv_entries(pmap_t pmap,int needed,struct rwlock ** lockp)3667 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
3668 {
3669 	struct pv_chunks_list *pvc;
3670 	struct pch new_tail[PMAP_MEMDOM];
3671 	struct pv_chunk *pc;
3672 	vm_page_t m;
3673 	int avail, free, i;
3674 	bool reclaimed;
3675 
3676 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3677 	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
3678 
3679 	/*
3680 	 * Newly allocated PV chunks must be stored in a private list until
3681 	 * the required number of PV chunks have been allocated.  Otherwise,
3682 	 * reclaim_pv_chunk() could recycle one of these chunks.  In
3683 	 * contrast, these chunks must be added to the pmap upon allocation.
3684 	 */
3685 	for (i = 0; i < PMAP_MEMDOM; i++)
3686 		TAILQ_INIT(&new_tail[i]);
3687 retry:
3688 	avail = 0;
3689 	TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
3690 		bit_count((bitstr_t *)pc->pc_map, 0,
3691 		    sizeof(pc->pc_map) * NBBY, &free);
3692 		if (free == 0)
3693 			break;
3694 		avail += free;
3695 		if (avail >= needed)
3696 			break;
3697 	}
3698 	for (reclaimed = false; avail < needed; avail += _NPCPV) {
3699 		m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
3700 		if (m == NULL) {
3701 			m = reclaim_pv_chunk(pmap, lockp);
3702 			if (m == NULL)
3703 				goto retry;
3704 			reclaimed = true;
3705 		}
3706 		PV_STAT(atomic_add_int(&pc_chunk_count, 1));
3707 		PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
3708 		dump_add_page(m->phys_addr);
3709 		pc = (void *)PHYS_TO_DMAP(m->phys_addr);
3710 		pc->pc_pmap = pmap;
3711 		memcpy(pc->pc_map, pc_freemask, sizeof(pc_freemask));
3712 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3713 		TAILQ_INSERT_TAIL(&new_tail[vm_page_domain(m)], pc, pc_lru);
3714 		PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
3715 
3716 		/*
3717 		 * The reclaim might have freed a chunk from the current pmap.
3718 		 * If that chunk contained available entries, we need to
3719 		 * re-count the number of available entries.
3720 		 */
3721 		if (reclaimed)
3722 			goto retry;
3723 	}
3724 	for (i = 0; i < vm_ndomains; i++) {
3725 		if (TAILQ_EMPTY(&new_tail[i]))
3726 			continue;
3727 		pvc = &pv_chunks[i];
3728 		mtx_lock(&pvc->pvc_lock);
3729 		TAILQ_CONCAT(&pvc->pvc_list, &new_tail[i], pc_lru);
3730 		mtx_unlock(&pvc->pvc_lock);
3731 	}
3732 }
3733 
3734 /*
3735  * First find and then remove the pv entry for the specified pmap and virtual
3736  * address from the specified pv list.  Returns the pv entry if found and NULL
3737  * otherwise.  This operation can be performed on pv lists for either 4KB or
3738  * 2MB page mappings.
3739  */
3740 static __inline pv_entry_t
pmap_pvh_remove(struct md_page * pvh,pmap_t pmap,vm_offset_t va)3741 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
3742 {
3743 	pv_entry_t pv;
3744 
3745 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3746 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
3747 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
3748 			pvh->pv_gen++;
3749 			break;
3750 		}
3751 	}
3752 	return (pv);
3753 }
3754 
3755 /*
3756  * After demotion from a 2MB page mapping to 512 4KB page mappings,
3757  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
3758  * entries for each of the 4KB page mappings.
3759  */
3760 static void
pmap_pv_demote_l2(pmap_t pmap,vm_offset_t va,vm_paddr_t pa,struct rwlock ** lockp)3761 pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
3762     struct rwlock **lockp)
3763 {
3764 	struct md_page *pvh;
3765 	struct pv_chunk *pc;
3766 	pv_entry_t pv;
3767 	vm_offset_t va_last;
3768 	vm_page_t m;
3769 	int bit, field;
3770 
3771 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3772 	KASSERT((va & L2_OFFSET) == 0,
3773 	    ("pmap_pv_demote_l2: va is not 2mpage aligned"));
3774 	KASSERT((pa & L2_OFFSET) == 0,
3775 	    ("pmap_pv_demote_l2: pa is not 2mpage aligned"));
3776 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3777 
3778 	/*
3779 	 * Transfer the 2mpage's pv entry for this mapping to the first
3780 	 * page's pv list.  Once this transfer begins, the pv list lock
3781 	 * must not be released until the last pv entry is reinstantiated.
3782 	 */
3783 	pvh = pa_to_pvh(pa);
3784 	pv = pmap_pvh_remove(pvh, pmap, va);
3785 	KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
3786 	m = PHYS_TO_VM_PAGE(pa);
3787 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3788 	m->md.pv_gen++;
3789 	/* Instantiate the remaining Ln_ENTRIES - 1 pv entries. */
3790 	PV_STAT(atomic_add_long(&pv_entry_allocs, Ln_ENTRIES - 1));
3791 	va_last = va + L2_SIZE - PAGE_SIZE;
3792 	for (;;) {
3793 		pc = TAILQ_FIRST(&pmap->pm_pvchunk);
3794 		KASSERT(!pc_is_full(pc), ("pmap_pv_demote_l2: missing spare"));
3795 		for (field = 0; field < _NPCM; field++) {
3796 			while (pc->pc_map[field]) {
3797 				bit = ffsl(pc->pc_map[field]) - 1;
3798 				pc->pc_map[field] &= ~(1ul << bit);
3799 				pv = &pc->pc_pventry[field * 64 + bit];
3800 				va += PAGE_SIZE;
3801 				pv->pv_va = va;
3802 				m++;
3803 				KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3804 			    ("pmap_pv_demote_l2: page %p is not managed", m));
3805 				TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3806 				m->md.pv_gen++;
3807 				if (va == va_last)
3808 					goto out;
3809 			}
3810 		}
3811 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3812 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
3813 	}
3814 out:
3815 	if (pc_is_full(pc)) {
3816 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3817 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
3818 	}
3819 	PV_STAT(atomic_add_long(&pv_entry_count, Ln_ENTRIES - 1));
3820 	PV_STAT(atomic_subtract_int(&pv_entry_spare, Ln_ENTRIES - 1));
3821 }
3822 
3823 /*
3824  * First find and then destroy the pv entry for the specified pmap and virtual
3825  * address.  This operation can be performed on pv lists for either 4KB or 2MB
3826  * page mappings.
3827  */
3828 static void
pmap_pvh_free(struct md_page * pvh,pmap_t pmap,vm_offset_t va)3829 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
3830 {
3831 	pv_entry_t pv;
3832 
3833 	pv = pmap_pvh_remove(pvh, pmap, va);
3834 	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
3835 	free_pv_entry(pmap, pv);
3836 }
3837 
3838 /*
3839  * Conditionally create the PV entry for a 4KB page mapping if the required
3840  * memory can be allocated without resorting to reclamation.
3841  */
3842 static bool
pmap_try_insert_pv_entry(pmap_t pmap,vm_offset_t va,vm_page_t m,struct rwlock ** lockp)3843 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
3844     struct rwlock **lockp)
3845 {
3846 	pv_entry_t pv;
3847 
3848 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3849 	/* Pass NULL instead of the lock pointer to disable reclamation. */
3850 	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
3851 		pv->pv_va = va;
3852 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3853 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3854 		m->md.pv_gen++;
3855 		return (true);
3856 	} else
3857 		return (false);
3858 }
3859 
3860 /*
3861  * Create the PV entry for a 2MB page mapping.  Always returns true unless the
3862  * flag PMAP_ENTER_NORECLAIM is specified.  If that flag is specified, returns
3863  * false if the PV entry cannot be allocated without resorting to reclamation.
3864  */
3865 static bool
pmap_pv_insert_l2(pmap_t pmap,vm_offset_t va,pd_entry_t l2e,u_int flags,struct rwlock ** lockp)3866 pmap_pv_insert_l2(pmap_t pmap, vm_offset_t va, pd_entry_t l2e, u_int flags,
3867     struct rwlock **lockp)
3868 {
3869 	struct md_page *pvh;
3870 	pv_entry_t pv;
3871 	vm_paddr_t pa;
3872 
3873 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3874 	/* Pass NULL instead of the lock pointer to disable reclamation. */
3875 	if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
3876 	    NULL : lockp)) == NULL)
3877 		return (false);
3878 	pv->pv_va = va;
3879 	pa = PTE_TO_PHYS(l2e);
3880 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3881 	pvh = pa_to_pvh(pa);
3882 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3883 	pvh->pv_gen++;
3884 	return (true);
3885 }
3886 
3887 /*
3888  * Conditionally creates the PV entries for a L3C superpage mapping if
3889  * the required memory can be allocated without resorting to reclamation.
3890  */
3891 static bool
pmap_pv_insert_l3c(pmap_t pmap,vm_offset_t va,vm_page_t m,struct rwlock ** lockp)3892 pmap_pv_insert_l3c(pmap_t pmap, vm_offset_t va, vm_page_t m,
3893     struct rwlock **lockp)
3894 {
3895 	pv_entry_t pv;
3896 	vm_offset_t tva;
3897 	vm_paddr_t pa __diagused;
3898 	vm_page_t mt;
3899 
3900 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3901 	KASSERT((va & L3C_OFFSET) == 0,
3902 	    ("pmap_pv_insert_l3c: va is not aligned"));
3903 	pa = VM_PAGE_TO_PHYS(m);
3904 	KASSERT((pa & L3C_OFFSET) == 0,
3905 	    ("pmap_pv_insert_l3c: pa is not aligned"));
3906 	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3907 	for (mt = m, tva = va; mt < &m[L3C_ENTRIES]; mt++, tva += L3_SIZE) {
3908 		/* Pass NULL instead of lockp to disable reclamation. */
3909 		pv = get_pv_entry(pmap, NULL);
3910 		if (__predict_false(pv == NULL)) {
3911 			while (tva > va) {
3912 				mt--;
3913 				tva -= L3_SIZE;
3914 				pmap_pvh_free(&mt->md, pmap, tva);
3915 			}
3916 			return (false);
3917 		}
3918 		pv->pv_va = tva;
3919 		TAILQ_INSERT_TAIL(&mt->md.pv_list, pv, pv_next);
3920 		mt->md.pv_gen++;
3921 	}
3922 	return (true);
3923 }
3924 
3925 static void
pmap_remove_kernel_l2(pmap_t pmap,pt_entry_t * l2,vm_offset_t va)3926 pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
3927 {
3928 	pt_entry_t newl2, oldl2 __diagused;
3929 	vm_page_t ml3;
3930 	vm_paddr_t ml3pa;
3931 
3932 	KASSERT(!VIRT_IN_DMAP(va), ("removing direct mapping of %#lx", va));
3933 	KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
3934 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3935 
3936 	ml3 = pmap_remove_pt_page(pmap, va);
3937 	KASSERT(ml3 != NULL, ("pmap_remove_kernel_l2: missing pt page"));
3938 
3939 	ml3pa = VM_PAGE_TO_PHYS(ml3);
3940 	newl2 = PHYS_TO_PTE(ml3pa) | L2_TABLE;
3941 
3942 	/*
3943 	 * If this page table page was unmapped by a promotion, then it
3944 	 * contains valid mappings.  Zero it to invalidate those mappings.
3945 	 */
3946 	if (vm_page_any_valid(ml3))
3947 		pagezero((void *)PHYS_TO_DMAP(ml3pa));
3948 
3949 	/*
3950 	 * Demote the mapping.  The caller must have already invalidated the
3951 	 * mapping (i.e., the "break" in break-before-make).
3952 	 */
3953 	oldl2 = pmap_load_store(l2, newl2);
3954 	KASSERT(oldl2 == 0, ("%s: found existing mapping at %p: %#lx",
3955 	    __func__, l2, oldl2));
3956 }
3957 
3958 /*
3959  * pmap_remove_l2: Do the things to unmap a level 2 superpage.
3960  */
3961 static int
pmap_remove_l2(pmap_t pmap,pt_entry_t * l2,vm_offset_t sva,pd_entry_t l1e,bool demote_kl2e,struct spglist * free,struct rwlock ** lockp)3962 pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pd_entry_t l1e,
3963     bool demote_kl2e, struct spglist *free, struct rwlock **lockp)
3964 {
3965 	struct md_page *pvh;
3966 	pt_entry_t old_l2;
3967 	vm_page_t m, ml3, mt;
3968 
3969 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3970 	KASSERT((sva & L2_OFFSET) == 0, ("pmap_remove_l2: sva is not aligned"));
3971 	old_l2 = pmap_load_clear(l2);
3972 	KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
3973 	    ("pmap_remove_l2: L2e %lx is not a block mapping", old_l2));
3974 
3975 	/*
3976 	 * Since a promotion must break the 4KB page mappings before making
3977 	 * the 2MB page mapping, a pmap_s1_invalidate_page() suffices.
3978 	 */
3979 	pmap_s1_invalidate_page(pmap, sva, true);
3980 
3981 	if (old_l2 & ATTR_SW_WIRED)
3982 		pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
3983 	pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
3984 	if (old_l2 & ATTR_SW_MANAGED) {
3985 		m = PTE_TO_VM_PAGE(old_l2);
3986 		pvh = page_to_pvh(m);
3987 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3988 		pmap_pvh_free(pvh, pmap, sva);
3989 		for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++) {
3990 			if (pmap_pte_dirty(pmap, old_l2))
3991 				vm_page_dirty(mt);
3992 			if (old_l2 & ATTR_AF)
3993 				vm_page_aflag_set(mt, PGA_REFERENCED);
3994 			if (TAILQ_EMPTY(&mt->md.pv_list) &&
3995 			    TAILQ_EMPTY(&pvh->pv_list))
3996 				vm_page_aflag_clear(mt, PGA_WRITEABLE);
3997 		}
3998 	}
3999 	if (pmap != kernel_pmap) {
4000 		ml3 = pmap_remove_pt_page(pmap, sva);
4001 		if (ml3 != NULL) {
4002 			KASSERT(vm_page_any_valid(ml3),
4003 			    ("pmap_remove_l2: l3 page not promoted"));
4004 			pmap_resident_count_dec(pmap, 1);
4005 			KASSERT(ml3->ref_count == NL3PG,
4006 			    ("pmap_remove_l2: l3 page ref count error"));
4007 			ml3->ref_count = 0;
4008 			pmap_add_delayed_free_list(ml3, free, false);
4009 		}
4010 	} else if (demote_kl2e) {
4011 		pmap_remove_kernel_l2(pmap, l2, sva);
4012 	} else {
4013 		ml3 = vm_radix_lookup(&pmap->pm_root, pmap_l2_pindex(sva));
4014 		if (vm_page_any_valid(ml3)) {
4015 			ml3->valid = 0;
4016 			pmap_zero_page(ml3);
4017 		}
4018 	}
4019 	return (pmap_unuse_pt(pmap, sva, l1e, free));
4020 }
4021 
4022 /*
4023  * pmap_remove_l3: do the things to unmap a page in a process
4024  */
4025 static int
pmap_remove_l3(pmap_t pmap,pt_entry_t * l3,vm_offset_t va,pd_entry_t l2e,struct spglist * free,struct rwlock ** lockp)4026 pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
4027     pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
4028 {
4029 	struct md_page *pvh;
4030 	pt_entry_t old_l3;
4031 	vm_page_t m;
4032 
4033 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4034 	old_l3 = pmap_load(l3);
4035 	if ((old_l3 & ATTR_CONTIGUOUS) != 0)
4036 		(void)pmap_demote_l3c(pmap, l3, va);
4037 	old_l3 = pmap_load_clear(l3);
4038 	pmap_s1_invalidate_page(pmap, va, true);
4039 	if (old_l3 & ATTR_SW_WIRED)
4040 		pmap->pm_stats.wired_count -= 1;
4041 	pmap_resident_count_dec(pmap, 1);
4042 	if (old_l3 & ATTR_SW_MANAGED) {
4043 		m = PTE_TO_VM_PAGE(old_l3);
4044 		if (pmap_pte_dirty(pmap, old_l3))
4045 			vm_page_dirty(m);
4046 		if (old_l3 & ATTR_AF)
4047 			vm_page_aflag_set(m, PGA_REFERENCED);
4048 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
4049 		pmap_pvh_free(&m->md, pmap, va);
4050 		if (TAILQ_EMPTY(&m->md.pv_list) &&
4051 		    (m->flags & PG_FICTITIOUS) == 0) {
4052 			pvh = page_to_pvh(m);
4053 			if (TAILQ_EMPTY(&pvh->pv_list))
4054 				vm_page_aflag_clear(m, PGA_WRITEABLE);
4055 		}
4056 	}
4057 	return (pmap_unuse_pt(pmap, va, l2e, free));
4058 }
4059 
4060 /*
4061  * Removes the specified L3C superpage mapping.  Requests TLB invalidations
4062  * to be performed by the caller through the returned "*vap". Returns true
4063  * if the level 3 table "ml3" was unmapped and added to the spglist "free".
4064  * Otherwise, returns false.
4065  */
4066 static bool
pmap_remove_l3c(pmap_t pmap,pt_entry_t * l3p,vm_offset_t va,vm_offset_t * vap,vm_offset_t va_next,vm_page_t ml3,struct spglist * free,struct rwlock ** lockp)4067 pmap_remove_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va, vm_offset_t *vap,
4068     vm_offset_t va_next, vm_page_t ml3, struct spglist *free,
4069     struct rwlock **lockp)
4070 {
4071 	struct md_page *pvh;
4072 	struct rwlock *new_lock;
4073 	pt_entry_t first_l3e, l3e, *tl3p;
4074 	vm_offset_t tva;
4075 	vm_page_t m, mt;
4076 
4077 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4078 	KASSERT(((uintptr_t)l3p & ((L3C_ENTRIES * sizeof(pt_entry_t)) - 1)) ==
4079 	    0, ("pmap_remove_l3c: l3p is not aligned"));
4080 	KASSERT((va & L3C_OFFSET) == 0,
4081 	    ("pmap_remove_l3c: va is not aligned"));
4082 
4083 	/*
4084 	 * Hardware accessed and dirty bit maintenance might only update a
4085 	 * single L3 entry, so we must combine the accessed and dirty bits
4086 	 * from this entire set of contiguous L3 entries.
4087 	 */
4088 	first_l3e = pmap_load_clear(l3p);
4089 	for (tl3p = l3p + 1; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
4090 		l3e = pmap_load_clear(tl3p);
4091 		KASSERT((l3e & ATTR_CONTIGUOUS) != 0,
4092 		    ("pmap_remove_l3c: l3e is missing ATTR_CONTIGUOUS"));
4093 		if ((l3e & (ATTR_SW_DBM | ATTR_S1_AP_RW_BIT)) ==
4094 		    (ATTR_SW_DBM | ATTR_S1_AP(ATTR_S1_AP_RW)))
4095 			first_l3e &= ~ATTR_S1_AP_RW_BIT;
4096 		first_l3e |= l3e & ATTR_AF;
4097 	}
4098 	if ((first_l3e & ATTR_SW_WIRED) != 0)
4099 		pmap->pm_stats.wired_count -= L3C_ENTRIES;
4100 	pmap_resident_count_dec(pmap, L3C_ENTRIES);
4101 	if ((first_l3e & ATTR_SW_MANAGED) != 0) {
4102 		m = PTE_TO_VM_PAGE(first_l3e);
4103 		new_lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4104 		if (new_lock != *lockp) {
4105 			if (*lockp != NULL) {
4106 				/*
4107 				 * Pending TLB invalidations must be
4108 				 * performed before the PV list lock is
4109 				 * released.  Otherwise, a concurrent
4110 				 * pmap_remove_all() on a physical page
4111 				 * could return while a stale TLB entry
4112 				 * still provides access to that page.
4113 				 */
4114 				if (*vap != va_next) {
4115 					pmap_invalidate_range(pmap, *vap, va,
4116 					    true);
4117 					*vap = va_next;
4118 				}
4119 				rw_wunlock(*lockp);
4120 			}
4121 			*lockp = new_lock;
4122 			rw_wlock(*lockp);
4123 		}
4124 		pvh = page_to_pvh(m);
4125 		for (mt = m, tva = va; mt < &m[L3C_ENTRIES]; mt++, tva +=
4126 		    L3_SIZE) {
4127 			if (pmap_pte_dirty(pmap, first_l3e))
4128 				vm_page_dirty(mt);
4129 			if ((first_l3e & ATTR_AF) != 0)
4130 				vm_page_aflag_set(mt, PGA_REFERENCED);
4131 			pmap_pvh_free(&mt->md, pmap, tva);
4132 			if (TAILQ_EMPTY(&mt->md.pv_list) &&
4133 			    TAILQ_EMPTY(&pvh->pv_list))
4134 				vm_page_aflag_clear(mt, PGA_WRITEABLE);
4135 		}
4136 	}
4137 	if (*vap == va_next)
4138 		*vap = va;
4139 	if (ml3 != NULL) {
4140 		ml3->ref_count -= L3C_ENTRIES;
4141 		if (ml3->ref_count == 0) {
4142 			_pmap_unwire_l3(pmap, va, ml3, free);
4143 			return (true);
4144 		}
4145 	}
4146 	return (false);
4147 }
4148 
4149 /*
4150  * Remove the specified range of addresses from the L3 page table that is
4151  * identified by the given L2 entry.
4152  */
4153 static void
pmap_remove_l3_range(pmap_t pmap,pd_entry_t l2e,vm_offset_t sva,vm_offset_t eva,struct spglist * free,struct rwlock ** lockp)4154 pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
4155     vm_offset_t eva, struct spglist *free, struct rwlock **lockp)
4156 {
4157 	struct md_page *pvh;
4158 	struct rwlock *new_lock;
4159 	pt_entry_t *l3, old_l3;
4160 	vm_offset_t va;
4161 	vm_page_t l3pg, m;
4162 
4163 	KASSERT(ADDR_IS_CANONICAL(sva),
4164 	    ("%s: Start address not in canonical form: %lx", __func__, sva));
4165 	KASSERT(ADDR_IS_CANONICAL(eva) || eva == VM_MAX_USER_ADDRESS,
4166 	    ("%s: End address not in canonical form: %lx", __func__, eva));
4167 
4168 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4169 	KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
4170 	    ("pmap_remove_l3_range: range crosses an L3 page table boundary"));
4171 	l3pg = ADDR_IS_USER(sva) ? PTE_TO_VM_PAGE(l2e) : NULL;
4172 	va = eva;
4173 	for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
4174 		old_l3 = pmap_load(l3);
4175 		if (!pmap_l3_valid(old_l3)) {
4176 			if (va != eva) {
4177 				pmap_invalidate_range(pmap, va, sva, true);
4178 				va = eva;
4179 			}
4180 			continue;
4181 		}
4182 		if ((old_l3 & ATTR_CONTIGUOUS) != 0) {
4183 			/*
4184 			 * Is this entire set of contiguous L3 entries being
4185 			 * removed?  Handle the possibility that "eva" is zero
4186 			 * because of address wraparound.
4187 			 */
4188 			if ((sva & L3C_OFFSET) == 0 &&
4189 			    sva + L3C_OFFSET <= eva - 1) {
4190 				if (pmap_remove_l3c(pmap, l3, sva, &va, eva,
4191 				    l3pg, free, lockp)) {
4192 					/* The L3 table was unmapped. */
4193 					sva += L3C_SIZE;
4194 					break;
4195 				}
4196 				l3 += L3C_ENTRIES - 1;
4197 				sva += L3C_SIZE - L3_SIZE;
4198 				continue;
4199 			}
4200 
4201 			(void)pmap_demote_l3c(pmap, l3, sva);
4202 		}
4203 		old_l3 = pmap_load_clear(l3);
4204 		if ((old_l3 & ATTR_SW_WIRED) != 0)
4205 			pmap->pm_stats.wired_count--;
4206 		pmap_resident_count_dec(pmap, 1);
4207 		if ((old_l3 & ATTR_SW_MANAGED) != 0) {
4208 			m = PTE_TO_VM_PAGE(old_l3);
4209 			if (pmap_pte_dirty(pmap, old_l3))
4210 				vm_page_dirty(m);
4211 			if ((old_l3 & ATTR_AF) != 0)
4212 				vm_page_aflag_set(m, PGA_REFERENCED);
4213 			new_lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4214 			if (new_lock != *lockp) {
4215 				if (*lockp != NULL) {
4216 					/*
4217 					 * Pending TLB invalidations must be
4218 					 * performed before the PV list lock is
4219 					 * released.  Otherwise, a concurrent
4220 					 * pmap_remove_all() on a physical page
4221 					 * could return while a stale TLB entry
4222 					 * still provides access to that page.
4223 					 */
4224 					if (va != eva) {
4225 						pmap_invalidate_range(pmap, va,
4226 						    sva, true);
4227 						va = eva;
4228 					}
4229 					rw_wunlock(*lockp);
4230 				}
4231 				*lockp = new_lock;
4232 				rw_wlock(*lockp);
4233 			}
4234 			pmap_pvh_free(&m->md, pmap, sva);
4235 			if (TAILQ_EMPTY(&m->md.pv_list) &&
4236 			    (m->flags & PG_FICTITIOUS) == 0) {
4237 				pvh = page_to_pvh(m);
4238 				if (TAILQ_EMPTY(&pvh->pv_list))
4239 					vm_page_aflag_clear(m, PGA_WRITEABLE);
4240 			}
4241 		}
4242 		if (l3pg != NULL && pmap_unwire_l3(pmap, sva, l3pg, free)) {
4243 			/*
4244 			 * _pmap_unwire_l3() has already invalidated the TLB
4245 			 * entries at all levels for "sva".  So, we need not
4246 			 * perform "sva += L3_SIZE;" here.  Moreover, we need
4247 			 * not perform "va = sva;" if "sva" is at the start
4248 			 * of a new valid range consisting of a single page.
4249 			 */
4250 			break;
4251 		}
4252 		if (va == eva)
4253 			va = sva;
4254 	}
4255 	if (va != eva)
4256 		pmap_invalidate_range(pmap, va, sva, true);
4257 }
4258 
4259 static void
pmap_remove1(pmap_t pmap,vm_offset_t sva,vm_offset_t eva,bool map_delete)4260 pmap_remove1(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool map_delete)
4261 {
4262 	struct rwlock *lock;
4263 	vm_offset_t va_next;
4264 	pd_entry_t *l0, *l1, *l2;
4265 	pt_entry_t l3_paddr;
4266 	struct spglist free;
4267 
4268 	/*
4269 	 * Perform an unsynchronized read.  This is, however, safe.
4270 	 */
4271 	if (pmap->pm_stats.resident_count == 0)
4272 		return;
4273 
4274 	SLIST_INIT(&free);
4275 
4276 	PMAP_LOCK(pmap);
4277 	if (map_delete)
4278 		pmap_bti_on_remove(pmap, sva, eva);
4279 
4280 	lock = NULL;
4281 	for (; sva < eva; sva = va_next) {
4282 		if (pmap->pm_stats.resident_count == 0)
4283 			break;
4284 
4285 		l0 = pmap_l0(pmap, sva);
4286 		if (pmap_load(l0) == 0) {
4287 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
4288 			if (va_next < sva)
4289 				va_next = eva;
4290 			continue;
4291 		}
4292 
4293 		va_next = (sva + L1_SIZE) & ~L1_OFFSET;
4294 		if (va_next < sva)
4295 			va_next = eva;
4296 		l1 = pmap_l0_to_l1(l0, sva);
4297 		if (pmap_load(l1) == 0)
4298 			continue;
4299 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
4300 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
4301 			KASSERT(va_next <= eva,
4302 			    ("partial update of non-transparent 1G page "
4303 			    "l1 %#lx sva %#lx eva %#lx va_next %#lx",
4304 			    pmap_load(l1), sva, eva, va_next));
4305 			MPASS(pmap != kernel_pmap);
4306 			MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0);
4307 			pmap_clear(l1);
4308 			pmap_s1_invalidate_page(pmap, sva, true);
4309 			pmap_resident_count_dec(pmap, L1_SIZE / PAGE_SIZE);
4310 			pmap_unuse_pt(pmap, sva, pmap_load(l0), &free);
4311 			continue;
4312 		}
4313 
4314 		/*
4315 		 * Calculate index for next page table.
4316 		 */
4317 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
4318 		if (va_next < sva)
4319 			va_next = eva;
4320 
4321 		l2 = pmap_l1_to_l2(l1, sva);
4322 		l3_paddr = pmap_load(l2);
4323 
4324 		if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
4325 			if (sva + L2_SIZE == va_next && eva >= va_next) {
4326 				pmap_remove_l2(pmap, l2, sva, pmap_load(l1),
4327 				    true, &free, &lock);
4328 				continue;
4329 			} else if (pmap_demote_l2_locked(pmap, l2, sva,
4330 			    &lock) == NULL)
4331 				continue;
4332 			l3_paddr = pmap_load(l2);
4333 		}
4334 
4335 		/*
4336 		 * Weed out invalid mappings.
4337 		 */
4338 		if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
4339 			continue;
4340 
4341 		/*
4342 		 * Limit our scan to either the end of the va represented
4343 		 * by the current page table page, or to the end of the
4344 		 * range being removed.
4345 		 */
4346 		if (va_next > eva)
4347 			va_next = eva;
4348 
4349 		pmap_remove_l3_range(pmap, l3_paddr, sva, va_next, &free,
4350 		    &lock);
4351 	}
4352 	if (lock != NULL)
4353 		rw_wunlock(lock);
4354 	PMAP_UNLOCK(pmap);
4355 	vm_page_free_pages_toq(&free, true);
4356 }
4357 
4358 /*
4359  *	Remove the given range of addresses from the specified map.
4360  *
4361  *	It is assumed that the start and end are properly
4362  *	rounded to the page size.
4363  */
4364 void
pmap_remove(pmap_t pmap,vm_offset_t sva,vm_offset_t eva)4365 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4366 {
4367 	pmap_remove1(pmap, sva, eva, false);
4368 }
4369 
4370 /*
4371  *	Remove the given range of addresses as part of a logical unmap
4372  *	operation. This has the effect of calling pmap_remove(), but
4373  *	also clears any metadata that should persist for the lifetime
4374  *	of a logical mapping.
4375  */
4376 void
pmap_map_delete(pmap_t pmap,vm_offset_t sva,vm_offset_t eva)4377 pmap_map_delete(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4378 {
4379 	pmap_remove1(pmap, sva, eva, true);
4380 }
4381 
4382 /*
4383  *	Routine:	pmap_remove_all
4384  *	Function:
4385  *		Removes this physical page from
4386  *		all physical maps in which it resides.
4387  *		Reflects back modify bits to the pager.
4388  *
4389  *	Notes:
4390  *		Original versions of this routine were very
4391  *		inefficient because they iteratively called
4392  *		pmap_remove (slow...)
4393  */
4394 
4395 void
pmap_remove_all(vm_page_t m)4396 pmap_remove_all(vm_page_t m)
4397 {
4398 	struct md_page *pvh;
4399 	pv_entry_t pv;
4400 	pmap_t pmap;
4401 	struct rwlock *lock;
4402 	pd_entry_t *pde, tpde;
4403 	pt_entry_t *pte, tpte;
4404 	vm_offset_t va;
4405 	struct spglist free;
4406 	int lvl, pvh_gen, md_gen;
4407 
4408 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4409 	    ("pmap_remove_all: page %p is not managed", m));
4410 	SLIST_INIT(&free);
4411 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4412 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
4413 	rw_wlock(lock);
4414 retry:
4415 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
4416 		pmap = PV_PMAP(pv);
4417 		if (!PMAP_TRYLOCK(pmap)) {
4418 			pvh_gen = pvh->pv_gen;
4419 			rw_wunlock(lock);
4420 			PMAP_LOCK(pmap);
4421 			rw_wlock(lock);
4422 			if (pvh_gen != pvh->pv_gen) {
4423 				PMAP_UNLOCK(pmap);
4424 				goto retry;
4425 			}
4426 		}
4427 		va = pv->pv_va;
4428 		pte = pmap_pte_exists(pmap, va, 2, __func__);
4429 		pmap_demote_l2_locked(pmap, pte, va, &lock);
4430 		PMAP_UNLOCK(pmap);
4431 	}
4432 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
4433 		pmap = PV_PMAP(pv);
4434 		if (!PMAP_TRYLOCK(pmap)) {
4435 			pvh_gen = pvh->pv_gen;
4436 			md_gen = m->md.pv_gen;
4437 			rw_wunlock(lock);
4438 			PMAP_LOCK(pmap);
4439 			rw_wlock(lock);
4440 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
4441 				PMAP_UNLOCK(pmap);
4442 				goto retry;
4443 			}
4444 		}
4445 		pmap_resident_count_dec(pmap, 1);
4446 
4447 		pde = pmap_pde(pmap, pv->pv_va, &lvl);
4448 		KASSERT(pde != NULL,
4449 		    ("pmap_remove_all: no page directory entry found"));
4450 		KASSERT(lvl == 2,
4451 		    ("pmap_remove_all: invalid pde level %d", lvl));
4452 		tpde = pmap_load(pde);
4453 
4454 		pte = pmap_l2_to_l3(pde, pv->pv_va);
4455 		tpte = pmap_load(pte);
4456 		if ((tpte & ATTR_CONTIGUOUS) != 0)
4457 			(void)pmap_demote_l3c(pmap, pte, pv->pv_va);
4458 		tpte = pmap_load_clear(pte);
4459 		if (tpte & ATTR_SW_WIRED)
4460 			pmap->pm_stats.wired_count--;
4461 		if ((tpte & ATTR_AF) != 0) {
4462 			pmap_invalidate_page(pmap, pv->pv_va, true);
4463 			vm_page_aflag_set(m, PGA_REFERENCED);
4464 		}
4465 
4466 		/*
4467 		 * Update the vm_page_t clean and reference bits.
4468 		 */
4469 		if (pmap_pte_dirty(pmap, tpte))
4470 			vm_page_dirty(m);
4471 		pmap_unuse_pt(pmap, pv->pv_va, tpde, &free);
4472 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4473 		m->md.pv_gen++;
4474 		free_pv_entry(pmap, pv);
4475 		PMAP_UNLOCK(pmap);
4476 	}
4477 	vm_page_aflag_clear(m, PGA_WRITEABLE);
4478 	rw_wunlock(lock);
4479 	vm_page_free_pages_toq(&free, true);
4480 }
4481 
4482 /*
4483  * Masks and sets bits in a level 2 page table entries in the specified pmap
4484  */
4485 static void
pmap_protect_l2(pmap_t pmap,pt_entry_t * l2,vm_offset_t sva,pt_entry_t mask,pt_entry_t nbits)4486 pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask,
4487     pt_entry_t nbits)
4488 {
4489 	pd_entry_t old_l2;
4490 	vm_page_t m, mt;
4491 
4492 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4493 	PMAP_ASSERT_STAGE1(pmap);
4494 	KASSERT((sva & L2_OFFSET) == 0,
4495 	    ("pmap_protect_l2: sva is not 2mpage aligned"));
4496 	old_l2 = pmap_load(l2);
4497 	KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
4498 	    ("pmap_protect_l2: L2e %lx is not a block mapping", old_l2));
4499 
4500 	/*
4501 	 * Return if the L2 entry already has the desired access restrictions
4502 	 * in place.
4503 	 */
4504 	if ((old_l2 & mask) == nbits)
4505 		return;
4506 
4507 	while (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
4508 		cpu_spinwait();
4509 
4510 	/*
4511 	 * When a dirty read/write superpage mapping is write protected,
4512 	 * update the dirty field of each of the superpage's constituent 4KB
4513 	 * pages.
4514 	 */
4515 	if ((old_l2 & ATTR_SW_MANAGED) != 0 &&
4516 	    (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
4517 	    pmap_pte_dirty(pmap, old_l2)) {
4518 		m = PTE_TO_VM_PAGE(old_l2);
4519 		for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
4520 			vm_page_dirty(mt);
4521 	}
4522 
4523 	/*
4524 	 * Since a promotion must break the 4KB page mappings before making
4525 	 * the 2MB page mapping, a pmap_s1_invalidate_page() suffices.
4526 	 */
4527 	pmap_s1_invalidate_page(pmap, sva, true);
4528 }
4529 
4530 /*
4531  * Masks and sets bits in the specified L3C superpage mapping.
4532  *
4533  * Requests TLB invalidations to be performed by the caller through the
4534  * returned "*vap".
4535  */
4536 static void
pmap_mask_set_l3c(pmap_t pmap,pt_entry_t * l3p,vm_offset_t va,vm_offset_t * vap,vm_offset_t va_next,pt_entry_t mask,pt_entry_t nbits)4537 pmap_mask_set_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va,
4538     vm_offset_t *vap, vm_offset_t va_next, pt_entry_t mask, pt_entry_t nbits)
4539 {
4540 	pt_entry_t l3e, *tl3p;
4541 	vm_page_t m, mt;
4542 	bool dirty;
4543 
4544 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4545 	KASSERT(((uintptr_t)l3p & ((L3C_ENTRIES * sizeof(pt_entry_t)) - 1)) ==
4546 	    0, ("pmap_mask_set_l3c: l3p is not aligned"));
4547 	KASSERT((va & L3C_OFFSET) == 0,
4548 	    ("pmap_mask_set_l3c: va is not aligned"));
4549 	dirty = false;
4550 	for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
4551 		l3e = pmap_load(tl3p);
4552 		KASSERT((l3e & ATTR_CONTIGUOUS) != 0,
4553 		    ("pmap_mask_set_l3c: l3e is missing ATTR_CONTIGUOUS"));
4554 		while (!atomic_fcmpset_64(tl3p, &l3e, (l3e & ~mask) | nbits))
4555 			cpu_spinwait();
4556 		if ((l3e & (ATTR_SW_DBM | ATTR_S1_AP_RW_BIT)) ==
4557 		    (ATTR_SW_DBM | ATTR_S1_AP(ATTR_S1_AP_RW)))
4558 			dirty = true;
4559 	}
4560 
4561 	/*
4562 	 * When a dirty read/write superpage mapping is write protected,
4563 	 * update the dirty field of each of the superpage's constituent 4KB
4564 	 * pages.
4565 	 */
4566 	if ((l3e & ATTR_SW_MANAGED) != 0 &&
4567 	    (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
4568 	    dirty) {
4569 		m = PTE_TO_VM_PAGE(pmap_load(l3p));
4570 		for (mt = m; mt < &m[L3C_ENTRIES]; mt++)
4571 			vm_page_dirty(mt);
4572 	}
4573 
4574 	if (*vap == va_next)
4575 		*vap = va;
4576 }
4577 
4578 /*
4579  * Masks and sets bits in last level page table entries in the specified
4580  * pmap and range
4581  */
4582 static void
pmap_mask_set_locked(pmap_t pmap,vm_offset_t sva,vm_offset_t eva,pt_entry_t mask,pt_entry_t nbits,bool invalidate)4583 pmap_mask_set_locked(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t mask,
4584     pt_entry_t nbits, bool invalidate)
4585 {
4586 	vm_offset_t va, va_next;
4587 	pd_entry_t *l0, *l1, *l2;
4588 	pt_entry_t *l3p, l3;
4589 
4590 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4591 	for (; sva < eva; sva = va_next) {
4592 		l0 = pmap_l0(pmap, sva);
4593 		if (pmap_load(l0) == 0) {
4594 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
4595 			if (va_next < sva)
4596 				va_next = eva;
4597 			continue;
4598 		}
4599 
4600 		va_next = (sva + L1_SIZE) & ~L1_OFFSET;
4601 		if (va_next < sva)
4602 			va_next = eva;
4603 		l1 = pmap_l0_to_l1(l0, sva);
4604 		if (pmap_load(l1) == 0)
4605 			continue;
4606 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
4607 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
4608 			KASSERT(va_next <= eva,
4609 			    ("partial update of non-transparent 1G page "
4610 			    "l1 %#lx sva %#lx eva %#lx va_next %#lx",
4611 			    pmap_load(l1), sva, eva, va_next));
4612 			MPASS((pmap_load(l1) & ATTR_SW_MANAGED) == 0);
4613 			if ((pmap_load(l1) & mask) != nbits) {
4614 				pmap_store(l1, (pmap_load(l1) & ~mask) | nbits);
4615 				if (invalidate)
4616 					pmap_s1_invalidate_page(pmap, sva, true);
4617 			}
4618 			continue;
4619 		}
4620 
4621 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
4622 		if (va_next < sva)
4623 			va_next = eva;
4624 
4625 		l2 = pmap_l1_to_l2(l1, sva);
4626 		if (pmap_load(l2) == 0)
4627 			continue;
4628 
4629 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
4630 			if (sva + L2_SIZE == va_next && eva >= va_next) {
4631 				pmap_protect_l2(pmap, l2, sva, mask, nbits);
4632 				continue;
4633 			} else if ((pmap_load(l2) & mask) == nbits ||
4634 			    pmap_demote_l2(pmap, l2, sva) == NULL)
4635 				continue;
4636 		}
4637 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
4638 		    ("pmap_protect: Invalid L2 entry after demotion"));
4639 
4640 		if (va_next > eva)
4641 			va_next = eva;
4642 
4643 		va = va_next;
4644 		for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
4645 		    sva += L3_SIZE) {
4646 			l3 = pmap_load(l3p);
4647 
4648 			/*
4649 			 * Go to the next L3 entry if the current one is
4650 			 * invalid or already has the desired access
4651 			 * restrictions in place.  (The latter case occurs
4652 			 * frequently.  For example, in a "buildworld"
4653 			 * workload, almost 1 out of 4 L3 entries already
4654 			 * have the desired restrictions.)
4655 			 */
4656 			if (!pmap_l3_valid(l3) || (l3 & mask) == nbits) {
4657 				if (va != va_next) {
4658 					if (invalidate)
4659 						pmap_s1_invalidate_range(pmap,
4660 						    va, sva, true);
4661 					va = va_next;
4662 				}
4663 				if ((l3 & ATTR_CONTIGUOUS) != 0) {
4664 					/*
4665 					 * Does this L3C page extend beyond
4666 					 * the requested range?  Handle the
4667 					 * possibility that "va_next" is zero.
4668 					 */
4669 					if ((sva | L3C_OFFSET) > va_next - 1)
4670 						break;
4671 
4672 					/*
4673 					 * Skip ahead to the last L3_PAGE
4674 					 * within this L3C page.
4675 					 */
4676 					l3p = (pt_entry_t *)((uintptr_t)l3p |
4677 					    ((L3C_ENTRIES - 1) *
4678 					    sizeof(pt_entry_t)));
4679 					sva |= L3C_SIZE - L3_SIZE;
4680 				}
4681 				continue;
4682 			}
4683 
4684 			if ((l3 & ATTR_CONTIGUOUS) != 0) {
4685 				/*
4686 				 * Is this entire set of contiguous L3 entries
4687 				 * being protected?  Handle the possibility
4688 				 * that "va_next" is zero because of address
4689 				 * wraparound.
4690 				 */
4691 				if ((sva & L3C_OFFSET) == 0 &&
4692 				    sva + L3C_OFFSET <= va_next - 1) {
4693 					pmap_mask_set_l3c(pmap, l3p, sva, &va,
4694 					    va_next, mask, nbits);
4695 					l3p += L3C_ENTRIES - 1;
4696 					sva += L3C_SIZE - L3_SIZE;
4697 					continue;
4698 				}
4699 
4700 				(void)pmap_demote_l3c(pmap, l3p, sva);
4701 
4702 				/*
4703 				 * The L3 entry's accessed bit may have changed.
4704 				 */
4705 				l3 = pmap_load(l3p);
4706 			}
4707 			while (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) |
4708 			    nbits))
4709 				cpu_spinwait();
4710 
4711 			/*
4712 			 * When a dirty read/write mapping is write protected,
4713 			 * update the page's dirty field.
4714 			 */
4715 			if ((l3 & ATTR_SW_MANAGED) != 0 &&
4716 			    (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
4717 			    pmap_pte_dirty(pmap, l3))
4718 				vm_page_dirty(PTE_TO_VM_PAGE(l3));
4719 
4720 			if (va == va_next)
4721 				va = sva;
4722 		}
4723 		if (va != va_next && invalidate)
4724 			pmap_s1_invalidate_range(pmap, va, sva, true);
4725 	}
4726 }
4727 
4728 static void
pmap_mask_set(pmap_t pmap,vm_offset_t sva,vm_offset_t eva,pt_entry_t mask,pt_entry_t nbits,bool invalidate)4729 pmap_mask_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t mask,
4730     pt_entry_t nbits, bool invalidate)
4731 {
4732 	PMAP_LOCK(pmap);
4733 	pmap_mask_set_locked(pmap, sva, eva, mask, nbits, invalidate);
4734 	PMAP_UNLOCK(pmap);
4735 }
4736 
4737 /*
4738  *	Set the physical protection on the
4739  *	specified range of this map as requested.
4740  */
4741 void
pmap_protect(pmap_t pmap,vm_offset_t sva,vm_offset_t eva,vm_prot_t prot)4742 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
4743 {
4744 	pt_entry_t mask, nbits;
4745 
4746 	PMAP_ASSERT_STAGE1(pmap);
4747 	KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
4748 	if (prot == VM_PROT_NONE) {
4749 		pmap_remove(pmap, sva, eva);
4750 		return;
4751 	}
4752 
4753 	mask = nbits = 0;
4754 	if ((prot & VM_PROT_WRITE) == 0) {
4755 		mask |= ATTR_S1_AP_RW_BIT | ATTR_SW_DBM;
4756 		nbits |= ATTR_S1_AP(ATTR_S1_AP_RO);
4757 	}
4758 	if ((prot & VM_PROT_EXECUTE) == 0) {
4759 		mask |= ATTR_S1_XN;
4760 		nbits |= ATTR_S1_XN;
4761 	}
4762 	if (pmap == kernel_pmap) {
4763 		mask |= ATTR_KERN_GP;
4764 		nbits |= ATTR_KERN_GP;
4765 	}
4766 	if (mask == 0)
4767 		return;
4768 
4769 	pmap_mask_set(pmap, sva, eva, mask, nbits, true);
4770 }
4771 
4772 void
pmap_disable_promotion(vm_offset_t sva,vm_size_t size)4773 pmap_disable_promotion(vm_offset_t sva, vm_size_t size)
4774 {
4775 
4776 	MPASS((sva & L3_OFFSET) == 0);
4777 	MPASS(((sva + size) & L3_OFFSET) == 0);
4778 
4779 	pmap_mask_set(kernel_pmap, sva, sva + size, ATTR_SW_NO_PROMOTE,
4780 	    ATTR_SW_NO_PROMOTE, false);
4781 }
4782 
4783 /*
4784  * Inserts the specified page table page into the specified pmap's collection
4785  * of idle page table pages.  Each of a pmap's page table pages is responsible
4786  * for mapping a distinct range of virtual addresses.  The pmap's collection is
4787  * ordered by this virtual address range.
4788  *
4789  * If "promoted" is false, then the page table page "mpte" must be zero filled;
4790  * "mpte"'s valid field will be set to 0.
4791  *
4792  * If "promoted" is true and "all_l3e_AF_set" is false, then "mpte" must
4793  * contain valid mappings with identical attributes except for ATTR_AF;
4794  * "mpte"'s valid field will be set to 1.
4795  *
4796  * If "promoted" and "all_l3e_AF_set" are both true, then "mpte" must contain
4797  * valid mappings with identical attributes including ATTR_AF; "mpte"'s valid
4798  * field will be set to VM_PAGE_BITS_ALL.
4799  */
4800 static __inline int
pmap_insert_pt_page(pmap_t pmap,vm_page_t mpte,bool promoted,bool all_l3e_AF_set)4801 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted,
4802     bool all_l3e_AF_set)
4803 {
4804 
4805 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4806 	KASSERT(promoted || !all_l3e_AF_set,
4807 	    ("a zero-filled PTP can't have ATTR_AF set in every PTE"));
4808 	mpte->valid = promoted ? (all_l3e_AF_set ? VM_PAGE_BITS_ALL : 1) : 0;
4809 	return (vm_radix_insert(&pmap->pm_root, mpte));
4810 }
4811 
4812 /*
4813  * Removes the page table page mapping the specified virtual address from the
4814  * specified pmap's collection of idle page table pages, and returns it.
4815  * Otherwise, returns NULL if there is no page table page corresponding to the
4816  * specified virtual address.
4817  */
4818 static __inline vm_page_t
pmap_remove_pt_page(pmap_t pmap,vm_offset_t va)4819 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
4820 {
4821 
4822 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4823 	return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
4824 }
4825 
4826 /*
4827  * Performs a break-before-make update of a pmap entry. This is needed when
4828  * either promoting or demoting pages to ensure the TLB doesn't get into an
4829  * inconsistent state.
4830  */
4831 static void
pmap_update_entry(pmap_t pmap,pd_entry_t * ptep,pd_entry_t newpte,vm_offset_t va,vm_size_t size)4832 pmap_update_entry(pmap_t pmap, pd_entry_t *ptep, pd_entry_t newpte,
4833     vm_offset_t va, vm_size_t size)
4834 {
4835 	register_t intr;
4836 
4837 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4838 	KASSERT((newpte & ATTR_SW_NO_PROMOTE) == 0,
4839 	    ("%s: Updating non-promote pte", __func__));
4840 
4841 	/*
4842 	 * Ensure we don't get switched out with the page table in an
4843 	 * inconsistent state. We also need to ensure no interrupts fire
4844 	 * as they may make use of an address we are about to invalidate.
4845 	 */
4846 	intr = intr_disable();
4847 
4848 	/*
4849 	 * Clear the old mapping's valid bit, but leave the rest of the entry
4850 	 * unchanged, so that a lockless, concurrent pmap_kextract() can still
4851 	 * lookup the physical address.
4852 	 */
4853 	pmap_clear_bits(ptep, ATTR_DESCR_VALID);
4854 
4855 	/*
4856 	 * When promoting, the L{1,2}_TABLE entry that is being replaced might
4857 	 * be cached, so we invalidate intermediate entries as well as final
4858 	 * entries.
4859 	 */
4860 	pmap_s1_invalidate_range(pmap, va, va + size, false);
4861 
4862 	/* Create the new mapping */
4863 	pmap_store(ptep, newpte);
4864 	dsb(ishst);
4865 
4866 	intr_restore(intr);
4867 }
4868 
4869 /*
4870  * Performs a break-before-make update of an ATTR_CONTIGUOUS mapping.
4871  */
4872 static void __nosanitizecoverage
pmap_update_strided(pmap_t pmap,pd_entry_t * ptep,pd_entry_t * ptep_end,pd_entry_t newpte,vm_offset_t va,vm_offset_t stride,vm_size_t size)4873 pmap_update_strided(pmap_t pmap, pd_entry_t *ptep, pd_entry_t *ptep_end,
4874     pd_entry_t newpte, vm_offset_t va, vm_offset_t stride, vm_size_t size)
4875 {
4876 	pd_entry_t *lip;
4877 	register_t intr;
4878 
4879 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4880 	KASSERT((newpte & ATTR_SW_NO_PROMOTE) == 0,
4881 	    ("%s: Updating non-promote pte", __func__));
4882 
4883 	/*
4884 	 * Ensure we don't get switched out with the page table in an
4885 	 * inconsistent state. We also need to ensure no interrupts fire
4886 	 * as they may make use of an address we are about to invalidate.
4887 	 */
4888 	intr = intr_disable();
4889 
4890 	/*
4891 	 * Clear the old mapping's valid bits, but leave the rest of each
4892 	 * entry unchanged, so that a lockless, concurrent pmap_kextract() can
4893 	 * still lookup the physical address.
4894 	 */
4895 	for (lip = ptep; lip < ptep_end; lip++)
4896 		pmap_clear_bits(lip, ATTR_DESCR_VALID);
4897 
4898 	/* Only final entries are changing. */
4899 	pmap_s1_invalidate_strided(pmap, va, va + size, stride, true);
4900 
4901 	/* Create the new mapping. */
4902 	for (lip = ptep; lip < ptep_end; lip++) {
4903 		pmap_store(lip, newpte);
4904 		newpte += stride;
4905 	}
4906 	dsb(ishst);
4907 
4908 	intr_restore(intr);
4909 }
4910 
4911 #if VM_NRESERVLEVEL > 0
4912 /*
4913  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
4914  * replace the many pv entries for the 4KB page mappings by a single pv entry
4915  * for the 2MB page mapping.
4916  */
4917 static void
pmap_pv_promote_l2(pmap_t pmap,vm_offset_t va,vm_paddr_t pa,struct rwlock ** lockp)4918 pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
4919     struct rwlock **lockp)
4920 {
4921 	struct md_page *pvh;
4922 	pv_entry_t pv;
4923 	vm_offset_t va_last;
4924 	vm_page_t m;
4925 
4926 	KASSERT((pa & L2_OFFSET) == 0,
4927 	    ("pmap_pv_promote_l2: pa is not 2mpage aligned"));
4928 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
4929 
4930 	/*
4931 	 * Transfer the first page's pv entry for this mapping to the 2mpage's
4932 	 * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
4933 	 * a transfer avoids the possibility that get_pv_entry() calls
4934 	 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
4935 	 * mappings that is being promoted.
4936 	 */
4937 	m = PHYS_TO_VM_PAGE(pa);
4938 	va = va & ~L2_OFFSET;
4939 	pv = pmap_pvh_remove(&m->md, pmap, va);
4940 	KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv not found"));
4941 	pvh = page_to_pvh(m);
4942 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
4943 	pvh->pv_gen++;
4944 	/* Free the remaining NPTEPG - 1 pv entries. */
4945 	va_last = va + L2_SIZE - PAGE_SIZE;
4946 	do {
4947 		m++;
4948 		va += PAGE_SIZE;
4949 		pmap_pvh_free(&m->md, pmap, va);
4950 	} while (va < va_last);
4951 }
4952 
4953 /*
4954  * Tries to promote the 512, contiguous 4KB page mappings that are within a
4955  * single level 2 table entry to a single 2MB page mapping.  For promotion
4956  * to occur, two conditions must be met: (1) the 4KB page mappings must map
4957  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
4958  * identical characteristics.
4959  */
4960 static bool
pmap_promote_l2(pmap_t pmap,pd_entry_t * l2,vm_offset_t va,vm_page_t mpte,struct rwlock ** lockp)4961 pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va, vm_page_t mpte,
4962     struct rwlock **lockp)
4963 {
4964 	pt_entry_t all_l3e_AF, *firstl3, *l3, newl2, oldl3, pa;
4965 
4966 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4967 
4968 	/*
4969 	 * Currently, this function only supports promotion on stage 1 pmaps
4970 	 * because it tests stage 1 specific fields and performs a break-
4971 	 * before-make sequence that is incorrect for stage 2 pmaps.
4972 	 */
4973 	if (pmap->pm_stage != PM_STAGE1 || !pmap_ps_enabled(pmap))
4974 		return (false);
4975 
4976 	/*
4977 	 * Examine the first L3E in the specified PTP.  Abort if this L3E is
4978 	 * ineligible for promotion...
4979 	 */
4980 	firstl3 = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l2)));
4981 	newl2 = pmap_load(firstl3);
4982 	if ((newl2 & ATTR_SW_NO_PROMOTE) != 0)
4983 		return (false);
4984 	/* ... is not the first physical page within an L2 block */
4985 	if ((PTE_TO_PHYS(newl2) & L2_OFFSET) != 0 ||
4986 	    ((newl2 & ATTR_DESCR_MASK) != L3_PAGE)) { /* ... or is invalid */
4987 		counter_u64_add(pmap_l2_p_failures, 1);
4988 		CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
4989 		    " in pmap %p", va, pmap);
4990 		return (false);
4991 	}
4992 
4993 	/*
4994 	 * Both here and in the below "for" loop, to allow for repromotion
4995 	 * after MADV_FREE, conditionally write protect a clean L3E before
4996 	 * possibly aborting the promotion due to other L3E attributes.  Why?
4997 	 * Suppose that MADV_FREE is applied to a part of a superpage, the
4998 	 * address range [S, E).  pmap_advise() will demote the superpage
4999 	 * mapping, destroy the 4KB page mapping at the end of [S, E), and
5000 	 * set AP_RO and clear AF in the L3Es for the rest of [S, E).  Later,
5001 	 * imagine that the memory in [S, E) is recycled, but the last 4KB
5002 	 * page in [S, E) is not the last to be rewritten, or simply accessed.
5003 	 * In other words, there is still a 4KB page in [S, E), call it P,
5004 	 * that is writeable but AP_RO is set and AF is clear in P's L3E.
5005 	 * Unless we write protect P before aborting the promotion, if and
5006 	 * when P is finally rewritten, there won't be a page fault to trigger
5007 	 * repromotion.
5008 	 */
5009 setl2:
5010 	if ((newl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
5011 	    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
5012 		/*
5013 		 * When the mapping is clean, i.e., ATTR_S1_AP_RO is set,
5014 		 * ATTR_SW_DBM can be cleared without a TLB invalidation.
5015 		 */
5016 		if (!atomic_fcmpset_64(firstl3, &newl2, newl2 & ~ATTR_SW_DBM))
5017 			goto setl2;
5018 		newl2 &= ~ATTR_SW_DBM;
5019 		CTR2(KTR_PMAP, "pmap_promote_l2: protect for va %#lx"
5020 		    " in pmap %p", va & ~L2_OFFSET, pmap);
5021 	}
5022 
5023 	/*
5024 	 * Examine each of the other L3Es in the specified PTP.  Abort if this
5025 	 * L3E maps an unexpected 4KB physical page or does not have identical
5026 	 * characteristics to the first L3E.  If ATTR_AF is not set in every
5027 	 * PTE, then request that the PTP be refilled on demotion.
5028 	 */
5029 	all_l3e_AF = newl2 & ATTR_AF;
5030 	pa = (PTE_TO_PHYS(newl2) | (newl2 & ATTR_DESCR_MASK))
5031 	    + L2_SIZE - PAGE_SIZE;
5032 	for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
5033 		oldl3 = pmap_load(l3);
5034 		if ((PTE_TO_PHYS(oldl3) | (oldl3 & ATTR_DESCR_MASK)) != pa) {
5035 			counter_u64_add(pmap_l2_p_failures, 1);
5036 			CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
5037 			    " in pmap %p", va, pmap);
5038 			return (false);
5039 		}
5040 setl3:
5041 		if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
5042 		    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
5043 			/*
5044 			 * When the mapping is clean, i.e., ATTR_S1_AP_RO is
5045 			 * set, ATTR_SW_DBM can be cleared without a TLB
5046 			 * invalidation.
5047 			 */
5048 			if (!atomic_fcmpset_64(l3, &oldl3, oldl3 &
5049 			    ~ATTR_SW_DBM))
5050 				goto setl3;
5051 			oldl3 &= ~ATTR_SW_DBM;
5052 		}
5053 		if ((oldl3 & ATTR_PROMOTE) != (newl2 & ATTR_PROMOTE)) {
5054 			counter_u64_add(pmap_l2_p_failures, 1);
5055 			CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
5056 			    " in pmap %p", va, pmap);
5057 			return (false);
5058 		}
5059 		all_l3e_AF &= oldl3;
5060 		pa -= PAGE_SIZE;
5061 	}
5062 
5063 	/*
5064 	 * Unless all PTEs have ATTR_AF set, clear it from the superpage
5065 	 * mapping, so that promotions triggered by speculative mappings,
5066 	 * such as pmap_enter_quick(), don't automatically mark the
5067 	 * underlying pages as referenced.
5068 	 */
5069 	newl2 &= ~(ATTR_CONTIGUOUS | ATTR_AF | ATTR_DESCR_MASK) | all_l3e_AF;
5070 
5071 	/*
5072 	 * Save the page table page in its current state until the L2
5073 	 * mapping the superpage is demoted by pmap_demote_l2() or
5074 	 * destroyed by pmap_remove_l3().
5075 	 */
5076 	if (mpte == NULL)
5077 		mpte = PTE_TO_VM_PAGE(pmap_load(l2));
5078 	KASSERT(mpte >= vm_page_array &&
5079 	    mpte < &vm_page_array[vm_page_array_size],
5080 	    ("pmap_promote_l2: page table page is out of range"));
5081 	KASSERT(mpte->pindex == pmap_l2_pindex(va),
5082 	    ("pmap_promote_l2: page table page's pindex is wrong"));
5083 	if (pmap_insert_pt_page(pmap, mpte, true, all_l3e_AF != 0)) {
5084 		counter_u64_add(pmap_l2_p_failures, 1);
5085 		CTR2(KTR_PMAP,
5086 		    "pmap_promote_l2: failure for va %#lx in pmap %p", va,
5087 		    pmap);
5088 		return (false);
5089 	}
5090 
5091 	if ((newl2 & ATTR_SW_MANAGED) != 0)
5092 		pmap_pv_promote_l2(pmap, va, PTE_TO_PHYS(newl2), lockp);
5093 
5094 	pmap_update_entry(pmap, l2, newl2 | L2_BLOCK, va & ~L2_OFFSET, L2_SIZE);
5095 
5096 	counter_u64_add(pmap_l2_promotions, 1);
5097 	CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
5098 	    pmap);
5099 	return (true);
5100 }
5101 
5102 /*
5103  * Tries to promote an aligned, contiguous set of base page mappings to a
5104  * single L3C page mapping.  For promotion to occur, two conditions must be
5105  * met: (1) the base page mappings must map aligned, contiguous physical
5106  * memory and (2) the base page mappings must have identical characteristics
5107  * except for the accessed flag.
5108  */
5109 static bool
pmap_promote_l3c(pmap_t pmap,pd_entry_t * l3p,vm_offset_t va)5110 pmap_promote_l3c(pmap_t pmap, pd_entry_t *l3p, vm_offset_t va)
5111 {
5112 	pd_entry_t all_l3e_AF, firstl3c, *l3, oldl3, pa;
5113 
5114 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5115 
5116 	/*
5117 	 * Currently, this function only supports promotion on stage 1 pmaps
5118 	 * because it tests stage 1 specific fields and performs a break-
5119 	 * before-make sequence that is incorrect for stage 2 pmaps.
5120 	 */
5121 	if (pmap->pm_stage != PM_STAGE1 || !pmap_ps_enabled(pmap))
5122 		return (false);
5123 
5124 	/*
5125 	 * Compute the address of the first L3 entry in the superpage
5126 	 * candidate.
5127 	 */
5128 	l3p = (pt_entry_t *)((uintptr_t)l3p & ~((L3C_ENTRIES *
5129 	    sizeof(pt_entry_t)) - 1));
5130 
5131 	firstl3c = pmap_load(l3p);
5132 
5133 	/*
5134 	 * Examine the first L3 entry. Abort if this L3E is ineligible for
5135 	 * promotion...
5136 	 */
5137 	if ((firstl3c & ATTR_SW_NO_PROMOTE) != 0)
5138 		return (false);
5139 	/* ...is not properly aligned... */
5140 	if ((PTE_TO_PHYS(firstl3c) & L3C_OFFSET) != 0 ||
5141 	    (firstl3c & ATTR_DESCR_MASK) != L3_PAGE) { /* ...or is invalid. */
5142 		counter_u64_add(pmap_l3c_p_failures, 1);
5143 		CTR2(KTR_PMAP, "pmap_promote_l3c: failure for va %#lx"
5144 		    " in pmap %p", va, pmap);
5145 		return (false);
5146 	}
5147 
5148 	/*
5149 	 * If the first L3 entry is a clean read-write mapping, convert it
5150 	 * to a read-only mapping.  See pmap_promote_l2() for the rationale.
5151 	 */
5152 set_first:
5153 	if ((firstl3c & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
5154 	    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
5155 		/*
5156 		 * When the mapping is clean, i.e., ATTR_S1_AP_RO is set,
5157 		 * ATTR_SW_DBM can be cleared without a TLB invalidation.
5158 		 */
5159 		if (!atomic_fcmpset_64(l3p, &firstl3c, firstl3c & ~ATTR_SW_DBM))
5160 			goto set_first;
5161 		firstl3c &= ~ATTR_SW_DBM;
5162 		CTR2(KTR_PMAP, "pmap_promote_l3c: protect for va %#lx"
5163 		    " in pmap %p", va & ~L3C_OFFSET, pmap);
5164 	}
5165 
5166 	/*
5167 	 * Check that the rest of the L3 entries are compatible with the first,
5168 	 * and convert clean read-write mappings to read-only mappings.
5169 	 */
5170 	all_l3e_AF = firstl3c & ATTR_AF;
5171 	pa = (PTE_TO_PHYS(firstl3c) | (firstl3c & ATTR_DESCR_MASK)) +
5172 	    L3C_SIZE - PAGE_SIZE;
5173 	for (l3 = l3p + L3C_ENTRIES - 1; l3 > l3p; l3--) {
5174 		oldl3 = pmap_load(l3);
5175 		if ((PTE_TO_PHYS(oldl3) | (oldl3 & ATTR_DESCR_MASK)) != pa) {
5176 			counter_u64_add(pmap_l3c_p_failures, 1);
5177 			CTR2(KTR_PMAP, "pmap_promote_l3c: failure for va %#lx"
5178 			    " in pmap %p", va, pmap);
5179 			return (false);
5180 		}
5181 set_l3:
5182 		if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
5183 		    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
5184 			/*
5185 			 * When the mapping is clean, i.e., ATTR_S1_AP_RO is
5186 			 * set, ATTR_SW_DBM can be cleared without a TLB
5187 			 * invalidation.
5188 			 */
5189 			if (!atomic_fcmpset_64(l3, &oldl3, oldl3 &
5190 			    ~ATTR_SW_DBM))
5191 				goto set_l3;
5192 			oldl3 &= ~ATTR_SW_DBM;
5193 			CTR2(KTR_PMAP, "pmap_promote_l3c: protect for va %#lx"
5194 			    " in pmap %p", (oldl3 & ~ATTR_MASK & L3C_OFFSET) |
5195 			    (va & ~L3C_OFFSET), pmap);
5196 		}
5197 		if ((oldl3 & ATTR_PROMOTE) != (firstl3c & ATTR_PROMOTE)) {
5198 			counter_u64_add(pmap_l3c_p_failures, 1);
5199 			CTR2(KTR_PMAP, "pmap_promote_l3c: failure for va %#lx"
5200 			    " in pmap %p", va, pmap);
5201 			return (false);
5202 		}
5203 		all_l3e_AF &= oldl3;
5204 		pa -= PAGE_SIZE;
5205 	}
5206 
5207 	/*
5208 	 * Unless all PTEs have ATTR_AF set, clear it from the superpage
5209 	 * mapping, so that promotions triggered by speculative mappings,
5210 	 * such as pmap_enter_quick(), don't automatically mark the
5211 	 * underlying pages as referenced.
5212 	 */
5213 	firstl3c &= ~ATTR_AF | all_l3e_AF;
5214 
5215 	/*
5216 	 * Remake the mappings with the contiguous bit set.
5217 	 */
5218 	pmap_update_strided(pmap, l3p, l3p + L3C_ENTRIES, firstl3c |
5219 	    ATTR_CONTIGUOUS, va & ~L3C_OFFSET, L3_SIZE, L3C_SIZE);
5220 
5221 	counter_u64_add(pmap_l3c_promotions, 1);
5222 	CTR2(KTR_PMAP, "pmap_promote_l3c: success for va %#lx in pmap %p", va,
5223 	    pmap);
5224 	return (true);
5225 }
5226 #endif /* VM_NRESERVLEVEL > 0 */
5227 
5228 static int
pmap_enter_largepage(pmap_t pmap,vm_offset_t va,pt_entry_t pte,int flags,int psind)5229 pmap_enter_largepage(pmap_t pmap, vm_offset_t va, pt_entry_t pte, int flags,
5230     int psind)
5231 {
5232 	pd_entry_t *l0p, *l1p, *l2p, *l3p, newpte, origpte, *tl3p;
5233 	vm_page_t mp;
5234 
5235 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5236 	KASSERT(psind > 0 && psind < MAXPAGESIZES,
5237 	    ("psind %d unexpected", psind));
5238 	KASSERT((PTE_TO_PHYS(pte) & (pagesizes[psind] - 1)) == 0,
5239 	    ("unaligned phys address %#lx pte %#lx psind %d",
5240 	    PTE_TO_PHYS(pte), pte, psind));
5241 
5242 restart:
5243 	newpte = pte;
5244 	if (!pmap_bti_same(pmap, va, va + pagesizes[psind], &newpte))
5245 		return (KERN_PROTECTION_FAILURE);
5246 	if (psind == 3) {
5247 		PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
5248 
5249 		KASSERT(pagesizes[psind] == L1_SIZE,
5250 		    ("pagesizes[%d] != L1_SIZE", psind));
5251 		l0p = pmap_l0(pmap, va);
5252 		if ((pmap_load(l0p) & ATTR_DESCR_VALID) == 0) {
5253 			mp = _pmap_alloc_l3(pmap, pmap_l0_pindex(va), NULL);
5254 			if (mp == NULL) {
5255 				if ((flags & PMAP_ENTER_NOSLEEP) != 0)
5256 					return (KERN_RESOURCE_SHORTAGE);
5257 				PMAP_UNLOCK(pmap);
5258 				vm_wait(NULL);
5259 				PMAP_LOCK(pmap);
5260 				goto restart;
5261 			}
5262 			l1p = pmap_l0_to_l1(l0p, va);
5263 			KASSERT(l1p != NULL, ("va %#lx lost l1 entry", va));
5264 			origpte = pmap_load(l1p);
5265 		} else {
5266 			l1p = pmap_l0_to_l1(l0p, va);
5267 			KASSERT(l1p != NULL, ("va %#lx lost l1 entry", va));
5268 			origpte = pmap_load(l1p);
5269 			if ((origpte & ATTR_DESCR_VALID) == 0) {
5270 				mp = PTE_TO_VM_PAGE(pmap_load(l0p));
5271 				mp->ref_count++;
5272 			}
5273 		}
5274 		KASSERT((PTE_TO_PHYS(origpte) == PTE_TO_PHYS(newpte) &&
5275 		    (origpte & ATTR_DESCR_MASK) == L1_BLOCK) ||
5276 		    (origpte & ATTR_DESCR_VALID) == 0,
5277 		    ("va %#lx changing 1G phys page l1 %#lx newpte %#lx",
5278 		    va, origpte, newpte));
5279 		pmap_store(l1p, newpte);
5280 	} else if (psind == 2) {
5281 		KASSERT(pagesizes[psind] == L2_SIZE,
5282 		    ("pagesizes[%d] != L2_SIZE", psind));
5283 		l2p = pmap_l2(pmap, va);
5284 		if (l2p == NULL) {
5285 			mp = _pmap_alloc_l3(pmap, pmap_l1_pindex(va), NULL);
5286 			if (mp == NULL) {
5287 				if ((flags & PMAP_ENTER_NOSLEEP) != 0)
5288 					return (KERN_RESOURCE_SHORTAGE);
5289 				PMAP_UNLOCK(pmap);
5290 				vm_wait(NULL);
5291 				PMAP_LOCK(pmap);
5292 				goto restart;
5293 			}
5294 			l2p = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
5295 			l2p = &l2p[pmap_l2_index(va)];
5296 			origpte = pmap_load(l2p);
5297 		} else {
5298 			l1p = pmap_l1(pmap, va);
5299 			origpte = pmap_load(l2p);
5300 			if ((origpte & ATTR_DESCR_VALID) == 0) {
5301 				mp = PTE_TO_VM_PAGE(pmap_load(l1p));
5302 				mp->ref_count++;
5303 			}
5304 		}
5305 		KASSERT((origpte & ATTR_DESCR_VALID) == 0 ||
5306 		    ((origpte & ATTR_DESCR_MASK) == L2_BLOCK &&
5307 		    PTE_TO_PHYS(origpte) == PTE_TO_PHYS(newpte)),
5308 		    ("va %#lx changing 2M phys page l2 %#lx newpte %#lx",
5309 		    va, origpte, newpte));
5310 		pmap_store(l2p, newpte);
5311 	} else /* (psind == 1) */ {
5312 		KASSERT(pagesizes[psind] == L3C_SIZE,
5313 		    ("pagesizes[%d] != L3C_SIZE", psind));
5314 		l2p = pmap_l2(pmap, va);
5315 		if (l2p == NULL || (pmap_load(l2p) & ATTR_DESCR_VALID) == 0) {
5316 			mp = _pmap_alloc_l3(pmap, pmap_l2_pindex(va), NULL);
5317 			if (mp == NULL) {
5318 				if ((flags & PMAP_ENTER_NOSLEEP) != 0)
5319 					return (KERN_RESOURCE_SHORTAGE);
5320 				PMAP_UNLOCK(pmap);
5321 				vm_wait(NULL);
5322 				PMAP_LOCK(pmap);
5323 				goto restart;
5324 			}
5325 			mp->ref_count += L3C_ENTRIES - 1;
5326 			l3p = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
5327 			l3p = &l3p[pmap_l3_index(va)];
5328 		} else {
5329 			l3p = pmap_l2_to_l3(l2p, va);
5330 			if ((pmap_load(l3p) & ATTR_DESCR_VALID) == 0) {
5331 				mp = PTE_TO_VM_PAGE(pmap_load(l2p));
5332 				mp->ref_count += L3C_ENTRIES;
5333 			}
5334 		}
5335 		for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
5336 			origpte = pmap_load(tl3p);
5337 			KASSERT((origpte & ATTR_DESCR_VALID) == 0 ||
5338 			    ((origpte & ATTR_CONTIGUOUS) != 0 &&
5339 			    PTE_TO_PHYS(origpte) == PTE_TO_PHYS(newpte)),
5340 			    ("va %#lx changing 64K phys page l3 %#lx newpte %#lx",
5341 			    va, origpte, newpte));
5342 			pmap_store(tl3p, newpte);
5343 			newpte += L3_SIZE;
5344 		}
5345 	}
5346 	dsb(ishst);
5347 
5348 	if ((origpte & ATTR_DESCR_VALID) == 0)
5349 		pmap_resident_count_inc(pmap, pagesizes[psind] / PAGE_SIZE);
5350 	if ((newpte & ATTR_SW_WIRED) != 0 && (origpte & ATTR_SW_WIRED) == 0)
5351 		pmap->pm_stats.wired_count += pagesizes[psind] / PAGE_SIZE;
5352 	else if ((newpte & ATTR_SW_WIRED) == 0 &&
5353 	    (origpte & ATTR_SW_WIRED) != 0)
5354 		pmap->pm_stats.wired_count -= pagesizes[psind] / PAGE_SIZE;
5355 
5356 	return (KERN_SUCCESS);
5357 }
5358 
5359 /*
5360  *	Insert the given physical page (p) at
5361  *	the specified virtual address (v) in the
5362  *	target physical map with the protection requested.
5363  *
5364  *	If specified, the page will be wired down, meaning
5365  *	that the related pte can not be reclaimed.
5366  *
5367  *	NB:  This is the only routine which MAY NOT lazy-evaluate
5368  *	or lose information.  That is, this routine must actually
5369  *	insert this page into the given map NOW.
5370  */
5371 int
pmap_enter(pmap_t pmap,vm_offset_t va,vm_page_t m,vm_prot_t prot,u_int flags,int8_t psind)5372 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
5373     u_int flags, int8_t psind)
5374 {
5375 	struct rwlock *lock;
5376 	pd_entry_t *pde;
5377 	pt_entry_t new_l3, orig_l3;
5378 	pt_entry_t *l2, *l3;
5379 	pv_entry_t pv;
5380 	vm_paddr_t opa, pa;
5381 	vm_page_t mpte, om;
5382 	bool nosleep;
5383 	int full_lvl, lvl, rv;
5384 
5385 	KASSERT(ADDR_IS_CANONICAL(va),
5386 	    ("%s: Address not in canonical form: %lx", __func__, va));
5387 
5388 	va = trunc_page(va);
5389 	if ((m->oflags & VPO_UNMANAGED) == 0)
5390 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
5391 	pa = VM_PAGE_TO_PHYS(m);
5392 	new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_AF | pmap_sh_attr |
5393 	    L3_PAGE);
5394 	new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr);
5395 	new_l3 |= pmap_pte_prot(pmap, prot);
5396 	if ((flags & PMAP_ENTER_WIRED) != 0)
5397 		new_l3 |= ATTR_SW_WIRED;
5398 	if (pmap->pm_stage == PM_STAGE1) {
5399 		if (ADDR_IS_USER(va))
5400 			new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
5401 		else
5402 			new_l3 |= ATTR_S1_UXN;
5403 		if (pmap != kernel_pmap)
5404 			new_l3 |= ATTR_S1_nG;
5405 	} else {
5406 		/*
5407 		 * Clear the access flag on executable mappings, this will be
5408 		 * set later when the page is accessed. The fault handler is
5409 		 * required to invalidate the I-cache.
5410 		 *
5411 		 * TODO: Switch to the valid flag to allow hardware management
5412 		 * of the access flag. Much of the pmap code assumes the
5413 		 * valid flag is set and fails to destroy the old page tables
5414 		 * correctly if it is clear.
5415 		 */
5416 		if (prot & VM_PROT_EXECUTE)
5417 			new_l3 &= ~ATTR_AF;
5418 	}
5419 	if ((m->oflags & VPO_UNMANAGED) == 0) {
5420 		new_l3 |= ATTR_SW_MANAGED;
5421 		if ((prot & VM_PROT_WRITE) != 0) {
5422 			new_l3 |= ATTR_SW_DBM;
5423 			if ((flags & VM_PROT_WRITE) == 0) {
5424 				if (pmap->pm_stage == PM_STAGE1)
5425 					new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
5426 				else
5427 					new_l3 &=
5428 					    ~ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
5429 			}
5430 		}
5431 	}
5432 
5433 	CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
5434 
5435 	lock = NULL;
5436 	PMAP_LOCK(pmap);
5437 	if ((flags & PMAP_ENTER_LARGEPAGE) != 0) {
5438 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
5439 		    ("managed largepage va %#lx flags %#x", va, flags));
5440 		if (psind == 3) {
5441 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
5442 			new_l3 &= ~L3_PAGE;
5443 			new_l3 |= L1_BLOCK;
5444 		} else if (psind == 2) {
5445 			new_l3 &= ~L3_PAGE;
5446 			new_l3 |= L2_BLOCK;
5447 		} else /* (psind == 1) */
5448 			new_l3 |= ATTR_CONTIGUOUS;
5449 		rv = pmap_enter_largepage(pmap, va, new_l3, flags, psind);
5450 		goto out;
5451 	}
5452 	if (psind == 2) {
5453 		/* Assert the required virtual and physical alignment. */
5454 		KASSERT((va & L2_OFFSET) == 0, ("pmap_enter: va unaligned"));
5455 		KASSERT(m->psind > 1, ("pmap_enter: m->psind < psind"));
5456 		rv = pmap_enter_l2(pmap, va, (new_l3 & ~L3_PAGE) | L2_BLOCK,
5457 		    flags, m, &lock);
5458 		goto out;
5459 	}
5460 	mpte = NULL;
5461 	if (psind == 1) {
5462 		KASSERT((va & L3C_OFFSET) == 0, ("pmap_enter: va unaligned"));
5463 		KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
5464 		rv = pmap_enter_l3c(pmap, va, new_l3 | ATTR_CONTIGUOUS, flags,
5465 		    m, &mpte, &lock);
5466 #if VM_NRESERVLEVEL > 0
5467 		/*
5468 		 * Attempt L2 promotion, if both the PTP and a level 1
5469 		 * reservation are fully populated.
5470 		 */
5471 		if (rv == KERN_SUCCESS &&
5472 		    (mpte == NULL || mpte->ref_count == NL3PG) &&
5473 		    (m->flags & PG_FICTITIOUS) == 0 &&
5474 		    vm_reserv_level_iffullpop(m) == 1) {
5475 			pde = pmap_l2(pmap, va);
5476 			(void)pmap_promote_l2(pmap, pde, va, mpte, &lock);
5477 		}
5478 #endif
5479 		goto out;
5480 	}
5481 
5482 	/*
5483 	 * In the case that a page table page is not
5484 	 * resident, we are creating it here.
5485 	 */
5486 retry:
5487 	pde = pmap_pde(pmap, va, &lvl);
5488 	if (pde != NULL && lvl == 2) {
5489 		l3 = pmap_l2_to_l3(pde, va);
5490 		if (ADDR_IS_USER(va) && mpte == NULL) {
5491 			mpte = PTE_TO_VM_PAGE(pmap_load(pde));
5492 			mpte->ref_count++;
5493 		}
5494 		goto havel3;
5495 	} else if (pde != NULL && lvl == 1) {
5496 		l2 = pmap_l1_to_l2(pde, va);
5497 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
5498 		    (l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
5499 			l3 = &l3[pmap_l3_index(va)];
5500 			if (ADDR_IS_USER(va)) {
5501 				mpte = PTE_TO_VM_PAGE(pmap_load(l2));
5502 				mpte->ref_count++;
5503 			}
5504 			goto havel3;
5505 		}
5506 		/* We need to allocate an L3 table. */
5507 	}
5508 	if (ADDR_IS_USER(va)) {
5509 		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
5510 
5511 		/*
5512 		 * We use _pmap_alloc_l3() instead of pmap_alloc_l3() in order
5513 		 * to handle the possibility that a superpage mapping for "va"
5514 		 * was created while we slept.
5515 		 */
5516 		mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va),
5517 		    nosleep ? NULL : &lock);
5518 		if (mpte == NULL && nosleep) {
5519 			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
5520 			rv = KERN_RESOURCE_SHORTAGE;
5521 			goto out;
5522 		}
5523 		goto retry;
5524 	} else
5525 		panic("pmap_enter: missing L3 table for kernel va %#lx", va);
5526 
5527 havel3:
5528 	orig_l3 = pmap_load(l3);
5529 	opa = PTE_TO_PHYS(orig_l3);
5530 	pv = NULL;
5531 	new_l3 |= pmap_pte_bti(pmap, va);
5532 
5533 	/*
5534 	 * Is the specified virtual address already mapped?
5535 	 */
5536 	if (pmap_l3_valid(orig_l3)) {
5537 		/*
5538 		 * Wiring change, just update stats. We don't worry about
5539 		 * wiring PT pages as they remain resident as long as there
5540 		 * are valid mappings in them. Hence, if a user page is wired,
5541 		 * the PT page will be also.
5542 		 */
5543 		if ((flags & PMAP_ENTER_WIRED) != 0 &&
5544 		    (orig_l3 & ATTR_SW_WIRED) == 0)
5545 			pmap->pm_stats.wired_count++;
5546 		else if ((flags & PMAP_ENTER_WIRED) == 0 &&
5547 		    (orig_l3 & ATTR_SW_WIRED) != 0)
5548 			pmap->pm_stats.wired_count--;
5549 
5550 		/*
5551 		 * Remove the extra PT page reference.
5552 		 */
5553 		if (mpte != NULL) {
5554 			mpte->ref_count--;
5555 			KASSERT(mpte->ref_count > 0,
5556 			    ("pmap_enter: missing reference to page table page,"
5557 			     " va: 0x%lx", va));
5558 		}
5559 
5560 		/*
5561 		 * Has the physical page changed?
5562 		 */
5563 		if (opa == pa) {
5564 			/*
5565 			 * No, might be a protection or wiring change.
5566 			 */
5567 			if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
5568 			    (new_l3 & ATTR_SW_DBM) != 0)
5569 				vm_page_aflag_set(m, PGA_WRITEABLE);
5570 			goto validate;
5571 		}
5572 
5573 		/*
5574 		 * The physical page has changed.  Temporarily invalidate
5575 		 * the mapping.
5576 		 */
5577 		if ((orig_l3 & ATTR_CONTIGUOUS) != 0)
5578 			(void)pmap_demote_l3c(pmap, l3, va);
5579 		orig_l3 = pmap_load_clear(l3);
5580 		KASSERT(PTE_TO_PHYS(orig_l3) == opa,
5581 		    ("pmap_enter: unexpected pa update for %#lx", va));
5582 		if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
5583 			om = PHYS_TO_VM_PAGE(opa);
5584 
5585 			/*
5586 			 * The pmap lock is sufficient to synchronize with
5587 			 * concurrent calls to pmap_page_test_mappings() and
5588 			 * pmap_ts_referenced().
5589 			 */
5590 			if (pmap_pte_dirty(pmap, orig_l3))
5591 				vm_page_dirty(om);
5592 			if ((orig_l3 & ATTR_AF) != 0) {
5593 				pmap_invalidate_page(pmap, va, true);
5594 				vm_page_aflag_set(om, PGA_REFERENCED);
5595 			}
5596 			CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, om);
5597 			pv = pmap_pvh_remove(&om->md, pmap, va);
5598 			if ((m->oflags & VPO_UNMANAGED) != 0)
5599 				free_pv_entry(pmap, pv);
5600 			if ((om->a.flags & PGA_WRITEABLE) != 0 &&
5601 			    TAILQ_EMPTY(&om->md.pv_list) &&
5602 			    ((om->flags & PG_FICTITIOUS) != 0 ||
5603 			    TAILQ_EMPTY(&page_to_pvh(om)->pv_list)))
5604 				vm_page_aflag_clear(om, PGA_WRITEABLE);
5605 		} else {
5606 			KASSERT((orig_l3 & ATTR_AF) != 0,
5607 			    ("pmap_enter: unmanaged mapping lacks ATTR_AF"));
5608 			pmap_invalidate_page(pmap, va, true);
5609 		}
5610 		orig_l3 = 0;
5611 	} else {
5612 		/*
5613 		 * Increment the counters.
5614 		 */
5615 		if ((new_l3 & ATTR_SW_WIRED) != 0)
5616 			pmap->pm_stats.wired_count++;
5617 		pmap_resident_count_inc(pmap, 1);
5618 	}
5619 	/*
5620 	 * Enter on the PV list if part of our managed memory.
5621 	 */
5622 	if ((m->oflags & VPO_UNMANAGED) == 0) {
5623 		if (pv == NULL) {
5624 			pv = get_pv_entry(pmap, &lock);
5625 			pv->pv_va = va;
5626 		}
5627 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
5628 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5629 		m->md.pv_gen++;
5630 		if ((new_l3 & ATTR_SW_DBM) != 0)
5631 			vm_page_aflag_set(m, PGA_WRITEABLE);
5632 	}
5633 
5634 validate:
5635 	if (pmap->pm_stage == PM_STAGE1) {
5636 		/*
5637 		 * Sync icache if exec permission and attribute
5638 		 * VM_MEMATTR_WRITE_BACK is set. Do it now, before the mapping
5639 		 * is stored and made valid for hardware table walk. If done
5640 		 * later, then other can access this page before caches are
5641 		 * properly synced. Don't do it for kernel memory which is
5642 		 * mapped with exec permission even if the memory isn't going
5643 		 * to hold executable code. The only time when icache sync is
5644 		 * needed is after kernel module is loaded and the relocation
5645 		 * info is processed. And it's done in elf_cpu_load_file().
5646 		*/
5647 		if ((prot & VM_PROT_EXECUTE) &&  pmap != kernel_pmap &&
5648 		    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK &&
5649 		    (opa != pa || (orig_l3 & ATTR_S1_XN))) {
5650 			PMAP_ASSERT_STAGE1(pmap);
5651 			cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa),
5652 			    PAGE_SIZE);
5653 		}
5654 	} else {
5655 		cpu_dcache_wb_range((void *)PHYS_TO_DMAP(pa), PAGE_SIZE);
5656 	}
5657 
5658 	/*
5659 	 * Update the L3 entry
5660 	 */
5661 	if (pmap_l3_valid(orig_l3)) {
5662 		KASSERT(opa == pa, ("pmap_enter: invalid update"));
5663 		if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) {
5664 			/* same PA, different attributes */
5665 			if ((orig_l3 & ATTR_CONTIGUOUS) != 0)
5666 				(void)pmap_demote_l3c(pmap, l3, va);
5667 			orig_l3 = pmap_load_store(l3, new_l3);
5668 			pmap_invalidate_page(pmap, va, true);
5669 			if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
5670 			    pmap_pte_dirty(pmap, orig_l3))
5671 				vm_page_dirty(m);
5672 		} else {
5673 			/*
5674 			 * orig_l3 == new_l3
5675 			 * This can happens if multiple threads simultaneously
5676 			 * access not yet mapped page. This bad for performance
5677 			 * since this can cause full demotion-NOP-promotion
5678 			 * cycle.
5679 			 * Another possible reasons are:
5680 			 * - VM and pmap memory layout are diverged
5681 			 * - tlb flush is missing somewhere and CPU doesn't see
5682 			 *   actual mapping.
5683 			 */
5684 			CTR4(KTR_PMAP, "%s: already mapped page - "
5685 			    "pmap %p va 0x%#lx pte 0x%lx",
5686 			    __func__, pmap, va, new_l3);
5687 		}
5688 	} else {
5689 		/* New mapping */
5690 		pmap_store(l3, new_l3);
5691 		dsb(ishst);
5692 	}
5693 
5694 #if VM_NRESERVLEVEL > 0
5695 	/*
5696 	 * First, attempt L3C promotion, if the virtual and physical addresses
5697 	 * are aligned with each other and an underlying reservation has the
5698 	 * neighboring L3 pages allocated.  The first condition is simply an
5699 	 * optimization that recognizes some eventual promotion failures early
5700 	 * at a lower run-time cost.  Then, if both a level 1 reservation and
5701 	 * the PTP are fully populated, attempt L2 promotion.
5702 	 */
5703 	if ((va & L3C_OFFSET) == (pa & L3C_OFFSET) &&
5704 	    (m->flags & PG_FICTITIOUS) == 0 &&
5705 	    (full_lvl = vm_reserv_level_iffullpop(m)) >= 0 &&
5706 	    pmap_promote_l3c(pmap, l3, va) &&
5707 	    full_lvl == 1 && (mpte == NULL || mpte->ref_count == NL3PG))
5708 		(void)pmap_promote_l2(pmap, pde, va, mpte, &lock);
5709 #endif
5710 
5711 	rv = KERN_SUCCESS;
5712 out:
5713 	if (lock != NULL)
5714 		rw_wunlock(lock);
5715 	PMAP_UNLOCK(pmap);
5716 	return (rv);
5717 }
5718 
5719 /*
5720  * Tries to create a read- and/or execute-only L2 page mapping.  Returns
5721  * KERN_SUCCESS if the mapping was created.  Otherwise, returns an error
5722  * value.  See pmap_enter_l2() for the possible error values when "no sleep",
5723  * "no replace", and "no reclaim" are specified.
5724  */
5725 static int
pmap_enter_l2_rx(pmap_t pmap,vm_offset_t va,vm_page_t m,vm_prot_t prot,struct rwlock ** lockp)5726 pmap_enter_l2_rx(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
5727     struct rwlock **lockp)
5728 {
5729 	pd_entry_t new_l2;
5730 
5731 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5732 	PMAP_ASSERT_STAGE1(pmap);
5733 	KASSERT(ADDR_IS_CANONICAL(va),
5734 	    ("%s: Address not in canonical form: %lx", __func__, va));
5735 
5736 	new_l2 = (pd_entry_t)(VM_PAGE_TO_PTE(m) | pmap_sh_attr |
5737 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
5738 	    L2_BLOCK);
5739 	if ((m->oflags & VPO_UNMANAGED) == 0)
5740 		new_l2 |= ATTR_SW_MANAGED;
5741 	else
5742 		new_l2 |= ATTR_AF;
5743 	if ((prot & VM_PROT_EXECUTE) == 0 ||
5744 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
5745 		new_l2 |= ATTR_S1_XN;
5746 	if (ADDR_IS_USER(va))
5747 		new_l2 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
5748 	else
5749 		new_l2 |= ATTR_S1_UXN;
5750 	if (pmap != kernel_pmap)
5751 		new_l2 |= ATTR_S1_nG;
5752 	return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
5753 	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, m, lockp));
5754 }
5755 
5756 /*
5757  * Returns true if every page table entry in the specified page table is
5758  * zero.
5759  */
5760 static bool
pmap_every_pte_zero(vm_paddr_t pa)5761 pmap_every_pte_zero(vm_paddr_t pa)
5762 {
5763 	pt_entry_t *pt_end, *pte;
5764 
5765 	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
5766 	pte = (pt_entry_t *)PHYS_TO_DMAP(pa);
5767 	for (pt_end = pte + Ln_ENTRIES; pte < pt_end; pte++) {
5768 		if (*pte != 0)
5769 			return (false);
5770 	}
5771 	return (true);
5772 }
5773 
5774 /*
5775  * Tries to create the specified L2 page mapping.  Returns KERN_SUCCESS if
5776  * the mapping was created, and one of KERN_FAILURE, KERN_NO_SPACE, or
5777  * KERN_RESOURCE_SHORTAGE otherwise.  Returns KERN_FAILURE if
5778  * PMAP_ENTER_NOREPLACE was specified and a base page mapping already exists
5779  * within the L2 virtual address range starting at the specified virtual
5780  * address.  Returns KERN_NO_SPACE if PMAP_ENTER_NOREPLACE was specified and a
5781  * L2 page mapping already exists at the specified virtual address.  Returns
5782  * KERN_RESOURCE_SHORTAGE if either (1) PMAP_ENTER_NOSLEEP was specified and a
5783  * page table page allocation failed or (2) PMAP_ENTER_NORECLAIM was specified
5784  * and a PV entry allocation failed.
5785  */
5786 static int
pmap_enter_l2(pmap_t pmap,vm_offset_t va,pd_entry_t new_l2,u_int flags,vm_page_t m,struct rwlock ** lockp)5787 pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
5788     vm_page_t m, struct rwlock **lockp)
5789 {
5790 	struct spglist free;
5791 	pd_entry_t *l2, old_l2;
5792 	vm_page_t l2pg, mt;
5793 	vm_page_t uwptpg;
5794 
5795 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5796 	KASSERT(ADDR_IS_CANONICAL(va),
5797 	    ("%s: Address not in canonical form: %lx", __func__, va));
5798 	KASSERT((flags & (PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM)) !=
5799 	    PMAP_ENTER_NORECLAIM,
5800 	    ("pmap_enter_l2: flags is missing PMAP_ENTER_NOREPLACE"));
5801 
5802 	if ((l2 = pmap_alloc_l2(pmap, va, &l2pg, (flags &
5803 	    PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
5804 		CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p",
5805 		    va, pmap);
5806 		return (KERN_RESOURCE_SHORTAGE);
5807 	}
5808 
5809 	/*
5810 	 * If bti is not the same for the whole l2 range, return failure
5811 	 * and let vm_fault() cope.  Check after l2 allocation, since
5812 	 * it could sleep.
5813 	 */
5814 	if (!pmap_bti_same(pmap, va, va + L2_SIZE, &new_l2)) {
5815 		KASSERT(l2pg != NULL, ("pmap_enter_l2: missing L2 PTP"));
5816 		pmap_abort_ptp(pmap, va, l2pg);
5817 		return (KERN_PROTECTION_FAILURE);
5818 	}
5819 
5820 	/*
5821 	 * If there are existing mappings, either abort or remove them.
5822 	 */
5823 	if ((old_l2 = pmap_load(l2)) != 0) {
5824 		KASSERT(l2pg == NULL || l2pg->ref_count > 1,
5825 		    ("pmap_enter_l2: l2pg's ref count is too low"));
5826 		if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
5827 			if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
5828 				if (l2pg != NULL)
5829 					l2pg->ref_count--;
5830 				CTR2(KTR_PMAP,
5831 				    "pmap_enter_l2: no space for va %#lx"
5832 				    " in pmap %p", va, pmap);
5833 				return (KERN_NO_SPACE);
5834 			} else if (ADDR_IS_USER(va) ||
5835 			    !pmap_every_pte_zero(PTE_TO_PHYS(old_l2))) {
5836 				if (l2pg != NULL)
5837 					l2pg->ref_count--;
5838 				CTR2(KTR_PMAP,
5839 				    "pmap_enter_l2: failure for va %#lx"
5840 				    " in pmap %p", va, pmap);
5841 				return (KERN_FAILURE);
5842 			}
5843 		}
5844 		SLIST_INIT(&free);
5845 		if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
5846 			(void)pmap_remove_l2(pmap, l2, va,
5847 			    pmap_load(pmap_l1(pmap, va)), false, &free, lockp);
5848 		} else {
5849 			if (ADDR_IS_KERNEL(va)) {
5850 				/*
5851 				 * Try to save the ptp in the trie
5852 				 * before any changes to mappings are
5853 				 * made.  Abort on failure.
5854 				 */
5855 				mt = PTE_TO_VM_PAGE(old_l2);
5856 				if (pmap_insert_pt_page(pmap, mt, false,
5857 				    false)) {
5858 					CTR1(KTR_PMAP,
5859 			    "pmap_enter_l2: cannot ins kern ptp va %#lx",
5860 					    va);
5861 					return (KERN_RESOURCE_SHORTAGE);
5862 				}
5863 				/*
5864 				 * Both pmap_remove_l2() and
5865 				 * pmap_remove_l3_range() will zero fill
5866 				 * the L3 kernel page table page.
5867 				 */
5868 			}
5869 			pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
5870 			    &free, lockp);
5871 			if (ADDR_IS_KERNEL(va)) {
5872 				/*
5873 				 * The TLB could have an intermediate
5874 				 * entry for the L3 kernel page table
5875 				 * page, so request an invalidation at
5876 				 * all levels after clearing the
5877 				 * L2_TABLE entry.
5878 				 */
5879 				pmap_clear(l2);
5880 				pmap_s1_invalidate_page(pmap, va, false);
5881 			}
5882 		}
5883 		KASSERT(pmap_load(l2) == 0,
5884 		    ("pmap_enter_l2: non-zero L2 entry %p", l2));
5885 		if (ADDR_IS_USER(va)) {
5886 			vm_page_free_pages_toq(&free, true);
5887 		} else {
5888 			KASSERT(SLIST_EMPTY(&free),
5889 			    ("pmap_enter_l2: freed kernel page table page"));
5890 		}
5891 	}
5892 
5893 	/*
5894 	 * Allocate leaf ptpage for wired userspace pages.
5895 	 */
5896 	uwptpg = NULL;
5897 	if ((new_l2 & ATTR_SW_WIRED) != 0 && pmap != kernel_pmap) {
5898 		uwptpg = vm_page_alloc_noobj(VM_ALLOC_WIRED);
5899 		if (uwptpg == NULL) {
5900 			pmap_abort_ptp(pmap, va, l2pg);
5901 			return (KERN_RESOURCE_SHORTAGE);
5902 		}
5903 		uwptpg->pindex = pmap_l2_pindex(va);
5904 		if (pmap_insert_pt_page(pmap, uwptpg, true, false)) {
5905 			vm_page_unwire_noq(uwptpg);
5906 			vm_page_free(uwptpg);
5907 			pmap_abort_ptp(pmap, va, l2pg);
5908 			return (KERN_RESOURCE_SHORTAGE);
5909 		}
5910 		pmap_resident_count_inc(pmap, 1);
5911 		uwptpg->ref_count = NL3PG;
5912 	}
5913 	if ((new_l2 & ATTR_SW_MANAGED) != 0) {
5914 		/*
5915 		 * Abort this mapping if its PV entry could not be created.
5916 		 */
5917 		if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
5918 			if (l2pg != NULL)
5919 				pmap_abort_ptp(pmap, va, l2pg);
5920 			else {
5921 				KASSERT(ADDR_IS_KERNEL(va) &&
5922 				    (pmap_load(l2) & ATTR_DESCR_MASK) ==
5923 				    L2_TABLE,
5924 				    ("pmap_enter_l2: invalid kernel L2E"));
5925 				mt = pmap_remove_pt_page(pmap, va);
5926 				KASSERT(mt != NULL,
5927 				    ("pmap_enter_l2: missing kernel PTP"));
5928 			}
5929 			if (uwptpg != NULL) {
5930 				mt = pmap_remove_pt_page(pmap, va);
5931 				KASSERT(mt == uwptpg,
5932 				    ("removed pt page %p, expected %p", mt,
5933 				    uwptpg));
5934 				pmap_resident_count_dec(pmap, 1);
5935 				uwptpg->ref_count = 1;
5936 				vm_page_unwire_noq(uwptpg);
5937 				vm_page_free(uwptpg);
5938 			}
5939 			CTR2(KTR_PMAP,
5940 			    "pmap_enter_l2: failure for va %#lx in pmap %p",
5941 			    va, pmap);
5942 			return (KERN_RESOURCE_SHORTAGE);
5943 		}
5944 		if ((new_l2 & ATTR_SW_DBM) != 0)
5945 			for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
5946 				vm_page_aflag_set(mt, PGA_WRITEABLE);
5947 	}
5948 
5949 	/*
5950 	 * Increment counters.
5951 	 */
5952 	if ((new_l2 & ATTR_SW_WIRED) != 0)
5953 		pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE;
5954 	pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE;
5955 
5956 	/*
5957 	 * Conditionally sync the icache.  See pmap_enter() for details.
5958 	 */
5959 	if ((new_l2 & ATTR_S1_XN) == 0 && (PTE_TO_PHYS(new_l2) !=
5960 	    PTE_TO_PHYS(old_l2) || (old_l2 & ATTR_S1_XN) != 0) &&
5961 	    pmap != kernel_pmap && m->md.pv_memattr == VM_MEMATTR_WRITE_BACK) {
5962 		cpu_icache_sync_range((void *)PHYS_TO_DMAP(PTE_TO_PHYS(new_l2)),
5963 		    L2_SIZE);
5964 	}
5965 
5966 	/*
5967 	 * Map the superpage.
5968 	 */
5969 	pmap_store(l2, new_l2);
5970 	dsb(ishst);
5971 
5972 	counter_u64_add(pmap_l2_mappings, 1);
5973 	CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p",
5974 	    va, pmap);
5975 
5976 	return (KERN_SUCCESS);
5977 }
5978 
5979 /*
5980  * Tries to create a read- and/or execute-only L3C page mapping.  Returns
5981  * KERN_SUCCESS if the mapping was created.  Otherwise, returns an error
5982  * value.
5983  */
5984 static int
pmap_enter_l3c_rx(pmap_t pmap,vm_offset_t va,vm_page_t m,vm_page_t * ml3p,vm_prot_t prot,struct rwlock ** lockp)5985 pmap_enter_l3c_rx(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *ml3p,
5986     vm_prot_t prot, struct rwlock **lockp)
5987 {
5988 	pt_entry_t l3e;
5989 
5990 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5991 	PMAP_ASSERT_STAGE1(pmap);
5992 	KASSERT(ADDR_IS_CANONICAL(va),
5993 	    ("%s: Address not in canonical form: %lx", __func__, va));
5994 
5995 	l3e = VM_PAGE_TO_PTE(m) | pmap_sh_attr |
5996 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
5997 	    ATTR_CONTIGUOUS | L3_PAGE;
5998 	if ((m->oflags & VPO_UNMANAGED) == 0)
5999 		l3e |= ATTR_SW_MANAGED;
6000 	else
6001 		l3e |= ATTR_AF;
6002 	if ((prot & VM_PROT_EXECUTE) == 0 ||
6003 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
6004 		l3e |= ATTR_S1_XN;
6005 	if (ADDR_IS_USER(va))
6006 		l3e |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
6007 	else
6008 		l3e |= ATTR_S1_UXN;
6009 	if (pmap != kernel_pmap)
6010 		l3e |= ATTR_S1_nG;
6011 	return (pmap_enter_l3c(pmap, va, l3e, PMAP_ENTER_NOSLEEP |
6012 	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, m, ml3p, lockp));
6013 }
6014 
6015 static int
pmap_enter_l3c(pmap_t pmap,vm_offset_t va,pt_entry_t l3e,u_int flags,vm_page_t m,vm_page_t * ml3p,struct rwlock ** lockp)6016 pmap_enter_l3c(pmap_t pmap, vm_offset_t va, pt_entry_t l3e, u_int flags,
6017     vm_page_t m, vm_page_t *ml3p, struct rwlock **lockp)
6018 {
6019 	pd_entry_t *l2p, *pde;
6020 	pt_entry_t *l3p, *tl3p;
6021 	vm_page_t mt;
6022 	vm_paddr_t pa;
6023 	vm_pindex_t l2pindex;
6024 	int lvl;
6025 
6026 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6027 	KASSERT((va & L3C_OFFSET) == 0,
6028 	    ("pmap_enter_l3c: va is not aligned"));
6029 	KASSERT(!VA_IS_CLEANMAP(va) || (l3e & ATTR_SW_MANAGED) == 0,
6030 	    ("pmap_enter_l3c: managed mapping within the clean submap"));
6031 	KASSERT((l3e & ATTR_CONTIGUOUS) != 0,
6032 	    ("pmap_enter_l3c: l3e is missing ATTR_CONTIGUOUS"));
6033 
6034 	/*
6035 	 * If the L3 PTP is not resident, we attempt to create it here.
6036 	 */
6037 	if (ADDR_IS_USER(va)) {
6038 		/*
6039 		 * Were we given the correct L3 PTP?  If so, we can simply
6040 		 * increment its ref count.
6041 		 */
6042 		l2pindex = pmap_l2_pindex(va);
6043 		if (*ml3p != NULL && (*ml3p)->pindex == l2pindex) {
6044 			(*ml3p)->ref_count += L3C_ENTRIES;
6045 		} else {
6046 retry:
6047 			/*
6048 			 * Get the L2 entry.
6049 			 */
6050 			pde = pmap_pde(pmap, va, &lvl);
6051 
6052 			/*
6053 			 * If the L2 entry is a superpage, we either abort or
6054 			 * demote depending on the given flags.
6055 			 */
6056 			if (lvl == 1) {
6057 				l2p = pmap_l1_to_l2(pde, va);
6058 				if ((pmap_load(l2p) & ATTR_DESCR_MASK) ==
6059 				    L2_BLOCK) {
6060 					if ((flags & PMAP_ENTER_NOREPLACE) != 0)
6061 						return (KERN_FAILURE);
6062 					l3p = pmap_demote_l2_locked(pmap, l2p,
6063 					    va, lockp);
6064 					if (l3p != NULL) {
6065 						*ml3p = PTE_TO_VM_PAGE(
6066 						    pmap_load(l2p));
6067 						(*ml3p)->ref_count +=
6068 						    L3C_ENTRIES;
6069 						goto have_l3p;
6070 					}
6071 				}
6072 				/* We need to allocate an L3 PTP. */
6073 			}
6074 
6075 			/*
6076 			 * If the L3 PTP is mapped, we just increment its ref
6077 			 * count.  Otherwise, we attempt to allocate it.
6078 			 */
6079 			if (lvl == 2 && pmap_load(pde) != 0) {
6080 				*ml3p = PTE_TO_VM_PAGE(pmap_load(pde));
6081 				(*ml3p)->ref_count += L3C_ENTRIES;
6082 			} else {
6083 				*ml3p = _pmap_alloc_l3(pmap, l2pindex, (flags &
6084 				    PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp);
6085 				if (*ml3p == NULL) {
6086 					if ((flags & PMAP_ENTER_NOSLEEP) != 0)
6087 						return (KERN_FAILURE);
6088 
6089 					/*
6090 					 * The page table may have changed
6091 					 * while we slept.
6092 					 */
6093 					goto retry;
6094 				}
6095 				(*ml3p)->ref_count += L3C_ENTRIES - 1;
6096 			}
6097 		}
6098 		l3p = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*ml3p));
6099 	} else {
6100 		*ml3p = NULL;
6101 
6102 		/*
6103 		 * If the L2 entry is a superpage, we either abort or demote
6104 		 * depending on the given flags.
6105 		 */
6106 		pde = pmap_pde(kernel_pmap, va, &lvl);
6107 		if (lvl == 1) {
6108 			l2p = pmap_l1_to_l2(pde, va);
6109 			KASSERT((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK,
6110 			    ("pmap_enter_l3c: missing L2 block"));
6111 			if ((flags & PMAP_ENTER_NOREPLACE) != 0)
6112 				return (KERN_FAILURE);
6113 			l3p = pmap_demote_l2_locked(pmap, l2p, va, lockp);
6114 		} else {
6115 			KASSERT(lvl == 2,
6116 			    ("pmap_enter_l3c: Invalid level %d", lvl));
6117 			l3p = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(
6118 			    pmap_load(pde)));
6119 		}
6120 	}
6121 have_l3p:
6122 	l3p = &l3p[pmap_l3_index(va)];
6123 
6124 	/*
6125 	 * If bti is not the same for the whole L3C range, return failure
6126 	 * and let vm_fault() cope.  Check after L3 allocation, since
6127 	 * it could sleep.
6128 	 */
6129 	if (!pmap_bti_same(pmap, va, va + L3C_SIZE, &l3e)) {
6130 		KASSERT(*ml3p != NULL, ("pmap_enter_l3c: missing L3 PTP"));
6131 		(*ml3p)->ref_count -= L3C_ENTRIES - 1;
6132 		pmap_abort_ptp(pmap, va, *ml3p);
6133 		*ml3p = NULL;
6134 		return (KERN_PROTECTION_FAILURE);
6135 	}
6136 
6137 	/*
6138 	 * If there are existing mappings, either abort or remove them.
6139 	 */
6140 	if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
6141 		for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
6142 			if (pmap_load(tl3p) != 0) {
6143 				if (*ml3p != NULL)
6144 					(*ml3p)->ref_count -= L3C_ENTRIES;
6145 				return (KERN_FAILURE);
6146 			}
6147 		}
6148 	} else {
6149 		/*
6150 		 * Because we increment the L3 page's reference count above,
6151 		 * it is guaranteed not to be freed here and we can pass NULL
6152 		 * instead of a valid free list.
6153 		 */
6154 		pmap_remove_l3_range(pmap, pmap_load(pmap_l2(pmap, va)), va,
6155 		    va + L3C_SIZE, NULL, lockp);
6156 	}
6157 
6158 	/*
6159 	 * Enter on the PV list if part of our managed memory.
6160 	 */
6161 	if ((l3e & ATTR_SW_MANAGED) != 0) {
6162 		if (!pmap_pv_insert_l3c(pmap, va, m, lockp)) {
6163 			if (*ml3p != NULL) {
6164 				(*ml3p)->ref_count -= L3C_ENTRIES - 1;
6165 				pmap_abort_ptp(pmap, va, *ml3p);
6166 				*ml3p = NULL;
6167 			}
6168 			return (KERN_RESOURCE_SHORTAGE);
6169 		}
6170 		if ((l3e & ATTR_SW_DBM) != 0)
6171 			for (mt = m; mt < &m[L3C_ENTRIES]; mt++)
6172 				vm_page_aflag_set(mt, PGA_WRITEABLE);
6173 	}
6174 
6175 	/*
6176 	 * Increment counters.
6177 	 */
6178 	if ((l3e & ATTR_SW_WIRED) != 0)
6179 		pmap->pm_stats.wired_count += L3C_ENTRIES;
6180 	pmap_resident_count_inc(pmap, L3C_ENTRIES);
6181 
6182 	pa = VM_PAGE_TO_PHYS(m);
6183 	KASSERT((pa & L3C_OFFSET) == 0, ("pmap_enter_l3c: pa is not aligned"));
6184 
6185 	/*
6186 	 * Sync the icache before the mapping is stored.
6187 	 */
6188 	if ((l3e & ATTR_S1_XN) == 0 && pmap != kernel_pmap &&
6189 	    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
6190 		cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa), L3C_SIZE);
6191 
6192 	/*
6193 	 * Map the superpage.
6194 	 */
6195 	for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
6196 		pmap_store(tl3p, l3e);
6197 		l3e += L3_SIZE;
6198 	}
6199 	dsb(ishst);
6200 
6201 	counter_u64_add(pmap_l3c_mappings, 1);
6202 	CTR2(KTR_PMAP, "pmap_enter_l3c: success for va %#lx in pmap %p",
6203 	    va, pmap);
6204 	return (KERN_SUCCESS);
6205 }
6206 
6207 /*
6208  * Maps a sequence of resident pages belonging to the same object.
6209  * The sequence begins with the given page m_start.  This page is
6210  * mapped at the given virtual address start.  Each subsequent page is
6211  * mapped at a virtual address that is offset from start by the same
6212  * amount as the page is offset from m_start within the object.  The
6213  * last page in the sequence is the page with the largest offset from
6214  * m_start that can be mapped at a virtual address less than the given
6215  * virtual address end.  Not every virtual page between start and end
6216  * is mapped; only those for which a resident page exists with the
6217  * corresponding offset from m_start are mapped.
6218  */
6219 void
pmap_enter_object(pmap_t pmap,vm_offset_t start,vm_offset_t end,vm_page_t m_start,vm_prot_t prot)6220 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
6221     vm_page_t m_start, vm_prot_t prot)
6222 {
6223 	struct pctrie_iter pages;
6224 	struct rwlock *lock;
6225 	vm_offset_t va;
6226 	vm_page_t m, mpte;
6227 	int rv;
6228 
6229 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
6230 
6231 	mpte = NULL;
6232 	vm_page_iter_limit_init(&pages, m_start->object,
6233 	    m_start->pindex + atop(end - start));
6234 	m = vm_radix_iter_lookup(&pages, m_start->pindex);
6235 	lock = NULL;
6236 	PMAP_LOCK(pmap);
6237 	while (m != NULL) {
6238 		va = start + ptoa(m->pindex - m_start->pindex);
6239 		if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end &&
6240 		    m->psind == 2 && pmap_ps_enabled(pmap) &&
6241 		    ((rv = pmap_enter_l2_rx(pmap, va, m, prot, &lock)) ==
6242 		    KERN_SUCCESS || rv == KERN_NO_SPACE)) {
6243 			m = vm_radix_iter_jump(&pages, L2_SIZE / PAGE_SIZE);
6244 		} else if ((va & L3C_OFFSET) == 0 && va + L3C_SIZE <= end &&
6245 		    m->psind >= 1 && pmap_ps_enabled(pmap) &&
6246 		    ((rv = pmap_enter_l3c_rx(pmap, va, m, &mpte, prot,
6247 		    &lock)) == KERN_SUCCESS || rv == KERN_NO_SPACE)) {
6248 			m = vm_radix_iter_jump(&pages, L3C_ENTRIES);
6249 		} else {
6250 			/*
6251 			 * In general, if a superpage mapping were possible,
6252 			 * it would have been created above.  That said, if
6253 			 * start and end are not superpage aligned, then
6254 			 * promotion might be possible at the ends of [start,
6255 			 * end).  However, in practice, those promotion
6256 			 * attempts are so unlikely to succeed that they are
6257 			 * not worth trying.
6258 			 */
6259 			mpte = pmap_enter_quick_locked(pmap, va, m, prot |
6260 			    VM_PROT_NO_PROMOTE, mpte, &lock);
6261 			m = vm_radix_iter_step(&pages);
6262 		}
6263 	}
6264 	if (lock != NULL)
6265 		rw_wunlock(lock);
6266 	PMAP_UNLOCK(pmap);
6267 }
6268 
6269 /*
6270  * this code makes some *MAJOR* assumptions:
6271  * 1. Current pmap & pmap exists.
6272  * 2. Not wired.
6273  * 3. Read access.
6274  * 4. No page table pages.
6275  * but is *MUCH* faster than pmap_enter...
6276  */
6277 
6278 void
pmap_enter_quick(pmap_t pmap,vm_offset_t va,vm_page_t m,vm_prot_t prot)6279 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
6280 {
6281 	struct rwlock *lock;
6282 
6283 	lock = NULL;
6284 	PMAP_LOCK(pmap);
6285 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
6286 	if (lock != NULL)
6287 		rw_wunlock(lock);
6288 	PMAP_UNLOCK(pmap);
6289 }
6290 
6291 static vm_page_t
pmap_enter_quick_locked(pmap_t pmap,vm_offset_t va,vm_page_t m,vm_prot_t prot,vm_page_t mpte,struct rwlock ** lockp)6292 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
6293     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
6294 {
6295 	pt_entry_t *l1, *l2, *l3, l3_val;
6296 	vm_paddr_t pa;
6297 	int full_lvl, lvl;
6298 
6299 	KASSERT(!VA_IS_CLEANMAP(va) ||
6300 	    (m->oflags & VPO_UNMANAGED) != 0,
6301 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
6302 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6303 	PMAP_ASSERT_STAGE1(pmap);
6304 	KASSERT(ADDR_IS_CANONICAL(va),
6305 	    ("%s: Address not in canonical form: %lx", __func__, va));
6306 	l2 = NULL;
6307 
6308 	CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
6309 	/*
6310 	 * In the case that a page table page is not
6311 	 * resident, we are creating it here.
6312 	 */
6313 	if (ADDR_IS_USER(va)) {
6314 		vm_pindex_t l2pindex;
6315 
6316 		/*
6317 		 * Calculate pagetable page index
6318 		 */
6319 		l2pindex = pmap_l2_pindex(va);
6320 		if (mpte && (mpte->pindex == l2pindex)) {
6321 			mpte->ref_count++;
6322 		} else {
6323 			/*
6324 			 * If the page table page is mapped, we just increment
6325 			 * the hold count, and activate it.  Otherwise, we
6326 			 * attempt to allocate a page table page, passing NULL
6327 			 * instead of the PV list lock pointer because we don't
6328 			 * intend to sleep.  If this attempt fails, we don't
6329 			 * retry.  Instead, we give up.
6330 			 */
6331 			l1 = pmap_l1(pmap, va);
6332 			if (l1 != NULL && pmap_load(l1) != 0) {
6333 				if ((pmap_load(l1) & ATTR_DESCR_MASK) ==
6334 				    L1_BLOCK)
6335 					return (NULL);
6336 				l2 = pmap_l1_to_l2(l1, va);
6337 				if (pmap_load(l2) != 0) {
6338 					if ((pmap_load(l2) & ATTR_DESCR_MASK) ==
6339 					    L2_BLOCK)
6340 						return (NULL);
6341 					mpte = PTE_TO_VM_PAGE(pmap_load(l2));
6342 					mpte->ref_count++;
6343 				} else {
6344 					mpte = _pmap_alloc_l3(pmap, l2pindex,
6345 					    NULL);
6346 					if (mpte == NULL)
6347 						return (mpte);
6348 				}
6349 			} else {
6350 				mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
6351 				if (mpte == NULL)
6352 					return (mpte);
6353 			}
6354 		}
6355 		l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
6356 		l3 = &l3[pmap_l3_index(va)];
6357 	} else {
6358 		mpte = NULL;
6359 		l2 = pmap_pde(kernel_pmap, va, &lvl);
6360 		KASSERT(l2 != NULL,
6361 		    ("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx",
6362 		     va));
6363 		KASSERT(lvl == 2,
6364 		    ("pmap_enter_quick_locked: Invalid level %d", lvl));
6365 		l3 = pmap_l2_to_l3(l2, va);
6366 	}
6367 
6368 	/*
6369 	 * Abort if a mapping already exists.
6370 	 */
6371 	if (pmap_load(l3) != 0) {
6372 		if (mpte != NULL)
6373 			mpte->ref_count--;
6374 		return (NULL);
6375 	}
6376 
6377 	/*
6378 	 * Enter on the PV list if part of our managed memory.
6379 	 */
6380 	if ((m->oflags & VPO_UNMANAGED) == 0 &&
6381 	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
6382 		if (mpte != NULL)
6383 			pmap_abort_ptp(pmap, va, mpte);
6384 		return (NULL);
6385 	}
6386 
6387 	/*
6388 	 * Increment counters
6389 	 */
6390 	pmap_resident_count_inc(pmap, 1);
6391 
6392 	pa = VM_PAGE_TO_PHYS(m);
6393 	l3_val = PHYS_TO_PTE(pa) | pmap_sh_attr |
6394 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
6395 	l3_val |= pmap_pte_bti(pmap, va);
6396 	if ((prot & VM_PROT_EXECUTE) == 0 ||
6397 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
6398 		l3_val |= ATTR_S1_XN;
6399 	if (ADDR_IS_USER(va))
6400 		l3_val |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
6401 	else
6402 		l3_val |= ATTR_S1_UXN;
6403 	if (pmap != kernel_pmap)
6404 		l3_val |= ATTR_S1_nG;
6405 
6406 	/*
6407 	 * Now validate mapping with RO protection
6408 	 */
6409 	if ((m->oflags & VPO_UNMANAGED) == 0)
6410 		l3_val |= ATTR_SW_MANAGED;
6411 	else
6412 		l3_val |= ATTR_AF;
6413 
6414 	/* Sync icache before the mapping is stored to PTE */
6415 	if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
6416 	    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
6417 		cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa), PAGE_SIZE);
6418 
6419 	pmap_store(l3, l3_val);
6420 	dsb(ishst);
6421 
6422 #if VM_NRESERVLEVEL > 0
6423 	/*
6424 	 * First, attempt L3C promotion, if the virtual and physical addresses
6425 	 * are aligned with each other and an underlying reservation has the
6426 	 * neighboring L3 pages allocated.  The first condition is simply an
6427 	 * optimization that recognizes some eventual promotion failures early
6428 	 * at a lower run-time cost.  Then, attempt L2 promotion, if both a
6429 	 * level 1 reservation and the PTP are fully populated.
6430 	 */
6431 	if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
6432 	    (va & L3C_OFFSET) == (pa & L3C_OFFSET) &&
6433 	    (m->flags & PG_FICTITIOUS) == 0 &&
6434 	    (full_lvl = vm_reserv_level_iffullpop(m)) >= 0 &&
6435 	    pmap_promote_l3c(pmap, l3, va) &&
6436 	    full_lvl == 1 && (mpte == NULL || mpte->ref_count == NL3PG)) {
6437 		if (l2 == NULL)
6438 			l2 = pmap_l2(pmap, va);
6439 
6440 		/*
6441 		 * If promotion succeeds, then the next call to this function
6442 		 * should not be given the unmapped PTP as a hint.
6443 		 */
6444 		if (pmap_promote_l2(pmap, l2, va, mpte, lockp))
6445 			mpte = NULL;
6446 	}
6447 #endif
6448 
6449 	return (mpte);
6450 }
6451 
6452 /*
6453  * This code maps large physical mmap regions into the
6454  * processor address space.  Note that some shortcuts
6455  * are taken, but the code works.
6456  */
6457 void
pmap_object_init_pt(pmap_t pmap,vm_offset_t addr,vm_object_t object,vm_pindex_t pindex,vm_size_t size)6458 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
6459     vm_pindex_t pindex, vm_size_t size)
6460 {
6461 
6462 	VM_OBJECT_ASSERT_WLOCKED(object);
6463 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
6464 	    ("pmap_object_init_pt: non-device object"));
6465 }
6466 
6467 /*
6468  *	Clear the wired attribute from the mappings for the specified range of
6469  *	addresses in the given pmap.  Every valid mapping within that range
6470  *	must have the wired attribute set.  In contrast, invalid mappings
6471  *	cannot have the wired attribute set, so they are ignored.
6472  *
6473  *	The wired attribute of the page table entry is not a hardware feature,
6474  *	so there is no need to invalidate any TLB entries.
6475  */
6476 void
pmap_unwire(pmap_t pmap,vm_offset_t sva,vm_offset_t eva)6477 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
6478 {
6479 	vm_offset_t va_next;
6480 	pd_entry_t *l0, *l1, *l2;
6481 	pt_entry_t *l3;
6482 	bool partial_l3c;
6483 
6484 	PMAP_LOCK(pmap);
6485 	for (; sva < eva; sva = va_next) {
6486 		l0 = pmap_l0(pmap, sva);
6487 		if (pmap_load(l0) == 0) {
6488 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
6489 			if (va_next < sva)
6490 				va_next = eva;
6491 			continue;
6492 		}
6493 
6494 		l1 = pmap_l0_to_l1(l0, sva);
6495 		va_next = (sva + L1_SIZE) & ~L1_OFFSET;
6496 		if (va_next < sva)
6497 			va_next = eva;
6498 		if (pmap_load(l1) == 0)
6499 			continue;
6500 
6501 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
6502 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
6503 			KASSERT(va_next <= eva,
6504 			    ("partial update of non-transparent 1G page "
6505 			    "l1 %#lx sva %#lx eva %#lx va_next %#lx",
6506 			    pmap_load(l1), sva, eva, va_next));
6507 			MPASS(pmap != kernel_pmap);
6508 			MPASS((pmap_load(l1) & (ATTR_SW_MANAGED |
6509 			    ATTR_SW_WIRED)) == ATTR_SW_WIRED);
6510 			pmap_clear_bits(l1, ATTR_SW_WIRED);
6511 			pmap->pm_stats.wired_count -= L1_SIZE / PAGE_SIZE;
6512 			continue;
6513 		}
6514 
6515 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
6516 		if (va_next < sva)
6517 			va_next = eva;
6518 
6519 		l2 = pmap_l1_to_l2(l1, sva);
6520 		if (pmap_load(l2) == 0)
6521 			continue;
6522 
6523 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
6524 			if ((pmap_load(l2) & ATTR_SW_WIRED) == 0)
6525 				panic("pmap_unwire: l2 %#jx is missing "
6526 				    "ATTR_SW_WIRED", (uintmax_t)pmap_load(l2));
6527 
6528 			/*
6529 			 * Are we unwiring the entire large page?  If not,
6530 			 * demote the mapping and fall through.
6531 			 */
6532 			if (sva + L2_SIZE == va_next && eva >= va_next) {
6533 				pmap_clear_bits(l2, ATTR_SW_WIRED);
6534 				pmap->pm_stats.wired_count -= L2_SIZE /
6535 				    PAGE_SIZE;
6536 				continue;
6537 			} else if (pmap_demote_l2(pmap, l2, sva) == NULL)
6538 				panic("pmap_unwire: demotion failed");
6539 		}
6540 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
6541 		    ("pmap_unwire: Invalid l2 entry after demotion"));
6542 
6543 		if (va_next > eva)
6544 			va_next = eva;
6545 		for (partial_l3c = true, l3 = pmap_l2_to_l3(l2, sva);
6546 		    sva != va_next; l3++, sva += L3_SIZE) {
6547 			if (pmap_load(l3) == 0)
6548 				continue;
6549 			if ((pmap_load(l3) & ATTR_CONTIGUOUS) != 0) {
6550 				/*
6551 				 * Avoid demotion for whole-page unwiring.
6552 				 */
6553 				if ((sva & L3C_OFFSET) == 0) {
6554 					/*
6555 					 * Handle the possibility that
6556 					 * "va_next" is zero because of
6557 					 * address wraparound.
6558 					 */
6559 					partial_l3c = sva + L3C_OFFSET >
6560 					    va_next - 1;
6561 				}
6562 				if (partial_l3c)
6563 					(void)pmap_demote_l3c(pmap, l3, sva);
6564 			}
6565 			if ((pmap_load(l3) & ATTR_SW_WIRED) == 0)
6566 				panic("pmap_unwire: l3 %#jx is missing "
6567 				    "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3));
6568 
6569 			/*
6570 			 * ATTR_SW_WIRED must be cleared atomically.  Although
6571 			 * the pmap lock synchronizes access to ATTR_SW_WIRED,
6572 			 * the System MMU may write to the entry concurrently.
6573 			 */
6574 			pmap_clear_bits(l3, ATTR_SW_WIRED);
6575 			pmap->pm_stats.wired_count--;
6576 		}
6577 	}
6578 	PMAP_UNLOCK(pmap);
6579 }
6580 
6581 /*
6582  * This function requires that the caller has already added one to ml3's
6583  * ref_count in anticipation of creating a 4KB page mapping.
6584  */
6585 static bool
pmap_copy_l3c(pmap_t pmap,pt_entry_t * l3p,vm_offset_t va,pt_entry_t l3e,vm_page_t ml3,struct rwlock ** lockp)6586 pmap_copy_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va, pt_entry_t l3e,
6587     vm_page_t ml3, struct rwlock **lockp)
6588 {
6589 	pt_entry_t *tl3p;
6590 
6591 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6592 	KASSERT((va & L3C_OFFSET) == 0,
6593 	    ("pmap_copy_l3c: va is not aligned"));
6594 	KASSERT((l3e & ATTR_SW_MANAGED) != 0,
6595 	    ("pmap_copy_l3c: l3e is not managed"));
6596 
6597 	/*
6598 	 * Abort if a mapping already exists.
6599 	 */
6600 	for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++)
6601 		if (pmap_load(tl3p) != 0) {
6602 			if (ml3 != NULL)
6603 				ml3->ref_count--;
6604 			return (false);
6605 		}
6606 
6607 	if (!pmap_pv_insert_l3c(pmap, va, PTE_TO_VM_PAGE(l3e), lockp)) {
6608 		if (ml3 != NULL)
6609 			pmap_abort_ptp(pmap, va, ml3);
6610 		return (false);
6611 	}
6612 	ml3->ref_count += L3C_ENTRIES - 1;
6613 
6614 	/*
6615 	 * Clear the wired and accessed bits.  However, leave the dirty bit
6616 	 * unchanged because read/write superpage mappings are required to be
6617 	 * dirty.
6618 	 */
6619 	l3e &= ~(ATTR_SW_WIRED | ATTR_AF);
6620 
6621 	for (tl3p = l3p; tl3p < &l3p[L3C_ENTRIES]; tl3p++) {
6622 		pmap_store(tl3p, l3e);
6623 		l3e += L3_SIZE;
6624 	}
6625 	pmap_resident_count_inc(pmap, L3C_ENTRIES);
6626 	counter_u64_add(pmap_l3c_mappings, 1);
6627 	CTR2(KTR_PMAP, "pmap_copy_l3c: success for va %#lx in pmap %p",
6628 	    va, pmap);
6629 	return (true);
6630 }
6631 
6632 /*
6633  *	Copy the range specified by src_addr/len
6634  *	from the source map to the range dst_addr/len
6635  *	in the destination map.
6636  *
6637  *	This routine is only advisory and need not do anything.
6638  *
6639  *	Because the executable mappings created by this routine are copied,
6640  *	it should not have to flush the instruction cache.
6641  */
6642 void
pmap_copy(pmap_t dst_pmap,pmap_t src_pmap,vm_offset_t dst_addr,vm_size_t len,vm_offset_t src_addr)6643 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
6644     vm_offset_t src_addr)
6645 {
6646 	struct rwlock *lock;
6647 	pd_entry_t *l0, *l1, *l2, srcptepaddr;
6648 	pt_entry_t *dst_pte, mask, nbits, ptetemp, *src_pte;
6649 	vm_offset_t addr, end_addr, va_next;
6650 	vm_page_t dst_m, dstmpte, srcmpte;
6651 
6652 	PMAP_ASSERT_STAGE1(dst_pmap);
6653 	PMAP_ASSERT_STAGE1(src_pmap);
6654 
6655 	if (dst_addr != src_addr)
6656 		return;
6657 	end_addr = src_addr + len;
6658 	lock = NULL;
6659 	if (dst_pmap < src_pmap) {
6660 		PMAP_LOCK(dst_pmap);
6661 		PMAP_LOCK(src_pmap);
6662 	} else {
6663 		PMAP_LOCK(src_pmap);
6664 		PMAP_LOCK(dst_pmap);
6665 	}
6666 	for (addr = src_addr; addr < end_addr; addr = va_next) {
6667 		l0 = pmap_l0(src_pmap, addr);
6668 		if (pmap_load(l0) == 0) {
6669 			va_next = (addr + L0_SIZE) & ~L0_OFFSET;
6670 			if (va_next < addr)
6671 				va_next = end_addr;
6672 			continue;
6673 		}
6674 
6675 		va_next = (addr + L1_SIZE) & ~L1_OFFSET;
6676 		if (va_next < addr)
6677 			va_next = end_addr;
6678 		l1 = pmap_l0_to_l1(l0, addr);
6679 		if (pmap_load(l1) == 0)
6680 			continue;
6681 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
6682 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
6683 			KASSERT(va_next <= end_addr,
6684 			    ("partial update of non-transparent 1G page "
6685 			    "l1 %#lx addr %#lx end_addr %#lx va_next %#lx",
6686 			    pmap_load(l1), addr, end_addr, va_next));
6687 			srcptepaddr = pmap_load(l1);
6688 			l1 = pmap_l1(dst_pmap, addr);
6689 			if (l1 == NULL) {
6690 				if (_pmap_alloc_l3(dst_pmap,
6691 				    pmap_l0_pindex(addr), NULL) == NULL)
6692 					break;
6693 				l1 = pmap_l1(dst_pmap, addr);
6694 			} else {
6695 				l0 = pmap_l0(dst_pmap, addr);
6696 				dst_m = PTE_TO_VM_PAGE(pmap_load(l0));
6697 				dst_m->ref_count++;
6698 			}
6699 			KASSERT(pmap_load(l1) == 0,
6700 			    ("1G mapping present in dst pmap "
6701 			    "l1 %#lx addr %#lx end_addr %#lx va_next %#lx",
6702 			    pmap_load(l1), addr, end_addr, va_next));
6703 			pmap_store(l1, srcptepaddr & ~ATTR_SW_WIRED);
6704 			pmap_resident_count_inc(dst_pmap, L1_SIZE / PAGE_SIZE);
6705 			continue;
6706 		}
6707 
6708 		va_next = (addr + L2_SIZE) & ~L2_OFFSET;
6709 		if (va_next < addr)
6710 			va_next = end_addr;
6711 		l2 = pmap_l1_to_l2(l1, addr);
6712 		srcptepaddr = pmap_load(l2);
6713 		if (srcptepaddr == 0)
6714 			continue;
6715 		if ((srcptepaddr & ATTR_DESCR_MASK) == L2_BLOCK) {
6716 			/*
6717 			 * We can only virtual copy whole superpages.
6718 			 */
6719 			if ((addr & L2_OFFSET) != 0 ||
6720 			    addr + L2_SIZE > end_addr)
6721 				continue;
6722 			l2 = pmap_alloc_l2(dst_pmap, addr, &dst_m, NULL);
6723 			if (l2 == NULL)
6724 				break;
6725 			if (pmap_load(l2) == 0 &&
6726 			    ((srcptepaddr & ATTR_SW_MANAGED) == 0 ||
6727 			    pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr,
6728 			    PMAP_ENTER_NORECLAIM, &lock))) {
6729 				/*
6730 				 * We leave the dirty bit unchanged because
6731 				 * managed read/write superpage mappings are
6732 				 * required to be dirty.  However, managed
6733 				 * superpage mappings are not required to
6734 				 * have their accessed bit set, so we clear
6735 				 * it because we don't know if this mapping
6736 				 * will be used.
6737 				 */
6738 				srcptepaddr &= ~ATTR_SW_WIRED;
6739 				if ((srcptepaddr & ATTR_SW_MANAGED) != 0)
6740 					srcptepaddr &= ~ATTR_AF;
6741 				pmap_store(l2, srcptepaddr);
6742 				pmap_resident_count_inc(dst_pmap, L2_SIZE /
6743 				    PAGE_SIZE);
6744 				counter_u64_add(pmap_l2_mappings, 1);
6745 			} else
6746 				pmap_abort_ptp(dst_pmap, addr, dst_m);
6747 			continue;
6748 		}
6749 		KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE,
6750 		    ("pmap_copy: invalid L2 entry"));
6751 		srcmpte = PTE_TO_VM_PAGE(srcptepaddr);
6752 		KASSERT(srcmpte->ref_count > 0,
6753 		    ("pmap_copy: source page table page is unused"));
6754 		if (va_next > end_addr)
6755 			va_next = end_addr;
6756 		src_pte = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(srcptepaddr));
6757 		src_pte = &src_pte[pmap_l3_index(addr)];
6758 		dstmpte = NULL;
6759 		for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
6760 			ptetemp = pmap_load(src_pte);
6761 
6762 			/*
6763 			 * We only virtual copy managed pages.
6764 			 */
6765 			if ((ptetemp & ATTR_SW_MANAGED) == 0)
6766 				continue;
6767 
6768 			if (dstmpte != NULL) {
6769 				KASSERT(dstmpte->pindex == pmap_l2_pindex(addr),
6770 				    ("dstmpte pindex/addr mismatch"));
6771 				dstmpte->ref_count++;
6772 			} else if ((dstmpte = pmap_alloc_l3(dst_pmap, addr,
6773 			    NULL)) == NULL)
6774 				goto out;
6775 			dst_pte = (pt_entry_t *)
6776 			    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
6777 			dst_pte = &dst_pte[pmap_l3_index(addr)];
6778 			if ((ptetemp & ATTR_CONTIGUOUS) != 0 && (addr &
6779 			    L3C_OFFSET) == 0 && addr + L3C_OFFSET <=
6780 			    va_next - 1) {
6781 				if (!pmap_copy_l3c(dst_pmap, dst_pte, addr,
6782 				    ptetemp, dstmpte, &lock))
6783 					goto out;
6784 				addr += L3C_SIZE - PAGE_SIZE;
6785 				src_pte += L3C_ENTRIES - 1;
6786 			} else if (pmap_load(dst_pte) == 0 &&
6787 			    pmap_try_insert_pv_entry(dst_pmap, addr,
6788 			    PTE_TO_VM_PAGE(ptetemp), &lock)) {
6789 				/*
6790 				 * Clear the wired, contiguous, modified, and
6791 				 * accessed bits from the destination PTE.
6792 				 * The contiguous bit is cleared because we
6793 				 * are not copying the entire L3C superpage.
6794 				 */
6795 				mask = ATTR_SW_WIRED | ATTR_CONTIGUOUS |
6796 				    ATTR_AF;
6797 				nbits = 0;
6798 				if ((ptetemp & ATTR_SW_DBM) != 0)
6799 					nbits |= ATTR_S1_AP_RW_BIT;
6800 				pmap_store(dst_pte, (ptetemp & ~mask) | nbits);
6801 				pmap_resident_count_inc(dst_pmap, 1);
6802 			} else {
6803 				pmap_abort_ptp(dst_pmap, addr, dstmpte);
6804 				goto out;
6805 			}
6806 			/* Have we copied all of the valid mappings? */
6807 			if (dstmpte->ref_count >= srcmpte->ref_count)
6808 				break;
6809 		}
6810 	}
6811 out:
6812 	/*
6813 	 * XXX This barrier may not be needed because the destination pmap is
6814 	 * not active.
6815 	 */
6816 	dsb(ishst);
6817 
6818 	if (lock != NULL)
6819 		rw_wunlock(lock);
6820 	PMAP_UNLOCK(src_pmap);
6821 	PMAP_UNLOCK(dst_pmap);
6822 }
6823 
6824 int
pmap_vmspace_copy(pmap_t dst_pmap,pmap_t src_pmap)6825 pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap)
6826 {
6827 	int error;
6828 
6829 	if (dst_pmap->pm_stage != src_pmap->pm_stage)
6830 		return (EINVAL);
6831 
6832 	if (dst_pmap->pm_stage != PM_STAGE1 || src_pmap->pm_bti == NULL)
6833 		return (0);
6834 
6835 	for (;;) {
6836 		if (dst_pmap < src_pmap) {
6837 			PMAP_LOCK(dst_pmap);
6838 			PMAP_LOCK(src_pmap);
6839 		} else {
6840 			PMAP_LOCK(src_pmap);
6841 			PMAP_LOCK(dst_pmap);
6842 		}
6843 		error = pmap_bti_copy(dst_pmap, src_pmap);
6844 		/* Clean up partial copy on failure due to no memory. */
6845 		if (error == ENOMEM)
6846 			pmap_bti_deassign_all(dst_pmap);
6847 		PMAP_UNLOCK(src_pmap);
6848 		PMAP_UNLOCK(dst_pmap);
6849 		if (error != ENOMEM)
6850 			break;
6851 		vm_wait(NULL);
6852 	}
6853 	return (error);
6854 }
6855 
6856 /*
6857  *	pmap_zero_page zeros the specified hardware page by mapping
6858  *	the page into KVM and using bzero to clear its contents.
6859  */
6860 void
pmap_zero_page(vm_page_t m)6861 pmap_zero_page(vm_page_t m)
6862 {
6863 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
6864 
6865 	pagezero((void *)va);
6866 }
6867 
6868 /*
6869  *	pmap_zero_page_area zeros the specified hardware page by mapping
6870  *	the page into KVM and using bzero to clear its contents.
6871  *
6872  *	off and size may not cover an area beyond a single hardware page.
6873  */
6874 void
pmap_zero_page_area(vm_page_t m,int off,int size)6875 pmap_zero_page_area(vm_page_t m, int off, int size)
6876 {
6877 	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
6878 
6879 	if (off == 0 && size == PAGE_SIZE)
6880 		pagezero((void *)va);
6881 	else
6882 		bzero((char *)va + off, size);
6883 }
6884 
6885 /*
6886  *	pmap_copy_page copies the specified (machine independent)
6887  *	page by mapping the page into virtual memory and using
6888  *	bcopy to copy the page, one machine dependent page at a
6889  *	time.
6890  */
6891 void
pmap_copy_page(vm_page_t msrc,vm_page_t mdst)6892 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
6893 {
6894 	vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
6895 	vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
6896 
6897 	pagecopy((void *)src, (void *)dst);
6898 }
6899 
6900 int unmapped_buf_allowed = 1;
6901 
6902 void
pmap_copy_pages(vm_page_t ma[],vm_offset_t a_offset,vm_page_t mb[],vm_offset_t b_offset,int xfersize)6903 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
6904     vm_offset_t b_offset, int xfersize)
6905 {
6906 	void *a_cp, *b_cp;
6907 	vm_page_t m_a, m_b;
6908 	vm_paddr_t p_a, p_b;
6909 	vm_offset_t a_pg_offset, b_pg_offset;
6910 	int cnt;
6911 
6912 	while (xfersize > 0) {
6913 		a_pg_offset = a_offset & PAGE_MASK;
6914 		m_a = ma[a_offset >> PAGE_SHIFT];
6915 		p_a = m_a->phys_addr;
6916 		b_pg_offset = b_offset & PAGE_MASK;
6917 		m_b = mb[b_offset >> PAGE_SHIFT];
6918 		p_b = m_b->phys_addr;
6919 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
6920 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
6921 		if (__predict_false(!PHYS_IN_DMAP(p_a))) {
6922 			panic("!DMAP a %lx", p_a);
6923 		} else {
6924 			a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset;
6925 		}
6926 		if (__predict_false(!PHYS_IN_DMAP(p_b))) {
6927 			panic("!DMAP b %lx", p_b);
6928 		} else {
6929 			b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset;
6930 		}
6931 		bcopy(a_cp, b_cp, cnt);
6932 		a_offset += cnt;
6933 		b_offset += cnt;
6934 		xfersize -= cnt;
6935 	}
6936 }
6937 
6938 vm_offset_t
pmap_quick_enter_page(vm_page_t m)6939 pmap_quick_enter_page(vm_page_t m)
6940 {
6941 
6942 	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
6943 }
6944 
6945 void
pmap_quick_remove_page(vm_offset_t addr)6946 pmap_quick_remove_page(vm_offset_t addr)
6947 {
6948 }
6949 
6950 /*
6951  * Returns true if the pmap's pv is one of the first
6952  * 16 pvs linked to from this page.  This count may
6953  * be changed upwards or downwards in the future; it
6954  * is only necessary that true be returned for a small
6955  * subset of pmaps for proper page aging.
6956  */
6957 bool
pmap_page_exists_quick(pmap_t pmap,vm_page_t m)6958 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
6959 {
6960 	struct md_page *pvh;
6961 	struct rwlock *lock;
6962 	pv_entry_t pv;
6963 	int loops = 0;
6964 	bool rv;
6965 
6966 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
6967 	    ("pmap_page_exists_quick: page %p is not managed", m));
6968 	rv = false;
6969 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
6970 	rw_rlock(lock);
6971 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
6972 		if (PV_PMAP(pv) == pmap) {
6973 			rv = true;
6974 			break;
6975 		}
6976 		loops++;
6977 		if (loops >= 16)
6978 			break;
6979 	}
6980 	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
6981 		pvh = page_to_pvh(m);
6982 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
6983 			if (PV_PMAP(pv) == pmap) {
6984 				rv = true;
6985 				break;
6986 			}
6987 			loops++;
6988 			if (loops >= 16)
6989 				break;
6990 		}
6991 	}
6992 	rw_runlock(lock);
6993 	return (rv);
6994 }
6995 
6996 /*
6997  *	pmap_page_wired_mappings:
6998  *
6999  *	Return the number of managed mappings to the given physical page
7000  *	that are wired.
7001  */
7002 int
pmap_page_wired_mappings(vm_page_t m)7003 pmap_page_wired_mappings(vm_page_t m)
7004 {
7005 	struct rwlock *lock;
7006 	struct md_page *pvh;
7007 	pmap_t pmap;
7008 	pt_entry_t *pte;
7009 	pv_entry_t pv;
7010 	int count, md_gen, pvh_gen;
7011 
7012 	if ((m->oflags & VPO_UNMANAGED) != 0)
7013 		return (0);
7014 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7015 	rw_rlock(lock);
7016 restart:
7017 	count = 0;
7018 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
7019 		pmap = PV_PMAP(pv);
7020 		if (!PMAP_TRYLOCK(pmap)) {
7021 			md_gen = m->md.pv_gen;
7022 			rw_runlock(lock);
7023 			PMAP_LOCK(pmap);
7024 			rw_rlock(lock);
7025 			if (md_gen != m->md.pv_gen) {
7026 				PMAP_UNLOCK(pmap);
7027 				goto restart;
7028 			}
7029 		}
7030 		pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__);
7031 		if ((pmap_load(pte) & ATTR_SW_WIRED) != 0)
7032 			count++;
7033 		PMAP_UNLOCK(pmap);
7034 	}
7035 	if ((m->flags & PG_FICTITIOUS) == 0) {
7036 		pvh = page_to_pvh(m);
7037 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
7038 			pmap = PV_PMAP(pv);
7039 			if (!PMAP_TRYLOCK(pmap)) {
7040 				md_gen = m->md.pv_gen;
7041 				pvh_gen = pvh->pv_gen;
7042 				rw_runlock(lock);
7043 				PMAP_LOCK(pmap);
7044 				rw_rlock(lock);
7045 				if (md_gen != m->md.pv_gen ||
7046 				    pvh_gen != pvh->pv_gen) {
7047 					PMAP_UNLOCK(pmap);
7048 					goto restart;
7049 				}
7050 			}
7051 			pte = pmap_pte_exists(pmap, pv->pv_va, 2, __func__);
7052 			if ((pmap_load(pte) & ATTR_SW_WIRED) != 0)
7053 				count++;
7054 			PMAP_UNLOCK(pmap);
7055 		}
7056 	}
7057 	rw_runlock(lock);
7058 	return (count);
7059 }
7060 
7061 /*
7062  * Returns true if the given page is mapped individually or as part of
7063  * a 2mpage.  Otherwise, returns false.
7064  */
7065 bool
pmap_page_is_mapped(vm_page_t m)7066 pmap_page_is_mapped(vm_page_t m)
7067 {
7068 	struct rwlock *lock;
7069 	bool rv;
7070 
7071 	if ((m->oflags & VPO_UNMANAGED) != 0)
7072 		return (false);
7073 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7074 	rw_rlock(lock);
7075 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
7076 	    ((m->flags & PG_FICTITIOUS) == 0 &&
7077 	    !TAILQ_EMPTY(&page_to_pvh(m)->pv_list));
7078 	rw_runlock(lock);
7079 	return (rv);
7080 }
7081 
7082 /*
7083  * Destroy all managed, non-wired mappings in the given user-space
7084  * pmap.  This pmap cannot be active on any processor besides the
7085  * caller.
7086  *
7087  * This function cannot be applied to the kernel pmap.  Moreover, it
7088  * is not intended for general use.  It is only to be used during
7089  * process termination.  Consequently, it can be implemented in ways
7090  * that make it faster than pmap_remove().  First, it can more quickly
7091  * destroy mappings by iterating over the pmap's collection of PV
7092  * entries, rather than searching the page table.  Second, it doesn't
7093  * have to test and clear the page table entries atomically, because
7094  * no processor is currently accessing the user address space.  In
7095  * particular, a page table entry's dirty bit won't change state once
7096  * this function starts.
7097  */
7098 void
pmap_remove_pages(pmap_t pmap)7099 pmap_remove_pages(pmap_t pmap)
7100 {
7101 	pd_entry_t *pde;
7102 	pt_entry_t *pte, tpte;
7103 	struct spglist free;
7104 	struct pv_chunklist free_chunks[PMAP_MEMDOM];
7105 	vm_page_t m, ml3, mt;
7106 	pv_entry_t pv;
7107 	struct md_page *pvh;
7108 	struct pv_chunk *pc, *npc;
7109 	struct rwlock *lock;
7110 	int64_t bit;
7111 	uint64_t inuse, bitmask;
7112 	int allfree, field, i, idx, lvl;
7113 	int freed __pvused;
7114 	vm_paddr_t pa;
7115 
7116 	lock = NULL;
7117 
7118 	for (i = 0; i < PMAP_MEMDOM; i++)
7119 		TAILQ_INIT(&free_chunks[i]);
7120 	SLIST_INIT(&free);
7121 	PMAP_LOCK(pmap);
7122 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
7123 		allfree = 1;
7124 		freed = 0;
7125 		for (field = 0; field < _NPCM; field++) {
7126 			inuse = ~pc->pc_map[field] & pc_freemask[field];
7127 			while (inuse != 0) {
7128 				bit = ffsl(inuse) - 1;
7129 				bitmask = 1UL << bit;
7130 				idx = field * 64 + bit;
7131 				pv = &pc->pc_pventry[idx];
7132 				inuse &= ~bitmask;
7133 
7134 				pde = pmap_pde(pmap, pv->pv_va, &lvl);
7135 				KASSERT(pde != NULL,
7136 				    ("Attempting to remove an unmapped page"));
7137 
7138 				switch(lvl) {
7139 				case 1:
7140 					pte = pmap_l1_to_l2(pde, pv->pv_va);
7141 					tpte = pmap_load(pte);
7142 					KASSERT((tpte & ATTR_DESCR_MASK) ==
7143 					    L2_BLOCK,
7144 					    ("Attempting to remove an invalid "
7145 					    "block: %lx", tpte));
7146 					break;
7147 				case 2:
7148 					pte = pmap_l2_to_l3(pde, pv->pv_va);
7149 					tpte = pmap_load(pte);
7150 					KASSERT((tpte & ATTR_DESCR_MASK) ==
7151 					    L3_PAGE,
7152 					    ("Attempting to remove an invalid "
7153 					     "page: %lx", tpte));
7154 					break;
7155 				default:
7156 					panic(
7157 					    "Invalid page directory level: %d",
7158 					    lvl);
7159 				}
7160 
7161 				/*
7162 				 * We cannot remove wired mappings at this time.
7163 				 *
7164 				 * For L3C superpages, all of the constituent PTEs
7165 				 * should have the wired bit set, so we don't
7166 				 * check for ATTR_CONTIGUOUS here.
7167 				 */
7168 				if (tpte & ATTR_SW_WIRED) {
7169 					allfree = 0;
7170 					continue;
7171 				}
7172 
7173 				/* Mark free */
7174 				pc->pc_map[field] |= bitmask;
7175 
7176 				/*
7177 				 * Because this pmap is not active on other
7178 				 * processors, the dirty bit cannot have
7179 				 * changed state since we last loaded pte.
7180 				 */
7181 				pmap_clear(pte);
7182 
7183 				pa = PTE_TO_PHYS(tpte);
7184 
7185 				m = PHYS_TO_VM_PAGE(pa);
7186 				KASSERT(m->phys_addr == pa,
7187 				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
7188 				    m, (uintmax_t)m->phys_addr,
7189 				    (uintmax_t)tpte));
7190 
7191 				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
7192 				    m < &vm_page_array[vm_page_array_size],
7193 				    ("pmap_remove_pages: bad pte %#jx",
7194 				    (uintmax_t)tpte));
7195 
7196 				/*
7197 				 * Update the vm_page_t clean/reference bits.
7198 				 *
7199 				 * We don't check for ATTR_CONTIGUOUS here
7200 				 * because writeable L3C superpages are expected
7201 				 * to be dirty, i.e., every constituent PTE
7202 				 * should be dirty.
7203 				 */
7204 				if (pmap_pte_dirty(pmap, tpte)) {
7205 					switch (lvl) {
7206 					case 1:
7207 						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
7208 							vm_page_dirty(mt);
7209 						break;
7210 					case 2:
7211 						vm_page_dirty(m);
7212 						break;
7213 					}
7214 				}
7215 
7216 				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
7217 
7218 				switch (lvl) {
7219 				case 1:
7220 					pmap_resident_count_dec(pmap,
7221 					    L2_SIZE / PAGE_SIZE);
7222 					pvh = page_to_pvh(m);
7223 					TAILQ_REMOVE(&pvh->pv_list, pv,pv_next);
7224 					pvh->pv_gen++;
7225 					if (TAILQ_EMPTY(&pvh->pv_list)) {
7226 						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
7227 							if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
7228 							    TAILQ_EMPTY(&mt->md.pv_list))
7229 								vm_page_aflag_clear(mt, PGA_WRITEABLE);
7230 					}
7231 					ml3 = pmap_remove_pt_page(pmap,
7232 					    pv->pv_va);
7233 					if (ml3 != NULL) {
7234 						KASSERT(vm_page_any_valid(ml3),
7235 						    ("pmap_remove_pages: l3 page not promoted"));
7236 						pmap_resident_count_dec(pmap,1);
7237 						KASSERT(ml3->ref_count == NL3PG,
7238 						    ("pmap_remove_pages: l3 page ref count error"));
7239 						ml3->ref_count = 0;
7240 						pmap_add_delayed_free_list(ml3,
7241 						    &free, false);
7242 					}
7243 					break;
7244 				case 2:
7245 					pmap_resident_count_dec(pmap, 1);
7246 					TAILQ_REMOVE(&m->md.pv_list, pv,
7247 					    pv_next);
7248 					m->md.pv_gen++;
7249 					if ((m->a.flags & PGA_WRITEABLE) != 0 &&
7250 					    TAILQ_EMPTY(&m->md.pv_list) &&
7251 					    (m->flags & PG_FICTITIOUS) == 0) {
7252 						pvh = page_to_pvh(m);
7253 						if (TAILQ_EMPTY(&pvh->pv_list))
7254 							vm_page_aflag_clear(m,
7255 							    PGA_WRITEABLE);
7256 					}
7257 					break;
7258 				}
7259 				pmap_unuse_pt(pmap, pv->pv_va, pmap_load(pde),
7260 				    &free);
7261 				freed++;
7262 			}
7263 		}
7264 		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
7265 		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
7266 		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
7267 		if (allfree) {
7268 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
7269 			TAILQ_INSERT_TAIL(&free_chunks[pc_to_domain(pc)], pc,
7270 			    pc_list);
7271 		}
7272 	}
7273 	if (lock != NULL)
7274 		rw_wunlock(lock);
7275 	pmap_invalidate_all(pmap);
7276 	pmap_bti_deassign_all(pmap);
7277 	free_pv_chunk_batch(free_chunks);
7278 	PMAP_UNLOCK(pmap);
7279 	vm_page_free_pages_toq(&free, true);
7280 }
7281 
7282 /*
7283  * This is used to check if a page has been accessed or modified.
7284  */
7285 static bool
pmap_page_test_mappings(vm_page_t m,bool accessed,bool modified)7286 pmap_page_test_mappings(vm_page_t m, bool accessed, bool modified)
7287 {
7288 	struct rwlock *lock;
7289 	pv_entry_t pv;
7290 	struct md_page *pvh;
7291 	pt_entry_t l3e, mask, *pte, value;
7292 	pmap_t pmap;
7293 	int md_gen, pvh_gen;
7294 	bool rv;
7295 
7296 	rv = false;
7297 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7298 	rw_rlock(lock);
7299 restart:
7300 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
7301 		pmap = PV_PMAP(pv);
7302 		PMAP_ASSERT_STAGE1(pmap);
7303 		if (!PMAP_TRYLOCK(pmap)) {
7304 			md_gen = m->md.pv_gen;
7305 			rw_runlock(lock);
7306 			PMAP_LOCK(pmap);
7307 			rw_rlock(lock);
7308 			if (md_gen != m->md.pv_gen) {
7309 				PMAP_UNLOCK(pmap);
7310 				goto restart;
7311 			}
7312 		}
7313 		pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__);
7314 		mask = 0;
7315 		value = 0;
7316 		if (modified) {
7317 			mask |= ATTR_S1_AP_RW_BIT;
7318 			value |= ATTR_S1_AP(ATTR_S1_AP_RW);
7319 		}
7320 		if (accessed) {
7321 			mask |= ATTR_AF | ATTR_DESCR_MASK;
7322 			value |= ATTR_AF | L3_PAGE;
7323 		}
7324 		l3e = pmap_load(pte);
7325 		if ((l3e & ATTR_CONTIGUOUS) != 0)
7326 			l3e = pmap_load_l3c(pte);
7327 		PMAP_UNLOCK(pmap);
7328 		rv = (l3e & mask) == value;
7329 		if (rv)
7330 			goto out;
7331 	}
7332 	if ((m->flags & PG_FICTITIOUS) == 0) {
7333 		pvh = page_to_pvh(m);
7334 		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
7335 			pmap = PV_PMAP(pv);
7336 			PMAP_ASSERT_STAGE1(pmap);
7337 			if (!PMAP_TRYLOCK(pmap)) {
7338 				md_gen = m->md.pv_gen;
7339 				pvh_gen = pvh->pv_gen;
7340 				rw_runlock(lock);
7341 				PMAP_LOCK(pmap);
7342 				rw_rlock(lock);
7343 				if (md_gen != m->md.pv_gen ||
7344 				    pvh_gen != pvh->pv_gen) {
7345 					PMAP_UNLOCK(pmap);
7346 					goto restart;
7347 				}
7348 			}
7349 			pte = pmap_pte_exists(pmap, pv->pv_va, 2, __func__);
7350 			mask = 0;
7351 			value = 0;
7352 			if (modified) {
7353 				mask |= ATTR_S1_AP_RW_BIT;
7354 				value |= ATTR_S1_AP(ATTR_S1_AP_RW);
7355 			}
7356 			if (accessed) {
7357 				mask |= ATTR_AF | ATTR_DESCR_MASK;
7358 				value |= ATTR_AF | L2_BLOCK;
7359 			}
7360 			rv = (pmap_load(pte) & mask) == value;
7361 			PMAP_UNLOCK(pmap);
7362 			if (rv)
7363 				goto out;
7364 		}
7365 	}
7366 out:
7367 	rw_runlock(lock);
7368 	return (rv);
7369 }
7370 
7371 /*
7372  *	pmap_is_modified:
7373  *
7374  *	Return whether or not the specified physical page was modified
7375  *	in any physical maps.
7376  */
7377 bool
pmap_is_modified(vm_page_t m)7378 pmap_is_modified(vm_page_t m)
7379 {
7380 
7381 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7382 	    ("pmap_is_modified: page %p is not managed", m));
7383 
7384 	/*
7385 	 * If the page is not busied then this check is racy.
7386 	 */
7387 	if (!pmap_page_is_write_mapped(m))
7388 		return (false);
7389 	return (pmap_page_test_mappings(m, false, true));
7390 }
7391 
7392 /*
7393  *	pmap_is_prefaultable:
7394  *
7395  *	Return whether or not the specified virtual address is eligible
7396  *	for prefault.
7397  */
7398 bool
pmap_is_prefaultable(pmap_t pmap,vm_offset_t addr)7399 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
7400 {
7401 	pd_entry_t *pde;
7402 	pt_entry_t *pte;
7403 	bool rv;
7404 	int lvl;
7405 
7406 	/*
7407 	 * Return true if and only if the L3 entry for the specified virtual
7408 	 * address is allocated but invalid.
7409 	 */
7410 	rv = false;
7411 	PMAP_LOCK(pmap);
7412 	pde = pmap_pde(pmap, addr, &lvl);
7413 	if (pde != NULL && lvl == 2) {
7414 		pte = pmap_l2_to_l3(pde, addr);
7415 		rv = pmap_load(pte) == 0;
7416 	}
7417 	PMAP_UNLOCK(pmap);
7418 	return (rv);
7419 }
7420 
7421 /*
7422  *	pmap_is_referenced:
7423  *
7424  *	Return whether or not the specified physical page was referenced
7425  *	in any physical maps.
7426  */
7427 bool
pmap_is_referenced(vm_page_t m)7428 pmap_is_referenced(vm_page_t m)
7429 {
7430 
7431 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7432 	    ("pmap_is_referenced: page %p is not managed", m));
7433 	return (pmap_page_test_mappings(m, true, false));
7434 }
7435 
7436 /*
7437  * Clear the write and modified bits in each of the given page's mappings.
7438  */
7439 void
pmap_remove_write(vm_page_t m)7440 pmap_remove_write(vm_page_t m)
7441 {
7442 	struct md_page *pvh;
7443 	pmap_t pmap;
7444 	struct rwlock *lock;
7445 	pv_entry_t next_pv, pv;
7446 	pt_entry_t oldpte, *pte, set, clear, mask, val;
7447 	vm_offset_t va;
7448 	int md_gen, pvh_gen;
7449 
7450 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7451 	    ("pmap_remove_write: page %p is not managed", m));
7452 	vm_page_assert_busied(m);
7453 
7454 	if (!pmap_page_is_write_mapped(m))
7455 		return;
7456 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7457 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
7458 	rw_wlock(lock);
7459 retry:
7460 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
7461 		pmap = PV_PMAP(pv);
7462 		PMAP_ASSERT_STAGE1(pmap);
7463 		if (!PMAP_TRYLOCK(pmap)) {
7464 			pvh_gen = pvh->pv_gen;
7465 			rw_wunlock(lock);
7466 			PMAP_LOCK(pmap);
7467 			rw_wlock(lock);
7468 			if (pvh_gen != pvh->pv_gen) {
7469 				PMAP_UNLOCK(pmap);
7470 				goto retry;
7471 			}
7472 		}
7473 		va = pv->pv_va;
7474 		pte = pmap_pte_exists(pmap, va, 2, __func__);
7475 		if ((pmap_load(pte) & ATTR_SW_DBM) != 0)
7476 			(void)pmap_demote_l2_locked(pmap, pte, va, &lock);
7477 		KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
7478 		    ("inconsistent pv lock %p %p for page %p",
7479 		    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
7480 		PMAP_UNLOCK(pmap);
7481 	}
7482 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
7483 		pmap = PV_PMAP(pv);
7484 		if (!PMAP_TRYLOCK(pmap)) {
7485 			pvh_gen = pvh->pv_gen;
7486 			md_gen = m->md.pv_gen;
7487 			rw_wunlock(lock);
7488 			PMAP_LOCK(pmap);
7489 			rw_wlock(lock);
7490 			if (pvh_gen != pvh->pv_gen ||
7491 			    md_gen != m->md.pv_gen) {
7492 				PMAP_UNLOCK(pmap);
7493 				goto retry;
7494 			}
7495 		}
7496 		pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__);
7497 		oldpte = pmap_load(pte);
7498 		if ((oldpte & ATTR_SW_DBM) != 0) {
7499 			if ((oldpte & ATTR_CONTIGUOUS) != 0) {
7500 				(void)pmap_demote_l3c(pmap, pte, pv->pv_va);
7501 
7502 				/*
7503 				 * The L3 entry's accessed bit may have
7504 				 * changed.
7505 				 */
7506 				oldpte = pmap_load(pte);
7507 			}
7508 			if (pmap->pm_stage == PM_STAGE1) {
7509 				set = ATTR_S1_AP_RW_BIT;
7510 				clear = 0;
7511 				mask = ATTR_S1_AP_RW_BIT;
7512 				val = ATTR_S1_AP(ATTR_S1_AP_RW);
7513 			} else {
7514 				set = 0;
7515 				clear = ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
7516 				mask = ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
7517 				val = ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
7518 			}
7519 			clear |= ATTR_SW_DBM;
7520 			while (!atomic_fcmpset_64(pte, &oldpte,
7521 			    (oldpte | set) & ~clear))
7522 				cpu_spinwait();
7523 
7524 			if ((oldpte & mask) == val)
7525 				vm_page_dirty(m);
7526 			pmap_invalidate_page(pmap, pv->pv_va, true);
7527 		}
7528 		PMAP_UNLOCK(pmap);
7529 	}
7530 	rw_wunlock(lock);
7531 	vm_page_aflag_clear(m, PGA_WRITEABLE);
7532 }
7533 
7534 /*
7535  *	pmap_ts_referenced:
7536  *
7537  *	Return a count of reference bits for a page, clearing those bits.
7538  *	It is not necessary for every reference bit to be cleared, but it
7539  *	is necessary that 0 only be returned when there are truly no
7540  *	reference bits set.
7541  *
7542  *	As an optimization, update the page's dirty field if a modified bit is
7543  *	found while counting reference bits.  This opportunistic update can be
7544  *	performed at low cost and can eliminate the need for some future calls
7545  *	to pmap_is_modified().  However, since this function stops after
7546  *	finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
7547  *	dirty pages.  Those dirty pages will only be detected by a future call
7548  *	to pmap_is_modified().
7549  */
7550 int
pmap_ts_referenced(vm_page_t m)7551 pmap_ts_referenced(vm_page_t m)
7552 {
7553 	struct md_page *pvh;
7554 	pv_entry_t pv, pvf;
7555 	pmap_t pmap;
7556 	struct rwlock *lock;
7557 	pt_entry_t *pte, tpte;
7558 	vm_offset_t va;
7559 	vm_paddr_t pa;
7560 	int cleared, md_gen, not_cleared, pvh_gen;
7561 	struct spglist free;
7562 
7563 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7564 	    ("pmap_ts_referenced: page %p is not managed", m));
7565 	SLIST_INIT(&free);
7566 	cleared = 0;
7567 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
7568 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7569 	rw_wlock(lock);
7570 retry:
7571 	not_cleared = 0;
7572 	if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
7573 		goto small_mappings;
7574 	pv = pvf;
7575 	do {
7576 		if (pvf == NULL)
7577 			pvf = pv;
7578 		pmap = PV_PMAP(pv);
7579 		if (!PMAP_TRYLOCK(pmap)) {
7580 			pvh_gen = pvh->pv_gen;
7581 			rw_wunlock(lock);
7582 			PMAP_LOCK(pmap);
7583 			rw_wlock(lock);
7584 			if (pvh_gen != pvh->pv_gen) {
7585 				PMAP_UNLOCK(pmap);
7586 				goto retry;
7587 			}
7588 		}
7589 		va = pv->pv_va;
7590 		pte = pmap_pte_exists(pmap, va, 2, __func__);
7591 		tpte = pmap_load(pte);
7592 		if (pmap_pte_dirty(pmap, tpte)) {
7593 			/*
7594 			 * Although "tpte" is mapping a 2MB page, because
7595 			 * this function is called at a 4KB page granularity,
7596 			 * we only update the 4KB page under test.
7597 			 */
7598 			vm_page_dirty(m);
7599 		}
7600 		if ((tpte & ATTR_AF) != 0) {
7601 			pa = VM_PAGE_TO_PHYS(m);
7602 
7603 			/*
7604 			 * Since this reference bit is shared by 512 4KB pages,
7605 			 * it should not be cleared every time it is tested.
7606 			 * Apply a simple "hash" function on the physical page
7607 			 * number, the virtual superpage number, and the pmap
7608 			 * address to select one 4KB page out of the 512 on
7609 			 * which testing the reference bit will result in
7610 			 * clearing that reference bit.  This function is
7611 			 * designed to avoid the selection of the same 4KB page
7612 			 * for every 2MB page mapping.
7613 			 *
7614 			 * On demotion, a mapping that hasn't been referenced
7615 			 * is simply destroyed.  To avoid the possibility of a
7616 			 * subsequent page fault on a demoted wired mapping,
7617 			 * always leave its reference bit set.  Moreover,
7618 			 * since the superpage is wired, the current state of
7619 			 * its reference bit won't affect page replacement.
7620 			 */
7621 			if ((((pa >> PAGE_SHIFT) ^ (va >> L2_SHIFT) ^
7622 			    (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
7623 			    (tpte & ATTR_SW_WIRED) == 0) {
7624 				pmap_clear_bits(pte, ATTR_AF);
7625 				pmap_invalidate_page(pmap, va, true);
7626 				cleared++;
7627 			} else
7628 				not_cleared++;
7629 		}
7630 		PMAP_UNLOCK(pmap);
7631 		/* Rotate the PV list if it has more than one entry. */
7632 		if (TAILQ_NEXT(pv, pv_next) != NULL) {
7633 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
7634 			TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
7635 			pvh->pv_gen++;
7636 		}
7637 		if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
7638 			goto out;
7639 	} while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
7640 small_mappings:
7641 	if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
7642 		goto out;
7643 	pv = pvf;
7644 	do {
7645 		if (pvf == NULL)
7646 			pvf = pv;
7647 		pmap = PV_PMAP(pv);
7648 		if (!PMAP_TRYLOCK(pmap)) {
7649 			pvh_gen = pvh->pv_gen;
7650 			md_gen = m->md.pv_gen;
7651 			rw_wunlock(lock);
7652 			PMAP_LOCK(pmap);
7653 			rw_wlock(lock);
7654 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
7655 				PMAP_UNLOCK(pmap);
7656 				goto retry;
7657 			}
7658 		}
7659 		pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__);
7660 		tpte = pmap_load(pte);
7661 		if (pmap_pte_dirty(pmap, tpte))
7662 			vm_page_dirty(m);
7663 		if ((tpte & ATTR_AF) != 0) {
7664 			if ((tpte & ATTR_SW_WIRED) == 0) {
7665 				/*
7666 				 * Clear the accessed bit in this L3 entry
7667 				 * regardless of the contiguous bit.
7668 				 */
7669 				pmap_clear_bits(pte, ATTR_AF);
7670 				pmap_invalidate_page(pmap, pv->pv_va, true);
7671 				cleared++;
7672 			} else
7673 				not_cleared++;
7674 		} else if ((tpte & ATTR_CONTIGUOUS) != 0 &&
7675 		    (pmap_load_l3c(pte) & ATTR_AF) != 0) {
7676 			/*
7677 			 * An L3C superpage mapping is regarded as accessed
7678 			 * until the accessed bit has been cleared in all
7679 			 * of its constituent entries.
7680 			 */
7681 			not_cleared++;
7682 		}
7683 		PMAP_UNLOCK(pmap);
7684 		/* Rotate the PV list if it has more than one entry. */
7685 		if (TAILQ_NEXT(pv, pv_next) != NULL) {
7686 			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
7687 			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
7688 			m->md.pv_gen++;
7689 		}
7690 	} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
7691 	    not_cleared < PMAP_TS_REFERENCED_MAX);
7692 out:
7693 	rw_wunlock(lock);
7694 	vm_page_free_pages_toq(&free, true);
7695 	return (cleared + not_cleared);
7696 }
7697 
7698 /*
7699  *	Apply the given advice to the specified range of addresses within the
7700  *	given pmap.  Depending on the advice, clear the referenced and/or
7701  *	modified flags in each mapping and set the mapped page's dirty field.
7702  */
7703 void
pmap_advise(pmap_t pmap,vm_offset_t sva,vm_offset_t eva,int advice)7704 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
7705 {
7706 	struct rwlock *lock;
7707 	vm_offset_t va, va_next, dva;
7708 	vm_page_t m;
7709 	pd_entry_t *l0, *l1, *l2, oldl2;
7710 	pt_entry_t *l3, *dl3, oldl3;
7711 
7712 	PMAP_ASSERT_STAGE1(pmap);
7713 
7714 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
7715 		return;
7716 
7717 	PMAP_LOCK(pmap);
7718 	for (; sva < eva; sva = va_next) {
7719 		l0 = pmap_l0(pmap, sva);
7720 		if (pmap_load(l0) == 0) {
7721 			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
7722 			if (va_next < sva)
7723 				va_next = eva;
7724 			continue;
7725 		}
7726 
7727 		va_next = (sva + L1_SIZE) & ~L1_OFFSET;
7728 		if (va_next < sva)
7729 			va_next = eva;
7730 		l1 = pmap_l0_to_l1(l0, sva);
7731 		if (pmap_load(l1) == 0)
7732 			continue;
7733 		if ((pmap_load(l1) & ATTR_DESCR_MASK) == L1_BLOCK) {
7734 			PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
7735 			continue;
7736 		}
7737 
7738 		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
7739 		if (va_next < sva)
7740 			va_next = eva;
7741 		l2 = pmap_l1_to_l2(l1, sva);
7742 		oldl2 = pmap_load(l2);
7743 		if (oldl2 == 0)
7744 			continue;
7745 		if ((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK) {
7746 			if ((oldl2 & ATTR_SW_MANAGED) == 0)
7747 				continue;
7748 			lock = NULL;
7749 			if (!pmap_demote_l2_locked(pmap, l2, sva, &lock)) {
7750 				if (lock != NULL)
7751 					rw_wunlock(lock);
7752 
7753 				/*
7754 				 * The 2MB page mapping was destroyed.
7755 				 */
7756 				continue;
7757 			}
7758 
7759 			/*
7760 			 * Unless the page mappings are wired, remove the
7761 			 * mapping to a single page so that a subsequent
7762 			 * access may repromote.  Choosing the last page
7763 			 * within the address range [sva, min(va_next, eva))
7764 			 * generally results in more repromotions.  Since the
7765 			 * underlying page table page is fully populated, this
7766 			 * removal never frees a page table page.
7767 			 */
7768 			if ((oldl2 & ATTR_SW_WIRED) == 0) {
7769 				va = eva;
7770 				if (va > va_next)
7771 					va = va_next;
7772 				va -= PAGE_SIZE;
7773 				KASSERT(va >= sva,
7774 				    ("pmap_advise: no address gap"));
7775 				l3 = pmap_l2_to_l3(l2, va);
7776 				KASSERT(pmap_load(l3) != 0,
7777 				    ("pmap_advise: invalid PTE"));
7778 				pmap_remove_l3(pmap, l3, va, pmap_load(l2),
7779 				    NULL, &lock);
7780 			}
7781 			if (lock != NULL)
7782 				rw_wunlock(lock);
7783 		}
7784 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
7785 		    ("pmap_advise: invalid L2 entry after demotion"));
7786 		if (va_next > eva)
7787 			va_next = eva;
7788 		va = va_next;
7789 		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
7790 		    sva += L3_SIZE) {
7791 			oldl3 = pmap_load(l3);
7792 			if ((oldl3 & (ATTR_SW_MANAGED | ATTR_DESCR_MASK)) !=
7793 			    (ATTR_SW_MANAGED | L3_PAGE))
7794 				goto maybe_invlrng;
7795 			else if (pmap_pte_dirty(pmap, oldl3)) {
7796 				if (advice == MADV_DONTNEED) {
7797 					/*
7798 					 * Future calls to pmap_is_modified()
7799 					 * can be avoided by making the page
7800 					 * dirty now.
7801 					 */
7802 					m = PTE_TO_VM_PAGE(oldl3);
7803 					vm_page_dirty(m);
7804 				}
7805 				if ((oldl3 & ATTR_CONTIGUOUS) != 0) {
7806 					/*
7807 					 * Unconditionally demote the L3C
7808 					 * superpage because we do not allow
7809 					 * writeable, clean superpages.
7810 					 */
7811 					(void)pmap_demote_l3c(pmap, l3, sva);
7812 
7813 					/*
7814                                          * Destroy the final mapping before the
7815                                          * next L3C boundary or va_next,
7816 					 * whichever comes first, so that a
7817 					 * subsequent access may act as a
7818 					 * repromotion trigger.
7819 					 */
7820                                         if ((oldl3 & ATTR_SW_WIRED) == 0) {
7821 						dva = MIN((sva & ~L3C_OFFSET) +
7822 						    L3C_SIZE - PAGE_SIZE,
7823 						    va_next - PAGE_SIZE);
7824 						dl3 = pmap_l2_to_l3(l2, dva);
7825 						KASSERT(pmap_load(dl3) != 0,
7826 						    ("pmap_advise: invalid PTE"));
7827 						lock = NULL;
7828 						pmap_remove_l3(pmap, dl3, dva,
7829 						    pmap_load(l2), NULL, &lock);
7830 						if (lock != NULL)
7831 							rw_wunlock(lock);
7832 					}
7833 
7834 					/*
7835 					 * The L3 entry's accessed bit may have
7836 					 * changed.
7837 					 */
7838 					oldl3 = pmap_load(l3);
7839 				}
7840 
7841 				/*
7842 				 * Check that we did not just destroy this entry so
7843 				 * we avoid corrupting the page able.
7844 				 */
7845 				if (oldl3 != 0) {
7846 					while (!atomic_fcmpset_long(l3, &oldl3,
7847 					    (oldl3 & ~ATTR_AF) |
7848 					    ATTR_S1_AP(ATTR_S1_AP_RO)))
7849 						cpu_spinwait();
7850 				}
7851 			} else if ((oldl3 & ATTR_AF) != 0) {
7852 				/*
7853 				 * Clear the accessed bit in this L3 entry
7854 				 * regardless of the contiguous bit.
7855 				 */
7856 				pmap_clear_bits(l3, ATTR_AF);
7857 			} else
7858 				goto maybe_invlrng;
7859 			if (va == va_next)
7860 				va = sva;
7861 			continue;
7862 maybe_invlrng:
7863 			if (va != va_next) {
7864 				pmap_s1_invalidate_range(pmap, va, sva, true);
7865 				va = va_next;
7866 			}
7867 		}
7868 		if (va != va_next)
7869 			pmap_s1_invalidate_range(pmap, va, sva, true);
7870 	}
7871 	PMAP_UNLOCK(pmap);
7872 }
7873 
7874 /*
7875  *	Clear the modify bits on the specified physical page.
7876  */
7877 void
pmap_clear_modify(vm_page_t m)7878 pmap_clear_modify(vm_page_t m)
7879 {
7880 	struct md_page *pvh;
7881 	struct rwlock *lock;
7882 	pmap_t pmap;
7883 	pv_entry_t next_pv, pv;
7884 	pd_entry_t *l2, oldl2;
7885 	pt_entry_t *l3, oldl3;
7886 	vm_offset_t va;
7887 	int md_gen, pvh_gen;
7888 
7889 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7890 	    ("pmap_clear_modify: page %p is not managed", m));
7891 	vm_page_assert_busied(m);
7892 
7893 	if (!pmap_page_is_write_mapped(m))
7894 		return;
7895 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : page_to_pvh(m);
7896 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7897 	rw_wlock(lock);
7898 restart:
7899 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
7900 		pmap = PV_PMAP(pv);
7901 		PMAP_ASSERT_STAGE1(pmap);
7902 		if (!PMAP_TRYLOCK(pmap)) {
7903 			pvh_gen = pvh->pv_gen;
7904 			rw_wunlock(lock);
7905 			PMAP_LOCK(pmap);
7906 			rw_wlock(lock);
7907 			if (pvh_gen != pvh->pv_gen) {
7908 				PMAP_UNLOCK(pmap);
7909 				goto restart;
7910 			}
7911 		}
7912 		va = pv->pv_va;
7913 		l2 = pmap_l2(pmap, va);
7914 		oldl2 = pmap_load(l2);
7915 		/* If oldl2 has ATTR_SW_DBM set, then it is also dirty. */
7916 		if ((oldl2 & ATTR_SW_DBM) != 0 &&
7917 		    pmap_demote_l2_locked(pmap, l2, va, &lock) &&
7918 		    (oldl2 & ATTR_SW_WIRED) == 0) {
7919 			/*
7920 			 * Write protect the mapping to a single page so that
7921 			 * a subsequent write access may repromote.
7922 			 */
7923 			va += VM_PAGE_TO_PHYS(m) - PTE_TO_PHYS(oldl2);
7924 			l3 = pmap_l2_to_l3(l2, va);
7925 			oldl3 = pmap_load(l3);
7926 			while (!atomic_fcmpset_long(l3, &oldl3,
7927 			    (oldl3 & ~ATTR_SW_DBM) | ATTR_S1_AP(ATTR_S1_AP_RO)))
7928 				cpu_spinwait();
7929 			vm_page_dirty(m);
7930 			pmap_s1_invalidate_page(pmap, va, true);
7931 		}
7932 		PMAP_UNLOCK(pmap);
7933 	}
7934 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
7935 		pmap = PV_PMAP(pv);
7936 		PMAP_ASSERT_STAGE1(pmap);
7937 		if (!PMAP_TRYLOCK(pmap)) {
7938 			md_gen = m->md.pv_gen;
7939 			pvh_gen = pvh->pv_gen;
7940 			rw_wunlock(lock);
7941 			PMAP_LOCK(pmap);
7942 			rw_wlock(lock);
7943 			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
7944 				PMAP_UNLOCK(pmap);
7945 				goto restart;
7946 			}
7947 		}
7948 		l2 = pmap_l2(pmap, pv->pv_va);
7949 		l3 = pmap_l2_to_l3(l2, pv->pv_va);
7950 		oldl3 = pmap_load(l3);
7951 		KASSERT((oldl3 & ATTR_CONTIGUOUS) == 0 ||
7952 		    (oldl3 & (ATTR_SW_DBM | ATTR_S1_AP_RW_BIT)) !=
7953 		    (ATTR_SW_DBM | ATTR_S1_AP(ATTR_S1_AP_RO)),
7954 		    ("writeable L3C superpage not dirty"));
7955 		if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM) {
7956 			if ((oldl3 & ATTR_CONTIGUOUS) != 0)
7957 				(void)pmap_demote_l3c(pmap, l3, pv->pv_va);
7958 			pmap_set_bits(l3, ATTR_S1_AP(ATTR_S1_AP_RO));
7959 			pmap_s1_invalidate_page(pmap, pv->pv_va, true);
7960 		}
7961 		PMAP_UNLOCK(pmap);
7962 	}
7963 	rw_wunlock(lock);
7964 }
7965 
7966 void *
pmap_mapbios(vm_paddr_t pa,vm_size_t size)7967 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
7968 {
7969 	struct pmap_preinit_mapping *ppim;
7970 	vm_offset_t va, offset;
7971 	pd_entry_t old_l2e, *pde;
7972 	pt_entry_t *l2;
7973 	int i, lvl, l2_blocks, free_l2_count, start_idx;
7974 
7975 	/* Use the DMAP region if we can */
7976 	if (PHYS_IN_DMAP(pa) && PHYS_IN_DMAP(pa + size - 1) &&
7977 	    pmap_kmapped_range(PHYS_TO_DMAP(pa), size))
7978 		return ((void *)PHYS_TO_DMAP(pa));
7979 
7980 	if (!vm_initialized) {
7981 		/*
7982 		 * No L3 ptables so map entire L2 blocks where start VA is:
7983 		 * 	preinit_map_va + start_idx * L2_SIZE
7984 		 * There may be duplicate mappings (multiple VA -> same PA) but
7985 		 * ARM64 dcache is always PIPT so that's acceptable.
7986 		 */
7987 		 if (size == 0)
7988 			 return (NULL);
7989 
7990 		 /* Calculate how many L2 blocks are needed for the mapping */
7991 		l2_blocks = (roundup2(pa + size, L2_SIZE) -
7992 		    rounddown2(pa, L2_SIZE)) >> L2_SHIFT;
7993 
7994 		offset = pa & L2_OFFSET;
7995 
7996 		if (preinit_map_va == 0)
7997 			return (NULL);
7998 
7999 		/* Map 2MiB L2 blocks from reserved VA space */
8000 
8001 		free_l2_count = 0;
8002 		start_idx = -1;
8003 		/* Find enough free contiguous VA space */
8004 		for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
8005 			ppim = pmap_preinit_mapping + i;
8006 			if (free_l2_count > 0 && ppim->pa != 0) {
8007 				/* Not enough space here */
8008 				free_l2_count = 0;
8009 				start_idx = -1;
8010 				continue;
8011 			}
8012 
8013 			if (ppim->pa == 0) {
8014 				/* Free L2 block */
8015 				if (start_idx == -1)
8016 					start_idx = i;
8017 				free_l2_count++;
8018 				if (free_l2_count == l2_blocks)
8019 					break;
8020 			}
8021 		}
8022 		if (free_l2_count != l2_blocks)
8023 			panic("%s: too many preinit mappings", __func__);
8024 
8025 		va = preinit_map_va + (start_idx * L2_SIZE);
8026 		for (i = start_idx; i < start_idx + l2_blocks; i++) {
8027 			/* Mark entries as allocated */
8028 			ppim = pmap_preinit_mapping + i;
8029 			ppim->pa = pa;
8030 			ppim->va = va + offset;
8031 			ppim->size = size;
8032 		}
8033 
8034 		/* Map L2 blocks */
8035 		pa = rounddown2(pa, L2_SIZE);
8036 		old_l2e = 0;
8037 		for (i = 0; i < l2_blocks; i++) {
8038 			pde = pmap_pde(kernel_pmap, va, &lvl);
8039 			KASSERT(pde != NULL,
8040 			    ("pmap_mapbios: Invalid page entry, va: 0x%lx",
8041 			    va));
8042 			KASSERT(lvl == 1,
8043 			    ("pmap_mapbios: Invalid level %d", lvl));
8044 
8045 			/* Insert L2_BLOCK */
8046 			l2 = pmap_l1_to_l2(pde, va);
8047 			old_l2e |= pmap_load_store(l2,
8048 			    PHYS_TO_PTE(pa) | ATTR_AF | pmap_sh_attr |
8049 			    ATTR_S1_XN | ATTR_KERN_GP |
8050 			    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
8051 
8052 			va += L2_SIZE;
8053 			pa += L2_SIZE;
8054 		}
8055 		if ((old_l2e & ATTR_DESCR_VALID) != 0)
8056 			pmap_s1_invalidate_all_kernel();
8057 		else {
8058 			/*
8059 			 * Because the old entries were invalid and the new
8060 			 * mappings are not executable, an isb is not required.
8061 			 */
8062 			dsb(ishst);
8063 		}
8064 
8065 		va = preinit_map_va + (start_idx * L2_SIZE);
8066 
8067 	} else {
8068 		/* kva_alloc may be used to map the pages */
8069 		offset = pa & PAGE_MASK;
8070 		size = round_page(offset + size);
8071 
8072 		va = kva_alloc(size);
8073 		if (va == 0)
8074 			panic("%s: Couldn't allocate KVA", __func__);
8075 
8076 		pde = pmap_pde(kernel_pmap, va, &lvl);
8077 		KASSERT(lvl == 2, ("pmap_mapbios: Invalid level %d", lvl));
8078 
8079 		/* L3 table is linked */
8080 		va = trunc_page(va);
8081 		pa = trunc_page(pa);
8082 		pmap_kenter(va, size, pa, memory_mapping_mode(pa));
8083 	}
8084 
8085 	return ((void *)(va + offset));
8086 }
8087 
8088 void
pmap_unmapbios(void * p,vm_size_t size)8089 pmap_unmapbios(void *p, vm_size_t size)
8090 {
8091 	struct pmap_preinit_mapping *ppim;
8092 	vm_offset_t offset, va, va_trunc;
8093 	pd_entry_t *pde;
8094 	pt_entry_t *l2;
8095 	int error __diagused, i, lvl, l2_blocks, block;
8096 	bool preinit_map;
8097 
8098 	va = (vm_offset_t)p;
8099 	if (VIRT_IN_DMAP(va)) {
8100 		KASSERT(VIRT_IN_DMAP(va + size - 1),
8101 		    ("%s: End address not in DMAP region: %lx", __func__,
8102 		    va + size - 1));
8103 		/* Ensure the attributes are as expected for the DMAP region */
8104 		PMAP_LOCK(kernel_pmap);
8105 		error = pmap_change_props_locked(va, size,
8106 		    PROT_READ | PROT_WRITE, VM_MEMATTR_DEFAULT, false);
8107 		PMAP_UNLOCK(kernel_pmap);
8108 		KASSERT(error == 0, ("%s: Failed to reset DMAP attributes: %d",
8109 		    __func__, error));
8110 
8111 		return;
8112 	}
8113 
8114 	l2_blocks =
8115 	   (roundup2(va + size, L2_SIZE) - rounddown2(va, L2_SIZE)) >> L2_SHIFT;
8116 	KASSERT(l2_blocks > 0, ("pmap_unmapbios: invalid size %lx", size));
8117 
8118 	/* Remove preinit mapping */
8119 	preinit_map = false;
8120 	block = 0;
8121 	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
8122 		ppim = pmap_preinit_mapping + i;
8123 		if (ppim->va == va) {
8124 			KASSERT(ppim->size == size,
8125 			    ("pmap_unmapbios: size mismatch"));
8126 			ppim->va = 0;
8127 			ppim->pa = 0;
8128 			ppim->size = 0;
8129 			preinit_map = true;
8130 			offset = block * L2_SIZE;
8131 			va_trunc = rounddown2(va, L2_SIZE) + offset;
8132 
8133 			/* Remove L2_BLOCK */
8134 			pde = pmap_pde(kernel_pmap, va_trunc, &lvl);
8135 			KASSERT(pde != NULL,
8136 			    ("pmap_unmapbios: Invalid page entry, va: 0x%lx",
8137 			    va_trunc));
8138 			l2 = pmap_l1_to_l2(pde, va_trunc);
8139 			pmap_clear(l2);
8140 
8141 			if (block == (l2_blocks - 1))
8142 				break;
8143 			block++;
8144 		}
8145 	}
8146 	if (preinit_map) {
8147 		pmap_s1_invalidate_all_kernel();
8148 		return;
8149 	}
8150 
8151 	/* Unmap the pages reserved with kva_alloc. */
8152 	if (vm_initialized) {
8153 		offset = va & PAGE_MASK;
8154 		size = round_page(offset + size);
8155 		va = trunc_page(va);
8156 
8157 		/* Unmap and invalidate the pages */
8158 		pmap_kremove_device(va, size);
8159 
8160 		kva_free(va, size);
8161 	}
8162 }
8163 
8164 /*
8165  * Sets the memory attribute for the specified page.
8166  */
8167 void
pmap_page_set_memattr(vm_page_t m,vm_memattr_t ma)8168 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
8169 {
8170 	if (m->md.pv_memattr == ma)
8171 		return;
8172 
8173 	m->md.pv_memattr = ma;
8174 
8175 	/*
8176 	 * If "m" is a normal page, update its direct mapping.  This update
8177 	 * can be relied upon to perform any cache operations that are
8178 	 * required for data coherence.
8179 	 */
8180 	if ((m->flags & PG_FICTITIOUS) == 0 &&
8181 	    pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
8182 	    m->md.pv_memattr) != 0)
8183 		panic("memory attribute change on the direct map failed");
8184 }
8185 
8186 /*
8187  * Changes the specified virtual address range's memory type to that given by
8188  * the parameter "mode".  The specified virtual address range must be
8189  * completely contained within either the direct map or the kernel map.  If
8190  * the virtual address range is contained within the kernel map, then the
8191  * memory type for each of the corresponding ranges of the direct map is also
8192  * changed.  (The corresponding ranges of the direct map are those ranges that
8193  * map the same physical pages as the specified virtual address range.)  These
8194  * changes to the direct map are necessary because Intel describes the
8195  * behavior of their processors as "undefined" if two or more mappings to the
8196  * same physical page have different memory types.
8197  *
8198  * Returns zero if the change completed successfully, and either EINVAL or
8199  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
8200  * of the virtual address range was not mapped, and ENOMEM is returned if
8201  * there was insufficient memory available to complete the change.  In the
8202  * latter case, the memory type may have been changed on some part of the
8203  * virtual address range or the direct map.
8204  */
8205 int
pmap_change_attr(vm_offset_t va,vm_size_t size,int mode)8206 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
8207 {
8208 	int error;
8209 
8210 	PMAP_LOCK(kernel_pmap);
8211 	error = pmap_change_props_locked(va, size, PROT_NONE, mode, false);
8212 	PMAP_UNLOCK(kernel_pmap);
8213 	return (error);
8214 }
8215 
8216 /*
8217  * Changes the specified virtual address range's protections to those
8218  * specified by "prot".  Like pmap_change_attr(), protections for aliases
8219  * in the direct map are updated as well.  Protections on aliasing mappings may
8220  * be a subset of the requested protections; for example, mappings in the direct
8221  * map are never executable.
8222  */
8223 int
pmap_change_prot(vm_offset_t va,vm_size_t size,vm_prot_t prot)8224 pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot)
8225 {
8226 	int error;
8227 
8228 	/* Only supported within the kernel map. */
8229 	if (va < VM_MIN_KERNEL_ADDRESS)
8230 		return (EINVAL);
8231 
8232 	PMAP_LOCK(kernel_pmap);
8233 	error = pmap_change_props_locked(va, size, prot, -1, false);
8234 	PMAP_UNLOCK(kernel_pmap);
8235 	return (error);
8236 }
8237 
8238 static int
pmap_change_props_locked(vm_offset_t va,vm_size_t size,vm_prot_t prot,int mode,bool skip_unmapped)8239 pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
8240     int mode, bool skip_unmapped)
8241 {
8242 	vm_offset_t base, offset, tmpva;
8243 	vm_size_t pte_size;
8244 	vm_paddr_t pa;
8245 	pt_entry_t pte, *ptep, *newpte;
8246 	pt_entry_t bits, mask;
8247 	int lvl, rv;
8248 
8249 	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
8250 	base = trunc_page(va);
8251 	offset = va & PAGE_MASK;
8252 	size = round_page(offset + size);
8253 
8254 	if (!VIRT_IN_DMAP(base) &&
8255 	    !(base >= VM_MIN_KERNEL_ADDRESS && base < VM_MAX_KERNEL_ADDRESS))
8256 		return (EINVAL);
8257 
8258 	bits = 0;
8259 	mask = 0;
8260 	if (mode != -1) {
8261 		bits = ATTR_S1_IDX(mode);
8262 		mask = ATTR_S1_IDX_MASK;
8263 		if (mode == VM_MEMATTR_DEVICE) {
8264 			mask |= ATTR_S1_XN;
8265 			bits |= ATTR_S1_XN;
8266 		}
8267 	}
8268 	if (prot != VM_PROT_NONE) {
8269 		/* Don't mark the DMAP as executable. It never is on arm64. */
8270 		if (VIRT_IN_DMAP(base)) {
8271 			prot &= ~VM_PROT_EXECUTE;
8272 			/*
8273 			 * XXX Mark the DMAP as writable for now. We rely
8274 			 * on this in ddb & dtrace to insert breakpoint
8275 			 * instructions.
8276 			 */
8277 			prot |= VM_PROT_WRITE;
8278 		}
8279 
8280 		if ((prot & VM_PROT_WRITE) == 0) {
8281 			bits |= ATTR_S1_AP(ATTR_S1_AP_RO);
8282 		}
8283 		if ((prot & VM_PROT_EXECUTE) == 0) {
8284 			bits |= ATTR_S1_PXN;
8285 		}
8286 		bits |= ATTR_S1_UXN;
8287 		mask |= ATTR_S1_AP_MASK | ATTR_S1_XN;
8288 	}
8289 
8290 	for (tmpva = base; tmpva < base + size; ) {
8291 		ptep = pmap_pte(kernel_pmap, tmpva, &lvl);
8292 		if (ptep == NULL && !skip_unmapped) {
8293 			return (EINVAL);
8294 		} else if ((ptep == NULL && skip_unmapped) ||
8295 		    (pmap_load(ptep) & mask) == bits) {
8296 			/*
8297 			 * We already have the correct attribute or there
8298 			 * is no memory mapped at this address and we are
8299 			 * skipping unmapped memory.
8300 			 */
8301 			switch (lvl) {
8302 			default:
8303 				panic("Invalid DMAP table level: %d\n", lvl);
8304 			case 1:
8305 				tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
8306 				break;
8307 			case 2:
8308 				tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
8309 				break;
8310 			case 3:
8311 				tmpva += PAGE_SIZE;
8312 				break;
8313 			}
8314 		} else {
8315 			/* We can't demote/promote this entry */
8316 			MPASS((pmap_load(ptep) & ATTR_SW_NO_PROMOTE) == 0);
8317 
8318 			/*
8319 			 * Find the entry and demote it if the requested change
8320 			 * only applies to part of the address range mapped by
8321 			 * the entry.
8322 			 */
8323 			switch (lvl) {
8324 			default:
8325 				panic("Invalid DMAP table level: %d\n", lvl);
8326 			case 1:
8327 				PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
8328 				if ((tmpva & L1_OFFSET) == 0 &&
8329 				    (base + size - tmpva) >= L1_SIZE) {
8330 					pte_size = L1_SIZE;
8331 					break;
8332 				}
8333 				newpte = pmap_demote_l1(kernel_pmap, ptep,
8334 				    tmpva & ~L1_OFFSET);
8335 				if (newpte == NULL)
8336 					return (EINVAL);
8337 				ptep = pmap_l1_to_l2(ptep, tmpva);
8338 				/* FALLTHROUGH */
8339 			case 2:
8340 				if ((pmap_load(ptep) & ATTR_CONTIGUOUS) != 0) {
8341 					if ((tmpva & L2C_OFFSET) == 0 &&
8342 					    (base + size - tmpva) >= L2C_SIZE) {
8343 						pte_size = L2C_SIZE;
8344 						break;
8345 					}
8346 					if (!pmap_demote_l2c(kernel_pmap, ptep,
8347 					    tmpva))
8348 						return (EINVAL);
8349 				}
8350 				if ((tmpva & L2_OFFSET) == 0 &&
8351 				    (base + size - tmpva) >= L2_SIZE) {
8352 					pte_size = L2_SIZE;
8353 					break;
8354 				}
8355 				newpte = pmap_demote_l2(kernel_pmap, ptep,
8356 				    tmpva);
8357 				if (newpte == NULL)
8358 					return (EINVAL);
8359 				ptep = pmap_l2_to_l3(ptep, tmpva);
8360 				/* FALLTHROUGH */
8361 			case 3:
8362 				if ((pmap_load(ptep) & ATTR_CONTIGUOUS) != 0) {
8363 					if ((tmpva & L3C_OFFSET) == 0 &&
8364 					    (base + size - tmpva) >= L3C_SIZE) {
8365 						pte_size = L3C_SIZE;
8366 						break;
8367 					}
8368 					if (!pmap_demote_l3c(kernel_pmap, ptep,
8369 					    tmpva))
8370 						return (EINVAL);
8371 				}
8372 				pte_size = PAGE_SIZE;
8373 				break;
8374 			}
8375 
8376 			/* Update the entry */
8377 			pte = pmap_load(ptep);
8378 			pte &= ~mask;
8379 			pte |= bits;
8380 
8381 			switch (pte_size) {
8382 			case L2C_SIZE:
8383 				pmap_update_strided(kernel_pmap, ptep, ptep +
8384 				    L2C_ENTRIES, pte, tmpva, L2_SIZE, L2C_SIZE);
8385 				break;
8386 			case L3C_SIZE:
8387 				pmap_update_strided(kernel_pmap, ptep, ptep +
8388 				    L3C_ENTRIES, pte, tmpva, L3_SIZE, L3C_SIZE);
8389 				break;
8390 			default:
8391 				/*
8392 				 * We are updating a single block or page entry,
8393 				 * so regardless of pte_size pass PAGE_SIZE in
8394 				 * order that a single TLB invalidation is
8395 				 * performed.
8396 				 */
8397 				pmap_update_entry(kernel_pmap, ptep, pte, tmpva,
8398 				    PAGE_SIZE);
8399 				break;
8400 			}
8401 
8402 			pa = PTE_TO_PHYS(pte);
8403 			if (!VIRT_IN_DMAP(tmpva) && PHYS_IN_DMAP(pa)) {
8404 				/*
8405 				 * Keep the DMAP memory in sync.
8406 				 */
8407 				rv = pmap_change_props_locked(
8408 				    PHYS_TO_DMAP(pa), pte_size,
8409 				    prot, mode, true);
8410 				if (rv != 0)
8411 					return (rv);
8412 			}
8413 
8414 			/*
8415 			 * If moving to a non-cacheable entry flush
8416 			 * the cache.
8417 			 */
8418 			if (mode == VM_MEMATTR_UNCACHEABLE)
8419 				cpu_dcache_wbinv_range((void *)tmpva, pte_size);
8420 			tmpva += pte_size;
8421 		}
8422 	}
8423 
8424 	return (0);
8425 }
8426 
8427 /*
8428  * Create an L2 table to map all addresses within an L1 mapping.
8429  */
8430 static pt_entry_t *
pmap_demote_l1(pmap_t pmap,pt_entry_t * l1,vm_offset_t va)8431 pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
8432 {
8433 	pt_entry_t *l2, newl2, oldl1;
8434 	vm_offset_t tmpl1;
8435 	vm_paddr_t l2phys, phys;
8436 	vm_page_t ml2;
8437 	int i;
8438 
8439 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
8440 	oldl1 = pmap_load(l1);
8441 	PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
8442 	KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK,
8443 	    ("pmap_demote_l1: Demoting a non-block entry"));
8444 	KASSERT((va & L1_OFFSET) == 0,
8445 	    ("pmap_demote_l1: Invalid virtual address %#lx", va));
8446 	KASSERT((oldl1 & ATTR_SW_MANAGED) == 0,
8447 	    ("pmap_demote_l1: Level 1 table shouldn't be managed"));
8448 	KASSERT((oldl1 & ATTR_SW_NO_PROMOTE) == 0,
8449 	    ("pmap_demote_l1: Demoting entry with no-demote flag set"));
8450 
8451 	tmpl1 = 0;
8452 	if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
8453 		tmpl1 = kva_alloc(PAGE_SIZE);
8454 		if (tmpl1 == 0)
8455 			return (NULL);
8456 	}
8457 
8458 	if ((ml2 = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED)) ==
8459 	    NULL) {
8460 		CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
8461 		    " in pmap %p", va, pmap);
8462 		l2 = NULL;
8463 		goto fail;
8464 	}
8465 
8466 	l2phys = VM_PAGE_TO_PHYS(ml2);
8467 	l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys);
8468 
8469 	/* Address the range points at */
8470 	phys = PTE_TO_PHYS(oldl1);
8471 	/* The attributed from the old l1 table to be copied */
8472 	newl2 = oldl1 & ATTR_MASK;
8473 
8474 	/* Create the new entries */
8475 	newl2 |= ATTR_CONTIGUOUS;
8476 	for (i = 0; i < Ln_ENTRIES; i++) {
8477 		l2[i] = newl2 | phys;
8478 		phys += L2_SIZE;
8479 	}
8480 	KASSERT(l2[0] == (ATTR_CONTIGUOUS | (oldl1 & ~ATTR_DESCR_MASK) |
8481 	    L2_BLOCK), ("Invalid l2 page (%lx != %lx)", l2[0],
8482 	    ATTR_CONTIGUOUS | (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));
8483 
8484 	if (tmpl1 != 0) {
8485 		pmap_kenter(tmpl1, PAGE_SIZE,
8486 		    DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET,
8487 		    VM_MEMATTR_WRITE_BACK);
8488 		l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
8489 	}
8490 
8491 	pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE);
8492 
8493 	counter_u64_add(pmap_l1_demotions, 1);
8494 fail:
8495 	if (tmpl1 != 0) {
8496 		pmap_kremove(tmpl1);
8497 		kva_free(tmpl1, PAGE_SIZE);
8498 	}
8499 
8500 	return (l2);
8501 }
8502 
8503 static void
pmap_fill_l3(pt_entry_t * firstl3,pt_entry_t newl3)8504 pmap_fill_l3(pt_entry_t *firstl3, pt_entry_t newl3)
8505 {
8506 	pt_entry_t *l3;
8507 
8508 	for (l3 = firstl3; l3 - firstl3 < Ln_ENTRIES; l3++) {
8509 		*l3 = newl3;
8510 		newl3 += L3_SIZE;
8511 	}
8512 }
8513 
8514 static void
pmap_demote_l2_check(pt_entry_t * firstl3p __unused,pt_entry_t newl3e __unused)8515 pmap_demote_l2_check(pt_entry_t *firstl3p __unused, pt_entry_t newl3e __unused)
8516 {
8517 #ifdef INVARIANTS
8518 #ifdef DIAGNOSTIC
8519 	pt_entry_t *xl3p, *yl3p;
8520 
8521 	for (xl3p = firstl3p; xl3p < firstl3p + Ln_ENTRIES;
8522 	    xl3p++, newl3e += PAGE_SIZE) {
8523 		if (PTE_TO_PHYS(pmap_load(xl3p)) != PTE_TO_PHYS(newl3e)) {
8524 			printf("pmap_demote_l2: xl3e %zd and newl3e map "
8525 			    "different pages: found %#lx, expected %#lx\n",
8526 			    xl3p - firstl3p, pmap_load(xl3p), newl3e);
8527 			printf("page table dump\n");
8528 			for (yl3p = firstl3p; yl3p < firstl3p + Ln_ENTRIES;
8529 			    yl3p++) {
8530 				printf("%zd %#lx\n", yl3p - firstl3p,
8531 				    pmap_load(yl3p));
8532 			}
8533 			panic("firstpte");
8534 		}
8535 	}
8536 #else
8537 	KASSERT(PTE_TO_PHYS(pmap_load(firstl3p)) == PTE_TO_PHYS(newl3e),
8538 	    ("pmap_demote_l2: firstl3 and newl3e map different physical"
8539 	    " addresses"));
8540 #endif
8541 #endif
8542 }
8543 
8544 static void
pmap_demote_l2_abort(pmap_t pmap,vm_offset_t va,pt_entry_t * l2,struct rwlock ** lockp)8545 pmap_demote_l2_abort(pmap_t pmap, vm_offset_t va, pt_entry_t *l2,
8546     struct rwlock **lockp)
8547 {
8548 	struct spglist free;
8549 
8550 	SLIST_INIT(&free);
8551 	(void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), true,
8552 	    &free, lockp);
8553 	vm_page_free_pages_toq(&free, true);
8554 }
8555 
8556 /*
8557  * Create an L3 table to map all addresses within an L2 mapping.
8558  */
8559 static pt_entry_t *
pmap_demote_l2_locked(pmap_t pmap,pt_entry_t * l2,vm_offset_t va,struct rwlock ** lockp)8560 pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
8561     struct rwlock **lockp)
8562 {
8563 	pt_entry_t *l3, newl3, oldl2;
8564 	vm_offset_t tmpl2;
8565 	vm_paddr_t l3phys;
8566 	vm_page_t ml3;
8567 
8568 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
8569 	PMAP_ASSERT_STAGE1(pmap);
8570 	KASSERT(ADDR_IS_CANONICAL(va),
8571 	    ("%s: Address not in canonical form: %lx", __func__, va));
8572 
8573 	l3 = NULL;
8574 	oldl2 = pmap_load(l2);
8575 	KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
8576 	    ("pmap_demote_l2: Demoting a non-block entry"));
8577 	KASSERT((oldl2 & ATTR_SW_NO_PROMOTE) == 0,
8578 	    ("pmap_demote_l2: Demoting entry with no-demote flag set"));
8579 	va &= ~L2_OFFSET;
8580 
8581 	tmpl2 = 0;
8582 	if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
8583 		tmpl2 = kva_alloc(PAGE_SIZE);
8584 		if (tmpl2 == 0)
8585 			return (NULL);
8586 	}
8587 
8588 	/*
8589 	 * Invalidate the 2MB page mapping and return "failure" if the
8590 	 * mapping was never accessed and not wired.
8591 	 */
8592 	if ((oldl2 & ATTR_AF) == 0) {
8593 		if ((oldl2 & ATTR_SW_WIRED) == 0) {
8594 			pmap_demote_l2_abort(pmap, va, l2, lockp);
8595 			CTR2(KTR_PMAP,
8596 			    "pmap_demote_l2: failure for va %#lx in pmap %p",
8597 			    va, pmap);
8598 			goto fail;
8599 		}
8600 		ml3 = pmap_remove_pt_page(pmap, va);
8601 		/* Fill the PTP with L3Es that have ATTR_AF cleared. */
8602 		ml3->valid = 0;
8603 	} else if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
8604 		KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
8605 		    ("pmap_demote_l2: page table page for a wired mapping"
8606 		    " is missing"));
8607 
8608 		/*
8609 		 * If the page table page is missing and the mapping
8610 		 * is for a kernel address, the mapping must belong to
8611 		 * either the direct map or the early kernel memory.
8612 		 * Page table pages are preallocated for every other
8613 		 * part of the kernel address space, so the direct map
8614 		 * region and early kernel memory are the only parts of the
8615 		 * kernel address space that must be handled here.
8616 		 */
8617 		KASSERT(ADDR_IS_USER(va) || VIRT_IN_DMAP(va) ||
8618 		    (va >= VM_MIN_KERNEL_ADDRESS && va < kernel_vm_end),
8619 		    ("pmap_demote_l2: No saved mpte for va %#lx", va));
8620 
8621 		/*
8622 		 * If the 2MB page mapping belongs to the direct map
8623 		 * region of the kernel's address space, then the page
8624 		 * allocation request specifies the highest possible
8625 		 * priority (VM_ALLOC_INTERRUPT).  Otherwise, the
8626 		 * priority is normal.
8627 		 */
8628 		ml3 = vm_page_alloc_noobj(
8629 		    (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : 0) |
8630 		    VM_ALLOC_WIRED);
8631 
8632 		/*
8633 		 * If the allocation of the new page table page fails,
8634 		 * invalidate the 2MB page mapping and return "failure".
8635 		 */
8636 		if (ml3 == NULL) {
8637 			pmap_demote_l2_abort(pmap, va, l2, lockp);
8638 			CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
8639 			    " in pmap %p", va, pmap);
8640 			goto fail;
8641 		}
8642 		ml3->pindex = pmap_l2_pindex(va);
8643 
8644 		if (ADDR_IS_USER(va)) {
8645 			ml3->ref_count = NL3PG;
8646 			pmap_resident_count_inc(pmap, 1);
8647 		}
8648 	}
8649 	l3phys = VM_PAGE_TO_PHYS(ml3);
8650 	l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
8651 	newl3 = ATTR_CONTIGUOUS | (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE;
8652 	KASSERT((oldl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) !=
8653 	    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM),
8654 	    ("pmap_demote_l2: L2 entry is writeable but not dirty"));
8655 
8656 	/*
8657 	 * If the PTP is not leftover from an earlier promotion or it does not
8658 	 * have ATTR_AF set in every L3E, then fill it.  The new L3Es will all
8659 	 * have ATTR_AF set, unless this is a wired mapping with ATTR_AF clear.
8660 	 *
8661 	 * When pmap_update_entry() clears the old L2 mapping, it (indirectly)
8662 	 * performs a dsb().  That dsb() ensures that the stores for filling
8663 	 * "l3" are visible before "l3" is added to the page table.
8664 	 */
8665 	if (!vm_page_all_valid(ml3))
8666 		pmap_fill_l3(l3, newl3);
8667 
8668 	pmap_demote_l2_check(l3, newl3);
8669 
8670 	/*
8671 	 * If the mapping has changed attributes, update the L3Es.
8672 	 */
8673 	if ((pmap_load(l3) & ATTR_PROMOTE) != (newl3 & ATTR_PROMOTE))
8674 		pmap_fill_l3(l3, newl3);
8675 
8676 	/*
8677 	 * Map the temporary page so we don't lose access to the l2 table.
8678 	 */
8679 	if (tmpl2 != 0) {
8680 		pmap_kenter(tmpl2, PAGE_SIZE,
8681 		    DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET,
8682 		    VM_MEMATTR_WRITE_BACK);
8683 		l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
8684 	}
8685 
8686 	/*
8687 	 * The spare PV entries must be reserved prior to demoting the
8688 	 * mapping, that is, prior to changing the PDE.  Otherwise, the state
8689 	 * of the L2 and the PV lists will be inconsistent, which can result
8690 	 * in reclaim_pv_chunk() attempting to remove a PV entry from the
8691 	 * wrong PV list and pmap_pv_demote_l2() failing to find the expected
8692 	 * PV entry for the 2MB page mapping that is being demoted.
8693 	 */
8694 	if ((oldl2 & ATTR_SW_MANAGED) != 0)
8695 		reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
8696 
8697 	/*
8698 	 * Pass PAGE_SIZE so that a single TLB invalidation is performed on
8699 	 * the 2MB page mapping.
8700 	 */
8701 	pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);
8702 
8703 	/*
8704 	 * Demote the PV entry.
8705 	 */
8706 	if ((oldl2 & ATTR_SW_MANAGED) != 0)
8707 		pmap_pv_demote_l2(pmap, va, PTE_TO_PHYS(oldl2), lockp);
8708 
8709 	counter_u64_add(pmap_l2_demotions, 1);
8710 	CTR3(KTR_PMAP, "pmap_demote_l2: success for va %#lx"
8711 	    " in pmap %p %lx", va, pmap, l3[0]);
8712 
8713 fail:
8714 	if (tmpl2 != 0) {
8715 		pmap_kremove(tmpl2);
8716 		kva_free(tmpl2, PAGE_SIZE);
8717 	}
8718 
8719 	return (l3);
8720 
8721 }
8722 
8723 static pt_entry_t *
pmap_demote_l2(pmap_t pmap,pt_entry_t * l2,vm_offset_t va)8724 pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
8725 {
8726 	struct rwlock *lock;
8727 	pt_entry_t *l3;
8728 
8729 	lock = NULL;
8730 	l3 = pmap_demote_l2_locked(pmap, l2, va, &lock);
8731 	if (lock != NULL)
8732 		rw_wunlock(lock);
8733 	return (l3);
8734 }
8735 
8736 /*
8737  * Demote an L2C superpage mapping to L2C_ENTRIES L2 block mappings.
8738  */
8739 static bool
pmap_demote_l2c(pmap_t pmap,pt_entry_t * l2p,vm_offset_t va)8740 pmap_demote_l2c(pmap_t pmap, pt_entry_t *l2p, vm_offset_t va)
8741 {
8742 	pd_entry_t *l2c_end, *l2c_start, l2e, mask, nbits, *tl2p;
8743 	vm_offset_t tmpl3;
8744 	register_t intr;
8745 
8746 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
8747 	PMAP_ASSERT_STAGE1(pmap);
8748 	l2c_start = (pd_entry_t *)((uintptr_t)l2p & ~((L2C_ENTRIES *
8749 	    sizeof(pd_entry_t)) - 1));
8750 	l2c_end = l2c_start + L2C_ENTRIES;
8751 	tmpl3 = 0;
8752 	if ((va & ~L2C_OFFSET) < (vm_offset_t)l2c_end &&
8753 	    (vm_offset_t)l2c_start < (va & ~L2C_OFFSET) + L2C_SIZE) {
8754 		tmpl3 = kva_alloc(PAGE_SIZE);
8755 		if (tmpl3 == 0)
8756 			return (false);
8757 		pmap_kenter(tmpl3, PAGE_SIZE,
8758 		    DMAP_TO_PHYS((vm_offset_t)l2c_start) & ~L3_OFFSET,
8759 		    VM_MEMATTR_WRITE_BACK);
8760 		l2c_start = (pd_entry_t *)(tmpl3 +
8761 		    ((vm_offset_t)l2c_start & PAGE_MASK));
8762 		l2c_end = (pd_entry_t *)(tmpl3 +
8763 		    ((vm_offset_t)l2c_end & PAGE_MASK));
8764 	}
8765 	mask = 0;
8766 	nbits = ATTR_DESCR_VALID;
8767 	intr = intr_disable();
8768 
8769 	/*
8770 	 * Break the mappings.
8771 	 */
8772 	for (tl2p = l2c_start; tl2p < l2c_end; tl2p++) {
8773 		/*
8774 		 * Clear the mapping's contiguous and valid bits, but leave
8775 		 * the rest of the entry unchanged, so that a lockless,
8776 		 * concurrent pmap_kextract() can still lookup the physical
8777 		 * address.
8778 		 */
8779 		l2e = pmap_load(tl2p);
8780 		KASSERT((l2e & ATTR_CONTIGUOUS) != 0,
8781 		    ("pmap_demote_l2c: missing ATTR_CONTIGUOUS"));
8782 		KASSERT((l2e & (ATTR_SW_DBM | ATTR_S1_AP_RW_BIT)) !=
8783 		    (ATTR_SW_DBM | ATTR_S1_AP(ATTR_S1_AP_RO)),
8784 		    ("pmap_demote_l2c: missing ATTR_S1_AP_RW"));
8785 		while (!atomic_fcmpset_64(tl2p, &l2e, l2e & ~(ATTR_CONTIGUOUS |
8786 		    ATTR_DESCR_VALID)))
8787 			cpu_spinwait();
8788 
8789 		/*
8790 		 * Hardware accessed and dirty bit maintenance might only
8791 		 * update a single L2 entry, so we must combine the accessed
8792 		 * and dirty bits from this entire set of contiguous L2
8793 		 * entries.
8794 		 */
8795 		if ((l2e & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
8796 		    (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM))
8797 			mask = ATTR_S1_AP_RW_BIT;
8798 		nbits |= l2e & ATTR_AF;
8799 	}
8800 	if ((nbits & ATTR_AF) != 0) {
8801 		pmap_s1_invalidate_strided(pmap, va & ~L2C_OFFSET, (va +
8802 		    L2C_SIZE) & ~L2C_OFFSET, L2_SIZE, true);
8803 	}
8804 
8805 	/*
8806 	 * Remake the mappings, updating the accessed and dirty bits.
8807 	 */
8808 	l2e = (pmap_load(l2c_start) & ~mask) | nbits;
8809 	for (tl2p = l2c_start; tl2p < l2c_end; tl2p++) {
8810 		pmap_store(tl2p, l2e);
8811 		l2e += L2_SIZE;
8812 	}
8813 	dsb(ishst);
8814 
8815 	intr_restore(intr);
8816 	if (tmpl3 != 0) {
8817 		pmap_kremove(tmpl3);
8818 		kva_free(tmpl3, PAGE_SIZE);
8819 	}
8820 	counter_u64_add(pmap_l2c_demotions, 1);
8821 	CTR2(KTR_PMAP, "pmap_demote_l2c: success for va %#lx in pmap %p",
8822 	    va, pmap);
8823 	return (true);
8824 }
8825 
8826 /*
8827  * Demote a L3C superpage mapping to L3C_ENTRIES 4KB page mappings.
8828  */
8829 static bool
pmap_demote_l3c(pmap_t pmap,pt_entry_t * l3p,vm_offset_t va)8830 pmap_demote_l3c(pmap_t pmap, pt_entry_t *l3p, vm_offset_t va)
8831 {
8832 	pt_entry_t *l3c_end, *l3c_start, l3e, mask, nbits, *tl3p;
8833 	vm_offset_t tmpl3;
8834 	register_t intr;
8835 
8836 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
8837 	l3c_start = (pt_entry_t *)((uintptr_t)l3p & ~((L3C_ENTRIES *
8838 	    sizeof(pt_entry_t)) - 1));
8839 	l3c_end = l3c_start + L3C_ENTRIES;
8840 	tmpl3 = 0;
8841 	if ((va & ~L3C_OFFSET) < (vm_offset_t)l3c_end &&
8842 	    (vm_offset_t)l3c_start < (va & ~L3C_OFFSET) + L3C_SIZE) {
8843 		tmpl3 = kva_alloc(PAGE_SIZE);
8844 		if (tmpl3 == 0)
8845 			return (false);
8846 		pmap_kenter(tmpl3, PAGE_SIZE,
8847 		    DMAP_TO_PHYS((vm_offset_t)l3c_start) & ~L3_OFFSET,
8848 		    VM_MEMATTR_WRITE_BACK);
8849 		l3c_start = (pt_entry_t *)(tmpl3 +
8850 		    ((vm_offset_t)l3c_start & PAGE_MASK));
8851 		l3c_end = (pt_entry_t *)(tmpl3 +
8852 		    ((vm_offset_t)l3c_end & PAGE_MASK));
8853 	}
8854 	mask = 0;
8855 	nbits = ATTR_DESCR_VALID;
8856 	intr = intr_disable();
8857 
8858 	/*
8859 	 * Break the mappings.
8860 	 */
8861 	for (tl3p = l3c_start; tl3p < l3c_end; tl3p++) {
8862 		/*
8863 		 * Clear the mapping's contiguous and valid bits, but leave
8864 		 * the rest of the entry unchanged, so that a lockless,
8865 		 * concurrent pmap_kextract() can still lookup the physical
8866 		 * address.
8867 		 */
8868 		l3e = pmap_load(tl3p);
8869 		KASSERT((l3e & ATTR_CONTIGUOUS) != 0,
8870 		    ("pmap_demote_l3c: missing ATTR_CONTIGUOUS"));
8871 		KASSERT((l3e & (ATTR_SW_DBM | ATTR_S1_AP_RW_BIT)) !=
8872 		    (ATTR_SW_DBM | ATTR_S1_AP(ATTR_S1_AP_RO)),
8873 		    ("pmap_demote_l3c: missing ATTR_S1_AP_RW"));
8874 		while (!atomic_fcmpset_64(tl3p, &l3e, l3e & ~(ATTR_CONTIGUOUS |
8875 		    ATTR_DESCR_VALID)))
8876 			cpu_spinwait();
8877 
8878 		/*
8879 		 * Hardware accessed and dirty bit maintenance might only
8880 		 * update a single L3 entry, so we must combine the accessed
8881 		 * and dirty bits from this entire set of contiguous L3
8882 		 * entries.
8883 		 */
8884 		if ((l3e & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
8885 		    (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM))
8886 			mask = ATTR_S1_AP_RW_BIT;
8887 		nbits |= l3e & ATTR_AF;
8888 	}
8889 	if ((nbits & ATTR_AF) != 0) {
8890 		pmap_invalidate_range(pmap, va & ~L3C_OFFSET, (va + L3C_SIZE) &
8891 		    ~L3C_OFFSET, true);
8892 	}
8893 
8894 	/*
8895 	 * Remake the mappings, updating the accessed and dirty bits.
8896 	 */
8897 	l3e = (pmap_load(l3c_start) & ~mask) | nbits;
8898 	for (tl3p = l3c_start; tl3p < l3c_end; tl3p++) {
8899 		pmap_store(tl3p, l3e);
8900 		l3e += L3_SIZE;
8901 	}
8902 	dsb(ishst);
8903 
8904 	intr_restore(intr);
8905 	if (tmpl3 != 0) {
8906 		pmap_kremove(tmpl3);
8907 		kva_free(tmpl3, PAGE_SIZE);
8908 	}
8909 	counter_u64_add(pmap_l3c_demotions, 1);
8910 	CTR2(KTR_PMAP, "pmap_demote_l3c: success for va %#lx in pmap %p",
8911 	    va, pmap);
8912 	return (true);
8913 }
8914 
8915 /*
8916  * Accumulate the accessed and dirty bits within a L3C superpage and
8917  * return the specified PTE with them applied correctly.
8918  */
8919 static pt_entry_t
pmap_load_l3c(pt_entry_t * l3p)8920 pmap_load_l3c(pt_entry_t *l3p)
8921 {
8922 	pt_entry_t *l3c_end, *l3c_start, l3e, mask, nbits, *tl3p;
8923 
8924 	l3c_start = (pt_entry_t *)((uintptr_t)l3p & ~((L3C_ENTRIES *
8925 	    sizeof(pt_entry_t)) - 1));
8926 	l3c_end = l3c_start + L3C_ENTRIES;
8927 	mask = 0;
8928 	nbits = 0;
8929 	/* Iterate over each mapping in the superpage. */
8930 	for (tl3p = l3c_start; tl3p < l3c_end; tl3p++) {
8931 		l3e = pmap_load(tl3p);
8932 		KASSERT((l3e & ATTR_CONTIGUOUS) != 0,
8933 		    ("pmap_load_l3c: missing ATTR_CONTIGUOUS"));
8934 		/* Update mask if the current page has its dirty bit set. */
8935 		if ((l3e & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
8936 		    (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM))
8937 			mask = ATTR_S1_AP_RW_BIT;
8938 		/* Update nbits if the accessed bit is set. */
8939 		nbits |= l3e & ATTR_AF;
8940 	}
8941 	return ((pmap_load(l3p) & ~mask) | nbits);
8942 }
8943 
8944 /*
8945  * Perform the pmap work for mincore(2).  If the page is not both referenced and
8946  * modified by this pmap, returns its physical address so that the caller can
8947  * find other mappings.
8948  */
8949 int
pmap_mincore(pmap_t pmap,vm_offset_t addr,vm_paddr_t * pap)8950 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
8951 {
8952 	pt_entry_t *pte, tpte;
8953 	vm_paddr_t mask, pa;
8954 	int lvl, psind, val;
8955 	bool managed;
8956 
8957 	PMAP_ASSERT_STAGE1(pmap);
8958 	PMAP_LOCK(pmap);
8959 	pte = pmap_pte(pmap, addr, &lvl);
8960 	if (pte != NULL) {
8961 		tpte = pmap_load(pte);
8962 
8963 		switch (lvl) {
8964 		case 3:
8965 			mask = L3_OFFSET;
8966 			psind = (tpte & ATTR_CONTIGUOUS) != 0 ? 1 : 0;
8967 			break;
8968 		case 2:
8969 			mask = L2_OFFSET;
8970 			psind = 2;
8971 			break;
8972 		case 1:
8973 			mask = L1_OFFSET;
8974 			psind = 3;
8975 			break;
8976 		default:
8977 			panic("pmap_mincore: invalid level %d", lvl);
8978 		}
8979 
8980 		managed = (tpte & ATTR_SW_MANAGED) != 0;
8981 		val = MINCORE_INCORE | MINCORE_PSIND(psind);
8982 		if ((managed && pmap_pte_dirty(pmap, tpte)) || (!managed &&
8983 		    (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)))
8984 			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
8985 		if ((tpte & ATTR_AF) == ATTR_AF)
8986 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
8987 
8988 		pa = PTE_TO_PHYS(tpte) | (addr & mask);
8989 	} else {
8990 		managed = false;
8991 		val = 0;
8992 	}
8993 
8994 	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
8995 	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
8996 		*pap = pa;
8997 	}
8998 	PMAP_UNLOCK(pmap);
8999 	return (val);
9000 }
9001 
9002 /*
9003  * Garbage collect every ASID that is neither active on a processor nor
9004  * reserved.
9005  */
9006 static void
pmap_reset_asid_set(pmap_t pmap)9007 pmap_reset_asid_set(pmap_t pmap)
9008 {
9009 	pmap_t curpmap;
9010 	int asid, cpuid, epoch;
9011 	struct asid_set *set;
9012 	enum pmap_stage stage;
9013 
9014 	set = pmap->pm_asid_set;
9015 	stage = pmap->pm_stage;
9016 
9017 	set = pmap->pm_asid_set;
9018 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
9019 	mtx_assert(&set->asid_set_mutex, MA_OWNED);
9020 
9021 	/*
9022 	 * Ensure that the store to asid_epoch is globally visible before the
9023 	 * loads from pc_curpmap are performed.
9024 	 */
9025 	epoch = set->asid_epoch + 1;
9026 	if (epoch == INT_MAX)
9027 		epoch = 0;
9028 	set->asid_epoch = epoch;
9029 	dsb(ishst);
9030 	if (stage == PM_STAGE1) {
9031 		__asm __volatile("tlbi vmalle1is");
9032 	} else {
9033 		KASSERT(pmap_clean_stage2_tlbi != NULL,
9034 		    ("%s: Unset stage 2 tlb invalidation callback\n",
9035 		    __func__));
9036 		pmap_clean_stage2_tlbi();
9037 	}
9038 	dsb(ish);
9039 	bit_nclear(set->asid_set, ASID_FIRST_AVAILABLE,
9040 	    set->asid_set_size - 1);
9041 	CPU_FOREACH(cpuid) {
9042 		if (cpuid == curcpu)
9043 			continue;
9044 		if (stage == PM_STAGE1) {
9045 			curpmap = pcpu_find(cpuid)->pc_curpmap;
9046 			PMAP_ASSERT_STAGE1(pmap);
9047 		} else {
9048 			curpmap = pcpu_find(cpuid)->pc_curvmpmap;
9049 			if (curpmap == NULL)
9050 				continue;
9051 			PMAP_ASSERT_STAGE2(pmap);
9052 		}
9053 		KASSERT(curpmap->pm_asid_set == set, ("Incorrect set"));
9054 		asid = COOKIE_TO_ASID(curpmap->pm_cookie);
9055 		if (asid == -1)
9056 			continue;
9057 		bit_set(set->asid_set, asid);
9058 		curpmap->pm_cookie = COOKIE_FROM(asid, epoch);
9059 	}
9060 }
9061 
9062 /*
9063  * Allocate a new ASID for the specified pmap.
9064  */
9065 static void
pmap_alloc_asid(pmap_t pmap)9066 pmap_alloc_asid(pmap_t pmap)
9067 {
9068 	struct asid_set *set;
9069 	int new_asid;
9070 
9071 	set = pmap->pm_asid_set;
9072 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
9073 
9074 	mtx_lock_spin(&set->asid_set_mutex);
9075 
9076 	/*
9077 	 * While this processor was waiting to acquire the asid set mutex,
9078 	 * pmap_reset_asid_set() running on another processor might have
9079 	 * updated this pmap's cookie to the current epoch.  In which case, we
9080 	 * don't need to allocate a new ASID.
9081 	 */
9082 	if (COOKIE_TO_EPOCH(pmap->pm_cookie) == set->asid_epoch)
9083 		goto out;
9084 
9085 	bit_ffc_at(set->asid_set, set->asid_next, set->asid_set_size,
9086 	    &new_asid);
9087 	if (new_asid == -1) {
9088 		bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE,
9089 		    set->asid_next, &new_asid);
9090 		if (new_asid == -1) {
9091 			pmap_reset_asid_set(pmap);
9092 			bit_ffc_at(set->asid_set, ASID_FIRST_AVAILABLE,
9093 			    set->asid_set_size, &new_asid);
9094 			KASSERT(new_asid != -1, ("ASID allocation failure"));
9095 		}
9096 	}
9097 	bit_set(set->asid_set, new_asid);
9098 	set->asid_next = new_asid + 1;
9099 	pmap->pm_cookie = COOKIE_FROM(new_asid, set->asid_epoch);
9100 out:
9101 	mtx_unlock_spin(&set->asid_set_mutex);
9102 }
9103 
9104 static uint64_t __read_mostly ttbr_flags;
9105 
9106 /*
9107  * Compute the value that should be stored in ttbr0 to activate the specified
9108  * pmap.  This value may change from time to time.
9109  */
9110 uint64_t
pmap_to_ttbr0(pmap_t pmap)9111 pmap_to_ttbr0(pmap_t pmap)
9112 {
9113 	uint64_t ttbr;
9114 
9115 	ttbr = pmap->pm_ttbr;
9116 	ttbr |= ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
9117 	ttbr |= ttbr_flags;
9118 
9119 	return (ttbr);
9120 }
9121 
9122 static void
pmap_set_cnp(void * arg)9123 pmap_set_cnp(void *arg)
9124 {
9125 	uint64_t ttbr0, ttbr1;
9126 	u_int cpuid;
9127 
9128 	cpuid = *(u_int *)arg;
9129 	if (cpuid == curcpu) {
9130 		/*
9131 		 * Set the flags while all CPUs are handling the
9132 		 * smp_rendezvous so will not call pmap_to_ttbr0. Any calls
9133 		 * to pmap_to_ttbr0 after this will have the CnP flag set.
9134 		 * The dsb after invalidating the TLB will act as a barrier
9135 		 * to ensure all CPUs can observe this change.
9136 		 */
9137 		ttbr_flags |= TTBR_CnP;
9138 	}
9139 
9140 	ttbr0 = READ_SPECIALREG(ttbr0_el1);
9141 	ttbr0 |= TTBR_CnP;
9142 
9143 	ttbr1 = READ_SPECIALREG(ttbr1_el1);
9144 	ttbr1 |= TTBR_CnP;
9145 
9146 	/* Update ttbr{0,1}_el1 with the CnP flag */
9147 	WRITE_SPECIALREG(ttbr0_el1, ttbr0);
9148 	WRITE_SPECIALREG(ttbr1_el1, ttbr1);
9149 	isb();
9150 	__asm __volatile("tlbi vmalle1is");
9151 	dsb(ish);
9152 	isb();
9153 }
9154 
9155 /*
9156  * Defer enabling some features until we have read the ID registers to know
9157  * if they are supported on all CPUs.
9158  */
9159 static void
pmap_init_mp(void * dummy __unused)9160 pmap_init_mp(void *dummy __unused)
9161 {
9162 	uint64_t reg;
9163 
9164 	if (get_kernel_reg(ID_AA64PFR1_EL1, &reg)) {
9165 		if (ID_AA64PFR1_BT_VAL(reg) != ID_AA64PFR1_BT_NONE) {
9166 			if (bootverbose)
9167 				printf("Enabling BTI\n");
9168 			pmap_bti_support = true;
9169 
9170 			pmap_bti_ranges_zone = uma_zcreate("BTI ranges",
9171 			    sizeof(struct rs_el), NULL, NULL, NULL, NULL,
9172 			    UMA_ALIGN_PTR, 0);
9173 		}
9174 	}
9175 }
9176 SYSINIT(pmap_init_mp, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_mp, NULL);
9177 
9178 /*
9179  * Defer enabling CnP until we have read the ID registers to know if it's
9180  * supported on all CPUs.
9181  */
9182 static void
pmap_init_cnp(void * dummy __unused)9183 pmap_init_cnp(void *dummy __unused)
9184 {
9185 	uint64_t reg;
9186 	u_int cpuid;
9187 
9188 	if (!get_kernel_reg(ID_AA64MMFR2_EL1, &reg))
9189 		return;
9190 
9191 	if (ID_AA64MMFR2_CnP_VAL(reg) != ID_AA64MMFR2_CnP_NONE) {
9192 		if (bootverbose)
9193 			printf("Enabling CnP\n");
9194 		cpuid = curcpu;
9195 		smp_rendezvous(NULL, pmap_set_cnp, NULL, &cpuid);
9196 	}
9197 
9198 }
9199 SYSINIT(pmap_init_cnp, SI_SUB_SMP, SI_ORDER_ANY, pmap_init_cnp, NULL);
9200 
9201 static bool
pmap_activate_int(struct thread * td,pmap_t pmap)9202 pmap_activate_int(struct thread *td, pmap_t pmap)
9203 {
9204 	struct asid_set *set;
9205 	int epoch;
9206 
9207 	KASSERT(PCPU_GET(curpmap) != NULL, ("no active pmap"));
9208 	KASSERT(pmap != kernel_pmap, ("kernel pmap activation"));
9209 
9210 	if ((pmap->pm_stage == PM_STAGE1 && pmap == PCPU_GET(curpmap)) ||
9211 	    (pmap->pm_stage == PM_STAGE2 && pmap == PCPU_GET(curvmpmap))) {
9212 		/*
9213 		 * Handle the possibility that the old thread was preempted
9214 		 * after an "ic" or "tlbi" instruction but before it performed
9215 		 * a "dsb" instruction.  If the old thread migrates to a new
9216 		 * processor, its completion of a "dsb" instruction on that
9217 		 * new processor does not guarantee that the "ic" or "tlbi"
9218 		 * instructions performed on the old processor have completed.
9219 		 */
9220 		dsb(ish);
9221 		return (false);
9222 	}
9223 
9224 	set = pmap->pm_asid_set;
9225 	KASSERT(set != NULL, ("%s: NULL asid set", __func__));
9226 
9227 	/*
9228 	 * Ensure that the store to curpmap is globally visible before the
9229 	 * load from asid_epoch is performed.
9230 	 */
9231 	if (pmap->pm_stage == PM_STAGE1)
9232 		PCPU_SET(curpmap, pmap);
9233 	else
9234 		PCPU_SET(curvmpmap, pmap);
9235 	dsb(ish);
9236 	epoch = COOKIE_TO_EPOCH(pmap->pm_cookie);
9237 	if (epoch >= 0 && epoch != set->asid_epoch)
9238 		pmap_alloc_asid(pmap);
9239 
9240 	if (pmap->pm_stage == PM_STAGE1) {
9241 		uint64_t new_tcr, tcr;
9242 
9243 		new_tcr = td->td_proc->p_md.md_tcr;
9244 		tcr = READ_SPECIALREG(tcr_el1);
9245 		if ((tcr & MD_TCR_FIELDS) != new_tcr) {
9246 			tcr &= ~MD_TCR_FIELDS;
9247 			tcr |= new_tcr;
9248 			WRITE_SPECIALREG(tcr_el1, tcr);
9249 		}
9250 		set_ttbr0(pmap_to_ttbr0(pmap));
9251 		if (PCPU_GET(bcast_tlbi_workaround) != 0)
9252 			invalidate_local_icache();
9253 	}
9254 	return (true);
9255 }
9256 
9257 void
pmap_activate_vm(pmap_t pmap)9258 pmap_activate_vm(pmap_t pmap)
9259 {
9260 
9261 	PMAP_ASSERT_STAGE2(pmap);
9262 
9263 	(void)pmap_activate_int(NULL, pmap);
9264 }
9265 
9266 void
pmap_activate(struct thread * td)9267 pmap_activate(struct thread *td)
9268 {
9269 	pmap_t	pmap;
9270 
9271 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
9272 	PMAP_ASSERT_STAGE1(pmap);
9273 	critical_enter();
9274 	(void)pmap_activate_int(td, pmap);
9275 	critical_exit();
9276 }
9277 
9278 /*
9279  * Activate the thread we are switching to.
9280  * To simplify the assembly in cpu_throw return the new threads pcb.
9281  */
9282 struct pcb *
pmap_switch(struct thread * new)9283 pmap_switch(struct thread *new)
9284 {
9285 	pcpu_bp_harden bp_harden;
9286 	struct pcb *pcb;
9287 
9288 	/* Store the new curthread */
9289 	PCPU_SET(curthread, new);
9290 
9291 	/* And the new pcb */
9292 	pcb = new->td_pcb;
9293 	PCPU_SET(curpcb, pcb);
9294 
9295 	/*
9296 	 * TODO: We may need to flush the cache here if switching
9297 	 * to a user process.
9298 	 */
9299 
9300 	if (pmap_activate_int(new, vmspace_pmap(new->td_proc->p_vmspace))) {
9301 		/*
9302 		 * Stop userspace from training the branch predictor against
9303 		 * other processes. This will call into a CPU specific
9304 		 * function that clears the branch predictor state.
9305 		 */
9306 		bp_harden = PCPU_GET(bp_harden);
9307 		if (bp_harden != NULL)
9308 			bp_harden();
9309 	}
9310 
9311 	return (pcb);
9312 }
9313 
9314 void
pmap_sync_icache(pmap_t pmap,vm_offset_t va,vm_size_t sz)9315 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
9316 {
9317 
9318 	PMAP_ASSERT_STAGE1(pmap);
9319 	KASSERT(ADDR_IS_CANONICAL(va),
9320 	    ("%s: Address not in canonical form: %lx", __func__, va));
9321 
9322 	if (ADDR_IS_KERNEL(va)) {
9323 		cpu_icache_sync_range((void *)va, sz);
9324 	} else {
9325 		u_int len, offset;
9326 		vm_paddr_t pa;
9327 
9328 		/* Find the length of data in this page to flush */
9329 		offset = va & PAGE_MASK;
9330 		len = imin(PAGE_SIZE - offset, sz);
9331 
9332 		while (sz != 0) {
9333 			/* Extract the physical address & find it in the DMAP */
9334 			pa = pmap_extract(pmap, va);
9335 			if (pa != 0)
9336 				cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa),
9337 				    len);
9338 
9339 			/* Move to the next page */
9340 			sz -= len;
9341 			va += len;
9342 			/* Set the length for the next iteration */
9343 			len = imin(PAGE_SIZE, sz);
9344 		}
9345 	}
9346 }
9347 
9348 static int
pmap_stage2_fault(pmap_t pmap,uint64_t esr,uint64_t far)9349 pmap_stage2_fault(pmap_t pmap, uint64_t esr, uint64_t far)
9350 {
9351 	pd_entry_t *pdep;
9352 	pt_entry_t *ptep, pte;
9353 	int rv, lvl, dfsc;
9354 
9355 	PMAP_ASSERT_STAGE2(pmap);
9356 	rv = KERN_FAILURE;
9357 
9358 	/* Data and insn aborts use same encoding for FSC field. */
9359 	dfsc = esr & ISS_DATA_DFSC_MASK;
9360 	switch (dfsc) {
9361 	case ISS_DATA_DFSC_TF_L0:
9362 	case ISS_DATA_DFSC_TF_L1:
9363 	case ISS_DATA_DFSC_TF_L2:
9364 	case ISS_DATA_DFSC_TF_L3:
9365 		PMAP_LOCK(pmap);
9366 		pdep = pmap_pde(pmap, far, &lvl);
9367 		if (pdep == NULL || lvl != (dfsc - ISS_DATA_DFSC_TF_L1)) {
9368 			PMAP_UNLOCK(pmap);
9369 			break;
9370 		}
9371 
9372 		switch (lvl) {
9373 		case 0:
9374 			ptep = pmap_l0_to_l1(pdep, far);
9375 			break;
9376 		case 1:
9377 			ptep = pmap_l1_to_l2(pdep, far);
9378 			break;
9379 		case 2:
9380 			ptep = pmap_l2_to_l3(pdep, far);
9381 			break;
9382 		default:
9383 			panic("%s: Invalid pde level %d", __func__,lvl);
9384 		}
9385 		goto fault_exec;
9386 
9387 	case ISS_DATA_DFSC_AFF_L1:
9388 	case ISS_DATA_DFSC_AFF_L2:
9389 	case ISS_DATA_DFSC_AFF_L3:
9390 		PMAP_LOCK(pmap);
9391 		ptep = pmap_pte(pmap, far, &lvl);
9392 fault_exec:
9393 		if (ptep != NULL && (pte = pmap_load(ptep)) != 0) {
9394 			/*
9395 			 * If accessing an executable page invalidate
9396 			 * the I-cache so it will be valid when we
9397 			 * continue execution in the guest. The D-cache
9398 			 * is assumed to already be clean to the Point
9399 			 * of Coherency.
9400 			 */
9401 			if ((pte & ATTR_S2_XN_MASK) !=
9402 			    ATTR_S2_XN(ATTR_S2_XN_NONE)) {
9403 				invalidate_icache();
9404 			}
9405 			pmap_set_bits(ptep, ATTR_AF | ATTR_DESCR_VALID);
9406 			rv = KERN_SUCCESS;
9407 		}
9408 		PMAP_UNLOCK(pmap);
9409 		break;
9410 	}
9411 
9412 	return (rv);
9413 }
9414 
9415 int
pmap_fault(pmap_t pmap,uint64_t esr,uint64_t far)9416 pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
9417 {
9418 	pt_entry_t pte, *ptep;
9419 	register_t intr;
9420 	uint64_t ec, par;
9421 	int lvl, rv;
9422 
9423 	rv = KERN_FAILURE;
9424 
9425 	ec = ESR_ELx_EXCEPTION(esr);
9426 	switch (ec) {
9427 	case EXCP_INSN_ABORT_L:
9428 	case EXCP_INSN_ABORT:
9429 	case EXCP_DATA_ABORT_L:
9430 	case EXCP_DATA_ABORT:
9431 		break;
9432 	default:
9433 		return (rv);
9434 	}
9435 
9436 	if (pmap->pm_stage == PM_STAGE2)
9437 		return (pmap_stage2_fault(pmap, esr, far));
9438 
9439 	/* Data and insn aborts use same encoding for FSC field. */
9440 	switch (esr & ISS_DATA_DFSC_MASK) {
9441 	case ISS_DATA_DFSC_AFF_L1:
9442 	case ISS_DATA_DFSC_AFF_L2:
9443 	case ISS_DATA_DFSC_AFF_L3:
9444 		PMAP_LOCK(pmap);
9445 		ptep = pmap_pte(pmap, far, &lvl);
9446 		if (ptep != NULL) {
9447 			pmap_set_bits(ptep, ATTR_AF);
9448 			rv = KERN_SUCCESS;
9449 			/*
9450 			 * XXXMJ as an optimization we could mark the entry
9451 			 * dirty if this is a write fault.
9452 			 */
9453 		}
9454 		PMAP_UNLOCK(pmap);
9455 		break;
9456 	case ISS_DATA_DFSC_PF_L1:
9457 	case ISS_DATA_DFSC_PF_L2:
9458 	case ISS_DATA_DFSC_PF_L3:
9459 		if ((ec != EXCP_DATA_ABORT_L && ec != EXCP_DATA_ABORT) ||
9460 		    (esr & ISS_DATA_WnR) == 0)
9461 			return (rv);
9462 		PMAP_LOCK(pmap);
9463 		ptep = pmap_pte(pmap, far, &lvl);
9464 		if (ptep != NULL &&
9465 		    ((pte = pmap_load(ptep)) & ATTR_SW_DBM) != 0) {
9466 			if ((pte & ATTR_S1_AP_RW_BIT) ==
9467 			    ATTR_S1_AP(ATTR_S1_AP_RO)) {
9468 				pmap_clear_bits(ptep, ATTR_S1_AP_RW_BIT);
9469 				pmap_s1_invalidate_page(pmap, far, true);
9470 			}
9471 			rv = KERN_SUCCESS;
9472 		}
9473 		PMAP_UNLOCK(pmap);
9474 		break;
9475 	case ISS_DATA_DFSC_TF_L0:
9476 	case ISS_DATA_DFSC_TF_L1:
9477 	case ISS_DATA_DFSC_TF_L2:
9478 	case ISS_DATA_DFSC_TF_L3:
9479 		/*
9480 		 * Retry the translation.  A break-before-make sequence can
9481 		 * produce a transient fault.
9482 		 */
9483 		if (pmap == kernel_pmap) {
9484 			/*
9485 			 * The translation fault may have occurred within a
9486 			 * critical section.  Therefore, we must check the
9487 			 * address without acquiring the kernel pmap's lock.
9488 			 */
9489 			if (pmap_klookup(far, NULL))
9490 				rv = KERN_SUCCESS;
9491 		} else {
9492 			bool owned;
9493 
9494 			/*
9495 			 * In the EFIRT driver we lock the pmap before
9496 			 * calling into the runtime service. As the lock
9497 			 * is already owned by the current thread skip
9498 			 * locking it again.
9499 			 */
9500 			owned = PMAP_OWNED(pmap);
9501 			if (!owned)
9502 				PMAP_LOCK(pmap);
9503 			/* Ask the MMU to check the address. */
9504 			intr = intr_disable();
9505 			par = arm64_address_translate_s1e0r(far);
9506 			intr_restore(intr);
9507 			if (!owned)
9508 				PMAP_UNLOCK(pmap);
9509 
9510 			/*
9511 			 * If the translation was successful, then we can
9512 			 * return success to the trap handler.
9513 			 */
9514 			if (PAR_SUCCESS(par))
9515 				rv = KERN_SUCCESS;
9516 		}
9517 		break;
9518 	}
9519 
9520 	return (rv);
9521 }
9522 
9523 /*
9524  *	Increase the starting virtual address of the given mapping if a
9525  *	different alignment might result in more superpage mappings.
9526  */
9527 void
pmap_align_superpage(vm_object_t object,vm_ooffset_t offset,vm_offset_t * addr,vm_size_t size)9528 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
9529     vm_offset_t *addr, vm_size_t size)
9530 {
9531 	vm_offset_t superpage_offset;
9532 
9533 	if (size < L3C_SIZE)
9534 		return;
9535 	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
9536 		offset += ptoa(object->pg_color);
9537 
9538 	/*
9539 	 * Considering the object's physical alignment, is the mapping large
9540 	 * enough to encompass an L2 (2MB/32MB) superpage ...
9541 	 */
9542 	superpage_offset = offset & L2_OFFSET;
9543 	if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) >= L2_SIZE) {
9544 		/*
9545 		 * If the virtual and physical alignments differ, then
9546 		 * increase the virtual address so that the alignments match.
9547 		 */
9548 		if ((*addr & L2_OFFSET) < superpage_offset)
9549 			*addr = (*addr & ~L2_OFFSET) + superpage_offset;
9550 		else if ((*addr & L2_OFFSET) > superpage_offset)
9551 			*addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) +
9552 			    superpage_offset;
9553 		return;
9554 	}
9555 	/* ... or an L3C (64KB/2MB) superpage? */
9556 	superpage_offset = offset & L3C_OFFSET;
9557 	if (size - ((L3C_SIZE - superpage_offset) & L3C_OFFSET) >= L3C_SIZE) {
9558 		if ((*addr & L3C_OFFSET) < superpage_offset)
9559 			*addr = (*addr & ~L3C_OFFSET) + superpage_offset;
9560 		else if ((*addr & L3C_OFFSET) > superpage_offset)
9561 			*addr = ((*addr + L3C_OFFSET) & ~L3C_OFFSET) +
9562 			    superpage_offset;
9563 	}
9564 }
9565 
9566 /**
9567  * Get the kernel virtual address of a set of physical pages. If there are
9568  * physical addresses not covered by the DMAP perform a transient mapping
9569  * that will be removed when calling pmap_unmap_io_transient.
9570  *
9571  * \param page        The pages the caller wishes to obtain the virtual
9572  *                    address on the kernel memory map.
9573  * \param vaddr       On return contains the kernel virtual memory address
9574  *                    of the pages passed in the page parameter.
9575  * \param count       Number of pages passed in.
9576  * \param can_fault   true if the thread using the mapped pages can take
9577  *                    page faults, false otherwise.
9578  *
9579  * \returns true if the caller must call pmap_unmap_io_transient when
9580  *          finished or false otherwise.
9581  *
9582  */
9583 bool
pmap_map_io_transient(vm_page_t page[],vm_offset_t vaddr[],int count,bool can_fault)9584 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
9585     bool can_fault)
9586 {
9587 	vm_paddr_t paddr;
9588 	bool needs_mapping;
9589 	int error __diagused, i;
9590 
9591 	/*
9592 	 * Allocate any KVA space that we need, this is done in a separate
9593 	 * loop to prevent calling vmem_alloc while pinned.
9594 	 */
9595 	needs_mapping = false;
9596 	for (i = 0; i < count; i++) {
9597 		paddr = VM_PAGE_TO_PHYS(page[i]);
9598 		if (__predict_false(!PHYS_IN_DMAP(paddr))) {
9599 			error = vmem_alloc(kernel_arena, PAGE_SIZE,
9600 			    M_BESTFIT | M_WAITOK, &vaddr[i]);
9601 			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
9602 			needs_mapping = true;
9603 		} else {
9604 			vaddr[i] = PHYS_TO_DMAP(paddr);
9605 		}
9606 	}
9607 
9608 	/* Exit early if everything is covered by the DMAP */
9609 	if (!needs_mapping)
9610 		return (false);
9611 
9612 	if (!can_fault)
9613 		sched_pin();
9614 	for (i = 0; i < count; i++) {
9615 		paddr = VM_PAGE_TO_PHYS(page[i]);
9616 		if (!PHYS_IN_DMAP(paddr)) {
9617 			panic(
9618 			   "pmap_map_io_transient: TODO: Map out of DMAP data");
9619 		}
9620 	}
9621 
9622 	return (needs_mapping);
9623 }
9624 
9625 void
pmap_unmap_io_transient(vm_page_t page[],vm_offset_t vaddr[],int count,bool can_fault)9626 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
9627     bool can_fault)
9628 {
9629 	vm_paddr_t paddr;
9630 	int i;
9631 
9632 	if (!can_fault)
9633 		sched_unpin();
9634 	for (i = 0; i < count; i++) {
9635 		paddr = VM_PAGE_TO_PHYS(page[i]);
9636 		if (!PHYS_IN_DMAP(paddr)) {
9637 			panic("ARM64TODO: pmap_unmap_io_transient: Unmap data");
9638 		}
9639 	}
9640 }
9641 
9642 bool
pmap_is_valid_memattr(pmap_t pmap __unused,vm_memattr_t mode)9643 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
9644 {
9645 
9646 	return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH);
9647 }
9648 
9649 static void *
bti_dup_range(void * ctx __unused,void * data)9650 bti_dup_range(void *ctx __unused, void *data)
9651 {
9652 	struct rs_el *node, *new_node;
9653 
9654 	new_node = uma_zalloc(pmap_bti_ranges_zone, M_NOWAIT);
9655 	if (new_node == NULL)
9656 		return (NULL);
9657 	node = data;
9658 	memcpy(new_node, node, sizeof(*node));
9659 	return (new_node);
9660 }
9661 
9662 static void
bti_free_range(void * ctx __unused,void * node)9663 bti_free_range(void *ctx __unused, void *node)
9664 {
9665 
9666 	uma_zfree(pmap_bti_ranges_zone, node);
9667 }
9668 
9669 static int
pmap_bti_assign(pmap_t pmap,vm_offset_t sva,vm_offset_t eva)9670 pmap_bti_assign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
9671 {
9672 	struct rs_el *rs;
9673 	int error;
9674 
9675 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9676 	PMAP_ASSERT_STAGE1(pmap);
9677 	MPASS(pmap->pm_bti != NULL);
9678 	rs = uma_zalloc(pmap_bti_ranges_zone, M_NOWAIT);
9679 	if (rs == NULL)
9680 		return (ENOMEM);
9681 	error = rangeset_insert(pmap->pm_bti, sva, eva, rs);
9682 	if (error != 0)
9683 		uma_zfree(pmap_bti_ranges_zone, rs);
9684 	return (error);
9685 }
9686 
9687 static void
pmap_bti_deassign_all(pmap_t pmap)9688 pmap_bti_deassign_all(pmap_t pmap)
9689 {
9690 
9691 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9692 	if (pmap->pm_bti != NULL)
9693 		rangeset_remove_all(pmap->pm_bti);
9694 }
9695 
9696 /*
9697  * Returns true if the BTI setting is the same across the specified address
9698  * range, and false otherwise.  When returning true, updates the referenced PTE
9699  * to reflect the BTI setting.
9700  *
9701  * Only stage 1 pmaps support BTI.  The kernel pmap is always a stage 1 pmap
9702  * that has the same BTI setting implicitly across its entire address range.
9703  */
9704 static bool
pmap_bti_same(pmap_t pmap,vm_offset_t sva,vm_offset_t eva,pt_entry_t * pte)9705 pmap_bti_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pt_entry_t *pte)
9706 {
9707 	struct rs_el *rs;
9708 	vm_offset_t va;
9709 
9710 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9711 	KASSERT(ADDR_IS_CANONICAL(sva),
9712 	    ("%s: Start address not in canonical form: %lx", __func__, sva));
9713 	KASSERT(ADDR_IS_CANONICAL(eva),
9714 	    ("%s: End address not in canonical form: %lx", __func__, eva));
9715 	KASSERT((*pte & ATTR_S1_GP) == 0,
9716 	    ("%s: pte %lx has ATTR_S1_GP preset", __func__, *pte));
9717 
9718 	if (pmap == kernel_pmap) {
9719 		*pte |= ATTR_KERN_GP;
9720 		return (true);
9721 	}
9722 	if (pmap->pm_bti == NULL)
9723 		return (true);
9724 	PMAP_ASSERT_STAGE1(pmap);
9725 	rs = rangeset_containing(pmap->pm_bti, sva);
9726 	if (rs == NULL)
9727 		return (rangeset_empty(pmap->pm_bti, sva, eva));
9728 	while ((va = rs->re_end) < eva) {
9729 		if ((rs = rangeset_beginning(pmap->pm_bti, va)) == NULL)
9730 			return (false);
9731 	}
9732 	*pte |= ATTR_S1_GP;
9733 	return (true);
9734 }
9735 
9736 static pt_entry_t
pmap_pte_bti(pmap_t pmap,vm_offset_t va)9737 pmap_pte_bti(pmap_t pmap, vm_offset_t va)
9738 {
9739 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9740 	MPASS(ADDR_IS_CANONICAL(va));
9741 
9742 	if (pmap->pm_stage != PM_STAGE1)
9743 		return (0);
9744 	if (pmap == kernel_pmap)
9745 		return (ATTR_KERN_GP);
9746 	if (pmap->pm_bti != NULL &&
9747 	    rangeset_containing(pmap->pm_bti, va) != NULL)
9748 		return (ATTR_S1_GP);
9749 	return (0);
9750 }
9751 
9752 static void
pmap_bti_on_remove(pmap_t pmap,vm_offset_t sva,vm_offset_t eva)9753 pmap_bti_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
9754 {
9755 
9756 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9757 	if (pmap->pm_bti != NULL)
9758 		rangeset_remove(pmap->pm_bti, sva, eva);
9759 }
9760 
9761 static int
pmap_bti_copy(pmap_t dst_pmap,pmap_t src_pmap)9762 pmap_bti_copy(pmap_t dst_pmap, pmap_t src_pmap)
9763 {
9764 
9765 	PMAP_LOCK_ASSERT(dst_pmap, MA_OWNED);
9766 	PMAP_LOCK_ASSERT(src_pmap, MA_OWNED);
9767 	MPASS(src_pmap->pm_stage == dst_pmap->pm_stage);
9768 	MPASS(src_pmap->pm_bti != NULL);
9769 	MPASS(dst_pmap->pm_bti != NULL);
9770 	if (src_pmap->pm_bti->rs_data_ctx == NULL)
9771 		return (0);
9772 	return (rangeset_copy(dst_pmap->pm_bti, src_pmap->pm_bti));
9773 }
9774 
9775 static void
pmap_bti_update_range(pmap_t pmap,vm_offset_t sva,vm_offset_t eva,bool set)9776 pmap_bti_update_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool set)
9777 {
9778 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9779 	PMAP_ASSERT_STAGE1(pmap);
9780 
9781 	pmap_mask_set_locked(pmap, sva, eva, ATTR_S1_GP, set ? ATTR_S1_GP : 0,
9782 	    true);
9783 }
9784 
9785 int
pmap_bti_set(pmap_t pmap,vm_offset_t sva,vm_offset_t eva)9786 pmap_bti_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
9787 {
9788 	int error;
9789 
9790 	if (pmap->pm_bti == NULL)
9791 		return (0);
9792 	if (!ADDR_IS_CANONICAL(sva) || !ADDR_IS_CANONICAL(eva))
9793 		return (EINVAL);
9794 	if (pmap->pm_stage != PM_STAGE1)
9795 		return (EINVAL);
9796 	if (eva <= sva || ADDR_IS_KERNEL(eva))
9797 		return (EFAULT);
9798 
9799 	sva = trunc_page(sva);
9800 	eva = round_page(eva);
9801 	for (;;) {
9802 		PMAP_LOCK(pmap);
9803 		error = pmap_bti_assign(pmap, sva, eva);
9804 		if (error == 0)
9805 			pmap_bti_update_range(pmap, sva, eva, true);
9806 		PMAP_UNLOCK(pmap);
9807 		if (error != ENOMEM)
9808 			break;
9809 		vm_wait(NULL);
9810 	}
9811 	return (error);
9812 }
9813 
9814 #if defined(KASAN) || defined(KMSAN)
9815 static pd_entry_t	*pmap_san_early_l2;
9816 
9817 #define	SAN_BOOTSTRAP_L2_SIZE	(1 * L2_SIZE)
9818 #define	SAN_BOOTSTRAP_SIZE	(2 * PAGE_SIZE)
9819 static vm_offset_t __nosanitizeaddress
pmap_san_enter_bootstrap_alloc_l2(void)9820 pmap_san_enter_bootstrap_alloc_l2(void)
9821 {
9822 	static uint8_t bootstrap_data[SAN_BOOTSTRAP_L2_SIZE] __aligned(L2_SIZE);
9823 	static size_t offset = 0;
9824 	vm_offset_t addr;
9825 
9826 	if (offset + L2_SIZE > sizeof(bootstrap_data)) {
9827 		panic("%s: out of memory for the bootstrap shadow map L2 entries",
9828 		    __func__);
9829 	}
9830 
9831 	addr = (uintptr_t)&bootstrap_data[offset];
9832 	offset += L2_SIZE;
9833 	return (addr);
9834 }
9835 
9836 /*
9837  * SAN L1 + L2 pages, maybe L3 entries later?
9838  */
9839 static vm_offset_t __nosanitizeaddress
pmap_san_enter_bootstrap_alloc_pages(int npages)9840 pmap_san_enter_bootstrap_alloc_pages(int npages)
9841 {
9842 	static uint8_t bootstrap_data[SAN_BOOTSTRAP_SIZE] __aligned(PAGE_SIZE);
9843 	static size_t offset = 0;
9844 	vm_offset_t addr;
9845 
9846 	if (offset + (npages * PAGE_SIZE) > sizeof(bootstrap_data)) {
9847 		panic("%s: out of memory for the bootstrap shadow map",
9848 		    __func__);
9849 	}
9850 
9851 	addr = (uintptr_t)&bootstrap_data[offset];
9852 	offset += (npages * PAGE_SIZE);
9853 	return (addr);
9854 }
9855 
9856 static void __nosanitizeaddress
pmap_san_enter_bootstrap(void)9857 pmap_san_enter_bootstrap(void)
9858 {
9859 	vm_offset_t freemempos;
9860 
9861 	/* L1, L2 */
9862 	freemempos = pmap_san_enter_bootstrap_alloc_pages(2);
9863 	bs_state.freemempos = freemempos;
9864 	bs_state.va = KASAN_MIN_ADDRESS;
9865 	pmap_bootstrap_l1_table(&bs_state);
9866 	pmap_san_early_l2 = bs_state.l2;
9867 }
9868 
9869 static vm_page_t
pmap_san_enter_alloc_l3(void)9870 pmap_san_enter_alloc_l3(void)
9871 {
9872 	vm_page_t m;
9873 
9874 	m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
9875 	    VM_ALLOC_ZERO);
9876 	if (m == NULL)
9877 		panic("%s: no memory to grow shadow map", __func__);
9878 	return (m);
9879 }
9880 
9881 static vm_page_t
pmap_san_enter_alloc_l2(void)9882 pmap_san_enter_alloc_l2(void)
9883 {
9884 	return (vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO,
9885 	    Ln_ENTRIES, 0, ~0ul, L2_SIZE, 0, VM_MEMATTR_DEFAULT));
9886 }
9887 
9888 void __nosanitizeaddress __nosanitizememory
pmap_san_enter(vm_offset_t va)9889 pmap_san_enter(vm_offset_t va)
9890 {
9891 	pd_entry_t *l1, *l2;
9892 	pt_entry_t *l3;
9893 	vm_page_t m;
9894 
9895 	if (virtual_avail == 0) {
9896 		vm_offset_t block;
9897 		int slot;
9898 		bool first;
9899 
9900 		/* Temporary shadow map prior to pmap_bootstrap(). */
9901 		first = pmap_san_early_l2 == NULL;
9902 		if (first)
9903 			pmap_san_enter_bootstrap();
9904 
9905 		l2 = pmap_san_early_l2;
9906 		slot = pmap_l2_index(va);
9907 
9908 		if ((pmap_load(&l2[slot]) & ATTR_DESCR_VALID) == 0) {
9909 			MPASS(first);
9910 			block = pmap_san_enter_bootstrap_alloc_l2();
9911 			pmap_store(&l2[slot],
9912 			    PHYS_TO_PTE(pmap_early_vtophys(block)) |
9913 			    PMAP_SAN_PTE_BITS | L2_BLOCK);
9914 			dmb(ishst);
9915 		}
9916 
9917 		return;
9918 	}
9919 
9920 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
9921 	l1 = pmap_l1(kernel_pmap, va);
9922 	MPASS(l1 != NULL);
9923 	if ((pmap_load(l1) & ATTR_DESCR_VALID) == 0) {
9924 		m = pmap_san_enter_alloc_l3();
9925 		pmap_store(l1, VM_PAGE_TO_PTE(m) | L1_TABLE);
9926 	}
9927 	l2 = pmap_l1_to_l2(l1, va);
9928 	if ((pmap_load(l2) & ATTR_DESCR_VALID) == 0) {
9929 		m = pmap_san_enter_alloc_l2();
9930 		if (m != NULL) {
9931 			pmap_store(l2, VM_PAGE_TO_PTE(m) |
9932 			    PMAP_SAN_PTE_BITS | L2_BLOCK);
9933 		} else {
9934 			m = pmap_san_enter_alloc_l3();
9935 			pmap_store(l2, VM_PAGE_TO_PTE(m) | L2_TABLE);
9936 		}
9937 		dmb(ishst);
9938 	}
9939 	if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK)
9940 		return;
9941 	l3 = pmap_l2_to_l3(l2, va);
9942 	if ((pmap_load(l3) & ATTR_DESCR_VALID) != 0)
9943 		return;
9944 	m = pmap_san_enter_alloc_l3();
9945 	pmap_store(l3, VM_PAGE_TO_PTE(m) | PMAP_SAN_PTE_BITS | L3_PAGE);
9946 	dmb(ishst);
9947 }
9948 #endif /* KASAN || KMSAN */
9949 
9950 /*
9951  * Track a range of the kernel's virtual address space that is contiguous
9952  * in various mapping attributes.
9953  */
9954 struct pmap_kernel_map_range {
9955 	vm_offset_t sva;
9956 	pt_entry_t attrs;
9957 	int l3pages;
9958 	int l3contig;
9959 	int l2blocks;
9960 	int l2contig;
9961 	int l1blocks;
9962 };
9963 
9964 static void
sysctl_kmaps_dump(struct sbuf * sb,struct pmap_kernel_map_range * range,vm_offset_t eva)9965 sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
9966     vm_offset_t eva)
9967 {
9968 	const char *mode;
9969 	int index;
9970 
9971 	if (eva <= range->sva)
9972 		return;
9973 
9974 	index = range->attrs & ATTR_S1_IDX_MASK;
9975 	switch (index) {
9976 	case ATTR_S1_IDX(VM_MEMATTR_DEVICE_NP):
9977 		mode = "DEV-NP";
9978 		break;
9979 	case ATTR_S1_IDX(VM_MEMATTR_DEVICE):
9980 		mode = "DEV";
9981 		break;
9982 	case ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE):
9983 		mode = "UC";
9984 		break;
9985 	case ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK):
9986 		mode = "WB";
9987 		break;
9988 	case ATTR_S1_IDX(VM_MEMATTR_WRITE_THROUGH):
9989 		mode = "WT";
9990 		break;
9991 	default:
9992 		printf(
9993 		    "%s: unknown memory type %x for range 0x%016lx-0x%016lx\n",
9994 		    __func__, index, range->sva, eva);
9995 		mode = "??";
9996 		break;
9997 	}
9998 
9999 	sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c%c %6s %d %d %d %d %d\n",
10000 	    range->sva, eva,
10001 	    (range->attrs & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP_RW ? 'w' : '-',
10002 	    (range->attrs & ATTR_S1_PXN) != 0 ? '-' : 'x',
10003 	    (range->attrs & ATTR_S1_UXN) != 0 ? '-' : 'X',
10004 	    (range->attrs & ATTR_S1_AP(ATTR_S1_AP_USER)) != 0 ? 'u' : 's',
10005 	    (range->attrs & ATTR_S1_GP) != 0 ? 'g' : '-',
10006 	    mode, range->l1blocks, range->l2contig, range->l2blocks,
10007 	    range->l3contig, range->l3pages);
10008 
10009 	/* Reset to sentinel value. */
10010 	range->sva = 0xfffffffffffffffful;
10011 }
10012 
10013 /*
10014  * Determine whether the attributes specified by a page table entry match those
10015  * being tracked by the current range.
10016  */
10017 static bool
sysctl_kmaps_match(struct pmap_kernel_map_range * range,pt_entry_t attrs)10018 sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
10019 {
10020 
10021 	return (range->attrs == attrs);
10022 }
10023 
10024 static void
sysctl_kmaps_reinit(struct pmap_kernel_map_range * range,vm_offset_t va,pt_entry_t attrs)10025 sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
10026     pt_entry_t attrs)
10027 {
10028 
10029 	memset(range, 0, sizeof(*range));
10030 	range->sva = va;
10031 	range->attrs = attrs;
10032 }
10033 
10034 /* Get the block/page attributes that correspond to the table attributes */
10035 static pt_entry_t
sysctl_kmaps_table_attrs(pd_entry_t table)10036 sysctl_kmaps_table_attrs(pd_entry_t table)
10037 {
10038 	pt_entry_t attrs;
10039 
10040 	attrs = 0;
10041 	if ((table & TATTR_UXN_TABLE) != 0)
10042 		attrs |= ATTR_S1_UXN;
10043 	if ((table & TATTR_PXN_TABLE) != 0)
10044 		attrs |= ATTR_S1_PXN;
10045 	if ((table & TATTR_AP_TABLE_RO) != 0)
10046 		attrs |= ATTR_S1_AP(ATTR_S1_AP_RO);
10047 
10048 	return (attrs);
10049 }
10050 
10051 /* Read the block/page attributes we care about */
10052 static pt_entry_t
sysctl_kmaps_block_attrs(pt_entry_t block)10053 sysctl_kmaps_block_attrs(pt_entry_t block)
10054 {
10055 	return (block & (ATTR_S1_AP_MASK | ATTR_S1_XN | ATTR_S1_IDX_MASK |
10056 	    ATTR_S1_GP));
10057 }
10058 
10059 /*
10060  * Given a leaf PTE, derive the mapping's attributes.  If they do not match
10061  * those of the current run, dump the address range and its attributes, and
10062  * begin a new run.
10063  */
10064 static void
sysctl_kmaps_check(struct sbuf * sb,struct pmap_kernel_map_range * range,vm_offset_t va,pd_entry_t l0e,pd_entry_t l1e,pd_entry_t l2e,pt_entry_t l3e)10065 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
10066     vm_offset_t va, pd_entry_t l0e, pd_entry_t l1e, pd_entry_t l2e,
10067     pt_entry_t l3e)
10068 {
10069 	pt_entry_t attrs;
10070 
10071 	attrs = sysctl_kmaps_table_attrs(l0e);
10072 
10073 	if ((l1e & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
10074 		attrs |= sysctl_kmaps_block_attrs(l1e);
10075 		goto done;
10076 	}
10077 	attrs |= sysctl_kmaps_table_attrs(l1e);
10078 
10079 	if ((l2e & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
10080 		attrs |= sysctl_kmaps_block_attrs(l2e);
10081 		goto done;
10082 	}
10083 	attrs |= sysctl_kmaps_table_attrs(l2e);
10084 	attrs |= sysctl_kmaps_block_attrs(l3e);
10085 
10086 done:
10087 	if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
10088 		sysctl_kmaps_dump(sb, range, va);
10089 		sysctl_kmaps_reinit(range, va, attrs);
10090 	}
10091 }
10092 
10093 static int
sysctl_kmaps(SYSCTL_HANDLER_ARGS)10094 sysctl_kmaps(SYSCTL_HANDLER_ARGS)
10095 {
10096 	struct pmap_kernel_map_range range;
10097 	struct sbuf sbuf, *sb;
10098 	pd_entry_t l0e, *l1, l1e, *l2, l2e;
10099 	pt_entry_t *l3, l3e;
10100 	vm_offset_t sva;
10101 	vm_paddr_t pa;
10102 	int error, i, j, k, l;
10103 
10104 	error = sysctl_wire_old_buffer(req, 0);
10105 	if (error != 0)
10106 		return (error);
10107 	sb = &sbuf;
10108 	sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);
10109 
10110 	/* Sentinel value. */
10111 	range.sva = 0xfffffffffffffffful;
10112 
10113 	/*
10114 	 * Iterate over the kernel page tables without holding the kernel pmap
10115 	 * lock.  Kernel page table pages are never freed, so at worst we will
10116 	 * observe inconsistencies in the output.
10117 	 */
10118 	for (sva = 0xffff000000000000ul, i = pmap_l0_index(sva); i < Ln_ENTRIES;
10119 	    i++) {
10120 		if (i == pmap_l0_index(DMAP_MIN_ADDRESS))
10121 			sbuf_printf(sb, "\nDirect map:\n");
10122 		else if (i == pmap_l0_index(VM_MIN_KERNEL_ADDRESS))
10123 			sbuf_printf(sb, "\nKernel map:\n");
10124 #ifdef KASAN
10125 		else if (i == pmap_l0_index(KASAN_MIN_ADDRESS))
10126 			sbuf_printf(sb, "\nKASAN shadow map:\n");
10127 #endif
10128 #ifdef KMSAN
10129 		else if (i == pmap_l0_index(KMSAN_SHAD_MIN_ADDRESS))
10130 			sbuf_printf(sb, "\nKMSAN shadow map:\n");
10131 		else if (i == pmap_l0_index(KMSAN_ORIG_MIN_ADDRESS))
10132 			sbuf_printf(sb, "\nKMSAN origin map:\n");
10133 #endif
10134 
10135 		l0e = kernel_pmap->pm_l0[i];
10136 		if ((l0e & ATTR_DESCR_VALID) == 0) {
10137 			sysctl_kmaps_dump(sb, &range, sva);
10138 			sva += L0_SIZE;
10139 			continue;
10140 		}
10141 		pa = PTE_TO_PHYS(l0e);
10142 		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa);
10143 
10144 		for (j = pmap_l1_index(sva); j < Ln_ENTRIES; j++) {
10145 			l1e = l1[j];
10146 			if ((l1e & ATTR_DESCR_VALID) == 0) {
10147 				sysctl_kmaps_dump(sb, &range, sva);
10148 				sva += L1_SIZE;
10149 				continue;
10150 			}
10151 			if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) {
10152 				PMAP_ASSERT_L1_BLOCKS_SUPPORTED;
10153 				sysctl_kmaps_check(sb, &range, sva, l0e, l1e,
10154 				    0, 0);
10155 				range.l1blocks++;
10156 				sva += L1_SIZE;
10157 				continue;
10158 			}
10159 			pa = PTE_TO_PHYS(l1e);
10160 			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa);
10161 
10162 			for (k = pmap_l2_index(sva); k < Ln_ENTRIES; k++) {
10163 				l2e = l2[k];
10164 				if ((l2e & ATTR_DESCR_VALID) == 0) {
10165 					sysctl_kmaps_dump(sb, &range, sva);
10166 					sva += L2_SIZE;
10167 					continue;
10168 				}
10169 				if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) {
10170 					sysctl_kmaps_check(sb, &range, sva,
10171 					    l0e, l1e, l2e, 0);
10172 					if ((l2e & ATTR_CONTIGUOUS) != 0)
10173 						range.l2contig +=
10174 						    k % L2C_ENTRIES == 0 ?
10175 						    1 : 0;
10176 					else
10177 						range.l2blocks++;
10178 					sva += L2_SIZE;
10179 					continue;
10180 				}
10181 				pa = PTE_TO_PHYS(l2e);
10182 				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);
10183 
10184 				for (l = pmap_l3_index(sva); l < Ln_ENTRIES;
10185 				    l++, sva += L3_SIZE) {
10186 					l3e = l3[l];
10187 					if ((l3e & ATTR_DESCR_VALID) == 0) {
10188 						sysctl_kmaps_dump(sb, &range,
10189 						    sva);
10190 						continue;
10191 					}
10192 					sysctl_kmaps_check(sb, &range, sva,
10193 					    l0e, l1e, l2e, l3e);
10194 					if ((l3e & ATTR_CONTIGUOUS) != 0)
10195 						range.l3contig +=
10196 						    l % L3C_ENTRIES == 0 ?
10197 						    1 : 0;
10198 					else
10199 						range.l3pages++;
10200 				}
10201 			}
10202 		}
10203 	}
10204 
10205 	error = sbuf_finish(sb);
10206 	sbuf_delete(sb);
10207 	return (error);
10208 }
10209 SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
10210     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
10211     NULL, 0, sysctl_kmaps, "A",
10212     "Dump kernel address layout");
10213