/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h        7.4 (Berkeley) 5/12/91
 *	from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD$
 */
#include <machine/acle-compat.h>

#if __ARM_ARCH >= 6
#include <machine/pmap-v6.h>
#else /* __ARM_ARCH >= 6 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include <machine/pte.h>
#include <machine/cpuconf.h>
/*
 * PTE-related macros
 */
#define PTE_NOCACHE	1
#define PTE_CACHE	2
#define PTE_DEVICE	PTE_NOCACHE
#define PTE_PAGETABLE	3

enum mem_type {
	STRONG_ORD = 0,
	DEVICE_NOSHARE,
	DEVICE_SHARE,
	NRML_NOCACHE,
	NRML_IWT_OWT,
	NRML_IWB_OWB,
	NRML_IWBA_OWBA
};
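
/*
 * Illustrative sketch (not part of this header's API): board bootstrap
 * code passes one of the PTE_* attributes above as the "cache" argument
 * of the mapping helpers declared later in this file, e.g. mapping RAM
 * cached and a device region uncached.  The va/pa/size names here are
 * hypothetical.
 *
 *	pmap_map_chunk(l1pt, va, pa, size,
 *	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
 *	pmap_map_chunk(l1pt, dev_va, dev_pa, dev_size,
 *	    VM_PROT_READ | VM_PROT_WRITE, PTE_DEVICE);
 */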

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#define PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#ifdef _KERNEL

#define vtophys(va)	pmap_kextract((vm_offset_t)(va))

#endif

#define	pmap_page_get_memattr(m)	((m)->md.pv_memattr)
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);

/*
 * Pmap stuff
 */

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by bootstrap code
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	vm_offset_t	pv_va;
	vm_paddr_t	pv_pa;
};

struct	pv_entry;
struct	pv_chunk;

struct	md_page {
	int pvh_attrs;
	vm_memattr_t	pv_memattr;
	vm_offset_t pv_kva;		/* first kernel VA mapping */
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct l1_ttable;
struct l2_dtable;

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
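
/*
 * Worked example, assuming the usual L1_S_SHIFT of 20 (1MB sections):
 * L2_LOG2 = (32 - 20) - 4 = 8, so L2_SIZE = 256.  Each l2_dtable then
 * spans 16 * 1MB = 16MB of VA, and 256 of them cover the full 4GB
 * mapped by one L1 translation table.
 */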

struct	pmap {
	struct mtx		pm_mtx;
	u_int8_t		pm_domain;
	struct l1_ttable	*pm_l1;
	struct l2_dtable	*pm_l2[L2_SIZE];
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define kernel_pmap	(&kernel_pmap_store)

#define	PMAP_ASSERT_LOCKED(pmap) \
				mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_OWNED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
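
/*
 * Typical locking pattern (sketch): walks or updates of a pmap's
 * structures are bracketed by the macros above, e.g.
 *
 *	PMAP_LOCK(pm);
 *	... inspect or modify pm ...
 *	PMAP_UNLOCK(pm);
 */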
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	int		pv_flags;	/* flags (wired, etc...) */
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	TAILQ_ENTRY(pv_entry)	pv_plist;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	8
#define	_NPCPV	252

struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	uint32_t		pc_dummy[3];	/* aligns pv_chunk to 4KB */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};
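
/*
 * Allocation sketch (illustrative; modeled on the pv chunk allocator,
 * not a verbatim copy): a free pv_entry is found by scanning pc_map for
 * a set bit; 8 x 32 bits cover the 252 entries, with the few surplus
 * bits kept permanently clear.
 *
 *	for (field = 0; field < _NPCM; field++) {
 *		if (pc->pc_map[field] != 0) {
 *			bit = ffs(pc->pc_map[field]) - 1;
 *			pc->pc_map[field] &= ~(1u << bit);
 *			return (&pc->pc_pventry[field * 32 + bit]);
 *		}
 *	}
 */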

#ifdef _KERNEL

boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);

/*
 * Virtual address to page table entry and to physical address.
 * Note: unlike the i386 original this comment was inherited from,
 * the arm version below resolves the PTE via pmap_get_pde_pte()
 * rather than through a recursive mapping.
 */

/*
 * The current top of kernel VM.
 */
extern vm_offset_t pmap_curmaxkvaddr;

struct pcb;

void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);
/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vm_offset_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(kernel_pmap, va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}
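
/*
 * Usage sketch (illustrative): look up the PTE for a kernel VA and, if
 * it is valid, recover the physical address.  L2_S_OFFSET is assumed
 * from <machine/pte.h>; l2pte_valid()/l2pte_pa() are defined below.
 *
 *	pt_entry_t *ptep = vtopte(va);
 *	if (ptep != NULL && l2pte_valid(*ptep))
 *		pa = l2pte_pa(*ptep) | (va & L2_S_OFFSET);
 */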

extern vm_paddr_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
void	pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
void	pmap_kremove_device(vm_offset_t, vm_size_t);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
void	pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
vm_paddr_t pmap_kextract(vm_offset_t va);
vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_offset_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
vm_page_t	pmap_use_pt(pmap_t, vm_offset_t);
void	pmap_debug(int);
void	pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
void	pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
vm_size_t	pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t,
	    vm_size_t, int, int);
void	pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
	    int prot, int cache);
int	pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define	PMAP_DOMAIN_KERNEL	0	/* The kernel uses domain #0 */

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\
				L1_S_XSCALE_TEX(TEX_XSCALE_T))

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \
				L2_XSCALE_L_TEX(TEX_XSCALE_T))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \
				 L2_XSCALE_T_TEX(TEX_XSCALE_T))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */
#define	L2_AP(x)	(L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#elif ARM_MMU_GENERIC != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#endif /* ARM_NMMUS > 1 */

#if defined(CPU_XSCALE_81342)
#define PMAP_NEEDS_PTE_SYNC	1
#define PMAP_INCLUDE_PTE_SYNC
#else
#define	PMAP_NEEDS_PTE_SYNC	0
#endif

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)
#define	L1_S_WRITABLE(pd)	((pd) & L1_S_PROT_W)

#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
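
/*
 * For example, L2_S_PROT(PTE_USER, VM_PROT_READ | VM_PROT_WRITE) folds
 * to (L2_S_PROT_U | L2_S_PROT_W), while a kernel read-only mapping,
 * L2_S_PROT(PTE_KERNEL, VM_PROT_READ), contributes no AP bits at all.
 */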

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
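
/*
 * Selection sketch (illustrative; this is the pattern pmap_map_chunk()
 * follows): prefer the largest mapping whose alignment and remaining
 * size fit.  L1_S_SIZE is a 1MB section, L2_L_SIZE a 64KB large page,
 * L2_S_SIZE a 4KB small page.
 *
 *	while (resid > 0) {
 *		if (L1_S_MAPPABLE_P(va, pa, resid))
 *			mapped = L1_S_SIZE;
 *		else if (L2_L_MAPPABLE_P(va, pa, resid))
 *			mapped = L2_L_SIZE;
 *		else
 *			mapped = L2_S_SIZE;
 *		... install the mapping ...
 *		va += mapped; pa += mapped; resid -= mapped;
 *	}
 */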

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#ifdef ARM_L2_PIPT
#define _sync_l2(pte, size)	cpu_l2cache_wb_range(vtophys(pte), size)
#else
#define _sync_l2(pte, size)	cpu_l2cache_wb_range(pte, size)
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
		cpu_drain_writebuf();					\
		_sync_l2((vm_offset_t)(pte), sizeof(pt_entry_t));	\
	} else								\
		cpu_drain_writebuf();					\
} while (/*CONSTCOND*/0)
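
/*
 * Usage sketch (illustrative): a PTE store is followed by PTE_SYNC so
 * the hardware table walker sees the update on CPUs without
 * write-through page-table mappings.  "cache_bits" is a hypothetical
 * stand-in for a cache-mode value such as pte_l2_s_cache_mode
 * (declared below).
 *
 *	*ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | cache_bits;
 *	PTE_SYNC(ptep);
 */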

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
		cpu_drain_writebuf();					\
		_sync_l2((vm_offset_t)(pte),				\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	} else								\
		cpu_drain_writebuf();					\
} while (/*CONSTCOND*/0)

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
    vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);

#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_81342)
void	pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_generic(vm_paddr_t, int, int);

void	pmap_pte_init_generic(void);
#endif /* ARM_MMU_GENERIC != 0 || CPU_XSCALE_81342 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_xscale(vm_paddr_t, int, int);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);

void	pmap_use_minicache(vm_offset_t, vm_size_t);
#endif /* ARM_MMU_XSCALE == 1 */
#if defined(CPU_XSCALE_81342)
#define ARM_HAVE_SUPERSECTIONS
#endif

#define PTE_KERNEL	0
#define PTE_USER	1
#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_NC		0x20		/* mapping is non-cacheable */
#define	PVF_MWC		0x40		/* mapping is used multiple times in userland */
#define	PVF_UNMAN	0x80		/* mapping is unmanaged */

void vector_page_setprot(int);

#define SECTION_CACHE	0x1
#define SECTION_PT	0x2
void	pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags);
#ifdef ARM_HAVE_SUPERSECTIONS
void	pmap_kenter_supersection(vm_offset_t, uint64_t, int flags);
#endif

extern char *_tmppt;

void	pmap_postinit(void);

extern vm_paddr_t dump_avail[];
#endif	/* _KERNEL */

#endif	/* !LOCORE */

#endif	/* !_MACHINE_PMAP_H_ */
#endif	/* __ARM_ARCH >= 6 */