xref: /freebsd/sys/arm/include/pmap.h (revision 0f2bd1e89db1a2f09268edea21e0ead329e092df)
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *      from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
 *      from: @(#)pmap.h        7.4 (Berkeley) 5/12/91
 * 	from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include <machine/pte.h>
#include <machine/cpuconf.h>
/*
 * PTE related macros
 */
#define PTE_NOCACHE	0
#define PTE_CACHE	1
#define PTE_PAGETABLE	2

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#define PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#ifdef _KERNEL

#define vtophys(va)	pmap_extract(pmap_kernel(), (vm_offset_t)(va))
#define pmap_kextract(va)	pmap_extract(pmap_kernel(), (vm_offset_t)(va))

#endif
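
/*
 * Illustrative use of the macros above (a sketch; `buf' is a hypothetical
 * kernel-virtual pointer): both resolve a kernel VA through the kernel
 * pmap when a physical address is needed, e.g. during early device setup:
 *
 *	vm_paddr_t pa = vtophys(buf);
 */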

#define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#define	pmap_page_set_memattr(m, ma)	(void)0

/*
 * Pmap stuff
 */

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by bootstrap code.
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	vm_offset_t	pv_va;
	vm_paddr_t	pv_pa;
};

struct	pv_entry;

struct	md_page {
	int pvh_attrs;
	vm_offset_t pv_kva;		/* first kernel VA mapping */
	TAILQ_HEAD(,pv_entry)	pv_list;
};

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	TAILQ_INIT(&(pg)->mdpage.pv_list);				\
	mtx_init(&(pg)->mdpage.pvh_mtx, "MDPAGE Mutex", NULL, MTX_DEF);\
	(pg)->mdpage.pvh_attrs = 0;					\
} while (/*CONSTCOND*/0)

struct l1_ttable;
struct l2_dtable;


/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
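
/*
 * Worked example (assuming the usual L1_S_SHIFT of 20, i.e. 1MB sections):
 * L2_LOG2 = (32 - 20) - 4 = 8, so L2_SIZE = 256.  Each l2_dtable then spans
 * 16 * 1MB = 16MB of virtual address space, and 256 of them cover the full
 * 4GB mappable by one L1 translation table.
 */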

struct	pmap {
	struct mtx		pm_mtx;
	u_int8_t		pm_domain;
	struct l1_ttable	*pm_l1;
	struct l2_dtable	*pm_l2[L2_SIZE];
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	uint32_t		pm_gen_count;	/* generation count (pmap lock dropped) */
	u_int			pm_retries;
	cpumask_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define kernel_pmap	(&kernel_pmap_store)
#define pmap_kernel() kernel_pmap

#define	PMAP_ASSERT_LOCKED(pmap) \
				mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_OWNED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif
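
/*
 * Typical locking pattern (an illustrative sketch, not lifted from pmap.c):
 * callers bracket page-table walks and updates with the pmap lock:
 *
 *	PMAP_LOCK(pm);
 *	... examine or modify the pmap ...
 *	PMAP_UNLOCK(pm);
 */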


/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	TAILQ_ENTRY(pv_entry)	pv_plist;
	int		pv_flags;	/* flags (wired, etc...) */
} *pv_entry_t;
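
/*
 * Illustrative sketch (hypothetical local variables `m' and `pve'):
 * every mapping of a page can be visited through its md_page list:
 *
 *	struct pv_entry *pve;
 *	TAILQ_FOREACH(pve, &m->md.pv_list, pv_list)
 *		... pve->pv_pmap and pve->pv_va identify one mapping ...
 */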

#ifdef _KERNEL

boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);

/*
 * Translation from a virtual address to its page table entry and,
 * from there, to a physical address.  Unlike the i386 version this
 * header descends from, the ARM implementation below walks the page
 * tables via pmap_get_pde_pte() rather than a recursive mapping.
 */

/*
 * The current top of kernel VM.
 */
extern vm_offset_t pmap_curmaxkvaddr;

struct pcb;

void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);
/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vm_offset_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}
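
/*
 * Illustrative use (a sketch): vtopte() combined with the l2pte_* macros
 * defined later in this file recovers a physical address by hand:
 *
 *	pt_entry_t *ptep = vtopte(va);
 *	if (ptep != NULL && l2pte_valid(*ptep))
 *		pa = l2pte_pa(*ptep) | (va & PAGE_MASK);
 */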

extern vm_offset_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap(vm_offset_t, vm_offset_t, struct pv_addr *);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temp(vm_paddr_t pa, int i);
void	pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_offset_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
vm_page_t	pmap_use_pt(pmap_t, vm_offset_t);
void	pmap_debug(int);
void	pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
void	pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
vm_size_t	pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int);
void	pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
	    int prot, int cache);
int	pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);
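
/*
 * Illustrative bootstrap use (hypothetical addresses, sizes and L1 table
 * name): early board code maps a physically contiguous chunk into KVA,
 * selecting the cache attribute with the PTE_* constants defined at the
 * top of this file:
 *
 *	pmap_map_chunk(kernel_l1pt.pv_va, va, pa, 0x00100000,
 *	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
 */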

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define	PMAP_DOMAIN_KERNEL	0	/* The kernel uses domain #0 */

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\
				L1_S_XSCALE_TEX(TEX_XSCALE_T))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \
				L2_XSCALE_L_TEX(TEX_XSCALE_T))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#endif /* ARM_NMMUS > 1 */

#ifdef SKYEYE_WORKAROUNDS
#define PMAP_NEEDS_PTE_SYNC     1
#define PMAP_INCLUDE_PTE_SYNC
#else
#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif defined(CPU_XSCALE_81342)
#define PMAP_NEEDS_PTE_SYNC	1
#define PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
		cpu_l2cache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
	}								\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
		cpu_l2cache_wb_range((vm_offset_t)(pte),		\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	}								\
} while (/*CONSTCOND*/0)
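
/*
 * Illustrative sketch (not lifted verbatim from pmap.c): a PTE store
 * followed by the sync that makes it visible to the table-walking
 * hardware when the cache is not write-through:
 *
 *	*ptep = L2_S_PROTO | pa |
 *	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE) |
 *	    pte_l2_s_cache_mode;
 *	PTE_SYNC(ptep);
 */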

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);

#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_81342)
void	pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_generic(vm_paddr_t, int, int);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 || CPU_XSCALE_81342 */

#if /* ARM_MMU_SA1 == */1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_xscale(vm_paddr_t, int, int);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);

void	pmap_use_minicache(vm_offset_t, vm_size_t);
#endif /* ARM_MMU_XSCALE == 1 */
#if defined(CPU_XSCALE_81342)
#define ARM_HAVE_SUPERSECTIONS
#endif

#define PTE_KERNEL	0
#define PTE_USER	1
#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_NC		0x20		/* mapping is non-cacheable */
#define	PVF_MWC		0x40		/* mapping is used multiple times in userland */
#define	PVF_UNMAN	0x80		/* mapping is unmanaged */

void vector_page_setprot(int);

void pmap_update(pmap_t);

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vm_offset_t	pd_va;		/* virtual address */
	vm_paddr_t	pd_pa;		/* physical address */
	vm_size_t	pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};

const struct pmap_devmap *pmap_devmap_find_pa(vm_paddr_t, vm_size_t);
const struct pmap_devmap *pmap_devmap_find_va(vm_offset_t, vm_size_t);

void	pmap_devmap_bootstrap(vm_offset_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);
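
/*
 * Illustrative sketch (hypothetical device and addresses): platform code
 * usually supplies a statically initialized table, terminated by an entry
 * whose pd_size is zero, and hands it to the bootstrap/register routines
 * above early in boot:
 *
 *	static const struct pmap_devmap mydev_devmap[] = {
 *		{ 0xfe000000, 0x48000000, 0x00100000,
 *		  VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE },
 *		{ 0, 0, 0, 0, 0 }
 *	};
 *
 *	pmap_devmap_bootstrap(l1pt_va, mydev_devmap);
 */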

#define SECTION_CACHE	0x1
#define SECTION_PT	0x2
void	pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags);
#ifdef ARM_HAVE_SUPERSECTIONS
void	pmap_kenter_supersection(vm_offset_t, uint64_t, int flags);
#endif

extern char *_tmppt;

void	pmap_postinit(void);

#ifdef ARM_USE_SMALL_ALLOC
void	arm_add_smallalloc_pages(void *, void *, int, int);
vm_offset_t arm_ptovirt(vm_paddr_t);
void arm_init_smallalloc(void);
struct arm_small_page {
	void *addr;
	TAILQ_ENTRY(arm_small_page) pg_list;
};

#endif

#define ARM_NOCACHE_KVA_SIZE 0x1000000
extern vm_offset_t arm_nocache_startaddr;
void *arm_remap_nocache(void *, vm_size_t);
void arm_unmap_nocache(void *, vm_size_t);
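
/*
 * Illustrative use (a sketch; `buf' and `size' are hypothetical): obtain an
 * uncached alias of a buffer, e.g. for data that a device reads behind the
 * CPU's back, and release it when done:
 *
 *	void *ncva = arm_remap_nocache(buf, size);
 *	...
 *	arm_unmap_nocache(ncva, size);
 */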

extern vm_paddr_t dump_avail[];
#endif	/* _KERNEL */

#endif	/* !LOCORE */

#endif	/* !_MACHINE_PMAP_H_ */