/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *      from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
 *      from: @(#)pmap.h        7.4 (Berkeley) 5/12/91
 *      from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include <machine/pte.h>

/*
 * Pte related macros
 */
#define PTE_NOCACHE	0
#define PTE_CACHE	1

#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDR_SHIFT)+((pti)<<PAGE_SHIFT)))
#define PTDIPDE(ptd)	((ptd)/1024)
#define PTDIPTE(ptd)	((ptd)%256)

#ifndef	NKPT
#define NKPT		120	/* actual number of kernel page tables */
#endif

#ifndef NKPDE
#define NKPDE		1019	/* maximum number of kernel PDEs */
#endif

#define NPDEPTD		16	/* number of PDEs in each PTD */

/*
 * The *PTDI values control the layout of virtual memory
 */

#define KPTDI		(NPDEPG-NKPDE)	/* ptd entry where kernel space begins */
#define PTDPTDI		(KPTDI-1)	/* ptd entry that points to the ptd! */
#define KPTPTDI		(PTDPTDI-1)	/* ptd entry for kernel PTEs */
#define UPTPTDI		(KPTPTDI-3)	/* ptd entry for uspace PTEs */
#define UMAXPTDI	(UPTPTDI-1)	/* ptd entry for user space end */
#define UMAXPTEOFF	(NPTEPG)	/* pte entry for user space end */

#ifndef LOCORE

#include <sys/queue.h>

#define PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#ifdef _KERNEL

#define vtophys(va)	pmap_extract(pmap_kernel(), (vm_offset_t)(va))
#define pmap_kextract(va)	pmap_extract(pmap_kernel(), (vm_offset_t)(va))

#endif

#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
/*
 * Pmap stuff
 */

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by bootstrap code
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	vm_offset_t	pv_va;
	vm_paddr_t	pv_pa;
};
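
/*
 * Illustrative sketch only: bootstrap code typically fills in one
 * pv_addr per statically-allocated table and chains it on an SLIST
 * so the tables can be found again later.  The list head and helper
 * names here are hypothetical, not part of this header's API:
 *
 *	static SLIST_HEAD(, pv_addr) bootstrap_pv_list =
 *	    SLIST_HEAD_INITIALIZER(bootstrap_pv_list);
 *
 *	static void
 *	bootstrap_remember(struct pv_addr *pv, vm_offset_t va, vm_paddr_t pa)
 *	{
 *
 *		pv->pv_va = va;
 *		pv->pv_pa = pa;
 *		SLIST_INSERT_HEAD(&bootstrap_pv_list, pv, pv_list);
 *	}
 */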

struct	pv_entry;

struct	md_page {
	int pvh_attrs;
	u_int uro_mappings;
	u_int urw_mappings;
	union {
		u_short s_mappings[2]; /* Assume kernel count <= 65535 */
		u_int i_mappings;
	} k_u;
#define	kro_mappings	k_u.s_mappings[0]
#define	krw_mappings	k_u.s_mappings[1]
#define	k_mappings	k_u.i_mappings
	int			pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	TAILQ_INIT(&(pg)->mdpage.pv_list);				\
	mtx_init(&(pg)->mdpage.pvh_mtx, "MDPAGE Mutex", NULL, MTX_DEF);\
	(pg)->mdpage.pvh_attrs = 0;					\
	(pg)->mdpage.uro_mappings = 0;					\
	(pg)->mdpage.urw_mappings = 0;					\
	(pg)->mdpage.k_mappings = 0;					\
} while (/*CONSTCOND*/0)

struct l1_ttable;
struct l2_dtable;

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
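
/*
 * Worked example, assuming L1_S_SHIFT == 20 (1MB sections):
 * L2_LOG2 == (32 - 20) - 4 == 8, so L2_SIZE == 256 l2_dtable slots,
 * each slot covering L2_BUCKET_SIZE * 1MB == 16MB of virtual space.
 * A lookup might then split an L1 index like this (macro names are
 * hypothetical here; the pmap implementation defines its own):
 *
 *	#define L2_IDX(l1idx)		((l1idx) >> L2_BUCKET_LOG2)
 *	#define L2_BUCKET(l1idx)	((l1idx) & (L2_BUCKET_SIZE - 1))
 */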

struct	pmap {
	u_int8_t		pm_domain;
	struct l1_ttable	*pm_l1;
	struct l2_dtable	*pm_l2[L2_SIZE];
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	int			pm_count;	/* reference count */
	int			pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern pmap_t	kernel_pmap;
#define pmap_kernel() kernel_pmap
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	vm_page_t	pv_ptem;	/* VM page for pte */
	int		pv_flags;	/* flags (wired, etc...) */
} *pv_entry_t;

#define	PV_ENTRY_NULL	((pv_entry_t) 0)

#define	PV_CI		0x01	/* all entries must be cache inhibited */
#define	PV_PTPAGE	0x02	/* entry maps a page table page */
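
/*
 * Illustrative sketch only: code that needs the pv_entry for a given
 * (pmap, va) pair on a page typically walks the page's pv list, with
 * the appropriate lock held:
 *
 *	struct pv_entry *pve;
 *
 *	TAILQ_FOREACH(pve, &m->md.pv_list, pv_list)
 *		if (pve->pv_pmap == pm && pve->pv_va == va)
 *			break;
 */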

/*
 * Page hooks.
 * For speed we store both the virtual address and the page table
 * entry address for each page hook.
 */
typedef struct {
	vm_offset_t va;
	pt_entry_t *pte;
} pagehook_t;

#ifdef _KERNEL

boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);

/*
 * Virtual address to page table entry and to physical address.
 * Unlike on i386, these lookups do not use a recursive mapping;
 * vtopte() below walks the L1/L2 tables via pmap_get_pde_pte().
 */

/*
 * The current top of kernel VM.
 */
extern vm_offset_t pmap_curmaxkvaddr;

struct pcb;

void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vm_offset_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}
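
/*
 * Illustrative use of vtopte(), assuming L2_S_OFFSET from
 * <machine/pte.h>; this is a sketch, not the pmap_extract()
 * implementation:
 *
 *	pt_entry_t *ptep = vtopte(va);
 *	vm_paddr_t pa;
 *
 *	if (ptep != NULL && l2pte_valid(*ptep))
 *		pa = l2pte_pa(*ptep) | (va & L2_S_OFFSET);
 */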

extern vm_offset_t avail_end;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern vm_offset_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap(vm_offset_t, vm_offset_t, struct pv_addr *);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_offset_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
vm_page_t	pmap_use_pt(pmap_t, vm_offset_t);
void	pmap_debug(int);
void	pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
void	pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
vm_size_t	pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t,
	    vm_size_t, int, int);
void	pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
	    int prot, int cache);
int	pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);
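
/*
 * Illustrative sketch only: board start-up code typically builds the
 * initial kernel map with the routines above.  KERNBASE, PHYSADDR and
 * the size here are hypothetical values:
 *
 *	pmap_map_chunk(l1pt, KERNBASE, PHYSADDR, 0x00400000,
 *	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
 */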

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */
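
/*
 * Illustrative sketch: the ARM Domain Access Control Register holds
 * two bits per domain, where 01 ("client") means accesses are checked
 * against the permission bits in the descriptors.  Assuming
 * DOMAIN_CLIENT (0x1) from <machine/armreg.h>, the kernel domain
 * would be enabled with:
 *
 *	dacr |= DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2);
 */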

/*
 * The new pmap ensures that page tables are always mapped Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#endif /* ARM_NMMUS > 1 */

#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
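
/*
 * Illustrative sketch of how the pieces above combine when a small
 * page mapping is entered (the real work is done in pmap_enter();
 * this is not the actual implementation):
 *
 *	*ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_USER, prot) |
 *	    pte_l2_s_cache_mode;
 *	PTE_SYNC(ptep);
 */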

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
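
/*
 * Illustrative use, as in a pmap_map_chunk()-style loop: always prefer
 * the largest mapping that the current alignment and residual size
 * allow:
 *
 *	if (L1_S_MAPPABLE_P(va, pa, resid))
 *		... map a 1MB section ...
 *	else if (L2_L_MAPPABLE_P(va, pa, resid))
 *		... map a 64KB large page ...
 *	else
 *		... map a 4KB small page ...
 */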

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC)					\
		cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	}								\
} while (/*CONSTCOND*/0)

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);

#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void	pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_generic(vm_paddr_t, int, int);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */

#if 1 /* ARM_MMU_SA1 == 1 */
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_xscale(vm_paddr_t, int, int);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);

void	pmap_use_minicache(vm_offset_t, vm_size_t);
#endif /* ARM_MMU_XSCALE == 1 */

#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_PAGETABLE	2

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define	l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define	l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
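
/*
 * Worked example, assuming KERNEL_BASE == 0xc0000000 and
 * L1_S_SHIFT == 20: the L1 table holds 4096 4-byte entries
 * (L1_TABLE_SIZE == 16KB), user space consumes the first
 * 0xc0000000 >> 20 == 3072 of them, so
 * KERNEL_PD_SIZE == 16384 - 3072 * 4 == 4096 bytes.
 */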

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)

void	vector_page_setprot(int);

void	pmap_update(pmap_t);

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vm_offset_t	pd_va;		/* virtual address */
	vm_paddr_t	pd_pa;		/* physical address */
	vm_size_t	pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};

const struct pmap_devmap *pmap_devmap_find_pa(vm_paddr_t, vm_size_t);
const struct pmap_devmap *pmap_devmap_find_va(vm_offset_t, vm_size_t);

void	pmap_devmap_bootstrap(vm_offset_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);
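
/*
 * Illustrative sketch only: platform code usually provides a
 * statically initialized table, terminated by an all-zero entry, and
 * registers it before pmap_bootstrap() runs.  The addresses below are
 * hypothetical:
 *
 *	static const struct pmap_devmap mydev_devmap[] = {
 *		{ 0xfe000000, 0x40000000, 0x00100000,
 *		  VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE },
 *		{ 0, 0, 0, 0, 0 }
 *	};
 *
 *	pmap_devmap_register(mydev_devmap);
 */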
#endif	/* _KERNEL */

#endif	/* !LOCORE */

#endif	/* !_MACHINE_PMAP_H_ */