1f9bac91bSBenno Rice /*-
271e3c308SPedro F. Giffuni * SPDX-License-Identifier: BSD-3-Clause AND BSD-4-Clause
351369649SPedro F. Giffuni *
4ffb56695SRafal Jaworowski * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
5ffb56695SRafal Jaworowski * All rights reserved.
6ffb56695SRafal Jaworowski *
7ffb56695SRafal Jaworowski * Adapted for Freescale's e500 core CPUs.
8ffb56695SRafal Jaworowski *
9ffb56695SRafal Jaworowski * Redistribution and use in source and binary forms, with or without
10ffb56695SRafal Jaworowski * modification, are permitted provided that the following conditions
11ffb56695SRafal Jaworowski * are met:
12ffb56695SRafal Jaworowski * 1. Redistributions of source code must retain the above copyright
13ffb56695SRafal Jaworowski * notice, this list of conditions and the following disclaimer.
14ffb56695SRafal Jaworowski * 2. Redistributions in binary form must reproduce the above copyright
15ffb56695SRafal Jaworowski * notice, this list of conditions and the following disclaimer in the
16ffb56695SRafal Jaworowski * documentation and/or other materials provided with the distribution.
17ffb56695SRafal Jaworowski * 3. The name of the author may not be used to endorse or promote products
18ffb56695SRafal Jaworowski * derived from this software without specific prior written permission.
19ffb56695SRafal Jaworowski *
20ffb56695SRafal Jaworowski * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21ffb56695SRafal Jaworowski * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22ffb56695SRafal Jaworowski * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
23ffb56695SRafal Jaworowski * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24ffb56695SRafal Jaworowski * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
25ffb56695SRafal Jaworowski * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26ffb56695SRafal Jaworowski * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27ffb56695SRafal Jaworowski * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28ffb56695SRafal Jaworowski * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29ffb56695SRafal Jaworowski * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30ffb56695SRafal Jaworowski */
31ffb56695SRafal Jaworowski /*-
32f9bac91bSBenno Rice * Copyright (C) 1995, 1996 Wolfgang Solfrank.
33f9bac91bSBenno Rice * Copyright (C) 1995, 1996 TooLs GmbH.
34f9bac91bSBenno Rice * All rights reserved.
35f9bac91bSBenno Rice *
36f9bac91bSBenno Rice * Redistribution and use in source and binary forms, with or without
37f9bac91bSBenno Rice * modification, are permitted provided that the following conditions
38f9bac91bSBenno Rice * are met:
39f9bac91bSBenno Rice * 1. Redistributions of source code must retain the above copyright
40f9bac91bSBenno Rice * notice, this list of conditions and the following disclaimer.
41f9bac91bSBenno Rice * 2. Redistributions in binary form must reproduce the above copyright
42f9bac91bSBenno Rice * notice, this list of conditions and the following disclaimer in the
43f9bac91bSBenno Rice * documentation and/or other materials provided with the distribution.
44f9bac91bSBenno Rice * 3. All advertising materials mentioning features or use of this software
45f9bac91bSBenno Rice * must display the following acknowledgement:
46f9bac91bSBenno Rice * This product includes software developed by TooLs GmbH.
47f9bac91bSBenno Rice * 4. The name of TooLs GmbH may not be used to endorse or promote products
48f9bac91bSBenno Rice * derived from this software without specific prior written permission.
49f9bac91bSBenno Rice *
50f9bac91bSBenno Rice * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
51f9bac91bSBenno Rice * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
52f9bac91bSBenno Rice * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
53f9bac91bSBenno Rice * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
54f9bac91bSBenno Rice * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
55f9bac91bSBenno Rice * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
56f9bac91bSBenno Rice * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
57f9bac91bSBenno Rice * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
58f9bac91bSBenno Rice * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
59f9bac91bSBenno Rice * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60f9bac91bSBenno Rice *
61ffb56695SRafal Jaworowski * from: $NetBSD: pmap.h,v 1.17 2000/03/30 16:18:24 jdolecek Exp $
62f9bac91bSBenno Rice */
63f9bac91bSBenno Rice
64f9bac91bSBenno Rice #ifndef _MACHINE_PMAP_H_
65f9bac91bSBenno Rice #define _MACHINE_PMAP_H_
66f9bac91bSBenno Rice
6748d0b1a0SAlan Cox #include <sys/queue.h>
68c3e289e1SNathan Whitehorn #include <sys/tree.h>
69c47dd3dbSAttilio Rao #include <sys/_cpuset.h>
7048d0b1a0SAlan Cox #include <sys/_lock.h>
7148d0b1a0SAlan Cox #include <sys/_mutex.h>
725244eac9SBenno Rice #include <machine/sr.h>
737f89270bSPeter Grehan #include <machine/pte.h>
74c3e289e1SNathan Whitehorn #include <machine/slb.h>
75ffb56695SRafal Jaworowski #include <machine/tlb.h>
7621943937SJeff Roberson #include <machine/vmparam.h>
7765bbba25SJustin Hibbits #ifdef __powerpc64__
78b24ed9c5SDoug Moore #include <vm/_vm_radix.h>
7965bbba25SJustin Hibbits #endif
8065bbba25SJustin Hibbits
8165bbba25SJustin Hibbits /*
8265bbba25SJustin Hibbits * The radix page table structure is described by levels 1-4.
8365bbba25SJustin Hibbits * See Fig 33. on p. 1002 of Power ISA v3.0B
8465bbba25SJustin Hibbits *
8565bbba25SJustin Hibbits * Page directories and tables must be size aligned.
8665bbba25SJustin Hibbits */
8765bbba25SJustin Hibbits
8865bbba25SJustin Hibbits /* Root page directory - 64k -- each entry covers 512GB */
8965bbba25SJustin Hibbits typedef uint64_t pml1_entry_t;
9065bbba25SJustin Hibbits /* l2 page directory - 4k -- each entry covers 1GB */
9165bbba25SJustin Hibbits typedef uint64_t pml2_entry_t;
9265bbba25SJustin Hibbits /* l3 page directory - 4k -- each entry covers 2MB */
9365bbba25SJustin Hibbits typedef uint64_t pml3_entry_t;
9465bbba25SJustin Hibbits /* l4 page directory - 256B/4k -- each entry covers 64k/4k */
9565bbba25SJustin Hibbits typedef uint64_t pml4_entry_t;
9665bbba25SJustin Hibbits
9765bbba25SJustin Hibbits typedef uint64_t pt_entry_t;
98ffb56695SRafal Jaworowski
994026b447SJustin Hibbits struct pmap;
1004026b447SJustin Hibbits typedef struct pmap *pmap_t;
1014026b447SJustin Hibbits
1022a499f92SKonstantin Belousov #define PMAP_ENTER_QUICK_LOCKED 0x10000000
1032a499f92SKonstantin Belousov
1047c277971SPeter Grehan #if !defined(NPMAPS)
1057c277971SPeter Grehan #define NPMAPS 32768
1067c277971SPeter Grehan #endif /* !defined(NPMAPS) */
1077c277971SPeter Grehan
10895fa3335SNathan Whitehorn struct slbtnode;
109f9bac91bSBenno Rice
/*
 * Physical-to-virtual overflow (PVO) entry: describes one virtual mapping
 * of a physical page.  Each entry is linked from the mapped page's pv list
 * (pvo_vlink) and from the owning pmap (pvo_plink while live, pvo_dlink
 * once queued for deletion -- the two links share storage).
 */
struct pvo_entry {
	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
#ifndef __powerpc64__
	LIST_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
#endif
	union {
		RB_ENTRY(pvo_entry) pvo_plink;	/* Link to pmap entries */
		SLIST_ENTRY(pvo_entry) pvo_dlink; /* Link to delete entry */
	};
	struct {
#ifndef __powerpc64__
		/* 32-bit fields */
		pte_t	    pte;		/* cached PTE image */
#endif
		/* 64-bit fields */
		uintptr_t   slot;		/* PTEG slot of this entry */
		vm_paddr_t  pa;			/* physical address mapped */
		vm_prot_t   prot;		/* protection of the mapping */
	} pvo_pte;
	pmap_t		pvo_pmap;		/* Owning pmap */
	vm_offset_t	pvo_vaddr;		/* VA of entry */
	uint64_t	pvo_vpn;		/* Virtual page number */
};
LIST_HEAD(pvo_head, pvo_entry);
SLIST_HEAD(pvo_dlist, pvo_entry);
RB_HEAD(pvo_tree, pvo_entry);
/* Orders pvo_entry nodes in a pmap's RB tree by virtual address. */
int pvo_vaddr_compare(struct pvo_entry *, struct pvo_entry *);
RB_PROTOTYPE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
1385244eac9SBenno Rice
/*
 * PVO state flags and the PTEG-slot index are packed into the low,
 * sub-page bits of pvo_vaddr (the VA itself is page aligned, so those
 * bits are otherwise unused).
 */
/* Used by 32-bit PMAP */
#define	PVO_PTEGIDX_MASK	0x007UL		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x008UL		/* slot is valid */
/* Used by 64-bit PMAP */
#define	PVO_HID			0x008UL		/* PVO entry in alternate hash*/
/* Used by both */
#define	PVO_WIRED		0x010UL		/* PVO entry is wired */
#define	PVO_MANAGED		0x020UL		/* PVO entry is managed */
#define	PVO_BOOTSTRAP		0x080UL		/* PVO entry allocated during
						   bootstrap */
#define	PVO_DEAD		0x100UL		/* waiting to be deleted */
#define	PVO_LARGE		0x200UL		/* large page */
/* Strip the flag bits to recover the page-aligned virtual address. */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
/* VSID of the mapping, held in the upper bits of the virtual page number. */
#define	PVO_VSID(pvo)		((pvo)->pvo_vpn >> 16)
159bef5da7fSNathan Whitehorn
/*
 * Machine-dependent per-address-space structure.  The MMU-specific state
 * lives in an anonymous union with one arm per supported MMU flavor:
 * the AIM hashed-page-table/SLB MMU, the powerpc64 radix MMU, and the
 * Book-E TLB-based MMU.  Only the arm matching the active MMU is valid.
 */
struct pmap {
	struct	pmap_statistics	pm_stats;	/* pmap statistics */
	struct	mtx	pm_mtx;			/* per-pmap mutex */
	cpuset_t	pm_active;		/* CPUs this pmap is active on */
	union {
		/* AIM (hashed page table) fields. */
		struct {
#ifdef __powerpc64__
			struct slbtnode	*pm_slb_tree_root;
			struct slb	**pm_slb;
			int		pm_slb_len;
#else
			register_t	pm_sr[16];
#endif

			struct pmap	*pmap_phys;
			struct pvo_tree pmap_pvo;
		};
#ifdef __powerpc64__
		/* Radix support */
		struct {
			pml1_entry_t	*pm_pml1;	/* KVA of root page directory */
			struct vm_radix	 pm_radix;	/* spare page table pages */
			TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
			uint64_t	pm_pid;	/* PIDR value */
			int		pm_flags;
		};
#endif
		/* Book-E (software-managed TLB) fields. */
		struct {
			/* TID to identify this pmap entries in TLB */
			tlbtid_t	pm_tid[MAXCPU];

#ifdef __powerpc64__
			/*
			 * Page table directory,
			 * array of pointers to page directories.
			 */
			pte_t ****pm_root;
#else
			/*
			 * Page table directory,
			 * array of pointers to page tables.
			 */
			pte_t		**pm_pdir;

			/* List of allocated ptbl bufs (ptbl kva regions). */
			TAILQ_HEAD(, ptbl_buf)	pm_ptbl_list;
#endif
		};
	} __aligned(CACHE_LINE_SIZE);
};
21065bbba25SJustin Hibbits
/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCPV	126			/* pv_entries per chunk */
#define	_NPCM	howmany(_NPCPV, 64)	/* 64-bit words in the free bitmap */

/*
 * Fields shared by struct pv_chunk and struct pv_chunk_header, kept in a
 * macro so the header-only view stays in sync with the full chunk layout.
 */
#define	PV_CHUNK_HEADER							\
	pmap_t			pc_pmap;				\
	TAILQ_ENTRY(pv_chunk)	pc_list;				\
	uint64_t		pc_map[_NPCM];	/* bitmap; 1 = free */	\
	TAILQ_ENTRY(pv_chunk)	pc_lru;

/* One virtual mapping of a managed page. */
struct pv_entry {
	pmap_t		pv_pmap;	/* pmap owning this mapping */
	vm_offset_t	pv_va;		/* virtual address of the mapping */
	TAILQ_ENTRY(pv_entry)	pv_link;
};
typedef struct pv_entry *pv_entry_t;

struct pv_chunk_header {
	PV_CHUNK_HEADER
};
struct pv_chunk {
	PV_CHUNK_HEADER
	uint64_t		reserved;
	struct pv_entry		pc_pventry[_NPCPV];
};
23965bbba25SJustin Hibbits
/*
 * Machine-dependent per-page data, embedded in struct vm_page.  The two
 * anonymous-union arms overlay AIM/radix bookkeeping with the Book-E
 * tracked-page flag; which arm is meaningful depends on the active MMU.
 */
struct md_page {
	union {
		struct {
			volatile int32_t mdpg_attrs;	   /* page attribute bits */
			vm_memattr_t	 mdpg_cache_attrs; /* cache attributes */
			struct	pvo_head mdpg_pvoh;	   /* list of mappings */
			int		 pv_gen;	   /* (p) */
		};
		struct {
			int pv_tracked;
		};
	};
	TAILQ_HEAD(, pv_entry) pv_list;		/* (p) */
};

#ifdef AIM
#define	pmap_page_get_memattr(m)	((m)->md.mdpg_cache_attrs)
#else
#define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
#endif /* AIM */
260ffb56695SRafal Jaworowski
261c3e289e1SNathan Whitehorn /*
262c3e289e1SNathan Whitehorn * Return the VSID corresponding to a given virtual address.
263c3e289e1SNathan Whitehorn * If no VSID is currently defined, it will allocate one, and add
264c3e289e1SNathan Whitehorn * it to a free slot if available.
265c3e289e1SNathan Whitehorn *
266c3e289e1SNathan Whitehorn * NB: The PMAP MUST be locked already.
267c3e289e1SNathan Whitehorn */
268c3e289e1SNathan Whitehorn uint64_t va_to_vsid(pmap_t pm, vm_offset_t va);
26995fa3335SNathan Whitehorn
27095fa3335SNathan Whitehorn /* Lock-free, non-allocating lookup routines */
27195fa3335SNathan Whitehorn uint64_t kernel_va_to_slbv(vm_offset_t va);
27295fa3335SNathan Whitehorn struct slb *user_va_to_slb_entry(pmap_t pm, vm_offset_t va);
273c3e289e1SNathan Whitehorn
2746416b9a8SNathan Whitehorn uint64_t allocate_user_vsid(pmap_t pm, uint64_t esid, int large);
27595fa3335SNathan Whitehorn void free_vsid(pmap_t pm, uint64_t esid, int large);
2766416b9a8SNathan Whitehorn void slb_insert_user(pmap_t pm, struct slb *slb);
2776416b9a8SNathan Whitehorn void slb_insert_kernel(uint64_t slbe, uint64_t slbv);
27895fa3335SNathan Whitehorn
27995fa3335SNathan Whitehorn struct slbtnode *slb_alloc_tree(void);
28095fa3335SNathan Whitehorn void slb_free_tree(pmap_t pm);
2816416b9a8SNathan Whitehorn struct slb **slb_alloc_user_cache(void);
2826416b9a8SNathan Whitehorn void slb_free_user_cache(struct slb **);
283c3e289e1SNathan Whitehorn
2845244eac9SBenno Rice extern struct pmap kernel_pmap_store;
2855244eac9SBenno Rice #define kernel_pmap (&kernel_pmap_store)
2865244eac9SBenno Rice
287f9bac91bSBenno Rice #ifdef _KERNEL
288f9bac91bSBenno Rice
/* Accessors for the per-pmap mutex (pm_mtx). */
#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, \
				    (pmap == kernel_pmap) ? "kernelpmap" : \
				    "pmap", NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)

/* True if any mapping of the page may be writeable. */
#define	pmap_page_is_write_mapped(m)	(((m)->a.flags & PGA_WRITEABLE) != 0)

/* No machine-dependent page-allocation sanity check on powerpc. */
#define	pmap_vm_page_alloc_check(m)
304041b7317SKonstantin Belousov
3055244eac9SBenno Rice void pmap_bootstrap(vm_offset_t, vm_offset_t);
30620b79612SRafal Jaworowski void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
307611aec25SJustin Hibbits void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t);
3085501d40bSJake Burkholder void pmap_kremove(vm_offset_t);
30920b79612SRafal Jaworowski void *pmap_mapdev(vm_paddr_t, vm_size_t);
3102109efd1SJustin Hibbits void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
3117ae99f80SJohn Baldwin void pmap_unmapdev(void *, vm_size_t);
312c1f4123bSNathan Whitehorn void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
3130f7aeab0SJustin Hibbits int pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
31404329fa7SNathan Whitehorn int pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr,
31504329fa7SNathan Whitehorn void **kaddr, size_t ulen, size_t *klen);
316eb1baf72SNathan Whitehorn int pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user,
317eb1baf72SNathan Whitehorn vm_offset_t *decoded_addr);
318ac6ba8bdSBenno Rice void pmap_deactivate(struct thread *);
31920b79612SRafal Jaworowski vm_paddr_t pmap_kextract(vm_offset_t);
32020b79612SRafal Jaworowski int pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
321*1f1b2286SJohn Baldwin bool pmap_mmu_install(char *name, int prio);
32245b69dd6SJustin Hibbits void pmap_mmu_init(void);
3230ecc478bSLeandro Lupori const char *pmap_mmu_name(void);
32465bbba25SJustin Hibbits bool pmap_ps_enabled(pmap_t pmap);
32565bbba25SJustin Hibbits int pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags);
326*1f1b2286SJohn Baldwin bool pmap_page_is_mapped(vm_page_t m);
3271e0e335bSKonstantin Belousov #define pmap_map_delete(pmap, sva, eva) pmap_remove(pmap, sva, eva)
328f9c702dbSPeter Grehan
329caef3e12SJustin Hibbits void pmap_page_array_startup(long count);
330caef3e12SJustin Hibbits
331696effb6SJohn Baldwin #define vtophys(va) pmap_kextract((vm_offset_t)(va))
332a0889814SBenno Rice
333f9bac91bSBenno Rice extern vm_offset_t virtual_avail;
334f9bac91bSBenno Rice extern vm_offset_t virtual_end;
3350ecc478bSLeandro Lupori extern caddr_t crashdumpmap;
336f9bac91bSBenno Rice
3375244eac9SBenno Rice extern vm_offset_t msgbuf_phys;
338f9bac91bSBenno Rice
339f9c702dbSPeter Grehan extern int pmap_bootstrapped;
34065bbba25SJustin Hibbits extern int radix_mmu;
3415b58b1aaSLeandro Lupori extern int superpages_enabled;
342f9c702dbSPeter Grehan
34376384bd1SLeandro Lupori #ifdef AIM
34476384bd1SLeandro Lupori void pmap_early_io_map_init(void);
34576384bd1SLeandro Lupori #endif
34633724f17SNathan Whitehorn vm_offset_t pmap_early_io_map(vm_paddr_t pa, vm_size_t size);
34760152a40SJustin Hibbits void pmap_early_io_unmap(vm_offset_t va, vm_size_t size);
348b2f831c0SJustin Hibbits void pmap_track_page(pmap_t pmap, vm_offset_t va);
34965bbba25SJustin Hibbits void pmap_page_print_mappings(vm_page_t m);
350b923b34aSJustin Hibbits void pmap_tlbie_all(void);
35133724f17SNathan Whitehorn
352e7a9df16SKonstantin Belousov static inline int
pmap_vmspace_copy(pmap_t dst_pmap __unused,pmap_t src_pmap __unused)353e7a9df16SKonstantin Belousov pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)
354e7a9df16SKonstantin Belousov {
355e7a9df16SKonstantin Belousov
356e7a9df16SKonstantin Belousov return (0);
357e7a9df16SKonstantin Belousov }
358e7a9df16SKonstantin Belousov
359f9bac91bSBenno Rice #endif
360f9bac91bSBenno Rice
3615244eac9SBenno Rice #endif /* !_MACHINE_PMAP_H_ */
362