/*-
 * SPDX-License-Identifier: BSD-3-Clause AND BSD-4-Clause
 *
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Adapted for Freescale's e500 core CPUs.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	from: $NetBSD: pmap.h,v 1.17 2000/03/30 16:18:24 jdolecek Exp $
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <sys/queue.h>
#include <sys/tree.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <machine/sr.h>
#include <machine/pte.h>
#include <machine/slb.h>
#include <machine/tlb.h>
#include <machine/vmparam.h>
#ifdef __powerpc64__
#include <vm/vm_radix.h>
#endif

/*
 * The radix page table structure is described by levels 1-4.
 * See Fig. 33 on p. 1002 of the Power ISA v3.0B.
 *
 * Page directories and tables must be size aligned.
 */
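
/*
 * Rough coverage arithmetic implied by the level sizes documented below
 * (informational sketch derived from those comments, assuming 8-byte
 * entries; not normative):
 *
 *	L1: 64KB / 8B = 8192 entries x 512GB = 4PB mapped
 *	L2:  4KB / 8B =  512 entries x 1GB   = 512GB (one L1 entry)
 *	L3:  4KB / 8B =  512 entries x 2MB   = 1GB   (one L2 entry)
 *	L4:  4KB / 8B =  512 entries x 4KB   = 2MB   (one L3 entry, 4KB pages)
 *	L4: 256B / 8B =   32 entries x 64KB  = 2MB   (one L3 entry, 64KB pages)
 */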

/* Root page directory - 64k -- each entry covers 512GB */
typedef uint64_t pml1_entry_t;
/* l2 page directory - 4k -- each entry covers 1GB */
typedef uint64_t pml2_entry_t;
/* l3 page directory - 4k -- each entry covers 2MB */
typedef uint64_t pml3_entry_t;
/* l4 page directory - 256B/4k -- each entry covers 64k/4k */
typedef uint64_t pml4_entry_t;

typedef uint64_t pt_entry_t;

struct pmap;
typedef struct pmap *pmap_t;

#define	PMAP_ENTER_QUICK_LOCKED	0x10000000

#if !defined(NPMAPS)
#define	NPMAPS		32768
#endif /* !defined(NPMAPS) */

struct slbtnode;

struct pvo_entry {
	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
#ifndef __powerpc64__
	LIST_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
#endif
	union {
		RB_ENTRY(pvo_entry) pvo_plink;	/* Link to pmap entries */
		SLIST_ENTRY(pvo_entry) pvo_dlink; /* Link to delete entry */
	};
	struct {
#ifndef __powerpc64__
		/* 32-bit fields */
		pte_t	    pte;
#endif
		/* 64-bit fields */
		uintptr_t   slot;
		vm_paddr_t  pa;
		vm_prot_t   prot;
	} pvo_pte;
	pmap_t		pvo_pmap;	/* Owning pmap */
	vm_offset_t	pvo_vaddr;	/* VA of entry */
	uint64_t	pvo_vpn;	/* Virtual page number */
};
LIST_HEAD(pvo_head, pvo_entry);
SLIST_HEAD(pvo_dlist, pvo_entry);
RB_HEAD(pvo_tree, pvo_entry);
int pvo_vaddr_compare(struct pvo_entry *, struct pvo_entry *);
RB_PROTOTYPE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);

/* Used by 32-bit PMAP */
#define	PVO_PTEGIDX_MASK	0x007UL		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x008UL		/* slot is valid */
/* Used by 64-bit PMAP */
#define	PVO_HID			0x008UL		/* PVO entry in alternate hash */
/* Used by both */
#define	PVO_WIRED		0x010UL		/* PVO entry is wired */
#define	PVO_MANAGED		0x020UL		/* PVO entry is managed */
#define	PVO_BOOTSTRAP		0x080UL		/* PVO entry allocated during
						   bootstrap */
#define	PVO_DEAD		0x100UL		/* waiting to be deleted */
#define	PVO_LARGE		0x200UL		/* large page */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
#define	PVO_VSID(pvo)		((pvo)->pvo_vpn >> 16)
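
/*
 * The PVO flag bits above live in the low, page-offset bits of pvo_vaddr
 * (all of them are smaller than the page size), which is why PVO_VADDR()
 * masks with ~ADDR_POFF to recover the page-aligned VA.  Illustrative
 * sketch only, not code taken from the pmap implementation:
 *
 *	pvo->pvo_vaddr = (va & ~ADDR_POFF) | PVO_WIRED | PVO_MANAGED;
 *	...
 *	if (pvo->pvo_vaddr & PVO_WIRED)
 *		va = PVO_VADDR(pvo);	(page-aligned VA, flags stripped)
 */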

struct	pmap {
	struct	pmap_statistics	pm_stats;
	struct	mtx	pm_mtx;
	cpuset_t	pm_active;
	union {
		struct {
#ifdef __powerpc64__
			struct slbtnode	*pm_slb_tree_root;
			struct slb	**pm_slb;
			int		pm_slb_len;
#else
			register_t	pm_sr[16];
#endif

			struct pmap	*pmap_phys;
			struct pvo_tree	pmap_pvo;
		};
#ifdef __powerpc64__
		/* Radix support */
		struct {
			pml1_entry_t	*pm_pml1;	/* KVA of root page directory */
			struct vm_radix	 pm_radix;	/* spare page table pages */
			TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
			uint64_t	pm_pid;		/* PIDR value */
			int		pm_flags;
		};
#endif
		struct {
			/* TID to identify this pmap's entries in the TLB */
			tlbtid_t	pm_tid[MAXCPU];

#ifdef __powerpc64__
			/*
			 * Page table directory,
			 * array of pointers to page directories.
			 */
			pte_t ****pm_root;
#else
			/*
			 * Page table directory,
			 * array of pointers to page tables.
			 */
			pte_t	**pm_pdir;

			/* List of allocated ptbl bufs (ptbl kva regions). */
			TAILQ_HEAD(, ptbl_buf)	pm_ptbl_list;
#endif
		};
	} __aligned(CACHE_LINE_SIZE);
};
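
/*
 * The anonymous structs in the union above overlay MMU-specific state:
 * roughly, the first variant serves the hash-table (OEA/AIM) pmap, the
 * second the radix pmap on ISA 3.0 CPUs, and the third the Book-E
 * (TLB-driven, e500) pmap.  Only the variant matching the MMU selected
 * at boot is used for a given pmap.
 */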

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	2
#define	_NPCPV	126
#define	PV_CHUNK_HEADER							\
	pmap_t	pc_pmap;						\
	TAILQ_ENTRY(pv_chunk)	pc_list;				\
	uint64_t	pc_map[_NPCM];	/* bitmap; 1 = free */		\
	TAILQ_ENTRY(pv_chunk)	pc_lru;

struct pv_entry {
	pmap_t pv_pmap;
	vm_offset_t pv_va;
	TAILQ_ENTRY(pv_entry) pv_link;
};
typedef struct pv_entry *pv_entry_t;

struct pv_chunk_header {
	PV_CHUNK_HEADER
};
struct pv_chunk {
	PV_CHUNK_HEADER
	uint64_t	 reserved;
	struct pv_entry	 pc_pventry[_NPCPV];
};
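
/*
 * Size check (informal, assuming LP64 with 8-byte pointers and two-pointer
 * TAILQ_ENTRYs; not a compile-time assertion in this header):
 *
 *	sizeof(struct pv_entry) = 8 + 8 + 16 = 32 bytes
 *	header = 8 (pc_pmap) + 16 (pc_list) + 16 (pc_map[2]) + 16 (pc_lru) = 56
 *	56 + 8 (reserved) + 126 * 32 = 4096 bytes, i.e. one 4KB page per chunk,
 *	and _NPCM * 64 = 128 bitmap bits cover the 126 entries.
 */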

struct	md_page {
	union {
		struct {
			volatile int32_t mdpg_attrs;
			vm_memattr_t	 mdpg_cache_attrs;
			struct	pvo_head mdpg_pvoh;
			int		pv_gen;		/* (p) */
		};
		struct {
			int	pv_tracked;
		};
	};
	TAILQ_HEAD(, pv_entry)	pv_list;	/* (p) */
};

#ifdef AIM
#define	pmap_page_get_memattr(m)	((m)->md.mdpg_cache_attrs)
#else
#define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
#endif /* AIM */

/*
 * Return the VSID corresponding to a given virtual address.
 * If no VSID is currently defined, it will allocate one, and add
 * it to a free slot if available.
 *
 * NB: The PMAP MUST be locked already.
 */
uint64_t va_to_vsid(pmap_t pm, vm_offset_t va);

/* Lock-free, non-allocating lookup routines */
uint64_t kernel_va_to_slbv(vm_offset_t va);
struct slb *user_va_to_slb_entry(pmap_t pm, vm_offset_t va);

uint64_t allocate_user_vsid(pmap_t pm, uint64_t esid, int large);
void	free_vsid(pmap_t pm, uint64_t esid, int large);
void	slb_insert_user(pmap_t pm, struct slb *slb);
void	slb_insert_kernel(uint64_t slbe, uint64_t slbv);

struct slbtnode *slb_alloc_tree(void);
void	slb_free_tree(pmap_t pm);
struct slb **slb_alloc_user_cache(void);
void	slb_free_user_cache(struct slb **);

extern	struct pmap kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#ifdef _KERNEL

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, \
				    (pmap == kernel_pmap) ? "kernelpmap" : \
				    "pmap", NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
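
/*
 * Typical locking pattern around pmap modifications (illustrative sketch
 * only; callers and callees vary across the pmap implementations):
 *
 *	PMAP_LOCK(pm);
 *	... look up or modify pm's mappings ...
 *	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 *	PMAP_UNLOCK(pm);
 */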

#define	pmap_page_is_write_mapped(m)	(((m)->a.flags & PGA_WRITEABLE) != 0)

#define	pmap_vm_page_alloc_check(m)

void		pmap_bootstrap(vm_offset_t, vm_offset_t);
void		pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void		pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t);
void		pmap_kremove(vm_offset_t);
void		*pmap_mapdev(vm_paddr_t, vm_size_t);
void		*pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
void		pmap_unmapdev(vm_offset_t, vm_size_t);
void		pmap_page_set_memattr(vm_page_t, vm_memattr_t);
int		pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
int		pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr,
		    void **kaddr, size_t ulen, size_t *klen);
int		pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user,
		    vm_offset_t *decoded_addr);
void		pmap_deactivate(struct thread *);
vm_paddr_t	pmap_kextract(vm_offset_t);
int		pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
boolean_t	pmap_mmu_install(char *name, int prio);
void		pmap_mmu_init(void);
const char	*pmap_mmu_name(void);
bool		pmap_ps_enabled(pmap_t pmap);
int		pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags);
boolean_t	pmap_page_is_mapped(vm_page_t m);

void		pmap_page_array_startup(long count);

#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))

extern	vm_offset_t virtual_avail;
extern	vm_offset_t virtual_end;
extern	caddr_t crashdumpmap;

extern	vm_offset_t msgbuf_phys;

extern	int pmap_bootstrapped;
extern	int radix_mmu;
extern	int superpages_enabled;

#ifdef AIM
void	pmap_early_io_map_init(void);
#endif
vm_offset_t	pmap_early_io_map(vm_paddr_t pa, vm_size_t size);
void	pmap_early_io_unmap(vm_offset_t va, vm_size_t size);
void	pmap_track_page(pmap_t pmap, vm_offset_t va);
void	pmap_page_print_mappings(vm_page_t m);
void	pmap_tlbie_all(void);

static inline int
pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)
{

	return (0);
}

#endif

#endif /* !_MACHINE_PMAP_H_ */