/*-
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Adapted for Freescale's e500 core CPUs.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60f9bac91bSBenno Rice * 61ffb56695SRafal Jaworowski * from: $NetBSD: pmap.h,v 1.17 2000/03/30 16:18:24 jdolecek Exp $ 62f9bac91bSBenno Rice */ 63f9bac91bSBenno Rice 64f9bac91bSBenno Rice #ifndef _MACHINE_PMAP_H_ 65f9bac91bSBenno Rice #define _MACHINE_PMAP_H_ 66f9bac91bSBenno Rice 6748d0b1a0SAlan Cox #include <sys/queue.h> 68c3e289e1SNathan Whitehorn #include <sys/tree.h> 6948d0b1a0SAlan Cox #include <sys/_lock.h> 7048d0b1a0SAlan Cox #include <sys/_mutex.h> 715244eac9SBenno Rice #include <machine/sr.h> 727f89270bSPeter Grehan #include <machine/pte.h> 73c3e289e1SNathan Whitehorn #include <machine/slb.h> 74ffb56695SRafal Jaworowski #include <machine/tlb.h> 75ffb56695SRafal Jaworowski 7627457a80SMarcel Moolenaar struct pmap_md { 7727457a80SMarcel Moolenaar u_int md_index; 7827457a80SMarcel Moolenaar vm_paddr_t md_paddr; 7927457a80SMarcel Moolenaar vm_offset_t md_vaddr; 8027457a80SMarcel Moolenaar vm_size_t md_size; 8127457a80SMarcel Moolenaar }; 8227457a80SMarcel Moolenaar 83ffb56695SRafal Jaworowski #if defined(AIM) 84f9bac91bSBenno Rice 857c277971SPeter Grehan #if !defined(NPMAPS) 867c277971SPeter Grehan #define NPMAPS 32768 877c277971SPeter Grehan #endif /* !defined(NPMAPS) */ 887c277971SPeter Grehan 8995fa3335SNathan Whitehorn struct slbtnode; 90c3e289e1SNathan Whitehorn 91f9bac91bSBenno Rice struct pmap { 9248d0b1a0SAlan Cox struct mtx pm_mtx; 93c3e289e1SNathan Whitehorn 94c3e289e1SNathan Whitehorn #ifdef __powerpc64__ 9595fa3335SNathan Whitehorn struct slbtnode *pm_slb_tree_root; 96*6416b9a8SNathan Whitehorn struct slb **pm_slb; 97*6416b9a8SNathan Whitehorn int pm_slb_len; 98c3e289e1SNathan Whitehorn #else 99c3e289e1SNathan Whitehorn register_t pm_sr[16]; 100c3e289e1SNathan Whitehorn #endif 10160c7b36bSJohn Baldwin cpumask_t pm_active; 1022965a453SKip Macy uint32_t pm_gen_count; /* generation count (pmap lock dropped) */ 1032965a453SKip Macy u_int pm_retries; 10452a7870dSNathan Whitehorn 10552a7870dSNathan Whitehorn struct pmap *pmap_phys; 1065244eac9SBenno Rice 
struct pmap_statistics pm_stats; 107f9bac91bSBenno Rice }; 108f9bac91bSBenno Rice 109f9bac91bSBenno Rice typedef struct pmap *pmap_t; 110f9bac91bSBenno Rice 1115244eac9SBenno Rice struct pvo_entry { 1125244eac9SBenno Rice LIST_ENTRY(pvo_entry) pvo_vlink; /* Link to common virt page */ 1135244eac9SBenno Rice LIST_ENTRY(pvo_entry) pvo_olink; /* Link to overflow entry */ 11452a7870dSNathan Whitehorn union { 11552a7870dSNathan Whitehorn struct pte pte; /* 32 bit PTE */ 11652a7870dSNathan Whitehorn struct lpte lpte; /* 64 bit PTE */ 11752a7870dSNathan Whitehorn } pvo_pte; 1185244eac9SBenno Rice pmap_t pvo_pmap; /* Owning pmap */ 1195244eac9SBenno Rice vm_offset_t pvo_vaddr; /* VA of entry */ 120c3e289e1SNathan Whitehorn uint64_t pvo_vpn; /* Virtual page number */ 1215244eac9SBenno Rice }; 1225244eac9SBenno Rice LIST_HEAD(pvo_head, pvo_entry); 1235244eac9SBenno Rice 1245244eac9SBenno Rice struct md_page { 12552a7870dSNathan Whitehorn u_int64_t mdpg_attrs; 1265244eac9SBenno Rice struct pvo_head mdpg_pvoh; 1275244eac9SBenno Rice }; 1285244eac9SBenno Rice 1293153e878SAlan Cox #define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT 130ffb56695SRafal Jaworowski #define pmap_page_is_mapped(m) (!LIST_EMPTY(&(m)->md.mdpg_pvoh)) 1313153e878SAlan Cox #define pmap_page_set_memattr(m, ma) (void)0 132ffb56695SRafal Jaworowski 133c3e289e1SNathan Whitehorn /* 134c3e289e1SNathan Whitehorn * Return the VSID corresponding to a given virtual address. 135c3e289e1SNathan Whitehorn * If no VSID is currently defined, it will allocate one, and add 136c3e289e1SNathan Whitehorn * it to a free slot if available. 137c3e289e1SNathan Whitehorn * 138c3e289e1SNathan Whitehorn * NB: The PMAP MUST be locked already. 
139c3e289e1SNathan Whitehorn */ 140c3e289e1SNathan Whitehorn uint64_t va_to_vsid(pmap_t pm, vm_offset_t va); 14195fa3335SNathan Whitehorn 14295fa3335SNathan Whitehorn /* Lock-free, non-allocating lookup routines */ 14395fa3335SNathan Whitehorn uint64_t kernel_va_to_slbv(vm_offset_t va); 14495fa3335SNathan Whitehorn struct slb *user_va_to_slb_entry(pmap_t pm, vm_offset_t va); 145c3e289e1SNathan Whitehorn 146*6416b9a8SNathan Whitehorn uint64_t allocate_user_vsid(pmap_t pm, uint64_t esid, int large); 14795fa3335SNathan Whitehorn void free_vsid(pmap_t pm, uint64_t esid, int large); 148*6416b9a8SNathan Whitehorn void slb_insert_user(pmap_t pm, struct slb *slb); 149*6416b9a8SNathan Whitehorn void slb_insert_kernel(uint64_t slbe, uint64_t slbv); 15095fa3335SNathan Whitehorn 15195fa3335SNathan Whitehorn struct slbtnode *slb_alloc_tree(void); 15295fa3335SNathan Whitehorn void slb_free_tree(pmap_t pm); 153*6416b9a8SNathan Whitehorn struct slb **slb_alloc_user_cache(void); 154*6416b9a8SNathan Whitehorn void slb_free_user_cache(struct slb **); 155c3e289e1SNathan Whitehorn 156ffb56695SRafal Jaworowski #else 157ffb56695SRafal Jaworowski 158ffb56695SRafal Jaworowski struct pmap { 159ffb56695SRafal Jaworowski struct mtx pm_mtx; /* pmap mutex */ 160b2b734e7SRafal Jaworowski tlbtid_t pm_tid[MAXCPU]; /* TID to identify this pmap entries in TLB */ 16160c7b36bSJohn Baldwin cpumask_t pm_active; /* active on cpus */ 162ffb56695SRafal Jaworowski int pm_refs; /* ref count */ 163ffb56695SRafal Jaworowski struct pmap_statistics pm_stats; /* pmap statistics */ 164ffb56695SRafal Jaworowski 165ffb56695SRafal Jaworowski /* Page table directory, array of pointers to page tables. 
*/ 166ffb56695SRafal Jaworowski pte_t *pm_pdir[PDIR_NENTRIES]; 167ffb56695SRafal Jaworowski 168c3e289e1SNathan Whitehorn /* generation count (pmap lock dropped) */ 169c3e289e1SNathan Whitehorn uint32_t pm_gen_count; 170c3e289e1SNathan Whitehorn u_int pm_retries; 171c3e289e1SNathan Whitehorn 172ffb56695SRafal Jaworowski /* List of allocated ptbl bufs (ptbl kva regions). */ 173b2b734e7SRafal Jaworowski TAILQ_HEAD(, ptbl_buf) pm_ptbl_list; 174ffb56695SRafal Jaworowski }; 175ffb56695SRafal Jaworowski typedef struct pmap *pmap_t; 176ffb56695SRafal Jaworowski 177ffb56695SRafal Jaworowski struct pv_entry { 178ffb56695SRafal Jaworowski pmap_t pv_pmap; 179ffb56695SRafal Jaworowski vm_offset_t pv_va; 180ffb56695SRafal Jaworowski TAILQ_ENTRY(pv_entry) pv_link; 181ffb56695SRafal Jaworowski }; 182ffb56695SRafal Jaworowski typedef struct pv_entry *pv_entry_t; 183ffb56695SRafal Jaworowski 184ffb56695SRafal Jaworowski struct md_page { 185ffb56695SRafal Jaworowski TAILQ_HEAD(, pv_entry) pv_list; 186ffb56695SRafal Jaworowski }; 187ffb56695SRafal Jaworowski 1883153e878SAlan Cox #define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT 189ffb56695SRafal Jaworowski #define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list)) 1903153e878SAlan Cox #define pmap_page_set_memattr(m, ma) (void)0 191ffb56695SRafal Jaworowski 192ffb56695SRafal Jaworowski #endif /* AIM */ 193ffb56695SRafal Jaworowski 1945244eac9SBenno Rice extern struct pmap kernel_pmap_store; 1955244eac9SBenno Rice #define kernel_pmap (&kernel_pmap_store) 1965244eac9SBenno Rice 197f9bac91bSBenno Rice #ifdef _KERNEL 198f9bac91bSBenno Rice 19948d0b1a0SAlan Cox #define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx) 20048d0b1a0SAlan Cox #define PMAP_LOCK_ASSERT(pmap, type) \ 20148d0b1a0SAlan Cox mtx_assert(&(pmap)->pm_mtx, (type)) 20248d0b1a0SAlan Cox #define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx) 20348d0b1a0SAlan Cox #define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \ 20448d0b1a0SAlan Cox NULL, MTX_DEF) 
20548d0b1a0SAlan Cox #define PMAP_LOCKED(pmap) mtx_owned(&(pmap)->pm_mtx) 20648d0b1a0SAlan Cox #define PMAP_MTX(pmap) (&(pmap)->pm_mtx) 20748d0b1a0SAlan Cox #define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx) 20848d0b1a0SAlan Cox #define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) 20948d0b1a0SAlan Cox 2105244eac9SBenno Rice void pmap_bootstrap(vm_offset_t, vm_offset_t); 2115501d40bSJake Burkholder void pmap_kenter(vm_offset_t va, vm_offset_t pa); 2125501d40bSJake Burkholder void pmap_kremove(vm_offset_t); 2138bbfa33aSBenno Rice void *pmap_mapdev(vm_offset_t, vm_size_t); 2148bbfa33aSBenno Rice void pmap_unmapdev(vm_offset_t, vm_size_t); 215ac6ba8bdSBenno Rice void pmap_deactivate(struct thread *); 2165244eac9SBenno Rice vm_offset_t pmap_kextract(vm_offset_t); 217c0763d37SSuleiman Souhlal int pmap_dev_direct_mapped(vm_offset_t, vm_size_t); 218f9c702dbSPeter Grehan boolean_t pmap_mmu_install(char *name, int prio); 219f9c702dbSPeter Grehan 220696effb6SJohn Baldwin #define vtophys(va) pmap_kextract((vm_offset_t)(va)) 221a0889814SBenno Rice 222f9c702dbSPeter Grehan #define PHYS_AVAIL_SZ 128 223f9c702dbSPeter Grehan extern vm_offset_t phys_avail[PHYS_AVAIL_SZ]; 224f9bac91bSBenno Rice extern vm_offset_t virtual_avail; 225f9bac91bSBenno Rice extern vm_offset_t virtual_end; 226f9bac91bSBenno Rice 2275244eac9SBenno Rice extern vm_offset_t msgbuf_phys; 228f9bac91bSBenno Rice 229f9c702dbSPeter Grehan extern int pmap_bootstrapped; 230f9c702dbSPeter Grehan 23127457a80SMarcel Moolenaar extern vm_offset_t pmap_dumpsys_map(struct pmap_md *, vm_size_t, vm_size_t *); 23227457a80SMarcel Moolenaar extern void pmap_dumpsys_unmap(struct pmap_md *, vm_size_t, vm_offset_t); 23327457a80SMarcel Moolenaar 23427457a80SMarcel Moolenaar extern struct pmap_md *pmap_scan_md(struct pmap_md *); 23527457a80SMarcel Moolenaar 236f9bac91bSBenno Rice #endif 237f9bac91bSBenno Rice 2385244eac9SBenno Rice #endif /* !_MACHINE_PMAP_H_ */ 239