/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <machine/pte.h>

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/_pv_entry.h>

#include <vm/_vm_radix.h>

#ifdef _KERNEL

#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))
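
/*
 * Usage sketch: vtophys() resolves a wired kernel virtual address to the
 * physical address backing it, e.g.
 *
 *	vm_paddr_t pa = vtophys(buf);
 */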

#endif

#define	pmap_page_get_memattr(m)	((m)->md.pv_memattr)
#define	pmap_page_is_write_mapped(m)	(((m)->a.flags & PGA_WRITEABLE) != 0)
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
#define	pmap_map_delete(pmap, sva, eva)	pmap_remove(pmap, sva, eva)

/*
 * Pmap data structures.
 */

struct md_page {
	TAILQ_HEAD(,pv_entry)	pv_list;	/* list of mappings of this page */
	int			pv_gen;		/* generation count for pv_list */
	vm_memattr_t		pv_memattr;	/* page's memory attribute */
};

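/*
 * Translation stage of a pmap: PM_STAGE1 pmaps provide the usual
 * virtual-to-physical translation, while PM_STAGE2 pmaps translate guest
 * physical addresses on behalf of the hypervisor.
 */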
enum pmap_stage {
	PM_INVALID,
	PM_STAGE1,
	PM_STAGE2,
};

struct pmap {
	struct mtx		pm_mtx;
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	pd_entry_t		*pm_top;	/* top-level page table page */
	u_long			pm_satp;	/* value for SATP register */
	cpuset_t		pm_active;	/* active on cpus */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
	struct vm_radix		pm_root;	/* page table pages */
	enum pmap_stage		pm_stage;	/* translation stage */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)
#define	pmap_kernel()	kernel_pmap

#define	PMAP_ASSERT_LOCKED(pmap) \
	mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
	mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
	    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_OWNED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
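
/*
 * Usage sketch: code that inspects or modifies a pmap's page tables holds
 * the pmap lock across the operation, e.g.
 *
 *	PMAP_LOCK(pmap);
 *	... read or update page table entries ...
 *	PMAP_UNLOCK(pmap);
 */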

extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

/*
 * Macro to test whether a mapping can be created with an L1 (1 GB)
 * superpage: both the virtual and physical addresses must be L1-aligned
 * and the request must cover at least L1_SIZE bytes.
 */
#define	L1_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_OFFSET) == 0 && (size) >= L1_SIZE)

enum pmap_mode {
	PMAP_MODE_SV39,		/* 39-bit virtual addressing, 3-level page tables */
	PMAP_MODE_SV48,		/* 48-bit virtual addressing, 4-level page tables */
};

extern enum pmap_mode pmap_mode;

/* Check if an address resides in a mappable region. */
#define	VIRT_IS_VALID(va)						\
	((va) < (pmap_mode == PMAP_MODE_SV39 ? VM_MAX_USER_ADDRESS_SV39 :	\
	    VM_MAX_USER_ADDRESS_SV48) || (va) >= VM_MIN_KERNEL_ADDRESS)

struct thread;

#define	pmap_vm_page_alloc_check(m)

void	pmap_activate_boot(pmap_t);
void	pmap_activate_sw(struct thread *);
void	pmap_bootstrap(vm_paddr_t, vm_size_t);
int	pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
void	pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode);
void	pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
vm_paddr_t pmap_kextract(vm_offset_t va);
void	pmap_kremove(vm_offset_t);
void	pmap_kremove_device(vm_offset_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma);
int	pmap_pinit_stage(pmap_t, enum pmap_stage);
bool	pmap_page_is_mapped(vm_page_t m);
bool	pmap_ps_enabled(pmap_t);

void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	pmap_unmapdev(void *, vm_size_t);
void	pmap_unmapbios(void *, vm_size_t);
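
/*
 * Usage sketch: map a device's register window into the kernel address
 * space, access it, and unmap it when done, e.g.
 *
 *	void *regs = pmap_mapdev(pa, size);
 *	...
 *	pmap_unmapdev(regs, size);
 */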

bool	pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool);
void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool);

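/*
 * Retrieve pointers to the page table entries at each level (L1, L2 and
 * L3) for a virtual address; the boolean result indicates whether the
 * lookup succeeded.
 */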
bool	pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
	    pt_entry_t **);

int	pmap_fault(pmap_t, vm_offset_t, vm_prot_t);

static inline int
pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)
{

	return (0);
}

#endif	/* _KERNEL */

#endif	/* !LOCORE */

#endif	/* !_MACHINE_PMAP_H_ */