/*-
 * Copyright (c) 2013 Ian Lepore <ian@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Routines for mapping device memory. */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/devmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <machine/vmparam.h>

static const struct devmap_entry *devmap_table;
static boolean_t devmap_bootstrap_done = false;

/*
 * The allocated-kva (akva) devmap table and metadata.  Platforms can call
 * devmap_add_entry() to add static device mappings to this table using
 * automatically allocated virtual addresses carved out of the top of kva
 * space.  Allocation begins immediately below the max kernel virtual address.
 */
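
/*
 * A minimal sketch of the downward allocation, assuming DEVMAP_MAX_VADDR is
 * page-aligned (hypothetical sizes, illustrative only): a first
 * devmap_add_entry(pa0, 0x1000) gets its mapping at
 * DEVMAP_MAX_VADDR - 0x1000, and a second devmap_add_entry(pa1, 0x2000)
 * lands immediately below it, at DEVMAP_MAX_VADDR - 0x3000.
 */
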
#define	AKVA_DEVMAP_MAX_ENTRIES	32
static struct devmap_entry	akva_devmap_entries[AKVA_DEVMAP_MAX_ENTRIES];
static u_int			akva_devmap_idx;
static vm_offset_t		akva_devmap_vaddr = DEVMAP_MAX_VADDR;

#if defined(__aarch64__) || defined(__riscv)
extern int early_boot;
#endif

/*
 * Print the contents of the static mapping table using the provided
 * printf-like output function (which will be either printf or db_printf).
 */
static void
devmap_dump_table(int (*prfunc)(const char *, ...))
{
	const struct devmap_entry *pd;

	if (devmap_table == NULL || devmap_table[0].pd_size == 0) {
		prfunc("No static device mappings.\n");
		return;
	}

	prfunc("Static device mappings:\n");
	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
		prfunc("  0x%08jx - 0x%08jx mapped at VA 0x%08jx\n",
		    (uintmax_t)pd->pd_pa,
		    (uintmax_t)(pd->pd_pa + pd->pd_size - 1),
		    (uintmax_t)pd->pd_va);
	}
}

/*
 * Print the contents of the static mapping table.  Used for bootverbose.
 */
void
devmap_print_table(void)
{
	devmap_dump_table(printf);
}

/*
 * Return the "last" kva address used by the registered devmap table.  It's
 * actually the lowest address used by the static mappings, i.e., the address
 * of the first unusable byte of KVA.
 */
vm_offset_t
devmap_lastaddr(void)
{
	const struct devmap_entry *pd;
	vm_offset_t lowaddr;

	if (akva_devmap_idx > 0)
		return (akva_devmap_vaddr);

	lowaddr = DEVMAP_MAX_VADDR;
	for (pd = devmap_table; pd != NULL && pd->pd_size != 0; ++pd) {
		if (lowaddr > pd->pd_va)
			lowaddr = pd->pd_va;
	}

	return (lowaddr);
}
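
/*
 * Sketch of a hypothetical consumer of devmap_lastaddr() in platform init
 * code (illustrative only).  Everything at or above the returned address
 * belongs to the static device mappings, so a platform must not hand that
 * range out for other uses:
 *
 *	vm_offset_t kva_top;
 *
 *	kva_top = devmap_lastaddr();
 */
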
/*
 * Add an entry to the internal "akva" static devmap table using the given
 * physical address and size, and a virtual address allocated from the top of
 * kva.  This automatically registers the akva table on the first call, so
 * all a platform has to do is call this routine to install as many mappings
 * as it needs; when the platform-specific init function calls
 * devmap_bootstrap(), all the entries in the akva table are picked up
 * automatically.
 */
void
devmap_add_entry(vm_paddr_t pa, vm_size_t sz)
{
	struct devmap_entry *m;

	if (devmap_bootstrap_done)
		panic("devmap_add_entry() after devmap_bootstrap()");

	/*
	 * The last slot must stay all-zero: it is the terminator entry that
	 * devmap_bootstrap() and the lookup routines rely on.
	 */
	if (akva_devmap_idx == (AKVA_DEVMAP_MAX_ENTRIES - 1))
		panic("AKVA_DEVMAP_MAX_ENTRIES is too small");

	if (akva_devmap_idx == 0)
		devmap_register_table(akva_devmap_entries);

	/* Allocate virtual address space from the top of kva downwards. */
#ifdef __arm__
	/*
	 * If the range being mapped is aligned and sized to 1MB boundaries
	 * then also align the virtual address to the next-lower 1MB boundary
	 * so that we end up with a nice efficient section mapping.
	 */
	if ((pa & 0x000fffff) == 0 && (sz & 0x000fffff) == 0) {
		akva_devmap_vaddr = trunc_1mpage(akva_devmap_vaddr - sz);
	} else
#endif
	{
		akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - sz);
	}
	m = &akva_devmap_entries[akva_devmap_idx++];
	m->pd_va = akva_devmap_vaddr;
	m->pd_pa = pa;
	m->pd_size = sz;
}

/*
 * Register the given table as the one to use in devmap_bootstrap().
 */
void
devmap_register_table(const struct devmap_entry *table)
{

	devmap_table = table;
}
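
/*
 * The two usage styles, sketched with hypothetical addresses and names
 * (illustrative only).  The akva style:
 *
 *	devmap_add_entry(0x44e00000, 0x00100000);
 *	devmap_add_entry(0x4a000000, 0x01000000);
 *	devmap_bootstrap(0, NULL);
 *
 * The explicit-table style, where the table must end with an all-zero
 * terminator entry:
 *
 *	static const struct devmap_entry db_devmap[] = {
 *		{ .pd_va = 0xfff00000, .pd_pa = 0x44e00000,
 *		    .pd_size = 0x00100000 },
 *		{ 0 },
 *	};
 *	devmap_register_table(db_devmap);
 */
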
/*
 * Map all of the static regions in the devmap table, and remember the devmap
 * table so the mapdev, ptov, and vtop functions can do lookups later.
 *
 * If a non-NULL table pointer is given it is used unconditionally, otherwise
 * the previously-registered table is used.  This smooths the transition from
 * legacy code that fills in a local table and passes it to this function, to
 * newer code that registers the table with devmap_register_table() in
 * platform-specific code and then lets the common init code call this
 * function with a NULL pointer.
 */
void
devmap_bootstrap(vm_offset_t l1pt, const struct devmap_entry *table)
{
	const struct devmap_entry *pd;

	devmap_bootstrap_done = true;

	/*
	 * If given a table pointer, use it.  Otherwise, if a table was
	 * previously registered, use it.  Otherwise, no work to do.
	 */
	if (table != NULL)
		devmap_table = table;
	else if (devmap_table == NULL)
		return;

	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
#if defined(__arm__)
#if __ARM_ARCH >= 6
		pmap_preboot_map_attr(pd->pd_pa, pd->pd_va, pd->pd_size,
		    VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE);
#else
		pmap_map_chunk(l1pt, pd->pd_va, pd->pd_pa, pd->pd_size,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_DEVICE);
#endif
#elif defined(__aarch64__) || defined(__riscv)
		pmap_kenter_device(pd->pd_va, pd->pd_size, pd->pd_pa);
#endif
	}
}

/*
 * Look up the given physical address in the static mapping data and return
 * the corresponding virtual address, or NULL if not found.
 */
void *
devmap_ptov(vm_paddr_t pa, vm_size_t size)
{
	const struct devmap_entry *pd;

	if (devmap_table == NULL)
		return (NULL);

	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
		if (pa >= pd->pd_pa && pa + size <= pd->pd_pa + pd->pd_size)
			return ((void *)(pd->pd_va + (pa - pd->pd_pa)));
	}

	return (NULL);
}
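
/*
 * A worked example of the lookup arithmetic (hypothetical entry,
 * illustrative only): for an entry with pd_pa 0x44e00000, pd_va 0xfff00000,
 * and pd_size 0x00100000, devmap_ptov(0x44e09000, 0x1000) falls entirely
 * inside the entry and returns (void *)0xfff09000, i.e. pd_va plus the
 * 0x9000 offset of pa within the physical range.
 */
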
/*
 * Look up the given virtual address in the static mapping data and return
 * the corresponding physical address, or DEVMAP_PADDR_NOTFOUND if not found.
 */
vm_paddr_t
devmap_vtop(void *vpva, vm_size_t size)
{
	const struct devmap_entry *pd;
	vm_offset_t va;

	if (devmap_table == NULL)
		return (DEVMAP_PADDR_NOTFOUND);

	va = (vm_offset_t)vpva;
	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
		if (va >= pd->pd_va && va + size <= pd->pd_va + pd->pd_size)
			return ((vm_paddr_t)(pd->pd_pa + (va - pd->pd_va)));
	}

	return (DEVMAP_PADDR_NOTFOUND);
}

/*
 * Map a set of physical memory pages into the kernel virtual address space.
 * Return a pointer to where it is mapped.
 *
 * This uses a pre-established static mapping if one exists for the requested
 * range, otherwise it allocates kva space and maps the physical pages into
 * it.
 *
 * This routine is intended to be used for mapping device memory, NOT real
 * memory; the mapping type is inherently VM_MEMATTR_DEVICE in
 * pmap_kenter_device().
 */
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
	vm_offset_t va, offset;
	void *rva;

	/* First look in the static mapping table. */
	if ((rva = devmap_ptov(pa, size)) != NULL)
		return (rva);

	offset = pa & PAGE_MASK;
	pa = trunc_page(pa);
	size = round_page(size + offset);

#if defined(__aarch64__) || defined(__riscv)
	if (early_boot) {
		akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - size);
		va = akva_devmap_vaddr;
		KASSERT(va >= VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE,
		    ("Too many early devmap mappings"));
	} else
#endif
		va = kva_alloc(size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	pmap_kenter_device(va, size, pa);

	return ((void *)(va + offset));
}
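
/*
 * Sketch of a typical consumer (hypothetical device address, illustrative
 * only):
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev(0x1c090000, 0x1000);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, 0x1000);
 *
 * When the range is covered by a static devmap entry, both calls are
 * effectively no-ops: the pre-established mapping is handed back, and it is
 * never torn down.
 */
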
#if defined(__aarch64__)
void *
pmap_mapdev_attr(vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, offset;
	void *rva;

	/* First look in the static mapping table. */
	if ((rva = devmap_ptov(pa, size)) != NULL)
		return (rva);

	offset = pa & PAGE_MASK;
	pa = trunc_page(pa);
	size = round_page(size + offset);

	if (early_boot) {
		akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - size);
		va = akva_devmap_vaddr;
		KASSERT(va >= VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE,
		    ("Too many early devmap mappings"));
	} else
		va = kva_alloc(size);
	if (!va)
		panic("pmap_mapdev_attr: Couldn't alloc kernel virtual memory");

	pmap_kenter(va, size, pa, ma);

	return ((void *)(va + offset));
}
#endif

/*
 * Unmap device memory and free the kva space.
 */
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t offset;

	/* Nothing to do if we find the mapping in the static table. */
	if (devmap_vtop((void *)va, size) != DEVMAP_PADDR_NOTFOUND)
		return;

	offset = va & PAGE_MASK;
	va = trunc_page(va);
	size = round_page(size + offset);

	pmap_kremove_device(va, size);
	kva_free(va, size);
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(devmap, db_show_devmap)
{
	devmap_dump_table(db_printf);
}

#endif /* DDB */
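
/*
 * Example ddb session exercising the command above (hypothetical mapping,
 * illustrative only; the output format is that of devmap_dump_table()):
 *
 *	db> show devmap
 *	Static device mappings:
 *	  0x44e00000 - 0x44efffff mapped at VA 0xfff00000
 */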