/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
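
/*
 * RB_AUGMENT_CHECK() hooks the rb-tree machinery in <sys/tree.h>: after tree
 * modifications iommu_gas_augment_entry() recomputes the per-node summaries
 * (first, last, free_down) that the free-space search relies on, and its
 * return value tells the tree code whether the update must keep propagating
 * toward the root.
 */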

#define	RB_AUGMENT_CHECK(entry) iommu_gas_augment_entry(entry)

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/iommu/iommu.h>
#include <dev/iommu/iommu_gas.h>
#include <dev/iommu/iommu_msi.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/iommu.h>
#include <dev/iommu/busdma_iommu.h>

/*
 * Guest Address Space management.
 */

static uma_zone_t iommu_map_entry_zone;

#ifdef INVARIANTS
static int iommu_check_free;
#endif

static void
intel_gas_init(void)
{

	iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY",
	    sizeof(struct iommu_map_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);
}
SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL);

struct iommu_map_entry *
iommu_gas_alloc_entry(struct iommu_domain *domain, u_int flags)
{
	struct iommu_map_entry *res;

	KASSERT((flags & ~(IOMMU_PGF_WAITOK)) == 0,
	    ("unsupported flags %x", flags));

	res = uma_zalloc(iommu_map_entry_zone, ((flags & IOMMU_PGF_WAITOK) !=
	    0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
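	/*
	 * On success, account the entry against the owning domain; the
	 * entries_cnt counter is what iommu_gas_init_domain() and
	 * iommu_gas_fini_domain() assert against to detect leaked entries.
	 */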
	if (res != NULL && domain != NULL) {
		res->domain = domain;
		atomic_add_int(&domain->entries_cnt, 1);
	}
	return (res);
}

void
iommu_gas_free_entry(struct iommu_map_entry *entry)
{
	struct iommu_domain *domain;

	domain = entry->domain;
	if (domain != NULL)
		atomic_subtract_int(&domain->entries_cnt, 1);
	uma_zfree(iommu_map_entry_zone, entry);
}

static int
iommu_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b)
{

	/* The last entry has zero size, hence <= */
	KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)",
	    a, (uintmax_t)a->start, (uintmax_t)a->end));
	KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)",
	    b, (uintmax_t)b->start, (uintmax_t)b->end));
	KASSERT(((a->flags | b->flags) & IOMMU_MAP_ENTRY_FAKE) != 0 ||
	    a->end <= b->start || b->end <= a->start ||
	    a->end == a->start || b->end == b->start,
	    ("overlapping entries %p (%jx, %jx) f %#x %p (%jx, %jx) f %#x"
	    " domain %p %p",
	    a, (uintmax_t)a->start, (uintmax_t)a->end, a->flags,
	    b, (uintmax_t)b->start, (uintmax_t)b->end, b->flags,
	    a->domain, b->domain));

	if (a->end < b->end)
		return (-1);
	else if (b->end < a->end)
		return (1);
	return (0);
}

/*
 * Update augmentation data based on data from children.
 * Return true if and only if the update changes the augmentation data.
 */
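/*
 * Augmentation fields maintained in every node:
 *   first     - the lowest entry->start in the subtree rooted at the node;
 *   last      - the highest entry->end in that subtree;
 *   free_down - the largest free gap between consecutive entries anywhere
 *               in that subtree.
 * They let the allocator prune whole subtrees that cannot contain a large
 * enough hole.
 */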
static bool
iommu_gas_augment_entry(struct iommu_map_entry *entry)
{
	struct iommu_map_entry *child;
	iommu_gaddr_t bound, delta, free_down;

	free_down = 0;
	bound = entry->start;
	if ((child = RB_LEFT(entry, rb_entry)) != NULL) {
		free_down = MAX(child->free_down, bound - child->last);
		bound = child->first;
	}
	delta = bound - entry->first;
	entry->first = bound;
	bound = entry->end;
	if ((child = RB_RIGHT(entry, rb_entry)) != NULL) {
		free_down = MAX(free_down, child->free_down);
		free_down = MAX(free_down, child->first - bound);
		bound = child->last;
	}
	delta += entry->last - bound;
	if (delta == 0)
		delta = entry->free_down - free_down;
	entry->last = bound;
	entry->free_down = free_down;

	/*
	 * Return true either if the value of last-first changed,
	 * or if free_down changed.
	 */
	return (delta != 0);
}

RB_GENERATE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
    iommu_gas_cmp_entries);

#ifdef INVARIANTS
static void
iommu_gas_check_free(struct iommu_domain *domain)
{
	struct iommu_map_entry *entry, *l, *r;
	iommu_gaddr_t v;

	RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
		KASSERT(domain == entry->domain,
		    ("mismatched free domain %p entry %p entry->domain %p",
		    domain, entry, entry->domain));
		l = RB_LEFT(entry, rb_entry);
		r = RB_RIGHT(entry, rb_entry);
		v = 0;
		if (l != NULL) {
			v = MAX(v, l->free_down);
			v = MAX(v, entry->start - l->last);
		}
		if (r != NULL) {
			v = MAX(v, r->free_down);
			v = MAX(v, r->first - entry->end);
		}
		MPASS(entry->free_down == v);
	}
}
#endif

static void
iommu_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *nbr;

	/* Removing entry may open a new free gap before domain->start_gap. */
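	/*
	 * If the removed entry lies at or below start_gap, repoint start_gap
	 * at a neighbour that is guaranteed to survive the removal: the
	 * in-order successor when a right subtree exists, otherwise the left
	 * child, otherwise the parent.
	 */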
	if (entry->end <= domain->start_gap->end) {
		if (RB_RIGHT(entry, rb_entry) != NULL)
			nbr = iommu_gas_entries_tree_RB_NEXT(entry);
		else if (RB_LEFT(entry, rb_entry) != NULL)
			nbr = RB_LEFT(entry, rb_entry);
		else
			nbr = RB_PARENT(entry, rb_entry);
		domain->start_gap = nbr;
	}
	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
}

struct iommu_domain *
iommu_get_ctx_domain(struct iommu_ctx *ctx)
{

	return (ctx->domain);
}

void
iommu_gas_init_domain(struct iommu_domain *domain)
{
	struct iommu_map_entry *begin, *end;

	begin = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	end = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);

	IOMMU_DOMAIN_LOCK(domain);
	KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain));
	KASSERT(RB_EMPTY(&domain->rb_root),
	    ("non-empty entries %p", domain));

	/*
	 * The end entry must be inserted first because it has a zero-length
	 * gap between start and end.  Initially, all augmentation data for a
	 * new entry is zero.  Function iommu_gas_augment_entry() will compute
	 * no change in the value of (last - first) and no change in the value
	 * of free_down, so it will return false to suggest that nothing
	 * changed in the entry.  Thus, inserting the end entry second would
	 * prevent the augmentation information from being propagated to the
	 * begin entry at the tree root.  So it is inserted first.
	 */
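	/*
	 * Both placeholders carry IOMMU_MAP_ENTRY_PLACE: they merely bracket
	 * the managed range (page zero and a zero-length entry at
	 * domain->end) and are only removed by iommu_gas_fini_domain().
	 */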
	end->start = domain->end;
	end->end = domain->end;
	end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
	RB_INSERT(iommu_gas_entries_tree, &domain->rb_root, end);

	begin->start = 0;
	begin->end = IOMMU_PAGE_SIZE;
	begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
	RB_INSERT_PREV(iommu_gas_entries_tree, &domain->rb_root, end, begin);

	domain->start_gap = begin;
	domain->first_place = begin;
	domain->last_place = end;
	domain->flags |= IOMMU_DOMAIN_GAS_INITED;
	IOMMU_DOMAIN_UNLOCK(domain);
}

void
iommu_gas_fini_domain(struct iommu_domain *domain)
{
	struct iommu_map_entry *entry;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT(domain->entries_cnt == 2,
	    ("domain still in use %p", domain));

	entry = RB_MIN(iommu_gas_entries_tree, &domain->rb_root);
	KASSERT(entry->start == 0, ("start entry start %p", domain));
	KASSERT(entry->end == IOMMU_PAGE_SIZE, ("start entry end %p", domain));
	KASSERT(entry->flags ==
	    (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED),
	    ("start entry flags %p", domain));
	iommu_gas_rb_remove(domain, entry);
	iommu_gas_free_entry(entry);

	entry = RB_MAX(iommu_gas_entries_tree, &domain->rb_root);
	KASSERT(entry->start == domain->end, ("end entry start %p", domain));
	KASSERT(entry->end == domain->end, ("end entry end %p", domain));
	KASSERT(entry->flags ==
	    (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED),
	    ("end entry flags %p", domain));
	iommu_gas_rb_remove(domain, entry);
	iommu_gas_free_entry(entry);
}

struct iommu_gas_match_args {
	iommu_gaddr_t size;
	int offset;
	const struct bus_dma_tag_common *common;
	u_int gas_flags;
	struct iommu_map_entry *entry;
};

/*
 * The interval [beg, end) is a free interval between two iommu_map_entries.
 * Addresses can be allocated only in the range [lbound, ubound].  Try to
 * allocate space in the free interval, subject to the conditions expressed
 * by a, and return 'true' if and only if the allocation attempt succeeds.
 */
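/*
 * The conditions carried in 'a' are the requested size and page offset, the
 * DMA tag's alignment and boundary restrictions, and the IOMMU_MF_CANSPLIT
 * flag, which permits satisfying the request with a truncated piece when a
 * boundary would otherwise be crossed.
 */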
static bool
iommu_gas_match_one(struct iommu_gas_match_args *a, iommu_gaddr_t beg,
    iommu_gaddr_t end, iommu_gaddr_t lbound, iommu_gaddr_t ubound)
{
	struct iommu_map_entry *entry;
	iommu_gaddr_t first, size, start;
	int offset;

	/*
	 * The prev->end is always aligned on the page size, which
	 * causes page alignment for the entry->start too.
	 *
	 * Create IOMMU_PAGE_SIZE gaps before and after the new entry
	 * to ensure that out-of-bounds accesses fault.
	 */
	beg = MAX(beg + IOMMU_PAGE_SIZE, lbound);
	start = roundup2(beg, a->common->alignment);
	if (start < beg)
		return (false);
	if (end < IOMMU_PAGE_SIZE + 1)
		return (false);
	end = MIN(end - IOMMU_PAGE_SIZE - 1, ubound);
	offset = a->offset;
	size = a->size;
	if (start + offset + size - 1 > end)
		return (false);

	/* Check for and try to skip past boundary crossing. */
	if (!vm_addr_bound_ok(start + offset, size, a->common->boundary)) {
		/*
		 * The start + offset to start + offset + size region crosses
		 * the boundary.  Check if there is enough space after the
		 * next boundary following beg.
		 */
		first = start;
		beg = roundup2(start + offset + 1, a->common->boundary);
		start = roundup2(beg, a->common->alignment);

		if (start + offset + size - 1 > end ||
		    !vm_addr_bound_ok(start + offset, size,
		    a->common->boundary)) {
			/*
			 * Not enough space to align at the requested
			 * boundary, or the boundary is smaller than the size,
			 * but we are allowed to split.  We already checked
			 * that start + size does not overlap ubound.
			 *
			 * XXXKIB. It is possible that beg is exactly at the
			 * start of the next entry, in which case we do not
			 * have a gap.  Ignore for now.
			 */
			if ((a->gas_flags & IOMMU_MF_CANSPLIT) == 0)
				return (false);
			size = beg - first - offset;
			start = first;
		}
	}
	entry = a->entry;
	entry->start = start;
	entry->end = start + roundup2(size + offset, IOMMU_PAGE_SIZE);
	entry->flags = IOMMU_MAP_ENTRY_MAP;
	return (true);
}

/* Find the next entry that might abut a big-enough range. */
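/*
 * This is an in-order successor walk pruned by the free_down augmentation:
 * subtrees whose free_down is below min_free cannot contain a usable gap
 * and are skipped entirely.
 */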
static struct iommu_map_entry *
iommu_gas_next(struct iommu_map_entry *curr, iommu_gaddr_t min_free)
{
	struct iommu_map_entry *next;

	if ((next = RB_RIGHT(curr, rb_entry)) != NULL &&
	    next->free_down >= min_free) {
		/* Find next entry in right subtree. */
		do
			curr = next;
		while ((next = RB_LEFT(curr, rb_entry)) != NULL &&
		    next->free_down >= min_free);
	} else {
		/* Find next entry in a left-parent ancestor. */
		while ((next = RB_PARENT(curr, rb_entry)) != NULL &&
		    curr == RB_RIGHT(next, rb_entry))
			curr = next;
		curr = next;
	}
	return (curr);
}

/*
 * Address-ordered first-fit search of 'domain' for free space satisfying the
 * conditions of 'a'.  The space allocated is at least one page big, and is
 * bounded by guard pages to the left and right.  The allocated space for
 * 'domain' is described by an rb-tree of map entries at domain->rb_root, and
 * domain->start_gap points to a map entry less than or adjacent to the first
 * free-space of size at least 3 pages.
 */
static int
iommu_gas_find_space(struct iommu_domain *domain,
    struct iommu_gas_match_args *a)
{
	struct iommu_map_entry *curr, *first;
	iommu_gaddr_t addr, min_free;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT(a->entry->flags == 0,
	    ("dirty entry %p %p", domain, a->entry));

	/*
	 * start_gap may point to an entry adjacent to gaps too small for any
	 * new allocation.  In that case, advance start_gap to the first free
	 * space big enough for a minimum allocation plus two guard pages.
	 */
	min_free = 3 * IOMMU_PAGE_SIZE;
	first = domain->start_gap;
	while (first != NULL && first->free_down < min_free)
		first = RB_PARENT(first, rb_entry);
	for (curr = first; curr != NULL;
	    curr = iommu_gas_next(curr, min_free)) {
		if ((first = RB_LEFT(curr, rb_entry)) != NULL &&
		    first->last + min_free <= curr->start)
			break;
		if ((first = RB_RIGHT(curr, rb_entry)) != NULL &&
		    curr->end + min_free <= first->first)
			break;
	}
	domain->start_gap = curr;

	/*
	 * If the subtree doesn't have free space for the requested allocation
	 * plus two guard pages, skip it.
	 */
	min_free = 2 * IOMMU_PAGE_SIZE +
	    roundup2(a->size + a->offset, IOMMU_PAGE_SIZE);

	/* Climb to find a node in the subtree of big-enough ranges. */
	first = curr;
	while (first != NULL && first->free_down < min_free)
		first = RB_PARENT(first, rb_entry);

	/*
	 * Walk the big-enough ranges tree until one satisfies alignment
	 * requirements, or violates lowaddr address requirement.
	 */
	addr = a->common->lowaddr;
	for (curr = first; curr != NULL;
	    curr = iommu_gas_next(curr, min_free)) {
		if ((first = RB_LEFT(curr, rb_entry)) != NULL &&
		    iommu_gas_match_one(a, first->last, curr->start,
		    0, addr)) {
			RB_INSERT_PREV(iommu_gas_entries_tree,
			    &domain->rb_root, curr, a->entry);
			return (0);
		}
		if (curr->end >= addr) {
			/* All remaining ranges > addr */
			break;
		}
		if ((first = RB_RIGHT(curr, rb_entry)) != NULL &&
		    iommu_gas_match_one(a, curr->end, first->first,
		    0, addr)) {
			RB_INSERT_NEXT(iommu_gas_entries_tree,
			    &domain->rb_root, curr, a->entry);
			return (0);
		}
	}

	/*
	 * To resume the search at the start of the upper region, first climb
	 * to the nearest ancestor that spans highaddr.  Then find the last
	 * entry before highaddr that could abut a big-enough range.
	 */
	addr = a->common->highaddr;
	while (curr != NULL && curr->last < addr)
		curr = RB_PARENT(curr, rb_entry);
	first = NULL;
	while (curr != NULL && curr->free_down >= min_free) {
		if (addr < curr->end)
			curr = RB_LEFT(curr, rb_entry);
		else {
			first = curr;
			curr = RB_RIGHT(curr, rb_entry);
		}
	}

	/*
	 * Walk the remaining big-enough ranges until one satisfies alignment
	 * requirements.
	 */
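	/*
	 * In this pass lbound is highaddr + 1 and ubound is domain->end - 1,
	 * so only space above the tag's exclusion window is considered.
	 */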
	for (curr = first; curr != NULL;
	    curr = iommu_gas_next(curr, min_free)) {
		if ((first = RB_LEFT(curr, rb_entry)) != NULL &&
		    iommu_gas_match_one(a, first->last, curr->start,
		    addr + 1, domain->end - 1)) {
			RB_INSERT_PREV(iommu_gas_entries_tree,
			    &domain->rb_root, curr, a->entry);
			return (0);
		}
		if ((first = RB_RIGHT(curr, rb_entry)) != NULL &&
		    iommu_gas_match_one(a, curr->end, first->first,
		    addr + 1, domain->end - 1)) {
			RB_INSERT_NEXT(iommu_gas_entries_tree,
			    &domain->rb_root, curr, a->entry);
			return (0);
		}
	}

	return (ENOMEM);
}

static int
iommu_gas_alloc_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
    u_int flags)
{
	struct iommu_map_entry *next, *prev;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

	if ((entry->start & IOMMU_PAGE_MASK) != 0 ||
	    (entry->end & IOMMU_PAGE_MASK) != 0)
		return (EINVAL);
	if (entry->start >= entry->end)
		return (EINVAL);
	if (entry->end >= domain->end)
		return (EINVAL);

	entry->flags |= IOMMU_MAP_ENTRY_FAKE;
	next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, entry);
	KASSERT(next != NULL, ("next must be non-null %p %jx", domain,
	    (uintmax_t)entry->start));
	prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
	/* prev could be NULL */
	entry->flags &= ~IOMMU_MAP_ENTRY_FAKE;

	/*
	 * Adapt to broken BIOSes which specify overlapping RMRR
	 * entries.
	 *
	 * XXXKIB: this does not handle a case when prev or next
	 * entries are completely covered by the current one, which
	 * extends both ways.
	 */
	if (prev != NULL && prev->end > entry->start &&
	    (prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
		if ((flags & IOMMU_MF_RMRR) == 0 ||
		    (prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
			return (EBUSY);
		entry->start = prev->end;
	}
	if (next->start < entry->end &&
	    (next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
		if ((flags & IOMMU_MF_RMRR) == 0 ||
		    (next->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
			return (EBUSY);
		entry->end = next->start;
	}
	if (entry->end == entry->start)
		return (0);

	if (prev != NULL && prev->end > entry->start) {
		/* This assumes that prev is the placeholder entry. */
		iommu_gas_rb_remove(domain, prev);
		prev = NULL;
	}
	RB_INSERT_PREV(iommu_gas_entries_tree,
	    &domain->rb_root, next, entry);
	if (next->start < entry->end) {
		iommu_gas_rb_remove(domain, next);
		next = NULL;
	}

	if ((flags & IOMMU_MF_RMRR) != 0)
		entry->flags = IOMMU_MAP_ENTRY_RMRR;

#ifdef INVARIANTS
	struct iommu_map_entry *ip, *in;
	ip = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
	in = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
	KASSERT(prev == NULL || ip == prev,
	    ("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)",
	    entry, entry->start, entry->end, prev,
	    prev == NULL ? 0 : prev->start, prev == NULL ? 0 : prev->end,
	    ip, ip == NULL ? 0 : ip->start, ip == NULL ? 0 : ip->end));
	KASSERT(next == NULL || in == next,
	    ("RMRR %p (%jx %jx) next %p (%jx %jx) ins next %p (%jx %jx)",
	    entry, entry->start, entry->end, next,
	    next == NULL ? 0 : next->start, next == NULL ? 0 : next->end,
	    in, in == NULL ? 0 : in->start, in == NULL ? 0 : in->end));
#endif

	return (0);
}

void
iommu_gas_free_space(struct iommu_map_entry *entry)
{
	struct iommu_domain *domain;

	domain = entry->domain;
	KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
	    IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP,
	    ("permanent entry %p %p", domain, entry));

	IOMMU_DOMAIN_LOCK(domain);
	iommu_gas_rb_remove(domain, entry);
	entry->flags &= ~IOMMU_MAP_ENTRY_MAP;
#ifdef INVARIANTS
	if (iommu_check_free)
		iommu_gas_check_free(domain);
#endif
	IOMMU_DOMAIN_UNLOCK(domain);
}

void
iommu_gas_free_region(struct iommu_map_entry *entry)
{
	struct iommu_domain *domain;

	domain = entry->domain;
	KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
	    IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR,
	    ("non-RMRR entry %p %p", domain, entry));

	IOMMU_DOMAIN_LOCK(domain);
	if (entry != domain->first_place &&
	    entry != domain->last_place)
		iommu_gas_rb_remove(domain, entry);
	entry->flags &= ~IOMMU_MAP_ENTRY_RMRR;
	IOMMU_DOMAIN_UNLOCK(domain);
}

static struct iommu_map_entry *
iommu_gas_remove_clip_left(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end, struct iommu_map_entry **r)
{
	struct iommu_map_entry *entry, *res, fentry;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	MPASS(start <= end);
	MPASS(end <= domain->end);

	/*
	 * Find an entry which contains the supplied guest's address
	 * start, or the first entry after the start.  Since we
	 * asserted that start is below domain end, entry should
	 * exist.  Then clip it if needed.
	 */
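	/*
	 * The on-stack lookup key is marked IOMMU_MAP_ENTRY_FAKE so that the
	 * overlap assertion in iommu_gas_cmp_entries() tolerates it while
	 * RB_NFIND compares it against real entries.
	 */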
	bzero(&fentry, sizeof(fentry));
	fentry.start = start + 1;
	fentry.end = start + 1;
	fentry.flags = IOMMU_MAP_ENTRY_FAKE;
	entry = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &fentry);

	if (entry->start >= start ||
	    (entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
		return (entry);

	res = *r;
	*r = NULL;
	*res = *entry;
	res->start = entry->end = start;
	RB_UPDATE_AUGMENT(entry, rb_entry);
	RB_INSERT_NEXT(iommu_gas_entries_tree,
	    &domain->rb_root, entry, res);
	return (res);
}

static bool
iommu_gas_remove_clip_right(struct iommu_domain *domain,
    iommu_gaddr_t end, struct iommu_map_entry *entry,
    struct iommu_map_entry *r)
{
	if (entry->start >= end || (entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
		return (false);

	*r = *entry;
	r->end = entry->start = end;
	RB_UPDATE_AUGMENT(entry, rb_entry);
	RB_INSERT_PREV(iommu_gas_entries_tree,
	    &domain->rb_root, entry, r);
	return (true);
}

static void
iommu_gas_remove_unmap(struct iommu_domain *domain,
    struct iommu_map_entry *entry, struct iommu_map_entries_tailq *gcp)
{
	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

	if ((entry->flags & (IOMMU_MAP_ENTRY_UNMAPPED |
	    IOMMU_MAP_ENTRY_REMOVING)) != 0)
		return;
	MPASS((entry->flags & IOMMU_MAP_ENTRY_PLACE) == 0);
	entry->flags |= IOMMU_MAP_ENTRY_REMOVING;
	TAILQ_INSERT_TAIL(gcp, entry, dmamap_link);
}

static void
iommu_gas_remove_locked(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t size,
    struct iommu_map_entries_tailq *gc,
    struct iommu_map_entry **r1, struct iommu_map_entry **r2)
{
	struct iommu_map_entry *entry, *nentry;
	iommu_gaddr_t end;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

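	/*
	 * Entries overlapping [start, start + size) are clipped at the edges
	 * using the preallocated entries *r1 and *r2 and queued on 'gc' for
	 * deferred unmapping; RMRR entries are left untouched.
	 */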
	end = start + size;

	nentry = iommu_gas_remove_clip_left(domain, start, end, r1);
	RB_FOREACH_FROM(entry, iommu_gas_entries_tree, nentry) {
		if (entry->start >= end)
			break;
		KASSERT(start <= entry->start,
		    ("iommu_gas_remove entry (%#jx, %#jx) start %#jx",
		    entry->start, entry->end, start));
		if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
			continue;
		iommu_gas_remove_unmap(domain, entry, gc);
	}
	if (iommu_gas_remove_clip_right(domain, end, entry, *r2)) {
		iommu_gas_remove_unmap(domain, *r2, gc);
		*r2 = NULL;
	}

#ifdef INVARIANTS
	RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
		if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
			continue;
		KASSERT(entry->end <= start || entry->start >= end,
		    ("iommu_gas_remove leftover entry (%#jx, %#jx) range "
		    "(%#jx, %#jx)",
		    entry->start, entry->end, start, end));
	}
#endif
}

static void
iommu_gas_remove_init(struct iommu_domain *domain,
    struct iommu_map_entries_tailq *gc, struct iommu_map_entry **r1,
    struct iommu_map_entry **r2)
{
	TAILQ_INIT(gc);
	*r1 = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	*r2 = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
}

static void
iommu_gas_remove_cleanup(struct iommu_domain *domain,
    struct iommu_map_entries_tailq *gc, struct iommu_map_entry **r1,
    struct iommu_map_entry **r2)
{
	if (*r1 != NULL) {
		iommu_gas_free_entry(*r1);
		*r1 = NULL;
	}
	if (*r2 != NULL) {
		iommu_gas_free_entry(*r2);
		*r2 = NULL;
	}
	iommu_domain_unload(domain, gc, true);
}

/*
 * Remove specified range from the GAS of the domain.
 * Note that the removal is not guaranteed to occur upon the function
 * return; it might be finalized some time later, when the hardware
 * reports that the (queued) IOTLB invalidation was performed.
 */
void
iommu_gas_remove(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t size)
{
	struct iommu_map_entry *r1, *r2;
	struct iommu_map_entries_tailq gc;

	iommu_gas_remove_init(domain, &gc, &r1, &r2);
	IOMMU_DOMAIN_LOCK(domain);
	iommu_gas_remove_locked(domain, start, size, &gc, &r1, &r2);
	IOMMU_DOMAIN_UNLOCK(domain);
	iommu_gas_remove_cleanup(domain, &gc, &r1, &r2);
}

int
iommu_gas_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
	struct iommu_gas_match_args a;
	struct iommu_map_entry *entry;
	int error;

	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0,
	    ("invalid flags 0x%x", flags));

	a.size = size;
	a.offset = offset;
	a.common = common;
	a.gas_flags = flags;
	entry = iommu_gas_alloc_entry(domain,
	    (flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0);
	if (entry == NULL)
		return (ENOMEM);
	a.entry = entry;
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_find_space(domain, &a);
	if (error == ENOMEM) {
		IOMMU_DOMAIN_UNLOCK(domain);
		iommu_gas_free_entry(entry);
		return (error);
	}
#ifdef INVARIANTS
	if (iommu_check_free)
		iommu_gas_check_free(domain);
#endif
	KASSERT(error == 0,
	    ("unexpected error %d from iommu_gas_find_entry", error));
	KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx",
	    (uintmax_t)entry->end, (uintmax_t)domain->end));
	entry->flags |= eflags;
	IOMMU_DOMAIN_UNLOCK(domain);

	error = domain->ops->map(domain, entry->start,
	    entry->end - entry->start, ma, eflags,
	    ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
	if (error == ENOMEM) {
		iommu_domain_unload_entry(entry, true,
		    (flags & IOMMU_MF_CANWAIT) != 0);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from domain_map_buf", error));

	*res = entry;
	return (0);
}

int
iommu_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	iommu_gaddr_t start;
	int error;

	KASSERT(entry->domain == domain,
	    ("mismatched domain %p entry %p entry->domain %p", domain,
	    entry, entry->domain));
	KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
	    entry, entry->flags));
	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0,
	    ("invalid flags 0x%x", flags));

	start = entry->start;
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_alloc_region(domain, entry, flags);
	if (error != 0) {
		IOMMU_DOMAIN_UNLOCK(domain);
		return (error);
	}
	entry->flags |= eflags;
	IOMMU_DOMAIN_UNLOCK(domain);
	if (entry->end == entry->start)
		return (0);

	error = domain->ops->map(domain, entry->start,
	    entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start),
	    eflags, ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
	if (error == ENOMEM) {
		iommu_domain_unload_entry(entry, false,
		    (flags & IOMMU_MF_CANWAIT) != 0);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from domain_map_buf", error));

	return (0);
}

static int
iommu_gas_reserve_region_locked(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end, struct iommu_map_entry *entry)
{
	int error;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

	entry->start = start;
	entry->end = end;
	error = iommu_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT);
	if (error == 0)
		entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED;
	return (error);
}

int
iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end, struct iommu_map_entry **entry0)
{
	struct iommu_map_entry *entry;
	int error;

	entry = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_reserve_region_locked(domain, start, end, entry);
	IOMMU_DOMAIN_UNLOCK(domain);
	if (error != 0)
		iommu_gas_free_entry(entry);
	else if (entry0 != NULL)
		*entry0 = entry;
	return (error);
}

/*
 * As in iommu_gas_reserve_region, reserve [start, end), but allow for existing
 * entries.
 */
int
iommu_gas_reserve_region_extend(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end)
{
	struct iommu_map_entry *entry, *next, *prev, key = {};
	iommu_gaddr_t entry_start, entry_end;
	int error;

	error = 0;
	entry = NULL;
	end = ummin(end, domain->end);
	while (start < end) {
		/* Preallocate an entry. */
		if (entry == NULL)
			entry = iommu_gas_alloc_entry(domain,
			    IOMMU_PGF_WAITOK);
		/* Calculate the free region from here to the next entry. */
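		/*
		 * The comparator orders entries by their end address, so
		 * RB_NFIND with a zero-length key at 'start' returns the
		 * first entry whose end is at or above 'start'.
		 */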
		key.start = key.end = start;
		IOMMU_DOMAIN_LOCK(domain);
		next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &key);
		KASSERT(next != NULL, ("domain %p with end %#jx has no entry "
		    "after %#jx", domain, (uintmax_t)domain->end,
		    (uintmax_t)start));
		entry_end = ummin(end, next->start);
		prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
		if (prev != NULL)
			entry_start = ummax(start, prev->end);
		else
			entry_start = start;
		start = next->end;
		/* Reserve the region if non-empty. */
		if (entry_start != entry_end) {
			error = iommu_gas_reserve_region_locked(domain,
			    entry_start, entry_end, entry);
			if (error != 0) {
				IOMMU_DOMAIN_UNLOCK(domain);
				break;
			}
			entry = NULL;
		}
		IOMMU_DOMAIN_UNLOCK(domain);
	}
	/* Release a preallocated entry if it was not used. */
	if (entry != NULL)
		iommu_gas_free_entry(entry);
	return (error);
}

void
iommu_unmap_msi(struct iommu_ctx *ctx)
{
	struct iommu_map_entry *entry;
	struct iommu_domain *domain;

	domain = ctx->domain;
	entry = domain->msi_entry;
	if (entry == NULL)
		return;

	domain->ops->unmap(domain, entry->start, entry->end -
	    entry->start, IOMMU_PGF_WAITOK);

	iommu_gas_free_space(entry);

	iommu_gas_free_entry(entry);

	domain->msi_entry = NULL;
	domain->msi_base = 0;
	domain->msi_phys = 0;
}

int
iommu_map_msi(struct iommu_ctx *ctx, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	struct iommu_domain *domain;
	struct iommu_map_entry *entry;
	int error;

	error = 0;
	domain = ctx->domain;

	/* Check if there is already an MSI page allocated */
	IOMMU_DOMAIN_LOCK(domain);
	entry = domain->msi_entry;
	IOMMU_DOMAIN_UNLOCK(domain);

	if (entry == NULL) {
		error = iommu_gas_map(domain, &ctx->tag->common, size, offset,
		    eflags, flags, ma, &entry);
		IOMMU_DOMAIN_LOCK(domain);
		if (error == 0) {
			if (domain->msi_entry == NULL) {
				MPASS(domain->msi_base == 0);
				MPASS(domain->msi_phys == 0);

				domain->msi_entry = entry;
				domain->msi_base = entry->start;
				domain->msi_phys = VM_PAGE_TO_PHYS(ma[0]);
			} else {
				/*
				 * We lost the race and already have an
				 * MSI page allocated.  Free the unneeded
				 * entry.
				 */
				iommu_gas_free_entry(entry);
			}
		} else if (domain->msi_entry != NULL) {
			/*
			 * The allocation failed, but another succeeded.
			 * Return success as there is a valid MSI page.
			 */
			error = 0;
		}
		IOMMU_DOMAIN_UNLOCK(domain);
	}

	return (error);
}

void
iommu_translate_msi(struct iommu_domain *domain, uint64_t *addr)
{

	*addr = (*addr - domain->msi_phys) + domain->msi_base;

	KASSERT(*addr >= domain->msi_entry->start,
	    ("%s: Address is below the MSI entry start address (%jx < %jx)",
	    __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->start));

	KASSERT(*addr + sizeof(*addr) <= domain->msi_entry->end,
	    ("%s: Address is above the MSI entry end address (%jx < %jx)",
	    __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->end));
}

SYSCTL_NODE(_hw, OID_AUTO, iommu, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "");

#ifdef INVARIANTS
SYSCTL_INT(_hw_iommu, OID_AUTO, check_free, CTLFLAG_RWTUN,
    &iommu_check_free, 0,
    "Check the GPA RBtree for free_down and free_after validity");
#endif

#include "opt_ddb.h"
#ifdef DDB

#include <ddb/ddb.h>

static void
iommu_debug_dump_gas(struct iommu_domain *domain)
{
	struct iommu_map_entry *entry;

	db_printf("iommu_domain %p tree %p iommu %p fl %#x\n", domain,
	    &domain->rb_root, domain->iommu, domain->flags);
	db_printf("iommu_domain %p tree %p\n", domain, &domain->rb_root);
	RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
		db_printf(
	    " e %p [%#jx %#jx] fl %#x first %#jx last %#jx free_down %#jx",
		    entry, (uintmax_t)entry->start, (uintmax_t)entry->end,
		    entry->flags,
		    (uintmax_t)entry->first, (uintmax_t)entry->last,
		    (uintmax_t)entry->free_down);
		if (entry == domain->start_gap)
			db_printf(" start_gap");
		if (entry == domain->first_place)
			db_printf(" first_place");
		if (entry == domain->last_place)
			db_printf(" last_place");
		db_printf("\n");
	}
}

DB_SHOW_COMMAND(iommu_domain, iommu_domain_show)
{
	struct iommu_domain *domain;

	if (!have_addr) {
		db_printf("show iommu_domain addr\n");
		return;
	}

	domain = (void *)addr;
	iommu_debug_dump_gas(domain);
}

#endif