13024e8afSRuslan Bukin /*- 24d846d26SWarner Losh * SPDX-License-Identifier: BSD-2-Clause 33024e8afSRuslan Bukin * 43024e8afSRuslan Bukin * Copyright (c) 2013 The FreeBSD Foundation 53024e8afSRuslan Bukin * 63024e8afSRuslan Bukin * This software was developed by Konstantin Belousov <kib@FreeBSD.org> 73024e8afSRuslan Bukin * under sponsorship from the FreeBSD Foundation. 83024e8afSRuslan Bukin * 93024e8afSRuslan Bukin * Redistribution and use in source and binary forms, with or without 103024e8afSRuslan Bukin * modification, are permitted provided that the following conditions 113024e8afSRuslan Bukin * are met: 123024e8afSRuslan Bukin * 1. Redistributions of source code must retain the above copyright 133024e8afSRuslan Bukin * notice, this list of conditions and the following disclaimer. 143024e8afSRuslan Bukin * 2. Redistributions in binary form must reproduce the above copyright 153024e8afSRuslan Bukin * notice, this list of conditions and the following disclaimer in the 163024e8afSRuslan Bukin * documentation and/or other materials provided with the distribution. 173024e8afSRuslan Bukin * 183024e8afSRuslan Bukin * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 193024e8afSRuslan Bukin * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 203024e8afSRuslan Bukin * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 213024e8afSRuslan Bukin * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 223024e8afSRuslan Bukin * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 233024e8afSRuslan Bukin * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 243024e8afSRuslan Bukin * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 253024e8afSRuslan Bukin * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 263024e8afSRuslan Bukin * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 273024e8afSRuslan Bukin * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 283024e8afSRuslan Bukin * SUCH DAMAGE. 293024e8afSRuslan Bukin */ 303024e8afSRuslan Bukin 31b16f993eSDoug Moore #define RB_AUGMENT_CHECK(entry) iommu_gas_augment_entry(entry) 323024e8afSRuslan Bukin 333024e8afSRuslan Bukin #include <sys/param.h> 343024e8afSRuslan Bukin #include <sys/systm.h> 353024e8afSRuslan Bukin #include <sys/malloc.h> 363024e8afSRuslan Bukin #include <sys/bus.h> 373024e8afSRuslan Bukin #include <sys/interrupt.h> 383024e8afSRuslan Bukin #include <sys/kernel.h> 393024e8afSRuslan Bukin #include <sys/ktr.h> 403024e8afSRuslan Bukin #include <sys/lock.h> 413024e8afSRuslan Bukin #include <sys/proc.h> 423024e8afSRuslan Bukin #include <sys/rwlock.h> 433024e8afSRuslan Bukin #include <sys/memdesc.h> 443024e8afSRuslan Bukin #include <sys/mutex.h> 453024e8afSRuslan Bukin #include <sys/sysctl.h> 463024e8afSRuslan Bukin #include <sys/rman.h> 473024e8afSRuslan Bukin #include <sys/taskqueue.h> 483024e8afSRuslan Bukin #include <sys/tree.h> 493024e8afSRuslan Bukin #include <sys/uio.h> 503024e8afSRuslan Bukin #include <sys/vmem.h> 513024e8afSRuslan Bukin #include <vm/vm.h> 523024e8afSRuslan Bukin #include <vm/vm_extern.h> 533024e8afSRuslan Bukin #include <vm/vm_kern.h> 543024e8afSRuslan Bukin #include <vm/vm_object.h> 553024e8afSRuslan Bukin #include <vm/vm_page.h> 563024e8afSRuslan Bukin #include <vm/vm_map.h> 573024e8afSRuslan Bukin #include <vm/uma.h> 58c8597a1fSRuslan Bukin #include <dev/pci/pcireg.h> 59c8597a1fSRuslan Bukin #include <dev/pci/pcivar.h> 60c8597a1fSRuslan Bukin #include <dev/iommu/iommu.h> 61f23f7d3aSRuslan Bukin #include <dev/iommu/iommu_gas.h> 62e707c8beSRuslan Bukin #include <dev/iommu/iommu_msi.h> 633024e8afSRuslan Bukin #include <machine/atomic.h> 643024e8afSRuslan Bukin #include <machine/bus.h> 653024e8afSRuslan Bukin #include <machine/md_var.h> 66c4cd6990SRuslan Bukin #include <machine/iommu.h> 67c8597a1fSRuslan Bukin #include <dev/iommu/busdma_iommu.h> 683024e8afSRuslan Bukin 693024e8afSRuslan Bukin /* 703024e8afSRuslan Bukin * Guest Address Space management. 713024e8afSRuslan Bukin */ 723024e8afSRuslan Bukin 733024e8afSRuslan Bukin static uma_zone_t iommu_map_entry_zone; 743024e8afSRuslan Bukin 759c843a40SRuslan Bukin #ifdef INVARIANTS 769c843a40SRuslan Bukin static int iommu_check_free; 779c843a40SRuslan Bukin #endif 789c843a40SRuslan Bukin 793024e8afSRuslan Bukin static void 803024e8afSRuslan Bukin intel_gas_init(void) 813024e8afSRuslan Bukin { 823024e8afSRuslan Bukin 833024e8afSRuslan Bukin iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY", 843024e8afSRuslan Bukin sizeof(struct iommu_map_entry), NULL, NULL, 853024e8afSRuslan Bukin NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP); 863024e8afSRuslan Bukin } 873024e8afSRuslan Bukin SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL); 883024e8afSRuslan Bukin 893024e8afSRuslan Bukin struct iommu_map_entry * 903024e8afSRuslan Bukin iommu_gas_alloc_entry(struct iommu_domain *domain, u_int flags) 913024e8afSRuslan Bukin { 923024e8afSRuslan Bukin struct iommu_map_entry *res; 933024e8afSRuslan Bukin 9415f6baf4SRuslan Bukin KASSERT((flags & ~(IOMMU_PGF_WAITOK)) == 0, 953024e8afSRuslan Bukin ("unsupported flags %x", flags)); 963024e8afSRuslan Bukin 9715f6baf4SRuslan Bukin res = uma_zalloc(iommu_map_entry_zone, ((flags & IOMMU_PGF_WAITOK) != 983024e8afSRuslan Bukin 0 ? M_WAITOK : M_NOWAIT) | M_ZERO); 9942736dc4SAlan Cox if (res != NULL && domain != NULL) { 1003024e8afSRuslan Bukin res->domain = domain; 1013024e8afSRuslan Bukin atomic_add_int(&domain->entries_cnt, 1); 1023024e8afSRuslan Bukin } 1033024e8afSRuslan Bukin return (res); 1043024e8afSRuslan Bukin } 1053024e8afSRuslan Bukin 1063024e8afSRuslan Bukin void 1074670f908SAlan Cox iommu_gas_free_entry(struct iommu_map_entry *entry) 1083024e8afSRuslan Bukin { 1094670f908SAlan Cox struct iommu_domain *domain; 1103024e8afSRuslan Bukin 1114670f908SAlan Cox domain = entry->domain; 11242736dc4SAlan Cox if (domain != NULL) 1133024e8afSRuslan Bukin atomic_subtract_int(&domain->entries_cnt, 1); 1143024e8afSRuslan Bukin uma_zfree(iommu_map_entry_zone, entry); 1153024e8afSRuslan Bukin } 1163024e8afSRuslan Bukin 1173024e8afSRuslan Bukin static int 1183024e8afSRuslan Bukin iommu_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b) 1193024e8afSRuslan Bukin { 1203024e8afSRuslan Bukin 1213024e8afSRuslan Bukin /* Last entry have zero size, so <= */ 1223024e8afSRuslan Bukin KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)", 1233024e8afSRuslan Bukin a, (uintmax_t)a->start, (uintmax_t)a->end)); 1243024e8afSRuslan Bukin KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)", 1253024e8afSRuslan Bukin b, (uintmax_t)b->start, (uintmax_t)b->end)); 1263024e8afSRuslan Bukin KASSERT(a->end <= b->start || b->end <= a->start || 1273024e8afSRuslan Bukin a->end == a->start || b->end == b->start, 128*733da1ebSKonstantin Belousov ("overlapping entries %p (%jx, %jx) f %#x %p (%jx, %jx) f %#x" 129*733da1ebSKonstantin Belousov " domain %p %p", 130*733da1ebSKonstantin Belousov a, (uintmax_t)a->start, (uintmax_t)a->end, a->flags, 131*733da1ebSKonstantin Belousov b, (uintmax_t)b->start, (uintmax_t)b->end, b->flags, 132*733da1ebSKonstantin Belousov a->domain, b->domain)); 1333024e8afSRuslan Bukin 1343024e8afSRuslan Bukin if (a->end < b->end) 1353024e8afSRuslan Bukin return (-1); 1363024e8afSRuslan Bukin else if (b->end < a->end) 1373024e8afSRuslan Bukin return (1); 1383024e8afSRuslan Bukin return (0); 1393024e8afSRuslan Bukin } 1403024e8afSRuslan Bukin 141b16f993eSDoug Moore /* 142b16f993eSDoug Moore * Update augmentation data based on data from children. 143b16f993eSDoug Moore * Return true if and only if the update changes the augmentation data. 144b16f993eSDoug Moore */ 145b16f993eSDoug Moore static bool 1463024e8afSRuslan Bukin iommu_gas_augment_entry(struct iommu_map_entry *entry) 1473024e8afSRuslan Bukin { 1483024e8afSRuslan Bukin struct iommu_map_entry *child; 149b16f993eSDoug Moore iommu_gaddr_t bound, delta, free_down; 1503024e8afSRuslan Bukin 1513024e8afSRuslan Bukin free_down = 0; 152b16f993eSDoug Moore bound = entry->start; 1533024e8afSRuslan Bukin if ((child = RB_LEFT(entry, rb_entry)) != NULL) { 154b16f993eSDoug Moore free_down = MAX(child->free_down, bound - child->last); 155b16f993eSDoug Moore bound = child->first; 156b16f993eSDoug Moore } 157b16f993eSDoug Moore delta = bound - entry->first; 158b16f993eSDoug Moore entry->first = bound; 159b16f993eSDoug Moore bound = entry->end; 1603024e8afSRuslan Bukin if ((child = RB_RIGHT(entry, rb_entry)) != NULL) { 1613024e8afSRuslan Bukin free_down = MAX(free_down, child->free_down); 162b16f993eSDoug Moore free_down = MAX(free_down, child->first - bound); 163b16f993eSDoug Moore bound = child->last; 164b16f993eSDoug Moore } 165b16f993eSDoug Moore delta += entry->last - bound; 166b16f993eSDoug Moore if (delta == 0) 167b16f993eSDoug Moore delta = entry->free_down - free_down; 168b16f993eSDoug Moore entry->last = bound; 1693024e8afSRuslan Bukin entry->free_down = free_down; 170b16f993eSDoug Moore 171b16f993eSDoug Moore /* 172b16f993eSDoug Moore * Return true either if the value of last-first changed, 173b16f993eSDoug Moore * or if free_down changed. 174b16f993eSDoug Moore */ 175b16f993eSDoug Moore return (delta != 0); 1763024e8afSRuslan Bukin } 1773024e8afSRuslan Bukin 1783024e8afSRuslan Bukin RB_GENERATE(iommu_gas_entries_tree, iommu_map_entry, rb_entry, 1793024e8afSRuslan Bukin iommu_gas_cmp_entries); 1803024e8afSRuslan Bukin 1813024e8afSRuslan Bukin #ifdef INVARIANTS 1823024e8afSRuslan Bukin static void 1833024e8afSRuslan Bukin iommu_gas_check_free(struct iommu_domain *domain) 1843024e8afSRuslan Bukin { 1853024e8afSRuslan Bukin struct iommu_map_entry *entry, *l, *r; 1863024e8afSRuslan Bukin iommu_gaddr_t v; 1873024e8afSRuslan Bukin 1883024e8afSRuslan Bukin RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) { 189b64dca2bSRuslan Bukin KASSERT(domain == entry->domain, 1903024e8afSRuslan Bukin ("mismatched free domain %p entry %p entry->domain %p", 1913024e8afSRuslan Bukin domain, entry, entry->domain)); 1923024e8afSRuslan Bukin l = RB_LEFT(entry, rb_entry); 1933024e8afSRuslan Bukin r = RB_RIGHT(entry, rb_entry); 1943024e8afSRuslan Bukin v = 0; 1953024e8afSRuslan Bukin if (l != NULL) { 1963024e8afSRuslan Bukin v = MAX(v, l->free_down); 1973024e8afSRuslan Bukin v = MAX(v, entry->start - l->last); 1983024e8afSRuslan Bukin } 1993024e8afSRuslan Bukin if (r != NULL) { 2003024e8afSRuslan Bukin v = MAX(v, r->free_down); 2013024e8afSRuslan Bukin v = MAX(v, r->first - entry->end); 2023024e8afSRuslan Bukin } 2033024e8afSRuslan Bukin MPASS(entry->free_down == v); 2043024e8afSRuslan Bukin } 2053024e8afSRuslan Bukin } 2063024e8afSRuslan Bukin #endif 2073024e8afSRuslan Bukin 2083024e8afSRuslan Bukin static void 2093024e8afSRuslan Bukin iommu_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry) 2103024e8afSRuslan Bukin { 2118b221ca6SDoug Moore struct iommu_map_entry *nbr; 2123024e8afSRuslan Bukin 2138b221ca6SDoug Moore /* Removing entry may open a new free gap before domain->start_gap. */ 2148b221ca6SDoug Moore if (entry->end <= domain->start_gap->end) { 2158b221ca6SDoug Moore if (RB_RIGHT(entry, rb_entry) != NULL) 2168b221ca6SDoug Moore nbr = iommu_gas_entries_tree_RB_NEXT(entry); 2178b221ca6SDoug Moore else if (RB_LEFT(entry, rb_entry) != NULL) 2188b221ca6SDoug Moore nbr = RB_LEFT(entry, rb_entry); 2198b221ca6SDoug Moore else 2208b221ca6SDoug Moore nbr = RB_PARENT(entry, rb_entry); 2218b221ca6SDoug Moore domain->start_gap = nbr; 2228b221ca6SDoug Moore } 2233024e8afSRuslan Bukin RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry); 2243024e8afSRuslan Bukin } 2253024e8afSRuslan Bukin 226f5cafae1SRuslan Bukin struct iommu_domain * 227f5cafae1SRuslan Bukin iommu_get_ctx_domain(struct iommu_ctx *ctx) 228f5cafae1SRuslan Bukin { 229f5cafae1SRuslan Bukin 230f5cafae1SRuslan Bukin return (ctx->domain); 231f5cafae1SRuslan Bukin } 232f5cafae1SRuslan Bukin 2333024e8afSRuslan Bukin void 2343024e8afSRuslan Bukin iommu_gas_init_domain(struct iommu_domain *domain) 2353024e8afSRuslan Bukin { 2363024e8afSRuslan Bukin struct iommu_map_entry *begin, *end; 2373024e8afSRuslan Bukin 23815f6baf4SRuslan Bukin begin = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK); 23915f6baf4SRuslan Bukin end = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK); 2403024e8afSRuslan Bukin 2413024e8afSRuslan Bukin IOMMU_DOMAIN_LOCK(domain); 2423024e8afSRuslan Bukin KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain)); 2433024e8afSRuslan Bukin KASSERT(RB_EMPTY(&domain->rb_root), 2443024e8afSRuslan Bukin ("non-empty entries %p", domain)); 2453024e8afSRuslan Bukin 246b16f993eSDoug Moore /* 247b16f993eSDoug Moore * The end entry must be inserted first because it has a zero-length gap 248b16f993eSDoug Moore * between start and end. Initially, all augmentation data for a new 249b16f993eSDoug Moore * entry is zero. Function iommu_gas_augment_entry will compute no 250b16f993eSDoug Moore * change in the value of (start-end) and no change in the value of 251b16f993eSDoug Moore * free_down, so it will return false to suggest that nothing changed in 252b16f993eSDoug Moore * the entry. Thus, inserting the end entry second prevents 253b16f993eSDoug Moore * augmentation information to be propogated to the begin entry at the 254b16f993eSDoug Moore * tree root. So it is inserted first. 255b16f993eSDoug Moore */ 2563024e8afSRuslan Bukin end->start = domain->end; 2573024e8afSRuslan Bukin end->end = domain->end; 2583024e8afSRuslan Bukin end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED; 259368ee2f8SDoug Moore RB_INSERT(iommu_gas_entries_tree, &domain->rb_root, end); 2603024e8afSRuslan Bukin 261b16f993eSDoug Moore begin->start = 0; 262b16f993eSDoug Moore begin->end = IOMMU_PAGE_SIZE; 263b16f993eSDoug Moore begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED; 264368ee2f8SDoug Moore RB_INSERT_PREV(iommu_gas_entries_tree, &domain->rb_root, end, begin); 265b16f993eSDoug Moore 26687d405eaSDoug Moore domain->start_gap = begin; 2673024e8afSRuslan Bukin domain->first_place = begin; 2683024e8afSRuslan Bukin domain->last_place = end; 26915f6baf4SRuslan Bukin domain->flags |= IOMMU_DOMAIN_GAS_INITED; 2703024e8afSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 2713024e8afSRuslan Bukin } 2723024e8afSRuslan Bukin 2733024e8afSRuslan Bukin void 2743024e8afSRuslan Bukin iommu_gas_fini_domain(struct iommu_domain *domain) 2753024e8afSRuslan Bukin { 276a2c57c60SDoug Moore struct iommu_map_entry *entry; 2773024e8afSRuslan Bukin 2783024e8afSRuslan Bukin IOMMU_DOMAIN_ASSERT_LOCKED(domain); 2793024e8afSRuslan Bukin KASSERT(domain->entries_cnt == 2, 2803024e8afSRuslan Bukin ("domain still in use %p", domain)); 2813024e8afSRuslan Bukin 2823024e8afSRuslan Bukin entry = RB_MIN(iommu_gas_entries_tree, &domain->rb_root); 2833024e8afSRuslan Bukin KASSERT(entry->start == 0, ("start entry start %p", domain)); 2843024e8afSRuslan Bukin KASSERT(entry->end == IOMMU_PAGE_SIZE, ("start entry end %p", domain)); 285dea8594fSRuslan Bukin KASSERT(entry->flags == 286dea8594fSRuslan Bukin (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED), 2873024e8afSRuslan Bukin ("start entry flags %p", domain)); 288368ee2f8SDoug Moore iommu_gas_rb_remove(domain, entry); 2894670f908SAlan Cox iommu_gas_free_entry(entry); 2903024e8afSRuslan Bukin 2913024e8afSRuslan Bukin entry = RB_MAX(iommu_gas_entries_tree, &domain->rb_root); 2923024e8afSRuslan Bukin KASSERT(entry->start == domain->end, ("end entry start %p", domain)); 2933024e8afSRuslan Bukin KASSERT(entry->end == domain->end, ("end entry end %p", domain)); 294dea8594fSRuslan Bukin KASSERT(entry->flags == 295dea8594fSRuslan Bukin (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED), 2963024e8afSRuslan Bukin ("end entry flags %p", domain)); 297368ee2f8SDoug Moore iommu_gas_rb_remove(domain, entry); 2984670f908SAlan Cox iommu_gas_free_entry(entry); 2993024e8afSRuslan Bukin } 3003024e8afSRuslan Bukin 3013024e8afSRuslan Bukin struct iommu_gas_match_args { 3023024e8afSRuslan Bukin iommu_gaddr_t size; 3033024e8afSRuslan Bukin int offset; 3043024e8afSRuslan Bukin const struct bus_dma_tag_common *common; 3053024e8afSRuslan Bukin u_int gas_flags; 3063024e8afSRuslan Bukin struct iommu_map_entry *entry; 3073024e8afSRuslan Bukin }; 3083024e8afSRuslan Bukin 3093024e8afSRuslan Bukin /* 3103024e8afSRuslan Bukin * The interval [beg, end) is a free interval between two iommu_map_entries. 3115b9b55fbSDoug Moore * Addresses can be allocated only in the range [lbound, ubound]. Try to 312e0e8d0c8SDoug Moore * allocate space in the free interval, subject to the conditions expressed by 313e0e8d0c8SDoug Moore * a, and return 'true' if and only if the allocation attempt succeeds. 3143024e8afSRuslan Bukin */ 3153024e8afSRuslan Bukin static bool 3163024e8afSRuslan Bukin iommu_gas_match_one(struct iommu_gas_match_args *a, iommu_gaddr_t beg, 317e0e8d0c8SDoug Moore iommu_gaddr_t end, iommu_gaddr_t lbound, iommu_gaddr_t ubound) 3183024e8afSRuslan Bukin { 319e0e8d0c8SDoug Moore struct iommu_map_entry *entry; 320e0e8d0c8SDoug Moore iommu_gaddr_t first, size, start; 321e0e8d0c8SDoug Moore int offset; 3223024e8afSRuslan Bukin 323da33f6d7SAlan Cox /* 324da33f6d7SAlan Cox * The prev->end is always aligned on the page size, which 325da33f6d7SAlan Cox * causes page alignment for the entry->start too. 326da33f6d7SAlan Cox * 327e0e8d0c8SDoug Moore * Create IOMMU_PAGE_SIZE gaps before, after new entry 328e0e8d0c8SDoug Moore * to ensure that out-of-bounds accesses fault. 329da33f6d7SAlan Cox */ 330e0e8d0c8SDoug Moore beg = MAX(beg + IOMMU_PAGE_SIZE, lbound); 331e0e8d0c8SDoug Moore start = roundup2(beg, a->common->alignment); 332e0e8d0c8SDoug Moore if (start < beg) 333e0e8d0c8SDoug Moore return (false); 334a869643eSKonstantin Belousov if (end < IOMMU_PAGE_SIZE + 1) 335a869643eSKonstantin Belousov return (false); 3365b9b55fbSDoug Moore end = MIN(end - IOMMU_PAGE_SIZE - 1, ubound); 337e0e8d0c8SDoug Moore offset = a->offset; 338e0e8d0c8SDoug Moore size = a->size; 3395b9b55fbSDoug Moore if (start + offset + size - 1 > end) 3403024e8afSRuslan Bukin return (false); 3413024e8afSRuslan Bukin 342e0e8d0c8SDoug Moore /* Check for and try to skip past boundary crossing. */ 343e0e8d0c8SDoug Moore if (!vm_addr_bound_ok(start + offset, size, a->common->boundary)) { 3443024e8afSRuslan Bukin /* 3453024e8afSRuslan Bukin * The start + offset to start + offset + size region crosses 346e0e8d0c8SDoug Moore * the boundary. Check if there is enough space after the next 347e0e8d0c8SDoug Moore * boundary after the beg. 3483024e8afSRuslan Bukin */ 349e0e8d0c8SDoug Moore first = start; 350e0e8d0c8SDoug Moore beg = roundup2(start + offset + 1, a->common->boundary); 351e0e8d0c8SDoug Moore start = roundup2(beg, a->common->alignment); 352e0e8d0c8SDoug Moore 3535b9b55fbSDoug Moore if (start + offset + size - 1 > end || 354e0e8d0c8SDoug Moore !vm_addr_bound_ok(start + offset, size, 3553024e8afSRuslan Bukin a->common->boundary)) { 3563024e8afSRuslan Bukin /* 357e0e8d0c8SDoug Moore * Not enough space to align at the requested boundary, 358e0e8d0c8SDoug Moore * or boundary is smaller than the size, but allowed to 359e0e8d0c8SDoug Moore * split. We already checked that start + size does not 360e0e8d0c8SDoug Moore * overlap ubound. 3613024e8afSRuslan Bukin * 362e0e8d0c8SDoug Moore * XXXKIB. It is possible that beg is exactly at the 363e0e8d0c8SDoug Moore * start of the next entry, then we do not have gap. 364e0e8d0c8SDoug Moore * Ignore for now. 3653024e8afSRuslan Bukin */ 366e0e8d0c8SDoug Moore if ((a->gas_flags & IOMMU_MF_CANSPLIT) == 0) 367e0e8d0c8SDoug Moore return (false); 368e0e8d0c8SDoug Moore size = beg - first - offset; 369e0e8d0c8SDoug Moore start = first; 370e0e8d0c8SDoug Moore } 371e0e8d0c8SDoug Moore } 372e0e8d0c8SDoug Moore entry = a->entry; 373e0e8d0c8SDoug Moore entry->start = start; 374e0e8d0c8SDoug Moore entry->end = start + roundup2(size + offset, IOMMU_PAGE_SIZE); 375e0e8d0c8SDoug Moore entry->flags = IOMMU_MAP_ENTRY_MAP; 3763024e8afSRuslan Bukin return (true); 3773024e8afSRuslan Bukin } 3783024e8afSRuslan Bukin 379e0e8d0c8SDoug Moore /* Find the next entry that might abut a big-enough range. */ 380e0e8d0c8SDoug Moore static struct iommu_map_entry * 381e0e8d0c8SDoug Moore iommu_gas_next(struct iommu_map_entry *curr, iommu_gaddr_t min_free) 3823024e8afSRuslan Bukin { 383e0e8d0c8SDoug Moore struct iommu_map_entry *next; 3843024e8afSRuslan Bukin 385e0e8d0c8SDoug Moore if ((next = RB_RIGHT(curr, rb_entry)) != NULL && 386e0e8d0c8SDoug Moore next->free_down >= min_free) { 387e0e8d0c8SDoug Moore /* Find next entry in right subtree. */ 388e0e8d0c8SDoug Moore do 389e0e8d0c8SDoug Moore curr = next; 390e0e8d0c8SDoug Moore while ((next = RB_LEFT(curr, rb_entry)) != NULL && 391e0e8d0c8SDoug Moore next->free_down >= min_free); 392e0e8d0c8SDoug Moore } else { 393e0e8d0c8SDoug Moore /* Find next entry in a left-parent ancestor. */ 394e0e8d0c8SDoug Moore while ((next = RB_PARENT(curr, rb_entry)) != NULL && 395e0e8d0c8SDoug Moore curr == RB_RIGHT(next, rb_entry)) 396e0e8d0c8SDoug Moore curr = next; 397e0e8d0c8SDoug Moore curr = next; 398e0e8d0c8SDoug Moore } 399e0e8d0c8SDoug Moore return (curr); 4003024e8afSRuslan Bukin } 4013024e8afSRuslan Bukin 4028b221ca6SDoug Moore /* 4038b221ca6SDoug Moore * Address-ordered first-fit search of 'domain' for free space satisfying the 4048b221ca6SDoug Moore * conditions of 'a'. The space allocated is at least one page big, and is 405a2c57c60SDoug Moore * bounded by guard pages to the left and right. The allocated space for 406a2c57c60SDoug Moore * 'domain' is described by an rb-tree of map entries at domain->rb_root, and 407a2c57c60SDoug Moore * domain->start_gap points to a map entry less than or adjacent to the first 4088b221ca6SDoug Moore * free-space of size at least 3 pages. 4098b221ca6SDoug Moore */ 4103024e8afSRuslan Bukin static int 4118b221ca6SDoug Moore iommu_gas_find_space(struct iommu_domain *domain, 4128b221ca6SDoug Moore struct iommu_gas_match_args *a) 4133024e8afSRuslan Bukin { 414e0e8d0c8SDoug Moore struct iommu_map_entry *curr, *first; 415e0e8d0c8SDoug Moore iommu_gaddr_t addr, min_free; 416e0e8d0c8SDoug Moore 4178b221ca6SDoug Moore IOMMU_DOMAIN_ASSERT_LOCKED(domain); 418e0e8d0c8SDoug Moore KASSERT(a->entry->flags == 0, 4198b221ca6SDoug Moore ("dirty entry %p %p", domain, a->entry)); 4208b221ca6SDoug Moore 4218b221ca6SDoug Moore /* 4228b221ca6SDoug Moore * start_gap may point to an entry adjacent to gaps too small for any 4238b221ca6SDoug Moore * new allocation. In that case, advance start_gap to the first free 4248b221ca6SDoug Moore * space big enough for a minimum allocation plus two guard pages. 4258b221ca6SDoug Moore */ 4268b221ca6SDoug Moore min_free = 3 * IOMMU_PAGE_SIZE; 4278b221ca6SDoug Moore first = domain->start_gap; 4288b221ca6SDoug Moore while (first != NULL && first->free_down < min_free) 4298b221ca6SDoug Moore first = RB_PARENT(first, rb_entry); 4308b221ca6SDoug Moore for (curr = first; curr != NULL; 4318b221ca6SDoug Moore curr = iommu_gas_next(curr, min_free)) { 4328b221ca6SDoug Moore if ((first = RB_LEFT(curr, rb_entry)) != NULL && 4338b221ca6SDoug Moore first->last + min_free <= curr->start) 4348b221ca6SDoug Moore break; 4358b221ca6SDoug Moore if ((first = RB_RIGHT(curr, rb_entry)) != NULL && 4368b221ca6SDoug Moore curr->end + min_free <= first->first) 4378b221ca6SDoug Moore break; 4388b221ca6SDoug Moore } 4398b221ca6SDoug Moore domain->start_gap = curr; 4403024e8afSRuslan Bukin 441b831865fSDoug Moore /* 442b831865fSDoug Moore * If the subtree doesn't have free space for the requested allocation 443f979ad00SDoug Moore * plus two guard pages, skip it. 444b831865fSDoug Moore */ 445f979ad00SDoug Moore min_free = 2 * IOMMU_PAGE_SIZE + 446f979ad00SDoug Moore roundup2(a->size + a->offset, IOMMU_PAGE_SIZE); 447f979ad00SDoug Moore 4488b221ca6SDoug Moore /* Climb to find a node in the subtree of big-enough ranges. */ 449e0e8d0c8SDoug Moore first = curr; 4508b221ca6SDoug Moore while (first != NULL && first->free_down < min_free) 4518b221ca6SDoug Moore first = RB_PARENT(first, rb_entry); 452f979ad00SDoug Moore 453f979ad00SDoug Moore /* 4548b221ca6SDoug Moore * Walk the big-enough ranges tree until one satisfies alignment 455f979ad00SDoug Moore * requirements, or violates lowaddr address requirement. 456f979ad00SDoug Moore */ 4575b9b55fbSDoug Moore addr = a->common->lowaddr; 458e0e8d0c8SDoug Moore for (curr = first; curr != NULL; 459e0e8d0c8SDoug Moore curr = iommu_gas_next(curr, min_free)) { 460e0e8d0c8SDoug Moore if ((first = RB_LEFT(curr, rb_entry)) != NULL && 461e0e8d0c8SDoug Moore iommu_gas_match_one(a, first->last, curr->start, 462368ee2f8SDoug Moore 0, addr)) { 463368ee2f8SDoug Moore RB_INSERT_PREV(iommu_gas_entries_tree, 464368ee2f8SDoug Moore &domain->rb_root, curr, a->entry); 46530031172SDoug Moore return (0); 466368ee2f8SDoug Moore } 467e0e8d0c8SDoug Moore if (curr->end >= addr) { 4685b9b55fbSDoug Moore /* All remaining ranges > addr */ 469f979ad00SDoug Moore break; 470f979ad00SDoug Moore } 471e0e8d0c8SDoug Moore if ((first = RB_RIGHT(curr, rb_entry)) != NULL && 472e0e8d0c8SDoug Moore iommu_gas_match_one(a, curr->end, first->first, 473368ee2f8SDoug Moore 0, addr)) { 474368ee2f8SDoug Moore RB_INSERT_NEXT(iommu_gas_entries_tree, 475368ee2f8SDoug Moore &domain->rb_root, curr, a->entry); 4763024e8afSRuslan Bukin return (0); 4773024e8afSRuslan Bukin } 478368ee2f8SDoug Moore } 4793024e8afSRuslan Bukin 480b831865fSDoug Moore /* 481e0e8d0c8SDoug Moore * To resume the search at the start of the upper region, first climb to 482e0e8d0c8SDoug Moore * the nearest ancestor that spans highaddr. Then find the last entry 483e0e8d0c8SDoug Moore * before highaddr that could abut a big-enough range. 484b831865fSDoug Moore */ 485e0e8d0c8SDoug Moore addr = a->common->highaddr; 486e0e8d0c8SDoug Moore while (curr != NULL && curr->last < addr) 487e0e8d0c8SDoug Moore curr = RB_PARENT(curr, rb_entry); 488e0e8d0c8SDoug Moore first = NULL; 489e0e8d0c8SDoug Moore while (curr != NULL && curr->free_down >= min_free) { 490e0e8d0c8SDoug Moore if (addr < curr->end) 491e0e8d0c8SDoug Moore curr = RB_LEFT(curr, rb_entry); 492e0e8d0c8SDoug Moore else { 493e0e8d0c8SDoug Moore first = curr; 494e0e8d0c8SDoug Moore curr = RB_RIGHT(curr, rb_entry); 4953024e8afSRuslan Bukin } 4963024e8afSRuslan Bukin } 4973024e8afSRuslan Bukin 498e0e8d0c8SDoug Moore /* 499e0e8d0c8SDoug Moore * Walk the remaining big-enough ranges until one satisfies alignment 500e0e8d0c8SDoug Moore * requirements. 501e0e8d0c8SDoug Moore */ 502e0e8d0c8SDoug Moore for (curr = first; curr != NULL; 503e0e8d0c8SDoug Moore curr = iommu_gas_next(curr, min_free)) { 504e0e8d0c8SDoug Moore if ((first = RB_LEFT(curr, rb_entry)) != NULL && 505e0e8d0c8SDoug Moore iommu_gas_match_one(a, first->last, curr->start, 5065b9b55fbSDoug Moore addr + 1, domain->end - 1)) { 507368ee2f8SDoug Moore RB_INSERT_PREV(iommu_gas_entries_tree, 508368ee2f8SDoug Moore &domain->rb_root, curr, a->entry); 5093024e8afSRuslan Bukin return (0); 510368ee2f8SDoug Moore } 511e0e8d0c8SDoug Moore if ((first = RB_RIGHT(curr, rb_entry)) != NULL && 512e0e8d0c8SDoug Moore iommu_gas_match_one(a, curr->end, first->first, 5135b9b55fbSDoug Moore addr + 1, domain->end - 1)) { 514368ee2f8SDoug Moore RB_INSERT_NEXT(iommu_gas_entries_tree, 515368ee2f8SDoug Moore &domain->rb_root, curr, a->entry); 516e0e8d0c8SDoug Moore return (0); 5173024e8afSRuslan Bukin } 518368ee2f8SDoug Moore } 519e0e8d0c8SDoug Moore 5203024e8afSRuslan Bukin return (ENOMEM); 5213024e8afSRuslan Bukin } 5223024e8afSRuslan Bukin 5233024e8afSRuslan Bukin static int 5243024e8afSRuslan Bukin iommu_gas_alloc_region(struct iommu_domain *domain, struct iommu_map_entry *entry, 5253024e8afSRuslan Bukin u_int flags) 5263024e8afSRuslan Bukin { 5273024e8afSRuslan Bukin struct iommu_map_entry *next, *prev; 5283024e8afSRuslan Bukin 5293024e8afSRuslan Bukin IOMMU_DOMAIN_ASSERT_LOCKED(domain); 5303024e8afSRuslan Bukin 5313024e8afSRuslan Bukin if ((entry->start & IOMMU_PAGE_MASK) != 0 || 5323024e8afSRuslan Bukin (entry->end & IOMMU_PAGE_MASK) != 0) 5333024e8afSRuslan Bukin return (EINVAL); 5343024e8afSRuslan Bukin if (entry->start >= entry->end) 5353024e8afSRuslan Bukin return (EINVAL); 5363024e8afSRuslan Bukin if (entry->end >= domain->end) 5373024e8afSRuslan Bukin return (EINVAL); 5383024e8afSRuslan Bukin 5393024e8afSRuslan Bukin next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, entry); 5403024e8afSRuslan Bukin KASSERT(next != NULL, ("next must be non-null %p %jx", domain, 5413024e8afSRuslan Bukin (uintmax_t)entry->start)); 5423024e8afSRuslan Bukin prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next); 5433024e8afSRuslan Bukin /* prev could be NULL */ 5443024e8afSRuslan Bukin 5453024e8afSRuslan Bukin /* 5463024e8afSRuslan Bukin * Adapt to broken BIOSes which specify overlapping RMRR 5473024e8afSRuslan Bukin * entries. 5483024e8afSRuslan Bukin * 5493024e8afSRuslan Bukin * XXXKIB: this does not handle a case when prev or next 5503024e8afSRuslan Bukin * entries are completely covered by the current one, which 5513024e8afSRuslan Bukin * extends both ways. 5523024e8afSRuslan Bukin */ 5533024e8afSRuslan Bukin if (prev != NULL && prev->end > entry->start && 5543024e8afSRuslan Bukin (prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) { 5553024e8afSRuslan Bukin if ((flags & IOMMU_MF_RMRR) == 0 || 5563024e8afSRuslan Bukin (prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0) 5573024e8afSRuslan Bukin return (EBUSY); 5583024e8afSRuslan Bukin entry->start = prev->end; 5593024e8afSRuslan Bukin } 5603024e8afSRuslan Bukin if (next->start < entry->end && 5613024e8afSRuslan Bukin (next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) { 5623024e8afSRuslan Bukin if ((flags & IOMMU_MF_RMRR) == 0 || 5633024e8afSRuslan Bukin (next->flags & IOMMU_MAP_ENTRY_RMRR) == 0) 5643024e8afSRuslan Bukin return (EBUSY); 5653024e8afSRuslan Bukin entry->end = next->start; 5663024e8afSRuslan Bukin } 5673024e8afSRuslan Bukin if (entry->end == entry->start) 5683024e8afSRuslan Bukin return (0); 5693024e8afSRuslan Bukin 5703024e8afSRuslan Bukin if (prev != NULL && prev->end > entry->start) { 5713024e8afSRuslan Bukin /* This assumes that prev is the placeholder entry. */ 5723024e8afSRuslan Bukin iommu_gas_rb_remove(domain, prev); 5733024e8afSRuslan Bukin prev = NULL; 5743024e8afSRuslan Bukin } 575368ee2f8SDoug Moore RB_INSERT_PREV(iommu_gas_entries_tree, 576368ee2f8SDoug Moore &domain->rb_root, next, entry); 5773024e8afSRuslan Bukin if (next->start < entry->end) { 5783024e8afSRuslan Bukin iommu_gas_rb_remove(domain, next); 5793024e8afSRuslan Bukin next = NULL; 5803024e8afSRuslan Bukin } 5813024e8afSRuslan Bukin 5823024e8afSRuslan Bukin if ((flags & IOMMU_MF_RMRR) != 0) 5833024e8afSRuslan Bukin entry->flags = IOMMU_MAP_ENTRY_RMRR; 5843024e8afSRuslan Bukin 5853024e8afSRuslan Bukin #ifdef INVARIANTS 5863024e8afSRuslan Bukin struct iommu_map_entry *ip, *in; 5873024e8afSRuslan Bukin ip = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry); 5883024e8afSRuslan Bukin in = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry); 5893024e8afSRuslan Bukin KASSERT(prev == NULL || ip == prev, 5903024e8afSRuslan Bukin ("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)", 5913024e8afSRuslan Bukin entry, entry->start, entry->end, prev, 5923024e8afSRuslan Bukin prev == NULL ? 0 : prev->start, prev == NULL ? 0 : prev->end, 5933024e8afSRuslan Bukin ip, ip == NULL ? 0 : ip->start, ip == NULL ? 0 : ip->end)); 5943024e8afSRuslan Bukin KASSERT(next == NULL || in == next, 5953024e8afSRuslan Bukin ("RMRR %p (%jx %jx) next %p (%jx %jx) ins next %p (%jx %jx)", 5963024e8afSRuslan Bukin entry, entry->start, entry->end, next, 5973024e8afSRuslan Bukin next == NULL ? 0 : next->start, next == NULL ? 0 : next->end, 5983024e8afSRuslan Bukin in, in == NULL ? 0 : in->start, in == NULL ? 0 : in->end)); 5993024e8afSRuslan Bukin #endif 6003024e8afSRuslan Bukin 6013024e8afSRuslan Bukin return (0); 6023024e8afSRuslan Bukin } 6033024e8afSRuslan Bukin 6043024e8afSRuslan Bukin void 6054670f908SAlan Cox iommu_gas_free_space(struct iommu_map_entry *entry) 6063024e8afSRuslan Bukin { 6074670f908SAlan Cox struct iommu_domain *domain; 6083024e8afSRuslan Bukin 6094670f908SAlan Cox domain = entry->domain; 6103024e8afSRuslan Bukin KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR | 6113024e8afSRuslan Bukin IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP, 6123024e8afSRuslan Bukin ("permanent entry %p %p", domain, entry)); 6133024e8afSRuslan Bukin 6144670f908SAlan Cox IOMMU_DOMAIN_LOCK(domain); 6153024e8afSRuslan Bukin iommu_gas_rb_remove(domain, entry); 6163024e8afSRuslan Bukin entry->flags &= ~IOMMU_MAP_ENTRY_MAP; 6173024e8afSRuslan Bukin #ifdef INVARIANTS 6183024e8afSRuslan Bukin if (iommu_check_free) 6193024e8afSRuslan Bukin iommu_gas_check_free(domain); 6203024e8afSRuslan Bukin #endif 6214670f908SAlan Cox IOMMU_DOMAIN_UNLOCK(domain); 6223024e8afSRuslan Bukin } 6233024e8afSRuslan Bukin 6243024e8afSRuslan Bukin void 6254670f908SAlan Cox iommu_gas_free_region(struct iommu_map_entry *entry) 6263024e8afSRuslan Bukin { 6274670f908SAlan Cox struct iommu_domain *domain; 6283024e8afSRuslan Bukin 6294670f908SAlan Cox domain = entry->domain; 6303024e8afSRuslan Bukin KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR | 6313024e8afSRuslan Bukin IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR, 6323024e8afSRuslan Bukin ("non-RMRR entry %p %p", domain, entry)); 6333024e8afSRuslan Bukin 6344670f908SAlan Cox IOMMU_DOMAIN_LOCK(domain); 63587cd087aSDoug Moore if (entry != domain->first_place && 63687cd087aSDoug Moore entry != domain->last_place) 6373024e8afSRuslan Bukin iommu_gas_rb_remove(domain, entry); 6383024e8afSRuslan Bukin entry->flags &= ~IOMMU_MAP_ENTRY_RMRR; 6394670f908SAlan Cox IOMMU_DOMAIN_UNLOCK(domain); 6403024e8afSRuslan Bukin } 6413024e8afSRuslan Bukin 642c9e4d250SKonstantin Belousov static struct iommu_map_entry * 643c9e4d250SKonstantin Belousov iommu_gas_remove_clip_left(struct iommu_domain *domain, iommu_gaddr_t start, 644c9e4d250SKonstantin Belousov iommu_gaddr_t end, struct iommu_map_entry **r) 645c9e4d250SKonstantin Belousov { 646c9e4d250SKonstantin Belousov struct iommu_map_entry *entry, *res, fentry; 647c9e4d250SKonstantin Belousov 648c9e4d250SKonstantin Belousov IOMMU_DOMAIN_ASSERT_LOCKED(domain); 649c9e4d250SKonstantin Belousov MPASS(start <= end); 65087cd087aSDoug Moore MPASS(end <= domain->end); 651c9e4d250SKonstantin Belousov 652c9e4d250SKonstantin Belousov /* 653c9e4d250SKonstantin Belousov * Find an entry which contains the supplied guest's address 654c9e4d250SKonstantin Belousov * start, or the first entry after the start. Since we 655c9e4d250SKonstantin Belousov * asserted that start is below domain end, entry should 656c9e4d250SKonstantin Belousov * exist. Then clip it if needed. 657c9e4d250SKonstantin Belousov */ 658c9e4d250SKonstantin Belousov fentry.start = start + 1; 659c9e4d250SKonstantin Belousov fentry.end = start + 1; 660c9e4d250SKonstantin Belousov entry = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &fentry); 661c9e4d250SKonstantin Belousov 662c9e4d250SKonstantin Belousov if (entry->start >= start || 663c9e4d250SKonstantin Belousov (entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) 664c9e4d250SKonstantin Belousov return (entry); 665c9e4d250SKonstantin Belousov 666c9e4d250SKonstantin Belousov res = *r; 667c9e4d250SKonstantin Belousov *r = NULL; 668c9e4d250SKonstantin Belousov *res = *entry; 669c9e4d250SKonstantin Belousov res->start = entry->end = start; 670c9e4d250SKonstantin Belousov RB_UPDATE_AUGMENT(entry, rb_entry); 671368ee2f8SDoug Moore RB_INSERT_NEXT(iommu_gas_entries_tree, 672368ee2f8SDoug Moore &domain->rb_root, entry, res); 673c9e4d250SKonstantin Belousov return (res); 674c9e4d250SKonstantin Belousov } 675c9e4d250SKonstantin Belousov 676c9e4d250SKonstantin Belousov static bool 677c9e4d250SKonstantin Belousov iommu_gas_remove_clip_right(struct iommu_domain *domain, 678c9e4d250SKonstantin Belousov iommu_gaddr_t end, struct iommu_map_entry *entry, 679c9e4d250SKonstantin Belousov struct iommu_map_entry *r) 680c9e4d250SKonstantin Belousov { 681c9e4d250SKonstantin Belousov if (entry->start >= end || (entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) 682c9e4d250SKonstantin Belousov return (false); 683c9e4d250SKonstantin Belousov 684c9e4d250SKonstantin Belousov *r = *entry; 685c9e4d250SKonstantin Belousov r->end = entry->start = end; 686c9e4d250SKonstantin Belousov RB_UPDATE_AUGMENT(entry, rb_entry); 687368ee2f8SDoug Moore RB_INSERT_PREV(iommu_gas_entries_tree, 688368ee2f8SDoug Moore &domain->rb_root, entry, r); 689c9e4d250SKonstantin Belousov return (true); 690c9e4d250SKonstantin Belousov } 691c9e4d250SKonstantin Belousov 692c9e4d250SKonstantin Belousov static void 693c9e4d250SKonstantin Belousov iommu_gas_remove_unmap(struct iommu_domain *domain, 694c9e4d250SKonstantin Belousov struct iommu_map_entry *entry, struct iommu_map_entries_tailq *gcp) 695c9e4d250SKonstantin Belousov { 696c9e4d250SKonstantin Belousov IOMMU_DOMAIN_ASSERT_LOCKED(domain); 697c9e4d250SKonstantin Belousov 698c9e4d250SKonstantin Belousov if ((entry->flags & (IOMMU_MAP_ENTRY_UNMAPPED | 699c9e4d250SKonstantin Belousov IOMMU_MAP_ENTRY_REMOVING)) != 0) 700c9e4d250SKonstantin Belousov return; 701c9e4d250SKonstantin Belousov MPASS((entry->flags & IOMMU_MAP_ENTRY_PLACE) == 0); 702c9e4d250SKonstantin Belousov entry->flags |= IOMMU_MAP_ENTRY_REMOVING; 703c9e4d250SKonstantin Belousov TAILQ_INSERT_TAIL(gcp, entry, dmamap_link); 704c9e4d250SKonstantin Belousov } 705c9e4d250SKonstantin Belousov 706c9e4d250SKonstantin Belousov /* 707c9e4d250SKonstantin Belousov * Remove specified range from the GAS of the domain. Note that the 708c9e4d250SKonstantin Belousov * removal is not guaranteed to occur upon the function return, it 709c9e4d250SKonstantin Belousov * might be finalized some time after, when hardware reports that 710c9e4d250SKonstantin Belousov * (queued) IOTLB invalidation was performed. 711c9e4d250SKonstantin Belousov */ 712c9e4d250SKonstantin Belousov void 713c9e4d250SKonstantin Belousov iommu_gas_remove(struct iommu_domain *domain, iommu_gaddr_t start, 714c9e4d250SKonstantin Belousov iommu_gaddr_t size) 715c9e4d250SKonstantin Belousov { 716c9e4d250SKonstantin Belousov struct iommu_map_entry *entry, *nentry, *r1, *r2; 717c9e4d250SKonstantin Belousov struct iommu_map_entries_tailq gc; 718c9e4d250SKonstantin Belousov iommu_gaddr_t end; 719c9e4d250SKonstantin Belousov 720c9e4d250SKonstantin Belousov end = start + size; 721c9e4d250SKonstantin Belousov r1 = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK); 722c9e4d250SKonstantin Belousov r2 = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK); 723c9e4d250SKonstantin Belousov TAILQ_INIT(&gc); 724c9e4d250SKonstantin Belousov 725c9e4d250SKonstantin Belousov IOMMU_DOMAIN_LOCK(domain); 726c9e4d250SKonstantin Belousov 727c9e4d250SKonstantin Belousov nentry = iommu_gas_remove_clip_left(domain, start, end, &r1); 728c9e4d250SKonstantin Belousov RB_FOREACH_FROM(entry, iommu_gas_entries_tree, nentry) { 729c9e4d250SKonstantin Belousov if (entry->start >= end) 730c9e4d250SKonstantin Belousov break; 731c9e4d250SKonstantin Belousov KASSERT(start <= entry->start, 732c9e4d250SKonstantin Belousov ("iommu_gas_remove entry (%#jx, %#jx) start %#jx", 733c9e4d250SKonstantin Belousov entry->start, entry->end, start)); 734c9e4d250SKonstantin Belousov if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) 735c9e4d250SKonstantin Belousov continue; 736c9e4d250SKonstantin Belousov iommu_gas_remove_unmap(domain, entry, &gc); 737c9e4d250SKonstantin Belousov } 738c9e4d250SKonstantin Belousov if (iommu_gas_remove_clip_right(domain, end, entry, r2)) { 739c9e4d250SKonstantin Belousov iommu_gas_remove_unmap(domain, r2, &gc); 740c9e4d250SKonstantin Belousov r2 = NULL; 741c9e4d250SKonstantin Belousov } 742c9e4d250SKonstantin Belousov 743c9e4d250SKonstantin Belousov #ifdef INVARIANTS 744c9e4d250SKonstantin Belousov RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) { 745c9e4d250SKonstantin Belousov if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) 746c9e4d250SKonstantin Belousov continue; 747c9e4d250SKonstantin Belousov KASSERT(entry->end <= start || entry->start >= end, 748c9e4d250SKonstantin Belousov ("iommu_gas_remove leftover entry (%#jx, %#jx) range " 749c9e4d250SKonstantin Belousov "(%#jx, %#jx)", 750c9e4d250SKonstantin Belousov entry->start, entry->end, start, end)); 751c9e4d250SKonstantin Belousov } 752c9e4d250SKonstantin Belousov #endif 753c9e4d250SKonstantin Belousov 754c9e4d250SKonstantin Belousov IOMMU_DOMAIN_UNLOCK(domain); 755c9e4d250SKonstantin Belousov if (r1 != NULL) 756c9e4d250SKonstantin Belousov iommu_gas_free_entry(r1); 757c9e4d250SKonstantin Belousov if (r2 != NULL) 758c9e4d250SKonstantin Belousov iommu_gas_free_entry(r2); 759c9e4d250SKonstantin Belousov iommu_domain_unload(domain, &gc, true); 760c9e4d250SKonstantin Belousov } 761c9e4d250SKonstantin Belousov 7623024e8afSRuslan Bukin int 7633024e8afSRuslan Bukin iommu_gas_map(struct iommu_domain *domain, 7643024e8afSRuslan Bukin const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset, 7653024e8afSRuslan Bukin u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res) 7663024e8afSRuslan Bukin { 767e0e8d0c8SDoug Moore struct iommu_gas_match_args a; 7683024e8afSRuslan Bukin struct iommu_map_entry *entry; 7693024e8afSRuslan Bukin int error; 7703024e8afSRuslan Bukin 7713024e8afSRuslan Bukin KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0, 7723024e8afSRuslan Bukin ("invalid flags 0x%x", flags)); 7733024e8afSRuslan Bukin 774e0e8d0c8SDoug Moore a.size = size; 775e0e8d0c8SDoug Moore a.offset = offset; 776e0e8d0c8SDoug Moore a.common = common; 777e0e8d0c8SDoug Moore a.gas_flags = flags; 7783024e8afSRuslan Bukin entry = iommu_gas_alloc_entry(domain, 77915f6baf4SRuslan Bukin (flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0); 7803024e8afSRuslan Bukin if (entry == NULL) 7813024e8afSRuslan Bukin return (ENOMEM); 782e0e8d0c8SDoug Moore a.entry = entry; 7833024e8afSRuslan Bukin IOMMU_DOMAIN_LOCK(domain); 7848b221ca6SDoug Moore error = iommu_gas_find_space(domain, &a); 7853024e8afSRuslan Bukin if (error == ENOMEM) { 7863024e8afSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 7874670f908SAlan Cox iommu_gas_free_entry(entry); 7883024e8afSRuslan Bukin return (error); 7893024e8afSRuslan Bukin } 7903024e8afSRuslan Bukin #ifdef INVARIANTS 7913024e8afSRuslan Bukin if (iommu_check_free) 7923024e8afSRuslan Bukin iommu_gas_check_free(domain); 7933024e8afSRuslan Bukin #endif 7943024e8afSRuslan Bukin KASSERT(error == 0, 7953024e8afSRuslan Bukin ("unexpected error %d from iommu_gas_find_entry", error)); 7963024e8afSRuslan Bukin KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx", 7973024e8afSRuslan Bukin (uintmax_t)entry->end, (uintmax_t)domain->end)); 7983024e8afSRuslan Bukin entry->flags |= eflags; 7993024e8afSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 8003024e8afSRuslan Bukin 8010eed04c8SRuslan Bukin error = domain->ops->map(domain, entry->start, 8020eed04c8SRuslan Bukin entry->end - entry->start, ma, eflags, 80315f6baf4SRuslan Bukin ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0)); 8043024e8afSRuslan Bukin if (error == ENOMEM) { 8058bc36738SAlan Cox iommu_domain_unload_entry(entry, true, 8068bc36738SAlan Cox (flags & IOMMU_MF_CANWAIT) != 0); 8073024e8afSRuslan Bukin return (error); 8083024e8afSRuslan Bukin } 8093024e8afSRuslan Bukin KASSERT(error == 0, 8103024e8afSRuslan Bukin ("unexpected error %d from domain_map_buf", error)); 8113024e8afSRuslan Bukin 8123024e8afSRuslan Bukin *res = entry; 8133024e8afSRuslan Bukin return (0); 8143024e8afSRuslan Bukin } 8153024e8afSRuslan Bukin 8163024e8afSRuslan Bukin int 8173024e8afSRuslan Bukin iommu_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry, 8183024e8afSRuslan Bukin u_int eflags, u_int flags, vm_page_t *ma) 8193024e8afSRuslan Bukin { 8203024e8afSRuslan Bukin iommu_gaddr_t start; 8213024e8afSRuslan Bukin int error; 8223024e8afSRuslan Bukin 8234670f908SAlan Cox KASSERT(entry->domain == domain, 8244670f908SAlan Cox ("mismatched domain %p entry %p entry->domain %p", domain, 8254670f908SAlan Cox entry, entry->domain)); 8263024e8afSRuslan Bukin KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain, 8273024e8afSRuslan Bukin entry, entry->flags)); 8283024e8afSRuslan Bukin KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0, 8293024e8afSRuslan Bukin ("invalid flags 0x%x", flags)); 8303024e8afSRuslan Bukin 8313024e8afSRuslan Bukin start = entry->start; 8323024e8afSRuslan Bukin IOMMU_DOMAIN_LOCK(domain); 8333024e8afSRuslan Bukin error = iommu_gas_alloc_region(domain, entry, flags); 8343024e8afSRuslan Bukin if (error != 0) { 8353024e8afSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 8363024e8afSRuslan Bukin return (error); 8373024e8afSRuslan Bukin } 8383024e8afSRuslan Bukin entry->flags |= eflags; 8393024e8afSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 8403024e8afSRuslan Bukin if (entry->end == entry->start) 8413024e8afSRuslan Bukin return (0); 8423024e8afSRuslan Bukin 8430eed04c8SRuslan Bukin error = domain->ops->map(domain, entry->start, 8440eed04c8SRuslan Bukin entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start), 8450eed04c8SRuslan Bukin eflags, ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0)); 8463024e8afSRuslan Bukin if (error == ENOMEM) { 8478bc36738SAlan Cox iommu_domain_unload_entry(entry, false, 8488bc36738SAlan Cox (flags & IOMMU_MF_CANWAIT) != 0); 8493024e8afSRuslan Bukin return (error); 8503024e8afSRuslan Bukin } 8513024e8afSRuslan Bukin KASSERT(error == 0, 8523024e8afSRuslan Bukin ("unexpected error %d from domain_map_buf", error)); 8533024e8afSRuslan Bukin 8543024e8afSRuslan Bukin return (0); 8553024e8afSRuslan Bukin } 8563024e8afSRuslan Bukin 857ee47a12aSRyan Libby static int 858ee47a12aSRyan Libby iommu_gas_reserve_region_locked(struct iommu_domain *domain, 859ee47a12aSRyan Libby iommu_gaddr_t start, iommu_gaddr_t end, struct iommu_map_entry *entry) 860ee47a12aSRyan Libby { 861ee47a12aSRyan Libby int error; 862ee47a12aSRyan Libby 863ee47a12aSRyan Libby IOMMU_DOMAIN_ASSERT_LOCKED(domain); 864ee47a12aSRyan Libby 865ee47a12aSRyan Libby entry->start = start; 866ee47a12aSRyan Libby entry->end = end; 867ee47a12aSRyan Libby error = iommu_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT); 868ee47a12aSRyan Libby if (error == 0) 869ee47a12aSRyan Libby entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED; 870ee47a12aSRyan Libby return (error); 871ee47a12aSRyan Libby } 872ee47a12aSRyan Libby 8733024e8afSRuslan Bukin int 8743024e8afSRuslan Bukin iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start, 87594dfb28eSRuslan Bukin iommu_gaddr_t end, struct iommu_map_entry **entry0) 8763024e8afSRuslan Bukin { 8773024e8afSRuslan Bukin struct iommu_map_entry *entry; 8783024e8afSRuslan Bukin int error; 8793024e8afSRuslan Bukin 88015f6baf4SRuslan Bukin entry = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK); 8813024e8afSRuslan Bukin IOMMU_DOMAIN_LOCK(domain); 882ee47a12aSRyan Libby error = iommu_gas_reserve_region_locked(domain, start, end, entry); 8833024e8afSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 8843024e8afSRuslan Bukin if (error != 0) 8854670f908SAlan Cox iommu_gas_free_entry(entry); 88694dfb28eSRuslan Bukin else if (entry0 != NULL) 88794dfb28eSRuslan Bukin *entry0 = entry; 8883024e8afSRuslan Bukin return (error); 8893024e8afSRuslan Bukin } 8903024e8afSRuslan Bukin 891ee47a12aSRyan Libby /* 892ee47a12aSRyan Libby * As in iommu_gas_reserve_region, reserve [start, end), but allow for existing 893ee47a12aSRyan Libby * entries. 894ee47a12aSRyan Libby */ 895ee47a12aSRyan Libby int 896ee47a12aSRyan Libby iommu_gas_reserve_region_extend(struct iommu_domain *domain, 897ee47a12aSRyan Libby iommu_gaddr_t start, iommu_gaddr_t end) 898ee47a12aSRyan Libby { 899ee47a12aSRyan Libby struct iommu_map_entry *entry, *next, *prev, key = {}; 900ee47a12aSRyan Libby iommu_gaddr_t entry_start, entry_end; 901ee47a12aSRyan Libby int error; 902ee47a12aSRyan Libby 903ee47a12aSRyan Libby error = 0; 904ee47a12aSRyan Libby entry = NULL; 905ee47a12aSRyan Libby end = ummin(end, domain->end); 906ee47a12aSRyan Libby while (start < end) { 907ee47a12aSRyan Libby /* Preallocate an entry. */ 908ee47a12aSRyan Libby if (entry == NULL) 909ee47a12aSRyan Libby entry = iommu_gas_alloc_entry(domain, 910ee47a12aSRyan Libby IOMMU_PGF_WAITOK); 911ee47a12aSRyan Libby /* Calculate the free region from here to the next entry. */ 912ee47a12aSRyan Libby key.start = key.end = start; 913ee47a12aSRyan Libby IOMMU_DOMAIN_LOCK(domain); 914ee47a12aSRyan Libby next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &key); 915ee47a12aSRyan Libby KASSERT(next != NULL, ("domain %p with end %#jx has no entry " 916ee47a12aSRyan Libby "after %#jx", domain, (uintmax_t)domain->end, 917ee47a12aSRyan Libby (uintmax_t)start)); 918ee47a12aSRyan Libby entry_end = ummin(end, next->start); 919ee47a12aSRyan Libby prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next); 920ee47a12aSRyan Libby if (prev != NULL) 921ee47a12aSRyan Libby entry_start = ummax(start, prev->end); 922ee47a12aSRyan Libby else 923ee47a12aSRyan Libby entry_start = start; 924ee47a12aSRyan Libby start = next->end; 925ee47a12aSRyan Libby /* Reserve the region if non-empty. */ 926ee47a12aSRyan Libby if (entry_start != entry_end) { 927ee47a12aSRyan Libby error = iommu_gas_reserve_region_locked(domain, 928ee47a12aSRyan Libby entry_start, entry_end, entry); 9290ba1d860SAlan Cox if (error != 0) { 9300ba1d860SAlan Cox IOMMU_DOMAIN_UNLOCK(domain); 931ee47a12aSRyan Libby break; 9320ba1d860SAlan Cox } 933ee47a12aSRyan Libby entry = NULL; 934ee47a12aSRyan Libby } 935ee47a12aSRyan Libby IOMMU_DOMAIN_UNLOCK(domain); 936ee47a12aSRyan Libby } 937ee47a12aSRyan Libby /* Release a preallocated entry if it was not used. */ 938ee47a12aSRyan Libby if (entry != NULL) 9394670f908SAlan Cox iommu_gas_free_entry(entry); 940ee47a12aSRyan Libby return (error); 941ee47a12aSRyan Libby } 942ee47a12aSRyan Libby 943f32f0095SRuslan Bukin void 944f32f0095SRuslan Bukin iommu_unmap_msi(struct iommu_ctx *ctx) 945f32f0095SRuslan Bukin { 946f32f0095SRuslan Bukin struct iommu_map_entry *entry; 947f32f0095SRuslan Bukin struct iommu_domain *domain; 948f32f0095SRuslan Bukin 949f32f0095SRuslan Bukin domain = ctx->domain; 950f32f0095SRuslan Bukin entry = domain->msi_entry; 951f32f0095SRuslan Bukin if (entry == NULL) 952f32f0095SRuslan Bukin return; 953f32f0095SRuslan Bukin 954f32f0095SRuslan Bukin domain->ops->unmap(domain, entry->start, entry->end - 955f32f0095SRuslan Bukin entry->start, IOMMU_PGF_WAITOK); 956f32f0095SRuslan Bukin 9574670f908SAlan Cox iommu_gas_free_space(entry); 958f32f0095SRuslan Bukin 9594670f908SAlan Cox iommu_gas_free_entry(entry); 960f32f0095SRuslan Bukin 961f32f0095SRuslan Bukin domain->msi_entry = NULL; 962f32f0095SRuslan Bukin domain->msi_base = 0; 963f32f0095SRuslan Bukin domain->msi_phys = 0; 964f32f0095SRuslan Bukin } 965f32f0095SRuslan Bukin 9663024e8afSRuslan Bukin int 967e707c8beSRuslan Bukin iommu_map_msi(struct iommu_ctx *ctx, iommu_gaddr_t size, int offset, 968e707c8beSRuslan Bukin u_int eflags, u_int flags, vm_page_t *ma) 969e707c8beSRuslan Bukin { 970e707c8beSRuslan Bukin struct iommu_domain *domain; 971e707c8beSRuslan Bukin struct iommu_map_entry *entry; 972e707c8beSRuslan Bukin int error; 973e707c8beSRuslan Bukin 974e707c8beSRuslan Bukin error = 0; 975e707c8beSRuslan Bukin domain = ctx->domain; 976e707c8beSRuslan Bukin 977e707c8beSRuslan Bukin /* Check if there is already an MSI page allocated */ 978e707c8beSRuslan Bukin IOMMU_DOMAIN_LOCK(domain); 979e707c8beSRuslan Bukin entry = domain->msi_entry; 980e707c8beSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 981e707c8beSRuslan Bukin 982e707c8beSRuslan Bukin if (entry == NULL) { 983e707c8beSRuslan Bukin error = iommu_gas_map(domain, &ctx->tag->common, size, offset, 984e707c8beSRuslan Bukin eflags, flags, ma, &entry); 985e707c8beSRuslan Bukin IOMMU_DOMAIN_LOCK(domain); 986e707c8beSRuslan Bukin if (error == 0) { 987e707c8beSRuslan Bukin if (domain->msi_entry == NULL) { 988e707c8beSRuslan Bukin MPASS(domain->msi_base == 0); 989e707c8beSRuslan Bukin MPASS(domain->msi_phys == 0); 990e707c8beSRuslan Bukin 991e707c8beSRuslan Bukin domain->msi_entry = entry; 992e707c8beSRuslan Bukin domain->msi_base = entry->start; 993e707c8beSRuslan Bukin domain->msi_phys = VM_PAGE_TO_PHYS(ma[0]); 994e707c8beSRuslan Bukin } else { 995e707c8beSRuslan Bukin /* 996e707c8beSRuslan Bukin * We lost the race and already have an 997e707c8beSRuslan Bukin * MSI page allocated. Free the unneeded entry. 998e707c8beSRuslan Bukin */ 9994670f908SAlan Cox iommu_gas_free_entry(entry); 1000e707c8beSRuslan Bukin } 1001e707c8beSRuslan Bukin } else if (domain->msi_entry != NULL) { 1002e707c8beSRuslan Bukin /* 1003e707c8beSRuslan Bukin * The allocation failed, but another succeeded. 1004e707c8beSRuslan Bukin * Return success as there is a valid MSI page. 1005e707c8beSRuslan Bukin */ 1006e707c8beSRuslan Bukin error = 0; 1007e707c8beSRuslan Bukin } 1008e707c8beSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 1009e707c8beSRuslan Bukin } 1010e707c8beSRuslan Bukin 1011e707c8beSRuslan Bukin return (error); 1012e707c8beSRuslan Bukin } 1013e707c8beSRuslan Bukin 1014e707c8beSRuslan Bukin void 1015e707c8beSRuslan Bukin iommu_translate_msi(struct iommu_domain *domain, uint64_t *addr) 1016e707c8beSRuslan Bukin { 1017e707c8beSRuslan Bukin 1018e707c8beSRuslan Bukin *addr = (*addr - domain->msi_phys) + domain->msi_base; 1019e707c8beSRuslan Bukin 1020e707c8beSRuslan Bukin KASSERT(*addr >= domain->msi_entry->start, 1021e707c8beSRuslan Bukin ("%s: Address is below the MSI entry start address (%jx < %jx)", 1022e707c8beSRuslan Bukin __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->start)); 1023e707c8beSRuslan Bukin 1024e707c8beSRuslan Bukin KASSERT(*addr + sizeof(*addr) <= domain->msi_entry->end, 1025e707c8beSRuslan Bukin ("%s: Address is above the MSI entry end address (%jx < %jx)", 1026e707c8beSRuslan Bukin __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->end)); 1027e707c8beSRuslan Bukin } 1028e707c8beSRuslan Bukin 1029357149f0SRuslan Bukin SYSCTL_NODE(_hw, OID_AUTO, iommu, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, ""); 1030357149f0SRuslan Bukin 10319c843a40SRuslan Bukin #ifdef INVARIANTS 10329c843a40SRuslan Bukin SYSCTL_INT(_hw_iommu, OID_AUTO, check_free, CTLFLAG_RWTUN, 10339c843a40SRuslan Bukin &iommu_check_free, 0, 10349c843a40SRuslan Bukin "Check the GPA RBtree for free_down and free_after validity"); 10359c843a40SRuslan Bukin #endif 103630ce85caSKonstantin Belousov 103730ce85caSKonstantin Belousov #include "opt_ddb.h" 103830ce85caSKonstantin Belousov #ifdef DDB 103930ce85caSKonstantin Belousov 104030ce85caSKonstantin Belousov #include <ddb/ddb.h> 104130ce85caSKonstantin Belousov 104230ce85caSKonstantin Belousov static void 104330ce85caSKonstantin Belousov iommu_debug_dump_gas(struct iommu_domain *domain) 104430ce85caSKonstantin Belousov { 104530ce85caSKonstantin Belousov struct iommu_map_entry *entry; 104630ce85caSKonstantin Belousov 104730ce85caSKonstantin Belousov db_printf("iommu_domain %p tree %p iommu %p fl %#x\n", domain, 104830ce85caSKonstantin Belousov &domain->rb_root, domain->iommu, domain->flags); 104930ce85caSKonstantin Belousov db_printf("iommu_domain %p tree %p\n", domain, &domain->rb_root); 105030ce85caSKonstantin Belousov RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) { 105130ce85caSKonstantin Belousov db_printf( 105230ce85caSKonstantin Belousov " e %p [%#jx %#jx] fl %#x first %#jx last %#jx free_down %#jx", 105330ce85caSKonstantin Belousov entry, (uintmax_t)entry->start, (uintmax_t)entry->end, 105430ce85caSKonstantin Belousov entry->flags, 105530ce85caSKonstantin Belousov (uintmax_t)entry->first, (uintmax_t)entry->last, 105630ce85caSKonstantin Belousov (uintmax_t)entry->free_down); 105730ce85caSKonstantin Belousov if (entry == domain->start_gap) 105830ce85caSKonstantin Belousov db_printf(" start_gap"); 105930ce85caSKonstantin Belousov if (entry == domain->first_place) 106030ce85caSKonstantin Belousov db_printf(" first_place"); 106130ce85caSKonstantin Belousov if (entry == domain->last_place) 106230ce85caSKonstantin Belousov db_printf(" last_place"); 106330ce85caSKonstantin Belousov db_printf("\n"); 106430ce85caSKonstantin Belousov } 106530ce85caSKonstantin Belousov } 106630ce85caSKonstantin Belousov 106730ce85caSKonstantin Belousov DB_SHOW_COMMAND(iommu_domain, iommu_domain_show) 106830ce85caSKonstantin Belousov { 106930ce85caSKonstantin Belousov struct iommu_domain *domain; 107030ce85caSKonstantin Belousov 107130ce85caSKonstantin Belousov if (!have_addr) { 107230ce85caSKonstantin Belousov db_printf("show iommu_domain addr\n"); 107330ce85caSKonstantin Belousov return; 107430ce85caSKonstantin Belousov } 107530ce85caSKonstantin Belousov 107630ce85caSKonstantin Belousov domain = (void *)addr; 107730ce85caSKonstantin Belousov iommu_debug_dump_gas(domain); 107830ce85caSKonstantin Belousov } 107930ce85caSKonstantin Belousov 108030ce85caSKonstantin Belousov #endif 1081