13024e8afSRuslan Bukin /*- 23024e8afSRuslan Bukin * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 33024e8afSRuslan Bukin * 43024e8afSRuslan Bukin * Copyright (c) 2013 The FreeBSD Foundation 53024e8afSRuslan Bukin * 63024e8afSRuslan Bukin * This software was developed by Konstantin Belousov <kib@FreeBSD.org> 73024e8afSRuslan Bukin * under sponsorship from the FreeBSD Foundation. 83024e8afSRuslan Bukin * 93024e8afSRuslan Bukin * Redistribution and use in source and binary forms, with or without 103024e8afSRuslan Bukin * modification, are permitted provided that the following conditions 113024e8afSRuslan Bukin * are met: 123024e8afSRuslan Bukin * 1. Redistributions of source code must retain the above copyright 133024e8afSRuslan Bukin * notice, this list of conditions and the following disclaimer. 143024e8afSRuslan Bukin * 2. Redistributions in binary form must reproduce the above copyright 153024e8afSRuslan Bukin * notice, this list of conditions and the following disclaimer in the 163024e8afSRuslan Bukin * documentation and/or other materials provided with the distribution. 173024e8afSRuslan Bukin * 183024e8afSRuslan Bukin * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 193024e8afSRuslan Bukin * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 203024e8afSRuslan Bukin * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 213024e8afSRuslan Bukin * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 223024e8afSRuslan Bukin * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 233024e8afSRuslan Bukin * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 243024e8afSRuslan Bukin * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 253024e8afSRuslan Bukin * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 263024e8afSRuslan Bukin * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 273024e8afSRuslan Bukin * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 283024e8afSRuslan Bukin * SUCH DAMAGE. 293024e8afSRuslan Bukin */ 303024e8afSRuslan Bukin 313024e8afSRuslan Bukin #include <sys/cdefs.h> 323024e8afSRuslan Bukin __FBSDID("$FreeBSD$"); 333024e8afSRuslan Bukin 34b16f993eSDoug Moore #define RB_AUGMENT_CHECK(entry) iommu_gas_augment_entry(entry) 353024e8afSRuslan Bukin 363024e8afSRuslan Bukin #include <sys/param.h> 373024e8afSRuslan Bukin #include <sys/systm.h> 383024e8afSRuslan Bukin #include <sys/malloc.h> 393024e8afSRuslan Bukin #include <sys/bus.h> 403024e8afSRuslan Bukin #include <sys/interrupt.h> 413024e8afSRuslan Bukin #include <sys/kernel.h> 423024e8afSRuslan Bukin #include <sys/ktr.h> 433024e8afSRuslan Bukin #include <sys/lock.h> 443024e8afSRuslan Bukin #include <sys/proc.h> 453024e8afSRuslan Bukin #include <sys/rwlock.h> 463024e8afSRuslan Bukin #include <sys/memdesc.h> 473024e8afSRuslan Bukin #include <sys/mutex.h> 483024e8afSRuslan Bukin #include <sys/sysctl.h> 493024e8afSRuslan Bukin #include <sys/rman.h> 503024e8afSRuslan Bukin #include <sys/taskqueue.h> 513024e8afSRuslan Bukin #include <sys/tree.h> 523024e8afSRuslan Bukin #include <sys/uio.h> 533024e8afSRuslan Bukin #include <sys/vmem.h> 543024e8afSRuslan Bukin #include <vm/vm.h> 553024e8afSRuslan Bukin #include <vm/vm_extern.h> 563024e8afSRuslan Bukin #include <vm/vm_kern.h> 573024e8afSRuslan Bukin #include <vm/vm_object.h> 583024e8afSRuslan Bukin #include <vm/vm_page.h> 593024e8afSRuslan Bukin #include <vm/vm_map.h> 603024e8afSRuslan Bukin #include <vm/uma.h> 61c8597a1fSRuslan Bukin #include <dev/pci/pcireg.h> 62c8597a1fSRuslan Bukin #include <dev/pci/pcivar.h> 63c8597a1fSRuslan Bukin #include <dev/iommu/iommu.h> 64f23f7d3aSRuslan Bukin #include <dev/iommu/iommu_gas.h> 65e707c8beSRuslan Bukin #include <dev/iommu/iommu_msi.h> 663024e8afSRuslan Bukin #include <machine/atomic.h> 673024e8afSRuslan Bukin #include <machine/bus.h> 683024e8afSRuslan Bukin #include <machine/md_var.h> 69c4cd6990SRuslan Bukin #include <machine/iommu.h> 70c8597a1fSRuslan Bukin #include <dev/iommu/busdma_iommu.h> 713024e8afSRuslan Bukin 723024e8afSRuslan Bukin /* 733024e8afSRuslan Bukin * Guest Address Space management. 743024e8afSRuslan Bukin */ 753024e8afSRuslan Bukin 763024e8afSRuslan Bukin static uma_zone_t iommu_map_entry_zone; 773024e8afSRuslan Bukin 789c843a40SRuslan Bukin #ifdef INVARIANTS 799c843a40SRuslan Bukin static int iommu_check_free; 809c843a40SRuslan Bukin #endif 819c843a40SRuslan Bukin 823024e8afSRuslan Bukin static void 833024e8afSRuslan Bukin intel_gas_init(void) 843024e8afSRuslan Bukin { 853024e8afSRuslan Bukin 863024e8afSRuslan Bukin iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY", 873024e8afSRuslan Bukin sizeof(struct iommu_map_entry), NULL, NULL, 883024e8afSRuslan Bukin NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP); 893024e8afSRuslan Bukin } 903024e8afSRuslan Bukin SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL); 913024e8afSRuslan Bukin 923024e8afSRuslan Bukin struct iommu_map_entry * 933024e8afSRuslan Bukin iommu_gas_alloc_entry(struct iommu_domain *domain, u_int flags) 943024e8afSRuslan Bukin { 953024e8afSRuslan Bukin struct iommu_map_entry *res; 963024e8afSRuslan Bukin 9715f6baf4SRuslan Bukin KASSERT((flags & ~(IOMMU_PGF_WAITOK)) == 0, 983024e8afSRuslan Bukin ("unsupported flags %x", flags)); 993024e8afSRuslan Bukin 10015f6baf4SRuslan Bukin res = uma_zalloc(iommu_map_entry_zone, ((flags & IOMMU_PGF_WAITOK) != 1013024e8afSRuslan Bukin 0 ? M_WAITOK : M_NOWAIT) | M_ZERO); 10242736dc4SAlan Cox if (res != NULL && domain != NULL) { 1033024e8afSRuslan Bukin res->domain = domain; 1043024e8afSRuslan Bukin atomic_add_int(&domain->entries_cnt, 1); 1053024e8afSRuslan Bukin } 1063024e8afSRuslan Bukin return (res); 1073024e8afSRuslan Bukin } 1083024e8afSRuslan Bukin 1093024e8afSRuslan Bukin void 1104670f908SAlan Cox iommu_gas_free_entry(struct iommu_map_entry *entry) 1113024e8afSRuslan Bukin { 1124670f908SAlan Cox struct iommu_domain *domain; 1133024e8afSRuslan Bukin 1144670f908SAlan Cox domain = entry->domain; 11542736dc4SAlan Cox if (domain != NULL) 1163024e8afSRuslan Bukin atomic_subtract_int(&domain->entries_cnt, 1); 1173024e8afSRuslan Bukin uma_zfree(iommu_map_entry_zone, entry); 1183024e8afSRuslan Bukin } 1193024e8afSRuslan Bukin 1203024e8afSRuslan Bukin static int 1213024e8afSRuslan Bukin iommu_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b) 1223024e8afSRuslan Bukin { 1233024e8afSRuslan Bukin 1243024e8afSRuslan Bukin /* Last entry have zero size, so <= */ 1253024e8afSRuslan Bukin KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)", 1263024e8afSRuslan Bukin a, (uintmax_t)a->start, (uintmax_t)a->end)); 1273024e8afSRuslan Bukin KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)", 1283024e8afSRuslan Bukin b, (uintmax_t)b->start, (uintmax_t)b->end)); 1293024e8afSRuslan Bukin KASSERT(a->end <= b->start || b->end <= a->start || 1303024e8afSRuslan Bukin a->end == a->start || b->end == b->start, 1313024e8afSRuslan Bukin ("overlapping entries %p (%jx, %jx) %p (%jx, %jx)", 1323024e8afSRuslan Bukin a, (uintmax_t)a->start, (uintmax_t)a->end, 1333024e8afSRuslan Bukin b, (uintmax_t)b->start, (uintmax_t)b->end)); 1343024e8afSRuslan Bukin 1353024e8afSRuslan Bukin if (a->end < b->end) 1363024e8afSRuslan Bukin return (-1); 1373024e8afSRuslan Bukin else if (b->end < a->end) 1383024e8afSRuslan Bukin return (1); 1393024e8afSRuslan Bukin return (0); 1403024e8afSRuslan Bukin } 1413024e8afSRuslan Bukin 142b16f993eSDoug Moore /* 143b16f993eSDoug Moore * Update augmentation data based on data from children. 144b16f993eSDoug Moore * Return true if and only if the update changes the augmentation data. 145b16f993eSDoug Moore */ 146b16f993eSDoug Moore static bool 1473024e8afSRuslan Bukin iommu_gas_augment_entry(struct iommu_map_entry *entry) 1483024e8afSRuslan Bukin { 1493024e8afSRuslan Bukin struct iommu_map_entry *child; 150b16f993eSDoug Moore iommu_gaddr_t bound, delta, free_down; 1513024e8afSRuslan Bukin 1523024e8afSRuslan Bukin free_down = 0; 153b16f993eSDoug Moore bound = entry->start; 1543024e8afSRuslan Bukin if ((child = RB_LEFT(entry, rb_entry)) != NULL) { 155b16f993eSDoug Moore free_down = MAX(child->free_down, bound - child->last); 156b16f993eSDoug Moore bound = child->first; 157b16f993eSDoug Moore } 158b16f993eSDoug Moore delta = bound - entry->first; 159b16f993eSDoug Moore entry->first = bound; 160b16f993eSDoug Moore bound = entry->end; 1613024e8afSRuslan Bukin if ((child = RB_RIGHT(entry, rb_entry)) != NULL) { 1623024e8afSRuslan Bukin free_down = MAX(free_down, child->free_down); 163b16f993eSDoug Moore free_down = MAX(free_down, child->first - bound); 164b16f993eSDoug Moore bound = child->last; 165b16f993eSDoug Moore } 166b16f993eSDoug Moore delta += entry->last - bound; 167b16f993eSDoug Moore if (delta == 0) 168b16f993eSDoug Moore delta = entry->free_down - free_down; 169b16f993eSDoug Moore entry->last = bound; 1703024e8afSRuslan Bukin entry->free_down = free_down; 171b16f993eSDoug Moore 172b16f993eSDoug Moore /* 173b16f993eSDoug Moore * Return true either if the value of last-first changed, 174b16f993eSDoug Moore * or if free_down changed. 175b16f993eSDoug Moore */ 176b16f993eSDoug Moore return (delta != 0); 1773024e8afSRuslan Bukin } 1783024e8afSRuslan Bukin 1793024e8afSRuslan Bukin RB_GENERATE(iommu_gas_entries_tree, iommu_map_entry, rb_entry, 1803024e8afSRuslan Bukin iommu_gas_cmp_entries); 1813024e8afSRuslan Bukin 1823024e8afSRuslan Bukin #ifdef INVARIANTS 1833024e8afSRuslan Bukin static void 1843024e8afSRuslan Bukin iommu_gas_check_free(struct iommu_domain *domain) 1853024e8afSRuslan Bukin { 1863024e8afSRuslan Bukin struct iommu_map_entry *entry, *l, *r; 1873024e8afSRuslan Bukin iommu_gaddr_t v; 1883024e8afSRuslan Bukin 1893024e8afSRuslan Bukin RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) { 190b64dca2bSRuslan Bukin KASSERT(domain == entry->domain, 1913024e8afSRuslan Bukin ("mismatched free domain %p entry %p entry->domain %p", 1923024e8afSRuslan Bukin domain, entry, entry->domain)); 1933024e8afSRuslan Bukin l = RB_LEFT(entry, rb_entry); 1943024e8afSRuslan Bukin r = RB_RIGHT(entry, rb_entry); 1953024e8afSRuslan Bukin v = 0; 1963024e8afSRuslan Bukin if (l != NULL) { 1973024e8afSRuslan Bukin v = MAX(v, l->free_down); 1983024e8afSRuslan Bukin v = MAX(v, entry->start - l->last); 1993024e8afSRuslan Bukin } 2003024e8afSRuslan Bukin if (r != NULL) { 2013024e8afSRuslan Bukin v = MAX(v, r->free_down); 2023024e8afSRuslan Bukin v = MAX(v, r->first - entry->end); 2033024e8afSRuslan Bukin } 2043024e8afSRuslan Bukin MPASS(entry->free_down == v); 2053024e8afSRuslan Bukin } 2063024e8afSRuslan Bukin } 2073024e8afSRuslan Bukin #endif 2083024e8afSRuslan Bukin 2093024e8afSRuslan Bukin static void 2103024e8afSRuslan Bukin iommu_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry) 2113024e8afSRuslan Bukin { 2128b221ca6SDoug Moore struct iommu_map_entry *nbr; 2133024e8afSRuslan Bukin 2148b221ca6SDoug Moore /* Removing entry may open a new free gap before domain->start_gap. */ 2158b221ca6SDoug Moore if (entry->end <= domain->start_gap->end) { 2168b221ca6SDoug Moore if (RB_RIGHT(entry, rb_entry) != NULL) 2178b221ca6SDoug Moore nbr = iommu_gas_entries_tree_RB_NEXT(entry); 2188b221ca6SDoug Moore else if (RB_LEFT(entry, rb_entry) != NULL) 2198b221ca6SDoug Moore nbr = RB_LEFT(entry, rb_entry); 2208b221ca6SDoug Moore else 2218b221ca6SDoug Moore nbr = RB_PARENT(entry, rb_entry); 2228b221ca6SDoug Moore domain->start_gap = nbr; 2238b221ca6SDoug Moore } 2243024e8afSRuslan Bukin RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry); 2253024e8afSRuslan Bukin } 2263024e8afSRuslan Bukin 227f5cafae1SRuslan Bukin struct iommu_domain * 228f5cafae1SRuslan Bukin iommu_get_ctx_domain(struct iommu_ctx *ctx) 229f5cafae1SRuslan Bukin { 230f5cafae1SRuslan Bukin 231f5cafae1SRuslan Bukin return (ctx->domain); 232f5cafae1SRuslan Bukin } 233f5cafae1SRuslan Bukin 2343024e8afSRuslan Bukin void 2353024e8afSRuslan Bukin iommu_gas_init_domain(struct iommu_domain *domain) 2363024e8afSRuslan Bukin { 2373024e8afSRuslan Bukin struct iommu_map_entry *begin, *end; 2383024e8afSRuslan Bukin 23915f6baf4SRuslan Bukin begin = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK); 24015f6baf4SRuslan Bukin end = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK); 2413024e8afSRuslan Bukin 2423024e8afSRuslan Bukin IOMMU_DOMAIN_LOCK(domain); 2433024e8afSRuslan Bukin KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain)); 2443024e8afSRuslan Bukin KASSERT(RB_EMPTY(&domain->rb_root), 2453024e8afSRuslan Bukin ("non-empty entries %p", domain)); 2463024e8afSRuslan Bukin 247b16f993eSDoug Moore /* 248b16f993eSDoug Moore * The end entry must be inserted first because it has a zero-length gap 249b16f993eSDoug Moore * between start and end. Initially, all augmentation data for a new 250b16f993eSDoug Moore * entry is zero. Function iommu_gas_augment_entry will compute no 251b16f993eSDoug Moore * change in the value of (start-end) and no change in the value of 252b16f993eSDoug Moore * free_down, so it will return false to suggest that nothing changed in 253b16f993eSDoug Moore * the entry. Thus, inserting the end entry second prevents 254b16f993eSDoug Moore * augmentation information to be propogated to the begin entry at the 255b16f993eSDoug Moore * tree root. So it is inserted first. 256b16f993eSDoug Moore */ 2573024e8afSRuslan Bukin end->start = domain->end; 2583024e8afSRuslan Bukin end->end = domain->end; 2593024e8afSRuslan Bukin end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED; 260368ee2f8SDoug Moore RB_INSERT(iommu_gas_entries_tree, &domain->rb_root, end); 2613024e8afSRuslan Bukin 262b16f993eSDoug Moore begin->start = 0; 263b16f993eSDoug Moore begin->end = IOMMU_PAGE_SIZE; 264b16f993eSDoug Moore begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED; 265368ee2f8SDoug Moore RB_INSERT_PREV(iommu_gas_entries_tree, &domain->rb_root, end, begin); 266b16f993eSDoug Moore 2678b221ca6SDoug Moore domain->start_gap = end; 2683024e8afSRuslan Bukin domain->first_place = begin; 2693024e8afSRuslan Bukin domain->last_place = end; 27015f6baf4SRuslan Bukin domain->flags |= IOMMU_DOMAIN_GAS_INITED; 2713024e8afSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 2723024e8afSRuslan Bukin } 2733024e8afSRuslan Bukin 2743024e8afSRuslan Bukin void 2753024e8afSRuslan Bukin iommu_gas_fini_domain(struct iommu_domain *domain) 2763024e8afSRuslan Bukin { 277*a2c57c60SDoug Moore struct iommu_map_entry *entry; 2783024e8afSRuslan Bukin 2793024e8afSRuslan Bukin IOMMU_DOMAIN_ASSERT_LOCKED(domain); 2803024e8afSRuslan Bukin KASSERT(domain->entries_cnt == 2, 2813024e8afSRuslan Bukin ("domain still in use %p", domain)); 2823024e8afSRuslan Bukin 2833024e8afSRuslan Bukin entry = RB_MIN(iommu_gas_entries_tree, &domain->rb_root); 2843024e8afSRuslan Bukin KASSERT(entry->start == 0, ("start entry start %p", domain)); 2853024e8afSRuslan Bukin KASSERT(entry->end == IOMMU_PAGE_SIZE, ("start entry end %p", domain)); 286dea8594fSRuslan Bukin KASSERT(entry->flags == 287dea8594fSRuslan Bukin (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED), 2883024e8afSRuslan Bukin ("start entry flags %p", domain)); 289368ee2f8SDoug Moore iommu_gas_rb_remove(domain, entry); 2904670f908SAlan Cox iommu_gas_free_entry(entry); 2913024e8afSRuslan Bukin 2923024e8afSRuslan Bukin entry = RB_MAX(iommu_gas_entries_tree, &domain->rb_root); 2933024e8afSRuslan Bukin KASSERT(entry->start == domain->end, ("end entry start %p", domain)); 2943024e8afSRuslan Bukin KASSERT(entry->end == domain->end, ("end entry end %p", domain)); 295dea8594fSRuslan Bukin KASSERT(entry->flags == 296dea8594fSRuslan Bukin (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED), 2973024e8afSRuslan Bukin ("end entry flags %p", domain)); 298368ee2f8SDoug Moore iommu_gas_rb_remove(domain, entry); 2994670f908SAlan Cox iommu_gas_free_entry(entry); 3003024e8afSRuslan Bukin } 3013024e8afSRuslan Bukin 3023024e8afSRuslan Bukin struct iommu_gas_match_args { 3033024e8afSRuslan Bukin iommu_gaddr_t size; 3043024e8afSRuslan Bukin int offset; 3053024e8afSRuslan Bukin const struct bus_dma_tag_common *common; 3063024e8afSRuslan Bukin u_int gas_flags; 3073024e8afSRuslan Bukin struct iommu_map_entry *entry; 3083024e8afSRuslan Bukin }; 3093024e8afSRuslan Bukin 3103024e8afSRuslan Bukin /* 3113024e8afSRuslan Bukin * The interval [beg, end) is a free interval between two iommu_map_entries. 312e0e8d0c8SDoug Moore * Addresses can be allocated only in the range [lbound, ubound). Try to 313e0e8d0c8SDoug Moore * allocate space in the free interval, subject to the conditions expressed by 314e0e8d0c8SDoug Moore * a, and return 'true' if and only if the allocation attempt succeeds. 3153024e8afSRuslan Bukin */ 3163024e8afSRuslan Bukin static bool 3173024e8afSRuslan Bukin iommu_gas_match_one(struct iommu_gas_match_args *a, iommu_gaddr_t beg, 318e0e8d0c8SDoug Moore iommu_gaddr_t end, iommu_gaddr_t lbound, iommu_gaddr_t ubound) 3193024e8afSRuslan Bukin { 320e0e8d0c8SDoug Moore struct iommu_map_entry *entry; 321e0e8d0c8SDoug Moore iommu_gaddr_t first, size, start; 322e0e8d0c8SDoug Moore int offset; 3233024e8afSRuslan Bukin 324da33f6d7SAlan Cox /* 325da33f6d7SAlan Cox * The prev->end is always aligned on the page size, which 326da33f6d7SAlan Cox * causes page alignment for the entry->start too. 327da33f6d7SAlan Cox * 328e0e8d0c8SDoug Moore * Create IOMMU_PAGE_SIZE gaps before, after new entry 329e0e8d0c8SDoug Moore * to ensure that out-of-bounds accesses fault. 330da33f6d7SAlan Cox */ 331e0e8d0c8SDoug Moore beg = MAX(beg + IOMMU_PAGE_SIZE, lbound); 332e0e8d0c8SDoug Moore start = roundup2(beg, a->common->alignment); 333e0e8d0c8SDoug Moore if (start < beg) 334e0e8d0c8SDoug Moore return (false); 335e0e8d0c8SDoug Moore end = MIN(end - IOMMU_PAGE_SIZE, ubound); 336e0e8d0c8SDoug Moore offset = a->offset; 337e0e8d0c8SDoug Moore size = a->size; 338e0e8d0c8SDoug Moore if (start + offset + size > end) 3393024e8afSRuslan Bukin return (false); 3403024e8afSRuslan Bukin 341e0e8d0c8SDoug Moore /* Check for and try to skip past boundary crossing. */ 342e0e8d0c8SDoug Moore if (!vm_addr_bound_ok(start + offset, size, a->common->boundary)) { 3433024e8afSRuslan Bukin /* 3443024e8afSRuslan Bukin * The start + offset to start + offset + size region crosses 345e0e8d0c8SDoug Moore * the boundary. Check if there is enough space after the next 346e0e8d0c8SDoug Moore * boundary after the beg. 3473024e8afSRuslan Bukin */ 348e0e8d0c8SDoug Moore first = start; 349e0e8d0c8SDoug Moore beg = roundup2(start + offset + 1, a->common->boundary); 350e0e8d0c8SDoug Moore start = roundup2(beg, a->common->alignment); 351e0e8d0c8SDoug Moore 352e0e8d0c8SDoug Moore if (start + offset + size > end || 353e0e8d0c8SDoug Moore !vm_addr_bound_ok(start + offset, size, 3543024e8afSRuslan Bukin a->common->boundary)) { 3553024e8afSRuslan Bukin /* 356e0e8d0c8SDoug Moore * Not enough space to align at the requested boundary, 357e0e8d0c8SDoug Moore * or boundary is smaller than the size, but allowed to 358e0e8d0c8SDoug Moore * split. We already checked that start + size does not 359e0e8d0c8SDoug Moore * overlap ubound. 3603024e8afSRuslan Bukin * 361e0e8d0c8SDoug Moore * XXXKIB. It is possible that beg is exactly at the 362e0e8d0c8SDoug Moore * start of the next entry, then we do not have gap. 363e0e8d0c8SDoug Moore * Ignore for now. 3643024e8afSRuslan Bukin */ 365e0e8d0c8SDoug Moore if ((a->gas_flags & IOMMU_MF_CANSPLIT) == 0) 366e0e8d0c8SDoug Moore return (false); 367e0e8d0c8SDoug Moore size = beg - first - offset; 368e0e8d0c8SDoug Moore start = first; 369e0e8d0c8SDoug Moore } 370e0e8d0c8SDoug Moore } 371e0e8d0c8SDoug Moore entry = a->entry; 372e0e8d0c8SDoug Moore entry->start = start; 373e0e8d0c8SDoug Moore entry->end = start + roundup2(size + offset, IOMMU_PAGE_SIZE); 374e0e8d0c8SDoug Moore entry->flags = IOMMU_MAP_ENTRY_MAP; 3753024e8afSRuslan Bukin return (true); 3763024e8afSRuslan Bukin } 3773024e8afSRuslan Bukin 378e0e8d0c8SDoug Moore /* Find the next entry that might abut a big-enough range. */ 379e0e8d0c8SDoug Moore static struct iommu_map_entry * 380e0e8d0c8SDoug Moore iommu_gas_next(struct iommu_map_entry *curr, iommu_gaddr_t min_free) 3813024e8afSRuslan Bukin { 382e0e8d0c8SDoug Moore struct iommu_map_entry *next; 3833024e8afSRuslan Bukin 384e0e8d0c8SDoug Moore if ((next = RB_RIGHT(curr, rb_entry)) != NULL && 385e0e8d0c8SDoug Moore next->free_down >= min_free) { 386e0e8d0c8SDoug Moore /* Find next entry in right subtree. */ 387e0e8d0c8SDoug Moore do 388e0e8d0c8SDoug Moore curr = next; 389e0e8d0c8SDoug Moore while ((next = RB_LEFT(curr, rb_entry)) != NULL && 390e0e8d0c8SDoug Moore next->free_down >= min_free); 391e0e8d0c8SDoug Moore } else { 392e0e8d0c8SDoug Moore /* Find next entry in a left-parent ancestor. */ 393e0e8d0c8SDoug Moore while ((next = RB_PARENT(curr, rb_entry)) != NULL && 394e0e8d0c8SDoug Moore curr == RB_RIGHT(next, rb_entry)) 395e0e8d0c8SDoug Moore curr = next; 396e0e8d0c8SDoug Moore curr = next; 397e0e8d0c8SDoug Moore } 398e0e8d0c8SDoug Moore return (curr); 3993024e8afSRuslan Bukin } 4003024e8afSRuslan Bukin 4018b221ca6SDoug Moore /* 4028b221ca6SDoug Moore * Address-ordered first-fit search of 'domain' for free space satisfying the 4038b221ca6SDoug Moore * conditions of 'a'. The space allocated is at least one page big, and is 404*a2c57c60SDoug Moore * bounded by guard pages to the left and right. The allocated space for 405*a2c57c60SDoug Moore * 'domain' is described by an rb-tree of map entries at domain->rb_root, and 406*a2c57c60SDoug Moore * domain->start_gap points to a map entry less than or adjacent to the first 4078b221ca6SDoug Moore * free-space of size at least 3 pages. 4088b221ca6SDoug Moore */ 4093024e8afSRuslan Bukin static int 4108b221ca6SDoug Moore iommu_gas_find_space(struct iommu_domain *domain, 4118b221ca6SDoug Moore struct iommu_gas_match_args *a) 4123024e8afSRuslan Bukin { 413e0e8d0c8SDoug Moore struct iommu_map_entry *curr, *first; 414e0e8d0c8SDoug Moore iommu_gaddr_t addr, min_free; 415e0e8d0c8SDoug Moore 4168b221ca6SDoug Moore IOMMU_DOMAIN_ASSERT_LOCKED(domain); 417e0e8d0c8SDoug Moore KASSERT(a->entry->flags == 0, 4188b221ca6SDoug Moore ("dirty entry %p %p", domain, a->entry)); 4198b221ca6SDoug Moore 4208b221ca6SDoug Moore /* 4218b221ca6SDoug Moore * start_gap may point to an entry adjacent to gaps too small for any 4228b221ca6SDoug Moore * new allocation. In that case, advance start_gap to the first free 4238b221ca6SDoug Moore * space big enough for a minimum allocation plus two guard pages. 4248b221ca6SDoug Moore */ 4258b221ca6SDoug Moore min_free = 3 * IOMMU_PAGE_SIZE; 4268b221ca6SDoug Moore first = domain->start_gap; 4278b221ca6SDoug Moore while (first != NULL && first->free_down < min_free) 4288b221ca6SDoug Moore first = RB_PARENT(first, rb_entry); 4298b221ca6SDoug Moore for (curr = first; curr != NULL; 4308b221ca6SDoug Moore curr = iommu_gas_next(curr, min_free)) { 4318b221ca6SDoug Moore if ((first = RB_LEFT(curr, rb_entry)) != NULL && 4328b221ca6SDoug Moore first->last + min_free <= curr->start) 4338b221ca6SDoug Moore break; 4348b221ca6SDoug Moore if ((first = RB_RIGHT(curr, rb_entry)) != NULL && 4358b221ca6SDoug Moore curr->end + min_free <= first->first) 4368b221ca6SDoug Moore break; 4378b221ca6SDoug Moore } 4388b221ca6SDoug Moore domain->start_gap = curr; 4393024e8afSRuslan Bukin 440b831865fSDoug Moore /* 441b831865fSDoug Moore * If the subtree doesn't have free space for the requested allocation 442f979ad00SDoug Moore * plus two guard pages, skip it. 443b831865fSDoug Moore */ 444f979ad00SDoug Moore min_free = 2 * IOMMU_PAGE_SIZE + 445f979ad00SDoug Moore roundup2(a->size + a->offset, IOMMU_PAGE_SIZE); 446f979ad00SDoug Moore 4478b221ca6SDoug Moore /* Climb to find a node in the subtree of big-enough ranges. */ 448e0e8d0c8SDoug Moore first = curr; 4498b221ca6SDoug Moore while (first != NULL && first->free_down < min_free) 4508b221ca6SDoug Moore first = RB_PARENT(first, rb_entry); 451f979ad00SDoug Moore 452f979ad00SDoug Moore /* 4538b221ca6SDoug Moore * Walk the big-enough ranges tree until one satisfies alignment 454f979ad00SDoug Moore * requirements, or violates lowaddr address requirement. 455f979ad00SDoug Moore */ 456e0e8d0c8SDoug Moore addr = a->common->lowaddr + 1; 457e0e8d0c8SDoug Moore for (curr = first; curr != NULL; 458e0e8d0c8SDoug Moore curr = iommu_gas_next(curr, min_free)) { 459e0e8d0c8SDoug Moore if ((first = RB_LEFT(curr, rb_entry)) != NULL && 460e0e8d0c8SDoug Moore iommu_gas_match_one(a, first->last, curr->start, 461368ee2f8SDoug Moore 0, addr)) { 462368ee2f8SDoug Moore RB_INSERT_PREV(iommu_gas_entries_tree, 463368ee2f8SDoug Moore &domain->rb_root, curr, a->entry); 46430031172SDoug Moore return (0); 465368ee2f8SDoug Moore } 466e0e8d0c8SDoug Moore if (curr->end >= addr) { 467e0e8d0c8SDoug Moore /* All remaining ranges >= addr */ 468f979ad00SDoug Moore break; 469f979ad00SDoug Moore } 470e0e8d0c8SDoug Moore if ((first = RB_RIGHT(curr, rb_entry)) != NULL && 471e0e8d0c8SDoug Moore iommu_gas_match_one(a, curr->end, first->first, 472368ee2f8SDoug Moore 0, addr)) { 473368ee2f8SDoug Moore RB_INSERT_NEXT(iommu_gas_entries_tree, 474368ee2f8SDoug Moore &domain->rb_root, curr, a->entry); 4753024e8afSRuslan Bukin return (0); 4763024e8afSRuslan Bukin } 477368ee2f8SDoug Moore } 4783024e8afSRuslan Bukin 479b831865fSDoug Moore /* 480e0e8d0c8SDoug Moore * To resume the search at the start of the upper region, first climb to 481e0e8d0c8SDoug Moore * the nearest ancestor that spans highaddr. Then find the last entry 482e0e8d0c8SDoug Moore * before highaddr that could abut a big-enough range. 483b831865fSDoug Moore */ 484e0e8d0c8SDoug Moore addr = a->common->highaddr; 485e0e8d0c8SDoug Moore while (curr != NULL && curr->last < addr) 486e0e8d0c8SDoug Moore curr = RB_PARENT(curr, rb_entry); 487e0e8d0c8SDoug Moore first = NULL; 488e0e8d0c8SDoug Moore while (curr != NULL && curr->free_down >= min_free) { 489e0e8d0c8SDoug Moore if (addr < curr->end) 490e0e8d0c8SDoug Moore curr = RB_LEFT(curr, rb_entry); 491e0e8d0c8SDoug Moore else { 492e0e8d0c8SDoug Moore first = curr; 493e0e8d0c8SDoug Moore curr = RB_RIGHT(curr, rb_entry); 4943024e8afSRuslan Bukin } 4953024e8afSRuslan Bukin } 4963024e8afSRuslan Bukin 497e0e8d0c8SDoug Moore /* 498e0e8d0c8SDoug Moore * Walk the remaining big-enough ranges until one satisfies alignment 499e0e8d0c8SDoug Moore * requirements. 500e0e8d0c8SDoug Moore */ 501e0e8d0c8SDoug Moore for (curr = first; curr != NULL; 502e0e8d0c8SDoug Moore curr = iommu_gas_next(curr, min_free)) { 503e0e8d0c8SDoug Moore if ((first = RB_LEFT(curr, rb_entry)) != NULL && 504e0e8d0c8SDoug Moore iommu_gas_match_one(a, first->last, curr->start, 505368ee2f8SDoug Moore addr + 1, domain->end)) { 506368ee2f8SDoug Moore RB_INSERT_PREV(iommu_gas_entries_tree, 507368ee2f8SDoug Moore &domain->rb_root, curr, a->entry); 5083024e8afSRuslan Bukin return (0); 509368ee2f8SDoug Moore } 510e0e8d0c8SDoug Moore if ((first = RB_RIGHT(curr, rb_entry)) != NULL && 511e0e8d0c8SDoug Moore iommu_gas_match_one(a, curr->end, first->first, 512368ee2f8SDoug Moore addr + 1, domain->end)) { 513368ee2f8SDoug Moore RB_INSERT_NEXT(iommu_gas_entries_tree, 514368ee2f8SDoug Moore &domain->rb_root, curr, a->entry); 515e0e8d0c8SDoug Moore return (0); 5163024e8afSRuslan Bukin } 517368ee2f8SDoug Moore } 518e0e8d0c8SDoug Moore 5193024e8afSRuslan Bukin return (ENOMEM); 5203024e8afSRuslan Bukin } 5213024e8afSRuslan Bukin 5223024e8afSRuslan Bukin static int 5233024e8afSRuslan Bukin iommu_gas_alloc_region(struct iommu_domain *domain, struct iommu_map_entry *entry, 5243024e8afSRuslan Bukin u_int flags) 5253024e8afSRuslan Bukin { 5263024e8afSRuslan Bukin struct iommu_map_entry *next, *prev; 5273024e8afSRuslan Bukin 5283024e8afSRuslan Bukin IOMMU_DOMAIN_ASSERT_LOCKED(domain); 5293024e8afSRuslan Bukin 5303024e8afSRuslan Bukin if ((entry->start & IOMMU_PAGE_MASK) != 0 || 5313024e8afSRuslan Bukin (entry->end & IOMMU_PAGE_MASK) != 0) 5323024e8afSRuslan Bukin return (EINVAL); 5333024e8afSRuslan Bukin if (entry->start >= entry->end) 5343024e8afSRuslan Bukin return (EINVAL); 5353024e8afSRuslan Bukin if (entry->end >= domain->end) 5363024e8afSRuslan Bukin return (EINVAL); 5373024e8afSRuslan Bukin 5383024e8afSRuslan Bukin next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, entry); 5393024e8afSRuslan Bukin KASSERT(next != NULL, ("next must be non-null %p %jx", domain, 5403024e8afSRuslan Bukin (uintmax_t)entry->start)); 5413024e8afSRuslan Bukin prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next); 5423024e8afSRuslan Bukin /* prev could be NULL */ 5433024e8afSRuslan Bukin 5443024e8afSRuslan Bukin /* 5453024e8afSRuslan Bukin * Adapt to broken BIOSes which specify overlapping RMRR 5463024e8afSRuslan Bukin * entries. 5473024e8afSRuslan Bukin * 5483024e8afSRuslan Bukin * XXXKIB: this does not handle a case when prev or next 5493024e8afSRuslan Bukin * entries are completely covered by the current one, which 5503024e8afSRuslan Bukin * extends both ways. 5513024e8afSRuslan Bukin */ 5523024e8afSRuslan Bukin if (prev != NULL && prev->end > entry->start && 5533024e8afSRuslan Bukin (prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) { 5543024e8afSRuslan Bukin if ((flags & IOMMU_MF_RMRR) == 0 || 5553024e8afSRuslan Bukin (prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0) 5563024e8afSRuslan Bukin return (EBUSY); 5573024e8afSRuslan Bukin entry->start = prev->end; 5583024e8afSRuslan Bukin } 5593024e8afSRuslan Bukin if (next->start < entry->end && 5603024e8afSRuslan Bukin (next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) { 5613024e8afSRuslan Bukin if ((flags & IOMMU_MF_RMRR) == 0 || 5623024e8afSRuslan Bukin (next->flags & IOMMU_MAP_ENTRY_RMRR) == 0) 5633024e8afSRuslan Bukin return (EBUSY); 5643024e8afSRuslan Bukin entry->end = next->start; 5653024e8afSRuslan Bukin } 5663024e8afSRuslan Bukin if (entry->end == entry->start) 5673024e8afSRuslan Bukin return (0); 5683024e8afSRuslan Bukin 5693024e8afSRuslan Bukin if (prev != NULL && prev->end > entry->start) { 5703024e8afSRuslan Bukin /* This assumes that prev is the placeholder entry. */ 5713024e8afSRuslan Bukin iommu_gas_rb_remove(domain, prev); 5723024e8afSRuslan Bukin prev = NULL; 5733024e8afSRuslan Bukin } 574368ee2f8SDoug Moore RB_INSERT_PREV(iommu_gas_entries_tree, 575368ee2f8SDoug Moore &domain->rb_root, next, entry); 5763024e8afSRuslan Bukin if (next->start < entry->end) { 5773024e8afSRuslan Bukin iommu_gas_rb_remove(domain, next); 5783024e8afSRuslan Bukin next = NULL; 5793024e8afSRuslan Bukin } 5803024e8afSRuslan Bukin 5813024e8afSRuslan Bukin if ((flags & IOMMU_MF_RMRR) != 0) 5823024e8afSRuslan Bukin entry->flags = IOMMU_MAP_ENTRY_RMRR; 5833024e8afSRuslan Bukin 5843024e8afSRuslan Bukin #ifdef INVARIANTS 5853024e8afSRuslan Bukin struct iommu_map_entry *ip, *in; 5863024e8afSRuslan Bukin ip = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry); 5873024e8afSRuslan Bukin in = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry); 5883024e8afSRuslan Bukin KASSERT(prev == NULL || ip == prev, 5893024e8afSRuslan Bukin ("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)", 5903024e8afSRuslan Bukin entry, entry->start, entry->end, prev, 5913024e8afSRuslan Bukin prev == NULL ? 0 : prev->start, prev == NULL ? 0 : prev->end, 5923024e8afSRuslan Bukin ip, ip == NULL ? 0 : ip->start, ip == NULL ? 0 : ip->end)); 5933024e8afSRuslan Bukin KASSERT(next == NULL || in == next, 5943024e8afSRuslan Bukin ("RMRR %p (%jx %jx) next %p (%jx %jx) ins next %p (%jx %jx)", 5953024e8afSRuslan Bukin entry, entry->start, entry->end, next, 5963024e8afSRuslan Bukin next == NULL ? 0 : next->start, next == NULL ? 0 : next->end, 5973024e8afSRuslan Bukin in, in == NULL ? 0 : in->start, in == NULL ? 0 : in->end)); 5983024e8afSRuslan Bukin #endif 5993024e8afSRuslan Bukin 6003024e8afSRuslan Bukin return (0); 6013024e8afSRuslan Bukin } 6023024e8afSRuslan Bukin 6033024e8afSRuslan Bukin void 6044670f908SAlan Cox iommu_gas_free_space(struct iommu_map_entry *entry) 6053024e8afSRuslan Bukin { 6064670f908SAlan Cox struct iommu_domain *domain; 6073024e8afSRuslan Bukin 6084670f908SAlan Cox domain = entry->domain; 6093024e8afSRuslan Bukin KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR | 6103024e8afSRuslan Bukin IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP, 6113024e8afSRuslan Bukin ("permanent entry %p %p", domain, entry)); 6123024e8afSRuslan Bukin 6134670f908SAlan Cox IOMMU_DOMAIN_LOCK(domain); 6143024e8afSRuslan Bukin iommu_gas_rb_remove(domain, entry); 6153024e8afSRuslan Bukin entry->flags &= ~IOMMU_MAP_ENTRY_MAP; 6163024e8afSRuslan Bukin #ifdef INVARIANTS 6173024e8afSRuslan Bukin if (iommu_check_free) 6183024e8afSRuslan Bukin iommu_gas_check_free(domain); 6193024e8afSRuslan Bukin #endif 6204670f908SAlan Cox IOMMU_DOMAIN_UNLOCK(domain); 6213024e8afSRuslan Bukin } 6223024e8afSRuslan Bukin 6233024e8afSRuslan Bukin void 6244670f908SAlan Cox iommu_gas_free_region(struct iommu_map_entry *entry) 6253024e8afSRuslan Bukin { 6264670f908SAlan Cox struct iommu_domain *domain; 6273024e8afSRuslan Bukin 6284670f908SAlan Cox domain = entry->domain; 6293024e8afSRuslan Bukin KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR | 6303024e8afSRuslan Bukin IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR, 6313024e8afSRuslan Bukin ("non-RMRR entry %p %p", domain, entry)); 6323024e8afSRuslan Bukin 6334670f908SAlan Cox IOMMU_DOMAIN_LOCK(domain); 63487cd087aSDoug Moore if (entry != domain->first_place && 63587cd087aSDoug Moore entry != domain->last_place) 6363024e8afSRuslan Bukin iommu_gas_rb_remove(domain, entry); 6373024e8afSRuslan Bukin entry->flags &= ~IOMMU_MAP_ENTRY_RMRR; 6384670f908SAlan Cox IOMMU_DOMAIN_UNLOCK(domain); 6393024e8afSRuslan Bukin } 6403024e8afSRuslan Bukin 641c9e4d250SKonstantin Belousov static struct iommu_map_entry * 642c9e4d250SKonstantin Belousov iommu_gas_remove_clip_left(struct iommu_domain *domain, iommu_gaddr_t start, 643c9e4d250SKonstantin Belousov iommu_gaddr_t end, struct iommu_map_entry **r) 644c9e4d250SKonstantin Belousov { 645c9e4d250SKonstantin Belousov struct iommu_map_entry *entry, *res, fentry; 646c9e4d250SKonstantin Belousov 647c9e4d250SKonstantin Belousov IOMMU_DOMAIN_ASSERT_LOCKED(domain); 648c9e4d250SKonstantin Belousov MPASS(start <= end); 64987cd087aSDoug Moore MPASS(end <= domain->end); 650c9e4d250SKonstantin Belousov 651c9e4d250SKonstantin Belousov /* 652c9e4d250SKonstantin Belousov * Find an entry which contains the supplied guest's address 653c9e4d250SKonstantin Belousov * start, or the first entry after the start. Since we 654c9e4d250SKonstantin Belousov * asserted that start is below domain end, entry should 655c9e4d250SKonstantin Belousov * exist. Then clip it if needed. 656c9e4d250SKonstantin Belousov */ 657c9e4d250SKonstantin Belousov fentry.start = start + 1; 658c9e4d250SKonstantin Belousov fentry.end = start + 1; 659c9e4d250SKonstantin Belousov entry = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &fentry); 660c9e4d250SKonstantin Belousov 661c9e4d250SKonstantin Belousov if (entry->start >= start || 662c9e4d250SKonstantin Belousov (entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) 663c9e4d250SKonstantin Belousov return (entry); 664c9e4d250SKonstantin Belousov 665c9e4d250SKonstantin Belousov res = *r; 666c9e4d250SKonstantin Belousov *r = NULL; 667c9e4d250SKonstantin Belousov *res = *entry; 668c9e4d250SKonstantin Belousov res->start = entry->end = start; 669c9e4d250SKonstantin Belousov RB_UPDATE_AUGMENT(entry, rb_entry); 670368ee2f8SDoug Moore RB_INSERT_NEXT(iommu_gas_entries_tree, 671368ee2f8SDoug Moore &domain->rb_root, entry, res); 672c9e4d250SKonstantin Belousov return (res); 673c9e4d250SKonstantin Belousov } 674c9e4d250SKonstantin Belousov 675c9e4d250SKonstantin Belousov static bool 676c9e4d250SKonstantin Belousov iommu_gas_remove_clip_right(struct iommu_domain *domain, 677c9e4d250SKonstantin Belousov iommu_gaddr_t end, struct iommu_map_entry *entry, 678c9e4d250SKonstantin Belousov struct iommu_map_entry *r) 679c9e4d250SKonstantin Belousov { 680c9e4d250SKonstantin Belousov if (entry->start >= end || (entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) 681c9e4d250SKonstantin Belousov return (false); 682c9e4d250SKonstantin Belousov 683c9e4d250SKonstantin Belousov *r = *entry; 684c9e4d250SKonstantin Belousov r->end = entry->start = end; 685c9e4d250SKonstantin Belousov RB_UPDATE_AUGMENT(entry, rb_entry); 686368ee2f8SDoug Moore RB_INSERT_PREV(iommu_gas_entries_tree, 687368ee2f8SDoug Moore &domain->rb_root, entry, r); 688c9e4d250SKonstantin Belousov return (true); 689c9e4d250SKonstantin Belousov } 690c9e4d250SKonstantin Belousov 691c9e4d250SKonstantin Belousov static void 692c9e4d250SKonstantin Belousov iommu_gas_remove_unmap(struct iommu_domain *domain, 693c9e4d250SKonstantin Belousov struct iommu_map_entry *entry, struct iommu_map_entries_tailq *gcp) 694c9e4d250SKonstantin Belousov { 695c9e4d250SKonstantin Belousov IOMMU_DOMAIN_ASSERT_LOCKED(domain); 696c9e4d250SKonstantin Belousov 697c9e4d250SKonstantin Belousov if ((entry->flags & (IOMMU_MAP_ENTRY_UNMAPPED | 698c9e4d250SKonstantin Belousov IOMMU_MAP_ENTRY_REMOVING)) != 0) 699c9e4d250SKonstantin Belousov return; 700c9e4d250SKonstantin Belousov MPASS((entry->flags & IOMMU_MAP_ENTRY_PLACE) == 0); 701c9e4d250SKonstantin Belousov entry->flags |= IOMMU_MAP_ENTRY_REMOVING; 702c9e4d250SKonstantin Belousov TAILQ_INSERT_TAIL(gcp, entry, dmamap_link); 703c9e4d250SKonstantin Belousov } 704c9e4d250SKonstantin Belousov 705c9e4d250SKonstantin Belousov /* 706c9e4d250SKonstantin Belousov * Remove specified range from the GAS of the domain. Note that the 707c9e4d250SKonstantin Belousov * removal is not guaranteed to occur upon the function return, it 708c9e4d250SKonstantin Belousov * might be finalized some time after, when hardware reports that 709c9e4d250SKonstantin Belousov * (queued) IOTLB invalidation was performed. 710c9e4d250SKonstantin Belousov */ 711c9e4d250SKonstantin Belousov void 712c9e4d250SKonstantin Belousov iommu_gas_remove(struct iommu_domain *domain, iommu_gaddr_t start, 713c9e4d250SKonstantin Belousov iommu_gaddr_t size) 714c9e4d250SKonstantin Belousov { 715c9e4d250SKonstantin Belousov struct iommu_map_entry *entry, *nentry, *r1, *r2; 716c9e4d250SKonstantin Belousov struct iommu_map_entries_tailq gc; 717c9e4d250SKonstantin Belousov iommu_gaddr_t end; 718c9e4d250SKonstantin Belousov 719c9e4d250SKonstantin Belousov end = start + size; 720c9e4d250SKonstantin Belousov r1 = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK); 721c9e4d250SKonstantin Belousov r2 = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK); 722c9e4d250SKonstantin Belousov TAILQ_INIT(&gc); 723c9e4d250SKonstantin Belousov 724c9e4d250SKonstantin Belousov IOMMU_DOMAIN_LOCK(domain); 725c9e4d250SKonstantin Belousov 726c9e4d250SKonstantin Belousov nentry = iommu_gas_remove_clip_left(domain, start, end, &r1); 727c9e4d250SKonstantin Belousov RB_FOREACH_FROM(entry, iommu_gas_entries_tree, nentry) { 728c9e4d250SKonstantin Belousov if (entry->start >= end) 729c9e4d250SKonstantin Belousov break; 730c9e4d250SKonstantin Belousov KASSERT(start <= entry->start, 731c9e4d250SKonstantin Belousov ("iommu_gas_remove entry (%#jx, %#jx) start %#jx", 732c9e4d250SKonstantin Belousov entry->start, entry->end, start)); 733c9e4d250SKonstantin Belousov if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) 734c9e4d250SKonstantin Belousov continue; 735c9e4d250SKonstantin Belousov iommu_gas_remove_unmap(domain, entry, &gc); 736c9e4d250SKonstantin Belousov } 737c9e4d250SKonstantin Belousov if (iommu_gas_remove_clip_right(domain, end, entry, r2)) { 738c9e4d250SKonstantin Belousov iommu_gas_remove_unmap(domain, r2, &gc); 739c9e4d250SKonstantin Belousov r2 = NULL; 740c9e4d250SKonstantin Belousov } 741c9e4d250SKonstantin Belousov 742c9e4d250SKonstantin Belousov #ifdef INVARIANTS 743c9e4d250SKonstantin Belousov RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) { 744c9e4d250SKonstantin Belousov if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) 745c9e4d250SKonstantin Belousov continue; 746c9e4d250SKonstantin Belousov KASSERT(entry->end <= start || entry->start >= end, 747c9e4d250SKonstantin Belousov ("iommu_gas_remove leftover entry (%#jx, %#jx) range " 748c9e4d250SKonstantin Belousov "(%#jx, %#jx)", 749c9e4d250SKonstantin Belousov entry->start, entry->end, start, end)); 750c9e4d250SKonstantin Belousov } 751c9e4d250SKonstantin Belousov #endif 752c9e4d250SKonstantin Belousov 753c9e4d250SKonstantin Belousov IOMMU_DOMAIN_UNLOCK(domain); 754c9e4d250SKonstantin Belousov if (r1 != NULL) 755c9e4d250SKonstantin Belousov iommu_gas_free_entry(r1); 756c9e4d250SKonstantin Belousov if (r2 != NULL) 757c9e4d250SKonstantin Belousov iommu_gas_free_entry(r2); 758c9e4d250SKonstantin Belousov iommu_domain_unload(domain, &gc, true); 759c9e4d250SKonstantin Belousov } 760c9e4d250SKonstantin Belousov 7613024e8afSRuslan Bukin int 7623024e8afSRuslan Bukin iommu_gas_map(struct iommu_domain *domain, 7633024e8afSRuslan Bukin const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset, 7643024e8afSRuslan Bukin u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res) 7653024e8afSRuslan Bukin { 766e0e8d0c8SDoug Moore struct iommu_gas_match_args a; 7673024e8afSRuslan Bukin struct iommu_map_entry *entry; 7683024e8afSRuslan Bukin int error; 7693024e8afSRuslan Bukin 7703024e8afSRuslan Bukin KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0, 7713024e8afSRuslan Bukin ("invalid flags 0x%x", flags)); 7723024e8afSRuslan Bukin 773e0e8d0c8SDoug Moore a.size = size; 774e0e8d0c8SDoug Moore a.offset = offset; 775e0e8d0c8SDoug Moore a.common = common; 776e0e8d0c8SDoug Moore a.gas_flags = flags; 7773024e8afSRuslan Bukin entry = iommu_gas_alloc_entry(domain, 77815f6baf4SRuslan Bukin (flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0); 7793024e8afSRuslan Bukin if (entry == NULL) 7803024e8afSRuslan Bukin return (ENOMEM); 781e0e8d0c8SDoug Moore a.entry = entry; 7823024e8afSRuslan Bukin IOMMU_DOMAIN_LOCK(domain); 7838b221ca6SDoug Moore error = iommu_gas_find_space(domain, &a); 7843024e8afSRuslan Bukin if (error == ENOMEM) { 7853024e8afSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 7864670f908SAlan Cox iommu_gas_free_entry(entry); 7873024e8afSRuslan Bukin return (error); 7883024e8afSRuslan Bukin } 7893024e8afSRuslan Bukin #ifdef INVARIANTS 7903024e8afSRuslan Bukin if (iommu_check_free) 7913024e8afSRuslan Bukin iommu_gas_check_free(domain); 7923024e8afSRuslan Bukin #endif 7933024e8afSRuslan Bukin KASSERT(error == 0, 7943024e8afSRuslan Bukin ("unexpected error %d from iommu_gas_find_entry", error)); 7953024e8afSRuslan Bukin KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx", 7963024e8afSRuslan Bukin (uintmax_t)entry->end, (uintmax_t)domain->end)); 7973024e8afSRuslan Bukin entry->flags |= eflags; 7983024e8afSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 7993024e8afSRuslan Bukin 8000eed04c8SRuslan Bukin error = domain->ops->map(domain, entry->start, 8010eed04c8SRuslan Bukin entry->end - entry->start, ma, eflags, 80215f6baf4SRuslan Bukin ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0)); 8033024e8afSRuslan Bukin if (error == ENOMEM) { 8048bc36738SAlan Cox iommu_domain_unload_entry(entry, true, 8058bc36738SAlan Cox (flags & IOMMU_MF_CANWAIT) != 0); 8063024e8afSRuslan Bukin return (error); 8073024e8afSRuslan Bukin } 8083024e8afSRuslan Bukin KASSERT(error == 0, 8093024e8afSRuslan Bukin ("unexpected error %d from domain_map_buf", error)); 8103024e8afSRuslan Bukin 8113024e8afSRuslan Bukin *res = entry; 8123024e8afSRuslan Bukin return (0); 8133024e8afSRuslan Bukin } 8143024e8afSRuslan Bukin 8153024e8afSRuslan Bukin int 8163024e8afSRuslan Bukin iommu_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry, 8173024e8afSRuslan Bukin u_int eflags, u_int flags, vm_page_t *ma) 8183024e8afSRuslan Bukin { 8193024e8afSRuslan Bukin iommu_gaddr_t start; 8203024e8afSRuslan Bukin int error; 8213024e8afSRuslan Bukin 8224670f908SAlan Cox KASSERT(entry->domain == domain, 8234670f908SAlan Cox ("mismatched domain %p entry %p entry->domain %p", domain, 8244670f908SAlan Cox entry, entry->domain)); 8253024e8afSRuslan Bukin KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain, 8263024e8afSRuslan Bukin entry, entry->flags)); 8273024e8afSRuslan Bukin KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0, 8283024e8afSRuslan Bukin ("invalid flags 0x%x", flags)); 8293024e8afSRuslan Bukin 8303024e8afSRuslan Bukin start = entry->start; 8313024e8afSRuslan Bukin IOMMU_DOMAIN_LOCK(domain); 8323024e8afSRuslan Bukin error = iommu_gas_alloc_region(domain, entry, flags); 8333024e8afSRuslan Bukin if (error != 0) { 8343024e8afSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 8353024e8afSRuslan Bukin return (error); 8363024e8afSRuslan Bukin } 8373024e8afSRuslan Bukin entry->flags |= eflags; 8383024e8afSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 8393024e8afSRuslan Bukin if (entry->end == entry->start) 8403024e8afSRuslan Bukin return (0); 8413024e8afSRuslan Bukin 8420eed04c8SRuslan Bukin error = domain->ops->map(domain, entry->start, 8430eed04c8SRuslan Bukin entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start), 8440eed04c8SRuslan Bukin eflags, ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0)); 8453024e8afSRuslan Bukin if (error == ENOMEM) { 8468bc36738SAlan Cox iommu_domain_unload_entry(entry, false, 8478bc36738SAlan Cox (flags & IOMMU_MF_CANWAIT) != 0); 8483024e8afSRuslan Bukin return (error); 8493024e8afSRuslan Bukin } 8503024e8afSRuslan Bukin KASSERT(error == 0, 8513024e8afSRuslan Bukin ("unexpected error %d from domain_map_buf", error)); 8523024e8afSRuslan Bukin 8533024e8afSRuslan Bukin return (0); 8543024e8afSRuslan Bukin } 8553024e8afSRuslan Bukin 856ee47a12aSRyan Libby static int 857ee47a12aSRyan Libby iommu_gas_reserve_region_locked(struct iommu_domain *domain, 858ee47a12aSRyan Libby iommu_gaddr_t start, iommu_gaddr_t end, struct iommu_map_entry *entry) 859ee47a12aSRyan Libby { 860ee47a12aSRyan Libby int error; 861ee47a12aSRyan Libby 862ee47a12aSRyan Libby IOMMU_DOMAIN_ASSERT_LOCKED(domain); 863ee47a12aSRyan Libby 864ee47a12aSRyan Libby entry->start = start; 865ee47a12aSRyan Libby entry->end = end; 866ee47a12aSRyan Libby error = iommu_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT); 867ee47a12aSRyan Libby if (error == 0) 868ee47a12aSRyan Libby entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED; 869ee47a12aSRyan Libby return (error); 870ee47a12aSRyan Libby } 871ee47a12aSRyan Libby 8723024e8afSRuslan Bukin int 8733024e8afSRuslan Bukin iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start, 87494dfb28eSRuslan Bukin iommu_gaddr_t end, struct iommu_map_entry **entry0) 8753024e8afSRuslan Bukin { 8763024e8afSRuslan Bukin struct iommu_map_entry *entry; 8773024e8afSRuslan Bukin int error; 8783024e8afSRuslan Bukin 87915f6baf4SRuslan Bukin entry = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK); 8803024e8afSRuslan Bukin IOMMU_DOMAIN_LOCK(domain); 881ee47a12aSRyan Libby error = iommu_gas_reserve_region_locked(domain, start, end, entry); 8823024e8afSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 8833024e8afSRuslan Bukin if (error != 0) 8844670f908SAlan Cox iommu_gas_free_entry(entry); 88594dfb28eSRuslan Bukin else if (entry0 != NULL) 88694dfb28eSRuslan Bukin *entry0 = entry; 8873024e8afSRuslan Bukin return (error); 8883024e8afSRuslan Bukin } 8893024e8afSRuslan Bukin 890ee47a12aSRyan Libby /* 891ee47a12aSRyan Libby * As in iommu_gas_reserve_region, reserve [start, end), but allow for existing 892ee47a12aSRyan Libby * entries. 893ee47a12aSRyan Libby */ 894ee47a12aSRyan Libby int 895ee47a12aSRyan Libby iommu_gas_reserve_region_extend(struct iommu_domain *domain, 896ee47a12aSRyan Libby iommu_gaddr_t start, iommu_gaddr_t end) 897ee47a12aSRyan Libby { 898ee47a12aSRyan Libby struct iommu_map_entry *entry, *next, *prev, key = {}; 899ee47a12aSRyan Libby iommu_gaddr_t entry_start, entry_end; 900ee47a12aSRyan Libby int error; 901ee47a12aSRyan Libby 902ee47a12aSRyan Libby error = 0; 903ee47a12aSRyan Libby entry = NULL; 904ee47a12aSRyan Libby end = ummin(end, domain->end); 905ee47a12aSRyan Libby while (start < end) { 906ee47a12aSRyan Libby /* Preallocate an entry. */ 907ee47a12aSRyan Libby if (entry == NULL) 908ee47a12aSRyan Libby entry = iommu_gas_alloc_entry(domain, 909ee47a12aSRyan Libby IOMMU_PGF_WAITOK); 910ee47a12aSRyan Libby /* Calculate the free region from here to the next entry. */ 911ee47a12aSRyan Libby key.start = key.end = start; 912ee47a12aSRyan Libby IOMMU_DOMAIN_LOCK(domain); 913ee47a12aSRyan Libby next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &key); 914ee47a12aSRyan Libby KASSERT(next != NULL, ("domain %p with end %#jx has no entry " 915ee47a12aSRyan Libby "after %#jx", domain, (uintmax_t)domain->end, 916ee47a12aSRyan Libby (uintmax_t)start)); 917ee47a12aSRyan Libby entry_end = ummin(end, next->start); 918ee47a12aSRyan Libby prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next); 919ee47a12aSRyan Libby if (prev != NULL) 920ee47a12aSRyan Libby entry_start = ummax(start, prev->end); 921ee47a12aSRyan Libby else 922ee47a12aSRyan Libby entry_start = start; 923ee47a12aSRyan Libby start = next->end; 924ee47a12aSRyan Libby /* Reserve the region if non-empty. */ 925ee47a12aSRyan Libby if (entry_start != entry_end) { 926ee47a12aSRyan Libby error = iommu_gas_reserve_region_locked(domain, 927ee47a12aSRyan Libby entry_start, entry_end, entry); 9280ba1d860SAlan Cox if (error != 0) { 9290ba1d860SAlan Cox IOMMU_DOMAIN_UNLOCK(domain); 930ee47a12aSRyan Libby break; 9310ba1d860SAlan Cox } 932ee47a12aSRyan Libby entry = NULL; 933ee47a12aSRyan Libby } 934ee47a12aSRyan Libby IOMMU_DOMAIN_UNLOCK(domain); 935ee47a12aSRyan Libby } 936ee47a12aSRyan Libby /* Release a preallocated entry if it was not used. */ 937ee47a12aSRyan Libby if (entry != NULL) 9384670f908SAlan Cox iommu_gas_free_entry(entry); 939ee47a12aSRyan Libby return (error); 940ee47a12aSRyan Libby } 941ee47a12aSRyan Libby 942f32f0095SRuslan Bukin void 943f32f0095SRuslan Bukin iommu_unmap_msi(struct iommu_ctx *ctx) 944f32f0095SRuslan Bukin { 945f32f0095SRuslan Bukin struct iommu_map_entry *entry; 946f32f0095SRuslan Bukin struct iommu_domain *domain; 947f32f0095SRuslan Bukin 948f32f0095SRuslan Bukin domain = ctx->domain; 949f32f0095SRuslan Bukin entry = domain->msi_entry; 950f32f0095SRuslan Bukin if (entry == NULL) 951f32f0095SRuslan Bukin return; 952f32f0095SRuslan Bukin 953f32f0095SRuslan Bukin domain->ops->unmap(domain, entry->start, entry->end - 954f32f0095SRuslan Bukin entry->start, IOMMU_PGF_WAITOK); 955f32f0095SRuslan Bukin 9564670f908SAlan Cox iommu_gas_free_space(entry); 957f32f0095SRuslan Bukin 9584670f908SAlan Cox iommu_gas_free_entry(entry); 959f32f0095SRuslan Bukin 960f32f0095SRuslan Bukin domain->msi_entry = NULL; 961f32f0095SRuslan Bukin domain->msi_base = 0; 962f32f0095SRuslan Bukin domain->msi_phys = 0; 963f32f0095SRuslan Bukin } 964f32f0095SRuslan Bukin 9653024e8afSRuslan Bukin int 966e707c8beSRuslan Bukin iommu_map_msi(struct iommu_ctx *ctx, iommu_gaddr_t size, int offset, 967e707c8beSRuslan Bukin u_int eflags, u_int flags, vm_page_t *ma) 968e707c8beSRuslan Bukin { 969e707c8beSRuslan Bukin struct iommu_domain *domain; 970e707c8beSRuslan Bukin struct iommu_map_entry *entry; 971e707c8beSRuslan Bukin int error; 972e707c8beSRuslan Bukin 973e707c8beSRuslan Bukin error = 0; 974e707c8beSRuslan Bukin domain = ctx->domain; 975e707c8beSRuslan Bukin 976e707c8beSRuslan Bukin /* Check if there is already an MSI page allocated */ 977e707c8beSRuslan Bukin IOMMU_DOMAIN_LOCK(domain); 978e707c8beSRuslan Bukin entry = domain->msi_entry; 979e707c8beSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 980e707c8beSRuslan Bukin 981e707c8beSRuslan Bukin if (entry == NULL) { 982e707c8beSRuslan Bukin error = iommu_gas_map(domain, &ctx->tag->common, size, offset, 983e707c8beSRuslan Bukin eflags, flags, ma, &entry); 984e707c8beSRuslan Bukin IOMMU_DOMAIN_LOCK(domain); 985e707c8beSRuslan Bukin if (error == 0) { 986e707c8beSRuslan Bukin if (domain->msi_entry == NULL) { 987e707c8beSRuslan Bukin MPASS(domain->msi_base == 0); 988e707c8beSRuslan Bukin MPASS(domain->msi_phys == 0); 989e707c8beSRuslan Bukin 990e707c8beSRuslan Bukin domain->msi_entry = entry; 991e707c8beSRuslan Bukin domain->msi_base = entry->start; 992e707c8beSRuslan Bukin domain->msi_phys = VM_PAGE_TO_PHYS(ma[0]); 993e707c8beSRuslan Bukin } else { 994e707c8beSRuslan Bukin /* 995e707c8beSRuslan Bukin * We lost the race and already have an 996e707c8beSRuslan Bukin * MSI page allocated. Free the unneeded entry. 997e707c8beSRuslan Bukin */ 9984670f908SAlan Cox iommu_gas_free_entry(entry); 999e707c8beSRuslan Bukin } 1000e707c8beSRuslan Bukin } else if (domain->msi_entry != NULL) { 1001e707c8beSRuslan Bukin /* 1002e707c8beSRuslan Bukin * The allocation failed, but another succeeded. 1003e707c8beSRuslan Bukin * Return success as there is a valid MSI page. 1004e707c8beSRuslan Bukin */ 1005e707c8beSRuslan Bukin error = 0; 1006e707c8beSRuslan Bukin } 1007e707c8beSRuslan Bukin IOMMU_DOMAIN_UNLOCK(domain); 1008e707c8beSRuslan Bukin } 1009e707c8beSRuslan Bukin 1010e707c8beSRuslan Bukin return (error); 1011e707c8beSRuslan Bukin } 1012e707c8beSRuslan Bukin 1013e707c8beSRuslan Bukin void 1014e707c8beSRuslan Bukin iommu_translate_msi(struct iommu_domain *domain, uint64_t *addr) 1015e707c8beSRuslan Bukin { 1016e707c8beSRuslan Bukin 1017e707c8beSRuslan Bukin *addr = (*addr - domain->msi_phys) + domain->msi_base; 1018e707c8beSRuslan Bukin 1019e707c8beSRuslan Bukin KASSERT(*addr >= domain->msi_entry->start, 1020e707c8beSRuslan Bukin ("%s: Address is below the MSI entry start address (%jx < %jx)", 1021e707c8beSRuslan Bukin __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->start)); 1022e707c8beSRuslan Bukin 1023e707c8beSRuslan Bukin KASSERT(*addr + sizeof(*addr) <= domain->msi_entry->end, 1024e707c8beSRuslan Bukin ("%s: Address is above the MSI entry end address (%jx < %jx)", 1025e707c8beSRuslan Bukin __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->end)); 1026e707c8beSRuslan Bukin } 1027e707c8beSRuslan Bukin 1028357149f0SRuslan Bukin SYSCTL_NODE(_hw, OID_AUTO, iommu, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, ""); 1029357149f0SRuslan Bukin 10309c843a40SRuslan Bukin #ifdef INVARIANTS 10319c843a40SRuslan Bukin SYSCTL_INT(_hw_iommu, OID_AUTO, check_free, CTLFLAG_RWTUN, 10329c843a40SRuslan Bukin &iommu_check_free, 0, 10339c843a40SRuslan Bukin "Check the GPA RBtree for free_down and free_after validity"); 10349c843a40SRuslan Bukin #endif 1035