/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Hook the RB-tree rebalancing machinery so that every rotation/insertion
 * refreshes the per-node subtree summaries (first, last, free_down) that
 * the free-space search below relies on.  Must be defined before
 * <sys/tree.h> is included.
 */
#define	RB_AUGMENT(entry) iommu_gas_augment_entry(entry)

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/iommu/iommu.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/iommu.h>
#include <dev/iommu/busdma_iommu.h>

/*
 * Guest Address Space management.
 *
 * Allocated and reserved ranges of a domain's guest (device-visible)
 * address space are tracked as iommu_map_entries in a red-black tree
 * ordered by entry end address.  The tree is augmented with subtree
 * free-gap summaries so that a suitable hole can be found without a
 * linear scan.
 */

/* UMA zone backing all iommu_map_entry allocations. */
static uma_zone_t iommu_map_entry_zone;

#ifdef INVARIANTS
/* Tunable: when non-zero, re-verify free_down invariants after updates. */
static int iommu_check_free;
#endif

/*
 * Create the map-entry zone at boot.  UMA_ZONE_NODUMP keeps the (
 * potentially numerous) entries out of kernel core dumps.
 */
static void
intel_gas_init(void)
{

	iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY",
	    sizeof(struct iommu_map_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);
}
SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL);

/*
 * Allocate a zeroed map entry attributed to the given domain.  The only
 * supported flag is IOMMU_PGF_WAITOK (sleepable allocation); without it
 * the call may fail and return NULL.  The domain's entry count is bumped
 * on success.
 */
struct iommu_map_entry *
iommu_gas_alloc_entry(struct iommu_domain *domain, u_int flags)
{
	struct iommu_map_entry *res;

	KASSERT((flags & ~(IOMMU_PGF_WAITOK)) == 0,
	    ("unsupported flags %x", flags));

	res = uma_zalloc(iommu_map_entry_zone, ((flags & IOMMU_PGF_WAITOK) !=
	    0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (res != NULL) {
		res->domain = domain;
		atomic_add_int(&domain->entries_cnt, 1);
	}
	return (res);
}

/*
 * Release a map entry back to the zone and drop the domain's entry count.
 * The entry must belong to the given domain.
 */
void
iommu_gas_free_entry(struct iommu_domain *domain, struct iommu_map_entry *entry)
{

	KASSERT(domain == entry->domain,
	    ("mismatched free domain %p entry %p entry->domain %p", domain,
	    entry, entry->domain));
	atomic_subtract_int(&domain->entries_cnt, 1);
	uma_zfree(iommu_map_entry_zone, entry);
}

/*
 * Tree comparator: entries are ordered by their end address.  Entries in
 * the tree must not overlap, except that zero-size placeholder entries
 * may share a boundary with a neighbor.
 */
static int
iommu_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b)
{

	/* Last entry has zero size, so <= */
	KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)",
	    a, (uintmax_t)a->start, (uintmax_t)a->end));
	KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)",
	    b, (uintmax_t)b->start, (uintmax_t)b->end));
	KASSERT(a->end <= b->start || b->end <= a->start ||
	    a->end == a->start || b->end == b->start,
	    ("overlapping entries %p (%jx, %jx) %p (%jx, %jx)",
	    a, (uintmax_t)a->start, (uintmax_t)a->end,
	    b, (uintmax_t)b->start, (uintmax_t)b->end));

	if (a->end < b->end)
		return (-1);
	else if (b->end < a->end)
		return (1);
	return (0);
}

/*
 * Recompute the augmented fields of a node from its children:
 *
 *   first     - lowest start address in the node's subtree;
 *   last      - highest end address in the node's subtree;
 *   free_down - size of the largest free gap anywhere below the node,
 *               including the gaps between the node itself and its
 *               children's subtrees.
 *
 * Called by the RB machinery (via RB_AUGMENT) whenever the shape of the
 * tree around this node changes.
 */
static void
iommu_gas_augment_entry(struct iommu_map_entry *entry)
{
	struct iommu_map_entry *child;
	iommu_gaddr_t free_down;

	free_down = 0;
	if ((child = RB_LEFT(entry, rb_entry)) != NULL) {
		free_down = MAX(free_down, child->free_down);
		/* Gap between the left subtree's top and this entry. */
		free_down = MAX(free_down, entry->start - child->last);
		entry->first = child->first;
	} else
		entry->first = entry->start;

	if ((child = RB_RIGHT(entry, rb_entry)) != NULL) {
		free_down = MAX(free_down, child->free_down);
		/* Gap between this entry and the right subtree's bottom. */
		free_down = MAX(free_down, child->first - entry->end);
		entry->last = child->last;
	} else
		entry->last = entry->end;
	entry->free_down = free_down;
}

RB_GENERATE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
    iommu_gas_cmp_entries);

#ifdef INVARIANTS
/*
 * Debug walk over the whole tree re-deriving every node's free_down from
 * its children and comparing against the stored value.  Enabled via the
 * hw.iommu.check_free tunable; expensive, for INVARIANTS kernels only.
 */
static void
iommu_gas_check_free(struct iommu_domain *domain)
{
	struct iommu_map_entry *entry, *l, *r;
	iommu_gaddr_t v;

	RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
		KASSERT(domain == entry->domain,
		    ("mismatched free domain %p entry %p entry->domain %p",
		    domain, entry, entry->domain));
		l = RB_LEFT(entry, rb_entry);
		r = RB_RIGHT(entry, rb_entry);
		v = 0;
		if (l != NULL) {
			v = MAX(v, l->free_down);
			v = MAX(v, entry->start - l->last);
		}
		if (r != NULL) {
			v = MAX(v, r->free_down);
			v = MAX(v, r->first - entry->end);
		}
		MPASS(entry->free_down == v);
	}
}
#endif

/*
 * Insert an entry into the domain's tree.  Returns true on success,
 * false if an entry comparing equal (same end address) already exists.
 */
static bool
iommu_gas_rb_insert(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *found;

	found = RB_INSERT(iommu_gas_entries_tree,
	    &domain->rb_root, entry);
	return (found == NULL);
}

/* Remove an entry from the domain's tree. */
static void
iommu_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry)
{

	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
}

/* Accessor: the domain a context is attached to. */
struct iommu_domain *
iommu_get_ctx_domain(struct iommu_ctx *ctx)
{

	return (ctx->domain);
}

/*
 * Initialize a freshly created domain's address space tree by installing
 * two permanent placeholder entries: [0, IOMMU_PAGE_SIZE) keeps the zero
 * page unallocatable, and the zero-size [domain->end, domain->end) entry
 * bounds the space from above.  Both entries must already be accounted
 * for in entries_cnt (allocated here), hence the == 2 assertion.
 */
void
iommu_gas_init_domain(struct iommu_domain *domain)
{
	struct iommu_map_entry *begin, *end;

	begin = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	end = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);

	IOMMU_DOMAIN_LOCK(domain);
	KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain));
	KASSERT(RB_EMPTY(&domain->rb_root),
	    ("non-empty entries %p", domain));

	begin->start = 0;
	begin->end = IOMMU_PAGE_SIZE;
	begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
	iommu_gas_rb_insert(domain, begin);

	end->start = domain->end;
	end->end = domain->end;
	end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
	iommu_gas_rb_insert(domain, end);

	domain->first_place = begin;
	domain->last_place = end;
	domain->flags |= IOMMU_DOMAIN_GAS_INITED;
	IOMMU_DOMAIN_UNLOCK(domain);
}

/*
 * Tear down a domain's address space tree.  Only the two placeholder
 * entries and RMRR entries may remain at this point; everything is
 * removed and freed.  Domain lock must be held by the caller.
 */
void
iommu_gas_fini_domain(struct iommu_domain *domain)
{
	struct iommu_map_entry *entry, *entry1;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT(domain->entries_cnt == 2,
	    ("domain still in use %p", domain));

	entry = RB_MIN(iommu_gas_entries_tree, &domain->rb_root);
	KASSERT(entry->start == 0, ("start entry start %p", domain));
	KASSERT(entry->end == IOMMU_PAGE_SIZE, ("start entry end %p", domain));
	KASSERT(entry->flags == IOMMU_MAP_ENTRY_PLACE,
	    ("start entry flags %p", domain));
	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
	iommu_gas_free_entry(domain, entry);

	entry = RB_MAX(iommu_gas_entries_tree, &domain->rb_root);
	KASSERT(entry->start == domain->end, ("end entry start %p", domain));
	KASSERT(entry->end == domain->end, ("end entry end %p", domain));
	KASSERT(entry->flags == IOMMU_MAP_ENTRY_PLACE,
	    ("end entry flags %p", domain));
	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
	iommu_gas_free_entry(domain, entry);

	RB_FOREACH_SAFE(entry, iommu_gas_entries_tree, &domain->rb_root,
	    entry1) {
		KASSERT((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0,
		    ("non-RMRR entry left %p", domain));
		RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root,
		    entry);
		iommu_gas_free_entry(domain, entry);
	}
}

/*
 * State threaded through the recursive free-space search: the request
 * (size, offset, tag constraints, flags) plus the entry being placed.
 */
struct iommu_gas_match_args {
	struct iommu_domain *domain;
	iommu_gaddr_t size;
	int offset;
	const struct bus_dma_tag_common *common;
	u_int gas_flags;
	struct iommu_map_entry *entry;
};

/*
 * The interval [beg, end) is a free interval between two iommu_map_entries.
 * maxaddr is an upper bound on addresses that can be allocated. Try to
 * allocate space in the free interval, subject to the conditions expressed
 * by a, and return 'true' if and only if the allocation attempt succeeds.
 */
static bool
iommu_gas_match_one(struct iommu_gas_match_args *a, iommu_gaddr_t beg,
    iommu_gaddr_t end, iommu_gaddr_t maxaddr)
{
	iommu_gaddr_t bs, start;

	/*
	 * Leave an IOMMU_PAGE_SIZE guard gap after the previous entry,
	 * then align per the DMA tag.
	 */
	a->entry->start = roundup2(beg + IOMMU_PAGE_SIZE,
	    a->common->alignment);
	if (a->entry->start + a->size > maxaddr)
		return (false);

	/* IOMMU_PAGE_SIZE to create gap after new entry. */
	if (a->entry->start < beg + IOMMU_PAGE_SIZE ||
	    a->entry->start + a->size + a->offset + IOMMU_PAGE_SIZE > end)
		return (false);

	/* No boundary crossing. */
	if (iommu_test_boundary(a->entry->start + a->offset, a->size,
	    a->common->boundary))
		return (true);

	/*
	 * The start + offset to start + offset + size region crosses
	 * the boundary. Check if there is enough space after the
	 * next boundary after the beg.
	 */
	bs = rounddown2(a->entry->start + a->offset + a->common->boundary,
	    a->common->boundary);
	start = roundup2(bs, a->common->alignment);
	/* IOMMU_PAGE_SIZE to create gap after new entry. */
	if (start + a->offset + a->size + IOMMU_PAGE_SIZE <= end &&
	    start + a->offset + a->size <= maxaddr &&
	    iommu_test_boundary(start + a->offset, a->size,
	    a->common->boundary)) {
		a->entry->start = start;
		return (true);
	}

	/*
	 * Not enough space to align at the requested boundary, or
	 * boundary is smaller than the size, but allowed to split.
	 * We already checked that start + size does not overlap maxaddr.
	 *
	 * XXXKIB. It is possible that bs is exactly at the start of
	 * the next entry, then we do not have gap. Ignore for now.
	 */
	if ((a->gas_flags & IOMMU_MF_CANSPLIT) != 0) {
		/* Shrink the request to end at the boundary. */
		a->size = bs - a->entry->start;
		return (true);
	}

	return (false);
}

/*
 * Commit a successful match: set the entry's end, insert it into the
 * tree, and mark it as an ordinary mapping.
 */
static void
iommu_gas_match_insert(struct iommu_gas_match_args *a)
{
	bool found;

	/*
	 * The prev->end is always aligned on the page size, which
	 * causes page alignment for the entry->start too. The size
	 * is checked to be multiple of the page size.
	 *
	 * The page sized gap is created between consequent
	 * allocations to ensure that out-of-bounds accesses fault.
	 */
	a->entry->end = a->entry->start + a->size;

	found = iommu_gas_rb_insert(a->domain, a->entry);
	KASSERT(found, ("found dup %p start %jx size %jx",
	    a->domain, (uintmax_t)a->entry->start, (uintmax_t)a->size));
	a->entry->flags = IOMMU_MAP_ENTRY_MAP;
}

/*
 * Recursively search the subtree rooted at 'entry' for a free range that
 * lies entirely below common->lowaddr.  The free_down summary and the
 * subtree's 'first' bound prune branches that cannot contain a large
 * enough hole.  Returns 0 on success (entry inserted), ENOMEM otherwise.
 */
static int
iommu_gas_lowermatch(struct iommu_gas_match_args *a, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *child;

	/* Try the gap between this entry and its right subtree. */
	child = RB_RIGHT(entry, rb_entry);
	if (child != NULL && entry->end < a->common->lowaddr &&
	    iommu_gas_match_one(a, entry->end, child->first,
	    a->common->lowaddr)) {
		iommu_gas_match_insert(a);
		return (0);
	}
	/* Prune: no gap below this node can fit the padded request. */
	if (entry->free_down < a->size + a->offset + IOMMU_PAGE_SIZE)
		return (ENOMEM);
	/* Prune: entire subtree lies at or above the low-address limit. */
	if (entry->first >= a->common->lowaddr)
		return (ENOMEM);
	child = RB_LEFT(entry, rb_entry);
	if (child != NULL && 0 == iommu_gas_lowermatch(a, child))
		return (0);
	/* Try the gap between the left subtree and this entry. */
	if (child != NULL && child->last < a->common->lowaddr &&
	    iommu_gas_match_one(a, child->last, entry->start,
	    a->common->lowaddr)) {
		iommu_gas_match_insert(a);
		return (0);
	}
	child = RB_RIGHT(entry, rb_entry);
	if (child != NULL && 0 == iommu_gas_lowermatch(a, child))
		return (0);
	return (ENOMEM);
}

/*
 * Counterpart of iommu_gas_lowermatch for the region at or above
 * common->highaddr, bounded above by the domain end.  Returns 0 on
 * success (entry inserted), ENOMEM otherwise.
 */
static int
iommu_gas_uppermatch(struct iommu_gas_match_args *a, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *child;

	/* Prune: no gap below this node can fit the padded request. */
	if (entry->free_down < a->size + a->offset + IOMMU_PAGE_SIZE)
		return (ENOMEM);
	/* Prune: entire subtree lies below the high-address threshold. */
	if (entry->last < a->common->highaddr)
		return (ENOMEM);
	child = RB_LEFT(entry, rb_entry);
	if (child != NULL && 0 == iommu_gas_uppermatch(a, child))
		return (0);
	/* Try the gap between the left subtree and this entry. */
	if (child != NULL && child->last >= a->common->highaddr &&
	    iommu_gas_match_one(a, child->last, entry->start,
	    a->domain->end)) {
		iommu_gas_match_insert(a);
		return (0);
	}
	child = RB_RIGHT(entry, rb_entry);
	/* Try the gap between this entry and its right subtree. */
	if (child != NULL && entry->end >= a->common->highaddr &&
	    iommu_gas_match_one(a, entry->end, child->first,
	    a->domain->end)) {
		iommu_gas_match_insert(a);
		return (0);
	}
	if (child != NULL && 0 == iommu_gas_uppermatch(a, child))
		return (0);
	return (ENOMEM);
}

/*
 * Find and claim a free range of the given (page-aligned) size that
 * satisfies the DMA tag constraints in 'common'.  The lower window
 * (below lowaddr) is preferred; the upper window (above highaddr) is
 * tried next.  On success the entry has been inserted into the tree.
 * Domain lock must be held.
 */
static int
iommu_gas_find_space(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size,
    int offset, u_int flags, struct iommu_map_entry *entry)
{
	struct iommu_gas_match_args a;
	int error;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT(entry->flags == 0, ("dirty entry %p %p", domain, entry));
	KASSERT((size & IOMMU_PAGE_MASK) == 0, ("size %jx", (uintmax_t)size));

	a.domain = domain;
	a.size = size;
	a.offset = offset;
	a.common = common;
	a.gas_flags = flags;
	a.entry = entry;

	/* Handle lower region. */
	if (common->lowaddr > 0) {
		error = iommu_gas_lowermatch(&a,
		    RB_ROOT(&domain->rb_root));
		if (error == 0)
			return (0);
		KASSERT(error == ENOMEM,
		    ("error %d from iommu_gas_lowermatch", error));
	}
	/* Handle upper region. */
	if (common->highaddr >= domain->end)
		return (ENOMEM);
	error = iommu_gas_uppermatch(&a, RB_ROOT(&domain->rb_root));
	KASSERT(error == ENOMEM,
	    ("error %d from iommu_gas_uppermatch", error));
	return (error);
}

/*
 * Insert an entry describing a fixed, caller-chosen address range (e.g.
 * an RMRR region) into the tree.  Overlap with existing entries is an
 * error (EBUSY), except that with IOMMU_MF_RMRR the entry is clipped
 * against neighboring RMRR entries, and overlapped placeholder entries
 * are removed.  Domain lock must be held.
 */
static int
iommu_gas_alloc_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
    u_int flags)
{
	struct iommu_map_entry *next, *prev;
	bool found;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

	if ((entry->start & IOMMU_PAGE_MASK) != 0 ||
	    (entry->end & IOMMU_PAGE_MASK) != 0)
		return (EINVAL);
	if (entry->start >= entry->end)
		return (EINVAL);
	if (entry->end >= domain->end)
		return (EINVAL);

	next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, entry);
	KASSERT(next != NULL, ("next must be non-null %p %jx", domain,
	    (uintmax_t)entry->start));
	prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
	/* prev could be NULL */

	/*
	 * Adapt to broken BIOSes which specify overlapping RMRR
	 * entries.
	 *
	 * XXXKIB: this does not handle a case when prev or next
	 * entries are completely covered by the current one, which
	 * extends both ways.
	 */
	if (prev != NULL && prev->end > entry->start &&
	    (prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
		if ((flags & IOMMU_MF_RMRR) == 0 ||
		    (prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
			return (EBUSY);
		entry->start = prev->end;
	}
	if (next->start < entry->end &&
	    (next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
		if ((flags & IOMMU_MF_RMRR) == 0 ||
		    (next->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
			return (EBUSY);
		entry->end = next->start;
	}
	/* Clipping may have reduced the request to nothing. */
	if (entry->end == entry->start)
		return (0);

	if (prev != NULL && prev->end > entry->start) {
		/* This assumes that prev is the placeholder entry. */
		iommu_gas_rb_remove(domain, prev);
		prev = NULL;
	}
	if (next->start < entry->end) {
		iommu_gas_rb_remove(domain, next);
		next = NULL;
	}

	found = iommu_gas_rb_insert(domain, entry);
	KASSERT(found, ("found RMRR dup %p start %jx end %jx",
	    domain, (uintmax_t)entry->start, (uintmax_t)entry->end));
	if ((flags & IOMMU_MF_RMRR) != 0)
		entry->flags = IOMMU_MAP_ENTRY_RMRR;

#ifdef INVARIANTS
	/* The inserted entry's neighbors must be the ones we computed. */
	struct iommu_map_entry *ip, *in;
	ip = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
	in = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
	KASSERT(prev == NULL || ip == prev,
	    ("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)",
	    entry, entry->start, entry->end, prev,
	    prev == NULL ? 0 : prev->start, prev == NULL ? 0 : prev->end,
	    ip, ip == NULL ? 0 : ip->start, ip == NULL ? 0 : ip->end));
	KASSERT(next == NULL || in == next,
	    ("RMRR %p (%jx %jx) next %p (%jx %jx) ins next %p (%jx %jx)",
	    entry, entry->start, entry->end, next,
	    next == NULL ? 0 : next->start, next == NULL ? 0 : next->end,
	    in, in == NULL ? 0 : in->start, in == NULL ? 0 : in->end));
#endif

	return (0);
}

/*
 * Return the address range of a regular (MAP) entry to the free pool by
 * removing it from the tree.  Does not free the entry itself.  Domain
 * lock must be held.
 */
void
iommu_gas_free_space(struct iommu_domain *domain, struct iommu_map_entry *entry)
{

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
	    IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP,
	    ("permanent entry %p %p", domain, entry));

	iommu_gas_rb_remove(domain, entry);
	entry->flags &= ~IOMMU_MAP_ENTRY_MAP;
#ifdef INVARIANTS
	if (iommu_check_free)
		iommu_gas_check_free(domain);
#endif
}

/*
 * Remove an RMRR entry from the tree.  If the RMRR entry had displaced
 * one of the permanent placeholder entries (i.e. it was the first or
 * last entry), reinstall that placeholder.  Domain lock must be held.
 */
void
iommu_gas_free_region(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *next, *prev;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
	    IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR,
	    ("non-RMRR entry %p %p", domain, entry));

	prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
	next = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
	iommu_gas_rb_remove(domain, entry);
	entry->flags &= ~IOMMU_MAP_ENTRY_RMRR;

	if (prev == NULL)
		iommu_gas_rb_insert(domain, domain->first_place);
	if (next == NULL)
		iommu_gas_rb_insert(domain, domain->last_place);
}

/*
 * Allocate address space for 'size' bytes honoring the DMA tag
 * constraints, then establish the page-table mapping for pages 'ma' via
 * the domain's map method.  On success *res holds the new entry.  The
 * mapping call is made after dropping the domain lock; on ENOMEM from
 * it the partially set up entry is unloaded.
 */
int
iommu_gas_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
	struct iommu_map_entry *entry;
	int error;

	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0,
	    ("invalid flags 0x%x", flags));

	entry = iommu_gas_alloc_entry(domain,
	    (flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0);
	if (entry == NULL)
		return (ENOMEM);
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_find_space(domain, common, size, offset, flags,
	    entry);
	if (error == ENOMEM) {
		IOMMU_DOMAIN_UNLOCK(domain);
		iommu_gas_free_entry(domain, entry);
		return (error);
	}
#ifdef INVARIANTS
	if (iommu_check_free)
		iommu_gas_check_free(domain);
#endif
	KASSERT(error == 0,
	    ("unexpected error %d from iommu_gas_find_entry", error));
	KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx",
	    (uintmax_t)entry->end, (uintmax_t)domain->end));
	entry->flags |= eflags;
	IOMMU_DOMAIN_UNLOCK(domain);

	error = domain->ops->map(domain, entry->start,
	    entry->end - entry->start, ma, eflags,
	    ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
	if (error == ENOMEM) {
		iommu_domain_unload_entry(entry, true);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from domain_map_buf", error));

	*res = entry;
	return (0);
}

/*
 * Map a fixed region described by a pre-filled entry (typically an
 * RMRR).  The entry may be clipped by iommu_gas_alloc_region; a region
 * clipped to zero size succeeds with no mapping made.  'ma' is indexed
 * relative to the original start, hence the OFF_TO_IDX adjustment.
 */
int
iommu_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	iommu_gaddr_t start;
	int error;

	KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
	    entry, entry->flags));
	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0,
	    ("invalid flags 0x%x", flags));

	start = entry->start;
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_alloc_region(domain, entry, flags);
	if (error != 0) {
		IOMMU_DOMAIN_UNLOCK(domain);
		return (error);
	}
	entry->flags |= eflags;
	IOMMU_DOMAIN_UNLOCK(domain);
	if (entry->end == entry->start)
		return (0);

	error = domain->ops->map(domain, entry->start,
	    entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start),
	    eflags, ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
	if (error == ENOMEM) {
		iommu_domain_unload_entry(entry, false);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from domain_map_buf", error));

	return (0);
}

/*
 * Reserve [start, end) in the domain's address space without creating a
 * page-table mapping, so the range is never handed out by the
 * allocator.  The entry is freed again if the reservation fails.
 */
int
iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end)
{
	struct iommu_map_entry *entry;
	int error;

	entry = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	entry->start = start;
	entry->end = end;
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT);
	if (error == 0)
		entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED;
	IOMMU_DOMAIN_UNLOCK(domain);
	if (error != 0)
		iommu_gas_free_entry(domain, entry);
	return (error);
}

/* Public wrapper around iommu_gas_alloc_entry(). */
struct iommu_map_entry *
iommu_map_alloc_entry(struct iommu_domain *domain, u_int flags)
{
	struct iommu_map_entry *res;

	res = iommu_gas_alloc_entry(domain, flags);

	return (res);
}

/* Public wrapper around iommu_gas_free_entry(). */
void
iommu_map_free_entry(struct iommu_domain *domain, struct iommu_map_entry *entry)
{

	iommu_gas_free_entry(domain, entry);
}

/* Public wrapper around iommu_gas_map(). */
int
iommu_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
	int error;

	error = iommu_gas_map(domain, common, size, offset, eflags, flags,
	    ma, res);

	return (error);
}

/* Public wrapper around iommu_gas_map_region(). */
int
iommu_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	int error;

	error = iommu_gas_map_region(domain, entry, eflags, flags, ma);

	return (error);
}

SYSCTL_NODE(_hw, OID_AUTO, iommu, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "");

#ifdef INVARIANTS
SYSCTL_INT(_hw_iommu, OID_AUTO, check_free, CTLFLAG_RWTUN,
    &iommu_check_free, 0,
    "Check the GPA RBtree for free_down and free_after validity");
#endif