/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define	RB_AUGMENT(entry) iommu_gas_augment_entry(entry)

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/iommu/iommu.h>
#include <dev/iommu/iommu_gas.h>
#include <dev/iommu/iommu_msi.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/iommu.h>
#include <dev/iommu/busdma_iommu.h>

/*
 * Guest Address Space management.
 */

static uma_zone_t iommu_map_entry_zone;

#ifdef INVARIANTS
static int iommu_check_free;
#endif

static void
intel_gas_init(void)
{

	iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY",
	    sizeof(struct iommu_map_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);
}
SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL);

struct iommu_map_entry *
iommu_gas_alloc_entry(struct iommu_domain *domain, u_int flags)
{
	struct iommu_map_entry *res;

	KASSERT((flags & ~(IOMMU_PGF_WAITOK)) == 0,
	    ("unsupported flags %x", flags));

	res = uma_zalloc(iommu_map_entry_zone, ((flags & IOMMU_PGF_WAITOK) !=
	    0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (res != NULL) {
		res->domain = domain;
		atomic_add_int(&domain->entries_cnt, 1);
	}
	return (res);
}
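
/*
 * Usage sketch (hypothetical caller; "domain" stands for any domain
 * with initialized GAS state): a standalone entry is obtained here and
 * released with iommu_gas_free_entry() below, which keeps the
 * per-domain entries_cnt accounting balanced:
 *
 *	struct iommu_map_entry *e;
 *
 *	e = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
 *	... fill in e->start and e->end, insert and use the entry ...
 *	iommu_gas_free_entry(domain, e);
 */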

void
iommu_gas_free_entry(struct iommu_domain *domain, struct iommu_map_entry *entry)
{

	KASSERT(domain == entry->domain,
	    ("mismatched free domain %p entry %p entry->domain %p", domain,
	    entry, entry->domain));
	atomic_subtract_int(&domain->entries_cnt, 1);
	uma_zfree(iommu_map_entry_zone, entry);
}

static int
iommu_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b)
{

	/* The last entry has zero size, so <=. */
	KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)",
	    a, (uintmax_t)a->start, (uintmax_t)a->end));
	KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)",
	    b, (uintmax_t)b->start, (uintmax_t)b->end));
	KASSERT(a->end <= b->start || b->end <= a->start ||
	    a->end == a->start || b->end == b->start,
	    ("overlapping entries %p (%jx, %jx) %p (%jx, %jx)",
	    a, (uintmax_t)a->start, (uintmax_t)a->end,
	    b, (uintmax_t)b->start, (uintmax_t)b->end));

	if (a->end < b->end)
		return (-1);
	else if (b->end < a->end)
		return (1);
	return (0);
}

static void
iommu_gas_augment_entry(struct iommu_map_entry *entry)
{
	struct iommu_map_entry *child;
	iommu_gaddr_t free_down;

	free_down = 0;
	if ((child = RB_LEFT(entry, rb_entry)) != NULL) {
		free_down = MAX(free_down, child->free_down);
		free_down = MAX(free_down, entry->start - child->last);
		entry->first = child->first;
	} else
		entry->first = entry->start;

	if ((child = RB_RIGHT(entry, rb_entry)) != NULL) {
		free_down = MAX(free_down, child->free_down);
		free_down = MAX(free_down, child->first - entry->end);
		entry->last = child->last;
	} else
		entry->last = entry->end;
	entry->free_down = free_down;
}
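
/*
 * Worked example of the augmentation above (hypothetical values): for
 * a node E with start = 0x5000 and end = 0x6000, a left child L with
 * last = 0x3000 and free_down = 0x1000, and no right child:
 *
 *	free_down = MAX(0x1000, 0x5000 - 0x3000) = 0x2000
 *	E->first = L->first; E->last = E->end = 0x6000
 *
 * That is, the largest free gap anywhere under E is the 0x2000-byte
 * hole between the rightmost end in L's subtree and E's own start.
 */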

RB_GENERATE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
    iommu_gas_cmp_entries);

#ifdef INVARIANTS
static void
iommu_gas_check_free(struct iommu_domain *domain)
{
	struct iommu_map_entry *entry, *l, *r;
	iommu_gaddr_t v;

	RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
		KASSERT(domain == entry->domain,
		    ("mismatched free domain %p entry %p entry->domain %p",
		    domain, entry, entry->domain));
		l = RB_LEFT(entry, rb_entry);
		r = RB_RIGHT(entry, rb_entry);
		v = 0;
		if (l != NULL) {
			v = MAX(v, l->free_down);
			v = MAX(v, entry->start - l->last);
		}
		if (r != NULL) {
			v = MAX(v, r->free_down);
			v = MAX(v, r->first - entry->end);
		}
		MPASS(entry->free_down == v);
	}
}
#endif

static bool
iommu_gas_rb_insert(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *found;

	found = RB_INSERT(iommu_gas_entries_tree,
	    &domain->rb_root, entry);
	return (found == NULL);
}

static void
iommu_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry)
{

	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
}

struct iommu_domain *
iommu_get_ctx_domain(struct iommu_ctx *ctx)
{

	return (ctx->domain);
}

void
iommu_gas_init_domain(struct iommu_domain *domain)
{
	struct iommu_map_entry *begin, *end;

	begin = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	end = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);

	IOMMU_DOMAIN_LOCK(domain);
	KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain));
	KASSERT(RB_EMPTY(&domain->rb_root),
	    ("non-empty entries %p", domain));

	begin->start = 0;
	begin->end = IOMMU_PAGE_SIZE;
	begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
	iommu_gas_rb_insert(domain, begin);

	end->start = domain->end;
	end->end = domain->end;
	end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
	iommu_gas_rb_insert(domain, end);

	domain->first_place = begin;
	domain->last_place = end;
	domain->flags |= IOMMU_DOMAIN_GAS_INITED;
	IOMMU_DOMAIN_UNLOCK(domain);
}
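
/*
 * After initialization the tree holds only the two placeholder entries
 * set up above; with a hypothetical domain->end of 0x100000000 the
 * address space looks like:
 *
 *	[0, IOMMU_PAGE_SIZE)		begin placeholder (first_place)
 *	[IOMMU_PAGE_SIZE, 0x100000000)	free, available to the allocator
 *	[0x100000000, 0x100000000)	zero-size end placeholder (last_place)
 *
 * The placeholders bracket every free range with entries, so the
 * allocator never has to special-case the edges of the address space,
 * and address zero is never handed out.
 */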

void
iommu_gas_fini_domain(struct iommu_domain *domain)
{
	struct iommu_map_entry *entry, *entry1;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT(domain->entries_cnt == 2,
	    ("domain still in use %p", domain));

	entry = RB_MIN(iommu_gas_entries_tree, &domain->rb_root);
	KASSERT(entry->start == 0, ("start entry start %p", domain));
	KASSERT(entry->end == IOMMU_PAGE_SIZE, ("start entry end %p", domain));
	KASSERT(entry->flags ==
	    (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED),
	    ("start entry flags %p", domain));
	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
	iommu_gas_free_entry(domain, entry);

	entry = RB_MAX(iommu_gas_entries_tree, &domain->rb_root);
	KASSERT(entry->start == domain->end, ("end entry start %p", domain));
	KASSERT(entry->end == domain->end, ("end entry end %p", domain));
	KASSERT(entry->flags ==
	    (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED),
	    ("end entry flags %p", domain));
	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
	iommu_gas_free_entry(domain, entry);

	RB_FOREACH_SAFE(entry, iommu_gas_entries_tree, &domain->rb_root,
	    entry1) {
		KASSERT((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0,
		    ("non-RMRR entry left %p", domain));
		RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root,
		    entry);
		iommu_gas_free_entry(domain, entry);
	}
}

struct iommu_gas_match_args {
	struct iommu_domain *domain;
	iommu_gaddr_t size;
	int offset;
	const struct bus_dma_tag_common *common;
	u_int gas_flags;
	struct iommu_map_entry *entry;
};

/*
 * The interval [beg, end) is a free interval between two iommu_map_entries.
 * maxaddr is an upper bound on addresses that can be allocated.  Try to
 * allocate space in the free interval, subject to the conditions expressed
 * by a, and return 'true' if and only if the allocation attempt succeeds.
 */
static bool
iommu_gas_match_one(struct iommu_gas_match_args *a, iommu_gaddr_t beg,
    iommu_gaddr_t end, iommu_gaddr_t maxaddr)
{
	iommu_gaddr_t bs, start;

	a->entry->start = roundup2(beg + IOMMU_PAGE_SIZE,
	    a->common->alignment);
	if (a->entry->start + a->offset + a->size > maxaddr)
		return (false);

	/* IOMMU_PAGE_SIZE to create a gap after the new entry. */
	if (a->entry->start < beg + IOMMU_PAGE_SIZE ||
	    a->entry->start + a->size + a->offset + IOMMU_PAGE_SIZE > end)
		return (false);

	/* No boundary crossing. */
	if (vm_addr_bound_ok(a->entry->start + a->offset, a->size,
	    a->common->boundary))
		return (true);

	/*
	 * The [start + offset, start + offset + size) region crosses
	 * the boundary.  Check whether there is enough space after the
	 * next boundary following beg.
	 */
	bs = rounddown2(a->entry->start + a->offset + a->common->boundary,
	    a->common->boundary);
	start = roundup2(bs, a->common->alignment);
	/* IOMMU_PAGE_SIZE to create a gap after the new entry. */
	if (start + a->offset + a->size + IOMMU_PAGE_SIZE <= end &&
	    start + a->offset + a->size <= maxaddr &&
	    vm_addr_bound_ok(start + a->offset, a->size,
	    a->common->boundary)) {
		a->entry->start = start;
		return (true);
	}

	/*
	 * There is not enough space to align at the requested boundary,
	 * or the boundary is smaller than the size, but we are allowed
	 * to split.  We already checked that start + size does not
	 * overlap maxaddr.
	 *
	 * XXXKIB.  It is possible that bs is exactly at the start of
	 * the next entry, in which case we do not have a gap.  Ignore
	 * for now.
	 */
	if ((a->gas_flags & IOMMU_MF_CANSPLIT) != 0) {
		a->size = bs - a->entry->start;
		return (true);
	}

	return (false);
}
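
/*
 * Worked example for the boundary handling above (hypothetical values,
 * 4K IOMMU page): let beg = 0, end = maxaddr = 0x11000, alignment =
 * 0x1000, boundary = 0x8000, offset = 0 and size = 0x7800.  The first
 * candidate is start = roundup2(0x1000, 0x1000) = 0x1000, but
 * [0x1000, 0x8800) crosses the boundary at 0x8000.  The retry computes
 * bs = rounddown2(0x1000 + 0x8000, 0x8000) = 0x8000; the region
 * [0x8000, 0xf800) stays below the next boundary at 0x10000 and the
 * trailing guard page still ends at 0x10800 <= end, so it is taken.
 */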

static void
iommu_gas_match_insert(struct iommu_gas_match_args *a)
{
	bool found __diagused;

	/*
	 * The prev->end is always page-aligned, which makes the
	 * entry->start page-aligned too.
	 *
	 * A page-sized gap is created between consecutive allocations
	 * to ensure that out-of-bounds accesses fault.
	 */
	a->entry->end = a->entry->start +
	    roundup2(a->size + a->offset, IOMMU_PAGE_SIZE);

	found = iommu_gas_rb_insert(a->domain, a->entry);
	KASSERT(found, ("found dup %p start %jx size %jx",
	    a->domain, (uintmax_t)a->entry->start, (uintmax_t)a->size));
	a->entry->flags = IOMMU_MAP_ENTRY_MAP;
}

static int
iommu_gas_lowermatch(struct iommu_gas_match_args *a, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *first;
	iommu_gaddr_t min_free;

	/*
	 * If the subtree doesn't have free space for the requested allocation
	 * plus two guard pages, skip it.
	 */
	min_free = 2 * IOMMU_PAGE_SIZE +
	    roundup2(a->size + a->offset, IOMMU_PAGE_SIZE);

	/* Find the first entry that could abut a big-enough range. */
	first = NULL;
	while (entry != NULL && entry->free_down >= min_free) {
		first = entry;
		entry = RB_LEFT(entry, rb_entry);
	}

	/*
	 * Walk the big-enough ranges until one satisfies the alignment
	 * requirements or violates the lowaddr requirement.
	 */
	entry = first;
	while (entry != NULL) {
		if ((first = RB_LEFT(entry, rb_entry)) != NULL &&
		    iommu_gas_match_one(a, first->last, entry->start,
		    a->common->lowaddr)) {
			iommu_gas_match_insert(a);
			return (0);
		}
		if (entry->end >= a->common->lowaddr) {
			/* All remaining ranges are >= lowaddr. */
			break;
		}
		if ((first = RB_RIGHT(entry, rb_entry)) != NULL &&
		    iommu_gas_match_one(a, entry->end, first->first,
		    a->common->lowaddr)) {
			iommu_gas_match_insert(a);
			return (0);
		}
		/* Find the next entry that might abut a big-enough range. */
		if (first != NULL && first->free_down >= min_free) {
			/* Find the next entry in the right subtree. */
			do
				entry = first;
			while ((first = RB_LEFT(entry, rb_entry)) != NULL &&
			    first->free_down >= min_free);
		} else {
			/* Find the next entry in a left-parent ancestor. */
			while ((first = RB_PARENT(entry, rb_entry)) != NULL &&
			    entry == RB_RIGHT(first, rb_entry))
				entry = first;
			entry = first;
		}
	}
	return (ENOMEM);
}

static int
iommu_gas_uppermatch(struct iommu_gas_match_args *a, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *child;

	/*
	 * If the subtree doesn't have free space for the requested allocation
	 * plus two guard pages, give up.
	 */
	if (entry->free_down < 2 * IOMMU_PAGE_SIZE +
	    roundup2(a->size + a->offset, IOMMU_PAGE_SIZE))
		return (ENOMEM);
	if (entry->last < a->common->highaddr)
		return (ENOMEM);
	child = RB_LEFT(entry, rb_entry);
	if (child != NULL && iommu_gas_uppermatch(a, child) == 0)
		return (0);
	if (child != NULL && child->last >= a->common->highaddr &&
	    iommu_gas_match_one(a, child->last, entry->start,
	    a->domain->end)) {
		iommu_gas_match_insert(a);
		return (0);
	}
	child = RB_RIGHT(entry, rb_entry);
	if (child != NULL && entry->end >= a->common->highaddr &&
	    iommu_gas_match_one(a, entry->end, child->first,
	    a->domain->end)) {
		iommu_gas_match_insert(a);
		return (0);
	}
	if (child != NULL && iommu_gas_uppermatch(a, child) == 0)
		return (0);
	return (ENOMEM);
}

static int
iommu_gas_find_space(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size,
    int offset, u_int flags, struct iommu_map_entry *entry)
{
	struct iommu_gas_match_args a;
	int error;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT(entry->flags == 0, ("dirty entry %p %p", domain, entry));

	a.domain = domain;
	a.size = size;
	a.offset = offset;
	a.common = common;
	a.gas_flags = flags;
	a.entry = entry;

	/* Handle lower region. */
	if (common->lowaddr > 0) {
		error = iommu_gas_lowermatch(&a,
		    RB_ROOT(&domain->rb_root));
		if (error == 0)
			return (0);
		KASSERT(error == ENOMEM,
		    ("error %d from iommu_gas_lowermatch", error));
	}
	/* Handle upper region. */
	if (common->highaddr >= domain->end)
		return (ENOMEM);
	error = iommu_gas_uppermatch(&a, RB_ROOT(&domain->rb_root));
	KASSERT(error == 0 || error == ENOMEM,
	    ("error %d from iommu_gas_uppermatch", error));
	return (error);
}
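
/*
 * For a hypothetical tag with, e.g., lowaddr = BUS_SPACE_MAXADDR_32BIT
 * and highaddr = BUS_SPACE_MAXADDR, the search above first tries to
 * place the entry entirely below 4G via iommu_gas_lowermatch(); only
 * when no such range exists does it fall back to
 * iommu_gas_uppermatch(), which allocates strictly above highaddr,
 * keeping the tag's exclusion window between lowaddr and highaddr
 * free of allocations.
 */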

static int
iommu_gas_alloc_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
    u_int flags)
{
	struct iommu_map_entry *next, *prev;
	bool found __diagused;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

	if ((entry->start & IOMMU_PAGE_MASK) != 0 ||
	    (entry->end & IOMMU_PAGE_MASK) != 0)
		return (EINVAL);
	if (entry->start >= entry->end)
		return (EINVAL);
	if (entry->end >= domain->end)
		return (EINVAL);

	next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, entry);
	KASSERT(next != NULL, ("next must be non-null %p %jx", domain,
	    (uintmax_t)entry->start));
	prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
	/* prev could be NULL */

	/*
	 * Adapt to broken BIOSes which specify overlapping RMRR
	 * entries.
	 *
	 * XXXKIB: this does not handle a case when prev or next
	 * entries are completely covered by the current one, which
	 * extends both ways.
	 */
	if (prev != NULL && prev->end > entry->start &&
	    (prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
		if ((flags & IOMMU_MF_RMRR) == 0 ||
		    (prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
			return (EBUSY);
		entry->start = prev->end;
	}
	if (next->start < entry->end &&
	    (next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
		if ((flags & IOMMU_MF_RMRR) == 0 ||
		    (next->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
			return (EBUSY);
		entry->end = next->start;
	}
	if (entry->end == entry->start)
		return (0);

	if (prev != NULL && prev->end > entry->start) {
		/* This assumes that prev is the placeholder entry. */
		iommu_gas_rb_remove(domain, prev);
		prev = NULL;
	}
	if (next->start < entry->end) {
		iommu_gas_rb_remove(domain, next);
		next = NULL;
	}

	found = iommu_gas_rb_insert(domain, entry);
	KASSERT(found, ("found RMRR dup %p start %jx end %jx",
	    domain, (uintmax_t)entry->start, (uintmax_t)entry->end));
	if ((flags & IOMMU_MF_RMRR) != 0)
		entry->flags = IOMMU_MAP_ENTRY_RMRR;

#ifdef INVARIANTS
	struct iommu_map_entry *ip, *in;
	ip = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
	in = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
	KASSERT(prev == NULL || ip == prev,
	    ("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)",
	    entry, entry->start, entry->end, prev,
	    prev == NULL ? 0 : prev->start, prev == NULL ? 0 : prev->end,
	    ip, ip == NULL ? 0 : ip->start, ip == NULL ? 0 : ip->end));
	KASSERT(next == NULL || in == next,
	    ("RMRR %p (%jx %jx) next %p (%jx %jx) ins next %p (%jx %jx)",
	    entry, entry->start, entry->end, next,
	    next == NULL ? 0 : next->start, next == NULL ? 0 : next->end,
	    in, in == NULL ? 0 : in->start, in == NULL ? 0 : in->end));
#endif

	return (0);
}

void
iommu_gas_free_space(struct iommu_domain *domain, struct iommu_map_entry *entry)
{

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
	    IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP,
	    ("permanent entry %p %p", domain, entry));

	iommu_gas_rb_remove(domain, entry);
	entry->flags &= ~IOMMU_MAP_ENTRY_MAP;
#ifdef INVARIANTS
	if (iommu_check_free)
		iommu_gas_check_free(domain);
#endif
}

void
iommu_gas_free_region(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *next, *prev;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
	    IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR,
	    ("non-RMRR entry %p %p", domain, entry));

	prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
	next = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
	iommu_gas_rb_remove(domain, entry);
	entry->flags &= ~IOMMU_MAP_ENTRY_RMRR;

	if (prev == NULL)
		iommu_gas_rb_insert(domain, domain->first_place);
	if (next == NULL)
		iommu_gas_rb_insert(domain, domain->last_place);
}

int
iommu_gas_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
	struct iommu_map_entry *entry;
	int error;

	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0,
	    ("invalid flags 0x%x", flags));

	entry = iommu_gas_alloc_entry(domain,
	    (flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0);
	if (entry == NULL)
		return (ENOMEM);
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_find_space(domain, common, size, offset, flags,
	    entry);
	if (error == ENOMEM) {
		IOMMU_DOMAIN_UNLOCK(domain);
		iommu_gas_free_entry(domain, entry);
		return (error);
	}
#ifdef INVARIANTS
	if (iommu_check_free)
		iommu_gas_check_free(domain);
#endif
	KASSERT(error == 0,
	    ("unexpected error %d from iommu_gas_find_space", error));
	KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx",
	    (uintmax_t)entry->end, (uintmax_t)domain->end));
	entry->flags |= eflags;
	IOMMU_DOMAIN_UNLOCK(domain);

	error = domain->ops->map(domain, entry->start,
	    entry->end - entry->start, ma, eflags,
	    ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
	if (error == ENOMEM) {
		iommu_domain_unload_entry(entry, true);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from domain_map_buf", error));

	*res = entry;
	return (0);
}
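
/*
 * Sketch of a hypothetical iommu_gas_map() caller (the tag, size and
 * page-array names are illustrative): map a run of pages for DMA,
 * then release the space later through iommu_domain_unload_entry():
 *
 *	struct iommu_map_entry *e;
 *	int error;
 *
 *	error = iommu_gas_map(domain, &tag->common, size, 0,
 *	    IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE,
 *	    IOMMU_MF_CANWAIT, ma, &e);
 *	if (error == 0)
 *		... program the device with the bus address e->start ...
 */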

int
iommu_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	iommu_gaddr_t start;
	int error;

	KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
	    entry, entry->flags));
	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0,
	    ("invalid flags 0x%x", flags));

	start = entry->start;
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_alloc_region(domain, entry, flags);
	if (error != 0) {
		IOMMU_DOMAIN_UNLOCK(domain);
		return (error);
	}
	entry->flags |= eflags;
	IOMMU_DOMAIN_UNLOCK(domain);
	if (entry->end == entry->start)
		return (0);

	error = domain->ops->map(domain, entry->start,
	    entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start),
	    eflags, ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
	if (error == ENOMEM) {
		iommu_domain_unload_entry(entry, false);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from domain_map_buf", error));

	return (0);
}

static int
iommu_gas_reserve_region_locked(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end, struct iommu_map_entry *entry)
{
	int error;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

	entry->start = start;
	entry->end = end;
	error = iommu_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT);
	if (error == 0)
		entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED;
	return (error);
}

int
iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end, struct iommu_map_entry **entry0)
{
	struct iommu_map_entry *entry;
	int error;

	entry = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_reserve_region_locked(domain, start, end, entry);
	IOMMU_DOMAIN_UNLOCK(domain);
	if (error != 0)
		iommu_gas_free_entry(domain, entry);
	else if (entry0 != NULL)
		*entry0 = entry;
	return (error);
}
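
/*
 * Usage sketch (hypothetical addresses): a caller that must keep a
 * range such as [0xfee00000, 0xfef00000) unused by DMA can reserve it
 * up front:
 *
 *	error = iommu_gas_reserve_region(domain, 0xfee00000, 0xfef00000,
 *	    NULL);
 *
 * On success the range is inserted as an IOMMU_MAP_ENTRY_UNMAPPED
 * entry, so iommu_gas_find_space() will never hand it out.
 */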

/*
 * As in iommu_gas_reserve_region(), reserve [start, end), but allow for
 * existing entries.
 */
int
iommu_gas_reserve_region_extend(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end)
{
	struct iommu_map_entry *entry, *next, *prev, key = {};
	iommu_gaddr_t entry_start, entry_end;
	int error;

	error = 0;
	entry = NULL;
	end = ummin(end, domain->end);
	while (start < end) {
		/* Preallocate an entry. */
		if (entry == NULL)
			entry = iommu_gas_alloc_entry(domain,
			    IOMMU_PGF_WAITOK);
		/* Calculate the free region from here to the next entry. */
		key.start = key.end = start;
		IOMMU_DOMAIN_LOCK(domain);
		next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &key);
		KASSERT(next != NULL, ("domain %p with end %#jx has no entry "
		    "after %#jx", domain, (uintmax_t)domain->end,
		    (uintmax_t)start));
		entry_end = ummin(end, next->start);
		prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
		if (prev != NULL)
			entry_start = ummax(start, prev->end);
		else
			entry_start = start;
		start = next->end;
		/* Reserve the region if non-empty. */
		if (entry_start != entry_end) {
			error = iommu_gas_reserve_region_locked(domain,
			    entry_start, entry_end, entry);
			if (error != 0) {
				IOMMU_DOMAIN_UNLOCK(domain);
				break;
			}
			entry = NULL;
		}
		IOMMU_DOMAIN_UNLOCK(domain);
	}
	/* Release the preallocated entry if it was not used. */
	if (entry != NULL)
		iommu_gas_free_entry(domain, entry);
	return (error);
}

void
iommu_unmap_msi(struct iommu_ctx *ctx)
{
	struct iommu_map_entry *entry;
	struct iommu_domain *domain;

	domain = ctx->domain;
	entry = domain->msi_entry;
	if (entry == NULL)
		return;

	domain->ops->unmap(domain, entry->start, entry->end -
	    entry->start, IOMMU_PGF_WAITOK);

	IOMMU_DOMAIN_LOCK(domain);
	iommu_gas_free_space(domain, entry);
	IOMMU_DOMAIN_UNLOCK(domain);

	iommu_gas_free_entry(domain, entry);

	domain->msi_entry = NULL;
	domain->msi_base = 0;
	domain->msi_phys = 0;
}

int
iommu_map_msi(struct iommu_ctx *ctx, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	struct iommu_domain *domain;
	struct iommu_map_entry *entry;
	int error;

	error = 0;
	domain = ctx->domain;

	/* Check if there is already an MSI page allocated. */
	IOMMU_DOMAIN_LOCK(domain);
	entry = domain->msi_entry;
	IOMMU_DOMAIN_UNLOCK(domain);

	if (entry == NULL) {
		error = iommu_gas_map(domain, &ctx->tag->common, size, offset,
		    eflags, flags, ma, &entry);
		IOMMU_DOMAIN_LOCK(domain);
		if (error == 0) {
			if (domain->msi_entry == NULL) {
				MPASS(domain->msi_base == 0);
				MPASS(domain->msi_phys == 0);

				domain->msi_entry = entry;
				domain->msi_base = entry->start;
				domain->msi_phys = VM_PAGE_TO_PHYS(ma[0]);
			} else {
				/*
				 * We lost the race and already have an
				 * MSI page allocated.  Free the unneeded
				 * entry.
				 */
				iommu_gas_free_entry(domain, entry);
			}
		} else if (domain->msi_entry != NULL) {
			/*
			 * Our allocation failed, but another thread's
			 * succeeded.  Return success, as a valid MSI
			 * page exists.
			 */
			error = 0;
		}
		IOMMU_DOMAIN_UNLOCK(domain);
	}

	return (error);
}

void
iommu_translate_msi(struct iommu_domain *domain, uint64_t *addr)
{

	*addr = (*addr - domain->msi_phys) + domain->msi_base;

	KASSERT(*addr >= domain->msi_entry->start,
	    ("%s: Address is below the MSI entry start address (%jx < %jx)",
	    __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->start));

	KASSERT(*addr + sizeof(*addr) <= domain->msi_entry->end,
	    ("%s: Address is above the MSI entry end address (%jx > %jx)",
	    __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->end));
}
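
/*
 * Example of the translation above (hypothetical values): with
 * msi_phys = 0xfee00000 and msi_base = 0x100000, a device-visible MSI
 * address of 0xfee00040 becomes (0xfee00040 - 0xfee00000) + 0x100000 =
 * 0x100040; the offset within the MSI page is preserved while the page
 * itself is redirected into the domain's guest address space.
 */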

SYSCTL_NODE(_hw, OID_AUTO, iommu, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "");

#ifdef INVARIANTS
SYSCTL_INT(_hw_iommu, OID_AUTO, check_free, CTLFLAG_RWTUN,
    &iommu_check_free, 0,
    "Check the GPA RBtree for free_down validity");
#endif
893