xref: /freebsd/sys/dev/iommu/iommu_gas.c (revision dd41de95a84d979615a2ef11df6850622bf6184e)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define	RB_AUGMENT(entry) iommu_gas_augment_entry(entry)

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/iommu/iommu.h>
#include <dev/iommu/iommu_gas.h>
#include <dev/iommu/iommu_msi.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/iommu.h>
#include <dev/iommu/busdma_iommu.h>

/*
 * Guest Address Space management.
 */

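/*
 * Each domain tracks its allocated ranges in a red-black tree of
 * iommu_map_entry nodes, ordered by the entry end address.  Every node
 * is augmented with free_down, the size of the largest free gap in the
 * subtree rooted at that node, which lets the allocation walks prune
 * subtrees that cannot contain a large enough hole.
 */
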
static uma_zone_t iommu_map_entry_zone;

#ifdef INVARIANTS
static int iommu_check_free;
#endif

static void
intel_gas_init(void)
{

	iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY",
	    sizeof(struct iommu_map_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);
}
SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL);

struct iommu_map_entry *
iommu_gas_alloc_entry(struct iommu_domain *domain, u_int flags)
{
	struct iommu_map_entry *res;

	KASSERT((flags & ~(IOMMU_PGF_WAITOK)) == 0,
	    ("unsupported flags %x", flags));

	res = uma_zalloc(iommu_map_entry_zone, ((flags & IOMMU_PGF_WAITOK) !=
	    0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (res != NULL) {
		res->domain = domain;
		atomic_add_int(&domain->entries_cnt, 1);
	}
	return (res);
}

void
iommu_gas_free_entry(struct iommu_domain *domain, struct iommu_map_entry *entry)
{

	KASSERT(domain == entry->domain,
	    ("mismatched free domain %p entry %p entry->domain %p", domain,
	    entry, entry->domain));
	atomic_subtract_int(&domain->entries_cnt, 1);
	uma_zfree(iommu_map_entry_zone, entry);
}

static int
iommu_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b)
{

	/* The last entry has zero size, hence <=. */
	KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)",
	    a, (uintmax_t)a->start, (uintmax_t)a->end));
	KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)",
	    b, (uintmax_t)b->start, (uintmax_t)b->end));
	KASSERT(a->end <= b->start || b->end <= a->start ||
	    a->end == a->start || b->end == b->start,
	    ("overlapping entries %p (%jx, %jx) %p (%jx, %jx)",
	    a, (uintmax_t)a->start, (uintmax_t)a->end,
	    b, (uintmax_t)b->start, (uintmax_t)b->end));

	if (a->end < b->end)
		return (-1);
	else if (b->end < a->end)
		return (1);
	return (0);
}

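/*
 * Besides free_down, each entry caches first and last, the minimal
 * start and the maximal end address within its subtree.  A small
 * illustrative shape (addresses invented for the example): if the left
 * child's subtree spans [0x1000, 0x2000) and the entry itself covers
 * [0x5000, 0x6000), then the gap entry->start - child->last = 0x3000
 * feeds into entry->free_down, together with the children's own
 * free_down values.
 */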
static void
iommu_gas_augment_entry(struct iommu_map_entry *entry)
{
	struct iommu_map_entry *child;
	iommu_gaddr_t free_down;

	free_down = 0;
	if ((child = RB_LEFT(entry, rb_entry)) != NULL) {
		free_down = MAX(free_down, child->free_down);
		free_down = MAX(free_down, entry->start - child->last);
		entry->first = child->first;
	} else
		entry->first = entry->start;

	if ((child = RB_RIGHT(entry, rb_entry)) != NULL) {
		free_down = MAX(free_down, child->free_down);
		free_down = MAX(free_down, child->first - entry->end);
		entry->last = child->last;
	} else
		entry->last = entry->end;
	entry->free_down = free_down;
}

RB_GENERATE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
    iommu_gas_cmp_entries);

#ifdef INVARIANTS
static void
iommu_gas_check_free(struct iommu_domain *domain)
{
	struct iommu_map_entry *entry, *l, *r;
	iommu_gaddr_t v;

	RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
		KASSERT(domain == entry->domain,
		    ("mismatched free domain %p entry %p entry->domain %p",
		    domain, entry, entry->domain));
		l = RB_LEFT(entry, rb_entry);
		r = RB_RIGHT(entry, rb_entry);
		v = 0;
		if (l != NULL) {
			v = MAX(v, l->free_down);
			v = MAX(v, entry->start - l->last);
		}
		if (r != NULL) {
			v = MAX(v, r->free_down);
			v = MAX(v, r->first - entry->end);
		}
		MPASS(entry->free_down == v);
	}
}
#endif

static bool
iommu_gas_rb_insert(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *found;

	found = RB_INSERT(iommu_gas_entries_tree,
	    &domain->rb_root, entry);
	return (found == NULL);
}

static void
iommu_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry)
{

	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
}

struct iommu_domain *
iommu_get_ctx_domain(struct iommu_ctx *ctx)
{

	return (ctx->domain);
}

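/*
 * A fresh domain is bracketed by two sentinel entries: [0, IOMMU_PAGE_SIZE)
 * keeps the first page (and with it bus address zero) away from the
 * allocator, and the zero-sized entry at domain->end bounds the space
 * from above.  Both carry IOMMU_MAP_ENTRY_PLACE, marking them as
 * placeholders rather than real mappings.
 */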
void
iommu_gas_init_domain(struct iommu_domain *domain)
{
	struct iommu_map_entry *begin, *end;

	begin = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	end = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);

	IOMMU_DOMAIN_LOCK(domain);
	KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain));
	KASSERT(RB_EMPTY(&domain->rb_root),
	    ("non-empty entries %p", domain));

	begin->start = 0;
	begin->end = IOMMU_PAGE_SIZE;
	begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
	iommu_gas_rb_insert(domain, begin);

	end->start = domain->end;
	end->end = domain->end;
	end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
	iommu_gas_rb_insert(domain, end);

	domain->first_place = begin;
	domain->last_place = end;
	domain->flags |= IOMMU_DOMAIN_GAS_INITED;
	IOMMU_DOMAIN_UNLOCK(domain);
}

void
iommu_gas_fini_domain(struct iommu_domain *domain)
{
	struct iommu_map_entry *entry, *entry1;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT(domain->entries_cnt == 2,
	    ("domain still in use %p", domain));

	entry = RB_MIN(iommu_gas_entries_tree, &domain->rb_root);
	KASSERT(entry->start == 0, ("start entry start %p", domain));
	KASSERT(entry->end == IOMMU_PAGE_SIZE, ("start entry end %p", domain));
	KASSERT(entry->flags ==
	    (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED),
	    ("start entry flags %p", domain));
	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
	iommu_gas_free_entry(domain, entry);

	entry = RB_MAX(iommu_gas_entries_tree, &domain->rb_root);
	KASSERT(entry->start == domain->end, ("end entry start %p", domain));
	KASSERT(entry->end == domain->end, ("end entry end %p", domain));
	KASSERT(entry->flags ==
	    (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED),
	    ("end entry flags %p", domain));
	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
	iommu_gas_free_entry(domain, entry);

	RB_FOREACH_SAFE(entry, iommu_gas_entries_tree, &domain->rb_root,
	    entry1) {
		KASSERT((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0,
		    ("non-RMRR entry left %p", domain));
		RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root,
		    entry);
		iommu_gas_free_entry(domain, entry);
	}
}

struct iommu_gas_match_args {
	struct iommu_domain *domain;
	iommu_gaddr_t size;
	int offset;
	const struct bus_dma_tag_common *common;
	u_int gas_flags;
	struct iommu_map_entry *entry;
};

/*
 * The interval [beg, end) is a free interval between two iommu_map_entries.
 * maxaddr is an upper bound on addresses that can be allocated. Try to
 * allocate space in the free interval, subject to the conditions expressed
 * by a, and return 'true' if and only if the allocation attempt succeeds.
 */
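/*
 * An illustrative case (numbers invented, assuming maxaddr is large
 * enough and there is no boundary constraint): with beg = 0x3000,
 * end = 0x9000, size = 0x2000, alignment = 0x4000 and offset = 0, the
 * start is rounded up from beg + IOMMU_PAGE_SIZE = 0x4000 to 0x4000 and
 * the request fits, since 0x4000 + 0x2000 + IOMMU_PAGE_SIZE <= 0x9000;
 * the trailing page is the guard gap kept after every allocation.
 */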
static bool
iommu_gas_match_one(struct iommu_gas_match_args *a, iommu_gaddr_t beg,
    iommu_gaddr_t end, iommu_gaddr_t maxaddr)
{
	iommu_gaddr_t bs, start;

	a->entry->start = roundup2(beg + IOMMU_PAGE_SIZE,
	    a->common->alignment);
	if (a->entry->start + a->size > maxaddr)
		return (false);

	/* IOMMU_PAGE_SIZE to create gap after new entry. */
	if (a->entry->start < beg + IOMMU_PAGE_SIZE ||
	    a->entry->start + a->size + a->offset + IOMMU_PAGE_SIZE > end)
		return (false);

	/* No boundary crossing. */
	if (iommu_test_boundary(a->entry->start + a->offset, a->size,
	    a->common->boundary))
		return (true);

	/*
	 * The start + offset to start + offset + size region crosses
	 * the boundary.  Check whether there is enough space after the
	 * next boundary following beg.
	 */
	bs = rounddown2(a->entry->start + a->offset + a->common->boundary,
	    a->common->boundary);
	start = roundup2(bs, a->common->alignment);
	/* IOMMU_PAGE_SIZE to create gap after new entry. */
	if (start + a->offset + a->size + IOMMU_PAGE_SIZE <= end &&
	    start + a->offset + a->size <= maxaddr &&
	    iommu_test_boundary(start + a->offset, a->size,
	    a->common->boundary)) {
		a->entry->start = start;
		return (true);
	}

	/*
	 * Not enough space to align at the requested boundary, or the
	 * boundary is smaller than the size, but splitting is allowed.
	 * We already checked that start + size does not overlap maxaddr.
	 *
	 * XXXKIB. It is possible that bs is exactly at the start of
	 * the next entry, in which case there is no gap.  Ignore for now.
	 */
	if ((a->gas_flags & IOMMU_MF_CANSPLIT) != 0) {
		a->size = bs - a->entry->start;
		return (true);
	}

	return (false);
}

static void
iommu_gas_match_insert(struct iommu_gas_match_args *a)
{
	bool found;

	/*
	 * The prev->end is always aligned on the page size, which
	 * causes page alignment for entry->start too.  The size is
	 * checked to be a multiple of the page size.
	 *
	 * A page-sized gap is created between consecutive
	 * allocations to ensure that out-of-bounds accesses fault.
	 */
	a->entry->end = a->entry->start + a->size;

	found = iommu_gas_rb_insert(a->domain, a->entry);
	KASSERT(found, ("found dup %p start %jx size %jx",
	    a->domain, (uintmax_t)a->entry->start, (uintmax_t)a->size));
	a->entry->flags = IOMMU_MAP_ENTRY_MAP;
}

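/*
 * Look for free space below common->lowaddr, walking down from the
 * given subtree root.  The free_down augmentation lets the walk give up
 * early on subtrees whose largest gap cannot hold size + offset plus
 * the guard page.
 */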
static int
iommu_gas_lowermatch(struct iommu_gas_match_args *a, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *child;

	child = RB_RIGHT(entry, rb_entry);
	if (child != NULL && entry->end < a->common->lowaddr &&
	    iommu_gas_match_one(a, entry->end, child->first,
	    a->common->lowaddr)) {
		iommu_gas_match_insert(a);
		return (0);
	}
	if (entry->free_down < a->size + a->offset + IOMMU_PAGE_SIZE)
		return (ENOMEM);
	if (entry->first >= a->common->lowaddr)
		return (ENOMEM);
	child = RB_LEFT(entry, rb_entry);
	if (child != NULL && 0 == iommu_gas_lowermatch(a, child))
		return (0);
	if (child != NULL && child->last < a->common->lowaddr &&
	    iommu_gas_match_one(a, child->last, entry->start,
	    a->common->lowaddr)) {
		iommu_gas_match_insert(a);
		return (0);
	}
	child = RB_RIGHT(entry, rb_entry);
	if (child != NULL && 0 == iommu_gas_lowermatch(a, child))
		return (0);
	return (ENOMEM);
}

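/*
 * The counterpart of iommu_gas_lowermatch for requests that must be
 * placed at or above common->highaddr; here the upper limit is the
 * domain end instead of lowaddr.
 */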
static int
iommu_gas_uppermatch(struct iommu_gas_match_args *a, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *child;

	if (entry->free_down < a->size + a->offset + IOMMU_PAGE_SIZE)
		return (ENOMEM);
	if (entry->last < a->common->highaddr)
		return (ENOMEM);
	child = RB_LEFT(entry, rb_entry);
	if (child != NULL && 0 == iommu_gas_uppermatch(a, child))
		return (0);
	if (child != NULL && child->last >= a->common->highaddr &&
	    iommu_gas_match_one(a, child->last, entry->start,
	    a->domain->end)) {
		iommu_gas_match_insert(a);
		return (0);
	}
	child = RB_RIGHT(entry, rb_entry);
	if (child != NULL && entry->end >= a->common->highaddr &&
	    iommu_gas_match_one(a, entry->end, child->first,
	    a->domain->end)) {
		iommu_gas_match_insert(a);
		return (0);
	}
	if (child != NULL && 0 == iommu_gas_uppermatch(a, child))
		return (0);
	return (ENOMEM);
}

static int
iommu_gas_find_space(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size,
    int offset, u_int flags, struct iommu_map_entry *entry)
{
	struct iommu_gas_match_args a;
	int error;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT(entry->flags == 0, ("dirty entry %p %p", domain, entry));
	KASSERT((size & IOMMU_PAGE_MASK) == 0, ("size %jx", (uintmax_t)size));

	a.domain = domain;
	a.size = size;
	a.offset = offset;
	a.common = common;
	a.gas_flags = flags;
	a.entry = entry;

	/* Handle lower region. */
	if (common->lowaddr > 0) {
		error = iommu_gas_lowermatch(&a,
		    RB_ROOT(&domain->rb_root));
		if (error == 0)
			return (0);
		KASSERT(error == ENOMEM,
		    ("error %d from iommu_gas_lowermatch", error));
	}
	/* Handle upper region. */
	if (common->highaddr >= domain->end)
		return (ENOMEM);
	error = iommu_gas_uppermatch(&a, RB_ROOT(&domain->rb_root));
	KASSERT(error == ENOMEM,
	    ("error %d from iommu_gas_uppermatch", error));
	return (error);
}

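/*
 * Insert a caller-specified range [entry->start, entry->end) into the
 * tree instead of searching for free space.  This path serves RMRR
 * ranges and explicit reservations, so with IOMMU_MF_RMRR an overlap
 * with an existing RMRR entry is tolerated by trimming the new entry.
 */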
static int
iommu_gas_alloc_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
    u_int flags)
{
	struct iommu_map_entry *next, *prev;
	bool found;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

	if ((entry->start & IOMMU_PAGE_MASK) != 0 ||
	    (entry->end & IOMMU_PAGE_MASK) != 0)
		return (EINVAL);
	if (entry->start >= entry->end)
		return (EINVAL);
	if (entry->end >= domain->end)
		return (EINVAL);

	next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, entry);
	KASSERT(next != NULL, ("next must be non-null %p %jx", domain,
	    (uintmax_t)entry->start));
	prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
	/* prev could be NULL */

	/*
	 * Adapt to broken BIOSes which specify overlapping RMRR
	 * entries.
	 *
	 * XXXKIB: this does not handle a case when prev or next
	 * entries are completely covered by the current one, which
	 * extends both ways.
	 */
	if (prev != NULL && prev->end > entry->start &&
	    (prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
		if ((flags & IOMMU_MF_RMRR) == 0 ||
		    (prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
			return (EBUSY);
		entry->start = prev->end;
	}
	if (next->start < entry->end &&
	    (next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
		if ((flags & IOMMU_MF_RMRR) == 0 ||
		    (next->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
			return (EBUSY);
		entry->end = next->start;
	}
	if (entry->end == entry->start)
		return (0);

	if (prev != NULL && prev->end > entry->start) {
		/* This assumes that prev is the placeholder entry. */
		iommu_gas_rb_remove(domain, prev);
		prev = NULL;
	}
	if (next->start < entry->end) {
		iommu_gas_rb_remove(domain, next);
		next = NULL;
	}

	found = iommu_gas_rb_insert(domain, entry);
	KASSERT(found, ("found RMRR dup %p start %jx end %jx",
	    domain, (uintmax_t)entry->start, (uintmax_t)entry->end));
	if ((flags & IOMMU_MF_RMRR) != 0)
		entry->flags = IOMMU_MAP_ENTRY_RMRR;

#ifdef INVARIANTS
	struct iommu_map_entry *ip, *in;
	ip = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
	in = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
	KASSERT(prev == NULL || ip == prev,
	    ("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)",
	    entry, entry->start, entry->end, prev,
	    prev == NULL ? 0 : prev->start, prev == NULL ? 0 : prev->end,
	    ip, ip == NULL ? 0 : ip->start, ip == NULL ? 0 : ip->end));
	KASSERT(next == NULL || in == next,
	    ("RMRR %p (%jx %jx) next %p (%jx %jx) ins next %p (%jx %jx)",
	    entry, entry->start, entry->end, next,
	    next == NULL ? 0 : next->start, next == NULL ? 0 : next->end,
	    in, in == NULL ? 0 : in->start, in == NULL ? 0 : in->end));
#endif

	return (0);
}

void
iommu_gas_free_space(struct iommu_domain *domain, struct iommu_map_entry *entry)
{

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
	    IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP,
	    ("permanent entry %p %p", domain, entry));

	iommu_gas_rb_remove(domain, entry);
	entry->flags &= ~IOMMU_MAP_ENTRY_MAP;
#ifdef INVARIANTS
	if (iommu_check_free)
		iommu_gas_check_free(domain);
#endif
}

void
iommu_gas_free_region(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *next, *prev;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
	    IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR,
	    ("non-RMRR entry %p %p", domain, entry));

	prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
	next = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
	iommu_gas_rb_remove(domain, entry);
	entry->flags &= ~IOMMU_MAP_ENTRY_RMRR;

	if (prev == NULL)
		iommu_gas_rb_insert(domain, domain->first_place);
	if (next == NULL)
		iommu_gas_rb_insert(domain, domain->last_place);
}

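/*
 * A sketch of a typical call, the way busdma-style code might issue it
 * (the surrounding declarations are hypothetical, not from this file):
 *
 *	struct iommu_map_entry *e;
 *
 *	error = iommu_gas_map(domain, &tag->common, size, offset,
 *	    IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE,
 *	    IOMMU_MF_CANWAIT, ma, &e);
 *
 * On success the pages in ma are mapped at [e->start, e->end) in the
 * domain's guest address space.
 */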
int
iommu_gas_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
	struct iommu_map_entry *entry;
	int error;

	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0,
	    ("invalid flags 0x%x", flags));

	entry = iommu_gas_alloc_entry(domain,
	    (flags & IOMMU_MF_CANWAIT) != 0 ?  IOMMU_PGF_WAITOK : 0);
	if (entry == NULL)
		return (ENOMEM);
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_find_space(domain, common, size, offset, flags,
	    entry);
	if (error == ENOMEM) {
		IOMMU_DOMAIN_UNLOCK(domain);
		iommu_gas_free_entry(domain, entry);
		return (error);
	}
#ifdef INVARIANTS
	if (iommu_check_free)
		iommu_gas_check_free(domain);
#endif
	KASSERT(error == 0,
	    ("unexpected error %d from iommu_gas_find_space", error));
	KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx",
	    (uintmax_t)entry->end, (uintmax_t)domain->end));
	entry->flags |= eflags;
	IOMMU_DOMAIN_UNLOCK(domain);

	error = domain->ops->map(domain, entry->start,
	    entry->end - entry->start, ma, eflags,
	    ((flags & IOMMU_MF_CANWAIT) != 0 ?  IOMMU_PGF_WAITOK : 0));
	if (error == ENOMEM) {
		iommu_domain_unload_entry(entry, true);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from domain_map_buf", error));

	*res = entry;
	return (0);
}

int
iommu_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	iommu_gaddr_t start;
	int error;

	KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
	    entry, entry->flags));
	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0,
	    ("invalid flags 0x%x", flags));

	start = entry->start;
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_alloc_region(domain, entry, flags);
	if (error != 0) {
		IOMMU_DOMAIN_UNLOCK(domain);
		return (error);
	}
	entry->flags |= eflags;
	IOMMU_DOMAIN_UNLOCK(domain);
	if (entry->end == entry->start)
		return (0);

	error = domain->ops->map(domain, entry->start,
	    entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start),
	    eflags, ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
	if (error == ENOMEM) {
		iommu_domain_unload_entry(entry, false);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from domain_map_buf", error));

	return (0);
}

static int
iommu_gas_reserve_region_locked(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end, struct iommu_map_entry *entry)
{
	int error;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

	entry->start = start;
	entry->end = end;
	error = iommu_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT);
	if (error == 0)
		entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED;
	return (error);
}

int
iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end, struct iommu_map_entry **entry0)
{
	struct iommu_map_entry *entry;
	int error;

	entry = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_reserve_region_locked(domain, start, end, entry);
	IOMMU_DOMAIN_UNLOCK(domain);
	if (error != 0)
		iommu_gas_free_entry(domain, entry);
	else if (entry0 != NULL)
		*entry0 = entry;
	return (error);
}

/*
 * As in iommu_gas_reserve_region, reserve [start, end), but allow for existing
 * entries.
 */
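/*
 * The loop below walks the gaps between existing entries inside
 * [start, end) and turns every non-empty gap into an UNMAPPED
 * reservation.  One map entry is preallocated per iteration so that the
 * possibly sleeping allocation happens outside the domain lock.
 */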
int
iommu_gas_reserve_region_extend(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end)
{
	struct iommu_map_entry *entry, *next, *prev, key = {};
	iommu_gaddr_t entry_start, entry_end;
	int error;

	error = 0;
	entry = NULL;
	end = ummin(end, domain->end);
	while (start < end) {
		/* Preallocate an entry. */
		if (entry == NULL)
			entry = iommu_gas_alloc_entry(domain,
			    IOMMU_PGF_WAITOK);
		/* Calculate the free region from here to the next entry. */
		key.start = key.end = start;
		IOMMU_DOMAIN_LOCK(domain);
		next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &key);
		KASSERT(next != NULL, ("domain %p with end %#jx has no entry "
		    "after %#jx", domain, (uintmax_t)domain->end,
		    (uintmax_t)start));
		entry_end = ummin(end, next->start);
		prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
		if (prev != NULL)
			entry_start = ummax(start, prev->end);
		else
			entry_start = start;
		start = next->end;
		/* Reserve the region if non-empty. */
		if (entry_start != entry_end) {
			error = iommu_gas_reserve_region_locked(domain,
			    entry_start, entry_end, entry);
			if (error != 0) {
				/* Drop the lock before bailing out. */
				IOMMU_DOMAIN_UNLOCK(domain);
				break;
			}
			entry = NULL;
		}
		IOMMU_DOMAIN_UNLOCK(domain);
	}
	/* Release a preallocated entry if it was not used. */
	if (entry != NULL)
		iommu_gas_free_entry(domain, entry);
	return (error);
}

struct iommu_map_entry *
iommu_map_alloc_entry(struct iommu_domain *domain, u_int flags)
{
	struct iommu_map_entry *res;

	res = iommu_gas_alloc_entry(domain, flags);

	return (res);
}

void
iommu_map_free_entry(struct iommu_domain *domain, struct iommu_map_entry *entry)
{

	iommu_gas_free_entry(domain, entry);
}

int
iommu_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
	int error;

	error = iommu_gas_map(domain, common, size, offset, eflags, flags,
	    ma, res);

	return (error);
}

void
iommu_unmap_msi(struct iommu_ctx *ctx)
{
	struct iommu_map_entry *entry;
	struct iommu_domain *domain;

	domain = ctx->domain;
	entry = domain->msi_entry;
	if (entry == NULL)
		return;

	domain->ops->unmap(domain, entry->start, entry->end -
	    entry->start, IOMMU_PGF_WAITOK);

	IOMMU_DOMAIN_LOCK(domain);
	iommu_gas_free_space(domain, entry);
	IOMMU_DOMAIN_UNLOCK(domain);

	iommu_gas_free_entry(domain, entry);

	domain->msi_entry = NULL;
	domain->msi_base = 0;
	domain->msi_phys = 0;
}

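/*
 * Allocate and map the per-domain MSI page on first use.  Two threads
 * may race to create it; the loser frees its freshly created entry and
 * the winner's mapping is kept, so callers end up sharing one MSI page.
 */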
int
iommu_map_msi(struct iommu_ctx *ctx, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	struct iommu_domain *domain;
	struct iommu_map_entry *entry;
	int error;

	error = 0;
	domain = ctx->domain;

	/* Check if there is already an MSI page allocated */
	IOMMU_DOMAIN_LOCK(domain);
	entry = domain->msi_entry;
	IOMMU_DOMAIN_UNLOCK(domain);

	if (entry == NULL) {
		error = iommu_gas_map(domain, &ctx->tag->common, size, offset,
		    eflags, flags, ma, &entry);
		IOMMU_DOMAIN_LOCK(domain);
		if (error == 0) {
			if (domain->msi_entry == NULL) {
				MPASS(domain->msi_base == 0);
				MPASS(domain->msi_phys == 0);

				domain->msi_entry = entry;
				domain->msi_base = entry->start;
				domain->msi_phys = VM_PAGE_TO_PHYS(ma[0]);
			} else {
				/*
				 * We lost the race and already have an
				 * MSI page allocated. Free the unneeded entry.
				 */
				iommu_gas_free_entry(domain, entry);
			}
		} else if (domain->msi_entry != NULL) {
			/*
			 * The allocation failed, but another succeeded.
			 * Return success as there is a valid MSI page.
			 */
			error = 0;
		}
		IOMMU_DOMAIN_UNLOCK(domain);
	}

	return (error);
}

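/*
 * Rewrite an MSI address from its physical location to the
 * corresponding guest address inside the domain's MSI entry, so that
 * the device's interrupt write is translated by the IOMMU.
 */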
void
iommu_translate_msi(struct iommu_domain *domain, uint64_t *addr)
{

	*addr = (*addr - domain->msi_phys) + domain->msi_base;

	KASSERT(*addr >= domain->msi_entry->start,
	    ("%s: Address is below the MSI entry start address (%jx < %jx)",
	    __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->start));

	KASSERT(*addr + sizeof(*addr) <= domain->msi_entry->end,
	    ("%s: Address is above the MSI entry end address (%jx > %jx)",
	    __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->end));
}

int
iommu_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	int error;

	error = iommu_gas_map_region(domain, entry, eflags, flags, ma);

	return (error);
}

SYSCTL_NODE(_hw, OID_AUTO, iommu, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "");

#ifdef INVARIANTS
SYSCTL_INT(_hw_iommu, OID_AUTO, check_free, CTLFLAG_RWTUN,
    &iommu_check_free, 0,
    "Check the GPA RBtree for free_down validity");
#endif