xref: /freebsd/sys/dev/iommu/iommu_gas.c (revision bc7512cc58af2e8bbe5bbf5ca0059b1daa1da897)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define	RB_AUGMENT(entry) iommu_gas_augment_entry(entry)

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/iommu/iommu.h>
#include <dev/iommu/iommu_gas.h>
#include <dev/iommu/iommu_msi.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/iommu.h>
#include <dev/iommu/busdma_iommu.h>

/*
 * Guest Address Space management.
 */
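
/*
 * Map entries for a domain are kept in a balanced tree (an RB-tree
 * keyed by the entry end address) that is augmented with three
 * fields: 'first' (the lowest start address in the subtree), 'last'
 * (the highest end address in the subtree) and 'free_down' (the
 * largest free gap between entries anywhere in the subtree).  The
 * augmented data lets the allocator descend directly toward subtrees
 * that are known to contain a large enough hole, instead of scanning
 * the entries linearly.
 */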

static uma_zone_t iommu_map_entry_zone;

#ifdef INVARIANTS
static int iommu_check_free;
#endif

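/*
 * Create the UMA zone that backs all iommu_map_entry allocations.
 * UMA_ZONE_NODUMP keeps the zone's pages out of kernel dumps.
 */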
static void
intel_gas_init(void)
{

	iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY",
	    sizeof(struct iommu_map_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);
}
SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL);

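/*
 * Allocate a map entry for the given domain.  IOMMU_PGF_WAITOK is the
 * only flag honored here; it selects a sleepable (M_WAITOK) rather
 * than a fail-fast (M_NOWAIT) allocation.
 */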
struct iommu_map_entry *
iommu_gas_alloc_entry(struct iommu_domain *domain, u_int flags)
{
	struct iommu_map_entry *res;

	KASSERT((flags & ~(IOMMU_PGF_WAITOK)) == 0,
	    ("unsupported flags %x", flags));

	res = uma_zalloc(iommu_map_entry_zone, ((flags & IOMMU_PGF_WAITOK) !=
	    0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
	if (res != NULL) {
		res->domain = domain;
		atomic_add_int(&domain->entries_cnt, 1);
	}
	return (res);
}

void
iommu_gas_free_entry(struct iommu_domain *domain,
    struct iommu_map_entry *entry)
{

	KASSERT(domain == entry->domain,
	    ("mismatched free domain %p entry %p entry->domain %p", domain,
	    entry, entry->domain));
	atomic_subtract_int(&domain->entries_cnt, 1);
	uma_zfree(iommu_map_entry_zone, entry);
}

static int
iommu_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b)
{

	/* The last entry has zero size, hence <=. */
	KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)",
	    a, (uintmax_t)a->start, (uintmax_t)a->end));
	KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)",
	    b, (uintmax_t)b->start, (uintmax_t)b->end));
	KASSERT(a->end <= b->start || b->end <= a->start ||
	    a->end == a->start || b->end == b->start,
	    ("overlapping entries %p (%jx, %jx) %p (%jx, %jx)",
	    a, (uintmax_t)a->start, (uintmax_t)a->end,
	    b, (uintmax_t)b->start, (uintmax_t)b->end));

	if (a->end < b->end)
		return (-1);
	else if (b->end < a->end)
		return (1);
	return (0);
}

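/*
 * RB_AUGMENT callback.  Recompute the augmented fields of 'entry'
 * from its children: 'first' is the lowest start address in the
 * subtree, 'last' is the highest end address, and 'free_down' is the
 * size of the largest free gap anywhere in the subtree, including
 * the gaps between this entry and its child subtrees.
 */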
static void
iommu_gas_augment_entry(struct iommu_map_entry *entry)
{
	struct iommu_map_entry *child;
	iommu_gaddr_t free_down;

	free_down = 0;
	if ((child = RB_LEFT(entry, rb_entry)) != NULL) {
		free_down = MAX(free_down, child->free_down);
		free_down = MAX(free_down, entry->start - child->last);
		entry->first = child->first;
	} else
		entry->first = entry->start;

	if ((child = RB_RIGHT(entry, rb_entry)) != NULL) {
		free_down = MAX(free_down, child->free_down);
		free_down = MAX(free_down, child->first - entry->end);
		entry->last = child->last;
	} else
		entry->last = entry->end;
	entry->free_down = free_down;
}

RB_GENERATE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
    iommu_gas_cmp_entries);

#ifdef INVARIANTS
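/*
 * Debugging aid: walk the whole tree and verify that the free_down
 * value stored in every entry matches a recomputation from its
 * children.  Enabled via the hw.iommu.check_free sysctl.
 */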
static void
iommu_gas_check_free(struct iommu_domain *domain)
{
	struct iommu_map_entry *entry, *l, *r;
	iommu_gaddr_t v;

	RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
		KASSERT(domain == entry->domain,
		    ("mismatched free domain %p entry %p entry->domain %p",
		    domain, entry, entry->domain));
		l = RB_LEFT(entry, rb_entry);
		r = RB_RIGHT(entry, rb_entry);
		v = 0;
		if (l != NULL) {
			v = MAX(v, l->free_down);
			v = MAX(v, entry->start - l->last);
		}
		if (r != NULL) {
			v = MAX(v, r->free_down);
			v = MAX(v, r->first - entry->end);
		}
		MPASS(entry->free_down == v);
	}
}
#endif

static bool
iommu_gas_rb_insert(struct iommu_domain *domain, struct iommu_map_entry *entry)
{
	struct iommu_map_entry *found;

	found = RB_INSERT(iommu_gas_entries_tree,
	    &domain->rb_root, entry);
	return (found == NULL);
}

static void
iommu_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry)
{

	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
}

struct iommu_domain *
iommu_get_ctx_domain(struct iommu_ctx *ctx)
{

	return (ctx->domain);
}

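/*
 * Set up a freshly created domain: install the two sentinel entries
 * that delimit the usable guest address space.  The 'begin' entry
 * reserves the first page, so that GPA zero is never handed out, and
 * the 'end' entry is a zero-sized marker at domain->end.  Both are
 * placeholders (IOMMU_MAP_ENTRY_PLACE) with no backing mapping.
 */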
void
iommu_gas_init_domain(struct iommu_domain *domain)
{
	struct iommu_map_entry *begin, *end;

	begin = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	end = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);

	IOMMU_DOMAIN_LOCK(domain);
	KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain));
	KASSERT(RB_EMPTY(&domain->rb_root),
	    ("non-empty entries %p", domain));

	begin->start = 0;
	begin->end = IOMMU_PAGE_SIZE;
	begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
	iommu_gas_rb_insert(domain, begin);

	end->start = domain->end;
	end->end = domain->end;
	end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
	iommu_gas_rb_insert(domain, end);

	domain->first_place = begin;
	domain->last_place = end;
	domain->flags |= IOMMU_DOMAIN_GAS_INITED;
	IOMMU_DOMAIN_UNLOCK(domain);
}

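/*
 * Tear down the GAS state of a domain that is being destroyed.  Only
 * the two sentinel entries and leftover RMRR reservations may still
 * be in the tree; everything else must have been unmapped already.
 */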
void
iommu_gas_fini_domain(struct iommu_domain *domain)
{
	struct iommu_map_entry *entry, *entry1;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT(domain->entries_cnt == 2,
	    ("domain still in use %p", domain));

	entry = RB_MIN(iommu_gas_entries_tree, &domain->rb_root);
	KASSERT(entry->start == 0, ("start entry start %p", domain));
	KASSERT(entry->end == IOMMU_PAGE_SIZE, ("start entry end %p", domain));
	KASSERT(entry->flags ==
	    (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED),
	    ("start entry flags %p", domain));
	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
	iommu_gas_free_entry(domain, entry);

	entry = RB_MAX(iommu_gas_entries_tree, &domain->rb_root);
	KASSERT(entry->start == domain->end, ("end entry start %p", domain));
	KASSERT(entry->end == domain->end, ("end entry end %p", domain));
	KASSERT(entry->flags ==
	    (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED),
	    ("end entry flags %p", domain));
	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
	iommu_gas_free_entry(domain, entry);

	RB_FOREACH_SAFE(entry, iommu_gas_entries_tree, &domain->rb_root,
	    entry1) {
		KASSERT((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0,
		    ("non-RMRR entry left %p", domain));
		RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root,
		    entry);
		iommu_gas_free_entry(domain, entry);
	}
}


struct iommu_gas_match_args {
	struct iommu_domain *domain;
	iommu_gaddr_t size;
	int offset;
	const struct bus_dma_tag_common *common;
	u_int gas_flags;
	struct iommu_map_entry *entry;
};

/*
 * The interval [beg, end) is a free interval between two iommu_map_entries.
 * maxaddr is an upper bound on addresses that can be allocated.  Try to
 * allocate space in the free interval, subject to the conditions expressed
 * by 'a', and return 'true' if and only if the allocation attempt succeeds.
 */
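/*
 * A small worked example (a sketch; assumes IOMMU_PAGE_SIZE == 0x1000):
 * given the free interval [0x14000, 0x30000), alignment 0x1000,
 * boundary 0x8000, offset 0 and size 0x6000, the first candidate start
 * is roundup2(0x14000 + 0x1000, 0x1000) == 0x15000.  The range
 * [0x15000, 0x1B000) crosses the boundary at 0x18000, so the start is
 * pushed up to the next boundary, rounddown2(0x15000 + 0x8000, 0x8000)
 * == 0x18000, where [0x18000, 0x1E000) fits without a crossing and
 * still leaves the trailing page-sized gap before 0x30000.
 */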
static bool
iommu_gas_match_one(struct iommu_gas_match_args *a, iommu_gaddr_t beg,
    iommu_gaddr_t end, iommu_gaddr_t maxaddr)
{
	iommu_gaddr_t bs, start;

	a->entry->start = roundup2(beg + IOMMU_PAGE_SIZE,
	    a->common->alignment);
	if (a->entry->start + a->offset + a->size > maxaddr)
		return (false);

	/* IOMMU_PAGE_SIZE to create a gap after the new entry. */
	if (a->entry->start < beg + IOMMU_PAGE_SIZE ||
	    a->entry->start + a->size + a->offset + IOMMU_PAGE_SIZE > end)
		return (false);

	/* No boundary crossing. */
	if (vm_addr_bound_ok(a->entry->start + a->offset, a->size,
	    a->common->boundary))
		return (true);

	/*
	 * The start + offset to start + offset + size region crosses
	 * the boundary.  Check if there is enough space after the
	 * next boundary past beg.
	 */
	bs = rounddown2(a->entry->start + a->offset + a->common->boundary,
	    a->common->boundary);
	start = roundup2(bs, a->common->alignment);
	/* IOMMU_PAGE_SIZE to create a gap after the new entry. */
	if (start + a->offset + a->size + IOMMU_PAGE_SIZE <= end &&
	    start + a->offset + a->size <= maxaddr &&
	    vm_addr_bound_ok(start + a->offset, a->size,
	    a->common->boundary)) {
		a->entry->start = start;
		return (true);
	}

	/*
	 * Not enough space to align at the requested boundary, or the
	 * boundary is smaller than the size, but we are allowed to
	 * split.  We already checked that start + size does not
	 * overlap maxaddr.
	 *
	 * XXXKIB. It is possible that bs is exactly at the start of
	 * the next entry, in which case we do not have a gap.  Ignore
	 * for now.
	 */
	if ((a->gas_flags & IOMMU_MF_CANSPLIT) != 0) {
		a->size = bs - a->entry->start;
		return (true);
	}

	return (false);
}

static void
iommu_gas_match_insert(struct iommu_gas_match_args *a)
{
	bool found __diagused;

	/*
	 * The prev->end is always aligned on the page size, which
	 * causes page alignment for the entry->start too.  The size
	 * is checked to be a multiple of the page size.
	 *
	 * The page sized gap is created between consecutive
	 * allocations to ensure that out-of-bounds accesses fault.
	 */
	a->entry->end = a->entry->start + a->size;

	found = iommu_gas_rb_insert(a->domain, a->entry);
	KASSERT(found, ("found dup %p start %jx size %jx",
	    a->domain, (uintmax_t)a->entry->start, (uintmax_t)a->size));
	a->entry->flags = IOMMU_MAP_ENTRY_MAP;
}

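/*
 * Recursively descend the tree looking for a free range below
 * common->lowaddr that can hold the request.  The free_down value of
 * a node lets whole subtrees that cannot contain a large enough gap
 * be skipped.  On success the new entry is inserted and 0 is
 * returned; ENOMEM means no fit exists in this subtree.
 */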
static int
iommu_gas_lowermatch(struct iommu_gas_match_args *a,
    struct iommu_map_entry *entry)
{
	struct iommu_map_entry *child;

	child = RB_RIGHT(entry, rb_entry);
	if (child != NULL && entry->end < a->common->lowaddr &&
	    iommu_gas_match_one(a, entry->end, child->first,
	    a->common->lowaddr)) {
		iommu_gas_match_insert(a);
		return (0);
	}
	if (entry->free_down < a->size + a->offset + IOMMU_PAGE_SIZE)
		return (ENOMEM);
	if (entry->first >= a->common->lowaddr)
		return (ENOMEM);
	child = RB_LEFT(entry, rb_entry);
	if (child != NULL && 0 == iommu_gas_lowermatch(a, child))
		return (0);
	if (child != NULL && child->last < a->common->lowaddr &&
	    iommu_gas_match_one(a, child->last, entry->start,
	    a->common->lowaddr)) {
		iommu_gas_match_insert(a);
		return (0);
	}
	child = RB_RIGHT(entry, rb_entry);
	if (child != NULL && 0 == iommu_gas_lowermatch(a, child))
		return (0);
	return (ENOMEM);
}

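/*
 * Like iommu_gas_lowermatch(), but allocate from the region above
 * common->highaddr, bounded above by domain->end.
 */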
static int
iommu_gas_uppermatch(struct iommu_gas_match_args *a,
    struct iommu_map_entry *entry)
{
	struct iommu_map_entry *child;

	if (entry->free_down < a->size + a->offset + IOMMU_PAGE_SIZE)
		return (ENOMEM);
	if (entry->last < a->common->highaddr)
		return (ENOMEM);
	child = RB_LEFT(entry, rb_entry);
	if (child != NULL && 0 == iommu_gas_uppermatch(a, child))
		return (0);
	if (child != NULL && child->last >= a->common->highaddr &&
	    iommu_gas_match_one(a, child->last, entry->start,
	    a->domain->end)) {
		iommu_gas_match_insert(a);
		return (0);
	}
	child = RB_RIGHT(entry, rb_entry);
	if (child != NULL && entry->end >= a->common->highaddr &&
	    iommu_gas_match_one(a, entry->end, child->first,
	    a->domain->end)) {
		iommu_gas_match_insert(a);
		return (0);
	}
	if (child != NULL && 0 == iommu_gas_uppermatch(a, child))
		return (0);
	return (ENOMEM);
}

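/*
 * Find a free range for an allocation of 'size' bytes (plus 'offset')
 * that satisfies the given busdma tag constraints, and insert 'entry'
 * describing it.  The window below common->lowaddr is preferred; the
 * window above common->highaddr is tried only if the low search fails.
 */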
static int
iommu_gas_find_space(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size,
    int offset, u_int flags, struct iommu_map_entry *entry)
{
	struct iommu_gas_match_args a;
	int error;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT(entry->flags == 0, ("dirty entry %p %p", domain, entry));
	KASSERT((size & IOMMU_PAGE_MASK) == 0, ("size %jx", (uintmax_t)size));

	a.domain = domain;
	a.size = size;
	a.offset = offset;
	a.common = common;
	a.gas_flags = flags;
	a.entry = entry;

	/* Handle the lower region. */
	if (common->lowaddr > 0) {
		error = iommu_gas_lowermatch(&a,
		    RB_ROOT(&domain->rb_root));
		if (error == 0)
			return (0);
		KASSERT(error == ENOMEM,
		    ("error %d from iommu_gas_lowermatch", error));
	}
	/* Handle the upper region. */
	if (common->highaddr >= domain->end)
		return (ENOMEM);
	error = iommu_gas_uppermatch(&a, RB_ROOT(&domain->rb_root));
	KASSERT(error == ENOMEM,
	    ("error %d from iommu_gas_uppermatch", error));
	return (error);
}

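/*
 * Insert an entry covering a fixed, caller-chosen address range, such
 * as an RMRR region reported by the BIOS.  Overlap with a placeholder
 * entry is resolved by removing the placeholder; overlap with any
 * other entry is an error (EBUSY) unless both entries are RMRR
 * reservations, in which case the new entry is trimmed to fit.
 */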
static int
iommu_gas_alloc_region(struct iommu_domain *domain,
    struct iommu_map_entry *entry, u_int flags)
{
	struct iommu_map_entry *next, *prev;
	bool found __diagused;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

	if ((entry->start & IOMMU_PAGE_MASK) != 0 ||
	    (entry->end & IOMMU_PAGE_MASK) != 0)
		return (EINVAL);
	if (entry->start >= entry->end)
		return (EINVAL);
	if (entry->end >= domain->end)
		return (EINVAL);

	next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, entry);
	KASSERT(next != NULL, ("next must be non-null %p %jx", domain,
	    (uintmax_t)entry->start));
	prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
	/* prev could be NULL */

	/*
	 * Adapt to broken BIOSes which specify overlapping RMRR
	 * entries.
	 *
	 * XXXKIB: this does not handle the case when prev or next
	 * entries are completely covered by the current one, which
	 * extends both ways.
	 */
	if (prev != NULL && prev->end > entry->start &&
	    (prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
		if ((flags & IOMMU_MF_RMRR) == 0 ||
		    (prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
			return (EBUSY);
		entry->start = prev->end;
	}
	if (next->start < entry->end &&
	    (next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
		if ((flags & IOMMU_MF_RMRR) == 0 ||
		    (next->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
			return (EBUSY);
		entry->end = next->start;
	}
	if (entry->end == entry->start)
		return (0);

	if (prev != NULL && prev->end > entry->start) {
		/* This assumes that prev is the placeholder entry. */
		iommu_gas_rb_remove(domain, prev);
		prev = NULL;
	}
	if (next->start < entry->end) {
		iommu_gas_rb_remove(domain, next);
		next = NULL;
	}

	found = iommu_gas_rb_insert(domain, entry);
	KASSERT(found, ("found RMRR dup %p start %jx end %jx",
	    domain, (uintmax_t)entry->start, (uintmax_t)entry->end));
	if ((flags & IOMMU_MF_RMRR) != 0)
		entry->flags = IOMMU_MAP_ENTRY_RMRR;

#ifdef INVARIANTS
	struct iommu_map_entry *ip, *in;
	ip = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
	in = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
	KASSERT(prev == NULL || ip == prev,
	    ("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)",
	    entry, entry->start, entry->end, prev,
	    prev == NULL ? 0 : prev->start, prev == NULL ? 0 : prev->end,
	    ip, ip == NULL ? 0 : ip->start, ip == NULL ? 0 : ip->end));
	KASSERT(next == NULL || in == next,
	    ("RMRR %p (%jx %jx) next %p (%jx %jx) ins next %p (%jx %jx)",
	    entry, entry->start, entry->end, next,
	    next == NULL ? 0 : next->start, next == NULL ? 0 : next->end,
	    in, in == NULL ? 0 : in->start, in == NULL ? 0 : in->end));
#endif

	return (0);
}

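/*
 * Return the address range of a regular mapping to the free space.
 * The entry is removed from the tree but not freed; the caller is
 * responsible for releasing it.
 */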
void
iommu_gas_free_space(struct iommu_domain *domain,
    struct iommu_map_entry *entry)
{

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
	    IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP,
	    ("permanent entry %p %p", domain, entry));

	iommu_gas_rb_remove(domain, entry);
	entry->flags &= ~IOMMU_MAP_ENTRY_MAP;
#ifdef INVARIANTS
	if (iommu_check_free)
		iommu_gas_check_free(domain);
#endif
}

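/*
 * Remove an RMRR reservation from the tree.  If the reservation sat
 * at either edge of the address space, re-insert the corresponding
 * sentinel placeholder that iommu_gas_alloc_region() displaced.
 */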
void
iommu_gas_free_region(struct iommu_domain *domain,
    struct iommu_map_entry *entry)
{
	struct iommu_map_entry *next, *prev;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
	KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
	    IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR,
	    ("non-RMRR entry %p %p", domain, entry));

	prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
	next = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
	iommu_gas_rb_remove(domain, entry);
	entry->flags &= ~IOMMU_MAP_ENTRY_RMRR;

	if (prev == NULL)
		iommu_gas_rb_insert(domain, domain->first_place);
	if (next == NULL)
		iommu_gas_rb_insert(domain, domain->last_place);
}

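/*
 * Allocate address space for 'size' bytes and establish the page
 * table mapping for the pages in 'ma'.  The domain lock is held only
 * for the address-space search; the mapping itself is performed by
 * the domain's map method without the lock.
 *
 * A typical caller (a sketch, not code from this file) looks like:
 *
 *	error = iommu_gas_map(domain, &tag->common, size, offset,
 *	    IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE,
 *	    IOMMU_MF_CANWAIT, ma, &entry);
 */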
int
iommu_gas_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
	struct iommu_map_entry *entry;
	int error;

	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0,
	    ("invalid flags 0x%x", flags));

	entry = iommu_gas_alloc_entry(domain,
	    (flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0);
	if (entry == NULL)
		return (ENOMEM);
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_find_space(domain, common, size, offset, flags,
	    entry);
	if (error == ENOMEM) {
		IOMMU_DOMAIN_UNLOCK(domain);
		iommu_gas_free_entry(domain, entry);
		return (error);
	}
#ifdef INVARIANTS
	if (iommu_check_free)
		iommu_gas_check_free(domain);
#endif
	KASSERT(error == 0,
	    ("unexpected error %d from iommu_gas_find_space", error));
	KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx",
	    (uintmax_t)entry->end, (uintmax_t)domain->end));
	entry->flags |= eflags;
	IOMMU_DOMAIN_UNLOCK(domain);

	error = domain->ops->map(domain, entry->start,
	    entry->end - entry->start, ma, eflags,
	    ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
	if (error == ENOMEM) {
		iommu_domain_unload_entry(entry, true);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from domain_map_buf", error));

	*res = entry;
	return (0);
}

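/*
 * Map a region whose guest address range was chosen by the caller,
 * typically an RMRR range that the hardware requires at a fixed
 * address.  The entry may be trimmed, or reduced to an empty range,
 * when it overlaps an existing compatible reservation.
 */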
int
iommu_gas_map_region(struct iommu_domain *domain,
    struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma)
{
	iommu_gaddr_t start;
	int error;

	KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
	    entry, entry->flags));
	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0,
	    ("invalid flags 0x%x", flags));

	start = entry->start;
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_alloc_region(domain, entry, flags);
	if (error != 0) {
		IOMMU_DOMAIN_UNLOCK(domain);
		return (error);
	}
	entry->flags |= eflags;
	IOMMU_DOMAIN_UNLOCK(domain);
	if (entry->end == entry->start)
		return (0);

	error = domain->ops->map(domain, entry->start,
	    entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start),
	    eflags, ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
	if (error == ENOMEM) {
		iommu_domain_unload_entry(entry, false);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from domain_map_buf", error));

	return (0);
}

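/*
 * Reserve [start, end) as unmapped address space, so that later
 * allocations never hand it out.  The locked helper consumes the
 * preallocated entry on success.
 */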
static int
iommu_gas_reserve_region_locked(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end, struct iommu_map_entry *entry)
{
	int error;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

	entry->start = start;
	entry->end = end;
	error = iommu_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT);
	if (error == 0)
		entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED;
	return (error);
}

int
iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end, struct iommu_map_entry **entry0)
{
	struct iommu_map_entry *entry;
	int error;

	entry = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_reserve_region_locked(domain, start, end, entry);
	IOMMU_DOMAIN_UNLOCK(domain);
	if (error != 0)
		iommu_gas_free_entry(domain, entry);
	else if (entry0 != NULL)
		*entry0 = entry;
	return (error);
}

/*
 * As in iommu_gas_reserve_region(), reserve [start, end), but allow
 * for existing entries.
 */
int
iommu_gas_reserve_region_extend(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end)
{
	struct iommu_map_entry *entry, *next, *prev, key = {};
	iommu_gaddr_t entry_start, entry_end;
	int error;

	error = 0;
	entry = NULL;
	end = ummin(end, domain->end);
	while (start < end) {
		/* Preallocate an entry. */
		if (entry == NULL)
			entry = iommu_gas_alloc_entry(domain,
			    IOMMU_PGF_WAITOK);
		/* Calculate the free region from here to the next entry. */
		key.start = key.end = start;
		IOMMU_DOMAIN_LOCK(domain);
		next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &key);
		KASSERT(next != NULL, ("domain %p with end %#jx has no entry "
		    "after %#jx", domain, (uintmax_t)domain->end,
		    (uintmax_t)start));
		entry_end = ummin(end, next->start);
		prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
		if (prev != NULL)
			entry_start = ummax(start, prev->end);
		else
			entry_start = start;
		start = next->end;
		/* Reserve the region if non-empty. */
		if (entry_start != entry_end) {
			error = iommu_gas_reserve_region_locked(domain,
			    entry_start, entry_end, entry);
			if (error != 0) {
				/* Drop the lock before bailing out. */
				IOMMU_DOMAIN_UNLOCK(domain);
				break;
			}
			entry = NULL;
		}
		IOMMU_DOMAIN_UNLOCK(domain);
	}
	/* Release a preallocated entry if it was not used. */
	if (entry != NULL)
		iommu_gas_free_entry(domain, entry);
	return (error);
}

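/*
 * Thin wrappers that expose the GAS allocator under the generic
 * iommu_map_*() names used by machine-independent consumers.
 */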
struct iommu_map_entry *
iommu_map_alloc_entry(struct iommu_domain *domain, u_int flags)
{
	struct iommu_map_entry *res;

	res = iommu_gas_alloc_entry(domain, flags);

	return (res);
}

void
iommu_map_free_entry(struct iommu_domain *domain,
    struct iommu_map_entry *entry)
{

	iommu_gas_free_entry(domain, entry);
}

int
iommu_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
	int error;

	error = iommu_gas_map(domain, common, size, offset, eflags, flags,
	    ma, res);

	return (error);
}

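/*
 * Destroy the domain's MSI page mapping, if one exists: unmap the
 * page from the domain page tables, return the address range to the
 * free space and reset the cached MSI base and physical addresses.
 */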
void
iommu_unmap_msi(struct iommu_ctx *ctx)
{
	struct iommu_map_entry *entry;
	struct iommu_domain *domain;

	domain = ctx->domain;
	entry = domain->msi_entry;
	if (entry == NULL)
		return;

	domain->ops->unmap(domain, entry->start, entry->end -
	    entry->start, IOMMU_PGF_WAITOK);

	IOMMU_DOMAIN_LOCK(domain);
	iommu_gas_free_space(domain, entry);
	IOMMU_DOMAIN_UNLOCK(domain);

	iommu_gas_free_entry(domain, entry);

	domain->msi_entry = NULL;
	domain->msi_base = 0;
	domain->msi_phys = 0;
}

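/*
 * Map the MSI doorbell page into the domain, once per domain.  The
 * allocation is done without the domain lock, so two threads can race
 * to map the page; the loser frees its entry and the winner's mapping
 * is kept.
 */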
int
iommu_map_msi(struct iommu_ctx *ctx, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	struct iommu_domain *domain;
	struct iommu_map_entry *entry;
	int error;

	error = 0;
	domain = ctx->domain;

	/* Check if there is already an MSI page allocated. */
	IOMMU_DOMAIN_LOCK(domain);
	entry = domain->msi_entry;
	IOMMU_DOMAIN_UNLOCK(domain);

	if (entry == NULL) {
		error = iommu_gas_map(domain, &ctx->tag->common, size, offset,
		    eflags, flags, ma, &entry);
		IOMMU_DOMAIN_LOCK(domain);
		if (error == 0) {
			if (domain->msi_entry == NULL) {
				MPASS(domain->msi_base == 0);
				MPASS(domain->msi_phys == 0);

				domain->msi_entry = entry;
				domain->msi_base = entry->start;
				domain->msi_phys = VM_PAGE_TO_PHYS(ma[0]);
			} else {
				/*
				 * We lost the race and already have an
				 * MSI page allocated. Free the unneeded entry.
				 */
				iommu_gas_free_entry(domain, entry);
			}
		} else if (domain->msi_entry != NULL) {
			/*
			 * The allocation failed, but another succeeded.
			 * Return success as there is a valid MSI page.
			 */
			error = 0;
		}
		IOMMU_DOMAIN_UNLOCK(domain);
	}

	return (error);
}

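/*
 * Rewrite a physical MSI doorbell address into its guest address
 * inside the domain's MSI mapping, for delivery through the IOMMU.
 */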
void
iommu_translate_msi(struct iommu_domain *domain, uint64_t *addr)
{

	*addr = (*addr - domain->msi_phys) + domain->msi_base;

	KASSERT(*addr >= domain->msi_entry->start,
	    ("%s: Address is below the MSI entry start address (%jx < %jx)",
	    __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->start));

	KASSERT(*addr + sizeof(*addr) <= domain->msi_entry->end,
	    ("%s: Address is above the MSI entry end address (%jx > %jx)",
	    __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->end));
}

int
iommu_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	int error;

	error = iommu_gas_map_region(domain, entry, eflags, flags, ma);

	return (error);
}

SYSCTL_NODE(_hw, OID_AUTO, iommu, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "");

#ifdef INVARIANTS
SYSCTL_INT(_hw_iommu, OID_AUTO, check_free, CTLFLAG_RWTUN,
    &iommu_check_free, 0,
    "Check the GPA RBtree for free_down validity");
#endif
