xref: /freebsd/sys/dev/iommu/iommu_gas.c (revision b197d4b893974c9eb4d7b38704c6d5c486235d6f)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2013 The FreeBSD Foundation
5  *
6  * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
7  * under sponsorship from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #define	RB_AUGMENT(entry) iommu_gas_augment_entry(entry)
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/malloc.h>
39 #include <sys/bus.h>
40 #include <sys/interrupt.h>
41 #include <sys/kernel.h>
42 #include <sys/ktr.h>
43 #include <sys/lock.h>
44 #include <sys/proc.h>
45 #include <sys/rwlock.h>
46 #include <sys/memdesc.h>
47 #include <sys/mutex.h>
48 #include <sys/sysctl.h>
49 #include <sys/rman.h>
50 #include <sys/taskqueue.h>
51 #include <sys/tree.h>
52 #include <sys/uio.h>
53 #include <sys/vmem.h>
54 #include <vm/vm.h>
55 #include <vm/vm_extern.h>
56 #include <vm/vm_kern.h>
57 #include <vm/vm_object.h>
58 #include <vm/vm_page.h>
59 #include <vm/vm_map.h>
60 #include <vm/uma.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/iommu/iommu.h>
64 #include <dev/iommu/iommu_gas.h>
65 #include <dev/iommu/iommu_msi.h>
66 #include <machine/atomic.h>
67 #include <machine/bus.h>
68 #include <machine/md_var.h>
69 #include <machine/iommu.h>
70 #include <dev/iommu/busdma_iommu.h>
71 
72 /*
73  * Guest Address Space management.
74  */
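/*
 * Overview of the scheme implemented below:
 *
 * Map entries are kept in a red-black tree ordered by the entry end
 * address.  Each node is augmented with the lowest entry start
 * (first) and the highest entry end (last) in its subtree, and with
 * free_down, the size of the largest free gap between consecutive
 * entries in that subtree.  The augmentation lets the space search
 * skip whole subtrees that cannot hold a requested allocation.
 * Placeholder entries installed by iommu_gas_init_domain() bracket
 * the usable address range, and allocations leave IOMMU_PAGE_SIZE
 * guard gaps so that out-of-bounds device accesses fault.
 */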
75 
76 static uma_zone_t iommu_map_entry_zone;
77 
78 #ifdef INVARIANTS
79 static int iommu_check_free;
80 #endif
81 
82 static void
83 intel_gas_init(void)
84 {
85 
86 	iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY",
87 	    sizeof(struct iommu_map_entry), NULL, NULL,
88 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);
89 }
90 SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL);
91 
92 struct iommu_map_entry *
93 iommu_gas_alloc_entry(struct iommu_domain *domain, u_int flags)
94 {
95 	struct iommu_map_entry *res;
96 
97 	KASSERT((flags & ~(IOMMU_PGF_WAITOK)) == 0,
98 	    ("unsupported flags %x", flags));
99 
100 	res = uma_zalloc(iommu_map_entry_zone, ((flags & IOMMU_PGF_WAITOK) !=
101 	    0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
102 	if (res != NULL && domain != NULL) {
103 		res->domain = domain;
104 		atomic_add_int(&domain->entries_cnt, 1);
105 	}
106 	return (res);
107 }
108 
109 void
110 iommu_gas_free_entry(struct iommu_map_entry *entry)
111 {
112 	struct iommu_domain *domain;
113 
114 	domain = entry->domain;
115 	if (domain != NULL)
116 		atomic_subtract_int(&domain->entries_cnt, 1);
117 	uma_zfree(iommu_map_entry_zone, entry);
118 }
119 
120 static int
121 iommu_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b)
122 {
123 
124 	/* The last entry has zero size, so <= */
125 	KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)",
126 	    a, (uintmax_t)a->start, (uintmax_t)a->end));
127 	KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)",
128 	    b, (uintmax_t)b->start, (uintmax_t)b->end));
129 	KASSERT(a->end <= b->start || b->end <= a->start ||
130 	    a->end == a->start || b->end == b->start,
131 	    ("overlapping entries %p (%jx, %jx) %p (%jx, %jx)",
132 	    a, (uintmax_t)a->start, (uintmax_t)a->end,
133 	    b, (uintmax_t)b->start, (uintmax_t)b->end));
134 
135 	if (a->end < b->end)
136 		return (-1);
137 	else if (b->end < a->end)
138 		return (1);
139 	return (0);
140 }
141 
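/*
 * Recompute the augmented fields of a tree node: "first" is the
 * lowest entry start in the node's subtree, "last" is the highest
 * entry end, and "free_down" is the largest free gap between
 * consecutive entries within that subtree.  RB_AUGMENT is defined
 * above so that the tree code calls this function whenever inserts,
 * removals, or rotations change a subtree.
 */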
142 static void
143 iommu_gas_augment_entry(struct iommu_map_entry *entry)
144 {
145 	struct iommu_map_entry *child;
146 	iommu_gaddr_t free_down;
147 
148 	free_down = 0;
149 	if ((child = RB_LEFT(entry, rb_entry)) != NULL) {
150 		free_down = MAX(free_down, child->free_down);
151 		free_down = MAX(free_down, entry->start - child->last);
152 		entry->first = child->first;
153 	} else
154 		entry->first = entry->start;
155 
156 	if ((child = RB_RIGHT(entry, rb_entry)) != NULL) {
157 		free_down = MAX(free_down, child->free_down);
158 		free_down = MAX(free_down, child->first - entry->end);
159 		entry->last = child->last;
160 	} else
161 		entry->last = entry->end;
162 	entry->free_down = free_down;
163 }
164 
165 RB_GENERATE(iommu_gas_entries_tree, iommu_map_entry, rb_entry,
166     iommu_gas_cmp_entries);
167 
168 #ifdef INVARIANTS
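/* Verify the free_down augmentation for every node in the tree. */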
169 static void
170 iommu_gas_check_free(struct iommu_domain *domain)
171 {
172 	struct iommu_map_entry *entry, *l, *r;
173 	iommu_gaddr_t v;
174 
175 	RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
176 		KASSERT(domain == entry->domain,
177 		    ("mismatched free domain %p entry %p entry->domain %p",
178 		    domain, entry, entry->domain));
179 		l = RB_LEFT(entry, rb_entry);
180 		r = RB_RIGHT(entry, rb_entry);
181 		v = 0;
182 		if (l != NULL) {
183 			v = MAX(v, l->free_down);
184 			v = MAX(v, entry->start - l->last);
185 		}
186 		if (r != NULL) {
187 			v = MAX(v, r->free_down);
188 			v = MAX(v, r->first - entry->end);
189 		}
190 		MPASS(entry->free_down == v);
191 	}
192 }
193 #endif
194 
195 static bool
196 iommu_gas_rb_insert(struct iommu_domain *domain, struct iommu_map_entry *entry)
197 {
198 	struct iommu_map_entry *found;
199 
200 	found = RB_INSERT(iommu_gas_entries_tree, &domain->rb_root, entry);
201 	return (found == NULL);
202 }
203 
204 static void
205 iommu_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry)
206 {
207 
208 	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
209 }
210 
211 struct iommu_domain *
212 iommu_get_ctx_domain(struct iommu_ctx *ctx)
213 {
214 
215 	return (ctx->domain);
216 }
217 
218 void
219 iommu_gas_init_domain(struct iommu_domain *domain)
220 {
221 	struct iommu_map_entry *begin, *end;
222 
223 	begin = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
224 	end = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
225 
226 	IOMMU_DOMAIN_LOCK(domain);
227 	KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain));
228 	KASSERT(RB_EMPTY(&domain->rb_root),
229 	    ("non-empty entries %p", domain));
230 
231 	begin->start = 0;
232 	begin->end = IOMMU_PAGE_SIZE;
233 	begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
234 	iommu_gas_rb_insert(domain, begin);
235 
236 	end->start = domain->end;
237 	end->end = domain->end;
238 	end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
239 	iommu_gas_rb_insert(domain, end);
240 
241 	domain->first_place = begin;
242 	domain->last_place = end;
243 	domain->flags |= IOMMU_DOMAIN_GAS_INITED;
244 	IOMMU_DOMAIN_UNLOCK(domain);
245 }
246 
247 void
248 iommu_gas_fini_domain(struct iommu_domain *domain)
249 {
250 	struct iommu_map_entry *entry, *entry1;
251 
252 	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
253 	KASSERT(domain->entries_cnt == 2,
254 	    ("domain still in use %p", domain));
255 
256 	entry = RB_MIN(iommu_gas_entries_tree, &domain->rb_root);
257 	KASSERT(entry->start == 0, ("start entry start %p", domain));
258 	KASSERT(entry->end == IOMMU_PAGE_SIZE, ("start entry end %p", domain));
259 	KASSERT(entry->flags ==
260 	    (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED),
261 	    ("start entry flags %p", domain));
262 	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
263 	iommu_gas_free_entry(entry);
264 
265 	entry = RB_MAX(iommu_gas_entries_tree, &domain->rb_root);
266 	KASSERT(entry->start == domain->end, ("end entry start %p", domain));
267 	KASSERT(entry->end == domain->end, ("end entry end %p", domain));
268 	KASSERT(entry->flags ==
269 	    (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED),
270 	    ("end entry flags %p", domain));
271 	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
272 	iommu_gas_free_entry(entry);
273 
274 	RB_FOREACH_SAFE(entry, iommu_gas_entries_tree, &domain->rb_root,
275 	    entry1) {
276 		KASSERT((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0,
277 		    ("non-RMRR entry left %p", domain));
278 		RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root,
279 		    entry);
280 		iommu_gas_free_entry(entry);
281 	}
282 }
283 
284 struct iommu_gas_match_args {
285 	struct iommu_domain *domain;
286 	iommu_gaddr_t size;
287 	int offset;
288 	const struct bus_dma_tag_common *common;
289 	u_int gas_flags;
290 	struct iommu_map_entry *entry;
291 };
292 
293 /*
294  * The interval [beg, end) is a free interval between two iommu_map_entries.
295  * Addresses can be allocated only in the range [lbound, ubound). Try to
296  * allocate space in the free interval, subject to the conditions expressed by
297  * a, and return 'true' if and only if the allocation attempt succeeds.
298  */
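/*
 * Illustrative example with hypothetical numbers, assuming a 4K
 * IOMMU_PAGE_SIZE, no boundary constraint, and lbound/ubound that do
 * not constrain the interval: for a free interval [0x10000, 0x20000)
 * and a request with size 0x3000, offset 0, and alignment 0x8000, the
 * leading guard page gives beg = 0x11000, and rounding up to the
 * alignment gives start = 0x18000.  Since start + size = 0x1b000 is
 * below end - IOMMU_PAGE_SIZE = 0x1f000, the entry [0x18000, 0x1b000)
 * is inserted.  When a boundary would be crossed, the code below
 * retries past the next boundary or, with IOMMU_MF_CANSPLIT, shrinks
 * the allocation to stop at the boundary.
 */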
299 static bool
300 iommu_gas_match_one(struct iommu_gas_match_args *a, iommu_gaddr_t beg,
301     iommu_gaddr_t end, iommu_gaddr_t lbound, iommu_gaddr_t ubound)
302 {
303 	struct iommu_map_entry *entry;
304 	iommu_gaddr_t first, size, start;
305 	bool found __diagused;
306 	int offset;
307 
308 	/*
309 	 * The prev->end is always aligned to the page size, which
310 	 * makes the entry->start page-aligned as well.
311 	 *
312 	 * Create IOMMU_PAGE_SIZE gaps before and after the new entry
313 	 * to ensure that out-of-bounds accesses fault.
314 	 */
315 	beg = MAX(beg + IOMMU_PAGE_SIZE, lbound);
316 	start = roundup2(beg, a->common->alignment);
317 	if (start < beg)
318 		return (false);
319 	end = MIN(end - IOMMU_PAGE_SIZE, ubound);
320 	offset = a->offset;
321 	size = a->size;
322 	if (start + offset + size > end)
323 		return (false);
324 
325 	/* Check for and try to skip past boundary crossing. */
326 	if (!vm_addr_bound_ok(start + offset, size, a->common->boundary)) {
327 		/*
328 		 * The start + offset to start + offset + size region crosses
329 		 * the boundary.  Check if there is enough space after the next
330 		 * boundary after the beg.
331 		 */
332 		first = start;
333 		beg = roundup2(start + offset + 1, a->common->boundary);
334 		start = roundup2(beg, a->common->alignment);
335 
336 		if (start + offset + size > end ||
337 		    !vm_addr_bound_ok(start + offset, size,
338 		    a->common->boundary)) {
339 			/*
340 			 * Not enough space to align at the requested boundary,
341 			 * or the boundary is smaller than the size but splitting
342 			 * is allowed.  We already checked that start + size does
343 			 * not overlap ubound.
344 			 *
345 			 * XXXKIB. It is possible that beg is exactly at the
346 			 * start of the next entry, in which case there is no
347 			 * gap.  Ignore for now.
348 			 */
349 			if ((a->gas_flags & IOMMU_MF_CANSPLIT) == 0)
350 				return (false);
351 			size = beg - first - offset;
352 			start = first;
353 		}
354 	}
355 	entry = a->entry;
356 	entry->start = start;
357 	entry->end = start + roundup2(size + offset, IOMMU_PAGE_SIZE);
358 	entry->flags = IOMMU_MAP_ENTRY_MAP;
359 	found = iommu_gas_rb_insert(a->domain, entry);
360 	KASSERT(found, ("found dup %p start %jx size %jx",
361 	    a->domain, (uintmax_t)start, (uintmax_t)size));
362 	return (true);
363 }
364 
365 /* Find the next entry that might abut a big-enough range. */
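/*
 * This is an in-order successor walk restricted to subtrees that can
 * still contain a big-enough gap: descend into the right subtree only
 * while the relevant child's free_down is at least min_free, otherwise
 * climb to the nearest ancestor whose left subtree contains curr.
 */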
366 static struct iommu_map_entry *
367 iommu_gas_next(struct iommu_map_entry *curr, iommu_gaddr_t min_free)
368 {
369 	struct iommu_map_entry *next;
370 
371 	if ((next = RB_RIGHT(curr, rb_entry)) != NULL &&
372 	    next->free_down >= min_free) {
373 		/* Find next entry in right subtree. */
374 		do
375 			curr = next;
376 		while ((next = RB_LEFT(curr, rb_entry)) != NULL &&
377 		    next->free_down >= min_free);
378 	} else {
379 		/* Find next entry in a left-parent ancestor. */
380 		while ((next = RB_PARENT(curr, rb_entry)) != NULL &&
381 		    curr == RB_RIGHT(next, rb_entry))
382 			curr = next;
383 		curr = next;
384 	}
385 	return (curr);
386 }
387 
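/*
 * Search for a free range that can hold the requested allocation.
 * The search runs in two passes: first over gaps that lie entirely
 * below common->lowaddr, then, if that fails, over gaps above
 * common->highaddr.  Both passes use the free_down augmentation to
 * visit only entries whose subtree still contains a big-enough gap,
 * and apply iommu_gas_match_one() to the gaps immediately before and
 * after each visited entry.
 */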
388 static int
389 iommu_gas_find_space(struct iommu_gas_match_args *a)
390 {
391 	struct iommu_domain *domain;
392 	struct iommu_map_entry *curr, *first;
393 	iommu_gaddr_t addr, min_free;
394 
395 	IOMMU_DOMAIN_ASSERT_LOCKED(a->domain);
396 	KASSERT(a->entry->flags == 0,
397 	    ("dirty entry %p %p", a->domain, a->entry));
398 
399 	/*
400 	 * If the subtree doesn't have free space for the requested allocation
401 	 * plus two guard pages, skip it.
402 	 */
403 	min_free = 2 * IOMMU_PAGE_SIZE +
404 	    roundup2(a->size + a->offset, IOMMU_PAGE_SIZE);
405 
406 	/*
407 	 * Find the first entry in the lower region that could abut a big-enough
408 	 * range.
409 	 */
410 	curr = RB_ROOT(&a->domain->rb_root);
411 	first = NULL;
412 	while (curr != NULL && curr->free_down >= min_free) {
413 		first = curr;
414 		curr = RB_LEFT(curr, rb_entry);
415 	}
416 
417 	/*
418 	 * Walk the big-enough ranges until one satisfies alignment
419 	 * requirements, or violates the lowaddr address requirement.
420 	 */
421 	addr = a->common->lowaddr + 1;
422 	for (curr = first; curr != NULL;
423 	    curr = iommu_gas_next(curr, min_free)) {
424 		if ((first = RB_LEFT(curr, rb_entry)) != NULL &&
425 		    iommu_gas_match_one(a, first->last, curr->start,
426 		    0, addr))
427 			return (0);
428 		if (curr->end >= addr) {
429 			/* All remaining ranges >= addr */
430 			break;
431 		}
432 		if ((first = RB_RIGHT(curr, rb_entry)) != NULL &&
433 		    iommu_gas_match_one(a, curr->end, first->first,
434 		    0, addr))
435 			return (0);
436 	}
437 
438 	/*
439 	 * To resume the search at the start of the upper region, first climb to
440 	 * the nearest ancestor that spans highaddr.  Then find the last entry
441 	 * before highaddr that could abut a big-enough range.
442 	 */
443 	addr = a->common->highaddr;
444 	while (curr != NULL && curr->last < addr)
445 		curr = RB_PARENT(curr, rb_entry);
446 	first = NULL;
447 	while (curr != NULL && curr->free_down >= min_free) {
448 		if (addr < curr->end)
449 			curr = RB_LEFT(curr, rb_entry);
450 		else {
451 			first = curr;
452 			curr = RB_RIGHT(curr, rb_entry);
453 		}
454 	}
455 
456 	/*
457 	 * Walk the remaining big-enough ranges until one satisfies alignment
458 	 * requirements.
459 	 */
460 	domain = a->domain;
461 	for (curr = first; curr != NULL;
462 	    curr = iommu_gas_next(curr, min_free)) {
463 		if ((first = RB_LEFT(curr, rb_entry)) != NULL &&
464 		    iommu_gas_match_one(a, first->last, curr->start,
465 		    addr + 1, domain->end))
466 			return (0);
467 		if ((first = RB_RIGHT(curr, rb_entry)) != NULL &&
468 		    iommu_gas_match_one(a, curr->end, first->first,
469 		    addr + 1, domain->end))
470 			return (0);
471 	}
472 
473 	return (ENOMEM);
474 }
475 
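/*
 * Insert an entry whose address range was fixed by the caller, such
 * as an RMRR region.  Overlap with an existing RMRR entry is
 * tolerated by clipping the new entry, provided IOMMU_MF_RMRR was
 * requested; overlapped placeholder entries are simply removed.
 */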
476 static int
477 iommu_gas_alloc_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
478     u_int flags)
479 {
480 	struct iommu_map_entry *next, *prev;
481 	bool found __diagused;
482 
483 	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
484 
485 	if ((entry->start & IOMMU_PAGE_MASK) != 0 ||
486 	    (entry->end & IOMMU_PAGE_MASK) != 0)
487 		return (EINVAL);
488 	if (entry->start >= entry->end)
489 		return (EINVAL);
490 	if (entry->end >= domain->end)
491 		return (EINVAL);
492 
493 	next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, entry);
494 	KASSERT(next != NULL, ("next must be non-null %p %jx", domain,
495 	    (uintmax_t)entry->start));
496 	prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
497 	/* prev could be NULL */
498 
499 	/*
500 	 * Adapt to broken BIOSes which specify overlapping RMRR
501 	 * entries.
502 	 *
503 	 * XXXKIB: this does not handle the case where the prev or next
504 	 * entries are completely covered by the current one, which
505 	 * extends both ways.
506 	 */
507 	if (prev != NULL && prev->end > entry->start &&
508 	    (prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
509 		if ((flags & IOMMU_MF_RMRR) == 0 ||
510 		    (prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
511 			return (EBUSY);
512 		entry->start = prev->end;
513 	}
514 	if (next->start < entry->end &&
515 	    (next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
516 		if ((flags & IOMMU_MF_RMRR) == 0 ||
517 		    (next->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
518 			return (EBUSY);
519 		entry->end = next->start;
520 	}
521 	if (entry->end == entry->start)
522 		return (0);
523 
524 	if (prev != NULL && prev->end > entry->start) {
525 		/* This assumes that prev is the placeholder entry. */
526 		iommu_gas_rb_remove(domain, prev);
527 		prev = NULL;
528 	}
529 	if (next->start < entry->end) {
530 		iommu_gas_rb_remove(domain, next);
531 		next = NULL;
532 	}
533 
534 	found = iommu_gas_rb_insert(domain, entry);
535 	KASSERT(found, ("found RMRR dup %p start %jx end %jx",
536 	    domain, (uintmax_t)entry->start, (uintmax_t)entry->end));
537 	if ((flags & IOMMU_MF_RMRR) != 0)
538 		entry->flags = IOMMU_MAP_ENTRY_RMRR;
539 
540 #ifdef INVARIANTS
541 	struct iommu_map_entry *ip, *in;
542 	ip = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
543 	in = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
544 	KASSERT(prev == NULL || ip == prev,
545 	    ("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)",
546 	    entry, entry->start, entry->end, prev,
547 	    prev == NULL ? 0 : prev->start, prev == NULL ? 0 : prev->end,
548 	    ip, ip == NULL ? 0 : ip->start, ip == NULL ? 0 : ip->end));
549 	KASSERT(next == NULL || in == next,
550 	    ("RMRR %p (%jx %jx) next %p (%jx %jx) ins next %p (%jx %jx)",
551 	    entry, entry->start, entry->end, next,
552 	    next == NULL ? 0 : next->start, next == NULL ? 0 : next->end,
553 	    in, in == NULL ? 0 : in->start, in == NULL ? 0 : in->end));
554 #endif
555 
556 	return (0);
557 }
558 
559 void
560 iommu_gas_free_space(struct iommu_map_entry *entry)
561 {
562 	struct iommu_domain *domain;
563 
564 	domain = entry->domain;
565 	KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
566 	    IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP,
567 	    ("permanent entry %p %p", domain, entry));
568 
569 	IOMMU_DOMAIN_LOCK(domain);
570 	iommu_gas_rb_remove(domain, entry);
571 	entry->flags &= ~IOMMU_MAP_ENTRY_MAP;
572 #ifdef INVARIANTS
573 	if (iommu_check_free)
574 		iommu_gas_check_free(domain);
575 #endif
576 	IOMMU_DOMAIN_UNLOCK(domain);
577 }
578 
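/*
 * Free an RMRR region.  If the region had displaced one of the
 * placeholder entries when it was inserted, reinstall that
 * placeholder so the tree again brackets the whole address range.
 */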
579 void
580 iommu_gas_free_region(struct iommu_map_entry *entry)
581 {
582 	struct iommu_domain *domain;
583 	struct iommu_map_entry *next, *prev;
584 
585 	domain = entry->domain;
586 	KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
587 	    IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR,
588 	    ("non-RMRR entry %p %p", domain, entry));
589 
590 	IOMMU_DOMAIN_LOCK(domain);
591 	prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
592 	next = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
593 	iommu_gas_rb_remove(domain, entry);
594 	entry->flags &= ~IOMMU_MAP_ENTRY_RMRR;
595 
596 	if (prev == NULL)
597 		iommu_gas_rb_insert(domain, domain->first_place);
598 	if (next == NULL)
599 		iommu_gas_rb_insert(domain, domain->last_place);
600 	IOMMU_DOMAIN_UNLOCK(domain);
601 }
602 
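/*
 * The clip helpers below split a map entry that straddles the start
 * or the end of the range being removed, so that only the portion
 * inside the range gets unmapped.  The split reuses a map entry
 * preallocated by iommu_gas_remove(), since sleeping allocations are
 * not possible while the domain lock is held.
 */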
603 static struct iommu_map_entry *
604 iommu_gas_remove_clip_left(struct iommu_domain *domain, iommu_gaddr_t start,
605     iommu_gaddr_t end, struct iommu_map_entry **r)
606 {
607 	struct iommu_map_entry *entry, *res, fentry;
608 
609 	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
610 	MPASS(start <= end);
611 	MPASS(end <= domain->last_place->end);
612 
613 	/*
614 	 * Find the entry which contains the supplied guest address
615 	 * start, or the first entry after the start.  Since we
616 	 * asserted that start is below the domain end, such an entry
617 	 * must exist.  Then clip it if needed.
618 	 */
619 	fentry.start = start + 1;
620 	fentry.end = start + 1;
621 	entry = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &fentry);
622 
623 	if (entry->start >= start ||
624 	    (entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
625 		return (entry);
626 
627 	res = *r;
628 	*r = NULL;
629 	*res = *entry;
630 	res->start = entry->end = start;
631 	RB_UPDATE_AUGMENT(entry, rb_entry);
632 	iommu_gas_rb_insert(domain, res);
633 	return (res);
634 }
635 
636 static bool
637 iommu_gas_remove_clip_right(struct iommu_domain *domain,
638     iommu_gaddr_t end, struct iommu_map_entry *entry,
639     struct iommu_map_entry *r)
640 {
641 	if (entry->start >= end || (entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
642 		return (false);
643 
644 	*r = *entry;
645 	r->end = entry->start = end;
646 	RB_UPDATE_AUGMENT(entry, rb_entry);
647 	iommu_gas_rb_insert(domain, r);
648 	return (true);
649 }
650 
651 static void
652 iommu_gas_remove_unmap(struct iommu_domain *domain,
653     struct iommu_map_entry *entry, struct iommu_map_entries_tailq *gcp)
654 {
655 	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
656 
657 	if ((entry->flags & (IOMMU_MAP_ENTRY_UNMAPPED |
658 	    IOMMU_MAP_ENTRY_REMOVING)) != 0)
659 		return;
660 	MPASS((entry->flags & IOMMU_MAP_ENTRY_PLACE) == 0);
661 	entry->flags |= IOMMU_MAP_ENTRY_REMOVING;
662 	TAILQ_INSERT_TAIL(gcp, entry, dmamap_link);
663 }
664 
665 /*
666  * Remove the specified range from the GAS of the domain.  Note that
667  * the removal is not guaranteed to have occurred when the function
668  * returns; it might be finalized some time later, when the hardware
669  * reports that the (queued) IOTLB invalidation has been performed.
670  */
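/*
 * The removal clips any entries straddling the range ends (using the
 * preallocated entries r1 and r2), marks every non-RMRR entry inside
 * the range IOMMU_MAP_ENTRY_REMOVING while queueing it on a local
 * list, and finally passes that list to iommu_domain_unload() after
 * the domain lock is dropped.
 */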
671 void
672 iommu_gas_remove(struct iommu_domain *domain, iommu_gaddr_t start,
673     iommu_gaddr_t size)
674 {
675 	struct iommu_map_entry *entry, *nentry, *r1, *r2;
676 	struct iommu_map_entries_tailq gc;
677 	iommu_gaddr_t end;
678 
679 	end = start + size;
680 	r1 = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
681 	r2 = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
682 	TAILQ_INIT(&gc);
683 
684 	IOMMU_DOMAIN_LOCK(domain);
685 
686 	nentry = iommu_gas_remove_clip_left(domain, start, end, &r1);
687 	RB_FOREACH_FROM(entry, iommu_gas_entries_tree, nentry) {
688 		if (entry->start >= end)
689 			break;
690 		KASSERT(start <= entry->start,
691 		    ("iommu_gas_remove entry (%#jx, %#jx) start %#jx",
692 		    entry->start, entry->end, start));
693 		if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
694 			continue;
695 		iommu_gas_remove_unmap(domain, entry, &gc);
696 	}
697 	if (iommu_gas_remove_clip_right(domain, end, entry, r2)) {
698 		iommu_gas_remove_unmap(domain, r2, &gc);
699 		r2 = NULL;
700 	}
701 
702 #ifdef INVARIANTS
703 	RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) {
704 		if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
705 			continue;
706 		KASSERT(entry->end <= start || entry->start >= end,
707 		    ("iommu_gas_remove leftover entry (%#jx, %#jx) range "
708 		    "(%#jx, %#jx)",
709 		    entry->start, entry->end, start, end));
710 	}
711 #endif
712 
713 	IOMMU_DOMAIN_UNLOCK(domain);
714 	if (r1 != NULL)
715 		iommu_gas_free_entry(r1);
716 	if (r2 != NULL)
717 		iommu_gas_free_entry(r2);
718 	iommu_domain_unload(domain, &gc, true);
719 }
720 
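/*
 * Allocate address space for a new mapping, honoring the alignment,
 * boundary, and lowaddr/highaddr constraints of the busdma tag, and
 * map the pages in ma into it.  The space is reserved under the
 * domain lock by iommu_gas_find_space(); the page table update is
 * performed by the domain map callback after the lock is dropped.
 */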
721 int
722 iommu_gas_map(struct iommu_domain *domain,
723     const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
724     u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
725 {
726 	struct iommu_gas_match_args a;
727 	struct iommu_map_entry *entry;
728 	int error;
729 
730 	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0,
731 	    ("invalid flags 0x%x", flags));
732 
733 	a.domain = domain;
734 	a.size = size;
735 	a.offset = offset;
736 	a.common = common;
737 	a.gas_flags = flags;
738 	entry = iommu_gas_alloc_entry(domain,
739 	    (flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0);
740 	if (entry == NULL)
741 		return (ENOMEM);
742 	a.entry = entry;
743 	IOMMU_DOMAIN_LOCK(domain);
744 	error = iommu_gas_find_space(&a);
745 	if (error == ENOMEM) {
746 		IOMMU_DOMAIN_UNLOCK(domain);
747 		iommu_gas_free_entry(entry);
748 		return (error);
749 	}
750 #ifdef INVARIANTS
751 	if (iommu_check_free)
752 		iommu_gas_check_free(domain);
753 #endif
754 	KASSERT(error == 0,
755 	    ("unexpected error %d from iommu_gas_find_space", error));
756 	KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx",
757 	    (uintmax_t)entry->end, (uintmax_t)domain->end));
758 	entry->flags |= eflags;
759 	IOMMU_DOMAIN_UNLOCK(domain);
760 
761 	error = domain->ops->map(domain, entry->start,
762 	    entry->end - entry->start, ma, eflags,
763 	    ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
764 	if (error == ENOMEM) {
765 		iommu_domain_unload_entry(entry, true,
766 		    (flags & IOMMU_MF_CANWAIT) != 0);
767 		return (error);
768 	}
769 	KASSERT(error == 0,
770 	    ("unexpected error %d from domain_map_buf", error));
771 
772 	*res = entry;
773 	return (0);
774 }
775 
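/*
 * Map a region whose guest address range was chosen by the caller and
 * is already recorded in the entry, typically an RMRR region.  The
 * entry is inserted with iommu_gas_alloc_region() and then mapped
 * through the domain map callback.
 */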
776 int
777 iommu_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
778     u_int eflags, u_int flags, vm_page_t *ma)
779 {
780 	iommu_gaddr_t start;
781 	int error;
782 
783 	KASSERT(entry->domain == domain,
784 	    ("mismatched domain %p entry %p entry->domain %p", domain,
785 	    entry, entry->domain));
786 	KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
787 	    entry, entry->flags));
788 	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0,
789 	    ("invalid flags 0x%x", flags));
790 
791 	start = entry->start;
792 	IOMMU_DOMAIN_LOCK(domain);
793 	error = iommu_gas_alloc_region(domain, entry, flags);
794 	if (error != 0) {
795 		IOMMU_DOMAIN_UNLOCK(domain);
796 		return (error);
797 	}
798 	entry->flags |= eflags;
799 	IOMMU_DOMAIN_UNLOCK(domain);
800 	if (entry->end == entry->start)
801 		return (0);
802 
803 	error = domain->ops->map(domain, entry->start,
804 	    entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start),
805 	    eflags, ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
806 	if (error == ENOMEM) {
807 		iommu_domain_unload_entry(entry, false,
808 		    (flags & IOMMU_MF_CANWAIT) != 0);
809 		return (error);
810 	}
811 	KASSERT(error == 0,
812 	    ("unexpected error %d from domain_map_buf", error));
813 
814 	return (0);
815 }
816 
817 static int
818 iommu_gas_reserve_region_locked(struct iommu_domain *domain,
819     iommu_gaddr_t start, iommu_gaddr_t end, struct iommu_map_entry *entry)
820 {
821 	int error;
822 
823 	IOMMU_DOMAIN_ASSERT_LOCKED(domain);
824 
825 	entry->start = start;
826 	entry->end = end;
827 	error = iommu_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT);
828 	if (error == 0)
829 		entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED;
830 	return (error);
831 }
832 
833 int
834 iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
835     iommu_gaddr_t end, struct iommu_map_entry **entry0)
836 {
837 	struct iommu_map_entry *entry;
838 	int error;
839 
840 	entry = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
841 	IOMMU_DOMAIN_LOCK(domain);
842 	error = iommu_gas_reserve_region_locked(domain, start, end, entry);
843 	IOMMU_DOMAIN_UNLOCK(domain);
844 	if (error != 0)
845 		iommu_gas_free_entry(entry);
846 	else if (entry0 != NULL)
847 		*entry0 = entry;
848 	return (error);
849 }
850 
851 /*
852  * As in iommu_gas_reserve_region, reserve [start, end), but allow for existing
853  * entries.
854  */
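/*
 * The loop below walks the existing entries intersecting [start, end)
 * and reserves each uncovered gap separately, preallocating one map
 * entry per iteration while the domain lock is dropped.
 */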
855 int
856 iommu_gas_reserve_region_extend(struct iommu_domain *domain,
857     iommu_gaddr_t start, iommu_gaddr_t end)
858 {
859 	struct iommu_map_entry *entry, *next, *prev, key = {};
860 	iommu_gaddr_t entry_start, entry_end;
861 	int error;
862 
863 	error = 0;
864 	entry = NULL;
865 	end = ummin(end, domain->end);
866 	while (start < end) {
867 		/* Preallocate an entry. */
868 		if (entry == NULL)
869 			entry = iommu_gas_alloc_entry(domain,
870 			    IOMMU_PGF_WAITOK);
871 		/* Calculate the free region from here to the next entry. */
872 		key.start = key.end = start;
873 		IOMMU_DOMAIN_LOCK(domain);
874 		next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &key);
875 		KASSERT(next != NULL, ("domain %p with end %#jx has no entry "
876 		    "after %#jx", domain, (uintmax_t)domain->end,
877 		    (uintmax_t)start));
878 		entry_end = ummin(end, next->start);
879 		prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
880 		if (prev != NULL)
881 			entry_start = ummax(start, prev->end);
882 		else
883 			entry_start = start;
884 		start = next->end;
885 		/* Reserve the region if non-empty. */
886 		if (entry_start != entry_end) {
887 			error = iommu_gas_reserve_region_locked(domain,
888 			    entry_start, entry_end, entry);
889 			if (error != 0) {
890 				IOMMU_DOMAIN_UNLOCK(domain);
891 				break;
892 			}
893 			entry = NULL;
894 		}
895 		IOMMU_DOMAIN_UNLOCK(domain);
896 	}
897 	/* Release a preallocated entry if it was not used. */
898 	if (entry != NULL)
899 		iommu_gas_free_entry(entry);
900 	return (error);
901 }
902 
903 void
904 iommu_unmap_msi(struct iommu_ctx *ctx)
905 {
906 	struct iommu_map_entry *entry;
907 	struct iommu_domain *domain;
908 
909 	domain = ctx->domain;
910 	entry = domain->msi_entry;
911 	if (entry == NULL)
912 		return;
913 
914 	domain->ops->unmap(domain, entry->start, entry->end -
915 	    entry->start, IOMMU_PGF_WAITOK);
916 
917 	iommu_gas_free_space(entry);
918 
919 	iommu_gas_free_entry(entry);
920 
921 	domain->msi_entry = NULL;
922 	domain->msi_base = 0;
923 	domain->msi_phys = 0;
924 }
925 
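/*
 * Allocate and map the MSI page for the domain if it is not mapped
 * yet.  The allocation is performed without the domain lock held, so
 * two threads may race; the loser frees its freshly allocated entry
 * and the already installed MSI page is used instead.
 */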
926 int
927 iommu_map_msi(struct iommu_ctx *ctx, iommu_gaddr_t size, int offset,
928     u_int eflags, u_int flags, vm_page_t *ma)
929 {
930 	struct iommu_domain *domain;
931 	struct iommu_map_entry *entry;
932 	int error;
933 
934 	error = 0;
935 	domain = ctx->domain;
936 
937 	/* Check if there is already an MSI page allocated */
938 	IOMMU_DOMAIN_LOCK(domain);
939 	entry = domain->msi_entry;
940 	IOMMU_DOMAIN_UNLOCK(domain);
941 
942 	if (entry == NULL) {
943 		error = iommu_gas_map(domain, &ctx->tag->common, size, offset,
944 		    eflags, flags, ma, &entry);
945 		IOMMU_DOMAIN_LOCK(domain);
946 		if (error == 0) {
947 			if (domain->msi_entry == NULL) {
948 				MPASS(domain->msi_base == 0);
949 				MPASS(domain->msi_phys == 0);
950 
951 				domain->msi_entry = entry;
952 				domain->msi_base = entry->start;
953 				domain->msi_phys = VM_PAGE_TO_PHYS(ma[0]);
954 			} else {
955 				/*
956 				 * We lost the race and already have an
957 				 * MSI page allocated. Free the unneeded entry.
958 				 */
959 				iommu_gas_free_entry(entry);
960 			}
961 		} else if (domain->msi_entry != NULL) {
962 			/*
963 			 * The allocation failed, but another succeeded.
964 			 * Return success as there is a valid MSI page.
965 			 */
966 			error = 0;
967 		}
968 		IOMMU_DOMAIN_UNLOCK(domain);
969 	}
970 
971 	return (error);
972 }
973 
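/*
 * Rewrite a physical MSI address into the corresponding address
 * within the domain's mapped MSI page, so that the device's MSI write
 * is routed through the IOMMU mapping.
 */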
974 void
975 iommu_translate_msi(struct iommu_domain *domain, uint64_t *addr)
976 {
977 
978 	*addr = (*addr - domain->msi_phys) + domain->msi_base;
979 
980 	KASSERT(*addr >= domain->msi_entry->start,
981 	    ("%s: Address is below the MSI entry start address (%jx < %jx)",
982 	    __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->start));
983 
984 	KASSERT(*addr + sizeof(*addr) <= domain->msi_entry->end,
985 	    ("%s: Address is above the MSI entry end address (%jx < %jx)",
986 	    __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->end));
987 }
988 
989 SYSCTL_NODE(_hw, OID_AUTO, iommu, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "");
990 
991 #ifdef INVARIANTS
992 SYSCTL_INT(_hw_iommu, OID_AUTO, check_free, CTLFLAG_RWTUN,
993     &iommu_check_free, 0,
994     "Check the GPA RBtree for free_down validity");
995 #endif
996