/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Portions of this work were supported by Innovate UK project 105694,
 * "Digital Security by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/tree.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <vm/vm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <dev/iommu/busdma_iommu.h>
#include <machine/vmparam.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include "iommu.h"
#include "iommu_if.h"

static MALLOC_DEFINE(M_IOMMU, "IOMMU", "IOMMU framework");

#define	IOMMU_LIST_LOCK()		sx_xlock(&iommu_sx)
#define	IOMMU_LIST_UNLOCK()		sx_xunlock(&iommu_sx)
#define	IOMMU_LIST_ASSERT_LOCKED()	sx_assert(&iommu_sx, SA_XLOCKED)

/* Debug printf; expands to nothing unless redefined locally. */
#define	dprintf(fmt, ...)

static struct sx iommu_sx;

struct iommu_entry {
	struct iommu_unit *iommu;
	LIST_ENTRY(iommu_entry) next;
};
static LIST_HEAD(, iommu_entry) iommu_list = LIST_HEAD_INITIALIZER(iommu_list);

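/*
 * Unmap callback for the generic address space (GAS) allocator: remove the
 * translation for [base, base + size) via the hardware driver's
 * IOMMU_UNMAP method.
 */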
static int
iommu_domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
	struct iommu_unit *iommu;
	int error;

	iommu = iodom->iommu;

	error = IOMMU_UNMAP(iommu->dev, iodom, base, size);

	return (error);
}

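/*
 * Map callback for the GAS allocator: install translations for the pages in
 * "ma" at [base, base + size), with the protection derived from the
 * map-entry flags, via the hardware driver's IOMMU_MAP method.
 */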
static int
iommu_domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
{
	struct iommu_unit *iommu;
	vm_prot_t prot;
	vm_offset_t va;
	int error;

	dprintf("%s: base %lx, size %lx\n", __func__, base, size);

	prot = 0;
	if (eflags & IOMMU_MAP_ENTRY_READ)
		prot |= VM_PROT_READ;
	if (eflags & IOMMU_MAP_ENTRY_WRITE)
		prot |= VM_PROT_WRITE;

	va = base;

	iommu = iodom->iommu;

	error = IOMMU_MAP(iommu->dev, iodom, va, ma, size, prot);

	return (error);
}

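/* Map/unmap callbacks used by the generic IOMMU address space allocator. */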
static const struct iommu_domain_map_ops domain_map_ops = {
	.map = iommu_domain_map_buf,
	.unmap = iommu_domain_unmap_buf,
};

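/*
 * Allocate a new domain on the given IOMMU unit and initialize its generic
 * address space allocator.
 */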
static struct iommu_domain *
iommu_domain_alloc(struct iommu_unit *iommu)
{
	struct iommu_domain *iodom;

	iodom = IOMMU_DOMAIN_ALLOC(iommu->dev, iommu);
	if (iodom == NULL)
		return (NULL);

	KASSERT(iodom->end != 0, ("domain end is not set"));

	iommu_domain_init(iommu, iodom, &domain_map_ops);
	iodom->iommu = iommu;
	iommu_gas_init_domain(iodom);

	return (iodom);
}

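/*
 * Tear down a domain: release its address space allocator state, if
 * initialized, and hand the domain back to the hardware driver.
 */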
static int
iommu_domain_free(struct iommu_domain *iodom)
{
	struct iommu_unit *iommu;

	iommu = iodom->iommu;

	IOMMU_LOCK(iommu);

	if ((iodom->flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
		IOMMU_DOMAIN_LOCK(iodom);
		iommu_gas_fini_domain(iodom);
		IOMMU_DOMAIN_UNLOCK(iodom);
	}

	iommu_domain_fini(iodom);

	IOMMU_DOMAIN_FREE(iommu->dev, iodom);
	IOMMU_UNLOCK(iommu);

	return (0);
}

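/*
 * Initialize the busdma tag embedded in a context.  The constraints are
 * bounded by the domain's address space end and BUS_SPACE_MAXADDR.
 */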
static void
iommu_tag_init(struct iommu_domain *iodom, struct bus_dma_tag_iommu *t)
{
	bus_addr_t maxaddr;

	maxaddr = MIN(iodom->end, BUS_SPACE_MAXADDR);

	t->common.ref_count = 0;
	t->common.impl = &bus_dma_iommu_impl;
	t->common.alignment = 1;
	t->common.boundary = 0;
	t->common.lowaddr = maxaddr;
	t->common.highaddr = maxaddr;
	t->common.maxsize = maxaddr;
	t->common.nsegments = BUS_SPACE_UNRESTRICTED;
	t->common.maxsegsz = maxaddr;
}

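/*
 * Ask the hardware driver to allocate a context for the requesting device
 * within the given domain.
 */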
static struct iommu_ctx *
iommu_ctx_alloc(device_t requester, struct iommu_domain *iodom, bool disabled)
{
	struct iommu_unit *iommu;
	struct iommu_ctx *ioctx;

	iommu = iodom->iommu;

	ioctx = IOMMU_CTX_ALLOC(iommu->dev, iodom, requester, disabled);
	if (ioctx == NULL)
		return (NULL);

	ioctx->domain = iodom;

	return (ioctx);
}

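/*
 * Complete context setup: let the hardware driver initialize the context,
 * then allocate and initialize the busdma tag owned by the requester.
 */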
static int
iommu_ctx_init(device_t requester, struct iommu_ctx *ioctx)
{
	struct bus_dma_tag_iommu *tag;
	struct iommu_domain *iodom;
	struct iommu_unit *iommu;
	int error;

	iodom = ioctx->domain;
	iommu = iodom->iommu;

	error = IOMMU_CTX_INIT(iommu->dev, ioctx);
	if (error)
		return (error);

	tag = ioctx->tag = malloc(sizeof(struct bus_dma_tag_iommu),
	    M_IOMMU, M_WAITOK | M_ZERO);
	tag->owner = requester;
	tag->ctx = ioctx;
	tag->ctx->domain = iodom;

	iommu_tag_init(iodom, tag);

	return (error);
}

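/*
 * Find the registered IOMMU unit backed by the given device, or NULL if the
 * device has not been registered.
 */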
static struct iommu_unit *
iommu_lookup(device_t dev)
{
	struct iommu_entry *entry;
	struct iommu_unit *iommu;

	IOMMU_LIST_LOCK();
	LIST_FOREACH(entry, &iommu_list, next) {
		iommu = entry->iommu;
		if (iommu->dev == dev) {
			IOMMU_LIST_UNLOCK();
			return (iommu);
		}
	}
	IOMMU_LIST_UNLOCK();

	return (NULL);
}

#ifdef FDT
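/*
 * Look up the IOMMU referenced by the device's "iommus" FDT property and
 * create a context for the device on it.  "channel" selects which entry of
 * the property to use.
 */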
struct iommu_ctx *
iommu_get_ctx_ofw(device_t dev, int channel)
{
	struct iommu_domain *iodom;
	struct iommu_unit *iommu;
	struct iommu_ctx *ioctx;
	phandle_t node, parent;
	device_t iommu_dev;
	pcell_t *cells;
	int niommus;
	int ncells;
	int error;

	node = ofw_bus_get_node(dev);
	if (node <= 0) {
		device_printf(dev,
		    "%s called on a non-OFW-based device.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_get_length(node,
	    "iommus", "#iommu-cells", &niommus);
	if (error) {
		device_printf(dev, "%s can't get iommu list.\n", __func__);
		return (NULL);
	}

	if (niommus == 0) {
		device_printf(dev, "%s iommu list is empty.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_alloc(node, "iommus", "#iommu-cells",
	    channel, &parent, &ncells, &cells);
	if (error != 0) {
		device_printf(dev, "%s can't get iommu device xref.\n",
		    __func__);
		return (NULL);
	}

	iommu_dev = OF_device_from_xref(parent);
	if (iommu_dev == NULL) {
		device_printf(dev, "%s can't get iommu device.\n", __func__);
		OF_prop_free(cells);
		return (NULL);
	}

	iommu = iommu_lookup(iommu_dev);
	if (iommu == NULL) {
		device_printf(dev, "%s can't lookup iommu.\n", __func__);
		OF_prop_free(cells);
		return (NULL);
	}

	/*
	 * In our current configuration there is one domain per context,
	 * so allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL) {
		device_printf(dev, "%s can't allocate domain.\n", __func__);
		OF_prop_free(cells);
		return (NULL);
	}

	ioctx = iommu_ctx_alloc(dev, iodom, false);
	if (ioctx == NULL) {
		iommu_domain_free(iodom);
		OF_prop_free(cells);
		return (NULL);
	}

	error = IOMMU_OFW_MD_DATA(iommu->dev, ioctx, cells, ncells);
	/* The cells are no longer needed after the MD-data call. */
	OF_prop_free(cells);
	if (error) {
		device_printf(dev, "%s can't set MD data\n", __func__);
		/* Release the partially constructed context and domain. */
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	error = iommu_ctx_init(dev, ioctx);
	if (error) {
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	return (ioctx);
}
#endif

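/*
 * Return the context for the requesting device, creating it (together with
 * a dedicated domain) on first use.
 */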
struct iommu_ctx *
iommu_get_ctx(struct iommu_unit *iommu, device_t requester,
    uint16_t rid, bool disabled, bool rmrr)
{
	struct iommu_domain *iodom;
	struct iommu_ctx *ioctx;
	int error;

	IOMMU_LOCK(iommu);
	ioctx = IOMMU_CTX_LOOKUP(iommu->dev, requester);
	if (ioctx) {
		IOMMU_UNLOCK(iommu);
		return (ioctx);
	}
	IOMMU_UNLOCK(iommu);

	/*
	 * In our current configuration there is one domain per context,
	 * so allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL)
		return (NULL);

	ioctx = iommu_ctx_alloc(requester, iodom, disabled);
	if (ioctx == NULL) {
		iommu_domain_free(iodom);
		return (NULL);
	}

	error = iommu_ctx_init(requester, ioctx);
	if (error) {
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	return (ioctx);
}

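/*
 * Release a context and its busdma tag.  The IOMMU lock must be held.
 */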
void
iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ioctx)
{
	struct bus_dma_tag_iommu *tag;

	IOMMU_ASSERT_LOCKED(iommu);

	tag = ioctx->tag;

	IOMMU_CTX_FREE(iommu->dev, ioctx);

	free(tag, M_IOMMU);
}

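/*
 * Release a context together with the single-context domain that backs it.
 */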
void
iommu_free_ctx(struct iommu_ctx *ioctx)
{
	struct iommu_unit *iommu;
	struct iommu_domain *iodom;
	int error;

	iodom = ioctx->domain;
	iommu = iodom->iommu;

	IOMMU_LOCK(iommu);
	iommu_free_ctx_locked(iommu, ioctx);
	IOMMU_UNLOCK(iommu);

	/* Since there is one domain per context, remove the domain too. */
	error = iommu_domain_free(iodom);
	if (error)
		device_printf(iommu->dev, "Could not free a domain\n");
}

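/*
 * Return a map entry's span to the domain's address space allocator and
 * either free the entry or just clear its flags.
 */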
static void
iommu_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
	iommu_gas_free_space(entry);

	if (free)
		iommu_gas_free_entry(entry);
	else
		entry->flags = 0;
}

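/*
 * Unmap and release every map entry on the queue.  The queue must be empty
 * on return.
 */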
void
iommu_domain_unload(struct iommu_domain *iodom,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
	struct iommu_map_entry *entry, *entry1;
	int error __diagused;

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", iodom, entry));
		error = iodom->ops->unmap(iodom, entry->start, entry->end -
		    entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", iodom, error));
		TAILQ_REMOVE(entries, entry, dmamap_link);
		iommu_domain_free_entry(entry, true);
	}

	if (TAILQ_EMPTY(entries))
		return;

	panic("entries map is not empty");
}

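/*
 * Register a new IOMMU unit with the framework and set up its busdma state.
 */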
int
iommu_register(struct iommu_unit *iommu)
{
	struct iommu_entry *entry;

	mtx_init(&iommu->lock, "IOMMU", NULL, MTX_DEF);

	entry = malloc(sizeof(struct iommu_entry), M_IOMMU, M_WAITOK | M_ZERO);
	entry->iommu = iommu;

	IOMMU_LIST_LOCK();
	LIST_INSERT_HEAD(&iommu_list, entry, next);
	IOMMU_LIST_UNLOCK();

	iommu_init_busdma(iommu);

	return (0);
}

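/*
 * Remove an IOMMU unit from the framework and tear down its busdma state.
 */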
int
iommu_unregister(struct iommu_unit *iommu)
{
	struct iommu_entry *entry, *tmp;

	IOMMU_LIST_LOCK();
	LIST_FOREACH_SAFE(entry, &iommu_list, next, tmp) {
		if (entry->iommu == iommu) {
			LIST_REMOVE(entry, next);
			free(entry, M_IOMMU);
		}
	}
	IOMMU_LIST_UNLOCK();

	iommu_fini_busdma(iommu);

	mtx_destroy(&iommu->lock);

	return (0);
}

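/*
 * Find the IOMMU unit responsible for translating the given device, using
 * each hardware driver's IOMMU_FIND method.
 */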
struct iommu_unit *
iommu_find(device_t dev, bool verbose)
{
	struct iommu_entry *entry;
	struct iommu_unit *iommu;
	int error;

	IOMMU_LIST_LOCK();
	LIST_FOREACH(entry, &iommu_list, next) {
		iommu = entry->iommu;
		error = IOMMU_FIND(iommu->dev, dev);
		if (error == 0) {
			IOMMU_LIST_UNLOCK();
			return (entry->iommu);
		}
	}
	IOMMU_LIST_UNLOCK();

	return (NULL);
}

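/*
 * Unload a single map entry, returning its span to the domain's address
 * space allocator.
 */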
void
iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
    bool cansleep __unused)
{

	dprintf("%s\n", __func__);

	iommu_domain_free_entry(entry, free);
}

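/*
 * Initialize the lock protecting the global list of IOMMU units.
 */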
static void
iommu_init(void)
{

	sx_init(&iommu_sx, "IOMMU list");
}

SYSINIT(iommu, SI_SUB_DRIVERS, SI_ORDER_FIRST, iommu_init, NULL);