/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Portions of this work was supported by Innovate UK project 105694,
 * "Digital Security by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/tree.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <vm/vm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <dev/iommu/busdma_iommu.h>
#include <machine/vmparam.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include "iommu.h"
#include "iommu_if.h"

static MALLOC_DEFINE(M_IOMMU, "IOMMU", "IOMMU framework");

#define	IOMMU_LIST_LOCK()		sx_xlock(&iommu_sx)
#define	IOMMU_LIST_UNLOCK()		sx_xunlock(&iommu_sx)
#define	IOMMU_LIST_ASSERT_LOCKED()	sx_assert(&iommu_sx, SA_XLOCKED)

#define dprintf(fmt, ...)

static struct sx iommu_sx;

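/*
 * All IOMMU units known to the framework, added by iommu_register() and
 * protected by the iommu_sx lock.
 */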
struct iommu_entry {
	struct iommu_unit *iommu;
	LIST_ENTRY(iommu_entry) next;
};
static LIST_HEAD(, iommu_entry) iommu_list = LIST_HEAD_INITIALIZER(iommu_list);

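/*
 * Back-ends for the generic IOMMU address-space (GAS) code.  These forward
 * map and unmap requests to the hardware driver through the iommu_if kobj
 * interface.
 */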
static int
iommu_domain_unmap_buf(struct iommu_domain *iodom,
    struct iommu_map_entry *entry, int flags)
{
	struct iommu_unit *iommu;
	int error;

	iommu = iodom->iommu;
	error = IOMMU_UNMAP(iommu->dev, iodom, entry->start, entry->end -
	    entry->start);
	return (error);
}

static int
iommu_domain_map_buf(struct iommu_domain *iodom, struct iommu_map_entry *entry,
    vm_page_t *ma, uint64_t eflags, int flags)
{
	struct iommu_unit *iommu;
	vm_prot_t prot;
	vm_offset_t va;
	int error;

	dprintf("%s: base %lx, size %lx\n", __func__, entry->start,
	    entry->end - entry->start);

	prot = 0;
	if (eflags & IOMMU_MAP_ENTRY_READ)
		prot |= VM_PROT_READ;
	if (eflags & IOMMU_MAP_ENTRY_WRITE)
		prot |= VM_PROT_WRITE;

	va = entry->start;
	iommu = iodom->iommu;
	error = IOMMU_MAP(iommu->dev, iodom, va, ma, entry->end -
	    entry->start, prot);
	return (error);
}

static const struct iommu_domain_map_ops domain_map_ops = {
	.map = iommu_domain_map_buf,
	.unmap = iommu_domain_unmap_buf,
};

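/*
 * Allocate a hardware domain from the driver and attach the generic
 * address-space allocator to it.  Each domain describes one I/O virtual
 * address space.
 */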
static struct iommu_domain *
iommu_domain_alloc(struct iommu_unit *iommu)
{
	struct iommu_domain *iodom;

	iodom = IOMMU_DOMAIN_ALLOC(iommu->dev, iommu);
	if (iodom == NULL)
		return (NULL);

	KASSERT(iodom->end != 0, ("domain end is not set"));

	iommu_domain_init(iommu, iodom, &domain_map_ops);
	iodom->iommu = iommu;
	iommu_gas_init_domain(iodom);

	return (iodom);
}

static int
iommu_domain_free(struct iommu_domain *iodom)
{
	struct iommu_unit *iommu;

	iommu = iodom->iommu;

	IOMMU_LOCK(iommu);

	if ((iodom->flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
		IOMMU_DOMAIN_LOCK(iodom);
		iommu_gas_fini_domain(iodom);
		IOMMU_DOMAIN_UNLOCK(iodom);
	}

	iommu_domain_fini(iodom);

	IOMMU_DOMAIN_FREE(iommu->dev, iodom);
	IOMMU_UNLOCK(iommu);

	return (0);
}

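/*
 * Seed a busdma tag with the least restrictive constraints the domain can
 * express: the tag covers the domain address space up to BUS_SPACE_MAXADDR,
 * with no alignment, boundary or segment count limits.
 */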
static void
iommu_tag_init(struct iommu_domain *iodom, struct bus_dma_tag_iommu *t)
{
	bus_addr_t maxaddr;

	maxaddr = MIN(iodom->end, BUS_SPACE_MAXADDR);

	t->common.impl = &bus_dma_iommu_impl;
	t->common.alignment = 1;
	t->common.boundary = 0;
	t->common.lowaddr = maxaddr;
	t->common.highaddr = maxaddr;
	t->common.maxsize = maxaddr;
	t->common.nsegments = BUS_SPACE_UNRESTRICTED;
	t->common.maxsegsz = maxaddr;
}

static struct iommu_ctx *
iommu_ctx_alloc(device_t requester, struct iommu_domain *iodom, bool disabled)
{
	struct iommu_unit *iommu;
	struct iommu_ctx *ioctx;

	iommu = iodom->iommu;

	ioctx = IOMMU_CTX_ALLOC(iommu->dev, iodom, requester, disabled);
	if (ioctx == NULL)
		return (NULL);

	ioctx->domain = iodom;

	return (ioctx);
}

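/*
 * Finish context setup: let the driver initialize its per-context state,
 * then allocate and populate the busdma tag used for DMA on behalf of the
 * requester.
 */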
static int
iommu_ctx_init(device_t requester, struct iommu_ctx *ioctx)
{
	struct bus_dma_tag_iommu *tag;
	struct iommu_domain *iodom;
	struct iommu_unit *iommu;
	int error;

	iodom = ioctx->domain;
	iommu = iodom->iommu;

	error = IOMMU_CTX_INIT(iommu->dev, ioctx);
	if (error)
		return (error);

	tag = ioctx->tag = malloc(sizeof(struct bus_dma_tag_iommu),
	    M_IOMMU, M_WAITOK | M_ZERO);
	tag->owner = requester;
	tag->ctx = ioctx;
	tag->ctx->domain = iodom;

	iommu_tag_init(iodom, tag);

	return (error);
}

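/*
 * Find the registered iommu_unit that belongs to a given IOMMU controller
 * device_t.
 */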
static struct iommu_unit *
iommu_lookup(device_t dev)
{
	struct iommu_entry *entry;
	struct iommu_unit *iommu;

	IOMMU_LIST_LOCK();
	LIST_FOREACH(entry, &iommu_list, next) {
		iommu = entry->iommu;
		if (iommu->dev == dev) {
			IOMMU_LIST_UNLOCK();
			return (iommu);
		}
	}
	IOMMU_LIST_UNLOCK();

	return (NULL);
}

#ifdef FDT
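/*
 * Resolve the IOMMU context for an FDT device.  The device node is expected
 * to reference its IOMMU controller through the standard "iommus" property;
 * a hypothetical device tree fragment, for illustration only:
 *
 *	ethernet@10000 {
 *		...
 *		iommus = <&smmu 0x800>;
 *	};
 *
 * "channel" selects which entry of the "iommus" list to use; the specifier
 * cells are handed to the driver as machine-dependent data.
 */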
struct iommu_ctx *
iommu_get_ctx_ofw(device_t dev, int channel)
{
	struct iommu_domain *iodom;
	struct iommu_unit *iommu;
	struct iommu_ctx *ioctx;
	phandle_t node, parent;
	device_t iommu_dev;
	pcell_t *cells;
	int niommus;
	int ncells;
	int error;

	node = ofw_bus_get_node(dev);
	if (node <= 0) {
		device_printf(dev,
		    "%s called on a non-OFW-based device.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_get_length(node,
	    "iommus", "#iommu-cells", &niommus);
	if (error) {
		device_printf(dev, "%s can't get iommu list.\n", __func__);
		return (NULL);
	}

	if (niommus == 0) {
		device_printf(dev, "%s iommu list is empty.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_alloc(node, "iommus", "#iommu-cells",
	    channel, &parent, &ncells, &cells);
	if (error != 0) {
		device_printf(dev, "%s can't get iommu device xref.\n",
		    __func__);
		return (NULL);
	}

	iommu_dev = OF_device_from_xref(parent);
	if (iommu_dev == NULL) {
		device_printf(dev, "%s can't get iommu device.\n", __func__);
		return (NULL);
	}

	iommu = iommu_lookup(iommu_dev);
	if (iommu == NULL) {
		device_printf(dev, "%s can't lookup iommu.\n", __func__);
		return (NULL);
	}

	/*
	 * In our current configuration we have one domain per ctx,
	 * so allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL) {
		device_printf(dev, "%s can't allocate domain.\n", __func__);
		return (NULL);
	}

	ioctx = iommu_ctx_alloc(dev, iodom, false);
	if (ioctx == NULL) {
		iommu_domain_free(iodom);
		return (NULL);
	}

	ioctx->domain = iodom;

	error = IOMMU_OFW_MD_DATA(iommu->dev, ioctx, cells, ncells);
	if (error) {
		device_printf(dev, "%s can't set MD data.\n", __func__);
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	error = iommu_ctx_init(dev, ioctx);
	if (error) {
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	return (ioctx);
}
#endif

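/*
 * Return the IOMMU context for a requester, allocating a fresh domain and
 * context on first use.  A context the driver already knows about is
 * returned as is.
 */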
struct iommu_ctx *
iommu_get_ctx(struct iommu_unit *iommu, device_t requester,
    uint16_t rid, bool disabled, bool rmrr)
{
	struct iommu_domain *iodom;
	struct iommu_ctx *ioctx;
	int error;

	IOMMU_LOCK(iommu);
	ioctx = IOMMU_CTX_LOOKUP(iommu->dev, requester);
	if (ioctx) {
		IOMMU_UNLOCK(iommu);
		return (ioctx);
	}
	IOMMU_UNLOCK(iommu);

	/*
	 * In our current configuration we have one domain per ctx,
	 * so allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL)
		return (NULL);

	ioctx = iommu_ctx_alloc(requester, iodom, disabled);
	if (ioctx == NULL) {
		iommu_domain_free(iodom);
		return (NULL);
	}

	error = iommu_ctx_init(requester, ioctx);
	if (error) {
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	return (ioctx);
}

void
iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ioctx)
{
	struct bus_dma_tag_iommu *tag;

	IOMMU_ASSERT_LOCKED(iommu);

	tag = ioctx->tag;

	IOMMU_CTX_FREE(iommu->dev, ioctx);

	free(tag, M_IOMMU);
}

void
iommu_free_ctx(struct iommu_ctx *ioctx)
{
	struct iommu_unit *iommu;
	struct iommu_domain *iodom;
	int error;

	iodom = ioctx->domain;
	iommu = iodom->iommu;

	IOMMU_LOCK(iommu);
	iommu_free_ctx_locked(iommu, ioctx);
	IOMMU_UNLOCK(iommu);

	/* Since we have one domain per ctx, remove the domain too. */
	error = iommu_domain_free(iodom);
	if (error)
		device_printf(iommu->dev, "Could not free a domain\n");
}

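/*
 * Return an entry's span to the domain address-space allocator and either
 * free the entry or clear its flags for reuse.
 */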
static void
iommu_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
	iommu_gas_free_space(entry);

	if (free)
		iommu_gas_free_entry(entry);
	else
		entry->flags = 0;
}

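/*
 * Unload a queue of map entries: unmap each one from the hardware and
 * release its address-space reservation.
 */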
void
iommu_domain_unload(struct iommu_domain *iodom,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
	struct iommu_map_entry *entry, *entry1;
	int error __diagused;

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", iodom, entry));
		error = iodom->ops->unmap(iodom, entry,
		    cansleep ? IOMMU_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", iodom, error));
		TAILQ_REMOVE(entries, entry, dmamap_link);
		iommu_domain_free_entry(entry, true);
	}

	if (!TAILQ_EMPTY(entries))
		panic("entries map is not empty");
}

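/*
 * Called by an IOMMU hardware driver once its unit is operational: the unit
 * is added to the global list and its busdma machinery is set up.  A minimal
 * sketch of a driver attach routine, with illustrative names only:
 *
 *	static int
 *	foo_smmu_attach(device_t dev)
 *	{
 *		struct foo_smmu_softc *sc;
 *
 *		sc = device_get_softc(dev);
 *		sc->unit.dev = dev;
 *		...
 *		return (iommu_register(&sc->unit));
 *	}
 */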
int
iommu_register(struct iommu_unit *iommu)
{
	struct iommu_entry *entry;

	mtx_init(&iommu->lock, "IOMMU", NULL, MTX_DEF);

	entry = malloc(sizeof(struct iommu_entry), M_IOMMU, M_WAITOK | M_ZERO);
	entry->iommu = iommu;

	IOMMU_LIST_LOCK();
	LIST_INSERT_HEAD(&iommu_list, entry, next);
	IOMMU_LIST_UNLOCK();

	sysctl_ctx_init(&iommu->sysctl_ctx);
	iommu_init_busdma(iommu);

	return (0);
}

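/*
 * Undo iommu_register(): drop the unit from the global list and tear down
 * its busdma and sysctl state.
 */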
int
iommu_unregister(struct iommu_unit *iommu)
{
	struct iommu_entry *entry, *tmp;

	IOMMU_LIST_LOCK();
	LIST_FOREACH_SAFE(entry, &iommu_list, next, tmp) {
		if (entry->iommu == iommu) {
			LIST_REMOVE(entry, next);
			free(entry, M_IOMMU);
		}
	}
	IOMMU_LIST_UNLOCK();

	iommu_fini_busdma(iommu);
	sysctl_ctx_free(&iommu->sysctl_ctx);

	mtx_destroy(&iommu->lock);

	return (0);
}

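/*
 * Ask each registered unit whether it translates for the given device; the
 * first unit whose IOMMU_FIND method succeeds wins.  "verbose" is currently
 * unused.
 */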
struct iommu_unit *
iommu_find(device_t dev, bool verbose)
{
	struct iommu_entry *entry;
	struct iommu_unit *iommu;
	int error;

	IOMMU_LIST_LOCK();
	LIST_FOREACH(entry, &iommu_list, next) {
		iommu = entry->iommu;
		error = IOMMU_FIND(iommu->dev, dev);
		if (error == 0) {
			IOMMU_LIST_UNLOCK();
			return (entry->iommu);
		}
	}
	IOMMU_LIST_UNLOCK();

	return (NULL);
}

void
iommu_unit_pre_instantiate_ctx(struct iommu_unit *unit)
{
}

void
iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
    bool cansleep __unused)
{
	dprintf("%s\n", __func__);

	iommu_domain_free_entry(entry, free);
}

static void
iommu_init(void)
{
	sx_init(&iommu_sx, "IOMMU list");
}

SYSINIT(iommu, SI_SUB_DRIVERS, SI_ORDER_FIRST, iommu_init, NULL);