/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Portions of this work were supported by Innovate UK project 105694,
 * "Digital Security by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/tree.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <vm/vm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <dev/iommu/busdma_iommu.h>
#include <machine/vmparam.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include "iommu.h"
#include "iommu_if.h"

static MALLOC_DEFINE(M_IOMMU, "IOMMU", "IOMMU framework");

#define	IOMMU_LIST_LOCK()		sx_xlock(&iommu_sx)
#define	IOMMU_LIST_UNLOCK()		sx_xunlock(&iommu_sx)
#define	IOMMU_LIST_ASSERT_LOCKED()	sx_assert(&iommu_sx, SA_XLOCKED)

#define dprintf(fmt, ...)

static struct sx iommu_sx;

struct iommu_entry {
	struct iommu_unit *iommu;
	LIST_ENTRY(iommu_entry) next;
};
static LIST_HEAD(, iommu_entry) iommu_list = LIST_HEAD_INITIALIZER(iommu_list);

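/*
 * Tear down the translation for a single map entry by asking the IOMMU
 * driver to unmap the entry's range of device virtual addresses.
 */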
static int
iommu_domain_unmap_buf(struct iommu_domain *iodom,
    struct iommu_map_entry *entry, int flags)
{
	struct iommu_unit *iommu;
	int error;

	iommu = iodom->iommu;
	error = IOMMU_UNMAP(iommu->dev, iodom, entry->start, entry->end -
	    entry->start);
	return (error);
}

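/*
 * Install a translation for a map entry: convert the entry's read/write
 * flags into VM protection bits and ask the IOMMU driver to map the
 * physical pages in 'ma' at the entry's device virtual address range.
 */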
static int
iommu_domain_map_buf(struct iommu_domain *iodom, struct iommu_map_entry *entry,
    vm_page_t *ma, uint64_t eflags, int flags)
{
	struct iommu_unit *iommu;
	vm_prot_t prot;
	vm_offset_t va;
	int error;

	dprintf("%s: base %lx, size %lx\n", __func__, entry->start,
	    entry->end - entry->start);

	prot = 0;
	if (eflags & IOMMU_MAP_ENTRY_READ)
		prot |= VM_PROT_READ;
	if (eflags & IOMMU_MAP_ENTRY_WRITE)
		prot |= VM_PROT_WRITE;

	va = entry->start;
	iommu = iodom->iommu;
	error = IOMMU_MAP(iommu->dev, iodom, va, ma, entry->end -
	    entry->start, prot);
	return (error);
}

static const struct iommu_domain_map_ops domain_map_ops = {
	.map = iommu_domain_map_buf,
	.unmap = iommu_domain_unmap_buf,
};

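/*
 * Ask the IOMMU driver for a new domain and initialize the generic
 * address space (GAS) allocator for it.  The driver must have set the
 * domain end address before returning.
 */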
static struct iommu_domain *
iommu_domain_alloc(struct iommu_unit *iommu)
{
	struct iommu_domain *iodom;

	iodom = IOMMU_DOMAIN_ALLOC(iommu->dev, iommu);
	if (iodom == NULL)
		return (NULL);

	KASSERT(iodom->end != 0, ("domain end is not set"));

	iommu_domain_init(iommu, iodom, &domain_map_ops);
	iodom->iommu = iommu;
	iommu_gas_init_domain(iodom);

	return (iodom);
}

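/*
 * Release a domain: tear down its GAS allocator (if it was set up),
 * finalize the generic state and hand the domain back to the driver.
 */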
static int
iommu_domain_free(struct iommu_domain *iodom)
{
	struct iommu_unit *iommu;

	iommu = iodom->iommu;

	IOMMU_LOCK(iommu);

	if ((iodom->flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
		IOMMU_DOMAIN_LOCK(iodom);
		iommu_gas_fini_domain(iodom);
		IOMMU_DOMAIN_UNLOCK(iodom);
	}

	iommu_domain_fini(iodom);

	IOMMU_DOMAIN_FREE(iommu->dev, iodom);
	IOMMU_UNLOCK(iommu);

	return (0);
}

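/*
 * Set up the busdma tag used for DMA requests on behalf of a context.
 * Addressing is limited by the domain end address and BUS_SPACE_MAXADDR.
 */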
static void
iommu_tag_init(struct iommu_domain *iodom, struct bus_dma_tag_iommu *t)
{
	bus_addr_t maxaddr;

	maxaddr = MIN(iodom->end, BUS_SPACE_MAXADDR);

	t->common.impl = &bus_dma_iommu_impl;
	t->common.alignment = 1;
	t->common.boundary = 0;
	t->common.lowaddr = maxaddr;
	t->common.highaddr = maxaddr;
	t->common.maxsize = maxaddr;
	t->common.nsegments = BUS_SPACE_UNRESTRICTED;
	t->common.maxsegsz = maxaddr;
}

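/*
 * Ask the IOMMU driver to allocate a device context within the given
 * domain for the requesting device.
 */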
static struct iommu_ctx *
iommu_ctx_alloc(device_t requester, struct iommu_domain *iodom, bool disabled)
{
	struct iommu_unit *iommu;
	struct iommu_ctx *ioctx;

	iommu = iodom->iommu;

	ioctx = IOMMU_CTX_ALLOC(iommu->dev, iodom, requester, disabled);
	if (ioctx == NULL)
		return (NULL);

	ioctx->domain = iodom;

	return (ioctx);
}

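/*
 * Complete context setup: let the driver initialize the context, then
 * allocate and initialize the busdma tag the requester will use.
 */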
static int
iommu_ctx_init(device_t requester, struct iommu_ctx *ioctx)
{
	struct bus_dma_tag_iommu *tag;
	struct iommu_domain *iodom;
	struct iommu_unit *iommu;
	int error;

	iodom = ioctx->domain;
	iommu = iodom->iommu;

	error = IOMMU_CTX_INIT(iommu->dev, ioctx);
	if (error)
		return (error);

	tag = ioctx->tag = malloc(sizeof(struct bus_dma_tag_iommu),
	    M_IOMMU, M_WAITOK | M_ZERO);
	tag->owner = requester;
	tag->ctx = ioctx;
	tag->ctx->domain = iodom;

	iommu_tag_init(iodom, tag);

	return (error);
}

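/*
 * Find the registered iommu_unit that belongs to the given IOMMU device.
 */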
static struct iommu_unit *
iommu_lookup(device_t dev)
{
	struct iommu_entry *entry;
	struct iommu_unit *iommu;

	IOMMU_LIST_LOCK();
	LIST_FOREACH(entry, &iommu_list, next) {
		iommu = entry->iommu;
		if (iommu->dev == dev) {
			IOMMU_LIST_UNLOCK();
			return (iommu);
		}
	}
	IOMMU_LIST_UNLOCK();

	return (NULL);
}

#ifdef FDT
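/*
 * FDT/OFW attachment path: resolve the "iommus" property of the device,
 * look up the referenced IOMMU unit, and create a domain and context for
 * the device.  The specifier cells are passed to the driver as
 * machine-dependent data.
 */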
struct iommu_ctx *
iommu_get_ctx_ofw(device_t dev, int channel)
{
	struct iommu_domain *iodom;
	struct iommu_unit *iommu;
	struct iommu_ctx *ioctx;
	phandle_t node, parent;
	device_t iommu_dev;
	pcell_t *cells;
	int niommus;
	int ncells;
	int error;

	node = ofw_bus_get_node(dev);
	if (node <= 0) {
		device_printf(dev,
		    "%s called on a non-OFW-based device.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_get_length(node,
	    "iommus", "#iommu-cells", &niommus);
	if (error) {
		device_printf(dev, "%s can't get iommu list.\n", __func__);
		return (NULL);
	}

	if (niommus == 0) {
		device_printf(dev, "%s iommu list is empty.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_alloc(node, "iommus", "#iommu-cells",
	    channel, &parent, &ncells, &cells);
	if (error != 0) {
		device_printf(dev, "%s can't get iommu device xref.\n",
		    __func__);
		return (NULL);
	}

	iommu_dev = OF_device_from_xref(parent);
	if (iommu_dev == NULL) {
		device_printf(dev, "%s can't get iommu device.\n", __func__);
		return (NULL);
	}

	iommu = iommu_lookup(iommu_dev);
	if (iommu == NULL) {
		device_printf(dev, "%s can't lookup iommu.\n", __func__);
		return (NULL);
	}

	/*
	 * In our current configuration we have a domain per each ctx,
	 * so allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL) {
		device_printf(dev, "%s can't allocate domain.\n", __func__);
		return (NULL);
	}

	ioctx = iommu_ctx_alloc(dev, iodom, false);
	if (ioctx == NULL) {
		iommu_domain_free(iodom);
		return (NULL);
	}

	ioctx->domain = iodom;

	error = IOMMU_OFW_MD_DATA(iommu->dev, ioctx, cells, ncells);
	OF_prop_free(cells);
	if (error) {
		device_printf(dev, "%s can't set MD data\n", __func__);
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	error = iommu_ctx_init(dev, ioctx);
	if (error) {
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	return (ioctx);
}
#endif

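/*
 * Return the context for a requester, creating a new domain and context
 * if the device is not yet known to this IOMMU unit.
 */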
struct iommu_ctx *
iommu_get_ctx(struct iommu_unit *iommu, device_t requester,
    uint16_t rid, bool disabled, bool rmrr)
{
	struct iommu_domain *iodom;
	struct iommu_ctx *ioctx;
	int error;

	IOMMU_LOCK(iommu);
	ioctx = IOMMU_CTX_LOOKUP(iommu->dev, requester);
	if (ioctx) {
		IOMMU_UNLOCK(iommu);
		return (ioctx);
	}
	IOMMU_UNLOCK(iommu);

	/*
	 * In our current configuration we have a domain per each ctx,
	 * so allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL)
		return (NULL);

	ioctx = iommu_ctx_alloc(requester, iodom, disabled);
	if (ioctx == NULL) {
		iommu_domain_free(iodom);
		return (NULL);
	}

	error = iommu_ctx_init(requester, ioctx);
	if (error) {
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	return (ioctx);
}

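/*
 * Destroy a context and its busdma tag.  Called with the IOMMU lock held;
 * the lock is dropped here.  The backing domain is freed as well since
 * there is one domain per context.
 */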
void
iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ioctx)
{
	struct bus_dma_tag_iommu *tag;
	struct iommu_domain *iodom;
	int error;

	IOMMU_ASSERT_LOCKED(iommu);

	tag = ioctx->tag;
	iodom = ioctx->domain;

	IOMMU_CTX_FREE(iommu->dev, ioctx);
	IOMMU_UNLOCK(iommu);

	free(tag, M_IOMMU);

	/* Since we have a domain per each ctx, remove the domain too. */
	error = iommu_domain_free(iodom);
	if (error)
		device_printf(iommu->dev, "Could not free a domain\n");
}

static void
iommu_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
	iommu_gas_free_space(entry);

	if (free)
		iommu_gas_free_entry(entry);
	else
		entry->flags = 0;
}

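/*
 * Unmap and release every map entry on the list.  All entries are
 * expected to be mapped; the list must be empty on return.
 */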
void
iommu_domain_unload(struct iommu_domain *iodom,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
	struct iommu_map_entry *entry, *entry1;
	int error __diagused;

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", iodom, entry));
		error = iodom->ops->unmap(iodom, entry,
		    cansleep ? IOMMU_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", iodom, error));
		TAILQ_REMOVE(entries, entry, dmamap_link);
		iommu_domain_free_entry(entry, true);
	}

	if (TAILQ_EMPTY(entries))
		return;

	panic("entries map is not empty");
}

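/*
 * Register a new IOMMU unit with the framework: add it to the global
 * list, initialize its lock and sysctl context, and set up its busdma
 * backend.
 */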
int
iommu_register(struct iommu_unit *iommu)
{
	struct iommu_entry *entry;

	mtx_init(&iommu->lock, "IOMMU", NULL, MTX_DEF);

	entry = malloc(sizeof(struct iommu_entry), M_IOMMU, M_WAITOK | M_ZERO);
	entry->iommu = iommu;

	IOMMU_LIST_LOCK();
	LIST_INSERT_HEAD(&iommu_list, entry, next);
	IOMMU_LIST_UNLOCK();

	sysctl_ctx_init(&iommu->sysctl_ctx);
	iommu_init_busdma(iommu);

	return (0);
}

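/*
 * Remove an IOMMU unit from the global list and tear down its busdma
 * backend, sysctl context, and lock.
 */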
int
iommu_unregister(struct iommu_unit *iommu)
{
	struct iommu_entry *entry, *tmp;

	IOMMU_LIST_LOCK();
	LIST_FOREACH_SAFE(entry, &iommu_list, next, tmp) {
		if (entry->iommu == iommu) {
			LIST_REMOVE(entry, next);
			free(entry, M_IOMMU);
		}
	}
	IOMMU_LIST_UNLOCK();

	iommu_fini_busdma(iommu);
	sysctl_ctx_free(&iommu->sysctl_ctx);

	mtx_destroy(&iommu->lock);

	return (0);
}

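/*
 * Find the IOMMU unit that translates DMA for the given device by asking
 * each registered unit whether it is responsible for the device.
 */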
struct iommu_unit *
iommu_find(device_t dev, bool verbose)
{
	struct iommu_entry *entry;
	struct iommu_unit *iommu;
	int error;

	IOMMU_LIST_LOCK();
	LIST_FOREACH(entry, &iommu_list, next) {
		iommu = entry->iommu;
		error = IOMMU_FIND(iommu->dev, dev);
		if (error == 0) {
			IOMMU_LIST_UNLOCK();
			return (entry->iommu);
		}
	}
	IOMMU_LIST_UNLOCK();

	return (NULL);
}

void
iommu_unit_pre_instantiate_ctx(struct iommu_unit *unit)
{
}

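/*
 * Release the address space allocation of a single map entry; the entry
 * itself is freed as well when 'free' is true.
 */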
void
iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
    bool cansleep __unused)
{

	dprintf("%s\n", __func__);

	iommu_domain_free_entry(entry, free);
}

static void
iommu_init(void)
{

	sx_init(&iommu_sx, "IOMMU list");
}

SYSINIT(iommu, SI_SUB_DRIVERS, SI_ORDER_FIRST, iommu_init, NULL);