/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/amd_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/amd_iommu.h>

static MALLOC_DEFINE(M_AMDIOMMU_CTX, "amdiommu_ctx", "AMD IOMMU Context");
static MALLOC_DEFINE(M_AMDIOMMU_DOMAIN, "amdiommu_dom", "AMD IOMMU Domain");

static void amdiommu_unref_domain_locked(struct amdiommu_unit *unit,
    struct amdiommu_domain *domain);

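/*
 * Return a pointer to the device table entry (DTE) for the context,
 * indexed by the context's PCI RID in the unit's device table.
 */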
static struct amdiommu_dte *
amdiommu_get_dtep(struct amdiommu_ctx *ctx)
{
	return (&CTX2AMD(ctx)->dev_tbl[ctx->context.rid]);
}

void
amdiommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
    bool cansleep)
{
	struct amdiommu_domain *domain;
	struct amdiommu_unit *unit;

	domain = IODOM2DOM(entry->domain);
	unit = DOM2AMD(domain);

	/*
	 * If "free" is false, then the IOTLB invalidation must be performed
	 * synchronously.  Otherwise, the caller might free the entry before
	 * the queued invalidation task is finished processing it.
	 */
	if (free) {
		AMDIOMMU_LOCK(unit);
		iommu_qi_invalidate_locked(&domain->iodom, entry, true);
		AMDIOMMU_UNLOCK(unit);
	} else {
		iommu_qi_invalidate_sync(&domain->iodom, entry->start,
		    entry->end - entry->start, cansleep);
		iommu_domain_free_entry(entry, false);
	}
}

static bool
amdiommu_domain_unload_emit_wait(struct amdiommu_domain *domain,
    struct iommu_map_entry *entry)
{
	return (true); /* XXXKIB */
}

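/*
 * Unload a queue of map entries: unmap each entry from the domain page
 * table, then, under the unit lock, queue an IOTLB invalidation for
 * each of them.  The queued invalidation machinery is responsible for
 * freeing the entries once the invalidation completes.
 */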
void
amdiommu_domain_unload(struct iommu_domain *iodom,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
	struct amdiommu_domain *domain;
	struct amdiommu_unit *unit;
	struct iommu_map_entry *entry, *entry1;
	int error __diagused;

	domain = IODOM2DOM(iodom);
	unit = DOM2AMD(domain);

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", domain, entry));
		error = iodom->ops->unmap(iodom, entry,
		    cansleep ? IOMMU_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", domain, error));
	}
	if (TAILQ_EMPTY(entries))
		return;

	AMDIOMMU_LOCK(unit);
	while ((entry = TAILQ_FIRST(entries)) != NULL) {
		TAILQ_REMOVE(entries, entry, dmamap_link);
		iommu_qi_invalidate_locked(&domain->iodom, entry,
		    amdiommu_domain_unload_emit_wait(domain, entry));
	}
	AMDIOMMU_UNLOCK(unit);
}

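/*
 * Release everything backing a domain: the GAS, the page table, the
 * domain id and the domain structure itself.  The domain must have no
 * remaining contexts, references or pending unloads.
 */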
static void
amdiommu_domain_destroy(struct amdiommu_domain *domain)
{
	struct iommu_domain *iodom;
	struct amdiommu_unit *unit;

	iodom = DOM2IODOM(domain);

	KASSERT(TAILQ_EMPTY(&domain->iodom.unload_entries),
	    ("unfinished unloads %p", domain));
	KASSERT(LIST_EMPTY(&iodom->contexts),
	    ("destroying dom %p with contexts", domain));
	KASSERT(domain->ctx_cnt == 0,
	    ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt));
	KASSERT(domain->refs == 0,
	    ("destroying dom %p with refs %d", domain, domain->refs));

	if ((domain->iodom.flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
		AMDIOMMU_DOMAIN_LOCK(domain);
		iommu_gas_fini_domain(iodom);
		AMDIOMMU_DOMAIN_UNLOCK(domain);
	}
	if ((domain->iodom.flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) {
		if (domain->pgtbl_obj != NULL)
			AMDIOMMU_DOMAIN_PGLOCK(domain);
		amdiommu_domain_free_pgtbl(domain);
	}
	iommu_domain_fini(iodom);
	unit = DOM2AMD(domain);
	free_unr(unit->domids, domain->domain);
	free(domain, M_AMDIOMMU_DOMAIN);
}

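/*
 * Return the size of the address space covered by a page table with the
 * given number of levels, i.e. the first address that such a table
 * cannot translate.  Levels whose span does not fit into 64 bits are
 * reported as the maximal address.
 */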
static iommu_gaddr_t
lvl2addr(int lvl)
{
	int x;

	x = IOMMU_PAGE_SHIFT + IOMMU_NPTEPGSHIFT * lvl;
	/* Level 6 has only 8 bits for page table index */
	if (x >= NBBY * sizeof(uint64_t))
		return (-1ull);
	return (1ull << x);
}

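/*
 * Select the number of page table levels needed to cover the domain's
 * address space, then clamp both the level count and the domain end
 * address to the host address translation size (HATS) advertised by the
 * unit in its extended feature register.
 */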
static void
amdiommu_domain_init_pglvl(struct amdiommu_unit *unit,
    struct amdiommu_domain *domain)
{
	iommu_gaddr_t end;
	int hats, i;
	uint64_t efr_hats;

	end = DOM2IODOM(domain)->end;
	for (i = AMDIOMMU_PGTBL_MAXLVL; i > 1; i--) {
		if (lvl2addr(i) >= end && lvl2addr(i - 1) < end)
			break;
	}
	domain->pglvl = i;

	efr_hats = unit->efr & AMDIOMMU_EFR_HATS_MASK;
	switch (efr_hats) {
	case AMDIOMMU_EFR_HATS_6LVL:
		hats = 6;
		break;
	case AMDIOMMU_EFR_HATS_5LVL:
		hats = 5;
		break;
	case AMDIOMMU_EFR_HATS_4LVL:
		hats = 4;
		break;
	default:
		printf("amdiommu%d: HATS %#jx (reserved) ignoring\n",
		    unit->iommu.unit, (uintmax_t)efr_hats);
		return;
	}
	if (hats >= domain->pglvl)
		return;

	printf("amdiommu%d: domain %d HATS %d pglvl %d reducing to HATS\n",
	    unit->iommu.unit, domain->domain, hats, domain->pglvl);
	domain->pglvl = hats;
	domain->iodom.end = lvl2addr(hats);
}

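/*
 * Allocate and initialize a domain: assign a domain id, initialize the
 * guest address space, and, unless the domain is identity-mapped,
 * allocate the page table and reserve the local APIC/MSI window so it
 * is never handed out for DMA mappings.
 */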
static struct amdiommu_domain *
amdiommu_domain_alloc(struct amdiommu_unit *unit, bool id_mapped)
{
	struct amdiommu_domain *domain;
	struct iommu_domain *iodom;
	int error, id;

	id = alloc_unr(unit->domids);
	if (id == -1)
		return (NULL);
	domain = malloc(sizeof(*domain), M_AMDIOMMU_DOMAIN, M_WAITOK | M_ZERO);
	iodom = DOM2IODOM(domain);
	domain->domain = id;
	LIST_INIT(&iodom->contexts);
	iommu_domain_init(AMD2IOMMU(unit), iodom, &amdiommu_domain_map_ops);

	domain->unit = unit;

	domain->iodom.end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR;
	amdiommu_domain_init_pglvl(unit, domain);
	iommu_gas_init_domain(DOM2IODOM(domain));

	if (id_mapped) {
		domain->iodom.flags |= IOMMU_DOMAIN_IDMAP;
	} else {
		error = amdiommu_domain_alloc_pgtbl(domain);
		if (error != 0)
			goto fail;
		/* Disable local apic region access */
		error = iommu_gas_reserve_region(iodom, 0xfee00000,
		    0xfeefffff + 1, &iodom->msi_entry);
		if (error != 0)
			goto fail;
	}

	return (domain);

fail:
	amdiommu_domain_destroy(domain);
	return (NULL);
}

static struct amdiommu_ctx *
amdiommu_ctx_alloc(struct amdiommu_domain *domain, uint16_t rid)
{
	struct amdiommu_ctx *ctx;

	ctx = malloc(sizeof(*ctx), M_AMDIOMMU_CTX, M_WAITOK | M_ZERO);
	ctx->context.domain = DOM2IODOM(domain);
	ctx->context.tag = malloc(sizeof(struct bus_dma_tag_iommu),
	    M_AMDIOMMU_CTX, M_WAITOK | M_ZERO);
	ctx->context.rid = rid;
	ctx->context.refs = 1;
	return (ctx);
}

static void
amdiommu_ctx_link(struct amdiommu_ctx *ctx)
{
	struct amdiommu_domain *domain;

	domain = CTX2DOM(ctx);
	IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
	KASSERT(domain->refs >= domain->ctx_cnt,
	    ("dom %p ref underflow %d %d", domain, domain->refs,
	    domain->ctx_cnt));
	domain->refs++;
	domain->ctx_cnt++;
	LIST_INSERT_HEAD(&domain->iodom.contexts, &ctx->context, link);
}

static void
amdiommu_ctx_unlink(struct amdiommu_ctx *ctx)
{
	struct amdiommu_domain *domain;

	domain = CTX2DOM(ctx);
	IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
	KASSERT(domain->refs > 0,
	    ("domain %p ctx dtr refs %d", domain, domain->refs));
	KASSERT(domain->ctx_cnt >= domain->refs,
	    ("domain %p ctx dtr refs %d ctx_cnt %d", domain,
	    domain->refs, domain->ctx_cnt));
	domain->refs--;
	domain->ctx_cnt--;
	LIST_REMOVE(&ctx->context, link);
}

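/*
 * Find an existing context with the given RID on the unit, or return
 * NULL if none is linked.  The unit lock must be held.
 */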
struct amdiommu_ctx *
amdiommu_find_ctx_locked(struct amdiommu_unit *unit, uint16_t rid)
{
	struct amdiommu_domain *domain;
	struct iommu_ctx *ctx;

	AMDIOMMU_ASSERT_LOCKED(unit);

	LIST_FOREACH(domain, &unit->domains, link) {
		LIST_FOREACH(ctx, &domain->iodom.contexts, link) {
			if (ctx->rid == rid)
				return (IOCTX2CTX(ctx));
		}
	}
	return (NULL);
}

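/*
 * Find the domain owning a context with the given RID, or NULL if the
 * RID is unknown to this unit.
 */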
struct amdiommu_domain *
amdiommu_find_domain(struct amdiommu_unit *unit, uint16_t rid)
{
	struct amdiommu_domain *domain;
	struct iommu_ctx *ctx;

	AMDIOMMU_LOCK(unit);
	LIST_FOREACH(domain, &unit->domains, link) {
		LIST_FOREACH(ctx, &domain->iodom.contexts, link) {
			if (ctx->rid == rid)
				break;
		}
		if (ctx != NULL)
			break;
	}
	AMDIOMMU_UNLOCK(unit);
	return (domain);
}

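/*
 * Drop a reference on the context and drop the unit lock.  On the last
 * reference, clear and invalidate the device table entry, flush the
 * IOTLB and interrupt remapping caches, release the interrupt remapping
 * table, unlink the context and drop its domain reference.
 */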
static void
amdiommu_free_ctx_locked(struct amdiommu_unit *unit, struct amdiommu_ctx *ctx)
{
	struct amdiommu_dte *dtep;
	struct amdiommu_domain *domain;

	AMDIOMMU_ASSERT_LOCKED(unit);
	KASSERT(ctx->context.refs >= 1,
	    ("amdiommu %p ctx %p refs %u", unit, ctx, ctx->context.refs));

	/*
	 * If our reference is not the last one, only the dereference
	 * should be performed.
	 */
	if (ctx->context.refs > 1) {
		ctx->context.refs--;
		AMDIOMMU_UNLOCK(unit);
		return;
	}

	KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Otherwise, the device table entry must be cleared before
	 * the page table is destroyed.
	 */
	dtep = amdiommu_get_dtep(ctx);
	dtep->v = 0;
	atomic_thread_fence_rel();
	memset(dtep, 0, sizeof(*dtep));

	domain = CTX2DOM(ctx);
	amdiommu_qi_invalidate_ctx_locked_nowait(ctx);
	amdiommu_qi_invalidate_ir_locked_nowait(unit, ctx->context.rid);
	amdiommu_qi_invalidate_all_pages_locked_nowait(domain);
	amdiommu_qi_invalidate_wait_sync(AMD2IOMMU(CTX2AMD(ctx)));

	if (unit->irte_enabled)
		amdiommu_ctx_fini_irte(ctx);

	amdiommu_ctx_unlink(ctx);
	free(ctx->context.tag, M_AMDIOMMU_CTX);
	free(ctx, M_AMDIOMMU_CTX);
	amdiommu_unref_domain_locked(unit, domain);
}

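/*
 * Drop a reference on the domain and drop the unit lock.  On the last
 * reference, unlink the domain from the unit, drain the pending unload
 * task and destroy the domain.
 */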
static void
amdiommu_unref_domain_locked(struct amdiommu_unit *unit,
    struct amdiommu_domain *domain)
{
	AMDIOMMU_ASSERT_LOCKED(unit);
	KASSERT(domain->refs >= 1,
	    ("amdiommu%d domain %p refs %u", unit->iommu.unit, domain,
	    domain->refs));
	KASSERT(domain->refs > domain->ctx_cnt,
	    ("amdiommu%d domain %p refs %d ctx_cnt %d", unit->iommu.unit,
	    domain, domain->refs, domain->ctx_cnt));

	if (domain->refs > 1) {
		domain->refs--;
		AMDIOMMU_UNLOCK(unit);
		return;
	}

	LIST_REMOVE(domain, link);
	AMDIOMMU_UNLOCK(unit);

	taskqueue_drain(unit->iommu.delayed_taskqueue,
	    &domain->iodom.unload_task);
	amdiommu_domain_destroy(domain);
}

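/*
 * Fill one device table entry: grant read and write permission, set the
 * domain id, record the interrupt-passing hints taken from the IVHD,
 * point the entry at the interrupt remapping table when remapping is
 * enabled, and set the page table root and paging mode (or 1:1
 * translation for identity-mapped domains).  The valid bit is set last,
 * after a release fence, so the hardware never sees a partial entry.
 */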
static void
dte_entry_init_one(struct amdiommu_dte *dtep, struct amdiommu_ctx *ctx,
    vm_page_t pgtblr, uint8_t dte, uint32_t edte)
{
	struct amdiommu_domain *domain;
	struct amdiommu_unit *unit;

	domain = CTX2DOM(ctx);
	unit = DOM2AMD(domain);

	dtep->tv = 1;
	/* dtep->had is not used for now. */
	dtep->ir = 1;
	dtep->iw = 1;
	dtep->domainid = domain->domain;
	dtep->pioctl = AMDIOMMU_DTE_PIOCTL_DIS;

	/* fill device interrupt passing hints from IVHD. */
	dtep->initpass = (dte & ACPI_IVHD_INIT_PASS) != 0;
	dtep->eintpass = (dte & ACPI_IVHD_EINT_PASS) != 0;
	dtep->nmipass = (dte & ACPI_IVHD_NMI_PASS) != 0;
	dtep->sysmgt = (dte & ACPI_IVHD_SYSTEM_MGMT) >> 4;
	dtep->lint0pass = (dte & ACPI_IVHD_LINT0_PASS) != 0;
	dtep->lint1pass = (dte & ACPI_IVHD_LINT1_PASS) != 0;

	if (unit->irte_enabled) {
		dtep->iv = 1;
		dtep->i = 0;
		dtep->inttablen = ilog2(unit->irte_nentries);
		dtep->intrroot = pmap_kextract(unit->irte_x2apic ?
		    (vm_offset_t)ctx->irtx2 :
		    (vm_offset_t)ctx->irtb) >> 6;

		dtep->intctl = AMDIOMMU_DTE_INTCTL_MAP;
	}

	if ((DOM2IODOM(domain)->flags & IOMMU_DOMAIN_IDMAP) != 0) {
		dtep->pgmode = AMDIOMMU_DTE_PGMODE_1T1;
	} else {
		MPASS(domain->pglvl > 0 && domain->pglvl <=
		    AMDIOMMU_PGTBL_MAXLVL);
		dtep->pgmode = domain->pglvl;
		dtep->ptroot = VM_PAGE_TO_PHYS(pgtblr) >> 12;
	}

	atomic_thread_fence_rel();
	dtep->v = 1;
}

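/*
 * Initialize the device table entry (or entries) for the context.  For
 * a bus-wide context, the same settings are replicated into the entries
 * of all RIDs on the bus.
 */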
static void
dte_entry_init(struct amdiommu_ctx *ctx, bool move, uint8_t dte, uint32_t edte)
{
	struct amdiommu_dte *dtep;
	struct amdiommu_unit *unit;
	struct amdiommu_domain *domain;
	int i;

	domain = CTX2DOM(ctx);
	unit = DOM2AMD(domain);

	dtep = amdiommu_get_dtep(ctx);
	KASSERT(dtep->v == 0,
	    ("amdiommu%d initializing valid dte @%p %#jx",
	    CTX2AMD(ctx)->iommu.unit, dtep, (uintmax_t)(*(uint64_t *)dtep)));

	if (iommu_is_buswide_ctx(AMD2IOMMU(unit),
	    PCI_RID2BUS(ctx->context.rid))) {
		MPASS(!move);
		for (i = 0; i <= PCI_BUSMAX; i++) {
			dte_entry_init_one(&dtep[i], ctx, domain->pgtblr,
			    dte, edte);
		}
	} else {
		dte_entry_init_one(dtep, ctx, domain->pgtblr, dte, edte);
	}
}

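/*
 * Find or create the context for a device (RID) on the unit.  The
 * domain and context are allocated with the unit lock dropped, then the
 * lookup is repeated under the lock to resolve races with a concurrent
 * allocation for the same RID; the loser of the race is destroyed.
 */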
struct amdiommu_ctx *
amdiommu_get_ctx_for_dev(struct amdiommu_unit *unit, device_t dev, uint16_t rid,
    int dev_domain, bool id_mapped, bool rmrr_init, uint8_t dte, uint32_t edte)
{
	struct amdiommu_domain *domain, *domain1;
	struct amdiommu_ctx *ctx, *ctx1;
	int bus, slot, func;

	if (dev != NULL) {
		bus = pci_get_bus(dev);
		slot = pci_get_slot(dev);
		func = pci_get_function(dev);
	} else {
		bus = PCI_RID2BUS(rid);
		slot = PCI_RID2SLOT(rid);
		func = PCI_RID2FUNC(rid);
	}
	AMDIOMMU_LOCK(unit);
	KASSERT(!iommu_is_buswide_ctx(AMD2IOMMU(unit), bus) ||
	    (slot == 0 && func == 0),
	    ("iommu%d pci%d:%d:%d get_ctx for buswide", AMD2IOMMU(unit)->unit,
	    bus, slot, func));
	ctx = amdiommu_find_ctx_locked(unit, rid);
	if (ctx == NULL) {
		/*
		 * Perform the allocations which require sleep or have
		 * a higher chance to succeed if sleep is allowed.
		 */
		AMDIOMMU_UNLOCK(unit);
		domain1 = amdiommu_domain_alloc(unit, id_mapped);
		if (domain1 == NULL)
			return (NULL);
		if (!id_mapped) {
			/*
			 * XXXKIB IVMD seems to be less significant
			 * and less used on AMD than RMRR on Intel.
			 * Not implemented for now.
			 */
		}
		ctx1 = amdiommu_ctx_alloc(domain1, rid);
		amdiommu_ctx_init_irte(ctx1);
		AMDIOMMU_LOCK(unit);

		/*
		 * Recheck the contexts, another thread might have
		 * already allocated the needed one.
		 */
		ctx = amdiommu_find_ctx_locked(unit, rid);
		if (ctx == NULL) {
			domain = domain1;
			ctx = ctx1;
			amdiommu_ctx_link(ctx);
			ctx->context.tag->owner = dev;
			iommu_device_tag_init(CTX2IOCTX(ctx), dev);

			LIST_INSERT_HEAD(&unit->domains, domain, link);
			dte_entry_init(ctx, false, dte, edte);
			amdiommu_qi_invalidate_ctx_locked(ctx);
			if (dev != NULL) {
				device_printf(dev,
				    "amdiommu%d pci%d:%d:%d:%d rid %x domain %d "
				    "%s-mapped\n",
				    AMD2IOMMU(unit)->unit, unit->unit_dom,
				    bus, slot, func, rid, domain->domain,
				    id_mapped ? "id" : "re");
			}
		} else {
			amdiommu_domain_destroy(domain1);
			/* Nothing needs to be done to destroy ctx1. */
			free(ctx1, M_AMDIOMMU_CTX);
			domain = CTX2DOM(ctx);
			ctx->context.refs++; /* tag referenced us */
		}
	} else {
		domain = CTX2DOM(ctx);
		if (ctx->context.tag->owner == NULL)
			ctx->context.tag->owner = dev;
		ctx->context.refs++; /* tag referenced us */
	}
	AMDIOMMU_UNLOCK(unit);

	return (ctx);
}

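/*
 * iommu bus method: resolve the device to its AMD IOMMU unit and RID
 * together with the IVHD-supplied DTE settings, then find or create the
 * context on that unit.
 */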
struct iommu_ctx *
amdiommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid,
    bool id_mapped, bool rmrr_init)
{
	struct amdiommu_unit *unit;
	struct amdiommu_ctx *ret;
	int error;
	uint32_t edte;
	uint16_t rid1;
	uint8_t dte;

	error = amdiommu_find_unit(dev, &unit, &rid1, &dte, &edte,
	    bootverbose);
	if (error != 0)
		return (NULL);
	if (AMD2IOMMU(unit) != iommu)	/* XXX complain loudly */
		return (NULL);
	ret = amdiommu_get_ctx_for_dev(unit, dev, rid1, pci_get_domain(dev),
	    id_mapped, rmrr_init, dte, edte);
	return (CTX2IOCTX(ret));
}

void
amdiommu_free_ctx_locked_method(struct iommu_unit *iommu,
    struct iommu_ctx *context)
{
	struct amdiommu_unit *unit;
	struct amdiommu_ctx *ctx;

	unit = IOMMU2AMD(iommu);
	ctx = IOCTX2CTX(context);
	amdiommu_free_ctx_locked(unit, ctx);
}