Lines Matching +full:iommu +full:- +full:ctx

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
66 #include <dev/iommu/busdma_iommu.h>
67 #include <x86/iommu/amd_reg.h>
68 #include <x86/iommu/x86_iommu.h>
69 #include <x86/iommu/amd_iommu.h>
71 static MALLOC_DEFINE(M_AMDIOMMU_CTX, "amdiommu_ctx", "AMD IOMMU Context");
72 static MALLOC_DEFINE(M_AMDIOMMU_DOMAIN, "amdiommu_dom", "AMD IOMMU Domain");
78 amdiommu_get_dtep(struct amdiommu_ctx *ctx) in amdiommu_get_dtep() argument
80 return (&CTX2AMD(ctx)->dev_tbl[ctx->context.rid]); in amdiommu_get_dtep()
90 domain = IODOM2DOM(entry->domain); in amdiommu_domain_unload_entry()
100 iommu_qi_invalidate_locked(&domain->iodom, entry, true); in amdiommu_domain_unload_entry()
103 iommu_qi_invalidate_sync(&domain->iodom, entry->start, in amdiommu_domain_unload_entry()
104 entry->end - entry->start, cansleep); in amdiommu_domain_unload_entry()
129 KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0, in amdiommu_domain_unload()
131 error = iodom->ops->unmap(iodom, entry, in amdiommu_domain_unload()
141 iommu_qi_invalidate_locked(&domain->iodom, entry, in amdiommu_domain_unload()
155 KASSERT(TAILQ_EMPTY(&domain->iodom.unload_entries), in amdiommu_domain_destroy()
157 KASSERT(LIST_EMPTY(&iodom->contexts), in amdiommu_domain_destroy()
159 KASSERT(domain->ctx_cnt == 0, in amdiommu_domain_destroy()
160 ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt)); in amdiommu_domain_destroy()
161 KASSERT(domain->refs == 0, in amdiommu_domain_destroy()
162 ("destroying dom %p with refs %d", domain, domain->refs)); in amdiommu_domain_destroy()
164 if ((domain->iodom.flags & IOMMU_DOMAIN_GAS_INITED) != 0) { in amdiommu_domain_destroy()
169 if ((domain->iodom.flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) { in amdiommu_domain_destroy()
170 if (domain->pgtbl_obj != NULL) in amdiommu_domain_destroy()
176 free_unr(unit->domids, domain->domain); in amdiommu_domain_destroy()
188 return (-1ull); in lvl2addr()
200 end = DOM2IODOM(domain)->end; in amdiommu_domain_init_pglvl()
201 for (i = AMDIOMMU_PGTBL_MAXLVL; i > 1; i--) { in amdiommu_domain_init_pglvl()
202 if (lvl2addr(i) >= end && lvl2addr(i - 1) < end) in amdiommu_domain_init_pglvl()
205 domain->pglvl = i; in amdiommu_domain_init_pglvl()
207 efr_hats = unit->efr & AMDIOMMU_EFR_HATS_MASK; in amdiommu_domain_init_pglvl()
220 unit->iommu.unit, (uintmax_t)efr_hats); in amdiommu_domain_init_pglvl()
223 if (hats >= domain->pglvl) in amdiommu_domain_init_pglvl()
227 unit->iommu.unit, domain->domain, hats, domain->pglvl); in amdiommu_domain_init_pglvl()
228 domain->pglvl = hats; in amdiommu_domain_init_pglvl()
229 domain->iodom.end = lvl2addr(hats); in amdiommu_domain_init_pglvl()
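The matched lines from amdiommu_domain_init_pglvl() size the page-table walk: lvl2addr() turns a level count into the highest address that many levels can map (returning -1ull on overflow, line 188), the loop picks the smallest level count that still covers the domain's end address, and the result is then clamped to the translation depth the unit advertises in its EFR HATS field. Below is a minimal, self-contained userspace sketch of that calculation, assuming 4 KiB pages and 512-entry (9-bit) levels as on AMD-Vi; the helper names are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Highest address (exclusive) that 'lvl' 9-bit levels over 4 KiB pages can map. */
static uint64_t
levels_to_limit(int lvl)
{
	int bits = 12 + 9 * lvl;

	return (bits >= 64 ? UINT64_MAX : (uint64_t)1 << bits);
}

/* Pick the smallest level count whose limit covers 'end', as the loop above does. */
static int
pick_pglvl(uint64_t end, int maxlvl)
{
	int i;

	for (i = maxlvl; i > 1; i--) {
		if (levels_to_limit(i) >= end && levels_to_limit(i - 1) < end)
			break;
	}
	return (i);
}

int
main(void)
{
	/* A 48-bit limit needs 4 levels; 3 levels top out at 39 bits. */
	printf("pglvl for 1<<48: %d\n", pick_pglvl((uint64_t)1 << 48, 6));
	printf("pglvl for 1<<30: %d\n", pick_pglvl((uint64_t)1 << 30, 6));
	return (0);
}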
239 id = alloc_unr(unit->domids); in amdiommu_domain_alloc()
240 if (id == -1) in amdiommu_domain_alloc()
244 domain->domain = id; in amdiommu_domain_alloc()
245 LIST_INIT(&iodom->contexts); in amdiommu_domain_alloc()
248 domain->unit = unit; in amdiommu_domain_alloc()
250 domain->iodom.end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR; in amdiommu_domain_alloc()
255 domain->iodom.flags |= IOMMU_DOMAIN_IDMAP; in amdiommu_domain_alloc()
262 0xfeefffff + 1, &iodom->msi_entry); in amdiommu_domain_alloc()
277 struct amdiommu_ctx *ctx; in amdiommu_ctx_alloc() local
279 ctx = malloc(sizeof(*ctx), M_AMDIOMMU_CTX, M_WAITOK | M_ZERO); in amdiommu_ctx_alloc()
280 ctx->context.domain = DOM2IODOM(domain); in amdiommu_ctx_alloc()
281 ctx->context.tag = malloc(sizeof(struct bus_dma_tag_iommu), in amdiommu_ctx_alloc()
283 ctx->context.rid = rid; in amdiommu_ctx_alloc()
284 ctx->context.refs = 1; in amdiommu_ctx_alloc()
285 return (ctx); in amdiommu_ctx_alloc()
289 amdiommu_ctx_link(struct amdiommu_ctx *ctx) in amdiommu_ctx_link() argument
293 domain = CTX2DOM(ctx); in amdiommu_ctx_link()
294 IOMMU_ASSERT_LOCKED(domain->iodom.iommu); in amdiommu_ctx_link()
295 KASSERT(domain->refs >= domain->ctx_cnt, in amdiommu_ctx_link()
296 ("dom %p ref underflow %d %d", domain, domain->refs, in amdiommu_ctx_link()
297 domain->ctx_cnt)); in amdiommu_ctx_link()
298 domain->refs++; in amdiommu_ctx_link()
299 domain->ctx_cnt++; in amdiommu_ctx_link()
300 LIST_INSERT_HEAD(&domain->iodom.contexts, &ctx->context, link); in amdiommu_ctx_link()
304 amdiommu_ctx_unlink(struct amdiommu_ctx *ctx) in amdiommu_ctx_unlink() argument
308 domain = CTX2DOM(ctx); in amdiommu_ctx_unlink()
309 IOMMU_ASSERT_LOCKED(domain->iodom.iommu); in amdiommu_ctx_unlink()
310 KASSERT(domain->refs > 0, in amdiommu_ctx_unlink()
311 ("domain %p ctx dtr refs %d", domain, domain->refs)); in amdiommu_ctx_unlink()
312 KASSERT(domain->ctx_cnt >= domain->refs, in amdiommu_ctx_unlink()
313 ("domain %p ctx dtr refs %d ctx_cnt %d", domain, in amdiommu_ctx_unlink()
314 domain->refs, domain->ctx_cnt)); in amdiommu_ctx_unlink()
315 domain->refs--; in amdiommu_ctx_unlink()
316 domain->ctx_cnt--; in amdiommu_ctx_unlink()
317 LIST_REMOVE(&ctx->context, link); in amdiommu_ctx_unlink()
324 struct iommu_ctx *ctx; in amdiommu_find_ctx_locked() local
328 LIST_FOREACH(domain, &unit->domains, link) { in amdiommu_find_ctx_locked()
329 LIST_FOREACH(ctx, &domain->iodom.contexts, link) { in amdiommu_find_ctx_locked()
330 if (ctx->rid == rid) in amdiommu_find_ctx_locked()
331 return (IOCTX2CTX(ctx)); in amdiommu_find_ctx_locked()
341 struct iommu_ctx *ctx; in amdiommu_find_domain() local
344 LIST_FOREACH(domain, &unit->domains, link) { in amdiommu_find_domain()
345 LIST_FOREACH(ctx, &domain->iodom.contexts, link) { in amdiommu_find_domain()
346 if (ctx->rid == rid) in amdiommu_find_domain()
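Both lookups above, amdiommu_find_ctx_locked() and amdiommu_find_domain(), walk every domain on the unit and every context in each domain, matching on the PCI requester ID. For orientation, here is a small sketch of the standard RID encoding that macros such as PCI_RID2BUS (used at line 493 below) decompose; the helper names are made up for illustration.

#include <stdint.h>

/* Standard PCI requester ID: 8-bit bus, 5-bit slot (device), 3-bit function. */
static inline uint16_t
rid_make(uint8_t bus, uint8_t slot, uint8_t func)
{
	return ((uint16_t)bus << 8 | (slot & 0x1f) << 3 | (func & 0x7));
}

static inline uint8_t rid_bus(uint16_t rid)  { return (rid >> 8); }
static inline uint8_t rid_slot(uint16_t rid) { return ((rid >> 3) & 0x1f); }
static inline uint8_t rid_func(uint16_t rid) { return (rid & 0x07); }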
355 amdiommu_free_ctx_locked(struct amdiommu_unit *unit, struct amdiommu_ctx *ctx) in amdiommu_free_ctx_locked() argument
361 KASSERT(ctx->context.refs >= 1, in amdiommu_free_ctx_locked()
362 ("amdiommu %p ctx %p refs %u", unit, ctx, ctx->context.refs)); in amdiommu_free_ctx_locked()
368 if (ctx->context.refs > 1) { in amdiommu_free_ctx_locked()
369 ctx->context.refs--; in amdiommu_free_ctx_locked()
374 KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0, in amdiommu_free_ctx_locked()
375 ("lost ref on disabled ctx %p", ctx)); in amdiommu_free_ctx_locked()
381 dtep = amdiommu_get_dtep(ctx); in amdiommu_free_ctx_locked()
382 dtep->v = 0; in amdiommu_free_ctx_locked()
386 domain = CTX2DOM(ctx); in amdiommu_free_ctx_locked()
387 amdiommu_qi_invalidate_ctx_locked_nowait(ctx); in amdiommu_free_ctx_locked()
388 amdiommu_qi_invalidate_ir_locked_nowait(unit, ctx->context.rid); in amdiommu_free_ctx_locked()
390 amdiommu_qi_invalidate_wait_sync(AMD2IOMMU(CTX2AMD(ctx))); in amdiommu_free_ctx_locked()
392 if (unit->irte_enabled) in amdiommu_free_ctx_locked()
393 amdiommu_ctx_fini_irte(ctx); in amdiommu_free_ctx_locked()
395 amdiommu_ctx_unlink(ctx); in amdiommu_free_ctx_locked()
396 free(ctx->context.tag, M_AMDIOMMU_CTX); in amdiommu_free_ctx_locked()
397 free(ctx, M_AMDIOMMU_CTX); in amdiommu_free_ctx_locked()
406 KASSERT(domain->refs >= 1, in amdiommu_unref_domain_locked()
407 ("amdiommu%d domain %p refs %u", unit->iommu.unit, domain, in amdiommu_unref_domain_locked()
408 domain->refs)); in amdiommu_unref_domain_locked()
409 KASSERT(domain->refs > domain->ctx_cnt, in amdiommu_unref_domain_locked()
410 ("amdiommu%d domain %p refs %d ctx_cnt %d", unit->iommu.unit, in amdiommu_unref_domain_locked()
411 domain, domain->refs, domain->ctx_cnt)); in amdiommu_unref_domain_locked()
413 if (domain->refs > 1) { in amdiommu_unref_domain_locked()
414 domain->refs--; in amdiommu_unref_domain_locked()
422 taskqueue_drain(unit->iommu.delayed_taskqueue, in amdiommu_unref_domain_locked()
423 &domain->iodom.unload_task); in amdiommu_unref_domain_locked()
428 dte_entry_init_one(struct amdiommu_dte *dtep, struct amdiommu_ctx *ctx, in dte_entry_init_one() argument
434 domain = CTX2DOM(ctx); in dte_entry_init_one()
437 dtep->tv = 1; in dte_entry_init_one()
438 /* dtep->had not used for now */ in dte_entry_init_one()
439 dtep->ir = 1; in dte_entry_init_one()
440 dtep->iw = 1; in dte_entry_init_one()
441 dtep->domainid = domain->domain; in dte_entry_init_one()
442 dtep->pioctl = AMDIOMMU_DTE_PIOCTL_DIS; in dte_entry_init_one()
445 dtep->initpass = (dte & ACPI_IVHD_INIT_PASS) != 0; in dte_entry_init_one()
446 dtep->eintpass = (dte & ACPI_IVHD_EINT_PASS) != 0; in dte_entry_init_one()
447 dtep->nmipass = (dte & ACPI_IVHD_NMI_PASS) != 0; in dte_entry_init_one()
448 dtep->sysmgt = (dte & ACPI_IVHD_SYSTEM_MGMT) >> 4; in dte_entry_init_one()
449 dtep->lint0pass = (dte & ACPI_IVHD_LINT0_PASS) != 0; in dte_entry_init_one()
450 dtep->lint1pass = (dte & ACPI_IVHD_LINT1_PASS) != 0; in dte_entry_init_one()
452 if (unit->irte_enabled) { in dte_entry_init_one()
453 dtep->iv = 1; in dte_entry_init_one()
454 dtep->i = 0; in dte_entry_init_one()
455 dtep->inttablen = ilog2(unit->irte_nentries); in dte_entry_init_one()
456 dtep->intrroot = pmap_kextract(unit->irte_x2apic ? in dte_entry_init_one()
457 (vm_offset_t)ctx->irtx2 : in dte_entry_init_one()
458 (vm_offset_t)ctx->irtb) >> 6; in dte_entry_init_one()
460 dtep->intctl = AMDIOMMU_DTE_INTCTL_MAP; in dte_entry_init_one()
463 if ((DOM2IODOM(domain)->flags & IOMMU_DOMAIN_IDMAP) != 0) { in dte_entry_init_one()
464 dtep->pgmode = AMDIOMMU_DTE_PGMODE_1T1; in dte_entry_init_one()
466 MPASS(domain->pglvl > 0 && domain->pglvl <= in dte_entry_init_one()
468 dtep->pgmode = domain->pglvl; in dte_entry_init_one()
469 dtep->ptroot = VM_PAGE_TO_PHYS(pgtblr) >> 12; in dte_entry_init_one()
473 dtep->v = 1; in dte_entry_init_one()
477 dte_entry_init(struct amdiommu_ctx *ctx, bool move, uint8_t dte, uint32_t edte) in dte_entry_init() argument
484 domain = CTX2DOM(ctx); in dte_entry_init()
487 dtep = amdiommu_get_dtep(ctx); in dte_entry_init()
488 KASSERT(dtep->v == 0, in dte_entry_init()
490 CTX2AMD(ctx)->iommu.unit, dtep, (uintmax_t)(*(uint64_t *)dtep))); in dte_entry_init()
493 PCI_RID2BUS(ctx->context.rid))) { in dte_entry_init()
496 dte_entry_init_one(&dtep[i], ctx, domain->pgtblr, in dte_entry_init()
500 dte_entry_init_one(dtep, ctx, domain->pgtblr, dte, edte); in dte_entry_init()
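The device-table code above follows a publish-last pattern: dte_entry_init_one() fills permissions, the domain ID, the interrupt-remapping root (physical address stored >> 6) and the page-table root (stored >> 12), and only then sets the valid bit (line 473); dte_entry_init() asserts the entry was not already valid (line 488) and, for a bus-wide context, replicates the entry via the &dtep[i] loop at line 496. Here is a toy sketch of that publish-last idea using a deliberately made-up, simplified entry layout; the real AMD-Vi layout lives in amd_reg.h, and the real driver follows the store with a context invalidation.

#include <stdint.h>

/* Hypothetical, much-simplified device table entry; NOT the AMD-Vi layout. */
struct toy_dte {
	uint64_t v:1;		/* valid: the IOMMU may use this entry */
	uint64_t ir:1;		/* DMA reads allowed */
	uint64_t iw:1;		/* DMA writes allowed */
	uint64_t pgmode:3;	/* number of page-table levels */
	uint64_t domainid:16;
	uint64_t ptroot:40;	/* page-table root, page-aligned physical address >> 12 */
};

static void
toy_dte_init(struct toy_dte *dtep, uint16_t domid, int pglvl, uint64_t pt_phys)
{
	dtep->ir = 1;
	dtep->iw = 1;
	dtep->pgmode = pglvl;
	dtep->domainid = domid;
	dtep->ptroot = pt_phys >> 12;
	/* Set last, so a concurrent table walker never sees a half-built entry. */
	dtep->v = 1;
}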
509 struct amdiommu_ctx *ctx, *ctx1; in amdiommu_get_ctx_for_dev() local
524 ("iommu%d pci%d:%d:%d get_ctx for buswide", AMD2IOMMU(unit)->unit, in amdiommu_get_ctx_for_dev()
526 ctx = amdiommu_find_ctx_locked(unit, rid); in amdiommu_get_ctx_for_dev()
527 if (ctx == NULL) { in amdiommu_get_ctx_for_dev()
551 ctx = amdiommu_find_ctx_locked(unit, rid); in amdiommu_get_ctx_for_dev()
552 if (ctx == NULL) { in amdiommu_get_ctx_for_dev()
554 ctx = ctx1; in amdiommu_get_ctx_for_dev()
555 amdiommu_ctx_link(ctx); in amdiommu_get_ctx_for_dev()
556 ctx->context.tag->owner = dev; in amdiommu_get_ctx_for_dev()
557 iommu_device_tag_init(CTX2IOCTX(ctx), dev); in amdiommu_get_ctx_for_dev()
559 LIST_INSERT_HEAD(&unit->domains, domain, link); in amdiommu_get_ctx_for_dev()
560 dte_entry_init(ctx, false, dte, edte); in amdiommu_get_ctx_for_dev()
561 amdiommu_qi_invalidate_ctx_locked(ctx); in amdiommu_get_ctx_for_dev()
565 "%s-mapped\n", in amdiommu_get_ctx_for_dev()
566 AMD2IOMMU(unit)->unit, unit->unit_dom, in amdiommu_get_ctx_for_dev()
567 bus, slot, func, rid, domain->domain, in amdiommu_get_ctx_for_dev()
574 domain = CTX2DOM(ctx); in amdiommu_get_ctx_for_dev()
575 ctx->context.refs++; /* tag referenced us */ in amdiommu_get_ctx_for_dev()
578 domain = CTX2DOM(ctx); in amdiommu_get_ctx_for_dev()
579 if (ctx->context.tag->owner == NULL) in amdiommu_get_ctx_for_dev()
580 ctx->context.tag->owner = dev; in amdiommu_get_ctx_for_dev()
581 ctx->context.refs++; /* tag referenced us */ in amdiommu_get_ctx_for_dev()
585 return (ctx); in amdiommu_get_ctx_for_dev()
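The tail of amdiommu_get_ctx_for_dev() is the usual "allocate outside the lock, re-check under it" idiom: the context missed at line 527 is created while the unit lock is dropped (those lines are not part of this match), the lookup is repeated at line 551, and only the thread that still finds no context links its new one in; a thread that finds an existing context instead takes a reference on it (lines 574-581), presumably discarding its speculative allocation. A compact userspace sketch of the same idiom, with invented names and a pthread mutex standing in for the unit lock:

#include <pthread.h>
#include <stdlib.h>

struct obj {
	int key;
	struct obj *next;
};

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static struct obj *head;

static struct obj *
lookup_locked(int key)
{
	struct obj *o;

	for (o = head; o != NULL; o = o->next)
		if (o->key == key)
			return (o);
	return (NULL);
}

static struct obj *
get_or_create(int key)
{
	struct obj *o, *o1;

	pthread_mutex_lock(&lk);
	o = lookup_locked(key);
	if (o == NULL) {
		pthread_mutex_unlock(&lk);
		o1 = calloc(1, sizeof(*o1));	/* M_WAITOK analogue: assume it succeeds */
		o1->key = key;
		pthread_mutex_lock(&lk);
		o = lookup_locked(key);		/* re-check: did another thread win the race? */
		if (o == NULL) {
			o = o1;
			o->next = head;
			head = o;
		} else {
			free(o1);		/* lost the race; discard the speculative object */
		}
	}
	pthread_mutex_unlock(&lk);
	return (o);
}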
589 amdiommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid, in amdiommu_get_ctx() argument
603 if (AMD2IOMMU(unit) != iommu) /* XXX complain loudly */ in amdiommu_get_ctx()
611 amdiommu_free_ctx_locked_method(struct iommu_unit *iommu, in amdiommu_free_ctx_locked_method() argument
615 struct amdiommu_ctx *ctx; in amdiommu_free_ctx_locked_method() local
617 unit = IOMMU2AMD(iommu); in amdiommu_free_ctx_locked_method()
618 ctx = IOCTX2CTX(context); in amdiommu_free_ctx_locked_method()
619 amdiommu_free_ctx_locked(unit, ctx); in amdiommu_free_ctx_locked_method()