Lines Matching +full:iommu +full:ctx

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
66 #include <dev/iommu/busdma_iommu.h>
67 #include <x86/iommu/intel_reg.h>
68 #include <x86/iommu/x86_iommu.h>
69 #include <x86/iommu/intel_dmar.h>
78 static void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
90 ctxm = iommu_pgalloc(dmar->ctx_obj, 1 + bus, IOMMU_PGF_NOALLOC); in dmar_ensure_ctx_page()
101 ctxm = iommu_pgalloc(dmar->ctx_obj, 1 + bus, IOMMU_PGF_ZERO | in dmar_ensure_ctx_page()
103 re = iommu_map_pgtbl(dmar->ctx_obj, 0, IOMMU_PGF_NOALLOC, &sf); in dmar_ensure_ctx_page()
105 dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK & in dmar_ensure_ctx_page()
113 dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp) in dmar_map_ctx_entry() argument
118 dmar = CTX2DMAR(ctx); in dmar_map_ctx_entry()
120 ctxp = iommu_map_pgtbl(dmar->ctx_obj, 1 + PCI_RID2BUS(ctx->context.rid), in dmar_map_ctx_entry()
122 ctxp += ctx->context.rid & 0xff; in dmar_map_ctx_entry()
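
dmar_map_ctx_entry above locates a device's context entry: page 0 of
ctx_obj holds the root table, page 1 + bus holds the 256-entry context
table for that bus, and the low 8 bits of the RID select the entry
within the page. A standalone sketch of that indexing, with local
macros standing in for PCI_RID2BUS rather than the kernel headers:

/*
 * A 16-bit PCI RID is bus:device:function (8:5:3).  The context
 * table for a bus lives on page 1 + bus of ctx_obj (page 0 is the
 * root table) and is indexed by the devfn, i.e. rid & 0xff.
 */
#include <stdint.h>
#include <stdio.h>

#define RID2BUS(rid)    (((rid) >> 8) & 0xff)
#define RID2DEVFN(rid)  ((rid) & 0xff)

int
main(void)
{
        uint16_t rid = 0x0310;  /* bus 3, device 2, function 0 */

        /* Prints "ctx_obj page 4, entry 16". */
        printf("ctx_obj page %d, entry %d\n",
            1 + RID2BUS(rid), RID2DEVFN(rid));
        return (0);
}
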
137 * to clear P bit in the ctx entry for update. in ctx_id_entry_init_one()
139 dmar_pte_store1(&ctxp->ctx2, DMAR_CTX2_DID(domain->domain) | in ctx_id_entry_init_one()
140 domain->awlvl); in ctx_id_entry_init_one()
142 dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P); in ctx_id_entry_init_one()
144 dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_UNTR | in ctx_id_entry_init_one()
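
ctx_id_entry_init_one stores the domain id and address-width level into
ctx2 before making ctx1, which carries the present bit, visible. A
userspace-only sketch of the packing; the field layout follows the VT-d
specification, but the macro names below are local to the sketch, not
the intel_reg.h definitions:

#include <stdint.h>
#include <stdio.h>

#define CTX1_P          (1ull << 0)     /* entry present */
#define CTX1_T_UNTR     (0ull << 2)     /* translate untranslated reqs */
#define CTX1_T_PASS     (2ull << 2)     /* pass-through */
#define CTX2_AW(lvl)    ((uint64_t)(lvl) & 0x7)          /* addr width */
#define CTX2_DID(id)    (((uint64_t)(id) & 0xffff) << 8) /* domain id */

int
main(void)
{
        uint64_t pgtbl_root = 0x12345000;       /* 4k-aligned SLPT root */
        uint64_t ctx1, ctx2;

        ctx2 = CTX2_DID(42) | CTX2_AW(2);       /* 4-level, domain 42 */
        ctx1 = CTX1_T_UNTR | CTX1_P | pgtbl_root;
        printf("ctx1 %#jx ctx2 %#jx\n", (uintmax_t)ctx1, (uintmax_t)ctx2);
        /* Pass-through entries carry no page-table root. */
        printf("pass-through ctx1 %#jx\n", (uintmax_t)(CTX1_T_PASS | CTX1_P));
        return (0);
}
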
151 ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move, in ctx_id_entry_init() argument
159 domain = CTX2DOM(ctx); in ctx_id_entry_init()
161 KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0), in ctx_id_entry_init()
162 ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx", in ctx_id_entry_init()
163 unit->iommu.unit, busno, pci_get_slot(ctx->context.tag->owner), in ctx_id_entry_init()
164 pci_get_function(ctx->context.tag->owner), in ctx_id_entry_init()
165 ctxp->ctx1, ctxp->ctx2)); in ctx_id_entry_init()
167 if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 && in ctx_id_entry_init()
168 (unit->hw_ecap & DMAR_ECAP_PT) != 0) { in ctx_id_entry_init()
169 KASSERT(domain->pgtbl_obj == NULL, in ctx_id_entry_init()
170 ("ctx %p non-null pgtbl_obj", ctx)); in ctx_id_entry_init()
173 ctx_root = iommu_pgalloc(domain->pgtbl_obj, 0, in ctx_id_entry_init()
198 if ((dmar->hw_cap & DMAR_CAP_CM) == 0 && !force) in dmar_flush_for_ctx_entry()
200 if (dmar->qi_enabled) { in dmar_flush_for_ctx_entry()
202 if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force) in dmar_flush_for_ctx_entry()
207 if (error == 0 && ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force)) in dmar_flush_for_ctx_entry()
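
dmar_flush_for_ctx_entry's logic, restated: with caching mode off and
no forced flush the hardware does not cache not-present entries, so a
newly written context entry needs no invalidation; otherwise the
context cache is flushed, and the IOTLB as well when device IOTLBs may
exist or the flush is forced. A boolean sketch of that decision
(illustrative only; the real code issues queued or register-based
invalidations):

#include <stdbool.h>
#include <stdio.h>

static const char *
flush_action(bool caching_mode, bool dev_iotlb, bool force)
{
        if (!caching_mode && !force)
                return ("no flush needed");
        return (dev_iotlb || force ?
            "flush context cache + IOTLB" : "flush context cache");
}

int
main(void)
{
        printf("%s\n", flush_action(false, true, false));
        printf("%s\n", flush_action(true, true, false));
        return (0);
}
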
233 * VT-d specification requires that the start of an in domain_init_rmrr()
234 * RMRR entry is 4k-aligned. Buggy BIOSes put in domain_init_rmrr()
241 start = entry->start; in domain_init_rmrr()
242 end = entry->end; in domain_init_rmrr()
244 printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n", in domain_init_rmrr()
245 domain->iodom.iommu->unit, bus, slot, func, in domain_init_rmrr()
247 entry->start = trunc_page(start); in domain_init_rmrr()
248 entry->end = round_page(end); in domain_init_rmrr()
249 if (entry->start == entry->end) { in domain_init_rmrr()
257 domain->iodom.iommu->unit, start, end); in domain_init_rmrr()
259 entry->end += IOMMU_PAGE_SIZE * 0x20; in domain_init_rmrr()
261 size = OFF_TO_IDX(entry->end - entry->start); in domain_init_rmrr()
264 ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i, in domain_init_rmrr()
 271 * Non-failed RMRR entries are owned by context rb tree. in domain_init_rmrr()
276 if (error1 == 0 && entry->end != entry->start) { in domain_init_rmrr()
277 IOMMU_LOCK(domain->iodom.iommu); in domain_init_rmrr()
278 domain->refs++; /* XXXKIB prevent free */ in domain_init_rmrr()
279 domain->iodom.flags |= IOMMU_DOMAIN_RMRR; in domain_init_rmrr()
280 IOMMU_UNLOCK(domain->iodom.iommu); in domain_init_rmrr()
288 domain->iodom.iommu->unit, start, end, in domain_init_rmrr()
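
The RMRR fixup above widens misaligned BIOS-supplied ranges to page
boundaries and drops entries that collapse to empty. A standalone
illustration, assuming 4 KiB pages and local trunc_page/round_page
definitions:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE       4096ull
#define PAGE_MASK       (PAGE_SIZE - 1)
#define trunc_page(x)   ((x) & ~PAGE_MASK)
#define round_page(x)   (((x) + PAGE_MASK) & ~PAGE_MASK)

int
main(void)
{
        /* A sloppy RMRR [0x1234, 0x1f00) grows to [0x1000, 0x2000). */
        assert(trunc_page(0x1234) == 0x1000);
        assert(round_page(0x1f00) == 0x2000);
        /* An already-empty range stays empty and is skipped. */
        assert(trunc_page(0x2000) == round_page(0x2000));
        return (0);
}
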
303 * PCI memory address space is shared between memory-mapped devices (MMIO) and
 304 * host memory (which may be remapped by an IOMMU). Device accesses to an
 305 * address within a memory aperture in a PCIe root port will be treated as
 306 * peer-to-peer and not forwarded to an IOMMU. To avoid this, reserve the
 307 * address space of the root port's memory apertures in the address space used
 308 * by the IOMMU for remapping.
330 device_printf(dev, "DMAR reserve [%#jx-%#jx] (error %d)\n", in dmar_reserve_pci_regions()
353 device_printf(dev, "DMAR reserve [%#jx-%#jx] " in dmar_reserve_pci_regions()
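
A hypothetical sketch of the reservation idea described in the comment
above: each MMIO aperture is carved out of the DMA address allocator so
that a remapped buffer can never alias a peer device's BAR. The
gas_reserve() helper and the aperture list are invented for the
illustration; the driver itself walks the PCI hierarchy through the bus
resource code:

#include <stdint.h>
#include <stdio.h>

struct aperture {
        uint64_t start;
        uint64_t end;   /* exclusive */
};

/* Pretend allocator hook: mark [start, end) unavailable for DMA. */
static void
gas_reserve(uint64_t start, uint64_t end)
{
        printf("reserved [%#jx-%#jx]\n", (uintmax_t)start,
            (uintmax_t)(end - 1));
}

int
main(void)
{
        struct aperture bars[] = {
                { 0xe0000000, 0xe0010000 },     /* 64 KiB MMIO BAR */
                { 0xfed00000, 0xfed01000 },     /* 4 KiB MMIO window */
        };
        size_t i;

        for (i = 0; i < sizeof(bars) / sizeof(bars[0]); i++)
                gas_reserve(bars[i].start, bars[i].end);
        return (0);
}
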
370 id = alloc_unr(dmar->domids); in dmar_domain_alloc()
371 if (id == -1) in dmar_domain_alloc()
376 domain->domain = id; in dmar_domain_alloc()
377 LIST_INIT(&iodom->contexts); in dmar_domain_alloc()
380 domain->dmar = dmar; in dmar_domain_alloc()
388 domain->iodom.end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR; in dmar_domain_alloc()
389 mgaw = dmar_maxaddr2mgaw(dmar, domain->iodom.end, !id_mapped); in dmar_domain_alloc()
395 domain->iodom.end = 1ULL << (domain->agaw - 1); in dmar_domain_alloc()
400 if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) { in dmar_domain_alloc()
401 domain->pgtbl_obj = dmar_get_idmap_pgtbl(domain, in dmar_domain_alloc()
402 domain->iodom.end); in dmar_domain_alloc()
404 domain->iodom.flags |= IOMMU_DOMAIN_IDMAP; in dmar_domain_alloc()
411 0xfeefffff + 1, &iodom->msi_entry); in dmar_domain_alloc()
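
A worked example of the address-limit arithmetic in dmar_domain_alloc:
for a remapped (non-identity) domain the end of the usable space is
1 << (agaw - 1), so a 48-bit adjusted guest address width yields
0x800000000000 and a 39-bit one yields 0x4000000000:

#include <assert.h>

int
main(void)
{
        assert((1ull << (48 - 1)) == 0x800000000000ull);
        assert((1ull << (39 - 1)) == 0x4000000000ull);
        return (0);
}
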
425 struct dmar_ctx *ctx; in dmar_ctx_alloc() local
427 ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO); in dmar_ctx_alloc()
428 ctx->context.domain = DOM2IODOM(domain); in dmar_ctx_alloc()
429 ctx->context.tag = malloc(sizeof(struct bus_dma_tag_iommu), in dmar_ctx_alloc()
431 ctx->context.rid = rid; in dmar_ctx_alloc()
432 ctx->context.refs = 1; in dmar_ctx_alloc()
433 return (ctx); in dmar_ctx_alloc()
437 dmar_ctx_link(struct dmar_ctx *ctx) in dmar_ctx_link() argument
441 domain = CTX2DOM(ctx); in dmar_ctx_link()
442 IOMMU_ASSERT_LOCKED(domain->iodom.iommu); in dmar_ctx_link()
443 KASSERT(domain->refs >= domain->ctx_cnt, in dmar_ctx_link()
444 ("dom %p ref underflow %d %d", domain, domain->refs, in dmar_ctx_link()
445 domain->ctx_cnt)); in dmar_ctx_link()
446 domain->refs++; in dmar_ctx_link()
447 domain->ctx_cnt++; in dmar_ctx_link()
448 LIST_INSERT_HEAD(&domain->iodom.contexts, &ctx->context, link); in dmar_ctx_link()
452 dmar_ctx_unlink(struct dmar_ctx *ctx) in dmar_ctx_unlink() argument
456 domain = CTX2DOM(ctx); in dmar_ctx_unlink()
457 IOMMU_ASSERT_LOCKED(domain->iodom.iommu); in dmar_ctx_unlink()
458 KASSERT(domain->refs > 0, in dmar_ctx_unlink()
459 ("domain %p ctx dtr refs %d", domain, domain->refs)); in dmar_ctx_unlink()
460 KASSERT(domain->ctx_cnt >= domain->refs, in dmar_ctx_unlink()
461 ("domain %p ctx dtr refs %d ctx_cnt %d", domain, in dmar_ctx_unlink()
462 domain->refs, domain->ctx_cnt)); in dmar_ctx_unlink()
463 domain->refs--; in dmar_ctx_unlink()
464 domain->ctx_cnt--; in dmar_ctx_unlink()
465 LIST_REMOVE(&ctx->context, link); in dmar_ctx_unlink()
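
dmar_ctx_link and dmar_ctx_unlink keep the domain's reference count and
context count moving in lockstep, with extra references (an RMRR, for
instance) allowed to pin the domain beyond its contexts. A minimal
model of those counters; the asserts echo the KASSERTs above, and all
names are local to the sketch:

#include <assert.h>

struct dom {
        int refs;       /* all references, incl. per-context ones */
        int ctx_cnt;    /* linked contexts only */
};

static void
ctx_link(struct dom *d)
{
        assert(d->refs >= d->ctx_cnt);  /* no reference underflow */
        d->refs++;
        d->ctx_cnt++;
}

static void
ctx_unlink(struct dom *d)
{
        assert(d->refs > 0);
        d->refs--;
        d->ctx_cnt--;
}

int
main(void)
{
        struct dom d = { 1, 0 };        /* allocation holds one ref */

        ctx_link(&d);
        assert(d.refs == 2 && d.ctx_cnt == 1);
        ctx_unlink(&d);
        assert(d.refs == 1 && d.ctx_cnt == 0);
        return (0);
}
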
476 KASSERT(TAILQ_EMPTY(&domain->iodom.unload_entries), in dmar_domain_destroy()
478 KASSERT(LIST_EMPTY(&iodom->contexts), in dmar_domain_destroy()
480 KASSERT(domain->ctx_cnt == 0, in dmar_domain_destroy()
481 ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt)); in dmar_domain_destroy()
482 KASSERT(domain->refs == 0, in dmar_domain_destroy()
483 ("destroying dom %p with refs %d", domain, domain->refs)); in dmar_domain_destroy()
484 if ((domain->iodom.flags & IOMMU_DOMAIN_GAS_INITED) != 0) { in dmar_domain_destroy()
489 if ((domain->iodom.flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) { in dmar_domain_destroy()
490 if (domain->pgtbl_obj != NULL) in dmar_domain_destroy()
496 free_unr(dmar->domids, domain->domain); in dmar_domain_destroy()
506 struct dmar_ctx *ctx, *ctx1; in dmar_get_ctx_for_dev1() local
527 ("iommu%d pci%d:%d:%d get_ctx for buswide", dmar->iommu.unit, bus, in dmar_get_ctx_for_dev1()
529 ctx = dmar_find_ctx_locked(dmar, rid); in dmar_get_ctx_for_dev1()
531 if (ctx == NULL) { in dmar_get_ctx_for_dev1()
563 ctx = dmar_find_ctx_locked(dmar, rid); in dmar_get_ctx_for_dev1()
564 if (ctx == NULL) { in dmar_get_ctx_for_dev1()
566 ctx = ctx1; in dmar_get_ctx_for_dev1()
567 dmar_ctx_link(ctx); in dmar_get_ctx_for_dev1()
568 ctx->context.tag->owner = dev; in dmar_get_ctx_for_dev1()
569 iommu_device_tag_init(CTX2IOCTX(ctx), dev); in dmar_get_ctx_for_dev1()
576 if (LIST_EMPTY(&dmar->domains)) in dmar_get_ctx_for_dev1()
578 LIST_INSERT_HEAD(&dmar->domains, domain, link); in dmar_get_ctx_for_dev1()
579 ctx_id_entry_init(ctx, ctxp, false, bus); in dmar_get_ctx_for_dev1()
583 "agaw %d %s-mapped\n", in dmar_get_ctx_for_dev1()
584 dmar->iommu.unit, dmar->segment, bus, slot, in dmar_get_ctx_for_dev1()
585 func, rid, domain->domain, domain->mgaw, in dmar_get_ctx_for_dev1()
586 domain->agaw, id_mapped ? "id" : "re"); in dmar_get_ctx_for_dev1()
594 domain = CTX2DOM(ctx); in dmar_get_ctx_for_dev1()
595 ctx->context.refs++; /* tag referenced us */ in dmar_get_ctx_for_dev1()
598 domain = CTX2DOM(ctx); in dmar_get_ctx_for_dev1()
599 if (ctx->context.tag->owner == NULL) in dmar_get_ctx_for_dev1()
600 ctx->context.tag->owner = dev; in dmar_get_ctx_for_dev1()
601 ctx->context.refs++; /* tag referenced us */ in dmar_get_ctx_for_dev1()
606 dmar_free_ctx_locked(dmar, ctx); in dmar_get_ctx_for_dev1()
616 if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) { in dmar_get_ctx_for_dev1()
620 dmar->iommu.unit); in dmar_get_ctx_for_dev1()
625 dmar->iommu.unit); in dmar_get_ctx_for_dev1()
629 "error %d\n", dmar->iommu.unit, error); in dmar_get_ctx_for_dev1()
630 dmar_free_ctx_locked(dmar, ctx); in dmar_get_ctx_for_dev1()
637 return (ctx); in dmar_get_ctx_for_dev1()
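
dmar_get_ctx_for_dev1 follows a classic unlock-allocate-recheck
pattern: the RID is looked up under the lock, the lock is dropped for
the sleeping allocation, and the lookup is repeated after relocking in
case another thread installed a context in the meantime. A generic
pthreads sketch of the pattern (not the kernel's mutexes):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static void *cached;            /* stands in for the per-RID lookup */

static void *
get_obj(void)
{
        void *obj, *obj1;

        pthread_mutex_lock(&lk);
        obj = cached;
        if (obj == NULL) {
                pthread_mutex_unlock(&lk);
                obj1 = malloc(64);      /* may sleep: lock dropped */
                pthread_mutex_lock(&lk);
                obj = cached;
                if (obj == NULL) {
                        cached = obj = obj1;    /* we won the race */
                        obj1 = NULL;
                }
                if (obj1 != NULL)
                        free(obj1);     /* lost the race: discard ours */
        }
        pthread_mutex_unlock(&lk);
        return (obj);
}

int
main(void)
{
        return (get_obj() != NULL ? 0 : 1);
}
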
666 dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx) in dmar_move_ctx_to_domain() argument
674 dmar = domain->dmar; in dmar_move_ctx_to_domain()
675 old_domain = CTX2DOM(ctx); in dmar_move_ctx_to_domain()
678 KASSERT(old_domain->iodom.iommu == domain->iodom.iommu, in dmar_move_ctx_to_domain()
680 domain->domain, old_domain->iodom.iommu->unit, in dmar_move_ctx_to_domain()
681 domain->iodom.iommu->unit)); in dmar_move_ctx_to_domain()
684 ctxp = dmar_map_ctx_entry(ctx, &sf); in dmar_move_ctx_to_domain()
686 dmar_ctx_unlink(ctx); in dmar_move_ctx_to_domain()
687 ctx->context.domain = &domain->iodom; in dmar_move_ctx_to_domain()
688 dmar_ctx_link(ctx); in dmar_move_ctx_to_domain()
689 ctx_id_entry_init(ctx, ctxp, true, PCI_BUSMAX + 100); in dmar_move_ctx_to_domain()
693 printf("dmar%d rid %x domain %d->%d %s-mapped\n", in dmar_move_ctx_to_domain()
694 dmar->iommu.unit, ctx->context.rid, old_domain->domain, in dmar_move_ctx_to_domain()
695 domain->domain, (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 ? in dmar_move_ctx_to_domain()
707 KASSERT(domain->refs >= 1, in dmar_unref_domain_locked()
708 ("dmar %d domain %p refs %u", dmar->iommu.unit, domain, in dmar_unref_domain_locked()
709 domain->refs)); in dmar_unref_domain_locked()
710 KASSERT(domain->refs > domain->ctx_cnt, in dmar_unref_domain_locked()
711 ("dmar %d domain %p refs %d ctx_cnt %d", dmar->iommu.unit, domain, in dmar_unref_domain_locked()
712 domain->refs, domain->ctx_cnt)); in dmar_unref_domain_locked()
714 if (domain->refs > 1) { in dmar_unref_domain_locked()
715 domain->refs--; in dmar_unref_domain_locked()
720 KASSERT((domain->iodom.flags & IOMMU_DOMAIN_RMRR) == 0, in dmar_unref_domain_locked()
726 taskqueue_drain(dmar->iommu.delayed_taskqueue, in dmar_unref_domain_locked()
727 &domain->iodom.unload_task); in dmar_unref_domain_locked()
732 dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx) in dmar_free_ctx_locked() argument
739 KASSERT(ctx->context.refs >= 1, in dmar_free_ctx_locked()
740 ("dmar %p ctx %p refs %u", dmar, ctx, ctx->context.refs)); in dmar_free_ctx_locked()
746 if (ctx->context.refs > 1) { in dmar_free_ctx_locked()
747 ctx->context.refs--; in dmar_free_ctx_locked()
752 KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0, in dmar_free_ctx_locked()
753 ("lost ref on disabled ctx %p", ctx)); in dmar_free_ctx_locked()
762 ctxp = dmar_map_ctx_entry(ctx, &sf); in dmar_free_ctx_locked()
764 KASSERT(ctx->context.refs >= 1, in dmar_free_ctx_locked()
765 ("dmar %p ctx %p refs %u", dmar, ctx, ctx->context.refs)); in dmar_free_ctx_locked()
771 if (ctx->context.refs > 1) { in dmar_free_ctx_locked()
772 ctx->context.refs--; in dmar_free_ctx_locked()
779 KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0, in dmar_free_ctx_locked()
780 ("lost ref on disabled ctx %p", ctx)); in dmar_free_ctx_locked()
786 dmar_pte_clear(&ctxp->ctx1); in dmar_free_ctx_locked()
787 ctxp->ctx2 = 0; in dmar_free_ctx_locked()
790 if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) { in dmar_free_ctx_locked()
791 if (dmar->qi_enabled) in dmar_free_ctx_locked()
797 domain = CTX2DOM(ctx); in dmar_free_ctx_locked()
798 dmar_ctx_unlink(ctx); in dmar_free_ctx_locked()
799 free(ctx->context.tag, M_DMAR_CTX); in dmar_free_ctx_locked()
800 free(ctx, M_DMAR_CTX); in dmar_free_ctx_locked()
812 struct iommu_ctx *ctx; in dmar_find_ctx_locked() local
816 LIST_FOREACH(domain, &dmar->domains, link) { in dmar_find_ctx_locked()
817 LIST_FOREACH(ctx, &domain->iodom.contexts, link) { in dmar_find_ctx_locked()
818 if (ctx->rid == rid) in dmar_find_ctx_locked()
819 return (IOCTX2CTX(ctx)); in dmar_find_ctx_locked()
836 domain = IODOM2DOM(entry->domain); in dmar_domain_unload_entry()
844 if (unit->qi_enabled) { in dmar_domain_unload_entry()
847 iommu_qi_invalidate_locked(&domain->iodom, entry, in dmar_domain_unload_entry()
851 iommu_qi_invalidate_sync(&domain->iodom, entry->start, in dmar_domain_unload_entry()
852 entry->end - entry->start, cansleep); in dmar_domain_unload_entry()
856 dmar_flush_iotlb_sync(domain, entry->start, entry->end - in dmar_domain_unload_entry()
857 entry->start); in dmar_domain_unload_entry()
869 return (domain->batch_no++ % iommu_qi_batch_coalesce == 0); in dmar_domain_unload_emit_wait()
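
dmar_domain_unload_emit_wait requests a completion-wait descriptor only
on every iommu_qi_batch_coalesce-th entry, amortizing one
synchronization over a batch of invalidations. A standalone
illustration, with the coalesce factor assumed to be 16 purely for the
example:

#include <stdio.h>

int
main(void)
{
        const int coalesce = 16;        /* stand-in for the tunable */
        int batch_no = 0, i;

        for (i = 0; i < 40; i++) {
                if (batch_no++ % coalesce == 0)
                        printf("entry %d: emit wait descriptor\n", i);
        }
        return (0);     /* waits at entries 0, 16 and 32 */
}
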
885 KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0, in dmar_domain_unload()
887 error = iodom->ops->unmap(iodom, entry, in dmar_domain_unload()
890 if (!unit->qi_enabled) { in dmar_domain_unload()
891 dmar_flush_iotlb_sync(domain, entry->start, in dmar_domain_unload()
892 entry->end - entry->start); in dmar_domain_unload()
900 KASSERT(unit->qi_enabled, ("loaded entry left")); in dmar_domain_unload()
904 iommu_qi_invalidate_locked(&domain->iodom, entry, in dmar_domain_unload()
911 dmar_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid, in dmar_get_ctx() argument
917 dmar = IOMMU2DMAR(iommu); in dmar_get_ctx()
923 dmar_free_ctx_locked_method(struct iommu_unit *iommu, in dmar_free_ctx_locked_method() argument
927 struct dmar_ctx *ctx; in dmar_free_ctx_locked_method() local
929 dmar = IOMMU2DMAR(iommu); in dmar_free_ctx_locked_method()
930 ctx = IOCTX2CTX(context); in dmar_free_ctx_locked_method()
931 dmar_free_ctx_locked(dmar, ctx); in dmar_free_ctx_locked_method()