Lines Matching +full:iommu +full:-ctx

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
59 #include <dev/iommu/iommu.h>
63 #include <machine/iommu.h>
64 #include <dev/iommu/busdma_iommu.h>
68 * IOMMU units from Intel VT-d.
78 static const char iommu_str[] = "iommu"; in iommu_bus_dma_is_dev_disabled()
110 * the IOMMU unit and used for page table lookup. PCI bridges may take
114 * domain, and must collectively be assigned to use either IOMMU or
139 * host port to find the translating bridge nearest the IOMMU in iommu_get_requester()
147 ("iommu_get_requester(%s): non-pci parent %s for %s", in iommu_get_requester()
157 * so pcib isn't a PCI->PCI bridge but rather a host in iommu_get_requester()
170 * unlikely) to have a PCI->PCIe bridge in iommu_get_requester()
177 * requester by the IOMMU unit. Check whether the in iommu_get_requester()
204 * PCIe->PCI bridge. Assume that the in iommu_get_requester()
210 * the bridge is PCIe->PCI-X, and the in iommu_get_requester()
215 * non-taken transactions. in iommu_get_requester()
223 * conventional PCI->PCI bridge, which in iommu_get_requester()
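
The fragments above come from iommu_get_requester(), which walks the newbus hierarchy from the target device toward the host port to find the identity the IOMMU actually sees: PCIe devices are visible under their own RID, while conventional PCI devices behind a PCIe->PCI(-X) bridge appear under the bridge's (or its secondary bus's) requester ID. A minimal sketch of that walk follows, using standard FreeBSD PCI KPIs (device_get_parent(9), pci_get_rid(9), pci_find_cap(9)); the secondary-bus RID rules and the quirk handling for bridges that hide their PCIe capability, both visible in the real code, are omitted.

/*
 * Sketch only: walk toward the host port, updating the requester ID
 * when a non-PCIe device sits behind a translating bridge.  Error
 * handling and bridge quirks from iommu_get_requester() are omitted.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

static device_t
requester_walk_sketch(device_t dev, uint16_t *rid)
{
	device_t l, pci, pcib, pcip, requester;
	int cap;

	l = requester = dev;
	*rid = pci_get_rid(dev);
	for (;;) {
		pci = device_get_parent(l);	/* the pciN bus */
		pcib = device_get_parent(pci);	/* bridge or host port */
		pcip = device_get_parent(pcib);
		if (pcip == NULL ||
		    device_get_devclass(pcip) != devclass_find("pci"))
			break;		/* pcib is the host port; done */
		if (pci_find_cap(l, PCIY_EXPRESS, &cap) != 0) {
			/*
			 * Conventional PCI device: the translating
			 * bridge becomes the requester the IOMMU sees.
			 */
			requester = pcib;
			*rid = pci_get_rid(pcib);
		}
		l = pcib;	/* continue toward the host port */
	}
	return (requester);
}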
239 struct iommu_ctx *ctx; in iommu_instantiate_ctx() local
246 * If the user requested the IOMMU disabled for the device, we in iommu_instantiate_ctx()
247 * cannot disable the IOMMU unit, due to the possibility of other in iommu_instantiate_ctx()
248 * devices on the same IOMMU unit still requiring translation. in iommu_instantiate_ctx()
255 ctx = iommu_get_ctx(unit, requester, rid, disabled, rmrr); in iommu_instantiate_ctx()
256 if (ctx == NULL) in iommu_instantiate_ctx()
264 if ((ctx->flags & IOMMU_CTX_DISABLED) == 0) { in iommu_instantiate_ctx()
265 ctx->flags |= IOMMU_CTX_DISABLED; in iommu_instantiate_ctx()
268 iommu_free_ctx_locked(unit, ctx); in iommu_instantiate_ctx()
270 ctx = NULL; in iommu_instantiate_ctx()
272 return (ctx); in iommu_instantiate_ctx()
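
Per the comment fragments above, a per-device "disable" request cannot power down translation for the whole unit, so the context is still created (with disabled passed down) and only flagged IOMMU_CTX_DISABLED afterwards. The decision itself comes from iommu_bus_dma_is_dev_disabled(); a sketch of the kind of kenv-based lookup it performs is below. The knob name and the "bounce" value are assumptions for illustration, not the exact strings used by the driver.

/*
 * Sketch: per-device disable decision via a loader tunable.  The
 * "hw.busdma.pci%d.%d.%d.%d" name is hypothetical; see
 * iommu_bus_dma_is_dev_disabled() for the real lookup.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

static bool
dev_iommu_disabled_sketch(int dom, int bus, int slot, int func)
{
	char knob[64], *val;
	bool disabled;

	snprintf(knob, sizeof(knob), "hw.busdma.pci%d.%d.%d.%d",
	    dom, bus, slot, func);
	val = kern_getenv(knob);
	disabled = (val != NULL && strcmp(val, "bounce") == 0);
	freeenv(val);
	return (disabled);
}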
281 /* Not in scope of any IOMMU? */ in iommu_get_dev_ctx()
284 if (!unit->dma_enabled) in iommu_get_dev_ctx()
294 struct iommu_ctx *ctx; in iommu_get_dma_tag() local
297 ctx = iommu_get_dev_ctx(child); in iommu_get_dma_tag()
298 if (ctx == NULL) in iommu_get_dma_tag()
301 res = (bus_dma_tag_t)ctx->tag; in iommu_get_dma_tag()
324 "iommu%d pci%d:%d:%d requested buswide busdma\n", in bus_dma_iommu_set_buswide()
325 unit->unit, busno, slot, func); in bus_dma_iommu_set_buswide()
339 unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |= in iommu_set_buswide_ctx()
349 return ((unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] & in iommu_is_buswide_ctx()
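
The two buswide helpers above treat unit->buswide_ctxs as a flat bitmap with one bit per PCI bus: busno / NBBY / sizeof(uint32_t) selects the 32-bit word and busno % (NBBY * sizeof(uint32_t)) the bit within it (NBBY is the kernel's bits-per-byte constant). The same arithmetic as standalone C, assuming 256 buses per PCI segment:

/* Standalone sketch of the buswide_ctxs bitmap arithmetic. */
#include <limits.h>	/* CHAR_BIT stands in for the kernel's NBBY */
#include <stdbool.h>
#include <stdint.h>

#define	BUSMAX	256	/* buses per PCI segment */
static uint32_t buswide[BUSMAX / CHAR_BIT / sizeof(uint32_t)];

static void
set_buswide(unsigned busno)
{
	buswide[busno / (CHAR_BIT * sizeof(uint32_t))] |=
	    1U << (busno % (CHAR_BIT * sizeof(uint32_t)));
}

static bool
is_buswide(unsigned busno)
{
	return ((buswide[busno / (CHAR_BIT * sizeof(uint32_t))] &
	    (1U << (busno % (CHAR_BIT * sizeof(uint32_t))))) != 0);
}

Marking a bus buswide makes every function on it share one context, which matches the "requested buswide busdma" message printed above.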
353 static MALLOC_DEFINE(M_IOMMU_DMAMAP, "iommu_dmamap", "IOMMU DMA Map");
369 &((struct bus_dma_tag_iommu *)parent)->common : NULL, alignment, in iommu_bus_dma_tag_create()
377 newtag->common.impl = &bus_dma_iommu_impl; in iommu_bus_dma_tag_create()
378 newtag->ctx = oldtag->ctx; in iommu_bus_dma_tag_create()
379 newtag->owner = oldtag->owner; in iommu_bus_dma_tag_create()
384 __func__, newtag, (newtag != NULL ? newtag->common.flags : 0), in iommu_bus_dma_tag_create()
400 struct iommu_unit *iommu; in iommu_bus_dma_tag_destroy() local
401 struct iommu_ctx *ctx; in iommu_bus_dma_tag_destroy() local
408 if (dmat->map_count != 0) { in iommu_bus_dma_tag_destroy()
412 ctx = dmat->ctx; in iommu_bus_dma_tag_destroy()
413 if (dmat == ctx->tag) { in iommu_bus_dma_tag_destroy()
414 iommu = ctx->domain->iommu; in iommu_bus_dma_tag_destroy()
415 IOMMU_LOCK(iommu); in iommu_bus_dma_tag_destroy()
416 iommu_free_ctx_locked(iommu, dmat->ctx); in iommu_bus_dma_tag_destroy()
418 free(dmat->segments, M_IOMMU_DMAMAP); in iommu_bus_dma_tag_destroy()
441 DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO); in iommu_bus_dmamap_create()
446 if (tag->segments == NULL) { in iommu_bus_dmamap_create()
447 tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) * in iommu_bus_dmamap_create()
448 tag->common.nsegments, M_IOMMU_DMAMAP, in iommu_bus_dmamap_create()
449 DOMAINSET_PREF(tag->common.domain), M_NOWAIT); in iommu_bus_dmamap_create()
450 if (tag->segments == NULL) { in iommu_bus_dmamap_create()
457 TAILQ_INIT(&map->map_entries); in iommu_bus_dmamap_create()
458 map->tag = tag; in iommu_bus_dmamap_create()
459 map->locked = true; in iommu_bus_dmamap_create()
460 map->cansleep = false; in iommu_bus_dmamap_create()
461 tag->map_count++; in iommu_bus_dmamap_create()
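
For context, this is the consumer side of the functions shown here: a driver never names the IOMMU explicitly, it simply builds tags under bus_get_dma_tag(9), and when the parent is an IOMMU context tag (as wired up in iommu_get_dma_tag() above), bus_dmamap_create() lands in the function whose fragments precede this note. A driver-side sketch with illustrative parameter values:

/*
 * Driver-side sketch (illustrative values): the parent tag returned
 * by bus_get_dma_tag() routes these calls through bus_dma_iommu_impl
 * when the device is behind an enabled IOMMU unit.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

static int
driver_dma_setup_sketch(device_t dev, bus_dma_tag_t *tagp, bus_dmamap_t *mapp)
{
	int error;

	error = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg (deprecated) */
	    65536, 1, 65536,		/* maxsize, nsegments, maxsegsz */
	    0, NULL, NULL, tagp);	/* flags, lockfunc, lockarg */
	if (error != 0)
		return (error);
	return (bus_dmamap_create(*tagp, 0, mapp));
}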
477 if (!TAILQ_EMPTY(&map->map_entries)) { in iommu_bus_dmamap_destroy()
484 tag->map_count--; in iommu_bus_dmamap_destroy()
510 if (tag->common.maxsize < PAGE_SIZE && in iommu_bus_dmamem_alloc()
511 tag->common.alignment <= tag->common.maxsize && in iommu_bus_dmamem_alloc()
513 *vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF, in iommu_bus_dmamem_alloc()
514 DOMAINSET_PREF(tag->common.domain), mflags); in iommu_bus_dmamem_alloc()
515 map->flags |= BUS_DMAMAP_IOMMU_MALLOC; in iommu_bus_dmamem_alloc()
518 DOMAINSET_PREF(tag->common.domain), tag->common.maxsize, in iommu_bus_dmamem_alloc()
520 map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC; in iommu_bus_dmamem_alloc()
539 if ((map->flags & BUS_DMAMAP_IOMMU_MALLOC) != 0) { in iommu_bus_dmamem_free()
541 map->flags &= ~BUS_DMAMAP_IOMMU_MALLOC; in iommu_bus_dmamem_free()
543 KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0, in iommu_bus_dmamem_free()
545 kmem_free(vaddr, tag->common.maxsize); in iommu_bus_dmamem_free()
546 map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC; in iommu_bus_dmamem_free()
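
The alloc/free pair above encodes a simple policy: buffers smaller than a page whose alignment requirement is satisfied by malloc(9)'s size-implied alignment come from malloc_domainset(), anything else from kmem, and a flag on the map (BUS_DMAMAP_IOMMU_MALLOC versus BUS_DMAMAP_IOMMU_KMEM_ALLOC) remembers which allocator the memory must go back to. The predicate, isolated as a sketch; conditions not visible in the matched lines (such as any BUS_DMA_ZERO or memory-attribute checks) are omitted:

/* Sketch of the allocator-selection predicate in iommu_bus_dmamem_alloc(). */
#include <sys/param.h>
#include <machine/bus.h>

static bool
use_malloc_sketch(bus_size_t maxsize, bus_size_t alignment)
{
	/* malloc(9) gives power-of-two alignment up to the block size. */
	return (maxsize < PAGE_SIZE && alignment <= maxsize);
}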
558 struct iommu_ctx *ctx; in iommu_bus_dmamap_load_something1() local
566 segs = tag->segments; in iommu_bus_dmamap_load_something1()
567 ctx = tag->ctx; in iommu_bus_dmamap_load_something1()
568 domain = ctx->domain; in iommu_bus_dmamap_load_something1()
576 if (seg >= tag->common.nsegments) { in iommu_bus_dmamap_load_something1()
580 buflen1 = buflen > tag->common.maxsegsz ? in iommu_bus_dmamap_load_something1()
581 tag->common.maxsegsz : buflen; in iommu_bus_dmamap_load_something1()
587 gas_flags = map->cansleep ? IOMMU_MF_CANWAIT : 0; in iommu_bus_dmamap_load_something1()
588 if (seg + 1 < tag->common.nsegments) in iommu_bus_dmamap_load_something1()
591 error = iommu_gas_map(domain, &tag->common, buflen1, in iommu_bus_dmamap_load_something1()
596 if (buflen1 > entry->end - entry->start - offset) in iommu_bus_dmamap_load_something1()
597 buflen1 = entry->end - entry->start - offset; in iommu_bus_dmamap_load_something1()
599 KASSERT(vm_addr_align_ok(entry->start + offset, in iommu_bus_dmamap_load_something1()
600 tag->common.alignment), in iommu_bus_dmamap_load_something1()
601 ("alignment failed: ctx %p start 0x%jx offset %x " in iommu_bus_dmamap_load_something1()
602 "align 0x%jx", ctx, (uintmax_t)entry->start, offset, in iommu_bus_dmamap_load_something1()
603 (uintmax_t)tag->common.alignment)); in iommu_bus_dmamap_load_something1()
604 KASSERT(entry->end <= tag->common.lowaddr || in iommu_bus_dmamap_load_something1()
605 entry->start >= tag->common.highaddr, in iommu_bus_dmamap_load_something1()
606 ("entry placement failed: ctx %p start 0x%jx end 0x%jx " in iommu_bus_dmamap_load_something1()
607 "lowaddr 0x%jx highaddr 0x%jx", ctx, in iommu_bus_dmamap_load_something1()
608 (uintmax_t)entry->start, (uintmax_t)entry->end, in iommu_bus_dmamap_load_something1()
609 (uintmax_t)tag->common.lowaddr, in iommu_bus_dmamap_load_something1()
610 (uintmax_t)tag->common.highaddr)); in iommu_bus_dmamap_load_something1()
611 KASSERT(vm_addr_bound_ok(entry->start + offset, buflen1, in iommu_bus_dmamap_load_something1()
612 tag->common.boundary), in iommu_bus_dmamap_load_something1()
613 ("boundary failed: ctx %p start 0x%jx end 0x%jx " in iommu_bus_dmamap_load_something1()
614 "boundary 0x%jx", ctx, (uintmax_t)entry->start, in iommu_bus_dmamap_load_something1()
615 (uintmax_t)entry->end, (uintmax_t)tag->common.boundary)); in iommu_bus_dmamap_load_something1()
616 KASSERT(buflen1 <= tag->common.maxsegsz, in iommu_bus_dmamap_load_something1()
617 ("segment too large: ctx %p start 0x%jx end 0x%jx " in iommu_bus_dmamap_load_something1()
618 "buflen1 0x%jx maxsegsz 0x%jx", ctx, in iommu_bus_dmamap_load_something1()
619 (uintmax_t)entry->start, (uintmax_t)entry->end, in iommu_bus_dmamap_load_something1()
620 (uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz)); in iommu_bus_dmamap_load_something1()
622 KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0, in iommu_bus_dmamap_load_something1()
626 segs[seg].ds_addr = entry->start + offset; in iommu_bus_dmamap_load_something1()
632 buflen -= buflen1; in iommu_bus_dmamap_load_something1()
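
These fragments are the heart of the load path: the buffer is consumed in chunks of at most maxsegsz bytes, each chunk gets an IOVA window from iommu_gas_map() sized to cover the intra-page offset plus the payload, and the resulting segment address is entry->start + offset; the KASSERTs then re-check the tag's alignment, lowaddr/highaddr exclusion, and boundary constraints against what the allocator returned. The chunking arithmetic, reduced to a standalone sketch in which alloc_iova() is a hypothetical stand-in for iommu_gas_map():

/* Standalone sketch of the segment-splitting loop. */
#include <stdint.h>

#define	PGSZ	4096u

struct seg_sketch { uint64_t ds_addr, ds_len; };

static int
chunk_sketch(uint64_t buf, uint64_t buflen, uint64_t maxsegsz, int nsegments,
    uint64_t (*alloc_iova)(uint64_t), struct seg_sketch *segs)
{
	uint64_t offset, buflen1, iova;
	int seg;

	for (seg = 0; buflen > 0; seg++) {
		if (seg >= nsegments)
			return (-1);		/* EFBIG in the real code */
		offset = buf & (PGSZ - 1);	/* intra-page offset */
		buflen1 = buflen > maxsegsz ? maxsegsz : buflen;
		/* The window must cover the offset plus the payload. */
		iova = alloc_iova(offset + buflen1);
		segs[seg].ds_addr = iova + offset;
		segs[seg].ds_len = buflen1;
		buf += buflen1;
		buflen -= buflen1;
	}
	return (seg);
}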
644 struct iommu_ctx *ctx; in iommu_bus_dmamap_load_something() local
649 ctx = tag->ctx; in iommu_bus_dmamap_load_something()
650 domain = ctx->domain; in iommu_bus_dmamap_load_something()
651 atomic_add_long(&ctx->loads, 1); in iommu_bus_dmamap_load_something()
658 TAILQ_CONCAT(&map->map_entries, &entries, dmamap_link); in iommu_bus_dmamap_load_something()
667 TAILQ_CONCAT(&domain->unload_entries, &entries, dmamap_link); in iommu_bus_dmamap_load_something()
669 taskqueue_enqueue(domain->iommu->delayed_taskqueue, in iommu_bus_dmamap_load_something()
670 &domain->unload_task); in iommu_bus_dmamap_load_something()
674 !map->cansleep) in iommu_bus_dmamap_load_something()
677 iommu_bus_schedule_dmamap(domain->iommu, map); in iommu_bus_dmamap_load_something()
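
When mapping runs out of resources and the caller cannot sleep (map->cansleep is false) while BUS_DMA_NOWAIT was not requested, the load is parked on the unit's taskqueue via iommu_bus_schedule_dmamap() and the caller gets EINPROGRESS, which is ordinary busdma(9) behaviour for deferrable loads. The driver-side contract, sketched:

/*
 * Sketch of the driver-side contract: a deferrable load may return
 * EINPROGRESS, in which case the callback fires later from the IOMMU
 * unit's taskqueue once the mapping succeeds.
 */
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/bus.h>
#include <machine/bus.h>

static void
load_cb_sketch(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	/* Program the device with segs[0..nseg) or handle error. */
}

static int
start_io_sketch(bus_dma_tag_t tag, bus_dmamap_t map, void *buf, size_t len)
{
	int error;

	error = bus_dmamap_load(tag, map, buf, len, load_cb_sketch, NULL,
	    0 /* deferrable; BUS_DMA_NOWAIT would get ENOMEM instead */);
	if (error == EINPROGRESS)
		return (0);	/* completion arrives via the callback */
	return (error);
}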
711 ma_cnt = OFF_TO_IDX(pend - pstart); in iommu_bus_dmamap_load_phys()
712 mflags = map->cansleep ? M_WAITOK : M_NOWAIT; in iommu_bus_dmamap_load_phys()
762 ma_cnt = OFF_TO_IDX(pend - pstart); in iommu_bus_dmamap_load_buffer()
763 mflags = map->cansleep ? M_WAITOK : M_NOWAIT; in iommu_bus_dmamap_load_buffer()
808 map->mem = *mem; in iommu_bus_dmamap_waitok()
809 map->tag = (struct bus_dma_tag_iommu *)dmat; in iommu_bus_dmamap_waitok()
810 map->callback = callback; in iommu_bus_dmamap_waitok()
811 map->callback_arg = callback_arg; in iommu_bus_dmamap_waitok()
824 if (!map->locked) { in iommu_bus_dmamap_complete()
825 KASSERT(map->cansleep, in iommu_bus_dmamap_complete()
832 (tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_LOCK); in iommu_bus_dmamap_complete()
833 map->locked = true; in iommu_bus_dmamap_complete()
837 segs = tag->segments; in iommu_bus_dmamap_complete()
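
iommu_bus_dmamap_complete() shows the other half of that deferral: a map loaded from the taskqueue is not holding the owner's lock (map->locked is false), so the tag's lockfunc is invoked with BUS_DMA_LOCK before the driver callback runs. The lockfunc is whatever the driver registered at tag creation; a conventional mutex-based one looks like this sketch:

/*
 * Sketch of a driver lockfunc as consumed by
 * iommu_bus_dmamap_complete(); pass it, with the mutex as the
 * argument, to bus_dma_tag_create().
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>

static void
driver_lockfunc_sketch(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *m = arg;

	if (op == BUS_DMA_LOCK)
		mtx_lock(m);
	else
		mtx_unlock(m);
}

FreeBSD already ships an equivalent stock helper, busdma_lock_mutex().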
842 * The limitations of the busdma KPI force the iommu to perform the actual
856 struct iommu_ctx *ctx; in iommu_bus_dmamap_unload() local
862 ctx = tag->ctx; in iommu_bus_dmamap_unload()
863 domain = ctx->domain; in iommu_bus_dmamap_unload()
864 atomic_add_long(&ctx->unloads, 1); in iommu_bus_dmamap_unload()
868 TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link); in iommu_bus_dmamap_unload()
872 TAILQ_CONCAT(&domain->unload_entries, &entries, dmamap_link); in iommu_bus_dmamap_unload()
874 taskqueue_enqueue(domain->iommu->delayed_taskqueue, in iommu_bus_dmamap_unload()
875 &domain->unload_task); in iommu_bus_dmamap_unload()
880 KASSERT(TAILQ_EMPTY(&entries), ("lazy iommu_ctx_unload %p", ctx)); in iommu_bus_dmamap_unload()
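
Per the comment before this function, the busdma KPI does not let unload sleep or fail, yet tearing down page-table pages may sleep on some platforms; hence the two strategies visible above: unmap synchronously where that is safe (with the KASSERT proving nothing was left behind) or hand the entries to the domain's deferred list. Sketched side by side; can_sleep_free is a hypothetical run-time stand-in, whereas the real file makes this choice per platform at compile time:

/* Sketch of the two unload strategies from iommu_bus_dmamap_unload(). */
if (!can_sleep_free) {
	/* Synchronous: safe where page-table teardown never sleeps. */
	THREAD_NO_SLEEPING();
	iommu_domain_unload(domain, &entries, false);
	THREAD_SLEEPING_OK();
	KASSERT(TAILQ_EMPTY(&entries), ("lazy iommu_ctx_unload %p", ctx));
} else {
	/* Deferred: queue the entries and let the taskqueue sleep. */
	IOMMU_DOMAIN_LOCK(domain);
	TAILQ_CONCAT(&domain->unload_entries, &entries, dmamap_link);
	IOMMU_DOMAIN_UNLOCK(domain);
	taskqueue_enqueue(domain->iommu->delayed_taskqueue,
	    &domain->unload_task);
}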
891 kmsan_bus_dmamap_sync(&map->kmsan_mem, op); in iommu_bus_dmamap_sync()
903 memcpy(&map->kmsan_mem, mem, sizeof(struct memdesc)); in iommu_bus_dmamap_load_kmsan()
937 while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) { in iommu_bus_task_dmamap()
938 TAILQ_REMOVE(&unit->delayed_maps, map, delay_link); in iommu_bus_task_dmamap()
940 tag = map->tag; in iommu_bus_task_dmamap()
941 map->cansleep = true; in iommu_bus_task_dmamap()
942 map->locked = false; in iommu_bus_task_dmamap()
944 &map->mem, map->callback, map->callback_arg, in iommu_bus_task_dmamap()
946 map->cansleep = false; in iommu_bus_task_dmamap()
947 if (map->locked) { in iommu_bus_task_dmamap()
948 (tag->common.lockfunc)(tag->common.lockfuncarg, in iommu_bus_task_dmamap()
951 map->locked = true; in iommu_bus_task_dmamap()
952 map->cansleep = false; in iommu_bus_task_dmamap()
962 map->locked = false; in iommu_bus_schedule_dmamap()
964 TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link); in iommu_bus_schedule_dmamap()
966 taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task); in iommu_bus_schedule_dmamap()
974 unit->dma_enabled = 0; in iommu_init_busdma()
975 error = TUNABLE_INT_FETCH("hw.iommu.dma", &unit->dma_enabled); in iommu_init_busdma()
977 TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled); in iommu_init_busdma()
978 SYSCTL_ADD_INT(&unit->sysctl_ctx, in iommu_init_busdma()
979 SYSCTL_CHILDREN(device_get_sysctl_tree(unit->dev)), in iommu_init_busdma()
980 OID_AUTO, "dma", CTLFLAG_RD, &unit->dma_enabled, 0, in iommu_init_busdma()
982 TAILQ_INIT(&unit->delayed_maps); in iommu_init_busdma()
983 TASK_INIT(&unit->dmamap_load_task, 0, iommu_bus_task_dmamap, unit); in iommu_init_busdma()
984 unit->delayed_taskqueue = taskqueue_create("iommu", M_WAITOK, in iommu_init_busdma()
985 taskqueue_thread_enqueue, &unit->delayed_taskqueue); in iommu_init_busdma()
986 taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK, in iommu_init_busdma()
987 "iommu%d busdma taskq", unit->unit); in iommu_init_busdma()
995 if (unit->delayed_taskqueue == NULL) in iommu_fini_busdma()
998 taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task); in iommu_fini_busdma()
999 taskqueue_free(unit->delayed_taskqueue); in iommu_fini_busdma()
1000 unit->delayed_taskqueue = NULL; in iommu_fini_busdma()
1010 struct iommu_ctx *ctx; in bus_dma_iommu_load_ident() local
1025 if (tc->impl != &bus_dma_iommu_impl) in bus_dma_iommu_load_ident()
1029 ctx = tag->ctx; in bus_dma_iommu_load_ident()
1030 domain = ctx->domain; in bus_dma_iommu_load_ident()
1037 entry->start = start; in bus_dma_iommu_load_ident()
1038 entry->end = start + length; in bus_dma_iommu_load_ident()
1046 ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i, in bus_dma_iommu_load_ident()
1054 TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link); in bus_dma_iommu_load_ident()
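
bus_dma_iommu_load_ident() installs a caller-specified identity window, [start, start + length) mapped 1:1, which is how RMRR-like ranges get wired into a map; since the range need not be backed by real vm_page structures, it builds an array of fake pages first. A sketch of that step, assuming page-aligned start and length:

/*
 * Sketch: fake-page array for an identity mapping, as in
 * bus_dma_iommu_load_ident().  vm_page_getfake(9) fabricates an
 * unmanaged page that just carries a physical address.
 */
#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

static vm_page_t *
fake_pages_sketch(vm_paddr_t start, vm_size_t length)
{
	vm_page_t *ma;
	vm_size_t i, count;

	count = atop(length);
	ma = malloc(sizeof(vm_page_t) * count, M_TEMP, M_WAITOK);
	for (i = 0; i < count; i++)
		ma[i] = vm_page_getfake(start + ptoa(i), VM_MEMATTR_DEFAULT);
	return (ma);	/* vm_page_putfake() each entry when done */
}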
1076 TAILQ_SWAP(&domain->unload_entries, &entries, in iommu_domain_unload_task()
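
The consumer of the deferred list is iommu_domain_unload_task(): it steals the entire pending queue under the domain lock with TAILQ_SWAP, then performs the potentially sleeping unmap with the lock dropped. The shape of the task, sketched from the fragment above; compare with the real function, which repeats until the queue stays empty:

/* Sketch of the drain task around the TAILQ_SWAP fragment above. */
static void
unload_task_sketch(void *arg, int pending __unused)
{
	struct iommu_domain *domain = arg;
	struct iommu_map_entries_tailq entries;

	TAILQ_INIT(&entries);
	IOMMU_DOMAIN_LOCK(domain);
	TAILQ_SWAP(&domain->unload_entries, &entries, iommu_map_entry,
	    dmamap_link);
	IOMMU_DOMAIN_UNLOCK(domain);
	iommu_domain_unload(domain, &entries, true);	/* may sleep */
}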
1090 domain->ops = ops; in iommu_domain_init()
1091 domain->iommu = unit; in iommu_domain_init()
1093 TASK_INIT(&domain->unload_task, 0, iommu_domain_unload_task, domain); in iommu_domain_init()
1094 RB_INIT(&domain->rb_root); in iommu_domain_init()
1095 TAILQ_INIT(&domain->unload_entries); in iommu_domain_init()
1096 mtx_init(&domain->lock, "iodom", NULL, MTX_DEF); in iommu_domain_init()
1103 mtx_destroy(&domain->lock); in iommu_domain_fini()