Lines Matching +full:parent +full:- +full:locked

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
68 * IOMMU units from Intel VT-d.
148 "iommu_get_requester(%s): NULL parent for %s\n", in iommu_get_requester()
158 "iommu_get_requester(%s): non-pci parent %s for %s\n", in iommu_get_requester()
180 * The parent of our "bridge" isn't another PCI bus, in iommu_get_requester()
181 * so pcib isn't a PCI->PCI bridge but rather a host in iommu_get_requester()
194 * unlikely) to have a PCI->PCIe bridge in iommu_get_requester()
228 * PCIe->PCI bridge. Assume that the in iommu_get_requester()
234 * the bridge is PCIe->PCI-X, and the in iommu_get_requester()
239 * non-taken transactions. in iommu_get_requester()
247 * conventional PCI->PCI bridge, which in iommu_get_requester()
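
The iommu_get_requester() comment fragments above concern finding the requester ID the IOMMU actually sees: behind a PCIe->PCI(-X) or conventional PCI->PCI bridge, transactions may carry the bridge's ID rather than the device's, so the context has to be attached to that RID (or to the whole bus). As a hedged aside, a 16-bit requester ID is the standard bus/slot/function packing sketched below; pci_rid_pack() is a made-up illustration, not the kernel's helper.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper: pack bus/slot/function into a 16-bit PCIe
 * requester ID (bus in bits 15:8, slot in 7:3, function in 2:0).
 * This mirrors the standard encoding only; it is not kernel code.
 */
static uint16_t
pci_rid_pack(uint8_t bus, uint8_t slot, uint8_t func)
{
	return (((uint16_t)bus << 8) | ((slot & 0x1f) << 3) | (func & 0x7));
}

int
main(void)
{
	/* Device 02:03.1 -> RID 0x0219. */
	printf("rid=0x%04x\n", pci_rid_pack(2, 3, 1));
	return (0);
}
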
292 if ((ctx->flags & IOMMU_CTX_DISABLED) == 0) { in iommu_instantiate_ctx()
293 ctx->flags |= IOMMU_CTX_DISABLED; in iommu_instantiate_ctx()
312 if (!unit->dma_enabled) in iommu_get_dev_ctx()
317 if (ctx != NULL && (ctx->flags & IOMMU_CTX_DISABLED) != 0) in iommu_get_dev_ctx()
332 res = (bus_dma_tag_t)ctx->tag; in iommu_get_dma_tag()
340 device_t parent; in bus_dma_iommu_set_buswide() local
343 parent = device_get_parent(dev); in bus_dma_iommu_set_buswide()
344 if (device_get_devclass(parent) != devclass_find("pci")) in bus_dma_iommu_set_buswide()
356 unit->unit, busno, slot, func); in bus_dma_iommu_set_buswide()
370 unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |= in iommu_set_buswide_ctx()
380 return ((unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] & in iommu_is_buswide_ctx()
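
iommu_set_buswide_ctx() and iommu_is_buswide_ctx() treat buswide_ctxs as a bitmap indexed by PCI bus number: busno / NBBY / sizeof(uint32_t) selects the 32-bit word (NBBY is 8, bits per byte) and busno modulo 32 selects the bit within it. A minimal userspace sketch of the same arithmetic, with a hypothetical 256-bus bitmap:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NBBY	8	/* bits per byte, as in <sys/param.h> */
#define NBUSES	256

static uint32_t buswide_ctxs[NBUSES / NBBY / sizeof(uint32_t)];

static void
set_buswide(unsigned busno)
{
	buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |=
	    1u << (busno % (NBBY * sizeof(uint32_t)));
}

static bool
is_buswide(unsigned busno)
{
	return ((buswide_ctxs[busno / NBBY / sizeof(uint32_t)] &
	    (1u << (busno % (NBBY * sizeof(uint32_t))))) != 0);
}

int
main(void)
{
	set_buswide(0x42);
	printf("bus 0x42: %d, bus 0x41: %d\n", is_buswide(0x42),
	    is_buswide(0x41));
	return (0);
}
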
390 iommu_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, in iommu_bus_dma_tag_create() argument
399 error = common_bus_dma_tag_create(parent != NULL ? in iommu_bus_dma_tag_create()
400 &((struct bus_dma_tag_iommu *)parent)->common : NULL, alignment, in iommu_bus_dma_tag_create()
407 oldtag = (struct bus_dma_tag_iommu *)parent; in iommu_bus_dma_tag_create()
408 newtag->common.impl = &bus_dma_iommu_impl; in iommu_bus_dma_tag_create()
409 newtag->ctx = oldtag->ctx; in iommu_bus_dma_tag_create()
410 newtag->owner = oldtag->owner; in iommu_bus_dma_tag_create()
415 __func__, newtag, (newtag != NULL ? newtag->common.flags : 0), in iommu_bus_dma_tag_create()
439 if (dmat->map_count != 0) { in iommu_bus_dma_tag_destroy()
443 ctx = dmat->ctx; in iommu_bus_dma_tag_destroy()
444 if (dmat == ctx->tag) { in iommu_bus_dma_tag_destroy()
445 iommu = ctx->domain->iommu; in iommu_bus_dma_tag_destroy()
447 iommu_free_ctx_locked(iommu, dmat->ctx); in iommu_bus_dma_tag_destroy()
449 free(dmat->segments, M_IOMMU_DMAMAP); in iommu_bus_dma_tag_destroy()
472 DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO); in iommu_bus_dmamap_create()
477 if (tag->segments == NULL) { in iommu_bus_dmamap_create()
478 tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) * in iommu_bus_dmamap_create()
479 tag->common.nsegments, M_IOMMU_DMAMAP, in iommu_bus_dmamap_create()
480 DOMAINSET_PREF(tag->common.domain), M_NOWAIT); in iommu_bus_dmamap_create()
481 if (tag->segments == NULL) { in iommu_bus_dmamap_create()
488 TAILQ_INIT(&map->map_entries); in iommu_bus_dmamap_create()
489 map->tag = tag; in iommu_bus_dmamap_create()
490 map->locked = true; in iommu_bus_dmamap_create()
491 map->cansleep = false; in iommu_bus_dmamap_create()
492 tag->map_count++; in iommu_bus_dmamap_create()
508 if (!TAILQ_EMPTY(&map->map_entries)) { in iommu_bus_dmamap_destroy()
515 tag->map_count--; in iommu_bus_dmamap_destroy()
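
iommu_bus_dmamap_create() allocates the map from the tag's preferred NUMA domain, lazily allocates the shared tag->segments array on the first map, and bumps tag->map_count so iommu_bus_dma_tag_destroy() can refuse to tear down a tag that still has maps (the map_count check above). A rough sketch of that lazy-init-plus-count shape, with hypothetical types and ignoring NUMA placement and M_NOWAIT semantics:

#include <stdlib.h>

struct seg { unsigned long addr, len; };

struct tag {
	int		nsegments;
	int		map_count;
	struct seg	*segments;	/* allocated on first map_create() */
};

struct map {
	struct tag	*tag;
	int		locked, cansleep;
};

static struct map *
map_create(struct tag *tag)
{
	struct map *map;

	map = calloc(1, sizeof(*map));
	if (map == NULL)
		return (NULL);
	if (tag->segments == NULL) {
		tag->segments = calloc(tag->nsegments, sizeof(struct seg));
		if (tag->segments == NULL) {
			free(map);
			return (NULL);
		}
	}
	map->tag = tag;
	map->locked = 1;	/* maps start out in the "locked" state */
	map->cansleep = 0;
	tag->map_count++;
	return (map);
}
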
541 if (tag->common.maxsize < PAGE_SIZE && in iommu_bus_dmamem_alloc()
542 tag->common.alignment <= tag->common.maxsize && in iommu_bus_dmamem_alloc()
544 *vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF, in iommu_bus_dmamem_alloc()
545 DOMAINSET_PREF(tag->common.domain), mflags); in iommu_bus_dmamem_alloc()
546 map->flags |= BUS_DMAMAP_IOMMU_MALLOC; in iommu_bus_dmamem_alloc()
549 DOMAINSET_PREF(tag->common.domain), tag->common.maxsize, in iommu_bus_dmamem_alloc()
551 map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC; in iommu_bus_dmamem_alloc()
570 if ((map->flags & BUS_DMAMAP_IOMMU_MALLOC) != 0) { in iommu_bus_dmamem_free()
572 map->flags &= ~BUS_DMAMAP_IOMMU_MALLOC; in iommu_bus_dmamem_free()
574 KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0, in iommu_bus_dmamem_free()
576 kmem_free(vaddr, tag->common.maxsize); in iommu_bus_dmamem_free()
577 map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC; in iommu_bus_dmamem_free()
597 segs = tag->segments; in iommu_bus_dmamap_load_something1()
598 ctx = tag->ctx; in iommu_bus_dmamap_load_something1()
599 domain = ctx->domain; in iommu_bus_dmamap_load_something1()
607 if (seg >= tag->common.nsegments) { in iommu_bus_dmamap_load_something1()
611 buflen1 = buflen > tag->common.maxsegsz ? in iommu_bus_dmamap_load_something1()
612 tag->common.maxsegsz : buflen; in iommu_bus_dmamap_load_something1()
618 gas_flags = map->cansleep ? IOMMU_MF_CANWAIT : 0; in iommu_bus_dmamap_load_something1()
619 if (seg + 1 < tag->common.nsegments) in iommu_bus_dmamap_load_something1()
622 error = iommu_gas_map(domain, &tag->common, buflen1, in iommu_bus_dmamap_load_something1()
627 if (buflen1 > entry->end - entry->start - offset) in iommu_bus_dmamap_load_something1()
628 buflen1 = entry->end - entry->start - offset; in iommu_bus_dmamap_load_something1()
630 KASSERT(vm_addr_align_ok(entry->start + offset, in iommu_bus_dmamap_load_something1()
631 tag->common.alignment), in iommu_bus_dmamap_load_something1()
633 "align 0x%jx", ctx, (uintmax_t)entry->start, offset, in iommu_bus_dmamap_load_something1()
634 (uintmax_t)tag->common.alignment)); in iommu_bus_dmamap_load_something1()
635 KASSERT(entry->end <= tag->common.lowaddr || in iommu_bus_dmamap_load_something1()
636 entry->start >= tag->common.highaddr, in iommu_bus_dmamap_load_something1()
639 (uintmax_t)entry->start, (uintmax_t)entry->end, in iommu_bus_dmamap_load_something1()
640 (uintmax_t)tag->common.lowaddr, in iommu_bus_dmamap_load_something1()
641 (uintmax_t)tag->common.highaddr)); in iommu_bus_dmamap_load_something1()
642 KASSERT(vm_addr_bound_ok(entry->start + offset, buflen1, in iommu_bus_dmamap_load_something1()
643 tag->common.boundary), in iommu_bus_dmamap_load_something1()
645 "boundary 0x%jx", ctx, (uintmax_t)entry->start, in iommu_bus_dmamap_load_something1()
646 (uintmax_t)entry->end, (uintmax_t)tag->common.boundary)); in iommu_bus_dmamap_load_something1()
647 KASSERT(buflen1 <= tag->common.maxsegsz, in iommu_bus_dmamap_load_something1()
650 (uintmax_t)entry->start, (uintmax_t)entry->end, in iommu_bus_dmamap_load_something1()
651 (uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz)); in iommu_bus_dmamap_load_something1()
653 KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0, in iommu_bus_dmamap_load_something1()
657 segs[seg].ds_addr = entry->start + offset; in iommu_bus_dmamap_load_something1()
663 buflen -= buflen1; in iommu_bus_dmamap_load_something1()
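
The loop in iommu_bus_dmamap_load_something1() carves each request into at most nsegments pieces of at most maxsegsz bytes and asserts that every range handed back by iommu_gas_map() honors the tag's alignment, lowaddr/highaddr window, and boundary. A hedged userspace illustration of the split and of the alignment/boundary predicates (the helpers below are stand-ins for vm_addr_align_ok()/vm_addr_bound_ok(), not the kernel versions):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Alignment: the address is a multiple of a power-of-two alignment. */
static bool
addr_align_ok(uint64_t addr, uint64_t align)
{
	return ((addr & (align - 1)) == 0);
}

/*
 * Boundary: the first and last byte of the segment fall inside the same
 * boundary-sized window, i.e. the segment does not cross a boundary.
 * A boundary of 0 means "no restriction".
 */
static bool
addr_bound_ok(uint64_t addr, uint64_t len, uint64_t boundary)
{
	return (boundary == 0 ||
	    ((addr ^ (addr + len - 1)) & -boundary) == 0);
}

int
main(void)
{
	uint64_t buflen = 10000, maxsegsz = 4096, addr = 0x1000;

	while (buflen > 0) {
		/* Cap each segment at maxsegsz, like buflen1 above. */
		uint64_t buflen1 = buflen > maxsegsz ? maxsegsz : buflen;

		assert(addr_align_ok(addr, 4096));
		assert(addr_bound_ok(addr, buflen1, 0x100000));
		printf("seg addr=0x%jx len=%ju\n", (uintmax_t)addr,
		    (uintmax_t)buflen1);
		addr += buflen1;
		buflen -= buflen1;
	}
	return (0);
}
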
680 ctx = tag->ctx; in iommu_bus_dmamap_load_something()
681 domain = ctx->domain; in iommu_bus_dmamap_load_something()
682 atomic_add_long(&ctx->loads, 1); in iommu_bus_dmamap_load_something()
689 TAILQ_CONCAT(&map->map_entries, &entries, dmamap_link); in iommu_bus_dmamap_load_something()
698 TAILQ_CONCAT(&domain->unload_entries, &entries, dmamap_link); in iommu_bus_dmamap_load_something()
700 taskqueue_enqueue(domain->iommu->delayed_taskqueue, in iommu_bus_dmamap_load_something()
701 &domain->unload_task); in iommu_bus_dmamap_load_something()
705 !map->cansleep) in iommu_bus_dmamap_load_something()
708 iommu_bus_schedule_dmamap(domain->iommu, map); in iommu_bus_dmamap_load_something()
742 ma_cnt = OFF_TO_IDX(pend - pstart); in iommu_bus_dmamap_load_phys()
743 mflags = map->cansleep ? M_WAITOK : M_NOWAIT; in iommu_bus_dmamap_load_phys()
793 ma_cnt = OFF_TO_IDX(pend - pstart); in iommu_bus_dmamap_load_buffer()
794 mflags = map->cansleep ? M_WAITOK : M_NOWAIT; in iommu_bus_dmamap_load_buffer()
839 map->mem = *mem; in iommu_bus_dmamap_waitok()
840 map->tag = (struct bus_dma_tag_iommu *)dmat; in iommu_bus_dmamap_waitok()
841 map->callback = callback; in iommu_bus_dmamap_waitok()
842 map->callback_arg = callback_arg; in iommu_bus_dmamap_waitok()
855 if (!map->locked) { in iommu_bus_dmamap_complete()
856 KASSERT(map->cansleep, in iommu_bus_dmamap_complete()
857 ("map not locked and not sleepable context %p", map)); in iommu_bus_dmamap_complete()
863 (tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_LOCK); in iommu_bus_dmamap_complete()
864 map->locked = true; in iommu_bus_dmamap_complete()
868 segs = tag->segments; in iommu_bus_dmamap_complete()
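
iommu_bus_dmamap_complete() (and the deferred-load task further down) bracket the driver callback with the tag's lockfunc using BUS_DMA_LOCK/BUS_DMA_UNLOCK when the load finished in taskqueue context rather than the caller's. Below is a minimal sketch of the kind of bus_dma_lock_t a driver might supply, assuming a driver-owned mtx(9) mutex is passed as lockfuncarg; FreeBSD also ships a stock busdma_lock_mutex() helper for exactly this case.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>

/*
 * Sketch of a driver-supplied busdma lock callback.  The mutex would be
 * the one protecting the driver's softc, handed to bus_dma_tag_create()
 * as lockfuncarg together with this function as lockfunc.
 */
static void
mydrv_dma_lock(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *mtx = arg;

	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(mtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(mtx);
		break;
	}
}
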
893 ctx = tag->ctx; in iommu_bus_dmamap_unload()
894 domain = ctx->domain; in iommu_bus_dmamap_unload()
895 atomic_add_long(&ctx->unloads, 1); in iommu_bus_dmamap_unload()
899 TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link); in iommu_bus_dmamap_unload()
903 TAILQ_CONCAT(&domain->unload_entries, &entries, dmamap_link); in iommu_bus_dmamap_unload()
905 taskqueue_enqueue(domain->iommu->delayed_taskqueue, in iommu_bus_dmamap_unload()
906 &domain->unload_task); in iommu_bus_dmamap_unload()
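
Both the failed-load path in iommu_bus_dmamap_load_something() and iommu_bus_dmamap_unload() hand a map's entries to domain->unload_entries with TAILQ_CONCAT under the domain lock and enqueue unload_task; iommu_domain_unload_task() later takes the whole list with TAILQ_SWAP and processes it without holding the lock. A self-contained userspace sketch of that concat-then-swap handoff, assuming FreeBSD's <sys/queue.h> macros and a pthread mutex standing in for the domain lock (all names illustrative):

#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int			id;
	TAILQ_ENTRY(entry)	link;
};
TAILQ_HEAD(entry_list, entry);

static struct entry_list unload_entries =
    TAILQ_HEAD_INITIALIZER(unload_entries);
static pthread_mutex_t dom_lock = PTHREAD_MUTEX_INITIALIZER;

/* Producer: move a map's entries onto the deferred-unload list. */
static void
queue_for_unload(struct entry_list *map_entries)
{
	pthread_mutex_lock(&dom_lock);
	TAILQ_CONCAT(&unload_entries, map_entries, link);
	pthread_mutex_unlock(&dom_lock);
	/* The real code enqueues the domain's unload task here. */
}

/* Consumer: grab the whole list at once, then free it unlocked. */
static void
unload_task(void)
{
	struct entry_list entries = TAILQ_HEAD_INITIALIZER(entries);
	struct entry *e, *tmp;

	pthread_mutex_lock(&dom_lock);
	TAILQ_SWAP(&unload_entries, &entries, entry, link);
	pthread_mutex_unlock(&dom_lock);
	TAILQ_FOREACH_SAFE(e, &entries, link, tmp)
		free(e);
}

int
main(void)
{
	struct entry_list map_entries = TAILQ_HEAD_INITIALIZER(map_entries);
	struct entry *e;

	for (int i = 0; i < 3; i++) {
		e = calloc(1, sizeof(*e));
		if (e == NULL)
			return (1);
		e->id = i;
		TAILQ_INSERT_TAIL(&map_entries, e, link);
	}
	queue_for_unload(&map_entries);
	unload_task();
	printf("drained\n");
	return (0);
}
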
922 kmsan_bus_dmamap_sync(&map->kmsan_mem, op); in iommu_bus_dmamap_sync()
934 memcpy(&map->kmsan_mem, mem, sizeof(struct memdesc)); in iommu_bus_dmamap_load_kmsan()
968 while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) { in iommu_bus_task_dmamap()
969 TAILQ_REMOVE(&unit->delayed_maps, map, delay_link); in iommu_bus_task_dmamap()
971 tag = map->tag; in iommu_bus_task_dmamap()
972 map->cansleep = true; in iommu_bus_task_dmamap()
973 map->locked = false; in iommu_bus_task_dmamap()
975 &map->mem, map->callback, map->callback_arg, in iommu_bus_task_dmamap()
977 map->cansleep = false; in iommu_bus_task_dmamap()
978 if (map->locked) { in iommu_bus_task_dmamap()
979 (tag->common.lockfunc)(tag->common.lockfuncarg, in iommu_bus_task_dmamap()
982 map->locked = true; in iommu_bus_task_dmamap()
983 map->cansleep = false; in iommu_bus_task_dmamap()
993 map->locked = false; in iommu_bus_schedule_dmamap()
995 TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link); in iommu_bus_schedule_dmamap()
997 taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task); in iommu_bus_schedule_dmamap()
1005 unit->dma_enabled = 0; in iommu_init_busdma()
1006 error = TUNABLE_INT_FETCH("hw.iommu.dma", &unit->dma_enabled); in iommu_init_busdma()
1008 TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled); in iommu_init_busdma()
1009 SYSCTL_ADD_INT(&unit->sysctl_ctx, in iommu_init_busdma()
1010 SYSCTL_CHILDREN(device_get_sysctl_tree(unit->dev)), in iommu_init_busdma()
1011 OID_AUTO, "dma", CTLFLAG_RD, &unit->dma_enabled, 0, in iommu_init_busdma()
1013 TAILQ_INIT(&unit->delayed_maps); in iommu_init_busdma()
1014 TASK_INIT(&unit->dmamap_load_task, 0, iommu_bus_task_dmamap, unit); in iommu_init_busdma()
1015 unit->delayed_taskqueue = taskqueue_create("iommu", M_WAITOK, in iommu_init_busdma()
1016 taskqueue_thread_enqueue, &unit->delayed_taskqueue); in iommu_init_busdma()
1017 taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK, in iommu_init_busdma()
1018 "iommu%d busdma taskq", unit->unit); in iommu_init_busdma()
1026 if (unit->delayed_taskqueue == NULL) in iommu_fini_busdma()
1029 taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task); in iommu_fini_busdma()
1030 taskqueue_free(unit->delayed_taskqueue); in iommu_fini_busdma()
1031 unit->delayed_taskqueue = NULL; in iommu_fini_busdma()
1056 if (tc->impl != &bus_dma_iommu_impl) in bus_dma_iommu_load_ident()
1060 ctx = tag->ctx; in bus_dma_iommu_load_ident()
1061 domain = ctx->domain; in bus_dma_iommu_load_ident()
1068 entry->start = start; in bus_dma_iommu_load_ident()
1069 entry->end = start + length; in bus_dma_iommu_load_ident()
1077 ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i, in bus_dma_iommu_load_ident()
1085 TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link); in bus_dma_iommu_load_ident()
1107 TAILQ_SWAP(&domain->unload_entries, &entries, in iommu_domain_unload_task()
1121 domain->ops = ops; in iommu_domain_init()
1122 domain->iommu = unit; in iommu_domain_init()
1124 TASK_INIT(&domain->unload_task, 0, iommu_domain_unload_task, domain); in iommu_domain_init()
1125 RB_INIT(&domain->rb_root); in iommu_domain_init()
1126 TAILQ_INIT(&domain->unload_entries); in iommu_domain_init()
1127 mtx_init(&domain->lock, "iodom", NULL, MTX_DEF); in iommu_domain_init()
1134 mtx_destroy(&domain->lock); in iommu_domain_fini()