Lines Matching +full:pci +full:- +full:domain

1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
51 #include <dev/pci/pcireg.h>
52 #include <dev/pci/pcivar.h>
68  * IOMMU units from Intel VT-d.
72 iommu_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
92 	snprintf(str, sizeof(str), "hw.busdma.pci%d.%d.%d.%d",
93 	    domain, bus, slot, func);
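
The tunable name built by this snprintf() gives per-device control over whether busdma goes through the IOMMU. A hedged loader.conf sketch (the 0:3:0:0 device address is a placeholder; "bounce" and "iommu" are the values this lookup understands, with hw.busdma.default supplying the fallback policy):

	# Force bounce-buffer busdma for one device, bypassing the IOMMU:
	hw.busdma.pci0.3.0.0="bounce"
	# Default for devices without an explicit entry:
	hw.busdma.default="iommu"
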
110  * the IOMMU unit and used for page table lookup. PCI bridges may take
114  * domain, and must collectively be assigned to use either IOMMU or
121 	device_t l, pci, pcib, pcip, pcibp, requester;
126 	pci_class = devclass_find("pci");
129 	pci = device_get_parent(dev);
130 	if (pci == NULL || device_get_devclass(pci) != pci_class) {
143 		pci = device_get_parent(l);
144 		KASSERT(pci != NULL, ("iommu_get_requester(%s): NULL parent "
146 		KASSERT(device_get_devclass(pci) == pci_class,
147 		    ("iommu_get_requester(%s): non-pci parent %s for %s",
148 		    device_get_name(dev), device_get_name(pci),
151 		pcib = device_get_parent(pci);
153 		    "for %s", device_get_name(dev), device_get_name(pci)));
156 			 * The parent of our "bridge" isn't another PCI bus,
157 			 * so pcib isn't a PCI->PCI bridge but rather a host
170 			 * unlikely) to have a PCI->PCIe bridge
185 			 * Check for a buggy PCIe/PCI bridge that
188 			 * PCI bridge, then we know pcib is actually a
189 			 * PCIe/PCI bridge.
204 				 * PCIe->PCI bridge. Assume that the
210 				 * the bridge is PCIe->PCI-X, and the
215 				 * non-taken transactions.
223 				 * conventional PCI->PCI bridge, which
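
The comments above describe how the requester is found: climb from the device through PCI buses and bridges until the parent of the current bridge is no longer a PCI bus, applying RID-translation quirks for PCIe->PCI(-X) bridges along the way. A minimal sketch of just the upward walk, with all quirk handling elided (this is not the full iommu_get_requester() logic):

	static device_t
	requester_walk_sketch(device_t dev)
	{
		devclass_t pci_class;
		device_t l, pci, pcib;

		pci_class = devclass_find("pci");
		for (l = dev;;) {
			pci = device_get_parent(l);	/* the pciN bus device */
			pcib = device_get_parent(pci);	/* bridge providing that bus */
			if (device_get_devclass(device_get_parent(pcib)) !=
			    pci_class)
				return (l);	/* pcib is a host bridge */
			l = pcib;		/* keep climbing */
		}
	}
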
264 	if ((ctx->flags & IOMMU_CTX_DISABLED) == 0) {
265 		ctx->flags |= IOMMU_CTX_DISABLED;
284 	if (!unit->dma_enabled)
301 	res = (bus_dma_tag_t)ctx->tag;
313 	if (device_get_devclass(parent) != devclass_find("pci"))
324 	    "iommu%d pci%d:%d:%d requested buswide busdma\n",
325 	    unit->unit, busno, slot, func);
339 	unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |=
349 	return ((unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] &
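
The two expressions above index a bitmap holding one bit per PCI bus number, packed into uint32_t words, so an entire bus can be flagged for buswide contexts. A self-contained restatement of that indexing (buswide_set()/buswide_test() are illustrative names, not the kernel's):

	#include <sys/param.h>	/* NBBY: bits per byte */
	#include <stdbool.h>
	#include <stdint.h>

	/* 256 possible bus numbers, 32 bits per word: 8 words. */
	static uint32_t buswide_ctxs[256 / NBBY / sizeof(uint32_t)];

	static void
	buswide_set(u_int busno)
	{
		buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |=
		    1 << (busno % (NBBY * sizeof(uint32_t)));
	}

	static bool
	buswide_test(u_int busno)
	{
		return ((buswide_ctxs[busno / NBBY / sizeof(uint32_t)] &
		    (1 << (busno % (NBBY * sizeof(uint32_t))))) != 0);
	}
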
369 	    &((struct bus_dma_tag_iommu *)parent)->common : NULL, alignment,
377 	newtag->common.impl = &bus_dma_iommu_impl;
378 	newtag->ctx = oldtag->ctx;
379 	newtag->owner = oldtag->owner;
384 	    __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
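
A driver never calls iommu_bus_dma_tag_create() directly; it reaches it through the ordinary busdma entry points once the device's tag carries &bus_dma_iommu_impl, as set above. A hedged driver-side sketch using the standard API (softc fields and sizes are placeholders):

	/* One contiguous page-aligned descriptor ring, one segment. */
	error = bus_dma_tag_create(bus_get_dma_tag(dev),
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    PAGE_SIZE, 1, PAGE_SIZE,	/* maxsize, nsegments, maxsegsz */
	    0, NULL, NULL,		/* flags, lockfunc, lockfuncarg */
	    &sc->ring_tag);
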
408 	if (dmat->map_count != 0) {
412 	ctx = dmat->ctx;
413 	if (dmat == ctx->tag) {
414 		iommu = ctx->domain->iommu;
416 		iommu_free_ctx_locked(iommu, dmat->ctx);
418 	free(dmat->segments, M_IOMMU_DMAMAP);
441 	    DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO);
446 	if (tag->segments == NULL) {
447 		tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) *
448 		    tag->common.nsegments, M_IOMMU_DMAMAP,
449 		    DOMAINSET_PREF(tag->common.domain), M_NOWAIT);
450 		if (tag->segments == NULL) {
457 	TAILQ_INIT(&map->map_entries);
458 	map->tag = tag;
459 	map->locked = true;
460 	map->cansleep = false;
461 	tag->map_count++;
477 	if (!TAILQ_EMPTY(&map->map_entries)) {
484 	tag->map_count--;
510 	if (tag->common.maxsize < PAGE_SIZE &&
511 	    tag->common.alignment <= tag->common.maxsize &&
513 		*vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF,
514 		    DOMAINSET_PREF(tag->common.domain), mflags);
515 		map->flags |= BUS_DMAMAP_IOMMU_MALLOC;
518 		    DOMAINSET_PREF(tag->common.domain), tag->common.maxsize,
520 		map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC;
539 	if ((map->flags & BUS_DMAMAP_IOMMU_MALLOC) != 0) {
541 		map->flags &= ~BUS_DMAMAP_IOMMU_MALLOC;
543 		KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0,
545 		kmem_free(vaddr, tag->common.maxsize);
546 		map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC;
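
iommu_bus_dmamem_alloc() above takes the malloc_domainset() path only for sub-page allocations whose alignment it can honor, otherwise falling back to kmem, and records the choice in the map flags so iommu_bus_dmamem_free() can release through the matching path. From a driver, the usual pairing applies (sketch, placeholder names):

	error = bus_dmamem_alloc(sc->ring_tag, (void **)&sc->ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->ring_map);
	if (error != 0)
		return (error);
	/* ... use the buffer ... */
	bus_dmamem_free(sc->ring_tag, sc->ring, sc->ring_map);
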
559 	struct iommu_domain *domain;
566 	segs = tag->segments;
567 	ctx = tag->ctx;
568 	domain = ctx->domain;
576 		if (seg >= tag->common.nsegments) {
580 		buflen1 = buflen > tag->common.maxsegsz ?
581 		    tag->common.maxsegsz : buflen;
587 		gas_flags = map->cansleep ? IOMMU_MF_CANWAIT : 0;
588 		if (seg + 1 < tag->common.nsegments)
591 		error = iommu_gas_map(domain, &tag->common, buflen1,
596 		if (buflen1 > entry->end - entry->start - offset)
597 			buflen1 = entry->end - entry->start - offset;
599 		KASSERT(vm_addr_align_ok(entry->start + offset,
600 		    tag->common.alignment),
602 		    "align 0x%jx", ctx, (uintmax_t)entry->start, offset,
603 		    (uintmax_t)tag->common.alignment));
604 		KASSERT(entry->end <= tag->common.lowaddr ||
605 		    entry->start >= tag->common.highaddr,
608 		    (uintmax_t)entry->start, (uintmax_t)entry->end,
609 		    (uintmax_t)tag->common.lowaddr,
610 		    (uintmax_t)tag->common.highaddr));
611 		KASSERT(vm_addr_bound_ok(entry->start + offset, buflen1,
612 		    tag->common.boundary),
614 		    "boundary 0x%jx", ctx, (uintmax_t)entry->start,
615 		    (uintmax_t)entry->end, (uintmax_t)tag->common.boundary));
616 		KASSERT(buflen1 <= tag->common.maxsegsz,
619 		    (uintmax_t)entry->start, (uintmax_t)entry->end,
620 		    (uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz));
622 		KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
626 		segs[seg].ds_addr = entry->start + offset;
632 		buflen -= buflen1;
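
The KASSERTs above spell out the contract each produced segment must meet: the mapped start honors the tag's alignment, the exclusion window between lowaddr and highaddr is avoided, no boundary is crossed, and the length stays within maxsegsz. The boundary predicate, restated as a hedged standalone check (same spirit as vm_addr_bound_ok(); boundary is a power of two and zero disables the test):

	#include <stdbool.h>
	#include <stdint.h>

	/* True if [addr, addr + len) stays within one boundary-sized,
	 * boundary-aligned window. */
	static bool
	segment_bound_ok(uint64_t addr, uint64_t len, uint64_t boundary)
	{
		if (boundary == 0)
			return (true);
		return ((addr & ~(boundary - 1)) ==
		    ((addr + len - 1) & ~(boundary - 1)));
	}
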
645 	struct iommu_domain *domain;
649 	ctx = tag->ctx;
650 	domain = ctx->domain;
651 	atomic_add_long(&ctx->loads, 1);
658 		TAILQ_CONCAT(&map->map_entries, &entries, dmamap_link);
666 		IOMMU_DOMAIN_LOCK(domain);
667 		TAILQ_CONCAT(&domain->unload_entries, &entries, dmamap_link);
668 		IOMMU_DOMAIN_UNLOCK(domain);
669 		taskqueue_enqueue(domain->iommu->delayed_taskqueue,
670 		    &domain->unload_task);
674 	    !map->cansleep)
677 		iommu_bus_schedule_dmamap(domain->iommu, map);
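
When mapping fails and the caller cannot sleep, the load is handed to iommu_bus_schedule_dmamap() above instead of failing outright, so a BUS_DMA_NOWAIT load may finish later from the unit's taskqueue. The driver-visible contract is the standard deferred-load protocol (sketch, placeholder names):

	static void
	ring_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
	{
		bus_addr_t *busaddr = arg;

		if (error == 0)
			*busaddr = segs[0].ds_addr;
	}

	/* EINPROGRESS means the callback runs later, once the taskqueue
	 * retries the load with sleeping allowed. */
	error = bus_dmamap_load(sc->ring_tag, sc->ring_map, sc->ring,
	    PAGE_SIZE, ring_load_cb, &sc->ring_busaddr, BUS_DMA_NOWAIT);
	if (error != 0 && error != EINPROGRESS)
		goto fail;
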
711 	ma_cnt = OFF_TO_IDX(pend - pstart);
712 	mflags = map->cansleep ? M_WAITOK : M_NOWAIT;
762 	ma_cnt = OFF_TO_IDX(pend - pstart);
763 	mflags = map->cansleep ? M_WAITOK : M_NOWAIT;
808 	map->mem = *mem;
809 	map->tag = (struct bus_dma_tag_iommu *)dmat;
810 	map->callback = callback;
811 	map->callback_arg = callback_arg;
824 	if (!map->locked) {
825 		KASSERT(map->cansleep,
832 		(tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_LOCK);
833 		map->locked = true;
837 	segs = tag->segments;
857 	struct iommu_domain *domain;
862 	ctx = tag->ctx;
863 	domain = ctx->domain;
864 	atomic_add_long(&ctx->unloads, 1);
868 	TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link);
871 	IOMMU_DOMAIN_LOCK(domain);
872 	TAILQ_CONCAT(&domain->unload_entries, &entries, dmamap_link);
873 	IOMMU_DOMAIN_UNLOCK(domain);
874 	taskqueue_enqueue(domain->iommu->delayed_taskqueue,
875 	    &domain->unload_task);
878 	iommu_domain_unload(domain, &entries, false);
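
Unload moves the map's entries onto the domain's unload list and either processes them inline or defers the actual IOMMU unmapping to the unit's taskqueue; either way the driver-side ordering is unchanged (sketch):

	/* The device has finished DMAing into the buffer. */
	bus_dmamap_sync(sc->ring_tag, sc->ring_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->ring_tag, sc->ring_map);
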
891 	kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
903 	memcpy(&map->kmsan_mem, mem, sizeof(struct memdesc));
937 	while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) {
938 		TAILQ_REMOVE(&unit->delayed_maps, map, delay_link);
940 		tag = map->tag;
941 		map->cansleep = true;
942 		map->locked = false;
944 		    &map->mem, map->callback, map->callback_arg,
946 		map->cansleep = false;
947 		if (map->locked) {
948 			(tag->common.lockfunc)(tag->common.lockfuncarg,
951 			map->locked = true;
952 			map->cansleep = false;
962 	map->locked = false;
964 	TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link);
966 	taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task);
974 	unit->dma_enabled = 0;
975 	error = TUNABLE_INT_FETCH("hw.iommu.dma", &unit->dma_enabled);
977 		TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled);
978 	SYSCTL_ADD_INT(&unit->sysctl_ctx,
979 	    SYSCTL_CHILDREN(device_get_sysctl_tree(unit->dev)),
980 	    OID_AUTO, "dma", CTLFLAG_RD, &unit->dma_enabled, 0,
982 	TAILQ_INIT(&unit->delayed_maps);
983 	TASK_INIT(&unit->dmamap_load_task, 0, iommu_bus_task_dmamap, unit);
984 	unit->delayed_taskqueue = taskqueue_create("iommu", M_WAITOK,
985 	    taskqueue_thread_enqueue, &unit->delayed_taskqueue);
986 	taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK,
987 	    "iommu%d busdma taskq", unit->unit);
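
Given the TUNABLE_INT_FETCH calls above, IOMMU-backed busdma can be toggled from the loader, with hw.dmar.dma kept as a compatibility spelling that is consulted when the new name is unset. A loader.conf sketch:

	# Disable IOMMU translation for busdma; devices then go through the
	# default (bounce-buffer) busdma implementation:
	hw.iommu.dma=0
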
995 	if (unit->delayed_taskqueue == NULL)
998 	taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task);
999 	taskqueue_free(unit->delayed_taskqueue);
1000 	unit->delayed_taskqueue = NULL;
1011 	struct iommu_domain *domain;
1025 	if (tc->impl != &bus_dma_iommu_impl)
1029 	ctx = tag->ctx;
1030 	domain = ctx->domain;
1034 	entry = iommu_gas_alloc_entry(domain, waitok ? IOMMU_PGF_WAITOK : 0);
1037 	entry->start = start;
1038 	entry->end = start + length;
1046 		ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
1049 	error = iommu_gas_map_region(domain, entry, IOMMU_MAP_ENTRY_READ |
1054 	TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
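
bus_dma_iommu_load_ident() installs an identity mapping, bus address equal to physical address, for the range [start, start + length), backing it with fake pages so iommu_gas_map_region() can populate the page tables. A hedged caller sketch (the tag and map must belong to an IOMMU-backed implementation, as the impl check above enforces):

	/* Let the device DMA to a fixed physical window at its own
	 * physical address; page-aligned start and length assumed. */
	error = bus_dma_iommu_load_ident(dmat, map, start, length,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);
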
1068 	struct iommu_domain *domain;
1071 	domain = arg;
1075 	IOMMU_DOMAIN_LOCK(domain);
1076 	TAILQ_SWAP(&domain->unload_entries, &entries,
1078 	IOMMU_DOMAIN_UNLOCK(domain);
1081 	iommu_domain_unload(domain, &entries, true);
1086 iommu_domain_init(struct iommu_unit *unit, struct iommu_domain *domain,
1090 	domain->ops = ops;
1091 	domain->iommu = unit;
1093 	TASK_INIT(&domain->unload_task, 0, iommu_domain_unload_task, domain);
1094 	RB_INIT(&domain->rb_root);
1095 	TAILQ_INIT(&domain->unload_entries);
1096 	mtx_init(&domain->lock, "iodom", NULL, MTX_DEF);
1100 iommu_domain_fini(struct iommu_domain *domain)
1103 	mtx_destroy(&domain->lock);