Lines Matching +full:iommu +full:-ctx (hits in the DMAR fault handling code)

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
55 #include <x86/iommu/intel_reg.h>
56 #include <dev/iommu/busdma_iommu.h>
57 #include <x86/iommu/x86_iommu.h>
58 #include <x86/iommu/intel_dmar.h>
64 * unit->fault_log, and schedules a task.
69 * register file. The task is usually long-running, since printf() is
84 if (faultp == unit->fault_log_size) in dmar_fault_next()
96 printf("DMAR%d: Invalidation timed out\n", unit->iommu.unit); in dmar_fault_intr_clear()
101 unit->iommu.unit); in dmar_fault_intr_clear()
106 unit->iommu.unit); in dmar_fault_intr_clear()
110 printf("DMAR%d: Advanced pending fault\n", unit->iommu.unit); in dmar_fault_intr_clear()
114 printf("DMAR%d: Advanced fault overflow\n", unit->iommu.unit); in dmar_fault_intr_clear()
140 frir = (DMAR_CAP_FRO(unit->hw_cap) + fri) * 16; in dmar_fault_intr()
147 faultp = unit->fault_log_head; in dmar_fault_intr()
148 if (dmar_fault_next(unit, faultp) == unit->fault_log_tail) { in dmar_fault_intr()
151 unit->fault_log[faultp] = fault_rec[0]; in dmar_fault_intr()
152 unit->fault_log[faultp + 1] = fault_rec[1]; in dmar_fault_intr()
153 unit->fault_log_head = dmar_fault_next(unit, faultp); in dmar_fault_intr()
158 if (fri >= DMAR_CAP_NFR(unit->hw_cap)) in dmar_fault_intr()
165 * BV100, and Haswell errata HSD40, "Spurious Intel VT-d in dmar_fault_intr()
176 printf("DMAR%d: Fault Overflow\n", unit->iommu.unit); in dmar_fault_intr()
181 taskqueue_enqueue(unit->fault_taskqueue, in dmar_fault_intr()
182 &unit->fault_task); in dmar_fault_intr()
191 struct dmar_ctx *ctx; in dmar_fault_task() local
198 faultp = unit->fault_log_tail; in dmar_fault_task()
199 if (faultp == unit->fault_log_head) in dmar_fault_task()
202 fault_rec[0] = unit->fault_log[faultp]; in dmar_fault_task()
203 fault_rec[1] = unit->fault_log[faultp + 1]; in dmar_fault_task()
204 unit->fault_log_tail = dmar_fault_next(unit, faultp); in dmar_fault_task()
208 printf("DMAR%d: ", unit->iommu.unit); in dmar_fault_task()
210 ctx = dmar_find_ctx_locked(unit, sid); in dmar_fault_task()
211 if (ctx == NULL) { in dmar_fault_task()
216 * if ARI is in use, but without a ctx entry we have in dmar_fault_task()
223 ctx->context.flags |= IOMMU_CTX_FAULTED; in dmar_fault_task()
224 ctx->last_fault_rec[0] = fault_rec[0]; in dmar_fault_task()
225 ctx->last_fault_rec[1] = fault_rec[1]; in dmar_fault_task()
226 device_print_prettyname(ctx->context.tag->owner); in dmar_fault_task()
227 bus = pci_get_bus(ctx->context.tag->owner); in dmar_fault_task()
228 slot = pci_get_slot(ctx->context.tag->owner); in dmar_fault_task()
229 func = pci_get_function(ctx->context.tag->owner); in dmar_fault_task()
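In dmar_fault_task(), a successful dmar_find_ctx_locked() lookup lets the task print the owner device via pci_get_bus/slot/function(); when the lookup fails, only the raw source-id from the fault record is available, and, as the comment fragment notes, the slot/function split is not reliable if ARI is in use. The requester-id packing itself is the standard PCI one; a hedged decode sketch (the helper name is invented here) is:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative decode of a PCI requester id ("sid") from a fault record:
 * bits 15:8 bus, 7:3 device (slot), 2:0 function.  With ARI the low eight
 * bits are a single function number, so without a ctx entry the
 * slot/function split below is only a guess.
 */
static void
sid_decode(uint16_t sid, int *bus, int *slot, int *func)
{
	*bus = (sid >> 8) & 0xff;
	*slot = (sid >> 3) & 0x1f;
	*func = sid & 0x7;
}

int
main(void)
{
	int bus, slot, func;

	sid_decode(0x0208, &bus, &slot, &func);
	printf("pci bus %d slot %d function %d\n", bus, slot, func);
	return (0);
}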
249 for (i = 0; i < DMAR_CAP_NFR(unit->hw_cap); i++) { in dmar_clear_faults()
250 frir = (DMAR_CAP_FRO(unit->hw_cap) + i) * 16; in dmar_clear_faults()
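dmar_clear_faults() and dmar_fault_intr() walk the hardware fault recording registers with the same arithmetic: DMAR_CAP_FRO() gives the location of the first recording register, DMAR_CAP_NFR() gives how many there are, and each register is 16 bytes (two 64-bit words) wide, which is what the multiply by 16 reflects. A small sketch of that offset math, using an example FRO value that is not taken from real hardware:

#include <stdio.h>

/*
 * Illustrative offset computation for the fault recording registers: FRO
 * is an offset in 16-byte units from the register base, and recording
 * register i starts 16 * i bytes after the first one.
 */
static unsigned
frec_offset(unsigned fro, unsigned i)
{
	return ((fro + i) * 16);
}

int
main(void)
{
	unsigned fro = 0x22;	/* example value only */
	unsigned i;

	for (i = 0; i < 4; i++)
		printf("fault recording register %u at offset %#x\n",
		    i, frec_offset(fro, i));
	return (0);
}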
264 mtx_init(&unit->fault_lock, "dmarflt", NULL, MTX_SPIN); in dmar_init_fault_log()
265 unit->fault_log_size = 256; /* 128 fault log entries */ in dmar_init_fault_log()
266 TUNABLE_INT_FETCH("hw.dmar.fault_log_size", &unit->fault_log_size); in dmar_init_fault_log()
267 if (unit->fault_log_size % 2 != 0) in dmar_init_fault_log()
269 unit->fault_log = malloc(sizeof(uint64_t) * unit->fault_log_size, in dmar_init_fault_log()
272 TASK_INIT(&unit->fault_task, 0, dmar_fault_task, unit); in dmar_init_fault_log()
273 unit->fault_taskqueue = taskqueue_create_fast("dmarff", M_WAITOK, in dmar_init_fault_log()
274 taskqueue_thread_enqueue, &unit->fault_taskqueue); in dmar_init_fault_log()
275 taskqueue_start_threads(&unit->fault_taskqueue, 1, PI_AV, in dmar_init_fault_log()
276 "dmar%d fault taskq", unit->iommu.unit); in dmar_init_fault_log()
279 dmar_disable_fault_intr(&unit->iommu); in dmar_init_fault_log()
281 dmar_enable_fault_intr(&unit->iommu); in dmar_init_fault_log()
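dmar_init_fault_log() sizes the software log in 64-bit words: the hw.dmar.fault_log_size tunable overrides the default of 256 words, every record takes two words (hence the "128 fault log entries" comment), and the value must therefore be even before the uint64_t array is allocated. A minimal sketch of that sizing arithmetic follows; rounding an odd request up is this sketch's assumption, since the driver's corrective line is not among the matches:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sizing for the software fault log.  The size is a count of
 * 64-bit words and each fault record occupies two of them, so it must be
 * even; rounding an odd request up is a choice made only in this sketch.
 */
static unsigned
fault_log_words(unsigned requested)
{
	if (requested % 2 != 0)
		requested++;
	return (requested);
}

int
main(void)
{
	unsigned words = fault_log_words(256);

	printf("%u words -> %u records, %zu bytes\n",
	    words, words / 2, (size_t)words * sizeof(uint64_t));
	return (0);
}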
291 if (unit->fault_taskqueue == NULL) in dmar_fini_fault_log()
295 dmar_disable_fault_intr(&unit->iommu); in dmar_fini_fault_log()
298 taskqueue_drain(unit->fault_taskqueue, &unit->fault_task); in dmar_fini_fault_log()
299 taskqueue_free(unit->fault_taskqueue); in dmar_fini_fault_log()
300 unit->fault_taskqueue = NULL; in dmar_fini_fault_log()
301 mtx_destroy(&unit->fault_lock); in dmar_fini_fault_log()
303 free(unit->fault_log, M_DEVBUF); in dmar_fini_fault_log()
304 unit->fault_log = NULL; in dmar_fini_fault_log()
305 unit->fault_log_head = unit->fault_log_tail = 0; in dmar_fini_fault_log()
309 dmar_enable_fault_intr(struct iommu_unit *iommu) in dmar_enable_fault_intr() argument
314 unit = IOMMU2DMAR(iommu); in dmar_enable_fault_intr()
322 dmar_disable_fault_intr(struct iommu_unit *iommu) in dmar_disable_fault_intr() argument
327 unit = IOMMU2DMAR(iommu); in dmar_disable_fault_intr()