Lines Matching +full:iommu +full:- +full:ctx
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 * ...
 */

#include <dev/iommu/iommu.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/amd_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/amd_iommu.h>
/*
 * amdiommu_event_log_inc_head(): advance the head index, wrapping to slot 0
 * past the end of the ring.
 */
        unit->event_log_head++;
        if (unit->event_log_head >= unit->event_log_size)
                unit->event_log_head = 0;
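For reference, the same wrap-around idiom extracted into a self-contained userspace sketch (hypothetical names, not driver code); the head walks the slots in order and returns to slot 0 after the last one:

#include <assert.h>
#include <stddef.h>

struct ring_idx {
        size_t head;            /* next slot to fill */
        size_t size;            /* number of slots in the ring */
};

static void
ring_idx_inc(struct ring_idx *r)
{
        r->head++;
        if (r->head >= r->size)         /* wrap after the last slot */
                r->head = 0;
}

int
main(void)
{
        struct ring_idx r = { .head = 0, .size = 4 };
        int i;

        for (i = 0; i < 5; i++)
                ring_idx_inc(&r);
        assert(r.head == 1);            /* five increments in a four-slot ring */
        return (0);
}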
/*
 * amdiommu_event_log_print(): decode one event-log entry.  An illegal
 * device-table-entry event fetches the offending DTE for the reported
 * device ID; an I/O page fault event looks up the context (and, if one
 * exists, the owning device) for the faulting device ID and reports the
 * fault flags and address.
 */
        ...
            unit->iommu.unit, evp->code, evp->w0, evp->ww1, evp->w2, evp->w3);
        ...
        if (evp->code == AMDIOMMU_EV_ILL_DEV_TABLE_ENTRY) {
                ...
                dte = &unit->dev_tbl[ev_dte_p->devid];
                ...
        } else if (evp->code == AMDIOMMU_EV_IO_PAGE_FAULT) {
                struct amdiommu_ctx *ctx;
                ...
                    ev_iopf_p->devid, ev_iopf_p->pasid);
                ctx = amdiommu_find_ctx_locked(unit, ev_iopf_p->devid);
                if (ctx != NULL) {
                        dev = ctx->context.tag->owner;
                ...
                    ev_iopf_p->gn, ev_iopf_p->nx, ev_iopf_p->us, ev_iopf_p->i,
                    ev_iopf_p->pr, ev_iopf_p->rw, ev_iopf_p->pe, ev_iopf_p->rz,
                    ev_iopf_p->tr,
                    (((uintmax_t)(ev_iopf_p->addr2)) << 32) |
                    ev_iopf_p->addr1);
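The last two argument lines splice two 32-bit words into one 64-bit fault address. A minimal standalone illustration of that idiom (hypothetical names); the high word has to be widened before the shift, because shifting a 32-bit value left by 32 is undefined in C:

#include <assert.h>
#include <stdint.h>

static uint64_t
join64(uint32_t hi, uint32_t lo)
{
        /* Widen first, then shift: (uint64_t)hi << 32 is well defined. */
        return (((uint64_t)hi << 32) | lo);
}

int
main(void)
{
        assert(join64(0x00000001, 0x00000000) == (uint64_t)1 << 32);
        assert(join64(0x12345678, 0x9abcdef0) == 0x123456789abcdef0ULL);
        return (0);
}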
/*
 * amdiommu_event_copy_log_inc(): wrap an index over event_copy_log[]
 * (nitems() on the NULL-cast pointer gives the array size at compile time).
 */
        if (idx == nitems(((struct amdiommu_unit *)NULL)->event_copy_log))
                ...

/*
 * amdiommu_event_copy_log_hasspace(): full when advancing the head would
 * hit the tail, so one slot always stays empty.
 */
        return (unit->event_copy_tail != amdiommu_event_copy_log_inc(
            unit->event_copy_head));
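A standalone sketch (hypothetical names, not driver code) of the convention the copy log appears to use here: head equal to tail means empty, and the ring is treated as full while one slot is still unused, so an N-slot ring holds at most N - 1 entries:

#include <assert.h>

#define NSLOTS  8

static unsigned
ring_inc(unsigned idx)
{
        return ((idx + 1) % NSLOTS);
}

static int
ring_has_space(unsigned head, unsigned tail)
{
        return (tail != ring_inc(head));
}

int
main(void)
{
        unsigned head = 0, tail = 0, pushed = 0;

        while (ring_has_space(head, tail)) {
                head = ring_inc(head);
                pushed++;
        }
        assert(pushed == NSLOTS - 1);   /* one slot stays empty when "full" */
        return (0);
}

Sacrificing one slot lets head and tail alone distinguish full from empty, without a separate element count shared between the producer and the consumer.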
/*
 * amdiommu_event_intr(): interrupt handler.  Walk the hardware event ring
 * from the software head up to the hardware tail, copy entries into the
 * in-memory copy log under the event spin lock, report the new head
 * position back to the hardware, and defer the actual decoding and
 * printing to the event taskqueue.
 */
        for (; hw_tail != unit->event_log_head;
        ...
                evp = &unit->event_log[unit->event_log_head];
                mtx_lock_spin(&unit->event_lock);
                ...
                        unit->event_copy_log[unit->event_copy_head] =
                        ...
                        unit->event_copy_head =
                            amdiommu_event_copy_log_inc(unit->
                        ...
                mtx_unlock_spin(&unit->event_lock);
        ...
            unit->event_log_head << AMDIOMMU_EV_SZ_SHIFT);
        ...
        taskqueue_enqueue(unit->event_taskqueue, &unit->event_task);
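The split between amdiommu_event_intr() and amdiommu_event_task() is a classic copy-then-defer arrangement: the interrupt path only copies raw entries and schedules work, and the slow reporting happens later in thread context. Below is a self-contained userspace analogue using pthreads (hypothetical names; a mutex and condition variable stand in for the spin lock and the taskqueue), shown only to illustrate the pattern:

#include <pthread.h>
#include <stdio.h>

#define RING_SLOTS      8

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ring_wake = PTHREAD_COND_INITIALIZER;
static int ring[RING_SLOTS];
static unsigned ring_head, ring_tail;
static int ring_done;

static unsigned
ring_inc(unsigned idx)
{
        return ((idx + 1) % RING_SLOTS);
}

/* "Interrupt" side: a cheap copy plus a wakeup; no printing here. */
static void
fake_event_intr(int ev)
{
        pthread_mutex_lock(&ring_lock);
        if (ring_inc(ring_head) != ring_tail) { /* drop events when full */
                ring[ring_head] = ev;
                ring_head = ring_inc(ring_head);
        }
        pthread_cond_signal(&ring_wake);
        pthread_mutex_unlock(&ring_lock);
}

/* Worker side: drain the ring, doing the slow work with the lock dropped. */
static void *
fake_event_task(void *arg)
{
        int ev;

        (void)arg;
        pthread_mutex_lock(&ring_lock);
        while (!ring_done || ring_head != ring_tail) {
                while (ring_head != ring_tail) {
                        ev = ring[ring_tail];
                        ring_tail = ring_inc(ring_tail);
                        pthread_mutex_unlock(&ring_lock);
                        printf("event %d\n", ev);       /* slow part */
                        pthread_mutex_lock(&ring_lock);
                }
                if (!ring_done)
                        pthread_cond_wait(&ring_wake, &ring_lock);
        }
        pthread_mutex_unlock(&ring_lock);
        return (NULL);
}

int
main(void)
{
        pthread_t worker;
        int i;

        pthread_create(&worker, NULL, fake_event_task, NULL);
        for (i = 0; i < 5; i++)
                fake_event_intr(i);
        pthread_mutex_lock(&ring_lock);
        ring_done = 1;
        pthread_cond_signal(&ring_wake);
        pthread_mutex_unlock(&ring_lock);
        pthread_join(worker, NULL);
        return (0);
}

Keeping the slow work in the worker keeps the time spent under the producer's lock short, which mirrors why the driver prints from the taskqueue rather than from the interrupt handler.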
/*
 * amdiommu_event_task(): taskqueue handler.  If the unit supports the
 * hardware event registers (EFR HWEV), note the reported hardware event;
 * on an event-log overflow, disable event logging, reset the log head, and
 * re-enable logging.
 */
        if ((unit->efr & AMDIOMMU_EFR_HWEV_SUP) != 0) {
                ...
                printf("amdiommu%d: hw event%s\n", unit->iommu.unit,
                ...
                printf("amdiommu%d: event log overflow\n", unit->iommu.unit);
                ...
                unit->hw_ctrl &= ~AMDIOMMU_CTRL_EVNTLOG_EN;
                amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl);
                ...
                unit->event_log_head = 0;
                ...
                unit->hw_ctrl |= AMDIOMMU_CTRL_EVNTLOG_EN;
                amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl);
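The overflow path follows a disable, reset, re-enable ordering. A toy standalone sketch of that ordering (a hypothetical register model, not the driver's MMIO layout):

#include <assert.h>
#include <stdint.h>

#define FAKE_LOG_EN     0x0000000000000004ULL   /* hypothetical enable bit */

struct fake_unit {
        uint64_t ctrl;          /* shadow of a control register */
        unsigned log_head;      /* software copy of the ring head */
};

static void
fake_overflow_recover(struct fake_unit *u)
{
        u->ctrl &= ~FAKE_LOG_EN;        /* 1. stop logging */
        u->log_head = 0;                /* 2. reset the ring while quiescent */
        u->ctrl |= FAKE_LOG_EN;         /* 3. resume logging */
}

int
main(void)
{
        struct fake_unit u = { .ctrl = FAKE_LOG_EN, .log_head = 123 };

        fake_overflow_recover(&u);
        assert(u.log_head == 0 && (u.ctrl & FAKE_LOG_EN) != 0);
        return (0);
}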
/*
 * amdiommu_event_task(), continued: drain the copy log, printing entries
 * one at a time and dropping the event spin lock around
 * amdiommu_event_log_print() so that the slow printing is not done while
 * holding a spin mutex.
 */
        mtx_lock_spin(&unit->event_lock);
        while (unit->event_copy_head != unit->event_copy_tail) {
                mtx_unlock_spin(&unit->event_lock);
                amdiommu_event_log_print(unit, &unit->event_copy_log[
                    unit->event_copy_tail], true);
                mtx_lock_spin(&unit->event_lock);
                unit->event_copy_tail = amdiommu_event_copy_log_inc(unit->
                ...
        mtx_unlock_spin(&unit->event_lock);
/*
 * amdiommu_init_event(): set up event reporting.  The event-log size comes
 * from the hw.amdiommu.event_log_size loader tunable and must be a power
 * of two within the supported range; the log itself is a zeroed,
 * physically contiguous, page-aligned allocation.
 */
        mtx_init(&unit->event_lock, "amdevl", NULL, MTX_SPIN);
        ...
        unit->event_log_size = AMDIOMMU_EVNTLOG_MIN;
        TUNABLE_INT_FETCH("hw.amdiommu.event_log_size", &unit->event_log_size);
        if (unit->event_log_size < AMDIOMMU_EVNTLOG_MIN ||
            unit->event_log_size > AMDIOMMU_EVNTLOG_MAX ||
            !powerof2(unit->event_log_size))
                ...
        unit->event_log = kmem_alloc_contig(AMDIOMMU_EV_SZ *
            unit->event_log_size, M_WAITOK | M_ZERO, 0, ~0ull, PAGE_SIZE,
            ...
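The body of the validation branch is not among the matched lines above; the sketch below assumes the driver falls back to the minimum log size on an invalid tunable, and uses hypothetical LOG_MIN/LOG_MAX stand-ins for AMDIOMMU_EVNTLOG_MIN and AMDIOMMU_EVNTLOG_MAX:

#include <assert.h>
#include <stdbool.h>

#define LOG_MIN 256             /* hypothetical stand-in for AMDIOMMU_EVNTLOG_MIN */
#define LOG_MAX 32768           /* hypothetical stand-in for AMDIOMMU_EVNTLOG_MAX */

static bool
is_pow2(unsigned x)
{
        return (x != 0 && (x & (x - 1)) == 0);
}

static unsigned
validate_log_size(unsigned requested)
{
        if (requested < LOG_MIN || requested > LOG_MAX || !is_pow2(requested))
                return (LOG_MIN);       /* assumed fallback to the default */
        return (requested);
}

int
main(void)
{
        assert(validate_log_size(1024) == 1024);
        assert(validate_log_size(1000) == LOG_MIN);     /* not a power of two */
        assert(validate_log_size(1u << 20) == LOG_MIN); /* out of range */
        return (0);
}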
/*
 * amdiommu_init_event(), continued: event decoding runs from a dedicated
 * single-threaded fast taskqueue; the interrupt handler only enqueues
 * event_task.
 */
        TASK_INIT(&unit->event_task, 0, amdiommu_event_task, unit);
        unit->event_taskqueue = taskqueue_create_fast("amdiommuff", M_WAITOK,
            taskqueue_thread_enqueue, &unit->event_taskqueue);
        taskqueue_start_threads(&unit->event_taskqueue, 1, PI_AV,
            "amdiommu%d event taskq", unit->iommu.unit);
        /*
         * Compute the event-log base register value: the physical base
         * address of the log is combined with an encoded log length, where
         * 0x8 stands for the minimum number of entries and each doubling
         * of the size adds one.
         */
        base_reg = pmap_kextract((vm_offset_t)unit->event_log) |
            (((uint64_t)0x8 + ilog2(unit->event_log_size /
            ...
        /*
         * Re-arm before enabling the interrupt, to not lose it when
         * re-arming in the interrupt handler.
         */
        ...
        unit->hw_ctrl |= AMDIOMMU_CTRL_EVNTLOG_EN | AMDIOMMU_CTRL_EVENTINT_EN;
        amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl);
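The length code above starts at 0x8 and grows by one for each doubling over AMDIOMMU_EVNTLOG_MIN. Assuming that minimum is 256 entries (an assumption; the constant's value is not shown here), the expression is simply ilog2 of the log size, which the standalone check below verifies with a hypothetical ilog2 helper:

#include <assert.h>
#include <stdint.h>

static unsigned
ilog2_u64(uint64_t x)
{
        unsigned r = 0;

        while (x >>= 1)         /* floor(log2(x)) for x > 0 */
                r++;
        return (r);
}

int
main(void)
{
        uint64_t size;

        /* 0x8 + ilog2(size / 256) == ilog2(size) for power-of-two sizes. */
        for (size = 256; size <= 32768; size *= 2)
                assert(0x8 + ilog2_u64(size / 256) == ilog2_u64(size));
        return (0);
}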
/*
 * amdiommu_fini_event(): tear down in the reverse order of setup.  Disable
 * event logging in the control register first, then drain and free the
 * taskqueue, and only then release the log memory and destroy the lock.
 */
        unit->hw_ctrl &= ~(AMDIOMMU_CTRL_EVNTLOG_EN |
        ...
        amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl);
        ...
        taskqueue_drain(unit->event_taskqueue, &unit->event_task);
        taskqueue_free(unit->event_taskqueue);
        unit->event_taskqueue = NULL;
        ...
        kmem_free(unit->event_log, unit->event_log_size * AMDIOMMU_EV_SZ);
        unit->event_log = NULL;
        unit->event_log_head = unit->event_log_tail = 0;
        ...
        mtx_destroy(&unit->event_lock);