Lines Matching +full:iommu +full:- +full:ctx

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
56 #include <dev/iommu/busdma_iommu.h>
57 #include <x86/iommu/amd_reg.h>
58 #include <x86/iommu/x86_iommu.h>
59 #include <x86/iommu/amd_iommu.h>
66 unit->hw_ctrl |= AMDIOMMU_CTRL_CMDBUF_EN; in amdiommu_enable_cmdbuf()
67 amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl); in amdiommu_enable_cmdbuf()
75 unit->hw_ctrl &= ~AMDIOMMU_CTRL_CMDBUF_EN; in amdiommu_disable_cmdbuf()
76 amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl); in amdiommu_disable_cmdbuf()
81 amdiommu_enable_qi_intr(struct iommu_unit *iommu) in amdiommu_enable_qi_intr() argument
85 unit = IOMMU2AMD(iommu); in amdiommu_enable_qi_intr()
87 unit->hw_ctrl |= AMDIOMMU_CTRL_COMWINT_EN; in amdiommu_enable_qi_intr()
88 amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl); in amdiommu_enable_qi_intr()
94 amdiommu_disable_qi_intr(struct iommu_unit *iommu) in amdiommu_disable_qi_intr() argument
98 unit = IOMMU2AMD(iommu); in amdiommu_disable_qi_intr()
100 unit->hw_ctrl &= ~AMDIOMMU_CTRL_COMWINT_EN; in amdiommu_disable_qi_intr()
101 amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl); in amdiommu_disable_qi_intr()
105 amdiommu_cmd_advance_tail(struct iommu_unit *iommu) in amdiommu_cmd_advance_tail() argument
109 unit = IOMMU2AMD(iommu); in amdiommu_cmd_advance_tail()
111 amdiommu_write8(unit, AMDIOMMU_CMDBUF_TAIL, unit->x86c.inv_queue_tail); in amdiommu_cmd_advance_tail()
115 amdiommu_cmd_ensure(struct iommu_unit *iommu, int descr_count) in amdiommu_cmd_ensure() argument
121 unit = IOMMU2AMD(iommu); in amdiommu_cmd_ensure()
125 if (bytes <= unit->x86c.inv_queue_avail) in amdiommu_cmd_ensure()
130 unit->x86c.inv_queue_avail = head - unit->x86c.inv_queue_tail - in amdiommu_cmd_ensure()
132 if (head <= unit->x86c.inv_queue_tail) in amdiommu_cmd_ensure()
133 unit->x86c.inv_queue_avail += unit->x86c.inv_queue_size; in amdiommu_cmd_ensure()
134 if (bytes <= unit->x86c.inv_queue_avail) in amdiommu_cmd_ensure()
147 amdiommu_cmd_advance_tail(iommu); in amdiommu_cmd_ensure()
148 unit->x86c.inv_queue_full++; in amdiommu_cmd_ensure()
151 unit->x86c.inv_queue_avail -= bytes; in amdiommu_cmd_ensure()
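The ensure fragment computes how many bytes remain free between the hardware's head pointer and the software tail, wrapping around the circular command buffer and apparently keeping one command's worth of slack so that head == tail can only mean an empty queue; if there is still not enough room it pushes the tail out to the hardware and counts an inv_queue_full event. A self-contained sketch of just the free-space arithmetic, assuming byte offsets into a queue of queue_size bytes:

#include <stdint.h>

/*
 * Free-space check for a circular command buffer (sketch, not the
 * driver's code).  'head' is where the hardware reads, 'tail' is where
 * software writes next; both are byte offsets.  One descriptor of
 * slack is reserved so a completely full queue never looks empty.
 */
static uint32_t
queue_bytes_free(uint32_t head, uint32_t tail, uint32_t queue_size,
    uint32_t descr_size)
{
	uint32_t avail;

	avail = head - tail - descr_size;	/* may wrap; fixed below */
	if (head <= tail)
		avail += queue_size;		/* unwrap modulo queue_size */
	return (avail);
}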
160 memcpy(unit->x86c.inv_queue + unit->x86c.inv_queue_tail, cmd, in amdiommu_cmd_emit()
162 unit->x86c.inv_queue_tail += AMDIOMMU_CMD_SZ; in amdiommu_cmd_emit()
163 KASSERT(unit->x86c.inv_queue_tail <= unit->x86c.inv_queue_size, in amdiommu_cmd_emit()
164 ("tail overflow 0x%x 0x%jx", unit->x86c.inv_queue_tail, in amdiommu_cmd_emit()
165 (uintmax_t)unit->x86c.inv_queue_size)); in amdiommu_cmd_emit()
166 unit->x86c.inv_queue_tail &= unit->x86c.inv_queue_size - 1; in amdiommu_cmd_emit()
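The emit fragment appends one fixed-size command at the tail and wraps the tail with a mask, which only works because the queue size is a power of two. A sketch of the same append-and-wrap step with placeholder names:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/*
 * Append one fixed-size descriptor to a power-of-two ring (sketch).
 * The mask replaces a modulo; descriptors are assumed never to
 * straddle the end of the buffer.
 */
static void
ring_append(uint8_t *ring, uint32_t *tailp, uint32_t ring_size,
    const void *descr, uint32_t descr_size)
{
	memcpy(ring + *tailp, descr, descr_size);
	*tailp += descr_size;
	assert(*tailp <= ring_size);		/* cf. the KASSERT above */
	*tailp &= ring_size - 1;		/* wrap; ring_size is 2^n */
}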
170 amdiommu_cmd_emit_wait_descr(struct iommu_unit *iommu, uint32_t seq, in amdiommu_cmd_emit_wait_descr() argument
176 unit = IOMMU2AMD(iommu); in amdiommu_cmd_emit_wait_descr()
185 x = unit->x86c.inv_waitd_seq_hw_phys; in amdiommu_cmd_emit_wait_descr()
188 x = unit->x86c.inv_waitd_seq_hw_phys >> 32; in amdiommu_cmd_emit_wait_descr()
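These two lines split the 64-bit physical address of the wait-sequence slot into the low and high 32-bit words of the wait descriptor. A sketch of that split with hypothetical field names (the real descriptor layout is defined in amd_reg.h):

#include <stdint.h>

/* Hypothetical wait-descriptor layout, used only for this sketch. */
struct wait_descr_sketch {
	uint32_t address_lo;	/* bits 31:0 of the store address */
	uint32_t address_hi;	/* bits 63:32 of the store address */
	uint32_t data;		/* value the IOMMU stores on completion */
};

static void
wait_descr_fill(struct wait_descr_sketch *d, uint64_t seq_phys, uint32_t seq)
{
	d->address_lo = (uint32_t)seq_phys;
	d->address_hi = (uint32_t)(seq_phys >> 32);
	d->data = seq;
}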
209 unit = domain->unit; in amdiommu_qi_invalidate_emit()
213 c.domainid = domain->domain; in amdiommu_qi_invalidate_emit()
216 for (; size > 0; base += isize, size -= isize) { in amdiommu_qi_invalidate_emit()
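The invalidation emit loop walks the requested range and queues one page-invalidation command per step. A sketch of the loop shape, using a hypothetical fixed 4 KiB step and a placeholder emit_one(); the driver derives the per-iteration size from the range's length and alignment:

#include <stdint.h>

/* Walk [base, base + size) and emit one invalidation per step (sketch). */
static void
invalidate_range(uint64_t base, uint64_t size,
    void (*emit_one)(uint64_t base, uint64_t isize))
{
	uint64_t isize;

	for (; size > 0; base += isize, size -= isize) {
		isize = size < 4096 ? size : 4096;	/* never overshoot */
		emit_one(base, isize);
	}
}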
232 unit = domain->unit; in amdiommu_qi_invalidate_all_pages_locked_nowait()
236 c.domainid = domain->domain; in amdiommu_qi_invalidate_all_pages_locked_nowait()
251 amdiommu_qi_invalidate_wait_sync(struct iommu_unit *iommu) in amdiommu_qi_invalidate_wait_sync() argument
255 amdiommu_cmd_ensure(iommu, 1); in amdiommu_qi_invalidate_wait_sync()
256 iommu_qi_emit_wait_seq(iommu, &gseq, true); in amdiommu_qi_invalidate_wait_sync()
257 IOMMU2AMD(iommu)->x86c.inv_seq_waiters++; in amdiommu_qi_invalidate_wait_sync()
258 amdiommu_cmd_advance_tail(iommu); in amdiommu_qi_invalidate_wait_sync()
259 iommu_qi_wait_for_seq(iommu, &gseq, true); in amdiommu_qi_invalidate_wait_sync()
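The synchronous wait combines the pieces above: reserve room for one descriptor, emit a wait command carrying a fresh sequence number, record a waiter, publish the new tail to the hardware, and block until the IOMMU writes that sequence number back. A self-contained sketch of the idea, polling instead of sleeping and using placeholder names throughout:

#include <stdint.h>

struct flush_sketch {
	volatile uint32_t completed_seq;	/* written back by hardware */
	uint32_t next_seq;
};

static void
flush_and_wait(struct flush_sketch *q,
    void (*emit_wait)(struct flush_sketch *, uint32_t),
    void (*kick_tail)(struct flush_sketch *))
{
	uint32_t seq = ++q->next_seq;

	emit_wait(q, seq);	/* cf. iommu_qi_emit_wait_seq() */
	kick_tail(q);		/* cf. amdiommu_cmd_advance_tail() */
	while ((int32_t)(q->completed_seq - seq) < 0)
		;		/* busy-wait; the driver may sleep instead */
}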
263 amdiommu_qi_invalidate_ctx_locked_nowait(struct amdiommu_ctx *ctx) in amdiommu_qi_invalidate_ctx_locked_nowait() argument
267 amdiommu_cmd_ensure(AMD2IOMMU(CTX2AMD(ctx)), 1); in amdiommu_qi_invalidate_ctx_locked_nowait()
270 c.devid = ctx->context.rid; in amdiommu_qi_invalidate_ctx_locked_nowait()
271 amdiommu_cmd_emit(CTX2AMD(ctx), (struct amdiommu_cmd_generic *)&c); in amdiommu_qi_invalidate_ctx_locked_nowait()
276 amdiommu_qi_invalidate_ctx_locked(struct amdiommu_ctx *ctx) in amdiommu_qi_invalidate_ctx_locked() argument
278 amdiommu_qi_invalidate_ctx_locked_nowait(ctx); in amdiommu_qi_invalidate_ctx_locked()
279 amdiommu_qi_invalidate_wait_sync(AMD2IOMMU(CTX2AMD(ctx))); in amdiommu_qi_invalidate_ctx_locked()
313 if (unit->x86c.inv_seq_waiters > 0) in amdiommu_qi_task()
314 wakeup(&unit->x86c.inv_seq_waiters); in amdiommu_qi_task()
323 unit->x86c.qi_buf_maxsz = ilog2(AMDIOMMU_CMDBUF_MAX / PAGE_SIZE); in amdiommu_init_cmd()
324 unit->x86c.qi_cmd_sz = AMDIOMMU_CMD_SZ; in amdiommu_init_cmd()
326 get_x86_iommu()->qi_ensure = amdiommu_cmd_ensure; in amdiommu_init_cmd()
327 get_x86_iommu()->qi_emit_wait_descr = amdiommu_cmd_emit_wait_descr; in amdiommu_init_cmd()
328 get_x86_iommu()->qi_advance_tail = amdiommu_cmd_advance_tail; in amdiommu_init_cmd()
329 get_x86_iommu()->qi_invalidate_emit = amdiommu_qi_invalidate_emit; in amdiommu_init_cmd()
331 rv = pmap_kextract((uintptr_t)unit->x86c.inv_queue); in amdiommu_init_cmd()
337 qi_sz = ilog2(unit->x86c.inv_queue_size / PAGE_SIZE) + 8; in amdiommu_init_cmd()
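Assuming 4 KiB pages and 16-byte commands (AMDIOMMU_CMD_SZ), each page holds 4096 / 16 = 256 = 2^8 entries, so ilog2(inv_queue_size / PAGE_SIZE) + 8 is the base-2 logarithm of the total entry count; a 1 MiB buffer, for example, gives ilog2(256) + 8 = 16, i.e. 2^16 entries, which is presumably the encoding the hardware's command-buffer length field expects.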
350 amdiommu_fini_cmd_helper(struct iommu_unit *iommu) in amdiommu_fini_cmd_helper() argument
352 amdiommu_disable_cmdbuf(IOMMU2AMD(iommu)); in amdiommu_fini_cmd_helper()
353 amdiommu_disable_qi_intr(iommu); in amdiommu_fini_cmd_helper()