Lines Matching +full:iommu +full:- +full:ctx
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
64 #include <dev/iommu/busdma_iommu.h>
65 #include <x86/iommu/intel_reg.h>
66 #include <x86/iommu/x86_iommu.h>
67 #include <x86/iommu/intel_dmar.h>
102 * 6-level paging (DMAR_CAP_SAGAW_6LVL) is not supported on any
103 * current VT-d hardware and its SAGAW field value is listed as
104 * reserved in the VT-d spec. If support is added in the future,
118 if ((DMAR_CAP_SAGAW(unit->hw_cap) & sagaw_bits[i].cap) != 0) in dmar_pglvl_supported()
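The check above tests the unit's SAGAW capability field against a table that pairs each supported page-walk depth with the adjusted guest address width (AGAW) it implies: 2-, 3-, 4- and 5-level walks cover 30-, 39-, 48- and 57-bit widths (12 offset bits plus 9 bits per level). A minimal standalone sketch of that relation follows; the struct layout and the bit positions in the capability field are illustrative assumptions, not copied from the driver's headers.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative mapping from SAGAW capability bits to page-walk depth and
 * adjusted guest address width.  Bit positions (bit pglvl - 2 set for an
 * N-level walk) are an assumption for this sketch.
 */
struct sagaw_entry {
	int	agaw;		/* adjusted guest address width, bits */
	int	pglvl;		/* number of page-table levels */
	uint32_t cap_bit;	/* bit in the SAGAW capability field */
};

static const struct sagaw_entry sagaw_table[] = {
	{ 30, 2, 1u << 0 },	/* 12 + 2 * 9 = 30 */
	{ 39, 3, 1u << 1 },
	{ 48, 4, 1u << 2 },
	{ 57, 5, 1u << 3 },
};

/* Does the SAGAW field advertise an N-level page walk? */
static bool
pglvl_supported(uint32_t sagaw_field, int pglvl)
{
	for (size_t i = 0; i < sizeof(sagaw_table) / sizeof(sagaw_table[0]); i++) {
		if (sagaw_table[i].pglvl == pglvl &&
		    (sagaw_field & sagaw_table[i].cap_bit) != 0)
			return (true);
	}
	return (false);
}
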
129 domain->mgaw = mgaw; in domain_set_agaw()
130 sagaw = DMAR_CAP_SAGAW(domain->dmar->hw_cap); in domain_set_agaw()
133 domain->agaw = sagaw_bits[i].agaw; in domain_set_agaw()
134 domain->pglvl = sagaw_bits[i].pglvl; in domain_set_agaw()
135 domain->awlvl = sagaw_bits[i].awlvl; in domain_set_agaw()
139 device_printf(domain->dmar->iommu.dev, in domain_set_agaw()
147 * - if allow_less is false, must find sagaw which maps all requested
149 * - if allow_less is true, and no supported sagaw can map all requested
159 (DMAR_CAP_SAGAW(unit->hw_cap) & sagaw_bits[i].cap) != 0) in dmar_maxaddr2mgaw()
164 i--; in dmar_maxaddr2mgaw()
165 } while ((DMAR_CAP_SAGAW(unit->hw_cap) & sagaw_bits[i].cap) in dmar_maxaddr2mgaw()
172 return (-1); in dmar_maxaddr2mgaw()
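The loop above implements the policy the comment describes: walk the supported widths from narrowest to widest looking for one that maps every requested address, and with allow_less fall back to the widest width the hardware offers. A compact userland restatement of that policy, assuming the candidate widths are 30/39/48/57 bits with bit pglvl - 2 set in a support mask:

#include <stdbool.h>
#include <stdint.h>

/*
 * Pick an adjusted guest address width for a domain that must map
 * addresses up to maxaddr.  Smallest supported width that covers maxaddr
 * wins; with allow_less, fall back to the widest supported width;
 * otherwise report failure with -1.
 */
static int
pick_agaw(uint64_t maxaddr, uint32_t supported_mask, bool allow_less)
{
	static const int agaws[] = { 30, 39, 48, 57 };
	int widest = -1;

	for (int i = 0; i < 4; i++) {
		if ((supported_mask & (1u << i)) == 0)
			continue;
		widest = agaws[i];	/* remember the widest supported */
		if (maxaddr < ((uint64_t)1 << agaws[i]))
			return (agaws[i]);	/* covers the whole range */
	}
	return (allow_less ? widest : -1);
}
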
177 * the context ctx.
190 alvl = domain->pglvl - lvl - 1; in domain_is_sp_lvl()
191 cap_sps = DMAR_CAP_SPS(domain->dmar->hw_cap); in domain_is_sp_lvl()
199 return (pglvl_page_size(domain->pglvl, lvl)); in domain_page_size()
209 for (am = DMAR_CAP_MAMV(unit->hw_cap);; am--) { in calc_am()
211 if ((base & (isize - 1)) == 0 && size >= isize) in calc_am()
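calc_am() appears to pick the address-mask value for a page-selective invalidation: the largest mask, capped by the hardware's maximum (MAMV), for which the 2^am-page window at base is naturally aligned and still fits within size. A standalone restatement of that search, with an assumed 4 KB IOMMU page size:

#include <stdint.h>

#define	SKETCH_PAGE_SHIFT	12	/* assumed 4 KB page size */

/*
 * Largest am <= mamv such that the 2^am-page region starting at base is
 * naturally aligned and no larger than size; am == 0 means a single page.
 */
static int
calc_addr_mask(uint64_t base, uint64_t size, int mamv)
{
	int am;
	uint64_t isize;

	for (am = mamv; am > 0; am--) {
		isize = (uint64_t)1 << (am + SKETCH_PAGE_SHIFT);
		if ((base & (isize - 1)) == 0 && size >= isize)
			break;
	}
	return (am);
}

For example, base 0x200000 with size 0x200000 and mamv of at least 9 yields am = 9, i.e. one invalidation covering 512 pages (2 MB).
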
273 VM_OBJECT_RLOCK(unit->ctx_obj); in dmar_load_root_entry_ptr()
274 root_entry = vm_page_lookup(unit->ctx_obj, 0); in dmar_load_root_entry_ptr()
275 VM_OBJECT_RUNLOCK(unit->ctx_obj); in dmar_load_root_entry_ptr()
277 dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd | DMAR_GCMD_SRTP); in dmar_load_root_entry_ptr()
297 KASSERT(!unit->qi_enabled, ("QI enabled")); in dmar_inv_ctx_glob()
320 KASSERT(!unit->qi_enabled, ("QI enabled")); in dmar_inv_iotlb_glob()
322 reg = 16 * DMAR_ECAP_IRO(unit->hw_ecap); in dmar_inv_iotlb_glob()
345 KASSERT((unit->hw_cap & DMAR_CAP_RWBF) != 0, in dmar_flush_write_bufs()
346 ("dmar%d: no RWBF", unit->iommu.unit)); in dmar_flush_write_bufs()
348 dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd | DMAR_GCMD_WBF); in dmar_flush_write_bufs()
369 if ((unit->hw_cap & (DMAR_CAP_PLMR | DMAR_CAP_PHMR)) == 0) in dmar_disable_protected_regions()
390 unit->hw_gcmd |= DMAR_GCMD_TE; in dmar_enable_translation()
391 dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd); in dmar_enable_translation()
403 unit->hw_gcmd &= ~DMAR_GCMD_TE; in dmar_disable_translation()
404 dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd); in dmar_disable_translation()
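Loading the root-table pointer, flushing write buffers, and enabling or disabling translation all appear to use the same handshake: write the global command register with the relevant bit changed, then wait for the matching bit in the global status register. The wait itself is not among the matched lines, so the status-register polling below is an assumption about the surrounding code; the sketch models the handshake with plain volatile pointers and an unbounded poll, where the real driver uses its own register accessors (dmar_write4() and friends) and a bounded wait.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical register window; the layout is illustrative only. */
struct dmar_cmd_regs {
	volatile uint32_t *gcmd;	/* global command register */
	volatile uint32_t *gsts;	/* global status register */
};

/*
 * Write a new command value and spin until the given status bit reaches
 * the wanted state, e.g. set a translation-enable bit and wait for the
 * corresponding status bit to appear.
 */
static void
gcmd_write_and_wait(struct dmar_cmd_regs *r, uint32_t cmd, uint32_t sts_bit,
    bool want_set)
{
	*r->gcmd = cmd;
	while (((*r->gsts & sts_bit) != 0) != want_set)
		;	/* the real code bounds this wait and can time out */
}
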
417 irta = unit->irt_phys; in dmar_load_irt_ptr()
420 s = fls(unit->irte_cnt) - 2; in dmar_load_irt_ptr()
421 KASSERT(unit->irte_cnt >= 2 && s <= DMAR_IRTA_S_MASK && in dmar_load_irt_ptr()
422 powerof2(unit->irte_cnt), in dmar_load_irt_ptr()
423 ("IRTA_REG_S overflow %x", unit->irte_cnt)); in dmar_load_irt_ptr()
426 dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd | DMAR_GCMD_SIRTP); in dmar_load_irt_ptr()
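As I read the fls(unit->irte_cnt) - 2 computation and its assertion, the interrupt-remapping-table-address register's size field S encodes a table of 2^(S + 1) entries, so for a power-of-two count n >= 2 the field is log2(n) - 1. A small standalone check of that identity, with fls() open-coded so the sketch does not depend on the kernel environment:

#include <assert.h>

/* Size field for an interrupt remapping table of irte_cnt entries. */
static int
irta_size_field(unsigned int irte_cnt)
{
	int fls = 0;

	assert(irte_cnt >= 2 && (irte_cnt & (irte_cnt - 1)) == 0);
	for (unsigned int v = irte_cnt; v != 0; v >>= 1)
		fls++;			/* 1-based index of the highest set bit */
	return (fls - 2);		/* 2^(S + 1) entries; 1024 -> S = 9 */
}
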
438 unit->hw_gcmd |= DMAR_GCMD_IRE; in dmar_enable_ir()
439 unit->hw_gcmd &= ~DMAR_GCMD_CFI; in dmar_enable_ir()
440 dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd); in dmar_enable_ir()
452 unit->hw_gcmd &= ~DMAR_GCMD_IRE; in dmar_disable_ir()
453 dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd); in dmar_disable_ir()
472 if ((dmar->barrier_flags & f_done) != 0) { in dmar_barrier_enter()
477 if ((dmar->barrier_flags & f_inproc) != 0) { in dmar_barrier_enter()
478 while ((dmar->barrier_flags & f_inproc) != 0) { in dmar_barrier_enter()
479 dmar->barrier_flags |= f_wakeup; in dmar_barrier_enter()
480 msleep(&dmar->barrier_flags, &dmar->iommu.lock, 0, in dmar_barrier_enter()
483 KASSERT((dmar->barrier_flags & f_done) != 0, in dmar_barrier_enter()
484 ("dmar%d barrier %d missing done", dmar->iommu.unit, in dmar_barrier_enter()
490 dmar->barrier_flags |= f_inproc; in dmar_barrier_enter()
501 KASSERT((dmar->barrier_flags & (f_done | f_inproc)) == f_inproc, in dmar_barrier_exit()
502 ("dmar%d barrier %d missed entry", dmar->iommu.unit, barrier_id)); in dmar_barrier_exit()
503 dmar->barrier_flags |= f_done; in dmar_barrier_exit()
504 if ((dmar->barrier_flags & f_wakeup) != 0) in dmar_barrier_exit()
505 wakeup(&dmar->barrier_flags); in dmar_barrier_exit()
506 dmar->barrier_flags &= ~(f_inproc | f_wakeup); in dmar_barrier_exit()
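Read together, the enter/exit pair looks like a one-shot barrier: the first caller is elected to perform some one-time per-unit work while later callers either find it already done or sleep until the worker finishes. The userland model below restates that protocol with a pthread mutex and condition variable standing in for the unit lock and msleep()/wakeup(); it is an interpretation of the fragments above, not the driver's code.

#include <pthread.h>
#include <stdbool.h>

struct one_shot {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	bool		done;		/* work has been completed */
	bool		inproc;		/* work is in progress */
};

static struct one_shot setup_barrier = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cv = PTHREAD_COND_INITIALIZER,
};

/* Returns true when the caller must perform the one-time work itself. */
static bool
one_shot_enter(struct one_shot *b)
{
	pthread_mutex_lock(&b->lock);
	while (b->inproc)			/* another thread is doing it */
		pthread_cond_wait(&b->cv, &b->lock);
	if (b->done) {				/* already performed */
		pthread_mutex_unlock(&b->lock);
		return (false);
	}
	b->inproc = true;			/* this thread is elected */
	pthread_mutex_unlock(&b->lock);
	return (true);
}

static void
one_shot_exit(struct one_shot *b)
{
	pthread_mutex_lock(&b->lock);
	b->done = true;
	b->inproc = false;
	pthread_cond_broadcast(&b->cv);		/* release any waiters */
	pthread_mutex_unlock(&b->lock);
}
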
542 if (error != 0 || req->newptr == NULL) in dmar_timeout_sysctl()
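This last hit is the standard sysctl(9) handler shape: export the current value, and only validate and commit a new one when the request actually supplied one (req->newptr != NULL). A sketch of that shape follows; everything except the sysctl(9) API itself (SYSCTL_HANDLER_ARGS, sysctl_handle_64(), req->newptr) is a made-up placeholder.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/sysctl.h>

static uint64_t example_timeout;	/* hypothetical tunable */

static int
example_timeout_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint64_t val;
	int error;

	val = example_timeout;
	error = sysctl_handle_64(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);		/* error, or a read-only request */
	if (val == 0)
		return (EINVAL);	/* reject an obviously bad setting */
	example_timeout = val;
	return (0);
}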