/freebsd/sys/x86/iommu/  (cross-reference matches for pglvl)
intel_utils.c
   91: int pglvl;  (member)
   94: .pglvl = 2},
   96: .pglvl = 3},
   98: .pglvl = 4},
  100: .pglvl = 5}
  111: dmar_pglvl_supported(struct dmar_unit *unit, int pglvl)  (argument, in dmar_pglvl_supported())
  116: if (sagaw_bits[i].pglvl != pglvl)  (in dmar_pglvl_supported())
  134: domain->pglvl = sagaw_bits[i].pglvl;  (in domain_set_agaw())
  190: alvl = domain->pglvl - lvl - 1;  (in domain_is_sp_lvl())
  199: return (pglvl_page_size(domain->pglvl, lvl));  (in domain_page_size())
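The sagaw_bits entries above tie each supported adjusted guest address width to a page-table depth (pglvl 2 through 5), and domain_page_size() reduces to pglvl_page_size(). A minimal, self-contained sketch of that relationship follows, assuming 4 KB leaf pages and 512-entry table pages; the constants and helper names are illustrative only, not the driver's.

/*
 * Sketch only: how page-table depth (pglvl) relates to the covered
 * address width and to the size mapped by one entry at a level,
 * assuming 4 KB pages and 512 PTEs per table page.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_4K	12	/* assumed leaf page shift */
#define NPTE_SHIFT	9	/* assumed log2(PTEs per table page) */

/* Guest address width (bits) covered by a tree of 'pglvl' levels. */
static int
agaw_for_pglvl(int pglvl)
{
	return (PAGE_SHIFT_4K + pglvl * NPTE_SHIFT);
}

/* Bytes mapped by one entry at level 'lvl', level 0 being the root. */
static uint64_t
page_size_at_lvl(int pglvl, int lvl)
{
	return ((uint64_t)1 << (PAGE_SHIFT_4K +
	    (pglvl - lvl - 1) * NPTE_SHIFT));
}

int
main(void)
{
	int pglvl;

	for (pglvl = 2; pglvl <= 5; pglvl++)
		printf("pglvl %d: agaw %d bits, root entry maps %ju bytes\n",
		    pglvl, agaw_for_pglvl(pglvl),
		    (uintmax_t)page_size_at_lvl(pglvl, 0));
	return (0);
}

Each additional level widens the covered guest address space by 9 bits (one more 512-entry table), which is the pattern the sagaw_bits entries encode.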
intel_idpgtbl.c
   87: int pglvl;  /* Total page table levels ignoring ...  (member, comment truncated in the match)
  127: pg_sz = pglvl_page_size(tbl->pglvl, lvl);  (in dmar_idmap_nextlvl())
  179: for (i = 0; i < domain->pglvl; i++) {  (in dmar_get_idmap_pgtbl())
  180: if (i == domain->pglvl - 1 || domain_is_sp_lvl(domain, i)) {  (in dmar_get_idmap_pgtbl())
  197: dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&  (in dmar_get_idmap_pgtbl())
  202: domain->pglvl = tbl->pglvl; /* XXXKIB ? */  (in dmar_get_idmap_pgtbl())
  216: dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&  (in dmar_get_idmap_pgtbl())
  221: domain->pglvl = tbl->pglvl; /* XXXKIB ? */  (in dmar_get_idmap_pgtbl())
  230: tbl->pglvl = domain->pglvl;  (in dmar_get_idmap_pgtbl())
  234: IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL);  (in dmar_get_idmap_pgtbl())
  [all …]
amd_idpgtbl.c
   87: IDX_TO_OFF(pglvl_max_pages(domain->pglvl)), 0, 0, NULL);  (in amdiommu_domain_alloc_pgtbl())
  136: idx = pglvl_pgtbl_get_pindex(domain->pglvl, base, lvl);  (in amdiommu_pgtbl_map_pte())
  173: ((domain->pglvl - lvl) << AMDIOMMU_PTE_NLVL_SHIFT);  (in amdiommu_pgtbl_map_pte())
  181: pte += pglvl_pgtbl_pte_off(domain->pglvl, base, lvl);  (in amdiommu_pgtbl_map_pte())
  208: pte = amdiommu_pgtbl_map_pte(domain, base, domain->pglvl - 1,  (in amdiommu_map_buf_locked())
  362: domain->pglvl - 1, flags, &idx, &sf);  (in amdiommu_unmap_buf_locked())
  365: domain, (uintmax_t)base, domain->pglvl - 1, flags));  (in amdiommu_unmap_buf_locked())
  366: amdiommu_unmap_clear_pte(domain, base, domain->pglvl - 1,  (in amdiommu_unmap_buf_locked())
amd_ctx.c
  205: domain->pglvl = i;  (in amdiommu_domain_init_pglvl())
  223: if (hats >= domain->pglvl)  (in amdiommu_domain_init_pglvl())
  227: unit->iommu.unit, domain->domain, hats, domain->pglvl);  (in amdiommu_domain_init_pglvl())
  228: domain->pglvl = hats;  (in amdiommu_domain_init_pglvl())
  466: MPASS(domain->pglvl > 0 && domain->pglvl <=  (in dte_entry_init_one())
  468: dtep->pgmode = domain->pglvl;  (in dte_entry_init_one())
x86_iommu.h
  192: int pglvl_pgtbl_pte_off(int pglvl, iommu_gaddr_t base, int lvl);
  193: vm_pindex_t pglvl_pgtbl_get_pindex(int pglvl, iommu_gaddr_t base, int lvl);
  194: vm_pindex_t pglvl_max_pages(int pglvl);
iommu_utils.c
  688: pglvl_pgtbl_pte_off(int pglvl, iommu_gaddr_t base, int lvl)  (argument, in pglvl_pgtbl_pte_off())
  691: base >>= IOMMU_PAGE_SHIFT + (pglvl - lvl - 1) *  (in pglvl_pgtbl_pte_off())
  702: pglvl_pgtbl_get_pindex(int pglvl, iommu_gaddr_t base, int lvl)  (argument, in pglvl_pgtbl_get_pindex())
  707: KASSERT(lvl >= 0 && lvl < pglvl,  (in pglvl_pgtbl_get_pindex())
  708: ("wrong lvl %d %d", pglvl, lvl));  (in pglvl_pgtbl_get_pindex())
  711: idx = pglvl_pgtbl_pte_off(pglvl, base, i) +  (in pglvl_pgtbl_get_pindex())
  722: pglvl_max_pages(int pglvl)  (argument, in pglvl_max_pages())
  727: for (res = 0, i = pglvl; i > 0; i--) {  (in pglvl_max_pages())
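The pglvl_* helpers above appear only as fragments; the arithmetic they hint at is a radix-512 page-table walk indexed from the root level downward. A minimal sketch of the same arithmetic, under the same assumptions as before (4 KB pages, 512 PTEs per table page); names and constants below are placeholders, not the kernel's definitions.

/*
 * Sketch only: PTE index selection and an upper bound on the number
 * of table pages in a fully populated tree of 'pglvl' levels.
 */
#include <stdint.h>
#include <stdio.h>

#define PG_SHIFT	12		/* assumed IOMMU page shift */
#define NPTE_SHIFT	9		/* assumed log2(PTEs per page) */
#define NPTE		(1 << NPTE_SHIFT)
#define PTE_MASK	(NPTE - 1)

/* Index of the PTE for address 'base' within the table page at level 'lvl'. */
static int
pte_off(int pglvl, uint64_t base, int lvl)
{
	return ((int)((base >> (PG_SHIFT +
	    (pglvl - lvl - 1) * NPTE_SHIFT)) & PTE_MASK));
}

/* 1 + 512 + 512^2 + ... + 512^(pglvl-1) table pages at most. */
static uint64_t
max_pages(int pglvl)
{
	uint64_t res;
	int i;

	for (res = 0, i = pglvl; i > 0; i--) {
		res *= NPTE;
		res++;
	}
	return (res);
}

int
main(void)
{
	uint64_t base = 0x7f1234567000ULL;	/* arbitrary example address */
	int lvl;

	for (lvl = 0; lvl < 4; lvl++)
		printf("lvl %d: pte index %d\n", lvl, pte_off(4, base, lvl));
	printf("table pages for a full 4-level tree: %ju\n",
	    (uintmax_t)max_pages(4));
	return (0);
}

The max_pages()-style bound is presumably what sizes the backing VM object in the IDX_TO_OFF(pglvl_max_pages(...)) calls listed above.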
intel_dmar.h
   61: int pglvl;  /* (c) The pagelevel */  (member)
  174: bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
amd_iommu.h
   50: u_int pglvl;  /* (c) Page table levels */  (member)
amd_drv.c
 1119: domain, domain->domain, domain->pglvl,  (in amdiommu_print_domain())
intel_drv.c
 1114: domain, domain->domain, domain->mgaw, domain->agaw, domain->pglvl,  (in dmar_print_domain())