Lines Matching +full:page +full:- +full:size

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
73 iommu_gaddr_t base, iommu_gaddr_t size, int flags,
77 * The cache of the identity mapping page tables for the DMARs. Using
78 * the cache saves a significant amount of memory for page tables by
79 * reusing the page tables, since usually DMARs are identical and have
81 * to match DMAR capabilities and page table format, to correctly
86 iommu_gaddr_t maxaddr; /* Page table covers the guest address
88 int pglvl; /* Total page table levels ignoring
90 int leaf; /* The last materialized page table
91 level; it is non-zero if superpages
93 vm_object_t pgtbl_obj; /* The page table pages */
104 * Build the next level of the page tables for the identity mapping.
105 * - lvl is the level to build;
106 * - idx is the index of the page table page in the pgtbl_obj, which is
108 * - addr is the starting address in the bus address space which is
109 * mapped by the page table page.
122 VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj); in dmar_idmap_nextlvl()
123 if (addr >= tbl->maxaddr) in dmar_idmap_nextlvl()
125 (void)iommu_pgalloc(tbl->pgtbl_obj, idx, IOMMU_PGF_OBJL | in dmar_idmap_nextlvl()
127 base = idx * IOMMU_NPTEPG + 1; /* Index of the first child page of idx */ in dmar_idmap_nextlvl()
128 pg_sz = pglvl_page_size(tbl->pglvl, lvl); in dmar_idmap_nextlvl()
129 if (lvl != tbl->leaf) { in dmar_idmap_nextlvl()
133 VM_OBJECT_WUNLOCK(tbl->pgtbl_obj); in dmar_idmap_nextlvl()
134 pte = iommu_map_pgtbl(tbl->pgtbl_obj, idx, IOMMU_PGF_WAITOK, &sf); in dmar_idmap_nextlvl()
135 if (lvl == tbl->leaf) { in dmar_idmap_nextlvl()
137 if (f >= tbl->maxaddr) in dmar_idmap_nextlvl()
144 if (f >= tbl->maxaddr) in dmar_idmap_nextlvl()
146 m1 = iommu_pgalloc(tbl->pgtbl_obj, base + i, in dmar_idmap_nextlvl()
148 KASSERT(m1 != NULL, ("lost page table page")); in dmar_idmap_nextlvl()
155 VM_OBJECT_WLOCK(tbl->pgtbl_obj); in dmar_idmap_nextlvl()
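
The comment at source lines 104-109 describes dmar_idmap_nextlvl() as a recursive builder: the page-table page at pindex idx and level lvl maps the bus address range starting at addr, its child pages live at pindex idx * IOMMU_NPTEPG + 1 onward, and the recursion stops at the leaf level or once addr reaches maxaddr. A minimal standalone sketch of that traversal shape, assuming the same 512-entry, 4 KiB geometry as above and only counting pages (no VM object, locking, or PTE writes):

#include <stdint.h>

#define SK_NPTEPG	512	/* assumed entries per page-table page */
#define SK_PG_SHIFT	12	/* assumed 4 KiB leaf page size */

/*
 * Count the page-table pages a dmar_idmap_nextlvl()-style recursion
 * would materialize for the identity map of [0, maxaddr).  Level 0 is
 * the root; child pages of page idx start at pindex idx * 512 + 1.
 */
static uint64_t
sk_idmap_walk(int pglvl, int leaf, int lvl, uint64_t idx, uint64_t addr,
    uint64_t maxaddr)
{
	uint64_t base, f, pg_sz, touched;
	int i;

	if (addr >= maxaddr)
		return (0);
	touched = 1;			/* page idx itself is materialized */
	if (lvl == leaf)
		return (touched);	/* leaf PTEs just map f to f */
	base = idx * SK_NPTEPG + 1;	/* pindex of the first child of idx */
	pg_sz = (uint64_t)1 << (SK_PG_SHIFT + 9 * (pglvl - lvl - 1));
	for (i = 0, f = addr; i < SK_NPTEPG; i++, f += pg_sz) {
		if (f >= maxaddr)
			break;
		touched += sk_idmap_walk(pglvl, leaf, lvl + 1, base + i, f,
		    maxaddr);
	}
	return (touched);
}

Under these assumptions, sk_idmap_walk(4, 3, 0, 0, 0, 1ULL << 32) evaluates to 2054: one root page, one level-1 page, four level-2 pages and 2048 leaf pages for a 4 GiB identity map without superpages.
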
159 * Find a ready and compatible identity-mapping page table in the
160 * cache. If not found, populate the identity-mapping page table for
181 for (i = 0; i < domain->pglvl; i++) { in dmar_get_idmap_pgtbl()
182 if (i == domain->pglvl - 1 || domain_is_sp_lvl(domain, i)) { in dmar_get_idmap_pgtbl()
189 * Search the cache for a compatible page table. Qualified in dmar_get_idmap_pgtbl()
190 * page table must map up to maxaddr, its level must be in dmar_get_idmap_pgtbl()
198 if (tbl->maxaddr >= maxaddr && in dmar_get_idmap_pgtbl()
199 dmar_pglvl_supported(domain->dmar, tbl->pglvl) && in dmar_get_idmap_pgtbl()
200 tbl->leaf == leaf) { in dmar_get_idmap_pgtbl()
201 res = tbl->pgtbl_obj; in dmar_get_idmap_pgtbl()
204 domain->pglvl = tbl->pglvl; /* XXXKIB ? */ in dmar_get_idmap_pgtbl()
217 if (tbl->maxaddr >= maxaddr && in dmar_get_idmap_pgtbl()
218 dmar_pglvl_supported(domain->dmar, tbl->pglvl) && in dmar_get_idmap_pgtbl()
219 tbl->leaf == leaf) { in dmar_get_idmap_pgtbl()
220 res = tbl->pgtbl_obj; in dmar_get_idmap_pgtbl()
223 domain->pglvl = tbl->pglvl; /* XXXKIB ? */ in dmar_get_idmap_pgtbl()
229 * Still not found, create a new page table. in dmar_get_idmap_pgtbl()
232 tbl->pglvl = domain->pglvl; in dmar_get_idmap_pgtbl()
233 tbl->leaf = leaf; in dmar_get_idmap_pgtbl()
234 tbl->maxaddr = maxaddr; in dmar_get_idmap_pgtbl()
235 tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL, in dmar_get_idmap_pgtbl()
236 IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL); in dmar_get_idmap_pgtbl()
241 VM_OBJECT_WLOCK(tbl->pgtbl_obj); in dmar_get_idmap_pgtbl()
243 VM_OBJECT_WUNLOCK(tbl->pgtbl_obj); in dmar_get_idmap_pgtbl()
245 res = tbl->pgtbl_obj; in dmar_get_idmap_pgtbl()
256 * page table creation, since the DMAR which was passed at the in dmar_get_idmap_pgtbl()
263 unit = domain->dmar; in dmar_get_idmap_pgtbl()
271 if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) { in dmar_get_idmap_pgtbl()
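
The search above (source lines 189-223) runs twice, once under the shared and once under the exclusive cache lock, but the compatibility test is the same: the cached table must reach at least the requested maxaddr, use a level count the unit supports, and have the same leaf level so superpage usage matches. A minimal standalone sketch of that predicate over a flat array, with hypothetical names and none of the kernel's locking or retry logic:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct sk_idpgtbl {
	uint64_t maxaddr;	/* highest bus address covered */
	int	 pglvl;		/* number of page-table levels */
	int	 leaf;		/* last materialized level */
};

/*
 * A cached table is reusable when it covers at least the requested
 * address range, was built with a level count this unit can walk,
 * and materializes the same leaf level.
 */
static struct sk_idpgtbl *
sk_idmap_cache_lookup(struct sk_idpgtbl *cache, size_t n,
    uint64_t maxaddr, int leaf, bool (*pglvl_ok)(int pglvl))
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (cache[i].maxaddr >= maxaddr &&
		    pglvl_ok(cache[i].pglvl) &&
		    cache[i].leaf == leaf)
			return (&cache[i]);
	}
	return (NULL);
}
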
281 * Return a reference to the identity mapping page table to the cache.
290 KASSERT(obj->ref_count >= 2, ("lost cache reference")); in dmar_put_idmap_pgtbl()
294 * Cache always owns one last reference on the page table object. in dmar_put_idmap_pgtbl()
297 if (obj->ref_count > 1) { in dmar_put_idmap_pgtbl()
304 * page table object, returning the page table pages to the in dmar_put_idmap_pgtbl()
310 rmobj = tbl->pgtbl_obj; in dmar_put_idmap_pgtbl()
311 if (rmobj->ref_count == 1) { in dmar_put_idmap_pgtbl()
314 rmobj->resident_page_count); in dmar_put_idmap_pgtbl()
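
dmar_put_idmap_pgtbl() relies on a simple reference protocol: the cache always keeps one reference of its own, every user of the table holds another, and when a put leaves only the cache's reference the table can be torn down and its pages returned to the system. A tiny sketch of that protocol, assuming a single counter and ignoring the kernel's object locking and the list walk that frees every now-idle table:

#include <stdbool.h>

/*
 * ref_count is the cache's own reference plus one per current user
 * of the cached identity page table.
 */
struct sk_cached_tbl {
	int ref_count;
};

static void
sk_tbl_get(struct sk_cached_tbl *t)
{
	t->ref_count++;		/* one more user shares the cached table */
}

static bool
sk_tbl_put(struct sk_cached_tbl *t)
{
	t->ref_count--;		/* drop the caller's reference */
	if (t->ref_count > 1)
		return (false);	/* other users remain, keep the table */
	t->ref_count--;		/* drop the cache's own reference too */
	return (true);		/* caller may now free the table's pages */
}
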
339 idx = pglvl_pgtbl_get_pindex(domain->pglvl, base, lvl); in dmar_pgtbl_map_pte()
347 pte = iommu_map_pgtbl(domain->pgtbl_obj, idx, flags, sf); in dmar_pgtbl_map_pte()
350 ("lost root page table page %p", domain)); in dmar_pgtbl_map_pte()
352 * Page table page does not exist, allocate in dmar_pgtbl_map_pte()
353 * it and create a pte in the preceding page level in dmar_pgtbl_map_pte()
354 * to reference the allocated page table page. in dmar_pgtbl_map_pte()
356 m = iommu_pgalloc(domain->pgtbl_obj, idx, flags | in dmar_pgtbl_map_pte()
371 ptep = dmar_pgtbl_map_pte(domain, base, lvl - 1, in dmar_pgtbl_map_pte()
374 KASSERT(m->pindex != 0, in dmar_pgtbl_map_pte()
375 ("loosing root page %p", domain)); in dmar_pgtbl_map_pte()
377 iommu_pgfree(domain->pgtbl_obj, m->pindex, in dmar_pgtbl_map_pte()
381 dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W | in dmar_pgtbl_map_pte()
383 dmar_flush_pte_to_ram(domain->dmar, ptep); in dmar_pgtbl_map_pte()
391 pte += pglvl_pgtbl_pte_off(domain->pglvl, base, lvl); in dmar_pgtbl_map_pte()
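
The comment at source lines 352-354 explains the demand-allocation step of dmar_pgtbl_map_pte(): if the page-table page for a level is missing, it is allocated and a PTE referencing it is installed in the preceding level (the kernel does this recursively from the requested level upward). A standalone top-down equivalent, assuming a pointer-linked table with 512 entries per page and hypothetical names:

#include <stdint.h>
#include <stdlib.h>

#define SK_NPTEPG	512	/* assumed entries per table page */

struct sk_node {
	struct sk_node *child[SK_NPTEPG];	/* next-level pages */
};

/* Index selected by addr at level lvl of a pglvl-level table. */
static unsigned
sk_pte_off(int pglvl, int lvl, uint64_t addr)
{
	return ((addr >> (12 + 9 * (pglvl - lvl - 1))) & (SK_NPTEPG - 1));
}

/*
 * Walk down to level lvl for addr, allocating any missing
 * intermediate page and linking it from its parent entry.
 * Returns the page whose entry corresponds to addr at that level,
 * or NULL on allocation failure.
 */
static struct sk_node *
sk_walk(struct sk_node *root, int pglvl, int lvl, uint64_t addr)
{
	struct sk_node *n, **slot;
	int l;

	n = root;
	for (l = 0; l < lvl; l++) {
		slot = &n->child[sk_pte_off(pglvl, l, addr)];
		if (*slot == NULL) {
			*slot = calloc(1, sizeof(struct sk_node));
			if (*slot == NULL)
				return (NULL);
		}
		n = *slot;
	}
	return (n);
}
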
397 iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags, in dmar_map_buf_locked() argument
413 for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz, in dmar_map_buf_locked()
418 if (lvl == domain->pglvl - 1) in dmar_map_buf_locked()
427 * Next, look at the size of the mapping and in dmar_map_buf_locked()
430 if (size < pg_sz || (base & (pg_sz - 1)) != 0 || in dmar_map_buf_locked()
431 (VM_PAGE_TO_PHYS(ma[pi]) & (pg_sz - 1)) != 0) in dmar_map_buf_locked()
437 VM_PAGE_TO_PHYS(ma[pi + c - 1]) + in dmar_map_buf_locked()
447 KASSERT(size >= pg_sz, in dmar_map_buf_locked()
449 (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz)); in dmar_map_buf_locked()
457 dmar_unmap_buf_locked(domain, base1, base - base1, in dmar_map_buf_locked()
462 dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags | in dmar_map_buf_locked()
464 dmar_flush_pte_to_ram(domain->dmar, pte); in dmar_map_buf_locked()
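
The inner loop of dmar_map_buf_locked() picks the largest usable page size for each chunk: a superpage level only qualifies if the remaining size covers it, both the guest address and the host physical address are aligned to it, and the backing 4 KiB pages are physically contiguous for its whole length. A minimal standalone check of those conditions, assuming a plain array of host physical addresses instead of vm_page_t pointers:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SK_PGSZ	4096ULL		/* assumed base IOMMU page size */

/*
 * Decide whether a pg_sz superpage can map the chunk starting at
 * guest address base, backed by the host pages phys[pi..]: the
 * remaining size must cover it, both addresses must be aligned to
 * pg_sz, and the backing pages must be physically contiguous.
 */
static bool
sk_superpage_fits(uint64_t base, uint64_t size, const uint64_t *phys,
    size_t pi, uint64_t pg_sz)
{
	uint64_t c, run;

	if (size < pg_sz || (base & (pg_sz - 1)) != 0 ||
	    (phys[pi] & (pg_sz - 1)) != 0)
		return (false);
	run = pg_sz / SK_PGSZ;
	for (c = 1; c < run; c++) {
		if (phys[pi + c] != phys[pi + c - 1] + SK_PGSZ)
			return (false);
	}
	return (true);
}
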
479 iommu_gaddr_t base, size; in dmar_map_buf() local
483 base = entry->start; in dmar_map_buf()
484 size = entry->end - entry->start; in dmar_map_buf()
492 unit = domain->dmar; in dmar_map_buf()
494 KASSERT((iodom->flags & IOMMU_DOMAIN_IDMAP) == 0, in dmar_map_buf()
497 ("non-aligned base %p %jx %jx", domain, (uintmax_t)base, in dmar_map_buf()
498 (uintmax_t)size)); in dmar_map_buf()
499 KASSERT((size & IOMMU_PAGE_MASK) == 0, in dmar_map_buf()
500 ("non-aligned size %p %jx %jx", domain, (uintmax_t)base, in dmar_map_buf()
501 (uintmax_t)size)); in dmar_map_buf()
502 KASSERT(size > 0, ("zero size %p %jx %jx", domain, (uintmax_t)base, in dmar_map_buf()
503 (uintmax_t)size)); in dmar_map_buf()
504 KASSERT(base < (1ULL << domain->agaw), in dmar_map_buf()
506 (uintmax_t)size, domain->agaw)); in dmar_map_buf()
507 KASSERT(base + size < (1ULL << domain->agaw), in dmar_map_buf()
509 (uintmax_t)size, domain->agaw)); in dmar_map_buf()
510 KASSERT(base + size > base, in dmar_map_buf()
511 ("size overflow %p %jx %jx", domain, (uintmax_t)base, in dmar_map_buf()
512 (uintmax_t)size)); in dmar_map_buf()
519 (unit->hw_ecap & DMAR_ECAP_SC) != 0, in dmar_map_buf()
523 (unit->hw_ecap & DMAR_ECAP_DI) != 0, in dmar_map_buf()
529 error = dmar_map_buf_locked(domain, base, size, ma, pflags, flags, in dmar_map_buf()
535 if ((unit->hw_cap & DMAR_CAP_CM) != 0) in dmar_map_buf()
536 dmar_flush_iotlb_sync(domain, base, size); in dmar_map_buf()
537 else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) { in dmar_map_buf()
571 dmar_pte_clear(&pte->pte); in dmar_unmap_clear_pte()
572 dmar_flush_pte_to_ram(domain->dmar, pte); in dmar_unmap_clear_pte()
583 KASSERT(m->pindex != 0, in dmar_unmap_clear_pte()
586 iommu_pgfree(domain->pgtbl_obj, m->pindex, flags, entry); in dmar_unmap_clear_pte()
587 dmar_free_pgtbl_pde(domain, base, lvl - 1, flags, entry); in dmar_unmap_clear_pte()
595 iommu_gaddr_t size, int flags, struct iommu_map_entry *entry) in dmar_unmap_buf_locked() argument
604 if (size == 0) in dmar_unmap_buf_locked()
607 KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0, in dmar_unmap_buf_locked()
610 ("non-aligned base %p %jx %jx", domain, (uintmax_t)base, in dmar_unmap_buf_locked()
611 (uintmax_t)size)); in dmar_unmap_buf_locked()
612 KASSERT((size & IOMMU_PAGE_MASK) == 0, in dmar_unmap_buf_locked()
613 ("non-aligned size %p %jx %jx", domain, (uintmax_t)base, in dmar_unmap_buf_locked()
614 (uintmax_t)size)); in dmar_unmap_buf_locked()
615 KASSERT(base < (1ULL << domain->agaw), in dmar_unmap_buf_locked()
617 (uintmax_t)size, domain->agaw)); in dmar_unmap_buf_locked()
618 KASSERT(base + size < (1ULL << domain->agaw), in dmar_unmap_buf_locked()
620 (uintmax_t)size, domain->agaw)); in dmar_unmap_buf_locked()
621 KASSERT(base + size > base, in dmar_unmap_buf_locked()
622 ("size overflow %p %jx %jx", domain, (uintmax_t)base, in dmar_unmap_buf_locked()
623 (uintmax_t)size)); in dmar_unmap_buf_locked()
630 for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) { in dmar_unmap_buf_locked()
631 for (lvl = 0; lvl < domain->pglvl; lvl++) { in dmar_unmap_buf_locked()
632 if (lvl != domain->pglvl - 1 && in dmar_unmap_buf_locked()
636 if (pg_sz > size) in dmar_unmap_buf_locked()
641 ("sleeping or page missed %p %jx %d 0x%x", in dmar_unmap_buf_locked()
643 if ((pte->pte & DMAR_PTE_SP) != 0 || in dmar_unmap_buf_locked()
644 lvl == domain->pglvl - 1) { in dmar_unmap_buf_locked()
650 KASSERT(size >= pg_sz, in dmar_unmap_buf_locked()
652 (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz)); in dmar_unmap_buf_locked()
675 error = dmar_unmap_buf_locked(domain, entry->start, entry->end - in dmar_unmap_buf()
676 entry->start, flags, entry); in dmar_unmap_buf()
687 KASSERT(domain->pgtbl_obj == NULL, in dmar_domain_alloc_pgtbl()
690 unit = domain->dmar; in dmar_domain_alloc_pgtbl()
691 domain->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL, in dmar_domain_alloc_pgtbl()
692 IDX_TO_OFF(pglvl_max_pages(domain->pglvl)), 0, 0, NULL); in dmar_domain_alloc_pgtbl()
693 if (unit->memdomain != -1) { in dmar_domain_alloc_pgtbl()
694 domain->pgtbl_obj->domain.dr_policy = DOMAINSET_PREF( in dmar_domain_alloc_pgtbl()
695 unit->memdomain); in dmar_domain_alloc_pgtbl()
698 m = iommu_pgalloc(domain->pgtbl_obj, 0, IOMMU_PGF_WAITOK | in dmar_domain_alloc_pgtbl()
700 /* No implicit free of the top level page table page. */ in dmar_domain_alloc_pgtbl()
704 domain->iodom.flags |= IOMMU_DOMAIN_PGTBL_INITED; in dmar_domain_alloc_pgtbl()
716 obj = domain->pgtbl_obj; in dmar_domain_free_pgtbl()
718 KASSERT((domain->dmar->hw_ecap & DMAR_ECAP_PT) != 0 && in dmar_domain_free_pgtbl()
719 (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0, in dmar_domain_free_pgtbl()
724 domain->pgtbl_obj = NULL; in dmar_domain_free_pgtbl()
726 if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0) { in dmar_domain_free_pgtbl()
728 domain->iodom.flags &= ~IOMMU_DOMAIN_IDMAP; in dmar_domain_free_pgtbl()
761 iommu_gaddr_t size) in dmar_flush_iotlb_sync() argument
768 unit = domain->dmar; in dmar_flush_iotlb_sync()
769 KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call", in dmar_flush_iotlb_sync()
770 unit->iommu.unit)); in dmar_flush_iotlb_sync()
771 iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16; in dmar_flush_iotlb_sync()
773 if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) { in dmar_flush_iotlb_sync()
775 DMAR_IOTLB_DID(domain->domain), iro); in dmar_flush_iotlb_sync()
778 ("dmar%d: invalidation failed %jx", unit->iommu.unit, in dmar_flush_iotlb_sync()
781 for (; size > 0; base += isize, size -= isize) { in dmar_flush_iotlb_sync()
782 am = calc_am(unit, base, size, &isize); in dmar_flush_iotlb_sync()
786 DMAR_IOTLB_DID(domain->domain), iro); in dmar_flush_iotlb_sync()
790 "iotlbr 0x%jx base 0x%jx size 0x%jx am %d", in dmar_flush_iotlb_sync()
791 unit->iommu.unit, (uintmax_t)iotlbr, in dmar_flush_iotlb_sync()
792 (uintmax_t)base, (uintmax_t)size, am)); in dmar_flush_iotlb_sync()
794 * Any non-page granularity covers whole guest in dmar_flush_iotlb_sync()
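
dmar_flush_iotlb_sync() falls back to a full domain invalidation when page-selective invalidation is unsupported or the range is large; otherwise it chops the range into naturally aligned power-of-two chunks, where calc_am() picks the address-mask value (am) for each chunk. A standalone approximation of that computation, assuming 4 KiB IOMMU pages and a made-up hardware limit on am:

#include <stdint.h>

#define SK_PG_SHIFT	12	/* assumed 4 KiB IOMMU page size */
#define SK_MAX_AM	6	/* made-up cap on the address-mask value */

/*
 * Pick the largest naturally aligned power-of-two chunk (2^am pages)
 * that starts at base and fits inside the remaining size.  The caller
 * issues one page-selective invalidation per chunk and advances base
 * by the returned chunk size.
 */
static int
sk_calc_am(uint64_t base, uint64_t size, uint64_t *isizep)
{
	uint64_t isize;
	int am;

	for (am = SK_MAX_AM;; am--) {
		isize = (uint64_t)1 << (am + SK_PG_SHIFT);
		if ((base & (isize - 1)) == 0 && isize <= size)
			break;
		if (am == 0)
			break;
	}
	*isizep = isize;
	return (am);
}

For example, under these assumptions a range starting at 0x201000 with size 0x7000 is split into chunks of am 0 (one page), am 1 (two pages) and am 2 (four pages).
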