/* Lines matching: +full:page +full:-level */
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020-2021 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014-2021 Andrew Turner
 * Copyright (c) 2014-2016 The FreeBSD Foundation
#define SMMU_PMAP_LOCK(pmap)    mtx_lock(&(pmap)->sp_mtx)
#define SMMU_PMAP_UNLOCK(pmap)    mtx_unlock(&(pmap)->sp_mtx)
#define SMMU_PMAP_LOCK_ASSERT(pmap, type) \
    mtx_assert(&(pmap)->sp_mtx, (type))
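
/*
 * A minimal usage sketch, not from the excerpt: every table update happens
 * under the per-pmap mutex that these macros wrap. The function name is
 * illustrative.
 */
static void
example_locked_update(struct smmu_pmap *pmap)
{

    SMMU_PMAP_LOCK(pmap);
    /* ... modify page table entries for this pmap ... */
    SMMU_PMAP_UNLOCK(pmap);
}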
/* smmu_pmap_l0() */
    return (&pmap->sp_l0[smmu_l0_index(va)]);
/*
 * Returns the lowest valid pde for a given virtual address.
 * The next level may or may not point to a valid page or block.
 */
smmu_pmap_pde(struct smmu_pmap *pmap, vm_offset_t va, int *level)
    *level = -1;
    *level = 0;
    *level = 1;
    *level = 2;
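
/*
 * Hedged usage sketch, not from the excerpt: the out-parameter reports how
 * deep the walk got; -1 means even the L0 entry was invalid, while level 2
 * means an L3 page table exists for "va". Names are illustrative.
 */
static bool
example_has_l3_table(struct smmu_pmap *pmap, vm_offset_t va)
{
    pd_entry_t *pde;
    int lvl;

    pde = smmu_pmap_pde(pmap, va, &lvl);
    return (pde != NULL && lvl == 2);
}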
/*
 * Returns the lowest valid pte block or table entry for a given virtual
 * address. If there are no valid entries return NULL and set the level to
 * the first invalid level.
 */
smmu_pmap_pte(struct smmu_pmap *pmap, vm_offset_t va, int *level)
    *level = 0;
    *level = 1;
    *level = 1;
    *level = 2;
    *level = 2;
    *level = 3;
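
/*
 * Hedged usage sketch: a regular 4K mapping must resolve to a leaf L3
 * entry, which is exactly what the KASSERT in smmu_pmap_remove() further
 * below ("Invalid SMMU pagetable level: %d != 3") enforces. Illustrative
 * names only.
 */
static pt_entry_t *
example_lookup_l3(struct smmu_pmap *pmap, vm_offset_t va)
{
    pt_entry_t *pte;
    int lvl;

    pte = smmu_pmap_pte(pmap, va, &lvl);
    if (pte == NULL || lvl != 3)
        return (NULL);    /* unmapped, or covered by a larger block */
    return (pte);
}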
/* smmu_pmap_resident_count_inc() */
    pmap->sp_resident_count += count;

/* smmu_pmap_resident_count_dec() */
    KASSERT(pmap->sp_resident_count >= count,
        pmap->sp_resident_count, count));
    pmap->sp_resident_count -= count;
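
/*
 * Note (inferred from the fragments above): sp_resident_count tracks how
 * many page table pages the pmap holds; the increment/decrement helpers
 * pair with page table page allocation and release, and the KASSERT
 * catches a decrement past zero.
 */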
/*
 * Page table page management routines.....
 */

/*
 * Schedule the specified unused page table page to be freed. Specifically,
 * add the page to the specified list of pages that will be released to the
 * physical memory manager after the TLB has been updated.
 */
/* smmu_pmap_add_delayed_free_list() */
    if (set_PG_ZERO)
        m->flags |= PG_ZERO;
    else
        m->flags &= ~PG_ZERO;
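
/*
 * Hedged usage sketch: a caller collects page table pages on a local
 * spglist and hands them back to the VM system only after the TLB update,
 * per the comment above. The three-argument form of
 * smmu_pmap_add_delayed_free_list() is assumed from the arm64 pmap code
 * this file derives from; vm_page_free_pages_toq() is the stock FreeBSD
 * bulk-free routine.
 */
static void
example_delayed_free(vm_page_t m)
{
    struct spglist free;

    SLIST_INIT(&free);
    smmu_pmap_add_delayed_free_list(m, &free, TRUE);
    /* ... update/invalidate the TLB ... */
    vm_page_free_pages_toq(&free, true);
}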
/*
 * Low level mapping routines.....
 */

/*
 * Decrements a page table page's reference count, which is used to record the
 * number of valid page table entries within the page. If the reference count
 * drops to zero, then the page table page is unmapped. Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
 */
/* smmu_pmap_unwire_l3() */
    --m->ref_count;
    if (m->ref_count == 0) {
/* _smmu_pmap_unwire_l3() */
    /*
     * unmap the page table page
     */
    if (m->pindex >= (NUL2E + NUL1E)) {
        /* l1 page */
    } else if (m->pindex >= NUL2E) {
        /* l2 page */
        /* l3 page */
    if (m->pindex < NUL2E) {
    } else if (m->pindex < (NUL2E + NUL1E)) {
    /*
     * Put page on a list so that it is released after
     * *ALL* TLB shootdown is done
     */
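
/*
 * Note (inferred from the pindex comparisons above): page table pages are
 * distinguished by pindex range, assuming NUL1E/NUL2E count the L1 and L2
 * entries as in the arm64 pmap:
 *
 *   pindex <  NUL2E                  -> L3 page table page
 *   NUL2E <= pindex < NUL2E + NUL1E  -> L2 page
 *   pindex >= NUL2E + NUL1E          -> L1 page
 */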
/* smmu_pmap_pinit() */
    /*
     * allocate the l0 page
     */
    pmap->sp_l0_paddr = VM_PAGE_TO_PHYS(m);
    pmap->sp_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->sp_l0_paddr);
    pmap->sp_resident_count = 0;
    mtx_init(&pmap->sp_mtx, "smmu pmap", NULL, MTX_DEF);
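
/*
 * Hedged lifecycle sketch: smmu_pmap_pinit() allocates the L0 table page
 * and initializes the mutex; smmu_pmap_release() (excerpted below) asserts
 * that sp_resident_count has already dropped to zero, so all mappings must
 * be torn down first. The single-argument signatures are assumptions.
 */
static void
example_pmap_lifecycle(struct smmu_pmap *sp)
{

    smmu_pmap_pinit(sp);
    /* ... smmu_pmap_enter() / smmu_pmap_remove() traffic ... */
    smmu_pmap_remove_pages(sp);
    smmu_pmap_release(sp);
}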
/*
 * This routine is called if the desired page table page does not exist.
 *
 * If page table page allocation fails, this routine may sleep before
 * retrying.
 *
 * Note: If a page allocation fails at page table level two or three,
 * one or two pages may be held during the wait, only to be released
 * afterwards.  This conservative approach is easily argued to avoid
 * race conditions.
 */
/* _pmap_alloc_l3() */
    /*
     * Allocate a page table page.
     */
        /*
         * Indicate the need to retry. While waiting, the page table
         * page may have been allocated.
         */
    m->pindex = ptepindex;

    /*
     * ... "m" to the page table. Otherwise, a page table walk by another
     * processor's MMU could see the mapping to "m" and a stale, non-zero
     * PTE within "m".
     */

    /*
     * Map the pagetable page into the process address space, if
     * it isn't already there.
     */
        l0index = ptepindex - (NUL2E + NUL1E);
        l0 = &pmap->sp_l0[l0index];

        l1index = ptepindex - NUL2E;
        l0 = &pmap->sp_l0[l0index];
        /* recurse for allocating page dir */
        l1pg->ref_count++;

        l0 = &pmap->sp_l0[l0index];
        /* recurse for allocating page dir */
        /* recurse for allocating page dir */
        l2pg->ref_count++;
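
/*
 * Note (inferred from the fragments above): when an intermediate table is
 * missing, _pmap_alloc_l3() recurses with the parent's pindex to allocate
 * the page directory first ("recurse for allocating page dir"); when the
 * parent already exists it just takes a reference (l1pg->ref_count++,
 * l2pg->ref_count++). The pindex arithmetic mirrors the ranges used by
 * _smmu_pmap_unwire_l3(): l0index = ptepindex - (NUL2E + NUL1E) for L1
 * pages and l1index = ptepindex - NUL2E for L2 pages.
 */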
/* smmu_pmap_release() */
    KASSERT(pmap->sp_resident_count == 0,
        pmap->sp_resident_count));
    m = PHYS_TO_VM_PAGE(pmap->sp_l0_paddr);
    mtx_destroy(&pmap->sp_mtx);
/*
 * page management routines.
 */
/* pmap_gpu_enter() */
    CTR2(KTR_PMAP, "pmap_gpu_enter: %.16lx -> %.16lx", va, pa);
    /*
     * In the case that a page table page is not
     * resident, we are creating it here.
     */

/* smmu_pmap_enter() */
    CTR2(KTR_PMAP, "pmap_senter: %.16lx -> %.16lx", va, pa);
    /*
     * In the case that a page table page is not
     * resident, we are creating it here.
     */
770 ("Invalid SMMU pagetable level: %d != 3", lvl)); in smmu_pmap_remove()
/* smmu_pmap_remove_pages() */
        l0e = pmap->sp_l0[i];
        smmu_pmap_clear(&pmap->sp_l0[i]);
    KASSERT(pmap->sp_resident_count == 0,
        ("Invalid resident count %jd", pmap->sp_resident_count));