Searched refs:vm_page_array (Results 1 – 13 of 13) sorted by relevance

/freebsd/sys/amd64/include/
asan.h
    59   kernmin = vm_page_array == NULL ? VM_MIN_KERNEL_ADDRESS :  in kasan_md_unsupported()
    60       (vm_offset_t)(vm_page_array + vm_page_array_size);  in kasan_md_unsupported()
/freebsd/sys/kern/
subr_sfbuf.c
    68   #define SF_BUF_HASH(m) (((m) - vm_page_array) & sf_buf_hashmask)
/freebsd/sys/powerpc/aim/
slb.c
    222  va >= (vm_offset_t)vm_page_array &&  in kernel_va_to_slbv()
    223  va <= (uintptr_t)(&vm_page_array[vm_page_array_size]))  in kernel_va_to_slbv()
mmu_radix.c
    2068  * vm_page_array (upper bound).  in mmu_radix_late_bootstrap()
    2795  KASSERT(mpte >= vm_page_array &&  in pmap_promote_l3e()
    2796      mpte < &vm_page_array[vm_page_array_size],  in pmap_promote_l3e()
    3690  KASSERT(mpte >= vm_page_array &&  in mmu_radix_init()
    3691      mpte < &vm_page_array[vm_page_array_size],  in mmu_radix_init()
    5540  m < &vm_page_array[vm_page_array_size],  in mmu_radix_remove_pages()
    6445  /* TODO: NUMA vm_page_array. Blocked out until then (copied from amd64). */  in mmu_radix_page_array_startup()
    6466  vm_page_array = (vm_page_t)start;  in mmu_radix_page_array_startup()
mmu_oea64.c
    3449  vm_page_array = (vm_page_t)vm_page_base;  in moea64_page_array_startup()
    3495  vm_page_array = (vm_page_t)vm_page_base;  in moea64_page_array_startup()
/freebsd/sys/vm/
vm_kern.c
    848  * that handle vm_page_array allocation can simply adjust virtual_avail  in kmem_init()
    851  (void)vm_map_insert(kernel_map, NULL, 0, (vm_offset_t)vm_page_array,  in kmem_init()
    852      (vm_offset_t)vm_page_array + round_2mpage(vm_page_array_size *  in kmem_init()
vm_phys.c
    507   * Requires that vm_page_array is initialized!
    578   seg->first_page = &vm_page_array[npages];  in vm_phys_init()
    1078  fp = &vm_page_array[pi - first_page];  in vm_phys_fictitious_reg_range()
    1082  * of vm_page_array, but ends outside of it.  in vm_phys_fictitious_reg_range()
    1084  * Use vm_page_array pages for those that are  in vm_phys_fictitious_reg_range()
    1085  * inside of the vm_page_array range, and  in vm_phys_fictitious_reg_range()
    1096  * We can allocate the full range from vm_page_array,  in vm_phys_fictitious_reg_range()
    1103  * We have a segment that ends inside of vm_page_array,  in vm_phys_fictitious_reg_range()
    1106  fp = &vm_page_array[0];  in vm_phys_fictitious_reg_range()
    1116  * and after vm_page_array  in vm_phys_fictitious_reg_range()
    [all...]
vm_page.h
    500  extern vm_page_t vm_page_array;  /* First resident page in table */
vm_page.c
    151   vm_page_t vm_page_array;  (variable definition)
    537   vm_page_array = (vm_page_t)pmap_map(vaddr, new_end, end,  in vm_page_array_alloc()
    775   m = &vm_page_array[ii];  in vm_page_startup()
    1292  m = &vm_page_array[pi - first_page];  in PHYS_TO_VM_PAGE()
/freebsd/sys/i386/i386/
pmap.c
    974   KASSERT(mpte >= vm_page_array &&  in __CONCAT()
    975       mpte < &vm_page_array[vm_page_array_size],  in __CONCAT()
    3595  KASSERT(mpte >= vm_page_array &&  in pmap_promote_pde()
    3596      mpte < &vm_page_array[vm_page_array_size],  in pmap_promote_pde()
    4906  m < &vm_page_array[vm_page_array_size],  in __CONCAT()
/freebsd/sys/amd64/amd64/
pmap.c
    2518  KASSERT(mpte >= vm_page_array &&  in pmap_init()
    2519      mpte < &vm_page_array[vm_page_array_size],  in pmap_init()
    5109  vm_page_array = (vm_page_t)start;  in pmap_page_array_startup()
    7034  KASSERT(mpte >= vm_page_array &&  in pmap_promote_pde()
    7035      mpte < &vm_page_array[vm_page_array_size],  in pmap_promote_pde()
    8661  m < &vm_page_array[vm_page_array_size],  in pmap_remove_pages()
/freebsd/sys/arm64/arm64/
pmap.c
    4896  KASSERT(mpte >= vm_page_array &&  in pmap_promote_l2()
    4897      mpte < &vm_page_array[vm_page_array_size],  in pmap_promote_l2()
    6979  m < &vm_page_array[vm_page_array_size],  in pmap_remove_pages()
/freebsd/sys/riscv/riscv/
pmap.c
    4278  m < &vm_page_array[vm_page_array_size],  in pmap_remove_pages()
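
Nearly every hit above is one of two idioms: indexing the resident page table by page frame number, as in PHYS_TO_VM_PAGE() (vm_page.c line 1292), and the KASSERT bounds check repeated across the pmap files, which asserts that a vm_page_t pointer falls inside vm_page_array. Below is a minimal userspace sketch of both idioms, not FreeBSD kernel code: the struct vm_page layout, the 4 KiB page size, and main() are invented for illustration, while vm_page_array, vm_page_array_size, and first_page mirror the kernel globals declared in sys/vm/vm_page.h.

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12                    /* assumed 4 KiB page size */
    #define atop(pa)   ((pa) >> PAGE_SHIFT)  /* physical address -> page frame number */

    struct vm_page { unsigned long phys_addr; };    /* placeholder layout */

    static struct vm_page *vm_page_array;   /* first resident page in table */
    static long vm_page_array_size;         /* number of entries in the table */
    static long first_page;                 /* first page frame number covered */

    /*
     * Userspace analogue of PHYS_TO_VM_PAGE(): one struct vm_page per
     * physical page, indexed by page frame number minus first_page.
     */
    static struct vm_page *
    phys_to_vm_page(unsigned long pa)
    {
            struct vm_page *m = &vm_page_array[atop(pa) - first_page];

            /* The bounds check the pmap KASSERTs above perform in-kernel. */
            assert(m >= vm_page_array &&
                m < &vm_page_array[vm_page_array_size]);
            return (m);
    }

    int
    main(void)
    {
            first_page = atop(0x100000UL);   /* table covers RAM from 1 MiB up */
            vm_page_array_size = 1024;       /* ... for 1024 pages (4 MiB) */
            vm_page_array = calloc(vm_page_array_size, sizeof(*vm_page_array));

            struct vm_page *m = phys_to_vm_page(0x101000UL);
            printf("page index %td\n", m - vm_page_array);   /* prints 1 */
            free(vm_page_array);
            return (0);
    }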