Lines matching refs: va  (moea64 pmap; FreeBSD sys/powerpc/aim/mmu_oea64.c)

298 static void		moea64_syncicache(pmap_t pmap, vm_offset_t va,
366 static int moea64_sp_enter(pmap_t pmap, vm_offset_t va,
372 static void moea64_sp_promote(pmap_t pmap, vm_offset_t va, vm_page_t m);
440 void **va);
572 init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va) in init_pvo_entry() argument
581 va &= ~ADDR_POFF; in init_pvo_entry()
582 pvo->pvo_vaddr |= va; in init_pvo_entry()
583 vsid = va_to_vsid(pmap, va); in init_pvo_entry()
584 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT) in init_pvo_entry()
591 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift); in init_pvo_entry()
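
The init_pvo_entry() hits above show how a mapping's hash-table bucket is derived: the effective address is page-aligned, its segment is translated to a VSID, and the VSID is XORed with the page index within the segment (line 591's `shift` is ADDR_PIDX_SHFT for 4K pages and the large-page shift otherwise). A minimal standalone sketch of that arithmetic; the constant values below are illustrative assumptions, not the real FreeBSD definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define ADDR_POFF       0x0fffULL           /* offset within a 4K page */
    #define ADDR_PIDX       0x0ffff000ULL       /* page index bits (assumed width) */
    #define ADDR_PIDX_SHFT  12
    #define VSID_HASH_MASK  0x0007ffffffffffffULL /* assumed width */

    static uint64_t
    hash_for(uint64_t vsid, uint64_t va)
    {
        va &= ~ADDR_POFF;   /* page-align, as line 581 does */
        return ((vsid & VSID_HASH_MASK) ^ ((va & ADDR_PIDX) >> ADDR_PIDX_SHFT));
    }

    int
    main(void)
    {
        printf("bucket %#jx\n", (uintmax_t)hash_for(0x123456789ULL, 0xdeadb000ULL));
        return (0);
    }
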
787 moea64_bootstrap_slb_prefault(vm_offset_t va, int large) in moea64_bootstrap_slb_prefault() argument
795 esid = va >> ADDR_SR_SHFT; in moea64_bootstrap_slb_prefault()
813 moea64_kenter_large(vm_offset_t va, vm_paddr_t pa, uint64_t attr, int bootstrap) in moea64_kenter_large() argument
824 init_pvo_entry(pvo, kernel_pmap, va); in moea64_kenter_large()
1144 vm_offset_t pa, va; in moea64_late_bootstrap() local
1189 for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH) in moea64_late_bootstrap()
1190 moea64_bootstrap_slb_prefault(va, 0); in moea64_late_bootstrap()
1219 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; in moea64_late_bootstrap()
1220 virtual_avail = va + kstack_pages * PAGE_SIZE; in moea64_late_bootstrap()
1221 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va); in moea64_late_bootstrap()
1222 thread0.td_kstack = va; in moea64_late_bootstrap()
1225 moea64_kenter(va, pa); in moea64_late_bootstrap()
1227 va += PAGE_SIZE; in moea64_late_bootstrap()
1235 va = virtual_avail; in moea64_late_bootstrap()
1237 while (va < virtual_avail) { in moea64_late_bootstrap()
1238 moea64_kenter(va, pa); in moea64_late_bootstrap()
1240 va += PAGE_SIZE; in moea64_late_bootstrap()
1248 va = virtual_avail; in moea64_late_bootstrap()
1250 while (va < virtual_avail) { in moea64_late_bootstrap()
1251 moea64_kenter(va, pa); in moea64_late_bootstrap()
1253 va += PAGE_SIZE; in moea64_late_bootstrap()
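
The three loops around lines 1219-1253 share one bootstrap pattern: carve a range out of virtual_avail, then wire it page by page with moea64_kenter(). Because this listing only shows lines that reference `va`, the statements that advance virtual_avail and pa between the assignment and the loop are elided; the sketch below reconstructs the assumed shape of the pattern with a stub kenter():

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static uintptr_t virtual_avail = 0x10000000UL;

    static void
    kenter(uintptr_t va, uintptr_t pa)  /* stand-in for moea64_kenter() */
    {
        printf("map va %#lx -> pa %#lx\n", (unsigned long)va, (unsigned long)pa);
    }

    static uintptr_t
    carve_and_wire(uintptr_t pa, size_t npages)
    {
        uintptr_t va, sva;

        sva = va = virtual_avail;
        virtual_avail += npages * PAGE_SIZE;    /* reserve the range first... */
        while (va < virtual_avail) {            /* ...then wire page by page */
            kenter(va, pa);
            pa += PAGE_SIZE;
            va += PAGE_SIZE;
        }
        return (sva);
    }

    int
    main(void)
    {
        carve_and_wire(0x100000UL, 3);
        return (0);
    }
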
1570 vm_offset_t va, off; in moea64_zero_page() local
1575 va = moea64_scratchpage_va[0]; in moea64_zero_page()
1578 __asm __volatile("dcbz 0,%0" :: "r"(va + off)); in moea64_zero_page()
1587 vm_offset_t va, off; in moea64_zero_page_dmap() local
1589 va = PHYS_TO_DMAP(pa); in moea64_zero_page_dmap()
1591 __asm __volatile("dcbz 0,%0" :: "r"(va + off)); in moea64_zero_page_dmap()
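
Both zero-page paths (lines 1578 and 1591) clear a page one cache block at a time with the PowerPC `dcbz` instruction rather than a memset. A hedged, compilable sketch; the 128-byte cache block size is an assumption, and on non-PowerPC hosts the loop falls back to memset:

    #include <stddef.h>
    #include <string.h>

    #define PAGE_SIZE  4096
    #define CACHELINE  128   /* assumed; the real size comes from the CPU */

    static void
    zero_page(void *page)
    {
        for (size_t off = 0; off < PAGE_SIZE; off += CACHELINE) {
    #if defined(__powerpc__) || defined(__powerpc64__)
            /* dcbz zeroes one whole data cache block in a single store */
            __asm__ __volatile__("dcbz 0,%0" :: "r"((char *)page + off));
    #else
            memset((char *)page + off, 0, CACHELINE);  /* portable stand-in */
    #endif
        }
    }

    int
    main(void)
    {
        static char page[PAGE_SIZE] __attribute__((aligned(CACHELINE)));
        zero_page(page);
        return (0);
    }
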
1651 moea64_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, in moea64_enter() argument
1668 return (moea64_sp_enter(pmap, va, m, prot, flags, psind)); in moea64_enter()
1693 init_pvo_entry(pvo, pmap, va); in moea64_enter()
1696 (tpvo = moea64_pvo_find_va(pmap, va & ~HPT_SP_MASK)) != NULL && in moea64_enter()
1700 __func__, (uintmax_t)va); in moea64_enter()
1754 moea64_syncicache(pmap, va, pa, PAGE_SIZE); in moea64_enter()
1769 (va & HPT_SP_MASK) == (pa & HPT_SP_MASK) && in moea64_enter()
1774 moea64_sp_promote(pmap, va, m); in moea64_enter()
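
Lines 1769-1774 show the 4K enter path opportunistically promoting to a superpage, which requires va and pa to be congruent modulo the superpage size so one large PTE can cover both. A standalone version of that alignment test (the 16 MB HPT_SP_SIZE is an assumption):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HPT_SP_SIZE  (16ULL * 1024 * 1024)  /* assumed superpage size */
    #define HPT_SP_MASK  (HPT_SP_SIZE - 1)

    /* va and pa can share one large PTE only if they sit at the same
     * offset inside their respective superpage frames. */
    static bool
    sp_alignment_ok(uint64_t va, uint64_t pa)
    {
        return ((va & HPT_SP_MASK) == (pa & HPT_SP_MASK));
    }

    int
    main(void)
    {
        printf("%d\n", sp_alignment_ok(0x11000000ULL, 0x41000000ULL)); /* 1 */
        return (0);
    }
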
1781 moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, in moea64_syncicache() argument
1801 __syncicache((void *)va, sz); in moea64_syncicache()
1811 (va & ADDR_POFF)), sz); in moea64_syncicache()
1835 vm_offset_t va; in moea64_enter_object() local
1844 va = start + ptoa(m->pindex - m_start->pindex); in moea64_enter_object()
1845 if ((va & HPT_SP_MASK) == 0 && va + HPT_SP_SIZE <= end && in moea64_enter_object()
1850 moea64_enter(pm, va, m, prot & in moea64_enter_object()
1861 moea64_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, in moea64_enter_quick() argument
1865 moea64_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE | in moea64_enter_quick()
1871 moea64_extract(pmap_t pm, vm_offset_t va) in moea64_extract() argument
1877 pvo = moea64_pvo_find_va(pm, va); in moea64_extract()
1881 pa = PVO_PADDR(pvo) | (va - PVO_VADDR(pvo)); in moea64_extract()
1893 moea64_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) in moea64_extract_and_hold() argument
1900 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); in moea64_extract_and_hold()
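
Both extract paths (line 1881 and, later, 2247) rebuild the physical address as the PVO's base frame ORed with the offset of va inside the mapping, which works even for large mappings where the offset spans more than one 4K page. A simplified sketch with a two-field stand-in for struct pvo_entry:

    #include <stdint.h>
    #include <stdio.h>

    struct pvo {
        uint64_t base_va;   /* PVO_VADDR(pvo): start of the mapping */
        uint64_t base_pa;   /* PVO_PADDR(pvo): backing frame */
    };

    static uint64_t
    extract_pa(const struct pvo *p, uint64_t va)
    {
        /* frame base OR offset-within-mapping, as on lines 1881/2247 */
        return (p->base_pa | (va - p->base_va));
    }

    int
    main(void)
    {
        struct pvo p = { 0x20000000ULL, 0x80000000ULL };
        printf("%#jx\n", (uintmax_t)extract_pa(&p, 0x20001234ULL)); /* 0x80001234 */
        return (0);
    }
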
1915 vm_offset_t va; in moea64_uma_page_alloc() local
1934 va = VM_PAGE_TO_PHYS(m); in moea64_uma_page_alloc()
1944 init_pvo_entry(pvo, kernel_pmap, va); in moea64_uma_page_alloc()
1952 return (void *)va; in moea64_uma_page_alloc()
2030 moea64_is_prefaultable(pmap_t pmap, vm_offset_t va) in moea64_is_prefaultable() argument
2036 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); in moea64_is_prefaultable()
2185 moea64_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) in moea64_kenter_attr() argument
2200 oldpvo = moea64_pvo_find_va(kernel_pmap, va); in moea64_kenter_attr()
2203 init_pvo_entry(pvo, kernel_pmap, va); in moea64_kenter_attr()
2214 panic("moea64_kenter: failed to enter va %#zx pa %#jx: %d", va, in moea64_kenter_attr()
2219 moea64_kenter(vm_offset_t va, vm_paddr_t pa) in moea64_kenter() argument
2222 moea64_kenter_attr(va, pa, VM_MEMATTR_DEFAULT); in moea64_kenter()
2230 moea64_kextract(vm_offset_t va) in moea64_kextract() argument
2240 if (va < VM_MIN_KERNEL_ADDRESS) in moea64_kextract()
2241 return (va & ~DMAP_BASE_ADDRESS); in moea64_kextract()
2244 pvo = moea64_pvo_find_va(kernel_pmap, va); in moea64_kextract()
2246 va)); in moea64_kextract()
2247 pa = PVO_PADDR(pvo) | (va - PVO_VADDR(pvo)); in moea64_kextract()
2256 moea64_kremove(vm_offset_t va) in moea64_kremove() argument
2258 moea64_remove(kernel_pmap, va, va + PAGE_SIZE); in moea64_kremove()
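
moea64_kextract() short-circuits direct-map addresses (lines 2240-2241): anything below VM_MIN_KERNEL_ADDRESS is treated as a DMAP alias, so the physical address falls out by masking off DMAP_BASE_ADDRESS instead of walking the PVO tree. A sketch with assumed address constants:

    #include <stdint.h>
    #include <stdio.h>

    #define DMAP_BASE_ADDRESS      0xc000000000000000ULL  /* assumed */
    #define VM_MIN_KERNEL_ADDRESS  0xe000000000000000ULL  /* assumed */

    static uint64_t
    kextract_fast(uint64_t va)
    {
        if (va < VM_MIN_KERNEL_ADDRESS)
            return (va & ~DMAP_BASE_ADDRESS);  /* DMAP alias: strip the base */
        return (0);  /* would fall back to the PVO tree lookup instead */
    }

    int
    main(void)
    {
        printf("%#jx\n", (uintmax_t)kextract_fast(0xc000000000123000ULL));
        return (0);
    }
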
2363 vm_offset_t sva, va; in moea64_map() local
2372 for (va = pa_start; va < pa_end; va += PAGE_SIZE) in moea64_map()
2373 if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M) in moea64_map()
2375 if (va == pa_end) in moea64_map()
2379 va = sva; in moea64_map()
2381 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) in moea64_map()
2382 moea64_kenter(va, pa_start); in moea64_map()
2383 *virt = va; in moea64_map()
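
moea64_map() (lines 2372-2375) first walks the physical range checking that every page would map cacheable (LPTE_M); only then may it hand back a direct-map address, otherwise it consumes KVA and wires the range page by page. The scan in isolation, with a stubbed calc_wimg() and an assumed LPTE_M encoding:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL
    #define LPTE_M    0x10ULL   /* assumed encoding of "coherent, cacheable" */

    /* Stub for moea64_calc_wimg(): pretend all of RAM is cacheable. */
    static uint64_t
    calc_wimg(uint64_t pa)
    {
        (void)pa;
        return (LPTE_M);
    }

    /* Mirror of lines 2372-2375: the direct map may be used only if
     * every page in [pa_start, pa_end) is plain cacheable memory. */
    static bool
    can_use_dmap(uint64_t pa_start, uint64_t pa_end)
    {
        for (uint64_t pa = pa_start; pa < pa_end; pa += PAGE_SIZE)
            if (calc_wimg(pa) != LPTE_M)
                return (false);
        return (true);
    }

    int
    main(void)
    {
        printf("%d\n", can_use_dmap(0, 0x4000ULL));
        return (0);
    }
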
2648 moea64_qenter(vm_offset_t va, vm_page_t *m, int count) in moea64_qenter() argument
2651 moea64_kenter(va, VM_PAGE_TO_PHYS(*m)); in moea64_qenter()
2652 va += PAGE_SIZE; in moea64_qenter()
2662 moea64_qremove(vm_offset_t va, int count) in moea64_qremove() argument
2665 moea64_kremove(va); in moea64_qremove()
2666 va += PAGE_SIZE; in moea64_qremove()
3040 moea64_pvo_find_va(pmap_t pm, vm_offset_t va) in moea64_pvo_find_va() argument
3046 key.pvo_vaddr = va & ~ADDR_POFF; in moea64_pvo_find_va()
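
Every moea64_pvo_find_va() caller above masks with ~ADDR_POFF first because the PVO tree is keyed on page-aligned pvo_vaddr (line 3046). The sketch below models that lookup with bsearch() over a sorted array standing in for the kernel's RB tree:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ADDR_POFF 0x0fffULL

    struct pvo { uint64_t vaddr; };

    static int
    pvo_cmp(const void *a, const void *b)
    {
        const struct pvo *x = a, *y = b;
        return ((x->vaddr > y->vaddr) - (x->vaddr < y->vaddr));
    }

    static struct pvo *
    find_va(struct pvo *sorted, size_t n, uint64_t va)
    {
        struct pvo key = { va & ~ADDR_POFF };   /* page-align the probe key */
        return (bsearch(&key, sorted, n, sizeof(key), pvo_cmp));
    }

    int
    main(void)
    {
        struct pvo map[] = { { 0x1000 }, { 0x2000 }, { 0x5000 } };
        struct pvo *p = find_va(map, 3, 0x2abc);   /* hits the 0x2000 entry */
        printf("%s\n", p != NULL ? "found" : "missing");
        return (0);
    }
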
3194 vm_offset_t va, tmpva, ppa, offset; in moea64_mapdev_attr() local
3200 va = kva_alloc(size); in moea64_mapdev_attr()
3202 if (!va) in moea64_mapdev_attr()
3205 for (tmpva = va; size > 0;) { in moea64_mapdev_attr()
3212 return ((void *)(va + offset)); in moea64_mapdev_attr()
3225 vm_offset_t base, offset, va; in moea64_unmapdev() local
3227 va = (vm_offset_t)p; in moea64_unmapdev()
3228 base = trunc_page(va); in moea64_unmapdev()
3229 offset = va & PAGE_MASK; in moea64_unmapdev()
3237 moea64_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) in moea64_sync_icache() argument
3249 lim = round_page(va+1); in moea64_sync_icache()
3250 len = MIN(lim - va, sz); in moea64_sync_icache()
3251 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); in moea64_sync_icache()
3253 pa = PVO_PADDR(pvo) | (va & ADDR_POFF); in moea64_sync_icache()
3254 moea64_syncicache(pm, va, pa, len); in moea64_sync_icache()
3256 va += len; in moea64_sync_icache()
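
moea64_sync_icache() (lines 3249-3256) walks an arbitrary [va, va+sz) range in at-most-page-sized steps: round_page(va+1) finds the end of the current page and MIN(lim - va, sz) clips the chunk so neither the page boundary nor the caller's length is overrun. The chunking logic in isolation:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_MASK  (PAGE_SIZE - 1)
    #define round_page(x)  (((x) + PAGE_MASK) & ~PAGE_MASK)
    #define MIN(a, b)      ((a) < (b) ? (a) : (b))

    /* Process at most one page per iteration, never crossing a page
     * boundary mid-chunk, as on lines 3249-3256. */
    static void
    for_each_page_chunk(uintptr_t va, size_t sz)
    {
        while (sz > 0) {
            uintptr_t lim = round_page(va + 1);
            size_t len = MIN(lim - va, sz);
            printf("chunk va %#lx len %zu\n", (unsigned long)va, len);
            va += len;
            sz -= len;
        }
    }

    int
    main(void)
    {
        for_each_page_chunk(0x1ffc, 0x3000);   /* starts mid-page on purpose */
        return (0);
    }
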
3263 moea64_dumpsys_map(vm_paddr_t pa, size_t sz, void **va) in moea64_dumpsys_map() argument
3266 *va = (void *)(uintptr_t)pa; in moea64_dumpsys_map()
3275 vm_offset_t va; in moea64_scan_init() local
3302 va = dump_map[1].pa_start + dump_map[1].pa_size; in moea64_scan_init()
3304 while (va < virtual_end) { in moea64_scan_init()
3306 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) { in moea64_scan_init()
3307 va = kmi.buffer_eva; in moea64_scan_init()
3310 pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF); in moea64_scan_init()
3313 va += PAGE_SIZE; in moea64_scan_init()
3315 if (va < virtual_end) { in moea64_scan_init()
3316 dump_map[2].pa_start = va; in moea64_scan_init()
3317 va += PAGE_SIZE; in moea64_scan_init()
3319 while (va < virtual_end) { in moea64_scan_init()
3321 if (va == kmi.buffer_sva) in moea64_scan_init()
3323 pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF); in moea64_scan_init()
3326 va += PAGE_SIZE; in moea64_scan_init()
3328 dump_map[2].pa_size = va - dump_map[2].pa_start; in moea64_scan_init()
3339 vm_offset_t va, pgva, kstart, kend, kstart_lp, kend_lp; in moea64_scan_pmap() local
3354 va = pvo->pvo_vaddr; in moea64_scan_pmap()
3356 if (va & PVO_DEAD) in moea64_scan_pmap()
3360 if (va >= DMAP_BASE_ADDRESS && va <= DMAP_MAX_ADDRESS) { in moea64_scan_pmap()
3361 if (va & PVO_LARGE) { in moea64_scan_pmap()
3362 pgva = va & ~moea64_large_page_mask; in moea64_scan_pmap()
3366 pgva = trunc_page(va); in moea64_scan_pmap()
3374 if (va & PVO_LARGE) { in moea64_scan_pmap()
3419 moea64_map_range(vm_offset_t va, vm_paddr_t pa, vm_size_t npages) in moea64_map_range() argument
3425 (va & moea64_large_page_mask) == 0 && in moea64_map_range()
3428 moea64_kenter_large(va, pa, 0, 0); in moea64_map_range()
3431 va += moea64_large_page_size; in moea64_map_range()
3434 moea64_kenter(va, pa); in moea64_map_range()
3436 va += PAGE_SIZE; in moea64_map_range()
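
moea64_map_range() (lines 3419-3436) prefers large mappings whenever va and pa are both large-page aligned and enough pages remain, falling back to 4K wiring otherwise. A compilable reconstruction of that decision with stubbed enter functions (the 16 MB large-page size is again an assumption; the real code reads moea64_large_page_size at runtime):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define LP_SIZE   (16UL * 1024 * 1024)   /* assumed large-page size */
    #define LP_MASK   (LP_SIZE - 1)
    #define LP_PAGES  (LP_SIZE / PAGE_SIZE)

    static void
    kenter(uintptr_t va, uintptr_t pa)        /* 4K stand-in */
    {
        printf("4K  %#lx -> %#lx\n", (unsigned long)va, (unsigned long)pa);
    }

    static void
    kenter_large(uintptr_t va, uintptr_t pa)  /* large-page stand-in */
    {
        printf("16M %#lx -> %#lx\n", (unsigned long)va, (unsigned long)pa);
    }

    static void
    map_range(uintptr_t va, uintptr_t pa, size_t npages)
    {
        while (npages > 0) {
            if ((va & LP_MASK) == 0 && (pa & LP_MASK) == 0 &&
                npages >= LP_PAGES) {
                kenter_large(va, pa);
                va += LP_SIZE; pa += LP_SIZE; npages -= LP_PAGES;
            } else {
                kenter(va, pa);
                va += PAGE_SIZE; pa += PAGE_SIZE; npages--;
            }
        }
    }

    int
    main(void)
    {
        map_range(0x10000000UL, 0x40000000UL, LP_PAGES + 2); /* 1 large + 2 small */
        return (0);
    }
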
3446 vm_offset_t va, vm_page_base; in moea64_page_array_startup() local
3490 va = vm_page_base; in moea64_page_array_startup()
3500 moea64_map_range(va, pa, size >> PAGE_SHIFT); in moea64_page_array_startup()
3504 va += size; in moea64_page_array_startup()
3691 moea64_sp_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, in moea64_sp_enter() argument
3704 KASSERT((va & HPT_SP_MASK) == 0, ("%s: va %#jx unaligned", in moea64_sp_enter()
3705 __func__, (uintmax_t)va)); in moea64_sp_enter()
3713 __func__, (uintmax_t)va, (uintmax_t)VM_PAGE_TO_PHYS(m), in moea64_sp_enter()
3718 sva = va; in moea64_sp_enter()
3745 moea64_remove_locked(pmap, va, va + HPT_SP_SIZE, &tofree); in moea64_sp_enter()
3749 i++, va += PAGE_SIZE, pa += PAGE_SIZE, m++) { in moea64_sp_enter()
3767 init_pvo_entry(pvo, pmap, va); in moea64_sp_enter()
3810 moea64_sp_promote(pmap_t pmap, vm_offset_t va, vm_page_t m) in moea64_sp_promote() argument
3820 va &= ~HPT_SP_MASK; in moea64_sp_promote()
3821 sva = va; in moea64_sp_promote()
3846 pa < pa_end; pa += PAGE_SIZE, va += PAGE_SIZE) { in moea64_sp_promote()
3850 __func__, pmap, (uintmax_t)va); in moea64_sp_promote()
3856 __func__, pmap, (uintmax_t)va, in moea64_sp_promote()
3865 __func__, pmap, (uintmax_t)va, in moea64_sp_promote()
3874 __func__, pmap, (uintmax_t)va, in moea64_sp_promote()
3883 __func__, pmap, (uintmax_t)va, in moea64_sp_promote()
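
moea64_sp_promote() aligns va down (line 3820) and then audits every 4K PVO in the region, logging and bailing out (the CTR sites at 3850-3883) if any entry is missing or incompatible; only a fully populated, physically contiguous, uniformly protected range gets promoted. A toy model using a 4-page "superpage" so the lookup table stays small:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE  4096ULL
    /* Toy superpage of 4 base pages; the real HPT_SP_SIZE is far larger. */
    #define SP_SIZE    (4 * PAGE_SIZE)
    #define SP_MASK    (SP_SIZE - 1)

    struct pvo { uint64_t va, pa; unsigned prot; bool valid; };

    static struct pvo table[] = {
        { 0x0000, 0x8000, 3, true }, { 0x1000, 0x9000, 3, true },
        { 0x2000, 0xa000, 3, true }, { 0x3000, 0xb000, 3, true },
    };

    static struct pvo *
    lookup(uint64_t va)   /* stand-in for moea64_pvo_find_va() */
    {
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (table[i].va == va)
                return (&table[i]);
        return (NULL);
    }

    /* All constituent 4K entries must exist, be physically contiguous,
     * and share one protection before the region may be promoted. */
    static bool
    can_promote(uint64_t va)
    {
        struct pvo *first, *p;

        va &= ~SP_MASK;   /* align, as line 3820 does */
        if ((first = lookup(va)) == NULL || !first->valid)
            return (false);
        for (uint64_t off = PAGE_SIZE; off < SP_SIZE; off += PAGE_SIZE) {
            p = lookup(va + off);
            if (p == NULL || !p->valid || p->pa != first->pa + off ||
                p->prot != first->prot)
                return (false);
        }
        return (true);
    }

    int
    main(void)
    {
        printf("%d\n", can_promote(0x2000));   /* 1: the whole region qualifies */
        return (0);
    }
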
3941 vm_offset_t va, va_end; in moea64_sp_demote_aligned() local
3956 va = PVO_VADDR(pvo); in moea64_sp_demote_aligned()
3960 for (pvo = sp, va_end = va + HPT_SP_SIZE; in moea64_sp_demote_aligned()
3963 va += PAGE_SIZE, pa += PAGE_SIZE) { in moea64_sp_demote_aligned()
3964 KASSERT(pvo && PVO_VADDR(pvo) == va, in moea64_sp_demote_aligned()
3965 ("%s: missing PVO for va %#jx", __func__, (uintmax_t)va)); in moea64_sp_demote_aligned()