Lines matching refs:pa (references to the symbol pa)
133 #define PV_LOCK_IDX(pa) ((pa_index(pa) * (((pa) >> 45) + 1)) % PV_LOCK_COUNT) argument
135 #define PV_LOCK_IDX(pa) (pa_index(pa) % PV_LOCK_COUNT) argument
137 #define PV_LOCKPTR(pa) ((struct mtx *)(&pv_lock[PV_LOCK_IDX(pa)])) argument
138 #define PV_LOCK(pa) mtx_lock(PV_LOCKPTR(pa)) argument
139 #define PV_UNLOCK(pa) mtx_unlock(PV_LOCKPTR(pa)) argument
140 #define PV_LOCKASSERT(pa) mtx_assert(PV_LOCKPTR(pa), MA_OWNED) argument
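The two PV_LOCK_IDX definitions above (source lines 133 and 135) are the NUMA and non-NUMA variants of the hash that picks one of PV_LOCK_COUNT pv-list mutexes for a physical address. A minimal standalone sketch, assuming pa_index() yields the page frame number and using a stand-in PV_LOCK_COUNT:

    #include <stdint.h>

    #define PAGE_SHIFT    12
    #define PV_LOCK_COUNT 64   /* stand-in; the kernel sizes this from MAXCPU */
    #define pa_index(pa)  ((uint64_t)(pa) >> PAGE_SHIFT) /* assumed: page frame number */

    /* NUMA variant (source line 133): fold a factor derived from the bits
     * above 45 into the hash so addresses in different memory domains
     * spread across the lock array. */
    static unsigned
    pv_lock_idx_numa(uint64_t pa)
    {
            return ((pa_index(pa) * ((pa >> 45) + 1)) % PV_LOCK_COUNT);
    }

    /* Non-NUMA variant (source line 135): plain modulo on the frame number. */
    static unsigned
    pv_lock_idx(uint64_t pa)
    {
            return (pa_index(pa) % PV_LOCK_COUNT);
    }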
150 moea64_sp_pv_lock(vm_paddr_t pa) in moea64_sp_pv_lock() argument
155 pa_end = pa + (HPT_SP_SIZE - PV_LOCK_SIZE); in moea64_sp_pv_lock()
157 mtx_lock_flags(PV_LOCKPTR(pa), MTX_DUPOK); in moea64_sp_pv_lock()
158 if (pa == pa_end) in moea64_sp_pv_lock()
160 pa += PV_LOCK_SIZE; in moea64_sp_pv_lock()
165 moea64_sp_pv_unlock(vm_paddr_t pa) in moea64_sp_pv_unlock() argument
170 pa_end = pa; in moea64_sp_pv_unlock()
171 pa += HPT_SP_SIZE - PV_LOCK_SIZE; in moea64_sp_pv_unlock()
173 mtx_unlock_flags(PV_LOCKPTR(pa), MTX_DUPOK); in moea64_sp_pv_unlock()
174 if (pa == pa_end) in moea64_sp_pv_unlock()
176 pa -= PV_LOCK_SIZE; in moea64_sp_pv_unlock()
180 #define SP_PV_LOCK_ALIGNED(pa) moea64_sp_pv_lock(pa) argument
181 #define SP_PV_UNLOCK_ALIGNED(pa) moea64_sp_pv_unlock(pa) argument
182 #define SP_PV_LOCK(pa) moea64_sp_pv_lock((pa) & ~HPT_SP_MASK) argument
183 #define SP_PV_UNLOCK(pa) moea64_sp_pv_unlock((pa) & ~HPT_SP_MASK) argument
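moea64_sp_pv_lock() and moea64_sp_pv_unlock() (source lines 150-176) take and release every PV lock covering a 16 MB superpage: the lock loop walks the range upward in PV_LOCK_SIZE steps, the unlock loop walks it back downward, and MTX_DUPOK is needed because two addresses in the range may hash to the same mutex. A sketch of that pairing, with lock_one()/unlock_one() as hypothetical stand-ins for the mtx calls:

    #include <stdint.h>

    static void lock_one(uint64_t pa)   { (void)pa; } /* mtx_lock_flags(PV_LOCKPTR(pa), MTX_DUPOK) */
    static void unlock_one(uint64_t pa) { (void)pa; } /* mtx_unlock_flags(PV_LOCKPTR(pa), MTX_DUPOK) */

    /* Lock low-to-high across [base, base + span), one lock per `step`
     * bytes; assumes span is a nonzero multiple of step. */
    static void
    sp_lock_sketch(uint64_t base, uint64_t span, uint64_t step)
    {
            uint64_t pa = base, pa_end = base + (span - step);

            for (;;) {
                    lock_one(pa);
                    if (pa == pa_end)
                            break;
                    pa += step;
            }
    }

    /* Unlock high-to-low: the exact reverse of the acquisition order. */
    static void
    sp_unlock_sketch(uint64_t base, uint64_t span, uint64_t step)
    {
            uint64_t pa = base + (span - step);

            for (;;) {
                    unlock_one(pa);
                    if (pa == base)
                            break;
                    pa -= step;
            }
    }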
295 vm_paddr_t pa, vm_size_t sz);
433 void moea64_dumpsys_map(vm_paddr_t pa, size_t sz,
523 vm_paddr_t pa; in moea64_pvo_paddr() local
525 pa = (pvo)->pvo_pte.pa & LPTE_RPGN; in moea64_pvo_paddr()
528 pa &= ~HPT_SP_MASK; /* This is needed to clear LPTE_LP bits. */ in moea64_pvo_paddr()
529 pa |= PVO_VADDR(pvo) & HPT_SP_MASK; in moea64_pvo_paddr()
531 return (pa); in moea64_pvo_paddr()
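moea64_pvo_paddr() (source lines 523-531) recovers a PVO's physical address. For a superpage PVO the low bits of pvo_pte.pa double as LPTE_LP size bits rather than address bits, so the code clears everything below the superpage boundary and substitutes the offset taken from the virtual address. A sketch of the bit manipulation, assuming 16 MB superpages:

    #include <stdint.h>

    #define HPT_SP_MASK 0xFFFFFFULL   /* low 24 bits: offset within 16 MB */

    /* base_pa is pvo_pte.pa & LPTE_RPGN (its low bits hold the LPTE_LP
     * size encoding, not address bits); va is PVO_VADDR(pvo). */
    static uint64_t
    sp_pvo_paddr(uint64_t base_pa, uint64_t va)
    {
            uint64_t pa;

            pa = base_pa & ~HPT_SP_MASK;  /* drop the LPTE_LP bits */
            pa |= va & HPT_SP_MASK;       /* offset comes from the VA */
            return (pa);
    }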
611 lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */ in moea64_pte_from_pvo()
622 moea64_calc_wimg(vm_paddr_t pa, vm_memattr_t ma) in moea64_calc_wimg() argument
648 if ((pa >= pregions[i].mr_start) && in moea64_calc_wimg()
649 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { in moea64_calc_wimg()
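The fragment of moea64_calc_wimg() above scans the physical memory regions to decide whether pa is RAM. A sketch of that decision, assuming (per the usual WIMG convention, not confirmed by the lines shown) that RAM gets coherent-memory attributes and everything else gets caching-inhibited, guarded ones; the flag values are stand-ins:

    #include <stdint.h>

    #define WIMG_MEM 0x10ULL   /* stand-in for LPTE_M (coherent memory) */
    #define WIMG_IO  0x0DULL   /* stand-in for LPTE_I | LPTE_G (uncached, guarded) */

    struct region { uint64_t start, size; };

    /* Treat pa as cacheable RAM only if some region covers it, mirroring
     * the pregions[] range test above. */
    static uint64_t
    calc_wimg_sketch(uint64_t pa, const struct region *r, int nr)
    {
            for (int i = 0; i < nr; i++)
                    if (pa >= r[i].start && pa < r[i].start + r[i].size)
                            return (WIMG_MEM);
            return (WIMG_IO);
    }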
807 moea64_kenter_large(vm_offset_t va, vm_paddr_t pa, uint64_t attr, int bootstrap) in moea64_kenter_large() argument
822 pvo->pvo_pte.pa = pa | pte_lo; in moea64_kenter_large()
834 vm_paddr_t pa, pkernelstart, pkernelend; in moea64_setup_direct_map() local
846 for (pa = pregions[i].mr_start; pa < pregions[i].mr_start + in moea64_setup_direct_map()
847 pregions[i].mr_size; pa += moea64_large_page_size) { in moea64_setup_direct_map()
849 if (pa & moea64_large_page_mask) { in moea64_setup_direct_map()
850 pa &= moea64_large_page_mask; in moea64_setup_direct_map()
853 if (pa + moea64_large_page_size > in moea64_setup_direct_map()
857 moea64_kenter_large(PHYS_TO_DMAP(pa), pa, pte_lo, 1); in moea64_setup_direct_map()
874 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; in moea64_setup_direct_map()
875 pa += PAGE_SIZE) in moea64_setup_direct_map()
876 moea64_kenter(pa, pa); in moea64_setup_direct_map()
880 for (pa = pkernelstart & ~PAGE_MASK; pa < pkernelend; in moea64_setup_direct_map()
881 pa += PAGE_SIZE) in moea64_setup_direct_map()
882 moea64_kenter(pa | DMAP_BASE_ADDRESS, pa); in moea64_setup_direct_map()
888 for (pa = off; pa < off + size; pa += PAGE_SIZE) in moea64_setup_direct_map()
889 moea64_kenter(pa, pa); in moea64_setup_direct_map()
892 for (pa = EXC_RSVD; pa < EXC_LAST; pa += PAGE_SIZE) in moea64_setup_direct_map()
893 moea64_kenter(pa | DMAP_BASE_ADDRESS, pa); in moea64_setup_direct_map()
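moea64_setup_direct_map() (source lines 834-893) first covers each physical region with large pages, then maps the kernel and exception vectors with 4 KB pages. A simplified sketch of the large-page loop that flags misaligned or overhanging pages as guarded; note the kernel additionally adjusts pa in the misaligned case (source line 850), which this sketch omits:

    #include <stdint.h>

    #define GUARDED 1ULL   /* stand-in for LPTE_G */

    /* stand-in for moea64_kenter_large(PHYS_TO_DMAP(pa), pa, pte_lo, 1) */
    static void map_large(uint64_t pa, uint64_t flags) { (void)pa; (void)flags; }

    static void
    direct_map_region(uint64_t start, uint64_t size, uint64_t lp_size)
    {
            uint64_t pa, flags;

            for (pa = start; pa < start + size; pa += lp_size) {
                    flags = 0;
                    if ((pa & (lp_size - 1)) != 0)
                            flags |= GUARDED;    /* page starts misaligned */
                    if (pa + lp_size > start + size)
                            flags |= GUARDED;    /* page overhangs the region */
                    map_large(pa, flags);
            }
    }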
910 const vm_paddr_t *pa = a, *pb = b; in pa_cmp() local
912 if (*pa < *pb) in pa_cmp()
914 else if (*pa > *pb) in pa_cmp()
1138 vm_offset_t pa, va; in moea64_late_bootstrap() local
1212 pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE); in moea64_late_bootstrap()
1215 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va); in moea64_late_bootstrap()
1219 moea64_kenter(va, pa); in moea64_late_bootstrap()
1220 pa += PAGE_SIZE; in moea64_late_bootstrap()
1227 pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE); in moea64_late_bootstrap()
1232 moea64_kenter(va, pa); in moea64_late_bootstrap()
1233 pa += PAGE_SIZE; in moea64_late_bootstrap()
1240 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); in moea64_late_bootstrap()
1245 moea64_kenter(va, pa); in moea64_late_bootstrap()
1246 pa += PAGE_SIZE; in moea64_late_bootstrap()
1391 vm_paddr_t pa; in moea64_mincore() local
1400 pa = PVO_PADDR(pvo); in moea64_mincore()
1401 m = PHYS_TO_VM_PAGE(pa); in moea64_mincore()
1428 *pap = pa; in moea64_mincore()
1442 void moea64_set_scratchpage_pa(int which, vm_paddr_t pa) in moea64_set_scratchpage_pa() argument
1451 pvo->pvo_pte.pa = in moea64_set_scratchpage_pa()
1452 moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa; in moea64_set_scratchpage_pa()
1542 vm_paddr_t pa = VM_PAGE_TO_PHYS(m); in moea64_zero_page_area() local
1548 bzero((caddr_t)(uintptr_t)PHYS_TO_DMAP(pa) + off, size); in moea64_zero_page_area()
1551 moea64_set_scratchpage_pa(0, pa); in moea64_zero_page_area()
1563 vm_paddr_t pa = VM_PAGE_TO_PHYS(m); in moea64_zero_page() local
1568 moea64_set_scratchpage_pa(0, pa); in moea64_zero_page()
1580 vm_paddr_t pa = VM_PAGE_TO_PHYS(m); in moea64_zero_page_dmap() local
1583 va = PHYS_TO_DMAP(pa); in moea64_zero_page_dmap()
1592 vm_paddr_t pa = VM_PAGE_TO_PHYS(m); in moea64_quick_enter_page() local
1606 pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) | in moea64_quick_enter_page()
1607 (uint64_t)pa; in moea64_quick_enter_page()
1652 vm_paddr_t pa; in moea64_enter() local
1670 pa = VM_PAGE_TO_PHYS(m); in moea64_enter()
1671 pte_lo = moea64_calc_wimg(pa, pmap_page_get_memattr(m)); in moea64_enter()
1672 pvo->pvo_pte.pa = pa | pte_lo; in moea64_enter()
1684 PV_LOCK(pa); in moea64_enter()
1706 oldpvo->pvo_pte.pa == pvo->pvo_pte.pa && in moea64_enter()
1719 PV_UNLOCK(pa); in moea64_enter()
1732 PV_UNLOCK(pa); in moea64_enter()
1748 moea64_syncicache(pmap, va, pa, PAGE_SIZE); in moea64_enter()
1763 (va & HPT_SP_MASK) == (pa & HPT_SP_MASK) && in moea64_enter()
1775 moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, in moea64_syncicache() argument
1793 __syncicache((void *)(uintptr_t)pa, sz); in moea64_syncicache()
1797 __syncicache((void *)(uintptr_t)PHYS_TO_DMAP(pa), sz); in moea64_syncicache()
1803 moea64_set_scratchpage_pa(1, pa & ~ADDR_POFF); in moea64_syncicache()
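moea64_syncicache() (source lines 1775-1803) picks the cheapest way to reach the physical page: flush through the physical address directly when that is possible, through the direct map when one exists, and otherwise through a borrowed scratch-page mapping. A sketch of that fallback chain, with flush(), dmap_va(), and the scratch helpers as stand-ins for __syncicache(), PHYS_TO_DMAP(), and moea64_set_scratchpage_pa():

    #include <stddef.h>
    #include <stdint.h>

    static void     flush(uint64_t va, size_t sz) { (void)va; (void)sz; }
    static uint64_t dmap_va(uint64_t pa)          { return (pa); }
    static void     map_scratch(uint64_t pa)      { (void)pa; }
    static uint64_t scratch_va;

    static void
    sync_icache_sketch(uint64_t pa, size_t sz, int real_mode_ok, int have_dmap)
    {
            if (real_mode_ok)
                    flush(pa, sz);                   /* physical address works directly */
            else if (have_dmap)
                    flush(dmap_va(pa), sz);          /* go through the direct map */
            else {
                    map_scratch(pa & ~0xFFFULL);     /* borrow the scratch page */
                    flush(scratch_va | (pa & 0xFFFULL), sz);
            }
    }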
1866 vm_paddr_t pa; in moea64_extract() local
1871 pa = 0; in moea64_extract()
1873 pa = PVO_PADDR(pvo) | (va - PVO_VADDR(pvo)); in moea64_extract()
1876 return (pa); in moea64_extract()
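moea64_extract() (source lines 1866-1876) composes the result from the PVO's base physical address and the VA's offset within the mapping. A sketch; OR and addition coincide because the base physical address is aligned to the mapping size, so the offset bits are guaranteed clear:

    #include <stdint.h>

    /* map_va/map_pa are the mapping's base VA and PA, i.e. PVO_VADDR(pvo)
     * and PVO_PADDR(pvo) above. */
    static uint64_t
    extract_pa(uint64_t map_va, uint64_t map_pa, uint64_t va)
    {
            return (map_pa | (va - map_va));
    }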
1931 pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M; in moea64_uma_page_alloc()
2146 pvo->pvo_pte.pa &= ~LPTE_WIMG; in moea64_page_set_memattr()
2147 pvo->pvo_pte.pa |= lo; in moea64_page_set_memattr()
2174 moea64_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) in moea64_kenter_attr() argument
2185 pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma); in moea64_kenter_attr()
2204 (uintmax_t)pa, error); in moea64_kenter_attr()
2208 moea64_kenter(vm_offset_t va, vm_paddr_t pa) in moea64_kenter() argument
2211 moea64_kenter_attr(va, pa, VM_MEMATTR_DEFAULT); in moea64_kenter()
2222 vm_paddr_t pa; in moea64_kextract() local
2236 pa = PVO_PADDR(pvo) | (va - PVO_VADDR(pvo)); in moea64_kextract()
2238 return (pa); in moea64_kextract()
2573 (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { in moea64_pvo_protect()
3149 moea64_dev_direct_mapped(vm_paddr_t pa, vm_size_t size) in moea64_dev_direct_mapped() argument
3155 if (hw_direct_map && mem_valid(pa, size) == 0) in moea64_dev_direct_mapped()
3159 ppa = pa & ~ADDR_POFF; in moea64_dev_direct_mapped()
3162 ppa < pa + size; ppa += PAGE_SIZE, in moea64_dev_direct_mapped()
3181 moea64_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma) in moea64_mapdev_attr() argument
3185 ppa = trunc_page(pa); in moea64_mapdev_attr()
3186 offset = pa & PAGE_MASK; in moea64_mapdev_attr()
3205 moea64_mapdev(vm_paddr_t pa, vm_size_t size) in moea64_mapdev() argument
3208 return moea64_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT); in moea64_mapdev()
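moea64_mapdev_attr() (source lines 3181-3186) splits the requested address into a page-aligned base plus an in-page offset, maps whole pages, and re-applies the offset to the returned VA. A sketch with hypothetical kva_alloc_pages()/map_page() helpers standing in for the kernel's kva allocator and moea64_kenter_attr():

    #include <stdint.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (PAGE_SIZE - 1)
    #define trunc_page(x) ((x) & ~PAGE_MASK)

    static uint64_t kva_alloc_pages(uint64_t sz) { (void)sz; return (0); } /* hypothetical */
    static void     map_page(uint64_t va, uint64_t pa) { (void)va; (void)pa; } /* hypothetical */

    static uint64_t
    mapdev_sketch(uint64_t pa, uint64_t size)
    {
            uint64_t ppa, offset, va, o;

            ppa = trunc_page(pa);                /* page-aligned base */
            offset = pa & PAGE_MASK;             /* in-page remainder */
            size = (offset + size + PAGE_MASK) & ~PAGE_MASK; /* round to pages */
            va = kva_alloc_pages(size);
            for (o = 0; o < size; o += PAGE_SIZE)
                    map_page(va + o, ppa + o);
            return (va + offset);                /* hand back the unaligned VA */
    }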
3230 vm_paddr_t pa; in moea64_sync_icache() local
3241 if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) { in moea64_sync_icache()
3242 pa = PVO_PADDR(pvo) | (va & ADDR_POFF); in moea64_sync_icache()
3243 moea64_syncicache(pm, va, pa, len); in moea64_sync_icache()
3252 moea64_dumpsys_map(vm_paddr_t pa, size_t sz, void **va) in moea64_dumpsys_map() argument
3255 *va = (void *)(uintptr_t)pa; in moea64_dumpsys_map()
3327 vm_paddr_t pa, pa_end; in moea64_scan_pmap() local
3361 pa = PVO_PADDR(pvo); in moea64_scan_pmap()
3364 pa_end = pa + lpsize; in moea64_scan_pmap()
3365 for (; pa < pa_end; pa += PAGE_SIZE) { in moea64_scan_pmap()
3366 if (vm_phys_is_dumpable(pa)) in moea64_scan_pmap()
3367 vm_page_dump_add(dump_bitset, pa); in moea64_scan_pmap()
3370 if (vm_phys_is_dumpable(pa)) in moea64_scan_pmap()
3371 vm_page_dump_add(dump_bitset, pa); in moea64_scan_pmap()
3408 moea64_map_range(vm_offset_t va, vm_paddr_t pa, vm_size_t npages) in moea64_map_range() argument
3413 (pa & moea64_large_page_mask) == 0 && in moea64_map_range()
3417 moea64_kenter_large(va, pa, 0, 0); in moea64_map_range()
3419 pa += moea64_large_page_size; in moea64_map_range()
3423 moea64_kenter(va, pa); in moea64_map_range()
3424 pa += PAGE_SIZE; in moea64_map_range()
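moea64_map_range() (source lines 3408-3424) prefers large pages when both the virtual and physical addresses are aligned and enough pages remain, and falls back to 4 KB mappings otherwise. A sketch of that choice with hypothetical map helpers:

    #include <stdint.h>

    #define PAGE_SIZE 4096ULL

    static void map_large_page(uint64_t va, uint64_t pa) { (void)va; (void)pa; } /* kenter_large */
    static void map_small_page(uint64_t va, uint64_t pa) { (void)va; (void)pa; } /* kenter */

    static void
    map_range_sketch(uint64_t va, uint64_t pa, uint64_t npages, uint64_t lp_size)
    {
            while (npages > 0) {
                    if ((va & (lp_size - 1)) == 0 && (pa & (lp_size - 1)) == 0 &&
                        npages >= lp_size / PAGE_SIZE) {
                            map_large_page(va, pa);
                            va += lp_size; pa += lp_size;
                            npages -= lp_size / PAGE_SIZE;
                    } else {
                            map_small_page(va, pa);
                            va += PAGE_SIZE; pa += PAGE_SIZE;
                            npages--;
                    }
            }
    }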
3434 vm_paddr_t pa; in moea64_page_array_startup() local
3445 pa = vm_phys_early_alloc(0, size); in moea64_page_array_startup()
3447 pa, pa + size, VM_PROT_READ | VM_PROT_WRITE); in moea64_page_array_startup()
3487 pa = vm_phys_early_alloc(i, size); in moea64_page_array_startup()
3489 moea64_map_range(va, pa, size >> PAGE_SHIFT); in moea64_page_array_startup()
3687 vm_paddr_t pa, spa; in moea64_sp_enter() local
3709 spa = pa = VM_PAGE_TO_PHYS(sm); in moea64_sp_enter()
3738 i++, va += PAGE_SIZE, pa += PAGE_SIZE, m++) { in moea64_sp_enter()
3742 pvo->pvo_pte.pa = (pa & ~HPT_SP_MASK) | LPTE_LP_4K_16M | in moea64_sp_enter()
3743 moea64_calc_wimg(pa, pmap_page_get_memattr(m)); in moea64_sp_enter()
3786 if (sync && (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) in moea64_sp_enter()
3801 vm_paddr_t pa, pa_end; in moea64_sp_promote() local
3811 pa = VM_PAGE_TO_PHYS(m) & ~HPT_SP_MASK; in moea64_sp_promote()
3812 m = PHYS_TO_VM_PAGE(pa); in moea64_sp_promote()
3833 for (pa_end = pa + HPT_SP_SIZE; in moea64_sp_promote()
3834 pa < pa_end; pa += PAGE_SIZE, va += PAGE_SIZE) { in moea64_sp_promote()
3841 if (PVO_PADDR(pvo) != pa) { in moea64_sp_promote()
3845 (uintmax_t)PVO_PADDR(pvo), (uintmax_t)pa); in moea64_sp_promote()
3867 if ((first->pvo_pte.pa & LPTE_WIMG) != in moea64_sp_promote()
3868 (pvo->pvo_pte.pa & LPTE_WIMG)) { in moea64_sp_promote()
3872 (uintmax_t)(pvo->pvo_pte.pa & LPTE_WIMG), in moea64_sp_promote()
3873 (uintmax_t)(first->pvo_pte.pa & LPTE_WIMG)); in moea64_sp_promote()
3899 pvo->pvo_pte.pa &= ADDR_POFF | ~HPT_SP_MASK; in moea64_sp_promote()
3900 pvo->pvo_pte.pa |= LPTE_LP_4K_16M; in moea64_sp_promote()
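The promotion step above (source lines 3899-3900) rewrites each constituent PVO's pte pa field in place: bits above the superpage boundary and the low non-address bits survive, and the 4K-base/16M-actual size encoding is stamped into the freed address bits. A sketch of the mask arithmetic; the LPTE_LP_4K_16M value here is illustrative:

    #include <stdint.h>

    #define HPT_SP_MASK    0xFFFFFFULL /* offset within a 16 MB superpage */
    #define ADDR_POFF      0xFFFULL    /* low in-page bits (WIMG etc.), kept */
    #define LPTE_LP_4K_16M 0x38000ULL  /* illustrative encoding, not the real value */

    static uint64_t
    promote_pte_pa(uint64_t pte_pa)
    {
            pte_pa &= ADDR_POFF | ~HPT_SP_MASK; /* clear addr bits below 16 MB */
            pte_pa |= LPTE_LP_4K_16M;           /* stamp the size encoding */
            return (pte_pa);
    }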
3929 vm_paddr_t pa; in moea64_sp_demote_aligned() local
3944 pa = PVO_PADDR(pvo); in moea64_sp_demote_aligned()
3945 m = PHYS_TO_VM_PAGE(pa); in moea64_sp_demote_aligned()
3950 va += PAGE_SIZE, pa += PAGE_SIZE) { in moea64_sp_demote_aligned()
3955 pvo->pvo_pte.pa &= ~LPTE_RPGN; in moea64_sp_demote_aligned()
3956 pvo->pvo_pte.pa |= pa; in moea64_sp_demote_aligned()
4075 (sp->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { in moea64_sp_protect()