Lines Matching +full:- +full:pvs
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2008-2015 Nathan Whitehorn
132 * Cheap NUMA-izing of the pv locks, to reduce contention across domains.
159 pa_end = pa + (HPT_SP_SIZE - PV_LOCK_SIZE); in moea64_sp_pv_lock()
175 pa += HPT_SP_SIZE - PV_LOCK_SIZE; in moea64_sp_pv_unlock()
180 pa -= PV_LOCK_SIZE; in moea64_sp_pv_unlock()
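The comment at 132 and the lock/unlock loops at 159-180 belong to a scheme where every physical address hashes to one of a small pool of pv locks, the NUMA domain is folded into the hash so traffic from different domains tends to land on different locks, and a 16 MB superpage takes every lock covering it in ascending address order (and drops them in the reverse order). A minimal userland model of that idea, with pthread mutexes standing in for mtx(9), a simple rotate-by-domain hash instead of the kernel's, and every constant and name below assumed for the sketch rather than taken from mmu_oea64.c:

#include <pthread.h>
#include <stdint.h>

#define PV_LOCK_COUNT_SK	64			/* assumed pool size */
#define SP_SIZE_SK		(16UL << 20)		/* 16 MB superpage */
#define PV_LOCK_SIZE_SK		(SP_SIZE_SK / PV_LOCK_COUNT_SK)

static pthread_mutex_t pv_lock_sk[PV_LOCK_COUNT_SK];

static void
pv_lock_init_sk(void)
{
	for (int i = 0; i < PV_LOCK_COUNT_SK; i++)
		pthread_mutex_init(&pv_lock_sk[i], NULL);
}

/* Fold the domain (assumed to live in the high PA bits) into the lock index. */
static unsigned
pv_lock_idx_sk(uint64_t pa)
{
	unsigned domain = (unsigned)(pa >> 45);

	return ((unsigned)(pa / PV_LOCK_SIZE_SK + domain) % PV_LOCK_COUNT_SK);
}

/* Lock every granule of a superpage-aligned pa, in ascending order. */
static void
sp_pv_lock_sk(uint64_t pa)
{
	uint64_t pa_end = pa + (SP_SIZE_SK - PV_LOCK_SIZE_SK);

	for (;;) {
		pthread_mutex_lock(&pv_lock_sk[pv_lock_idx_sk(pa)]);
		if (pa == pa_end)
			break;
		pa += PV_LOCK_SIZE_SK;
	}
}

/* Release in the opposite (descending) order, mirroring the listing. */
static void
sp_pv_unlock_sk(uint64_t pa)
{
	uint64_t pa_end = pa;

	pa += SP_SIZE_SK - PV_LOCK_SIZE_SK;
	for (;;) {
		pthread_mutex_unlock(&pv_lock_sk[pv_lock_idx_sk(pa)]);
		if (pa == pa_end)
			break;
		pa -= PV_LOCK_SIZE_SK;
	}
}

The granule size here is chosen so one superpage touches each lock of the pool at most once, which keeps the loop from taking a non-recursive mutex twice; the kernel uses its own hash and constants.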
315 #define PVO_IS_SP(pvo) (((pvo)->pvo_vaddr & PVO_LARGE) && \
316 (pvo)->pvo_pmap != kernel_pmap)
531 pa = (pvo)->pvo_pte.pa & LPTE_RPGN; in moea64_pvo_paddr()
545 return (&m->md.mdpg_pvoh); in vm_page_to_pvoh()
564 pvo->pvo_vaddr = PVO_BOOTSTRAP; in alloc_pvo_entry()
580 pvo->pvo_pmap = pmap; in init_pvo_entry()
582 pvo->pvo_vaddr |= va; in init_pvo_entry()
584 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT) in init_pvo_entry()
587 if (pmap == kernel_pmap && (pvo->pvo_vaddr & PVO_LARGE) != 0) in init_pvo_entry()
592 pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3; in init_pvo_entry()
599 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) in free_pvo_entry()
607 lpte->pte_hi = moea64_pte_vpn_from_pvo_vpn(pvo); in moea64_pte_from_pvo()
608 lpte->pte_hi |= LPTE_VALID; in moea64_pte_from_pvo()
610 if (pvo->pvo_vaddr & PVO_LARGE) in moea64_pte_from_pvo()
611 lpte->pte_hi |= LPTE_BIG; in moea64_pte_from_pvo()
612 if (pvo->pvo_vaddr & PVO_WIRED) in moea64_pte_from_pvo()
613 lpte->pte_hi |= LPTE_WIRED; in moea64_pte_from_pvo()
614 if (pvo->pvo_vaddr & PVO_HID) in moea64_pte_from_pvo()
615 lpte->pte_hi |= LPTE_HID; in moea64_pte_from_pvo()
617 lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */ in moea64_pte_from_pvo()
618 if (pvo->pvo_pte.prot & VM_PROT_WRITE) in moea64_pte_from_pvo()
619 lpte->pte_lo |= LPTE_BW; in moea64_pte_from_pvo()
621 lpte->pte_lo |= LPTE_BR; in moea64_pte_from_pvo()
623 if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE)) in moea64_pte_from_pvo()
624 lpte->pte_lo |= LPTE_NOEXEC; in moea64_pte_from_pvo()
678 if (mapa->om_pa < mapb->om_pa) in om_cmp()
679 return (-1); in om_cmp()
680 else if (mapa->om_pa > mapb->om_pa) in om_cmp()
698 OF_getencprop(OF_finddevice("/"), "#address-cells", &acells, in moea64_add_ofw_mappings()
700 if (OF_getencprop(mmu, "translations", trans_cells, sz) == -1) in moea64_add_ofw_mappings()
725 panic("OFW translations above 32-bit boundary!"); in moea64_add_ofw_mappings()
729 panic("OFW translation not page-aligned (phys)!"); in moea64_add_ofw_mappings()
731 panic("OFW translation not page-aligned (virt)!"); in moea64_add_ofw_mappings()
740 /* If this address is direct-mapped, skip remapping */ in moea64_add_ofw_mappings()
783 moea64_large_page_mask = moea64_large_page_size - 1; in moea64_probe_large_page()
823 pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE; in moea64_kenter_large()
826 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | in moea64_kenter_large()
828 pvo->pvo_pte.pa = pa | pte_lo; in moea64_kenter_large()
872 * out of the direct-mapped region. in moea64_setup_direct_map()
876 * For pre-dmap execution, we need to use identity mapping in moea64_setup_direct_map()
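The moea64_kenter_large() lines above (823-828) and the moea64_setup_direct_map() comments (872-876) describe populating the direct map: every large-page chunk of physical memory gets a wired, writable mapping whose virtual address is the physical address, possibly offset by a DMAP base, so early (pre-dmap) code can run on an identity mapping. A rough model of that loop, with the region table, the kenter callback and the constants all invented for the example:

#include <stdint.h>

struct phys_region_sk {
	uint64_t start;
	uint64_t size;
};

/* Stand-in for moea64_kenter_large(); supplied by the caller in this sketch. */
typedef void (*kenter_large_fn_sk)(uint64_t va, uint64_t pa);

static void
setup_direct_map_sk(const struct phys_region_sk *regs, int nregs,
    uint64_t large_page_size, uint64_t dmap_base, kenter_large_fn_sk kenter)
{
	uint64_t pa, end;

	for (int i = 0; i < nregs; i++) {
		/* Round each region out to large-page boundaries. */
		pa = regs[i].start & ~(large_page_size - 1);
		end = regs[i].start + regs[i].size;
		for (; pa < end; pa += large_page_size) {
			/*
			 * Identity relation: the kernel can later reach any
			 * physical page at dmap_base + pa with no per-page
			 * bookkeeping, which the kextract() shortcut relies on.
			 */
			kenter(dmap_base + pa, pa);
		}
	}
}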
919 return (-1); in pa_cmp()
948 bcopy(&slbtrap, (void *)EXC_DSE,(size_t)&slbtrapend - (size_t)&slbtrap); in moea64_early_bootstrap()
949 bcopy(&slbtrap, (void *)EXC_ISE,(size_t)&slbtrapend - (size_t)&slbtrap); in moea64_early_bootstrap()
969 CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)", in moea64_early_bootstrap()
977 hwphyssz - physsz; in moea64_early_bootstrap()
1037 phys_avail_count -= rm_pavail; in moea64_early_bootstrap()
1065 moea64_pteg_mask = moea64_pteg_count - 1; in moea64_mid_bootstrap()
1105 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW] in moea64_mid_bootstrap()
1115 pcpup->pc_aim.slb[i].slbv = 0; in moea64_mid_bootstrap()
1116 pcpup->pc_aim.slb[i].slbe = 0; in moea64_mid_bootstrap()
1120 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; in moea64_mid_bootstrap()
1123 kernel_pmap->pmap_phys = kernel_pmap; in moea64_mid_bootstrap()
1124 CPU_FILL(&kernel_pmap->pm_active); in moea64_mid_bootstrap()
1125 RB_INIT(&kernel_pmap->pmap_pvo); in moea64_mid_bootstrap()
1153 if (chosen != -1 && OF_getencprop(chosen, "mmu", &mmui, 4) != -1) { in moea64_late_bootstrap()
1155 if (mmu == -1 || in moea64_late_bootstrap()
1156 (sz = OF_getproplen(mmu, "translations")) == -1) in moea64_late_bootstrap()
1158 if (sz > 6144 /* tmpstksz - 2 KB headroom */) in moea64_late_bootstrap()
1263 * of the PVO book-keeping or other parts of the VM system in moea64_late_bootstrap()
1271 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE; in moea64_late_bootstrap()
1272 virtual_end -= PAGE_SIZE; in moea64_late_bootstrap()
1297 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE); in moea64_pmap_init_qpages()
1298 if (pc->pc_qmap_addr == 0) in moea64_pmap_init_qpages()
1301 pc->pc_aim.qmap_pvo = in moea64_pmap_init_qpages()
1302 moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr); in moea64_pmap_init_qpages()
1304 mtx_init(&pc->pc_aim.qmap_lock, "qmap lock", NULL, MTX_DEF); in moea64_pmap_init_qpages()
1311 * Activate a user pmap. This mostly involves setting some non-CPU
1319 pm = &td->td_proc->p_vmspace->vm_pmap; in moea64_activate()
1320 CPU_SET(PCPU_GET(cpuid), &pm->pm_active); in moea64_activate()
1323 PCPU_SET(aim.userslb, pm->pm_slb); in moea64_activate()
1325 "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE)); in moea64_activate()
1327 PCPU_SET(curpmap, pm->pmap_phys); in moea64_activate()
1328 mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid); in moea64_activate()
1339 pm = &td->td_proc->p_vmspace->vm_pmap; in moea64_deactivate()
1340 CPU_CLR(PCPU_GET(cpuid), &pm->pm_active); in moea64_deactivate()
1357 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); in moea64_unwire()
1359 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { in moea64_unwire()
1371 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) in moea64_unwire()
1374 pvo->pvo_vaddr &= ~PVO_WIRED; in moea64_unwire()
1376 if ((pvo->pvo_vaddr & PVO_MANAGED) && in moea64_unwire()
1377 (pvo->pvo_pte.prot & VM_PROT_WRITE)) { in moea64_unwire()
1382 refchg |= atomic_readandclear_32(&m->md.mdpg_attrs); in moea64_unwire()
1388 pm->pm_stats.wired_count--; in moea64_unwire()
1408 managed = (pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED; in moea64_mincore()
1456 PMAP_LOCK(pvo->pvo_pmap); in moea64_set_scratchpage_pa()
1457 pvo->pvo_pte.pa = in moea64_set_scratchpage_pa()
1460 PMAP_UNLOCK(pvo->pvo_pmap); in moea64_set_scratchpage_pa()
1501 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); in moea64_copy_pages_dmap()
1506 cnt = min(cnt, PAGE_SIZE - b_pg_offset); in moea64_copy_pages_dmap()
1513 xfersize -= cnt; in moea64_copy_pages_dmap()
1528 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); in moea64_copy_pages()
1533 cnt = min(cnt, PAGE_SIZE - b_pg_offset); in moea64_copy_pages()
1540 xfersize -= cnt; in moea64_copy_pages()
1612 pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) | in moea64_quick_enter_page()
1641 return (!LIST_EMPTY(&(m)->md.mdpg_pvoh)); in moea64_page_is_mapped()
1660 if ((m->oflags & VPO_UNMANAGED) == 0) { in moea64_enter()
1664 VM_OBJECT_ASSERT_LOCKED(m->object); in moea64_enter()
1673 pvo->pvo_pmap = NULL; /* to be filled in later */ in moea64_enter()
1674 pvo->pvo_pte.prot = prot; in moea64_enter()
1678 pvo->pvo_pte.pa = pa | pte_lo; in moea64_enter()
1681 pvo->pvo_vaddr |= PVO_WIRED; in moea64_enter()
1683 if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) { in moea64_enter()
1686 pvo_head = &m->md.mdpg_pvoh; in moea64_enter()
1687 pvo->pvo_vaddr |= PVO_MANAGED; in moea64_enter()
1692 if (pvo->pvo_pmap == NULL) in moea64_enter()
1706 (m->oflags & VPO_UNMANAGED) == 0) in moea64_enter()
1711 if (oldpvo->pvo_vaddr == pvo->pvo_vaddr && in moea64_enter()
1712 oldpvo->pvo_pte.pa == pvo->pvo_pte.pa && in moea64_enter()
1713 oldpvo->pvo_pte.prot == prot) { in moea64_enter()
1719 STAT_MOEA64(moea64_pte_overflow--); in moea64_enter()
1731 KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old " in moea64_enter()
1751 if (pmap != kernel_pmap && (m->a.flags & PGA_EXECUTABLE) == 0 && in moea64_enter()
1768 (pvo->pvo_vaddr & PVO_MANAGED) != 0 && in moea64_enter()
1772 (m->flags & PG_FICTITIOUS) == 0 && in moea64_enter()
1838 VM_OBJECT_ASSERT_LOCKED(m_start->object); in moea64_enter_object()
1840 vm_page_iter_limit_init(&pages, m_start->object, in moea64_enter_object()
1841 m_start->pindex + atop(end - start)); in moea64_enter_object()
1842 m = vm_radix_iter_lookup(&pages, m_start->pindex); in moea64_enter_object()
1844 va = start + ptoa(m->pindex - m_start->pindex); in moea64_enter_object()
1846 m->psind == 1 && moea64_ps_enabled(pm)) in moea64_enter_object()
1881 pa = PVO_PADDR(pvo) | (va - PVO_VADDR(pvo)); in moea64_extract()
1901 if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) { in moea64_extract_and_hold()
1923 * can lead to multiply locking non-recursive mutexes. in moea64_uma_page_alloc()
1938 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE; in moea64_uma_page_alloc()
1939 pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M; in moea64_uma_page_alloc()
1945 pvo->pvo_vaddr |= PVO_WIRED; in moea64_uma_page_alloc()
2007 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea64_is_referenced()
2017 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea64_is_modified()
2047 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea64_clear_modify()
2066 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea64_remove_write()
2077 pmap = pvo->pvo_pmap; in moea64_remove_write()
2079 if (!(pvo->pvo_vaddr & PVO_DEAD) && in moea64_remove_write()
2080 (pvo->pvo_pte.prot & VM_PROT_WRITE)) { in moea64_remove_write()
2086 pvo->pvo_pte.prot &= ~VM_PROT_WRITE; in moea64_remove_write()
2091 if (pvo->pvo_pmap == kernel_pmap) in moea64_remove_write()
2096 if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG) in moea64_remove_write()
2118 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea64_ts_referenced()
2137 if (m->md.mdpg_cache_attrs == ma) in moea64_page_set_memattr()
2140 if ((m->oflags & VPO_UNMANAGED) != 0) { in moea64_page_set_memattr()
2141 m->md.mdpg_cache_attrs = ma; in moea64_page_set_memattr()
2149 pmap = pvo->pvo_pmap; in moea64_page_set_memattr()
2151 if (!(pvo->pvo_vaddr & PVO_DEAD)) { in moea64_page_set_memattr()
2157 pvo->pvo_pte.pa &= ~LPTE_WIMG; in moea64_page_set_memattr()
2158 pvo->pvo_pte.pa |= lo; in moea64_page_set_memattr()
2161 refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ? in moea64_page_set_memattr()
2163 if ((pvo->pvo_vaddr & PVO_MANAGED) && in moea64_page_set_memattr()
2164 (pvo->pvo_pte.prot & VM_PROT_WRITE)) { in moea64_page_set_memattr()
2166 atomic_readandclear_32(&m->md.mdpg_attrs); in moea64_page_set_memattr()
2172 if (pvo->pvo_pmap == kernel_pmap) in moea64_page_set_memattr()
2177 m->md.mdpg_cache_attrs = ma; in moea64_page_set_memattr()
2195 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; in moea64_kenter_attr()
2196 pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma); in moea64_kenter_attr()
2197 pvo->pvo_vaddr |= PVO_WIRED; in moea64_kenter_attr()
2236 * Shortcut the direct-mapped case when applicable. We never put in moea64_kextract()
2237 * anything but 1:1 (or 62-bit aliased) mappings below in moea64_kextract()
2247 pa = PVO_PADDR(pvo) | (va - PVO_VADDR(pvo)); in moea64_kextract()
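The moea64_kextract() lines at 2236-2247 shortcut the common case: an address below the regular kernel VA range is assumed to be direct-mapped (1:1, possibly with a fixed offset or aliasing bit), so the physical address follows arithmetically and only other addresses need a PVO lookup. A sketch under those assumptions, with the constants and the fallback helper invented for the example:

#include <stdint.h>

#define DMAP_BASE_SK	0x0UL			/* assumed direct-map offset */
#define KVA_START_SK	0xe000000000000000UL	/* assumed start of mapped KVA */

/* Hypothetical fallback that walks the kernel pmap's PVO tree. */
extern uint64_t pvo_lookup_pa_sk(uint64_t va);

static uint64_t
kextract_sk(uint64_t va)
{
	if (va < KVA_START_SK)
		return (va - DMAP_BASE_SK);	/* direct-mapped: no table walk */
	return (pvo_lookup_pa_sk(va));		/* otherwise consult the PVO tree */
}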
2277 l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr); in moea64_map_user_ptr()
2286 /* Try lockless look-up first */ in moea64_map_user_ptr()
2290 /* If it isn't there, we need to pre-fault the VSID */ in moea64_map_user_ptr()
2295 slbv = slb->slbv; in moea64_map_user_ptr()
2298 /* Mark segment no-execute */ in moea64_map_user_ptr()
2303 /* Mark segment no-execute */ in moea64_map_user_ptr()
2308 if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv) in moea64_map_user_ptr()
2312 curthread->td_pcb->pcb_cpu.aim.usr_segm = in moea64_map_user_ptr()
2314 curthread->td_pcb->pcb_cpu.aim.usr_vsid = slbv; in moea64_map_user_ptr()
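The moea64_map_user_ptr() lines at 2277-2314 map a user pointer through a fixed kernel "user segment" window: the pointer's offset within its 256 MB segment is reused under a kernel base address, the usable length is clamped at the segment boundary, and the segment's VSID is looked up (locklessly if possible), marked no-execute, and installed in the thread's usr_vsid slot. A sketch of the address arithmetic, with USER_ADDR and the return convention assumed for the example and the VSID handling left as a comment:

#include <stddef.h>
#include <stdint.h>

#define SEGMENT_LENGTH_SK	(256UL << 20)		/* 256 MB PowerPC segment */
#define USER_ADDR_SK		0xd000000000000000UL	/* assumed window base */

static int
map_user_ptr_sk(uint64_t uaddr, uint64_t *kaddr, size_t ulen, size_t *klen)
{
	uint64_t l;

	/* Re-base the user address's segment offset under the kernel window. */
	*kaddr = USER_ADDR_SK + (uaddr & (SEGMENT_LENGTH_SK - 1));

	/* Only the remainder of this segment is reachable through one window. */
	l = (USER_ADDR_SK + SEGMENT_LENGTH_SK) - *kaddr;
	if (l > ulen)
		l = ulen;
	if (klen != NULL)
		*klen = (size_t)l;
	else if (l != ulen)
		return (-1);	/* caller needed the whole range at once */

	/*
	 * The real routine then resolves the SLB entry for the user segment
	 * (pre-faulting the VSID if the lockless lookup misses), forces the
	 * segment no-execute, and installs the VSID in the thread's
	 * usr_vsid slot so accesses through the window reach user memory.
	 */
	return (0);
}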
2337 user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm; in moea64_decode_kernel_ptr()
2354 * Architectures which can support a direct-mapped physical to virtual region
2390 * 16 pvs linked to from this page. This count may
2402 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea64_page_exists_quick()
2408 if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) { in moea64_page_exists_quick()
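The comment fragment at 2390 and the loop at 2402-2408 belong to the "exists quick" check: the page's pv list is walked, but the scan gives up after 16 entries so the check stays cheap even for heavily shared pages. A sketch with an invented list node, since only the bounded-scan idea is being illustrated:

#include <stddef.h>

struct pv_sk {
	struct pv_sk *next;
	void *pmap;
	int dead;
};

static int
page_exists_quick_sk(const struct pv_sk *head, const void *pmap)
{
	int loops = 0;

	for (const struct pv_sk *pv = head; pv != NULL; pv = pv->next) {
		if (!pv->dead && pv->pmap == pmap)
			return (1);	/* live mapping in this pmap */
		if (++loops >= 16)
			break;		/* bounded: may report a false negative */
	}
	return (0);
}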
2423 m->md.mdpg_attrs = 0; in moea64_page_init()
2424 m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT; in moea64_page_init()
2425 LIST_INIT(&m->md.mdpg_pvoh); in moea64_page_init()
2439 if ((m->oflags & VPO_UNMANAGED) != 0) in moea64_page_wired_mappings()
2443 if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED) in moea64_page_wired_mappings()
2473 hash = moea64_vsidcontext & (NVSIDS - 1); in moea64_get_unique_vsid()
2477 mask = 1 << (hash & (VSID_NBPW - 1)); in moea64_get_unique_vsid()
2485 i = ffs(~moea64_vsid_bitmap[n]) - 1; in moea64_get_unique_vsid()
2493 ("Allocating in-use VSID %#zx\n", hash)); in moea64_get_unique_vsid()
2508 RB_INIT(&pmap->pmap_pvo); in moea64_pinit()
2510 pmap->pm_slb_tree_root = slb_alloc_tree(); in moea64_pinit()
2511 pmap->pm_slb = slb_alloc_user_cache(); in moea64_pinit()
2512 pmap->pm_slb_len = 0; in moea64_pinit()
2523 RB_INIT(&pmap->pmap_pvo); in moea64_pinit()
2526 pmap->pmap_phys = (pmap_t)moea64_kextract((vm_offset_t)pmap); in moea64_pinit()
2528 pmap->pmap_phys = pmap; in moea64_pinit()
2536 pmap->pm_sr[i] = VSID_MAKE(i, hash); in moea64_pinit()
2538 KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0")); in moea64_pinit()
2553 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); in moea64_pinit0()
2571 oldprot = pvo->pvo_pte.prot; in moea64_pvo_protect()
2572 pvo->pvo_pte.prot = prot; in moea64_pvo_protect()
2583 (pg->a.flags & PGA_EXECUTABLE) == 0 && in moea64_pvo_protect()
2584 (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { in moea64_pvo_protect()
2585 if ((pg->oflags & VPO_UNMANAGED) == 0) in moea64_pvo_protect()
2595 if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) && in moea64_pvo_protect()
2597 refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs); in moea64_pvo_protect()
2614 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, in moea64_protect()
2624 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); in moea64_protect()
2626 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { in moea64_protect()
2650 while (count-- > 0) { in moea64_qenter()
2664 while (count-- > 0) { in moea64_qremove()
2676 idx = vsid & (NVSIDS-1); in moea64_release_vsid()
2694 slb_free_user_cache(pmap->pm_slb); in moea64_release()
2696 KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0")); in moea64_release()
2698 moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0])); in moea64_release()
2714 RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) { in moea64_remove_pages()
2715 if (pvo->pvo_vaddr & PVO_WIRED) in moea64_remove_pages()
2745 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); in moea64_remove_locked()
2757 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); in moea64_remove_locked()
2781 if (pm->pm_stats.resident_count == 0) in moea64_remove()
2813 pmap = pvo->pvo_pmap; in moea64_remove_all()
2815 wasdead = (pvo->pvo_vaddr & PVO_DEAD); in moea64_remove_all()
2831 KASSERT((m->a.flags & PGA_WRITEABLE) == 0, ("Page still writable")); in moea64_remove_all()
2867 phys_avail[i + 1] -= size; in moea64_bootstrap_alloc()
2869 for (j = phys_avail_count * 2; j > i; j -= 2) { in moea64_bootstrap_alloc()
2870 phys_avail[j] = phys_avail[j - 2]; in moea64_bootstrap_alloc()
2871 phys_avail[j + 1] = phys_avail[j - 1]; in moea64_bootstrap_alloc()
2892 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); in moea64_pvo_enter()
2899 old_pvo = RB_INSERT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo); in moea64_pvo_enter()
2911 if (pvo->pvo_vaddr & PVO_WIRED) in moea64_pvo_enter()
2912 pvo->pvo_pmap->pm_stats.wired_count++; in moea64_pvo_enter()
2913 pvo->pvo_pmap->pm_stats.resident_count++; in moea64_pvo_enter()
2925 if (pvo->pvo_pmap == kernel_pmap) in moea64_pvo_enter()
2935 pvo->pvo_vaddr & PVO_LARGE); in moea64_pvo_enter()
2947 KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap")); in moea64_pvo_remove_from_pmap()
2948 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); in moea64_pvo_remove_from_pmap()
2949 KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO")); in moea64_pvo_remove_from_pmap()
2960 if (pvo->pvo_pte.prot & VM_PROT_WRITE) in moea64_pvo_remove_from_pmap()
2969 pvo->pvo_pmap->pm_stats.resident_count--; in moea64_pvo_remove_from_pmap()
2970 if (pvo->pvo_vaddr & PVO_WIRED) in moea64_pvo_remove_from_pmap()
2971 pvo->pvo_pmap->pm_stats.wired_count--; in moea64_pvo_remove_from_pmap()
2976 RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo); in moea64_pvo_remove_from_pmap()
2981 pvo->pvo_vaddr |= PVO_DEAD; in moea64_pvo_remove_from_pmap()
2984 if ((pvo->pvo_vaddr & PVO_MANAGED) && in moea64_pvo_remove_from_pmap()
2985 (pvo->pvo_pte.prot & VM_PROT_WRITE)) { in moea64_pvo_remove_from_pmap()
2988 refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs); in moea64_pvo_remove_from_pmap()
3002 KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page")); in moea64_pvo_remove_from_page_locked()
3005 if (pvo->pvo_pmap == NULL) in moea64_pvo_remove_from_page_locked()
3007 pvo->pvo_pmap = NULL; in moea64_pvo_remove_from_page_locked()
3013 if (pvo->pvo_vaddr & PVO_MANAGED) { in moea64_pvo_remove_from_page_locked()
3022 STAT_MOEA64(moea64_pvo_entries--); in moea64_pvo_remove_from_page_locked()
3031 if (pvo->pvo_vaddr & PVO_MANAGED) in moea64_pvo_remove_from_page()
3047 return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key)); in moea64_pvo_find_va()
3063 if ((m->md.mdpg_attrs & ptebit) != 0 || in moea64_query_bit()
3065 (sp->md.mdpg_attrs & (ptebit | MDPG_ATTR_SP)) == in moea64_query_bit()
3082 if (ret != -1) { in moea64_query_bit()
3099 PMAP_LOCK(pvo->pvo_pmap); in moea64_query_bit()
3100 if (!(pvo->pvo_vaddr & PVO_DEAD)) in moea64_query_bit()
3102 PMAP_UNLOCK(pvo->pvo_pmap); in moea64_query_bit()
3105 atomic_set_32(&m->md.mdpg_attrs, in moea64_query_bit()
3138 if ((ret = moea64_sp_clear(pvo, m, ptebit)) != -1) { in moea64_clear_bit()
3145 PMAP_LOCK(pvo->pvo_pmap); in moea64_clear_bit()
3146 if (!(pvo->pvo_vaddr & PVO_DEAD)) in moea64_clear_bit()
3148 PMAP_UNLOCK(pvo->pvo_pmap); in moea64_clear_bit()
3153 atomic_clear_32(&m->md.mdpg_attrs, ptebit); in moea64_clear_bit()
3172 for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key); in moea64_dev_direct_mapped()
3174 pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) { in moea64_dev_direct_mapped()
3207 size -= PAGE_SIZE; in moea64_mapdev_attr()
3245 pm = &curthread->td_proc->p_vmspace->vm_pmap; in moea64_sync_icache()
3250 len = MIN(lim - va, sz); in moea64_sync_icache()
3252 if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) { in moea64_sync_icache()
3257 sz -= len; in moea64_sync_icache()
3294 dump_map[0].pa_size = round_page((uintptr_t)_end) - in moea64_scan_init()
3298 dump_map[1].pa_start = (vm_paddr_t)(uintptr_t)msgbufp->msg_ptr; in moea64_scan_init()
3299 dump_map[1].pa_size = round_page(msgbufp->msg_size); in moea64_scan_init()
3311 if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD)) in moea64_scan_init()
3324 if (pvo == NULL || (pvo->pvo_vaddr & PVO_DEAD)) in moea64_scan_init()
3328 dump_map[2].pa_size = va - dump_map[2].pa_start; in moea64_scan_init()
3353 RB_FOREACH(pvo, pvo_tree, &kernel_pmap->pmap_pvo) { in moea64_scan_pmap()
3354 va = pvo->pvo_vaddr; in moea64_scan_pmap()
3422 for (; npages > 0; --npages) { in moea64_map_range()
3432 npages -= (moea64_large_page_size >> PAGE_SHIFT) - 1; in moea64_map_range()
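moea64_map_range() (3422-3432) walks a run of pages and opportunistically uses large pages: when the current VA/PA are aligned to the large page size and enough pages remain, one large mapping stands in for many base-page mappings and the loop counter is advanced by the difference, which is what the npages adjustment at 3432 expresses. A self-contained model of that arithmetic, with the page sizes assumed:

#include <stdint.h>

#define PAGE_SHIFT_SK	12
#define PAGE_SIZE_SK	(1UL << PAGE_SHIFT_SK)

static uint64_t
map_range_sk(uint64_t va, uint64_t pa, uint64_t npages, uint64_t large_page_size)
{
	uint64_t lp_pages = large_page_size >> PAGE_SHIFT_SK;
	uint64_t entries = 0;

	for (; npages > 0; --npages) {
		if (((va | pa) & (large_page_size - 1)) == 0 &&
		    npages >= lp_pages) {
			/* One large, wired mapping covers lp_pages base pages. */
			entries++;
			va += large_page_size;
			pa += large_page_size;
			npages -= lp_pages - 1;	/* the loop's --npages takes the rest */
		} else {
			/* Fall back to a single base-page mapping. */
			entries++;
			va += PAGE_SIZE_SK;
			pa += PAGE_SIZE_SK;
		}
	}
	return (entries);	/* mappings a real implementation would enter */
}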
3453 /* Short-circuit single-domain systems. */ in moea64_page_array_startup()
3473 size = btoc(vm_phys_segs[i].end - vm_phys_segs[i].start); in moea64_page_array_startup()
3481 size = btoc(phys_avail[i + 1] - phys_avail[i]); in moea64_page_array_startup()
3494 size = ulmin(pages - vm_page_array_size, dom_pages[i]); in moea64_page_array_startup()
3503 vm_page_base += size - needed; in moea64_page_array_startup()
3534 f = moea64_ops->func; \
3542 if (hw_direct_map == -1) { in moea64_install()
3554 * Default to non-DMAP, and switch over to DMAP functions once we know in moea64_install()
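The macro fragment at 3534 and the moea64_install() lines at 3542-3554 point at the method-dispatch scheme: the pmap keeps a table of function pointers, starts out with implementations that work without a direct map, and switches to the DMAP-based variants once hw_direct_map is known. A small model of that selection, with the ops structure, both page-copy stubs and the probe variable invented for the example:

#include <string.h>

struct pmap_ops_sk {
	void (*copy_page)(const void *src, void *dst);
};

/* Conservative variant: a real kernel would go through scratch mappings. */
static void
copy_page_scratch_sk(const void *src, void *dst)
{
	memcpy(dst, src, 4096);
}

/* Direct-map variant: both pages are already addressable. */
static void
copy_page_dmap_sk(const void *src, void *dst)
{
	memcpy(dst, src, 4096);
}

static struct pmap_ops_sk ops_sk = {
	.copy_page = copy_page_scratch_sk	/* safe default */
};
static int hw_direct_map_sk = -1;		/* -1: not probed yet */

static void
pmap_install_sk(void)
{
	if (hw_direct_map_sk > 0)
		ops_sk.copy_page = copy_page_dmap_sk;
	/* otherwise keep the non-DMAP defaults until the probe has run */
}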
3599 if (object != NULL && (object->flags & OBJ_COLORED) != 0) in moea64_align_superpage()
3600 offset += ptoa(object->pg_color); in moea64_align_superpage()
3602 if (size - ((HPT_SP_SIZE - sp_offset) & HPT_SP_MASK) < HPT_SP_SIZE || in moea64_align_superpage()
3622 if (pvo->pvo_vaddr & PVO_DEAD) in moea64_pvo_cleanup()
3634 if ((pvo->pvo_pte.prot & VM_PROT_WRITE) != 0) in pvo_to_vmpage_flags()
3636 if ((pvo->pvo_pte.prot & VM_PROT_EXECUTE) != 0) in pvo_to_vmpage_flags()
3643 * Check if the given pvo and its superpage are in sva-eva range.
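The comment at 3643 names a range check used throughout the superpage code: a mapping counts as inside [sva, eva) only if the whole 16 MB superpage containing it does, so callers typically demote the superpage when only part of it is covered. A one-function sketch of that predicate, with the superpage size assumed:

#include <stdint.h>

#define HPT_SP_SIZE_SK	(16UL << 20)	/* assumed 16 MB superpage */

static int
sp_in_range_sk(uint64_t va, uint64_t sva, uint64_t eva)
{
	uint64_t sp_start = va & ~(HPT_SP_SIZE_SK - 1);
	uint64_t sp_end = sp_start + HPT_SP_SIZE_SK;

	/* Both the start and the end of the superpage must fall in range. */
	return (sp_start >= sva && sp_end <= eva);
}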
3676 if ((sp->pvo_vaddr & PVO_MANAGED) != 0 && (prot & VM_PROT_WRITE) != 0) { in moea64_sp_refchg_process()
3679 atomic_readandclear_32(&m->md.mdpg_attrs); in moea64_sp_refchg_process()
3707 KASSERT(m->psind == 1, ("%s: invalid m->psind: %d", in moea64_sp_enter()
3708 __func__, m->psind)); in moea64_sp_enter()
3734 for (i = i - 1; i >= 0; i--) in moea64_sp_enter()
3752 pvo->pvo_pte.prot = prot; in moea64_sp_enter()
3753 pvo->pvo_pte.pa = (pa & ~HPT_SP_MASK) | LPTE_LP_4K_16M | in moea64_sp_enter()
3757 pvo->pvo_vaddr |= PVO_WIRED; in moea64_sp_enter()
3758 pvo->pvo_vaddr |= PVO_LARGE; in moea64_sp_enter()
3760 if ((m->oflags & VPO_UNMANAGED) != 0) in moea64_sp_enter()
3763 pvo_head = &m->md.mdpg_pvoh; in moea64_sp_enter()
3764 pvo->pvo_vaddr |= PVO_MANAGED; in moea64_sp_enter()
3782 sync = (sm->a.flags & PGA_EXECUTABLE) == 0; in moea64_sp_enter()
3797 if (sync && (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) in moea64_sp_enter()
3836 * - When a superpage is first entered read-only and later becomes in moea64_sp_promote()
3837 * read-write. in moea64_sp_promote()
3838 * - When some of the superpage's virtual addresses map to previously in moea64_sp_promote()
3847 if (pvo == NULL || (pvo->pvo_vaddr & PVO_DEAD) != 0) { in moea64_sp_promote()
3861 if ((first->pvo_vaddr & PVO_FLAGS_PROMOTE) != in moea64_sp_promote()
3862 (pvo->pvo_vaddr & PVO_FLAGS_PROMOTE)) { in moea64_sp_promote()
3866 (uintmax_t)(pvo->pvo_vaddr & PVO_FLAGS_PROMOTE), in moea64_sp_promote()
3867 (uintmax_t)(first->pvo_vaddr & PVO_FLAGS_PROMOTE)); in moea64_sp_promote()
3871 if (first->pvo_pte.prot != pvo->pvo_pte.prot) { in moea64_sp_promote()
3875 pvo->pvo_pte.prot, first->pvo_pte.prot); in moea64_sp_promote()
3879 if ((first->pvo_pte.pa & LPTE_WIMG) != in moea64_sp_promote()
3880 (pvo->pvo_pte.pa & LPTE_WIMG)) { in moea64_sp_promote()
3884 (uintmax_t)(pvo->pvo_pte.pa & LPTE_WIMG), in moea64_sp_promote()
3885 (uintmax_t)(first->pvo_pte.pa & LPTE_WIMG)); in moea64_sp_promote()
3890 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo); in moea64_sp_promote()
3900 * 1- If a page is being promoted, it was referenced. in moea64_sp_promote()
3901 * 2- If promoted pages are writable, they were modified. in moea64_sp_promote()
3904 ((first->pvo_pte.prot & VM_PROT_WRITE) != 0 ? LPTE_CHG : 0); in moea64_sp_promote()
3910 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) { in moea64_sp_promote()
3911 pvo->pvo_pte.pa &= ADDR_POFF | ~HPT_SP_MASK; in moea64_sp_promote()
3912 pvo->pvo_pte.pa |= LPTE_LP_4K_16M; in moea64_sp_promote()
3913 pvo->pvo_vaddr |= PVO_LARGE; in moea64_sp_promote()
3918 moea64_sp_refchg_process(first, m, sp_refchg, first->pvo_pte.prot); in moea64_sp_promote()
3921 atomic_set_32(&m->md.mdpg_attrs, sp_refchg | MDPG_ATTR_SP); in moea64_sp_promote()
3949 pmap = sp->pvo_pmap; in moea64_sp_demote_aligned()
3962 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo), in moea64_sp_demote_aligned()
3967 pvo->pvo_vaddr &= ~PVO_LARGE; in moea64_sp_demote_aligned()
3968 pvo->pvo_pte.pa &= ~LPTE_RPGN; in moea64_sp_demote_aligned()
3969 pvo->pvo_pte.pa |= pa; in moea64_sp_demote_aligned()
3982 atomic_clear_32(&m->md.mdpg_attrs, MDPG_ATTR_SP); in moea64_sp_demote_aligned()
3988 moea64_sp_refchg_process(sp, m, refchg, sp->pvo_pte.prot); in moea64_sp_demote_aligned()
3998 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); in moea64_sp_demote()
4001 pvo = moea64_pvo_find_va(pvo->pvo_pmap, in moea64_sp_demote()
4019 pm = sp->pvo_pmap; in moea64_sp_unwire()
4025 prev = pvo, pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { in moea64_sp_unwire()
4026 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) in moea64_sp_unwire()
4029 pvo->pvo_vaddr &= ~PVO_WIRED; in moea64_sp_unwire()
4037 pm->pm_stats.wired_count--; in moea64_sp_unwire()
4042 refchg, sp->pvo_pte.prot); in moea64_sp_unwire()
4060 pm = sp->pvo_pmap; in moea64_sp_protect()
4063 oldprot = sp->pvo_pte.prot; in moea64_sp_protect()
4071 prev = pvo, pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { in moea64_sp_protect()
4072 pvo->pvo_pte.prot = prot; in moea64_sp_protect()
4087 if ((m->a.flags & PGA_EXECUTABLE) == 0 && in moea64_sp_protect()
4088 (sp->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { in moea64_sp_protect()
4089 if ((m->oflags & VPO_UNMANAGED) == 0) in moea64_sp_protect()
4108 pm = sp->pvo_pmap; in moea64_sp_remove()
4113 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); in moea64_sp_remove()
4130 atomic_clear_32(&PHYS_TO_VM_PAGE(PVO_PADDR(sp))->md.mdpg_attrs, in moea64_sp_remove()
4145 pmap = pvo->pvo_pmap; in moea64_sp_query_locked()
4159 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) { in moea64_sp_query_locked()
4171 atomic_set_32(&m->md.mdpg_attrs, refchg | MDPG_ATTR_SP); in moea64_sp_query_locked()
4183 pmap = pvo->pvo_pmap; in moea64_sp_query()
4189 if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) { in moea64_sp_query()
4193 return (-1); in moea64_sp_query()
4215 pmap = pvo->pvo_pmap; in moea64_sp_pvo_clear()
4221 if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) { in moea64_sp_pvo_clear()
4225 return (-1); in moea64_sp_pvo_clear()
4239 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) { in moea64_sp_pvo_clear()
4246 atomic_clear_32(&m->md.mdpg_attrs, ptebit); in moea64_sp_pvo_clear()
4263 pmap = pvo->pvo_pmap; in moea64_sp_clear()
4281 (HPT_SP_PAGES - 1)) == 0 && (pvo->pvo_vaddr & PVO_WIRED) == 0) { in moea64_sp_clear()
4282 if ((ret = moea64_sp_pvo_clear(pvo, ptebit)) == -1) in moea64_sp_clear()
4283 return (-1); in moea64_sp_clear()
4311 if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) { in moea64_sp_clear()
4315 return (-1); in moea64_sp_clear()
4333 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) in moea64_sp_clear()
4335 pvo->pvo_pte.prot & ~VM_PROT_WRITE); in moea64_sp_clear()