Lines Matching +full:bat-temp-ok

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2008-2015 Nathan Whitehorn
94 #include <machine/bat.h>
128 * Cheap NUMA-izing of the pv locks, to reduce contention across domains.
155 pa_end = pa + (HPT_SP_SIZE - PV_LOCK_SIZE); in moea64_sp_pv_lock()
171 pa += HPT_SP_SIZE - PV_LOCK_SIZE; in moea64_sp_pv_unlock()
176 pa -= PV_LOCK_SIZE; in moea64_sp_pv_unlock()
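
The moea64_sp_pv_lock()/moea64_sp_pv_unlock() fragments above take every pv lock covering a 16 MB superpage in ascending physical-address order and release them in the reverse order. Below is a minimal userspace sketch of the same two ideas, a domain-hashed lock array plus ordered acquisition across a range; all constants and the pa_domain() helper are illustrative assumptions, not the kernel's values.

#include <pthread.h>
#include <stdint.h>

/* Illustrative constants only; not the kernel's actual values. */
#define PV_LOCK_SIZE   (1UL << 21)         /* one lock covers 2 MB of PA */
#define HPT_SP_SIZE    (1UL << 24)         /* 16 MB superpage            */
#define LOCKS_PER_DOM  64
#define MAXDOM         4
#define NLOCKS         (MAXDOM * LOCKS_PER_DOM)

static pthread_mutex_t pv_lock[NLOCKS];

static void
pv_lock_init(void)
{
    for (int i = 0; i < NLOCKS; i++)
        pthread_mutex_init(&pv_lock[i], NULL);
}

/* Assumption: the domain comes from high PA bits; the kernel consults the
 * platform's memory-affinity information instead. */
static unsigned
pa_domain(uint64_t pa)
{
    return ((pa >> 34) % MAXDOM);
}

/* "Cheap NUMA-izing": hash the PA into the owning domain's slice of the
 * lock array, so contention stays mostly within a domain. */
static pthread_mutex_t *
pv_lockptr(uint64_t pa)
{
    unsigned dom = pa_domain(pa);
    unsigned idx = (pa / PV_LOCK_SIZE) % LOCKS_PER_DOM;

    return (&pv_lock[dom * LOCKS_PER_DOM + idx]);
}

/* Take every pv lock covering a superpage in ascending PA order; a fixed
 * order across all threads avoids lock-order deadlocks.  (The kernel also
 * tolerates duplicate hits via MTX_DUPOK; this sketch assumes the eight
 * strides map to distinct locks.) */
static void
sp_pv_lock(uint64_t pa)
{
    uint64_t pa_end = pa + (HPT_SP_SIZE - PV_LOCK_SIZE);

    for (;; pa += PV_LOCK_SIZE) {
        pthread_mutex_lock(pv_lockptr(pa));
        if (pa == pa_end)
            break;
    }
}

/* Release in the reverse (descending) order, as in the fragment above. */
static void
sp_pv_unlock(uint64_t pa)
{
    uint64_t p = pa + (HPT_SP_SIZE - PV_LOCK_SIZE);

    for (;;) {
        pthread_mutex_unlock(pv_lockptr(p));
        if (p == pa)
            break;
        p -= PV_LOCK_SIZE;
    }
}
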
311 #define PVO_IS_SP(pvo) (((pvo)->pvo_vaddr & PVO_LARGE) && \
312 (pvo)->pvo_pmap != kernel_pmap)
525 pa = (pvo)->pvo_pte.pa & LPTE_RPGN; in moea64_pvo_paddr()
539 return (&m->md.mdpg_pvoh); in vm_page_to_pvoh()
558 pvo->pvo_vaddr = PVO_BOOTSTRAP; in alloc_pvo_entry()
574 pvo->pvo_pmap = pmap; in init_pvo_entry()
576 pvo->pvo_vaddr |= va; in init_pvo_entry()
578 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT) in init_pvo_entry()
581 if (pmap == kernel_pmap && (pvo->pvo_vaddr & PVO_LARGE) != 0) in init_pvo_entry()
586 pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3; in init_pvo_entry()
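
The init_pvo_entry() fragments above end by recording the PTE slot base as (hash & moea64_pteg_mask) << 3, i.e. a PTE-group index times the eight PTEs a group holds. A simplified sketch of the classic primary-hash computation for 4 KB pages follows; the shift and mask names are illustrative, not the kernel's.

#include <stdint.h>

#define PAGE_SHIFT      12
#define SEG_PIDX_MASK   0xffffUL       /* 256 MB segment / 4 KB pages = 2^16 */
#define PTES_PER_GROUP  8              /* one PTEG holds eight PTEs          */

/*
 * Primary hash of a hashed page table, 4 KB page case: XOR the VSID with
 * the page index inside its segment, mask down to the number of PTE groups
 * (a power of two), and multiply by 8 to get the first slot of the group.
 * The secondary hash used on overflow is simply the one's complement.
 */
static uint64_t
pteg_slot_base(uint64_t vsid, uint64_t va, uint64_t pteg_mask)
{
    uint64_t pgidx = (va >> PAGE_SHIFT) & SEG_PIDX_MASK;
    uint64_t hash = vsid ^ pgidx;

    return ((hash & pteg_mask) * PTES_PER_GROUP);
}
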
593 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) in free_pvo_entry()
601 lpte->pte_hi = moea64_pte_vpn_from_pvo_vpn(pvo); in moea64_pte_from_pvo()
602 lpte->pte_hi |= LPTE_VALID; in moea64_pte_from_pvo()
604 if (pvo->pvo_vaddr & PVO_LARGE) in moea64_pte_from_pvo()
605 lpte->pte_hi |= LPTE_BIG; in moea64_pte_from_pvo()
606 if (pvo->pvo_vaddr & PVO_WIRED) in moea64_pte_from_pvo()
607 lpte->pte_hi |= LPTE_WIRED; in moea64_pte_from_pvo()
608 if (pvo->pvo_vaddr & PVO_HID) in moea64_pte_from_pvo()
609 lpte->pte_hi |= LPTE_HID; in moea64_pte_from_pvo()
611 lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */ in moea64_pte_from_pvo()
612 if (pvo->pvo_pte.prot & VM_PROT_WRITE) in moea64_pte_from_pvo()
613 lpte->pte_lo |= LPTE_BW; in moea64_pte_from_pvo()
615 lpte->pte_lo |= LPTE_BR; in moea64_pte_from_pvo()
617 if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE)) in moea64_pte_from_pvo()
618 lpte->pte_lo |= LPTE_NOEXEC; in moea64_pte_from_pvo()
672 if (mapa->om_pa < mapb->om_pa) in om_cmp()
673 return (-1); in om_cmp()
674 else if (mapa->om_pa > mapb->om_pa) in om_cmp()
692 OF_getencprop(OF_finddevice("/"), "#address-cells", &acells, in moea64_add_ofw_mappings()
694 if (OF_getencprop(mmu, "translations", trans_cells, sz) == -1) in moea64_add_ofw_mappings()
719 panic("OFW translations above 32-bit boundary!"); in moea64_add_ofw_mappings()
723 panic("OFW translation not page-aligned (phys)!"); in moea64_add_ofw_mappings()
725 panic("OFW translation not page-aligned (virt)!"); in moea64_add_ofw_mappings()
734 /* If this address is direct-mapped, skip remapping */ in moea64_add_ofw_mappings()
777 moea64_large_page_mask = moea64_large_page_size - 1; in moea64_probe_large_page()
817 pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE; in moea64_kenter_large()
820 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | in moea64_kenter_large()
822 pvo->pvo_pte.pa = pa | pte_lo; in moea64_kenter_large()
866 * out of the direct-mapped region. in moea64_setup_direct_map()
870 * For pre-dmap execution, we need to use identity mapping in moea64_setup_direct_map()
913 return (-1); in pa_cmp()
932 /* We don't have a direct map since there is no BAT */ in moea64_early_bootstrap()
935 /* Make sure battable is zero, since we have no BAT */ in moea64_early_bootstrap()
942 bcopy(&slbtrap, (void *)EXC_DSE,(size_t)&slbtrapend - (size_t)&slbtrap); in moea64_early_bootstrap()
943 bcopy(&slbtrap, (void *)EXC_ISE,(size_t)&slbtrapend - (size_t)&slbtrap); in moea64_early_bootstrap()
963 CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)", in moea64_early_bootstrap()
971 hwphyssz - physsz; in moea64_early_bootstrap()
1031 phys_avail_count -= rm_pavail; in moea64_early_bootstrap()
1059 moea64_pteg_mask = moea64_pteg_count - 1; in moea64_mid_bootstrap()
1099 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW] in moea64_mid_bootstrap()
1109 pcpup->pc_aim.slb[i].slbv = 0; in moea64_mid_bootstrap()
1110 pcpup->pc_aim.slb[i].slbe = 0; in moea64_mid_bootstrap()
1114 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; in moea64_mid_bootstrap()
1117 kernel_pmap->pmap_phys = kernel_pmap; in moea64_mid_bootstrap()
1118 CPU_FILL(&kernel_pmap->pm_active); in moea64_mid_bootstrap()
1119 RB_INIT(&kernel_pmap->pmap_pvo); in moea64_mid_bootstrap()
1147 if (chosen != -1 && OF_getencprop(chosen, "mmu", &mmui, 4) != -1) { in moea64_late_bootstrap()
1149 if (mmu == -1 || in moea64_late_bootstrap()
1150 (sz = OF_getproplen(mmu, "translations")) == -1) in moea64_late_bootstrap()
1152 if (sz > 6144 /* tmpstksz - 2 KB headroom */) in moea64_late_bootstrap()
1257 * of the PVO book-keeping or other parts of the VM system in moea64_late_bootstrap()
1265 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE; in moea64_late_bootstrap()
1266 virtual_end -= PAGE_SIZE; in moea64_late_bootstrap()
1291 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE); in moea64_pmap_init_qpages()
1292 if (pc->pc_qmap_addr == 0) in moea64_pmap_init_qpages()
1295 pc->pc_aim.qmap_pvo = in moea64_pmap_init_qpages()
1296 moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr); in moea64_pmap_init_qpages()
1298 mtx_init(&pc->pc_aim.qmap_lock, "qmap lock", NULL, MTX_DEF); in moea64_pmap_init_qpages()
1305 * Activate a user pmap. This mostly involves setting some non-CPU
1313 pm = &td->td_proc->p_vmspace->vm_pmap; in moea64_activate()
1314 CPU_SET(PCPU_GET(cpuid), &pm->pm_active); in moea64_activate()
1317 PCPU_SET(aim.userslb, pm->pm_slb); in moea64_activate()
1319 "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE)); in moea64_activate()
1321 PCPU_SET(curpmap, pm->pmap_phys); in moea64_activate()
1322 mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid); in moea64_activate()
1333 pm = &td->td_proc->p_vmspace->vm_pmap; in moea64_deactivate()
1334 CPU_CLR(PCPU_GET(cpuid), &pm->pm_active); in moea64_deactivate()
1351 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); in moea64_unwire()
1353 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { in moea64_unwire()
1365 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) in moea64_unwire()
1368 pvo->pvo_vaddr &= ~PVO_WIRED; in moea64_unwire()
1370 if ((pvo->pvo_vaddr & PVO_MANAGED) && in moea64_unwire()
1371 (pvo->pvo_pte.prot & VM_PROT_WRITE)) { in moea64_unwire()
1376 refchg |= atomic_readandclear_32(&m->md.mdpg_attrs); in moea64_unwire()
1382 pm->pm_stats.wired_count--; in moea64_unwire()
1402 managed = (pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED; in moea64_mincore()
1450 PMAP_LOCK(pvo->pvo_pmap); in moea64_set_scratchpage_pa()
1451 pvo->pvo_pte.pa = in moea64_set_scratchpage_pa()
1454 PMAP_UNLOCK(pvo->pvo_pmap); in moea64_set_scratchpage_pa()
1495 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); in moea64_copy_pages_dmap()
1500 cnt = min(cnt, PAGE_SIZE - b_pg_offset); in moea64_copy_pages_dmap()
1507 xfersize -= cnt; in moea64_copy_pages_dmap()
1522 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); in moea64_copy_pages()
1527 cnt = min(cnt, PAGE_SIZE - b_pg_offset); in moea64_copy_pages()
1534 xfersize -= cnt; in moea64_copy_pages()
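
The moea64_copy_pages() fragments above clamp each copy chunk so it crosses neither the source nor the destination page boundary. Recast as a plain userspace routine (the page-array representation is invented for illustration), the min()-of-remainders pattern looks like this:

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static size_t
min_sz(size_t a, size_t b)
{
    return (a < b ? a : b);
}

/*
 * Copy xfersize bytes between two collections of 4 KB pages, each side
 * described by an array of page pointers plus a starting byte offset.
 * Every iteration copies at most up to the nearer page boundary.
 */
static void
copy_pages(char *const ma[], size_t a_offset,
    char *const mb[], size_t b_offset, size_t xfersize)
{
    while (xfersize > 0) {
        size_t a_pg_offset = a_offset & (PAGE_SIZE - 1);
        size_t b_pg_offset = b_offset & (PAGE_SIZE - 1);
        size_t cnt = min_sz(xfersize, PAGE_SIZE - a_pg_offset);

        cnt = min_sz(cnt, PAGE_SIZE - b_pg_offset);
        memcpy(mb[b_offset / PAGE_SIZE] + b_pg_offset,
            ma[a_offset / PAGE_SIZE] + a_pg_offset, cnt);

        a_offset += cnt;
        b_offset += cnt;
        xfersize -= cnt;
    }
}
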
1606 pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) | in moea64_quick_enter_page()
1635 return (!LIST_EMPTY(&(m)->md.mdpg_pvoh)); in moea64_page_is_mapped()
1654 if ((m->oflags & VPO_UNMANAGED) == 0) { in moea64_enter()
1658 VM_OBJECT_ASSERT_LOCKED(m->object); in moea64_enter()
1667 pvo->pvo_pmap = NULL; /* to be filled in later */ in moea64_enter()
1668 pvo->pvo_pte.prot = prot; in moea64_enter()
1672 pvo->pvo_pte.pa = pa | pte_lo; in moea64_enter()
1675 pvo->pvo_vaddr |= PVO_WIRED; in moea64_enter()
1677 if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) { in moea64_enter()
1680 pvo_head = &m->md.mdpg_pvoh; in moea64_enter()
1681 pvo->pvo_vaddr |= PVO_MANAGED; in moea64_enter()
1686 if (pvo->pvo_pmap == NULL) in moea64_enter()
1700 (m->oflags & VPO_UNMANAGED) == 0) in moea64_enter()
1705 if (oldpvo->pvo_vaddr == pvo->pvo_vaddr && in moea64_enter()
1706 oldpvo->pvo_pte.pa == pvo->pvo_pte.pa && in moea64_enter()
1707 oldpvo->pvo_pte.prot == prot) { in moea64_enter()
1713 STAT_MOEA64(moea64_pte_overflow--); in moea64_enter()
1725 KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old " in moea64_enter()
1745 if (pmap != kernel_pmap && (m->a.flags & PGA_EXECUTABLE) == 0 && in moea64_enter()
1762 (pvo->pvo_vaddr & PVO_MANAGED) != 0 && in moea64_enter()
1766 (m->flags & PG_FICTITIOUS) == 0 && in moea64_enter()
1799 /* Use the scratch page to set up a temp mapping */ in moea64_syncicache()
1832 VM_OBJECT_ASSERT_LOCKED(m_start->object); in moea64_enter_object()
1834 psize = atop(end - start); in moea64_enter_object()
1836 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { in moea64_enter_object()
1839 m->psind == 1 && moea64_ps_enabled(pm)) in moea64_enter_object()
1847 m = &m[HPT_SP_SIZE / PAGE_SIZE - 1]; in moea64_enter_object()
1873 pa = PVO_PADDR(pvo) | (va - PVO_VADDR(pvo)); in moea64_extract()
1893 if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) { in moea64_extract_and_hold()
1915 * can lead to multiply locking non-recursive mutexes. in moea64_uma_page_alloc()
1930 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE; in moea64_uma_page_alloc()
1931 pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M; in moea64_uma_page_alloc()
1937 pvo->pvo_vaddr |= PVO_WIRED; in moea64_uma_page_alloc()
1999 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea64_is_referenced()
2009 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea64_is_modified()
2039 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea64_clear_modify()
2058 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea64_remove_write()
2069 pmap = pvo->pvo_pmap; in moea64_remove_write()
2071 if (!(pvo->pvo_vaddr & PVO_DEAD) && in moea64_remove_write()
2072 (pvo->pvo_pte.prot & VM_PROT_WRITE)) { in moea64_remove_write()
2078 pvo->pvo_pte.prot &= ~VM_PROT_WRITE; in moea64_remove_write()
2083 if (pvo->pvo_pmap == kernel_pmap) in moea64_remove_write()
2088 if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG) in moea64_remove_write()
2110 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea64_ts_referenced()
2129 if ((m->oflags & VPO_UNMANAGED) != 0) { in moea64_page_set_memattr()
2130 m->md.mdpg_cache_attrs = ma; in moea64_page_set_memattr()
2138 pmap = pvo->pvo_pmap; in moea64_page_set_memattr()
2140 if (!(pvo->pvo_vaddr & PVO_DEAD)) { in moea64_page_set_memattr()
2146 pvo->pvo_pte.pa &= ~LPTE_WIMG; in moea64_page_set_memattr()
2147 pvo->pvo_pte.pa |= lo; in moea64_page_set_memattr()
2150 refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ? in moea64_page_set_memattr()
2152 if ((pvo->pvo_vaddr & PVO_MANAGED) && in moea64_page_set_memattr()
2153 (pvo->pvo_pte.prot & VM_PROT_WRITE)) { in moea64_page_set_memattr()
2155 atomic_readandclear_32(&m->md.mdpg_attrs); in moea64_page_set_memattr()
2161 if (pvo->pvo_pmap == kernel_pmap) in moea64_page_set_memattr()
2166 m->md.mdpg_cache_attrs = ma; in moea64_page_set_memattr()
2184 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; in moea64_kenter_attr()
2185 pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma); in moea64_kenter_attr()
2186 pvo->pvo_vaddr |= PVO_WIRED; in moea64_kenter_attr()
2225 * Shortcut the direct-mapped case when applicable. We never put in moea64_kextract()
2226 * anything but 1:1 (or 62-bit aliased) mappings below in moea64_kextract()
2236 pa = PVO_PADDR(pvo) | (va - PVO_VADDR(pvo)); in moea64_kextract()
2266 l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr); in moea64_map_user_ptr()
2275 /* Try lockless look-up first */ in moea64_map_user_ptr()
2279 /* If it isn't there, we need to pre-fault the VSID */ in moea64_map_user_ptr()
2284 slbv = slb->slbv; in moea64_map_user_ptr()
2287 /* Mark segment no-execute */ in moea64_map_user_ptr()
2292 /* Mark segment no-execute */ in moea64_map_user_ptr()
2297 if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv) in moea64_map_user_ptr()
2301 curthread->td_pcb->pcb_cpu.aim.usr_segm = in moea64_map_user_ptr()
2303 curthread->td_pcb->pcb_cpu.aim.usr_vsid = slbv; in moea64_map_user_ptr()
2326 user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm; in moea64_decode_kernel_ptr()
2343 * Architectures which can support a direct-mapped physical to virtual region
2391 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea64_page_exists_quick()
2397 if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) { in moea64_page_exists_quick()
2412 m->md.mdpg_attrs = 0; in moea64_page_init()
2413 m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT; in moea64_page_init()
2414 LIST_INIT(&m->md.mdpg_pvoh); in moea64_page_init()
2428 if ((m->oflags & VPO_UNMANAGED) != 0) in moea64_page_wired_mappings()
2432 if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED) in moea64_page_wired_mappings()
2462 hash = moea64_vsidcontext & (NVSIDS - 1); in moea64_get_unique_vsid()
2466 mask = 1 << (hash & (VSID_NBPW - 1)); in moea64_get_unique_vsid()
2474 i = ffs(~moea64_vsid_bitmap[n]) - 1; in moea64_get_unique_vsid()
2482 ("Allocating in-use VSID %#zx\n", hash)); in moea64_get_unique_vsid()
2497 RB_INIT(&pmap->pmap_pvo); in moea64_pinit()
2499 pmap->pm_slb_tree_root = slb_alloc_tree(); in moea64_pinit()
2500 pmap->pm_slb = slb_alloc_user_cache(); in moea64_pinit()
2501 pmap->pm_slb_len = 0; in moea64_pinit()
2512 RB_INIT(&pmap->pmap_pvo); in moea64_pinit()
2515 pmap->pmap_phys = (pmap_t)moea64_kextract((vm_offset_t)pmap); in moea64_pinit()
2517 pmap->pmap_phys = pmap; in moea64_pinit()
2525 pmap->pm_sr[i] = VSID_MAKE(i, hash); in moea64_pinit()
2527 KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0")); in moea64_pinit()
2542 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); in moea64_pinit0()
2560 oldprot = pvo->pvo_pte.prot; in moea64_pvo_protect()
2561 pvo->pvo_pte.prot = prot; in moea64_pvo_protect()
2572 (pg->a.flags & PGA_EXECUTABLE) == 0 && in moea64_pvo_protect()
2573 (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { in moea64_pvo_protect()
2574 if ((pg->oflags & VPO_UNMANAGED) == 0) in moea64_pvo_protect()
2584 if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) && in moea64_pvo_protect()
2586 refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs); in moea64_pvo_protect()
2603 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, in moea64_protect()
2613 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); in moea64_protect()
2615 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { in moea64_protect()
2639 while (count-- > 0) { in moea64_qenter()
2653 while (count-- > 0) { in moea64_qremove()
2665 idx = vsid & (NVSIDS-1); in moea64_release_vsid()
2683 slb_free_user_cache(pmap->pm_slb); in moea64_release()
2685 KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0")); in moea64_release()
2687 moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0])); in moea64_release()
2703 RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) { in moea64_remove_pages()
2704 if (pvo->pvo_vaddr & PVO_WIRED) in moea64_remove_pages()
2734 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); in moea64_remove_locked()
2746 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); in moea64_remove_locked()
2770 if (pm->pm_stats.resident_count == 0) in moea64_remove()
2802 pmap = pvo->pvo_pmap; in moea64_remove_all()
2804 wasdead = (pvo->pvo_vaddr & PVO_DEAD); in moea64_remove_all()
2820 KASSERT((m->a.flags & PGA_WRITEABLE) == 0, ("Page still writable")); in moea64_remove_all()
2856 phys_avail[i + 1] -= size; in moea64_bootstrap_alloc()
2858 for (j = phys_avail_count * 2; j > i; j -= 2) { in moea64_bootstrap_alloc()
2859 phys_avail[j] = phys_avail[j - 2]; in moea64_bootstrap_alloc()
2860 phys_avail[j + 1] = phys_avail[j - 1]; in moea64_bootstrap_alloc()
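
moea64_bootstrap_alloc() above carves early allocations out of an array of {start, end} physical ranges, either trimming a region in place or splitting it and shifting the later pairs upward, as the j loop shows. A compact userspace model of the same logic (alignment handling simplified, array sizes assumed):

#include <stdint.h>

#define MAX_REGIONS 16

/* {start, end} pairs, terminated by a zero end, like phys_avail[].
 * Assumed to be populated elsewhere and to have room for one extra pair. */
static uint64_t avail[MAX_REGIONS * 2 + 2];
static int avail_count;

/*
 * Take 'size' bytes from the first region that can hold them.  Three
 * cases: trim the front, trim the back, or split the region and shift
 * the following pairs up to make room for the new pair.
 */
static uint64_t
bootstrap_alloc(uint64_t size, uint64_t align)
{
    for (int i = 0; avail[i + 1] != 0; i += 2) {
        uint64_t s = align ? ((avail[i] + align - 1) & ~(align - 1)) : avail[i];
        uint64_t e = s + size;

        if (s < avail[i] || e > avail[i + 1])
            continue;                   /* does not fit here */

        if (s == avail[i]) {
            avail[i] += size;           /* trim the front    */
        } else if (e == avail[i + 1]) {
            avail[i + 1] -= size;       /* trim the back     */
        } else {                        /* split the region  */
            for (int j = avail_count * 2; j > i; j -= 2) {
                avail[j] = avail[j - 2];
                avail[j + 1] = avail[j - 1];
            }
            avail[i + 1] = s;
            avail[i + 2] = e;
            avail_count++;
        }
        return (s);
    }
    return (0);                         /* nothing large enough */
}
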
2881 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); in moea64_pvo_enter()
2888 old_pvo = RB_INSERT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo); in moea64_pvo_enter()
2900 if (pvo->pvo_vaddr & PVO_WIRED) in moea64_pvo_enter()
2901 pvo->pvo_pmap->pm_stats.wired_count++; in moea64_pvo_enter()
2902 pvo->pvo_pmap->pm_stats.resident_count++; in moea64_pvo_enter()
2914 if (pvo->pvo_pmap == kernel_pmap) in moea64_pvo_enter()
2924 pvo->pvo_vaddr & PVO_LARGE); in moea64_pvo_enter()
2936 KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap")); in moea64_pvo_remove_from_pmap()
2937 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); in moea64_pvo_remove_from_pmap()
2938 KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO")); in moea64_pvo_remove_from_pmap()
2949 if (pvo->pvo_pte.prot & VM_PROT_WRITE) in moea64_pvo_remove_from_pmap()
2958 pvo->pvo_pmap->pm_stats.resident_count--; in moea64_pvo_remove_from_pmap()
2959 if (pvo->pvo_vaddr & PVO_WIRED) in moea64_pvo_remove_from_pmap()
2960 pvo->pvo_pmap->pm_stats.wired_count--; in moea64_pvo_remove_from_pmap()
2965 RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo); in moea64_pvo_remove_from_pmap()
2970 pvo->pvo_vaddr |= PVO_DEAD; in moea64_pvo_remove_from_pmap()
2973 if ((pvo->pvo_vaddr & PVO_MANAGED) && in moea64_pvo_remove_from_pmap()
2974 (pvo->pvo_pte.prot & VM_PROT_WRITE)) { in moea64_pvo_remove_from_pmap()
2977 refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs); in moea64_pvo_remove_from_pmap()
2991 KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page")); in moea64_pvo_remove_from_page_locked()
2994 if (pvo->pvo_pmap == NULL) in moea64_pvo_remove_from_page_locked()
2996 pvo->pvo_pmap = NULL; in moea64_pvo_remove_from_page_locked()
3002 if (pvo->pvo_vaddr & PVO_MANAGED) { in moea64_pvo_remove_from_page_locked()
3011 STAT_MOEA64(moea64_pvo_entries--); in moea64_pvo_remove_from_page_locked()
3020 if (pvo->pvo_vaddr & PVO_MANAGED) in moea64_pvo_remove_from_page()
3036 return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key)); in moea64_pvo_find_va()
3052 if ((m->md.mdpg_attrs & ptebit) != 0 || in moea64_query_bit()
3054 (sp->md.mdpg_attrs & (ptebit | MDPG_ATTR_SP)) == in moea64_query_bit()
3071 if (ret != -1) { in moea64_query_bit()
3088 PMAP_LOCK(pvo->pvo_pmap); in moea64_query_bit()
3089 if (!(pvo->pvo_vaddr & PVO_DEAD)) in moea64_query_bit()
3091 PMAP_UNLOCK(pvo->pvo_pmap); in moea64_query_bit()
3094 atomic_set_32(&m->md.mdpg_attrs, in moea64_query_bit()
3127 if ((ret = moea64_sp_clear(pvo, m, ptebit)) != -1) { in moea64_clear_bit()
3134 PMAP_LOCK(pvo->pvo_pmap); in moea64_clear_bit()
3135 if (!(pvo->pvo_vaddr & PVO_DEAD)) in moea64_clear_bit()
3137 PMAP_UNLOCK(pvo->pvo_pmap); in moea64_clear_bit()
3142 atomic_clear_32(&m->md.mdpg_attrs, ptebit); in moea64_clear_bit()
3161 for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key); in moea64_dev_direct_mapped()
3163 pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) { in moea64_dev_direct_mapped()
3196 size -= PAGE_SIZE; in moea64_mapdev_attr()
3234 pm = &curthread->td_proc->p_vmspace->vm_pmap; in moea64_sync_icache()
3239 len = MIN(lim - va, sz); in moea64_sync_icache()
3241 if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) { in moea64_sync_icache()
3246 sz -= len; in moea64_sync_icache()
3283 dump_map[0].pa_size = round_page((uintptr_t)_end) - in moea64_scan_init()
3287 dump_map[1].pa_start = (vm_paddr_t)(uintptr_t)msgbufp->msg_ptr; in moea64_scan_init()
3288 dump_map[1].pa_size = round_page(msgbufp->msg_size); in moea64_scan_init()
3300 if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD)) in moea64_scan_init()
3313 if (pvo == NULL || (pvo->pvo_vaddr & PVO_DEAD)) in moea64_scan_init()
3317 dump_map[2].pa_size = va - dump_map[2].pa_start; in moea64_scan_init()
3342 RB_FOREACH(pvo, pvo_tree, &kernel_pmap->pmap_pvo) { in moea64_scan_pmap()
3343 va = pvo->pvo_vaddr; in moea64_scan_pmap()
3411 for (; npages > 0; --npages) { in moea64_map_range()
3421 npages -= (moea64_large_page_size >> PAGE_SHIFT) - 1; in moea64_map_range()
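
moea64_map_range() above walks a range one 4 KB page at a time but, when the cursor is 16 MB aligned and enough pages remain, enters a single large mapping and advances the loop by the equivalent page count. A sketch of that loop shape, with a hypothetical map_page() stub standing in for the kernel's kenter routines:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT       12
#define PAGE_SIZE        (1UL << PAGE_SHIFT)
#define LARGE_PAGE_SIZE  (1UL << 24)        /* 16 MB, the MOEA64 large page */
#define LARGE_PAGE_MASK  (LARGE_PAGE_SIZE - 1)

/* Stand-in for the kernel's mapping routines: just record one mapping. */
static void
map_page(uint64_t va, uint64_t pa, uint64_t size)
{
    printf("map %#llx -> %#llx (%llu bytes)\n",
        (unsigned long long)va, (unsigned long long)pa,
        (unsigned long long)size);
}

static void
map_range(uint64_t va, uint64_t pa, uint64_t npages)
{
    for (; npages > 0; --npages) {
        /* Prefer a large page when alignment and remaining length allow. */
        if ((va & LARGE_PAGE_MASK) == 0 && (pa & LARGE_PAGE_MASK) == 0 &&
            npages >= (LARGE_PAGE_SIZE >> PAGE_SHIFT)) {
            map_page(va, pa, LARGE_PAGE_SIZE);
            va += LARGE_PAGE_SIZE;
            pa += LARGE_PAGE_SIZE;
            /* The loop header subtracts one page; skip the remainder. */
            npages -= (LARGE_PAGE_SIZE >> PAGE_SHIFT) - 1;
        } else {
            map_page(va, pa, PAGE_SIZE);
            va += PAGE_SIZE;
            pa += PAGE_SIZE;
        }
    }
}
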
3442 /* Short-circuit single-domain systems. */ in moea64_page_array_startup()
3462 size = btoc(vm_phys_segs[i].end - vm_phys_segs[i].start); in moea64_page_array_startup()
3470 size = btoc(phys_avail[i + 1] - phys_avail[i]); in moea64_page_array_startup()
3483 size = ulmin(pages - vm_page_array_size, dom_pages[i]); in moea64_page_array_startup()
3492 vm_page_base += size - needed; in moea64_page_array_startup()
3523 f = moea64_ops->func; \
3531 if (hw_direct_map == -1) { in moea64_install()
3543 * Default to non-DMAP, and switch over to DMAP functions once we know in moea64_install()
3588 if (object != NULL && (object->flags & OBJ_COLORED) != 0) in moea64_align_superpage()
3589 offset += ptoa(object->pg_color); in moea64_align_superpage()
3591 if (size - ((HPT_SP_SIZE - sp_offset) & HPT_SP_MASK) < HPT_SP_SIZE || in moea64_align_superpage()
3611 if (pvo->pvo_vaddr & PVO_DEAD) in moea64_pvo_cleanup()
3623 if ((pvo->pvo_pte.prot & VM_PROT_WRITE) != 0) in pvo_to_vmpage_flags()
3625 if ((pvo->pvo_pte.prot & VM_PROT_EXECUTE) != 0) in pvo_to_vmpage_flags()
3632 * Check if the given pvo and its superpage are in sva-eva range.
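
The comment above belongs to a small predicate: a superpage may be handled as a unit only if the whole 16 MB region lies inside the requested [sva, eva) range. A sketch of that containment check:

#include <stdbool.h>
#include <stdint.h>

#define HPT_SP_SIZE  (1UL << 24)    /* 16 MB superpage */

/*
 * True if the superpage beginning at 'va' (assumed superpage-aligned)
 * is fully contained in the half-open range [sva, eva).
 */
static bool
sp_in_range(uint64_t va, uint64_t sva, uint64_t eva)
{
    return (sva <= va && va + HPT_SP_SIZE <= eva);
}
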
3665 if ((sp->pvo_vaddr & PVO_MANAGED) != 0 && (prot & VM_PROT_WRITE) != 0) { in moea64_sp_refchg_process()
3668 atomic_readandclear_32(&m->md.mdpg_attrs); in moea64_sp_refchg_process()
3696 KASSERT(m->psind == 1, ("%s: invalid m->psind: %d", in moea64_sp_enter()
3697 __func__, m->psind)); in moea64_sp_enter()
3723 for (i = i - 1; i >= 0; i--) in moea64_sp_enter()
3741 pvo->pvo_pte.prot = prot; in moea64_sp_enter()
3742 pvo->pvo_pte.pa = (pa & ~HPT_SP_MASK) | LPTE_LP_4K_16M | in moea64_sp_enter()
3746 pvo->pvo_vaddr |= PVO_WIRED; in moea64_sp_enter()
3747 pvo->pvo_vaddr |= PVO_LARGE; in moea64_sp_enter()
3749 if ((m->oflags & VPO_UNMANAGED) != 0) in moea64_sp_enter()
3752 pvo_head = &m->md.mdpg_pvoh; in moea64_sp_enter()
3753 pvo->pvo_vaddr |= PVO_MANAGED; in moea64_sp_enter()
3771 sync = (sm->a.flags & PGA_EXECUTABLE) == 0; in moea64_sp_enter()
3786 if (sync && (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) in moea64_sp_enter()
3824 * - When a superpage is first entered read-only and later becomes in moea64_sp_promote()
3825 * read-write. in moea64_sp_promote()
3826 * - When some of the superpage's virtual addresses map to previously in moea64_sp_promote()
3835 if (pvo == NULL || (pvo->pvo_vaddr & PVO_DEAD) != 0) { in moea64_sp_promote()
3849 if ((first->pvo_vaddr & PVO_FLAGS_PROMOTE) != in moea64_sp_promote()
3850 (pvo->pvo_vaddr & PVO_FLAGS_PROMOTE)) { in moea64_sp_promote()
3854 (uintmax_t)(pvo->pvo_vaddr & PVO_FLAGS_PROMOTE), in moea64_sp_promote()
3855 (uintmax_t)(first->pvo_vaddr & PVO_FLAGS_PROMOTE)); in moea64_sp_promote()
3859 if (first->pvo_pte.prot != pvo->pvo_pte.prot) { in moea64_sp_promote()
3863 pvo->pvo_pte.prot, first->pvo_pte.prot); in moea64_sp_promote()
3867 if ((first->pvo_pte.pa & LPTE_WIMG) != in moea64_sp_promote()
3868 (pvo->pvo_pte.pa & LPTE_WIMG)) { in moea64_sp_promote()
3872 (uintmax_t)(pvo->pvo_pte.pa & LPTE_WIMG), in moea64_sp_promote()
3873 (uintmax_t)(first->pvo_pte.pa & LPTE_WIMG)); in moea64_sp_promote()
3878 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo); in moea64_sp_promote()
3881 /* All OK, promote. */ in moea64_sp_promote()
3888 * 1- If a page is being promoted, it was referenced. in moea64_sp_promote()
3889 * 2- If promoted pages are writable, they were modified. in moea64_sp_promote()
3892 ((first->pvo_pte.prot & VM_PROT_WRITE) != 0 ? LPTE_CHG : 0); in moea64_sp_promote()
3898 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) { in moea64_sp_promote()
3899 pvo->pvo_pte.pa &= ADDR_POFF | ~HPT_SP_MASK; in moea64_sp_promote()
3900 pvo->pvo_pte.pa |= LPTE_LP_4K_16M; in moea64_sp_promote()
3901 pvo->pvo_vaddr |= PVO_LARGE; in moea64_sp_promote()
3906 moea64_sp_refchg_process(first, m, sp_refchg, first->pvo_pte.prot); in moea64_sp_promote()
3909 atomic_set_32(&m->md.mdpg_attrs, sp_refchg | MDPG_ATTR_SP); in moea64_sp_promote()
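
The moea64_sp_promote() fragments above promote only when every 4 KB mapping in the 16 MB range agrees on protection, WIMG attributes, and the promotable flag bits. A condensed sketch of that agreement scan over an array of per-page descriptors (the struct is invented for illustration):

#include <stdbool.h>
#include <stdint.h>

#define SP_PAGES 4096              /* 16 MB / 4 KB base pages */

/* Hypothetical per-page mapping descriptor. */
struct small_mapping {
    bool     valid;
    uint8_t  prot;                 /* read/write/execute bits  */
    uint8_t  wimg;                 /* cache attribute bits     */
    uint8_t  flags;                /* wired/managed-style bits */
};

/*
 * Promotion is legal only if every base page is mapped and matches the
 * first page's protection, cache attributes, and flags.
 */
static bool
can_promote(const struct small_mapping map[SP_PAGES])
{
    for (int i = 0; i < SP_PAGES; i++) {
        if (!map[i].valid ||
            map[i].prot != map[0].prot ||
            map[i].wimg != map[0].wimg ||
            map[i].flags != map[0].flags)
            return (false);
    }
    return (true);
}
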
3936 pmap = sp->pvo_pmap; in moea64_sp_demote_aligned()
3949 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo), in moea64_sp_demote_aligned()
3954 pvo->pvo_vaddr &= ~PVO_LARGE; in moea64_sp_demote_aligned()
3955 pvo->pvo_pte.pa &= ~LPTE_RPGN; in moea64_sp_demote_aligned()
3956 pvo->pvo_pte.pa |= pa; in moea64_sp_demote_aligned()
3967 * in cache, it should be ok to clear it here. in moea64_sp_demote_aligned()
3969 atomic_clear_32(&m->md.mdpg_attrs, MDPG_ATTR_SP); in moea64_sp_demote_aligned()
3975 moea64_sp_refchg_process(sp, m, refchg, sp->pvo_pte.prot); in moea64_sp_demote_aligned()
3985 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); in moea64_sp_demote()
3988 pvo = moea64_pvo_find_va(pvo->pvo_pmap, in moea64_sp_demote()
4006 pm = sp->pvo_pmap; in moea64_sp_unwire()
4012 prev = pvo, pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { in moea64_sp_unwire()
4013 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) in moea64_sp_unwire()
4016 pvo->pvo_vaddr &= ~PVO_WIRED; in moea64_sp_unwire()
4024 pm->pm_stats.wired_count--; in moea64_sp_unwire()
4029 refchg, sp->pvo_pte.prot); in moea64_sp_unwire()
4047 pm = sp->pvo_pmap; in moea64_sp_protect()
4050 oldprot = sp->pvo_pte.prot; in moea64_sp_protect()
4058 prev = pvo, pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { in moea64_sp_protect()
4059 pvo->pvo_pte.prot = prot; in moea64_sp_protect()
4074 if ((m->a.flags & PGA_EXECUTABLE) == 0 && in moea64_sp_protect()
4075 (sp->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { in moea64_sp_protect()
4076 if ((m->oflags & VPO_UNMANAGED) == 0) in moea64_sp_protect()
4095 pm = sp->pvo_pmap; in moea64_sp_remove()
4100 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); in moea64_sp_remove()
4115 * ok to always clear the SP bit on remove/demote. in moea64_sp_remove()
4117 atomic_clear_32(&PHYS_TO_VM_PAGE(PVO_PADDR(sp))->md.mdpg_attrs, in moea64_sp_remove()
4132 pmap = pvo->pvo_pmap; in moea64_sp_query_locked()
4146 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) { in moea64_sp_query_locked()
4158 atomic_set_32(&m->md.mdpg_attrs, refchg | MDPG_ATTR_SP); in moea64_sp_query_locked()
4170 pmap = pvo->pvo_pmap; in moea64_sp_query()
4176 if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) { in moea64_sp_query()
4180 return (-1); in moea64_sp_query()
4202 pmap = pvo->pvo_pmap; in moea64_sp_pvo_clear()
4208 if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) { in moea64_sp_pvo_clear()
4212 return (-1); in moea64_sp_pvo_clear()
4226 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) { in moea64_sp_pvo_clear()
4233 atomic_clear_32(&m->md.mdpg_attrs, ptebit); in moea64_sp_pvo_clear()
4250 pmap = pvo->pvo_pmap; in moea64_sp_clear()
4268 (HPT_SP_PAGES - 1)) == 0 && (pvo->pvo_vaddr & PVO_WIRED) == 0) { in moea64_sp_clear()
4269 if ((ret = moea64_sp_pvo_clear(pvo, ptebit)) == -1) in moea64_sp_clear()
4270 return (-1); in moea64_sp_clear()
4298 if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) { in moea64_sp_clear()
4302 return (-1); in moea64_sp_clear()
4320 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) in moea64_sp_clear()
4322 pvo->pvo_pte.prot & ~VM_PROT_WRITE); in moea64_sp_clear()