Lines Matching +full:chg +full:- +full:int
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause AND BSD-4-Clause
8 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
31 /*-
63 /*-
162 #define PVO_PADDR(pvo) ((pvo)->pvo_pte.pte.pte_lo & PTE_RPGN)
180 static int regions_sz, pregions_sz;
213 static int moea_bpvo_pool_index = 0;
253 static int moea_pte_insert(u_int, struct pte *);
258 static int moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
259 vm_offset_t, vm_paddr_t, u_int, int);
260 static void moea_pvo_remove(struct pvo_entry *, int);
261 static struct pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
262 static struct pte *moea_pvo_to_pte(const struct pvo_entry *, int);
267 static int moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
270 static bool moea_query_bit(vm_page_t, int);
271 static u_int moea_clear_bit(vm_page_t, int);
273 int moea_pte_spill(vm_offset_t);
281 vm_page_t *mb, vm_offset_t b_offset, int xfersize);
282 int moea_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
293 int moea_ts_referenced(vm_page_t);
294 vm_offset_t moea_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
295 static int moea_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
298 int moea_page_wired_mappings(vm_page_t);
299 int moea_pinit(pmap_t);
302 void moea_qenter(vm_offset_t, vm_page_t *, int);
303 void moea_qremove(vm_offset_t, int);
310 void moea_zero_page_area(vm_page_t, int, int);
313 void moea_cpu_bootstrap(int);
322 int moea_dev_direct_mapped(vm_paddr_t, vm_size_t);
330 static int moea_map_user_ptr(pmap_t pm,
332 static int moea_decode_kernel_ptr(vm_offset_t addr,
333 int *is_user, vm_offset_t *decoded_addr);
397 int i; in moea_calc_wimg()
480 static __inline int
500 return (&m->md.mdpg_pvoh); in vm_page_to_pvoh()
504 moea_attr_clear(vm_page_t m, int ptebit) in moea_attr_clear()
508 m->md.mdpg_attrs &= ~ptebit; in moea_attr_clear()
511 static __inline int
515 return (m->md.mdpg_attrs); in moea_attr_fetch()
519 moea_attr_save(vm_page_t m, int ptebit) in moea_attr_save()
523 m->md.mdpg_attrs |= ptebit; in moea_attr_save()
526 static __inline int
529 if (pt->pte_hi == pvo_pt->pte_hi) in moea_pte_compare()
535 static __inline int
536 moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which) in moea_pte_match()
538 return (pt->pte_hi & ~PTE_VALID) == in moea_pte_match()
555 pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | in moea_pte_create()
557 pt->pte_lo = pte_lo; in moea_pte_create()
565 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG); in moea_pte_synch()
569 moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit) in moea_pte_clear()
577 pt->pte_lo &= ~ptebit; in moea_pte_clear()
586 pvo_pt->pte_hi |= PTE_VALID; in moea_pte_set()
590 * Note that the REF/CHG bits are from pvo_pt and thus should have in moea_pte_set()
593 pt->pte_lo = pvo_pt->pte_lo; in moea_pte_set()
595 pt->pte_hi = pvo_pt->pte_hi; in moea_pte_set()
605 pvo_pt->pte_hi &= ~PTE_VALID; in moea_pte_unset()
608 * Force the ref & chg bits back into the PTEs. in moea_pte_unset()
615 pt->pte_hi &= ~PTE_VALID; in moea_pte_unset()
620 * Save the ref & chg bits. in moea_pte_unset()
623 moea_pte_valid--; in moea_pte_unset()
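
The moea_pte_synch()/moea_pte_unset() fragments above show the pattern used throughout this file: the hardware-maintained REF (referenced) and CHG (changed) bits are copied from the live PTE into the software copy kept in the PVO before the hardware entry is invalidated or reused. Below is a minimal user-space sketch of that idea; the struct layout, bit values, and function names are simplified stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the OEA PTE bits (illustrative values). */
#define PTE_REF   0x00000100u  /* page was referenced (set by hardware) */
#define PTE_CHG   0x00000080u  /* page was modified (set by hardware)   */
#define PTE_VALID 0x80000000u  /* entry is valid (kept in the high word) */

struct pte { uint32_t pte_hi, pte_lo; };

/* Copy the hardware-maintained REF/CHG bits into the software copy. */
static void pte_synch(const struct pte *hw, struct pte *sw)
{
	sw->pte_lo |= hw->pte_lo & (PTE_REF | PTE_CHG);
}

/* Invalidate a hardware PTE, preserving its REF/CHG history in 'sw'. */
static void pte_unset(struct pte *hw, struct pte *sw)
{
	sw->pte_hi &= ~PTE_VALID;  /* software copy no longer backed by HW  */
	hw->pte_hi &= ~PTE_VALID;  /* (a real pmap would also flush the TLB) */
	pte_synch(hw, sw);         /* save REF/CHG before the slot is reused */
}

int main(void)
{
	struct pte hw = { PTE_VALID, PTE_REF | PTE_CHG };
	struct pte sw = { PTE_VALID, 0 };

	pte_unset(&hw, &sw);
	printf("saved bits: ref=%d chg=%d\n",
	    !!(sw.pte_lo & PTE_REF), !!(sw.pte_lo & PTE_CHG));
	return 0;
}
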
640 static int om_cmp(const void *a, const void *b);
642 static int
650 if (mapa->om_pa < mapb->om_pa) in om_cmp()
651 return (-1); in om_cmp()
652 else if (mapa->om_pa > mapb->om_pa) in om_cmp()
659 moea_cpu_bootstrap(int ap) in moea_cpu_bootstrap()
662 int i; in moea_cpu_bootstrap()
686 mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]); in moea_cpu_bootstrap()
701 int sz; in moea_bootstrap()
702 int i, j; in moea_bootstrap()
739 CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)", in moea_bootstrap()
745 * of physmem to be covered by on-demand BAT entries. in moea_bootstrap()
769 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, in moea_bootstrap()
777 hwphyssz - physsz; in moea_bootstrap()
842 moea_pteg_mask = moea_pteg_count - 1; in moea_bootstrap()
874 moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] in moea_bootstrap()
883 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; in moea_bootstrap()
884 CPU_FILL(&kernel_pmap->pm_active); in moea_bootstrap()
885 RB_INIT(&kernel_pmap->pmap_pvo); in moea_bootstrap()
896 if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1 && in moea_bootstrap()
897 (mmu = OF_instance_to_package(mmui)) != -1 && in moea_bootstrap()
898 (sz = OF_getproplen(mmu, "translations")) != -1) { in moea_bootstrap()
909 if (OF_getprop(mmu, "translations", translations, sz) == -1) in moea_bootstrap()
921 * on-demand BAT tables take care of the translation. in moea_bootstrap()
924 * which is mixed-protection and therefore not in moea_bootstrap()
1014 pm = &td->td_proc->p_vmspace->vm_pmap; in moea_activate()
1015 pmr = pm->pmap_phys; in moea_activate()
1017 CPU_SET(PCPU_GET(cpuid), &pm->pm_active); in moea_activate()
1020 mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid); in moea_activate()
1028 pm = &td->td_proc->p_vmspace->vm_pmap; in moea_deactivate()
1029 CPU_CLR(PCPU_GET(cpuid), &pm->pm_active); in moea_deactivate()
1040 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); in moea_unwire()
1042 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { in moea_unwire()
1043 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) in moea_unwire()
1045 pvo->pvo_vaddr &= ~PVO_WIRED; in moea_unwire()
1046 pm->pm_stats.wired_count--; in moea_unwire()
1065 vm_page_t *mb, vm_offset_t b_offset, int xfersize) in moea_copy_pages()
1069 int cnt; in moea_copy_pages()
1073 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); in moea_copy_pages()
1077 cnt = min(cnt, PAGE_SIZE - b_pg_offset); in moea_copy_pages()
1083 xfersize -= cnt; in moea_copy_pages()
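
The moea_copy_pages() fragments above clamp each copy so a single memcpy never crosses a page boundary on either the source or the destination side. A self-contained sketch of that chunking loop, with ordinary heap/stack buffers standing in for mapped pages (all names here are illustrative):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/*
 * Copy xfersize bytes between two arrays of page-sized buffers.  Each
 * chunk is clamped to what remains of both the current source page and
 * the current destination page.
 */
static void copy_pages(char *const *ma, size_t a_offset,
    char *const *mb, size_t b_offset, size_t xfersize)
{
	while (xfersize > 0) {
		size_t a_pg_offset = a_offset & (PAGE_SIZE - 1);
		size_t b_pg_offset = b_offset & (PAGE_SIZE - 1);
		size_t cnt = xfersize;

		if (cnt > PAGE_SIZE - a_pg_offset)
			cnt = PAGE_SIZE - a_pg_offset;
		if (cnt > PAGE_SIZE - b_pg_offset)
			cnt = PAGE_SIZE - b_pg_offset;

		memcpy(mb[b_offset / PAGE_SIZE] + b_pg_offset,
		    ma[a_offset / PAGE_SIZE] + a_pg_offset, cnt);

		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

int main(void)
{
	static char src0[PAGE_SIZE], src1[PAGE_SIZE];
	static char dst0[PAGE_SIZE], dst1[PAGE_SIZE];
	char *src[] = { src0, src1 }, *dst[] = { dst0, dst1 };

	memset(src0, 'a', PAGE_SIZE);
	memset(src1, 'b', PAGE_SIZE);
	/* Copy 16 bytes straddling the source page boundary. */
	copy_pages(src, PAGE_SIZE - 8, dst, 0, 16);
	printf("%.16s\n", dst0);   /* prints "aaaaaaaabbbbbbbb" */
	return 0;
}
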
1100 moea_zero_page_area(vm_page_t m, int off, int size) in moea_zero_page_area()
1123 return (!LIST_EMPTY(&(m)->md.mdpg_pvoh)); in moea_page_is_mapped()
1137 int
1141 int error; in moea_enter()
1153 VM_OBJECT_ASSERT_UNLOCKED(m->object); in moea_enter()
1165 static int
1172 int error; in moea_enter_locked()
1177 if ((m->oflags & VPO_UNMANAGED) == 0) { in moea_enter_locked()
1181 VM_OBJECT_ASSERT_LOCKED(m->object); in moea_enter_locked()
1184 if ((m->oflags & VPO_UNMANAGED) != 0 || !moea_initialized) { in moea_enter_locked()
1199 (m->oflags & VPO_UNMANAGED) == 0) in moea_enter_locked()
1243 VM_OBJECT_ASSERT_LOCKED(m_start->object); in moea_enter_object()
1245 vm_page_iter_limit_init(&pages, m_start->object, in moea_enter_object()
1246 m_start->pindex + atop(end - start)); in moea_enter_object()
1247 m = vm_radix_iter_lookup(&pages, m_start->pindex); in moea_enter_object()
1251 va = start + ptoa(m->pindex - m_start->pindex); in moea_enter_object()
1304 if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) && in moea_extract_and_hold()
1305 ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW || in moea_extract_and_hold()
1333 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea_is_referenced()
1346 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea_is_modified()
1369 rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0; in moea_is_prefaultable()
1378 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea_clear_modify()
1400 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea_remove_write()
1410 pmap = pvo->pvo_pmap; in moea_remove_write()
1412 if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) { in moea_remove_write()
1413 pt = moea_pvo_to_pte(pvo, -1); in moea_remove_write()
1414 pvo->pvo_pte.pte.pte_lo &= ~PTE_PP; in moea_remove_write()
1415 pvo->pvo_pte.pte.pte_lo |= PTE_BR; in moea_remove_write()
1417 moea_pte_synch(pt, &pvo->pvo_pte.pte); in moea_remove_write()
1418 lo |= pvo->pvo_pte.pte.pte_lo; in moea_remove_write()
1419 pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG; in moea_remove_write()
1420 moea_pte_change(pt, &pvo->pvo_pte.pte, in moea_remove_write()
1421 pvo->pvo_vaddr); in moea_remove_write()
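
The moea_remove_write() fragments above downgrade every writable mapping of a page to read-only: the live PTE is synced first so a pending CHG bit is not lost, the PP (page-protection) field is rewritten from PTE_RW to PTE_BR, and CHG is cleared in the software copy. A simplified single-mapping sketch follows; the bit values are stand-ins and TLB invalidation is omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_CHG 0x080u
#define PTE_PP  0x003u   /* page-protection field             */
#define PTE_RW  0x002u   /* read/write encoding of PP         */
#define PTE_BR  0x003u   /* read-only encoding of PP          */

struct pte { uint32_t pte_lo; };

/*
 * Downgrade one mapping to read-only and report whether the page had
 * been dirtied through it.
 */
static bool remove_write_one(struct pte *hw, struct pte *sw)
{
	bool dirty;

	if ((sw->pte_lo & PTE_PP) == PTE_BR)
		return false;                  /* already read-only     */

	sw->pte_lo |= hw->pte_lo & PTE_CHG;    /* harvest pending CHG   */
	dirty = (sw->pte_lo & PTE_CHG) != 0;
	sw->pte_lo &= ~(PTE_PP | PTE_CHG);
	sw->pte_lo |= PTE_BR;
	hw->pte_lo = sw->pte_lo;               /* write back; real code
	                                          also flushes the TLB  */
	return dirty;
}

int main(void)
{
	struct pte hw = { PTE_RW | PTE_CHG }, sw = { PTE_RW };
	bool dirty = remove_write_one(&hw, &sw);

	printf("was dirty: %d, PP now: %u\n",
	    dirty, (unsigned)(sw.pte_lo & PTE_PP));
	return 0;
}
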
1447 int
1450 int count; in moea_ts_referenced()
1452 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea_ts_referenced()
1472 if (m->md.mdpg_cache_attrs == ma) in moea_page_set_memattr()
1475 if ((m->oflags & VPO_UNMANAGED) != 0) { in moea_page_set_memattr()
1476 m->md.mdpg_cache_attrs = ma; in moea_page_set_memattr()
1485 pmap = pvo->pvo_pmap; in moea_page_set_memattr()
1487 pt = moea_pvo_to_pte(pvo, -1); in moea_page_set_memattr()
1488 pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG; in moea_page_set_memattr()
1489 pvo->pvo_pte.pte.pte_lo |= lo; in moea_page_set_memattr()
1491 moea_pte_change(pt, &pvo->pvo_pte.pte, in moea_page_set_memattr()
1492 pvo->pvo_vaddr); in moea_page_set_memattr()
1493 if (pvo->pvo_pmap == kernel_pmap) in moea_page_set_memattr()
1499 m->md.mdpg_cache_attrs = ma; in moea_page_set_memattr()
1517 int error; in moea_kenter_attr()
1521 panic("moea_kenter: attempt to enter non-kernel address %#x", in moea_kenter_attr()
1549 * Allow direct mappings on 32-bit OEA in moea_kextract()
1578 int
1586 l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr); in moea_map_user_ptr()
1596 /* Mark segment no-execute */ in moea_map_user_ptr()
1600 if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == vsid) in moea_map_user_ptr()
1604 curthread->td_pcb->pcb_cpu.aim.usr_segm = in moea_map_user_ptr()
1606 curthread->td_pcb->pcb_cpu.aim.usr_vsid = vsid; in moea_map_user_ptr()
1617 static int
1618 moea_decode_kernel_ptr(vm_offset_t addr, int *is_user, in moea_decode_kernel_ptr()
1624 user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm; in moea_decode_kernel_ptr()
1641 * Architectures which can support a direct-mapped physical to virtual region
1648 vm_paddr_t pa_end, int prot) in moea_map()
1670 int loops; in moea_page_exists_quick()
1674 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea_page_exists_quick()
1680 if (pvo->pvo_pmap == pmap) { in moea_page_exists_quick()
1695 m->md.mdpg_attrs = 0; in moea_page_init()
1696 m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT; in moea_page_init()
1697 LIST_INIT(&m->md.mdpg_pvoh); in moea_page_init()
1704 int
1708 int count; in moea_page_wired_mappings()
1711 if ((m->oflags & VPO_UNMANAGED) != 0) in moea_page_wired_mappings()
1715 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) in moea_page_wired_mappings()
1723 int
1726 int i, mask; in moea_pinit()
1729 RB_INIT(&pmap->pmap_pvo); in moea_pinit()
1734 if ((pmap->pmap_phys = (pmap_t)moea_kextract((vm_offset_t)pmap)) in moea_pinit()
1736 pmap->pmap_phys = pmap; in moea_pinit()
1754 hash = moea_vsidcontext & (NPMAPS - 1); in moea_pinit()
1758 mask = 1 << (hash & (VSID_NBPW - 1)); in moea_pinit()
1766 i = ffs(~moea_vsid_bitmap[n]) - 1; in moea_pinit()
1772 ("Allocating in-use VSID group %#x\n", hash)); in moea_pinit()
1775 pmap->pm_sr[i] = VSID_MAKE(i, hash); in moea_pinit()
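
The moea_pinit() fragments above allocate a VSID (virtual segment ID) group for a new pmap by hashing a rolling context value into a bitmap of NPMAPS groups and using ffs() on the inverted bitmap word to find a free entry. A hedged user-space sketch of just that bitmap search; the sizes and the simplified single-word probe are illustrative (the kernel retries with fresh hashes when a word is full):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>   /* ffs() */

#define NPMAPS    32768                    /* VSID groups to manage           */
#define VSID_NBPW (8 * sizeof(uint32_t))   /* groups tracked per bitmap word  */

static uint32_t vsid_bitmap[NPMAPS / VSID_NBPW];

/* Find a free VSID group near 'hash'; returns -1 if that word is full. */
static int vsid_alloc(unsigned hash)
{
	unsigned n = (hash & (NPMAPS - 1)) / VSID_NBPW;
	int i = ffs((int)~vsid_bitmap[n]);   /* first zero bit, 1-based */

	if (i == 0)
		return (-1);                 /* every group in this word taken */
	i -= 1;
	vsid_bitmap[n] |= 1u << i;
	return (int)(n * VSID_NBPW + i);
}

int main(void)
{
	printf("group %d\n", vsid_alloc(0x1234));
	printf("group %d\n", vsid_alloc(0x1234));  /* next free in same word */
	return 0;
}
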
1793 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); in moea_pinit0()
1806 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, in moea_protect()
1817 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); in moea_protect()
1819 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); in moea_protect()
1825 pt = moea_pvo_to_pte(pvo, -1); in moea_protect()
1829 pvo->pvo_pte.pte.pte_lo &= ~PTE_PP; in moea_protect()
1830 pvo->pvo_pte.pte.pte_lo |= PTE_BR; in moea_protect()
1836 moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr); in moea_protect()
1850 moea_qenter(vm_offset_t sva, vm_page_t *m, int count) in moea_qenter()
1855 while (count-- > 0) { in moea_qenter()
1867 moea_qremove(vm_offset_t sva, int count) in moea_qremove()
1872 while (count-- > 0) { in moea_qremove()
1881 int idx, mask; in moea_release()
1886 if (pmap->pm_sr[0] == 0) in moea_release()
1890 idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1); in moea_release()
1908 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); in moea_remove()
1910 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); in moea_remove()
1911 moea_pvo_remove(pvo, -1); in moea_remove()
1933 pmap = pvo->pvo_pmap; in moea_remove_all()
1935 moea_pvo_remove(pvo, -1); in moea_remove_all()
1938 if ((m->a.flags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) { in moea_remove_all()
1946 static int
1952 int val; in moea_mincore()
1961 managed = (pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED; in moea_mincore()
1999 int i, j; in moea_bootstrap_alloc()
2015 phys_avail[i + 1] -= size; in moea_bootstrap_alloc()
2017 for (j = phys_avail_count * 2; j > i; j -= 2) { in moea_bootstrap_alloc()
2018 phys_avail[j] = phys_avail[j - 2]; in moea_bootstrap_alloc()
2019 phys_avail[j + 1] = phys_avail[j - 1]; in moea_bootstrap_alloc()
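
The moea_bootstrap_alloc() fragments above carve early allocations out of the phys_avail[] array of start/end pairs, shifting the array entries when a region is consumed. The model below is a hedged simplification: it always allocates from the end of a region, so it only ever closes a gap in the array rather than splitting a region in the middle as the kernel code can.

#include <stdint.h>
#include <stdio.h>

#define PHYS_AVAIL_SZ 8

/* phys_avail[2*i] = region start, phys_avail[2*i+1] = region end. */
static uint32_t phys_avail[PHYS_AVAIL_SZ] = {
	0x00001000, 0x00100000,
	0x00200000, 0x00800000,
};
static int phys_avail_count = 2;

/* Allocate 'size' bytes from the end of the first region large enough. */
static uint32_t bootstrap_alloc(uint32_t size)
{
	for (int i = 0; i < phys_avail_count * 2; i += 2) {
		if (phys_avail[i + 1] - phys_avail[i] < size)
			continue;
		phys_avail[i + 1] -= size;
		uint32_t pa = phys_avail[i + 1];
		if (phys_avail[i + 1] == phys_avail[i]) {
			/* Region exhausted: close the gap in the array. */
			for (int j = i; j < (phys_avail_count - 1) * 2; j += 2) {
				phys_avail[j] = phys_avail[j + 2];
				phys_avail[j + 1] = phys_avail[j + 3];
			}
			phys_avail_count--;
		}
		return pa;
	}
	return 0;   /* out of early memory */
}

int main(void)
{
	printf("allocated at %#x\n", bootstrap_alloc(0x1000));
	printf("region 0 now ends at %#x\n", phys_avail[1]);
	return 0;
}
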
2039 static int
2041 vm_offset_t va, vm_paddr_t pa, u_int pte_lo, int flags) in moea_pvo_enter()
2045 int first; in moea_pvo_enter()
2047 int i; in moea_pvo_enter()
2048 int bootstrap; in moea_pvo_enter()
2058 sr = va_to_sr(pm->pm_sr, va); in moea_pvo_enter()
2067 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { in moea_pvo_enter()
2069 (pvo->pvo_pte.pte.pte_lo & PTE_PP) == in moea_pvo_enter()
2078 (pvo->pvo_vaddr & PVO_WIRED) == 0) { in moea_pvo_enter()
2079 pvo->pvo_vaddr |= PVO_WIRED; in moea_pvo_enter()
2080 pm->pm_stats.wired_count++; in moea_pvo_enter()
2082 (pvo->pvo_vaddr & PVO_WIRED) != 0) { in moea_pvo_enter()
2083 pvo->pvo_vaddr &= ~PVO_WIRED; in moea_pvo_enter()
2084 pm->pm_stats.wired_count--; in moea_pvo_enter()
2088 moea_pvo_remove(pvo, -1); in moea_pvo_enter()
2115 pvo->pvo_vaddr = va; in moea_pvo_enter()
2116 pvo->pvo_pmap = pm; in moea_pvo_enter()
2118 pvo->pvo_vaddr &= ~ADDR_POFF; in moea_pvo_enter()
2120 pvo->pvo_vaddr |= PVO_WIRED; in moea_pvo_enter()
2122 pvo->pvo_vaddr |= PVO_MANAGED; in moea_pvo_enter()
2124 pvo->pvo_vaddr |= PVO_BOOTSTRAP; in moea_pvo_enter()
2126 moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo); in moea_pvo_enter()
2131 RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo); in moea_pvo_enter()
2141 if (pvo->pvo_vaddr & PVO_WIRED) in moea_pvo_enter()
2142 pm->pm_stats.wired_count++; in moea_pvo_enter()
2143 pm->pm_stats.resident_count++; in moea_pvo_enter()
2145 i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte); in moea_pvo_enter()
2159 moea_pvo_remove(struct pvo_entry *pvo, int pteidx) in moea_pvo_remove()
2169 moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr); in moea_pvo_remove()
2173 moea_pte_overflow--; in moea_pvo_remove()
2179 pvo->pvo_pmap->pm_stats.resident_count--; in moea_pvo_remove()
2180 if (pvo->pvo_vaddr & PVO_WIRED) in moea_pvo_remove()
2181 pvo->pvo_pmap->pm_stats.wired_count--; in moea_pvo_remove()
2187 RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo); in moea_pvo_remove()
2190 * Save the REF/CHG bits into their cache if the page is managed. in moea_pvo_remove()
2193 if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) { in moea_pvo_remove()
2198 moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo & in moea_pvo_remove()
2200 if (LIST_EMPTY(&pg->md.mdpg_pvoh)) in moea_pvo_remove()
2210 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) in moea_pvo_remove()
2211 uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone : in moea_pvo_remove()
2213 moea_pvo_entries--; in moea_pvo_remove()
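
The moea_pvo_remove() fragments above, together with the "Save the REF/CHG bits into their cache if the page is managed" comment, capture an ordering rule: before a managed mapping's PVO is freed, the REF/CHG bits accumulated in its software PTE copy are ORed into a per-page attribute cache, so later modified/referenced queries still see history from mappings that no longer exist. A minimal sketch of that hand-off (the structures are stand-ins, and unlinking from the PVO lists is omitted):

#include <stdint.h>
#include <stdio.h>

#define PTE_REF 0x100u
#define PTE_CHG 0x080u

struct vm_page  { uint32_t mdpg_attrs; };            /* per-page attr cache */
struct pvo_entry { uint32_t pte_lo; int managed; };

/* Tear down one mapping, preserving its REF/CHG history on the page. */
static void pvo_remove(struct pvo_entry *pvo, struct vm_page *pg)
{
	if (pvo->managed && pg != NULL)
		pg->mdpg_attrs |= pvo->pte_lo & (PTE_REF | PTE_CHG);
	pvo->pte_lo = 0;   /* a real pmap would also unset the hardware PTE */
}

int main(void)
{
	struct vm_page pg = { 0 };
	struct pvo_entry pvo = { PTE_CHG, 1 };

	pvo_remove(&pvo, &pg);
	printf("page still remembered as dirty: %d\n",
	    !!(pg.mdpg_attrs & PTE_CHG));
	return 0;
}
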
2217 static __inline int
2218 moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) in moea_pvo_pte_index()
2220 int pteidx; in moea_pvo_pte_index()
2228 if (pvo->pvo_pte.pte.pte_hi & PTE_HID) in moea_pvo_pte_index()
2235 moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) in moea_pvo_find_va()
2238 int ptegidx; in moea_pvo_find_va()
2242 sr = va_to_sr(pm->pm_sr, va); in moea_pvo_find_va()
2247 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { in moea_pvo_find_va()
2259 moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) in moea_pvo_to_pte()
2266 if (pteidx == -1) { in moea_pvo_to_pte()
2267 int ptegidx; in moea_pvo_to_pte()
2270 sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); in moea_pvo_to_pte()
2271 ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); in moea_pvo_to_pte()
2278 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { in moea_pvo_to_pte()
2283 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { in moea_pvo_to_pte()
2288 if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { in moea_pvo_to_pte()
2289 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) { in moea_pvo_to_pte()
2294 if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) & ~(PTE_CHG|PTE_REF)) in moea_pvo_to_pte()
2304 if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) { in moea_pvo_to_pte()
2306 "moea_pteg_table but valid in pvo: %8x, %8x", pvo, pt, pvo->pvo_pte.pte.pte_hi, pt->pte_hi); in moea_pvo_to_pte()
2316 int
2321 int ptegidx, i, j; in moea_pte_spill()
2339 pt = &pteg->pt[i]; in moea_pte_spill()
2348 moea_pte_match(&pvo->pvo_pte.pte, sr, addr, in moea_pte_spill()
2349 pvo->pvo_pte.pte.pte_hi & PTE_HID)) { in moea_pte_spill()
2354 j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte); in moea_pte_spill()
2358 moea_pte_overflow--; in moea_pte_spill()
2373 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && in moea_pte_spill()
2374 moea_pte_compare(pt, &pvo->pvo_pte.pte)) { in moea_pte_spill()
2387 if ((pt->pte_hi & PTE_HID) == 0) in moea_pte_spill()
2388 panic("moea_pte_spill: victim p-pte (%p) has no pvo" in moea_pte_spill()
2401 if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) { in moea_pte_spill()
2408 panic("moea_pte_spill: victim s-pte (%p) has no pvo" in moea_pte_spill()
2414 * though it's valid. If we don't, we lose any ref/chg bit changes in moea_pte_spill()
2417 source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID; in moea_pte_spill()
2419 moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr); in moea_pte_spill()
2420 moea_pte_set(pt, &source_pvo->pvo_pte.pte); in moea_pte_spill()
2437 if (pvo_walk->pvo_vaddr & PVO_WIRED) in moea_pte_spillable_ident()
2440 if (!(pvo_walk->pvo_pte.pte.pte_hi & PTE_VALID)) in moea_pte_spillable_ident()
2443 pt = moea_pvo_to_pte(pvo_walk, -1); in moea_pte_spillable_ident()
2451 if (!(pt->pte_lo & PTE_REF)) in moea_pte_spillable_ident()
2458 static int
2463 int i; in moea_pte_insert()
2464 int victim_idx; in moea_pte_insert()
2473 if ((pt->pte_hi & PTE_VALID) == 0) { in moea_pte_insert()
2474 pvo_pt->pte_hi &= ~PTE_HID; in moea_pte_insert()
2486 if ((pt->pte_hi & PTE_VALID) == 0) { in moea_pte_insert()
2487 pvo_pt->pte_hi |= PTE_HID; in moea_pte_insert()
2504 return (-1); in moea_pte_insert()
2510 pvo_pt->pte_hi &= ~PTE_HID; in moea_pte_insert()
2512 pvo_pt->pte_hi |= PTE_HID; in moea_pte_insert()
2521 if (pt->pte_hi != victim_pvo->pvo_pte.pte.pte_hi) in moea_pte_insert()
2522 panic("Victim PVO doesn't match PTE! PVO: %8x, PTE: %8x", victim_pvo->pvo_pte.pte.pte_hi, pt->pte_hi); in moea_pte_insert()
2527 moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr); in moea_pte_insert()
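
The moea_pte_insert() fragments above implement the OEA two-hash placement: a new entry is first tried in the primary PTEG with PTE_HID clear, then in the secondary PTEG with PTE_HID set, and only if both groups are full is a spillable victim evicted (its REF/CHG state saved through the unset path shown earlier). A compact sketch of the slot-selection step only, with illustrative group size and bit values:

#include <stdint.h>
#include <stdio.h>

#define PTEG_SIZE 8             /* PTEs per group                       */
#define PTE_VALID 0x80000000u
#define PTE_HID   0x00000040u   /* entry lives in the secondary group   */

struct pte { uint32_t pte_hi; };

static struct pte pteg_table[2][PTEG_SIZE];   /* [0]=primary, [1]=secondary */

/*
 * Try to place 'new_hi' in the primary group, then in the secondary
 * group with PTE_HID set.  Returns the slot index, or -1 if both are
 * full (where the real code would pick and evict a victim).
 */
static int pte_insert(struct pte *primary, struct pte *secondary,
    uint32_t new_hi)
{
	for (int i = 0; i < PTEG_SIZE; i++)
		if ((primary[i].pte_hi & PTE_VALID) == 0) {
			primary[i].pte_hi = (new_hi & ~PTE_HID) | PTE_VALID;
			return i;
		}
	for (int i = 0; i < PTEG_SIZE; i++)
		if ((secondary[i].pte_hi & PTE_VALID) == 0) {
			secondary[i].pte_hi = new_hi | PTE_HID | PTE_VALID;
			return PTEG_SIZE + i;
		}
	return -1;
}

int main(void)
{
	int slot = pte_insert(pteg_table[0], pteg_table[1], 0x12340000);

	printf("placed in slot %d\n", slot);
	return 0;
}
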
2536 moea_query_bit(vm_page_t m, int ptebit) in moea_query_bit()
2550 if (pvo->pvo_pte.pte.pte_lo & ptebit) { in moea_query_bit()
2558 * themselves. Sync so that any pending REF/CHG bits are flushed to in moea_query_bit()
2565 * REF/CHG bits from the valid PTE. If the appropriate in moea_query_bit()
2568 pt = moea_pvo_to_pte(pvo, -1); in moea_query_bit()
2570 moea_pte_synch(pt, &pvo->pvo_pte.pte); in moea_query_bit()
2572 if (pvo->pvo_pte.pte.pte_lo & ptebit) { in moea_query_bit()
2583 moea_clear_bit(vm_page_t m, int ptebit) in moea_clear_bit()
2597 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so in moea_clear_bit()
2601 * REF/CHG bits. in moea_clear_bit()
2611 pt = moea_pvo_to_pte(pvo, -1); in moea_clear_bit()
2613 moea_pte_synch(pt, &pvo->pvo_pte.pte); in moea_clear_bit()
2614 if (pvo->pvo_pte.pte.pte_lo & ptebit) { in moea_clear_bit()
2620 pvo->pvo_pte.pte.pte_lo &= ~ptebit; in moea_clear_bit()
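
The moea_clear_bit() fragments above follow the same discipline as moea_query_bit(): clear the per-page attribute cache, then sync each valid mapping's hardware PTE so no in-flight REF/CHG update is lost, and finally clear the bit in both the hardware PTE and the software copy. A minimal single-mapping sketch; types and bit values are stand-ins, and TLB flushing is omitted.

#include <stdint.h>
#include <stdio.h>

#define PTE_REF 0x100u
#define PTE_CHG 0x080u

struct pte { uint32_t pte_lo; };

/*
 * Clear 'bit' for a page with a single mapping and report whether it
 * had been set anywhere (attribute cache, software copy, or hardware).
 */
static int clear_bit(uint32_t *attr_cache, struct pte *hw, struct pte *sw,
    uint32_t bit)
{
	uint32_t was = *attr_cache & bit;

	*attr_cache &= ~bit;
	sw->pte_lo |= hw->pte_lo & (PTE_REF | PTE_CHG);  /* sync first */
	if (sw->pte_lo & bit) {
		was |= bit;
		hw->pte_lo &= ~bit;   /* real code also flushes the TLB */
		sw->pte_lo &= ~bit;
	}
	return was != 0;
}

int main(void)
{
	uint32_t attrs = 0;
	struct pte hw = { PTE_CHG }, sw = { 0 };
	int was = clear_bit(&attrs, &hw, &sw, PTE_CHG);
	int now = clear_bit(&attrs, &hw, &sw, PTE_CHG);

	printf("was dirty: %d, dirty now: %d\n", was, now);
	return 0;
}
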
2629 static int
2630 moea_bat_mapped(int idx, vm_paddr_t pa, vm_size_t size) in moea_bat_mapped()
2644 * The BAT entry must be cache-inhibited, guarded, and r/w in moea_bat_mapped()
2666 int
2669 int i; in moea_dev_direct_mapped()
2700 int i; in moea_mapdev_attr()
2723 size -= PAGE_SIZE; in moea_mapdev_attr()
2761 len = MIN(lim - va, sz); in moea_sync_icache()
2768 sz -= len; in moea_sync_icache()
2787 int i; in moea_scan_init()
2806 round_page((uintptr_t)_end) - dump_map[0].pa_start; in moea_scan_init()
2809 dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr; in moea_scan_init()
2810 dump_map[1].pa_size = round_page(msgbufp->msg_size); in moea_scan_init()
2822 if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID)) in moea_scan_init()
2837 !(pvo->pvo_pte.pte.pte_hi & PTE_VALID)) in moea_scan_init()
2841 dump_map[2].pa_size = va - dump_map[2].pa_start; in moea_scan_init()