Lines Matching +full:chg +full:- +full:done

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause AND BSD-4-Clause
8 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
31 /*-
63 /*-
95 * mappings must be done as requested.
161 #define PVO_PADDR(pvo) ((pvo)->pvo_pte.pte.pte_lo & PTE_RPGN)
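PVO_PADDR() recovers the physical page base that the PVO caches in its low PTE word. A minimal standalone model of the decoding, assuming the 32-bit OEA field layout (the PTE_RPGN/ADDR_POFF values below are illustrative, not quoted from the headers):

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_RPGN  0xfffff000u   /* real page number field of pte_lo */
    #define ADDR_POFF 0x00000fffu   /* byte offset within a 4 KiB page */

    /* Model of PVO_PADDR(): the physical page base cached in pte_lo. */
    static uint32_t pvo_paddr(uint32_t pte_lo)
    {
        return (pte_lo & PTE_RPGN);
    }

    int main(void)
    {
        uint32_t pte_lo = 0x12345000u | 0x12u;  /* RPN | WIMG/PP bits */
        uint32_t va = 0x0badcafeu;

        /* Full physical address = page base | page offset of the VA. */
        printf("pa = %#x\n", pvo_paddr(pte_lo) | (va & ADDR_POFF));
        return (0);
    }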
499 return (&m->md.mdpg_pvoh); in vm_page_to_pvoh()
507 m->md.mdpg_attrs &= ~ptebit; in moea_attr_clear()
514 return (m->md.mdpg_attrs); in moea_attr_fetch()
522 m->md.mdpg_attrs |= ptebit; in moea_attr_save()
528 if (pt->pte_hi == pvo_pt->pte_hi) in moea_pte_compare()
537 return (pt->pte_hi & ~PTE_VALID) == in moea_pte_match()
554 pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | in moea_pte_create()
556 pt->pte_lo = pte_lo; in moea_pte_create()
564 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG); in moea_pte_synch()
576 pt->pte_lo &= ~ptebit; in moea_pte_clear()
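moea_pte_synch() and moea_pte_clear() only ever touch the hardware-maintained status bits. A toy model of both, with the PTE_REF/PTE_CHG positions assumed from the 32-bit OEA layout:

    #include <stdint.h>

    #define PTE_REF 0x00000100u   /* assumed bit positions, illustration only */
    #define PTE_CHG 0x00000080u

    struct pte { uint32_t pte_hi, pte_lo; };

    /* moea_pte_synch() model: fold the MMU-set REF/CHG bits from the live
     * page table entry into the cached PVO copy, touching nothing else. */
    static void pte_synch(const struct pte *pt, struct pte *pvo_pt)
    {
        pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
    }

    /* moea_pte_clear() model: drop a status bit in the live entry (the
     * kernel routine also flushes the TLB entry around the store). */
    static void pte_clear(struct pte *pt, uint32_t ptebit)
    {
        pt->pte_lo &= ~ptebit;
    }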
585 pvo_pt->pte_hi |= PTE_VALID; in moea_pte_set()
589 * Note that the REF/CHG bits are from pvo_pt and thus should have in moea_pte_set()
592 pt->pte_lo = pvo_pt->pte_lo; in moea_pte_set()
594 pt->pte_hi = pvo_pt->pte_hi; in moea_pte_set()
604 pvo_pt->pte_hi &= ~PTE_VALID; in moea_pte_unset()
607 * Force the ref & chg bits back into the PTEs. in moea_pte_unset()
614 pt->pte_hi &= ~PTE_VALID; in moea_pte_unset()
619 * Save the ref & chg bits. in moea_pte_unset()
622 moea_pte_valid--; in moea_pte_unset()
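The ordering in moea_pte_unset() matters: the slot is invalidated and the TLB entry flushed before the REF/CHG bits are read back, so the MMU cannot set them again after the harvest. A sketch with the PowerPC primitives stubbed out:

    #include <stdint.h>

    #define PTE_VALID 0x80000000u   /* assumed bits, illustration only */
    #define PTE_REF   0x00000100u
    #define PTE_CHG   0x00000080u

    struct pte { uint32_t pte_hi, pte_lo; };

    static int pte_valid_count;                   /* models moea_pte_valid */

    static void tlbie(uint32_t va) { (void)va; }  /* stands in for PPC tlbie */

    /* Model of the unset sequence: clear PTE_VALID in both copies, flush
     * the translation, then save the REF/CHG bits the MMU left behind. */
    static void pte_unset(struct pte *pt, struct pte *pvo_pt, uint32_t va)
    {
        pvo_pt->pte_hi &= ~PTE_VALID;
        pt->pte_hi &= ~PTE_VALID;
        tlbie(va);
        pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
        pte_valid_count--;
    }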
649 if (mapa->om_pa < mapb->om_pa) in om_cmp()
650 return (-1); in om_cmp()
651 else if (mapa->om_pa > mapb->om_pa) in om_cmp()
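om_cmp() is a plain qsort(3) comparator: the bootstrap path sorts the firmware's translations by physical address before walking them. A self-contained version of the pattern (the struct fields follow the OEA "translations" property format, assumed here):

    #include <stdint.h>
    #include <stdlib.h>

    struct ofw_map { uint32_t om_va, om_len, om_pa, om_mode; };

    static int om_cmp(const void *a, const void *b)
    {
        const struct ofw_map *mapa = a;
        const struct ofw_map *mapb = b;

        if (mapa->om_pa < mapb->om_pa)
            return (-1);
        if (mapa->om_pa > mapb->om_pa)
            return (1);
        return (0);
    }

    /* usage, mirroring the bootstrap path:
     *   qsort(translations, sz / sizeof(*translations),
     *       sizeof(*translations), om_cmp);
     */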
685 mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]); in moea_cpu_bootstrap()
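A 32-bit OEA address space is 16 segments of 256 MB, and moea_cpu_bootstrap() loads one segment register per segment from the kernel pmap. A stubbed model of that loop (mtsrin is the privileged "move to segment register indirect" instruction, faked here):

    #include <stdint.h>

    #define ADDR_SR_SHFT 28             /* top 4 VA bits select a segment */

    static uint32_t segment_regs[16];   /* stand-in for the real SRs */

    static void mtsrin(uint32_t va, uint32_t vsid)
    {
        segment_regs[va >> ADDR_SR_SHFT] = vsid;
    }

    /* Model of the bootstrap loop: install the pmap's VSID for each of
     * the 16 segments. */
    static void load_segments(const uint32_t pm_sr[16])
    {
        uint32_t i;

        for (i = 0; i < 16; i++)
            mtsrin(i << ADDR_SR_SHFT, pm_sr[i]);
    }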
738 CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)", in moea_bootstrap()
744 * of physmem to be covered by on-demand BAT entries. in moea_bootstrap()
768 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, in moea_bootstrap()
776 hwphyssz - physsz; in moea_bootstrap()
841 moea_pteg_mask = moea_pteg_count - 1; in moea_bootstrap()
873 moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] in moea_bootstrap()
882 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; in moea_bootstrap()
883 CPU_FILL(&kernel_pmap->pm_active); in moea_bootstrap()
884 RB_INIT(&kernel_pmap->pmap_pvo); in moea_bootstrap()
895 if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1 && in moea_bootstrap()
896 (mmu = OF_instance_to_package(mmui)) != -1 && in moea_bootstrap()
897 (sz = OF_getproplen(mmu, "translations")) != -1) { in moea_bootstrap()
908 if (OF_getprop(mmu, "translations", translations, sz) == -1) in moea_bootstrap()
920 * on-demand BAT tables take care of the translation. in moea_bootstrap()
923 * which is mixed-protection and therefore not in moea_bootstrap()
1013 pm = &td->td_proc->p_vmspace->vm_pmap; in moea_activate()
1014 pmr = pm->pmap_phys; in moea_activate()
1016 CPU_SET(PCPU_GET(cpuid), &pm->pm_active); in moea_activate()
1019 mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid); in moea_activate()
1027 pm = &td->td_proc->p_vmspace->vm_pmap; in moea_deactivate()
1028 CPU_CLR(PCPU_GET(cpuid), &pm->pm_active); in moea_deactivate()
1039 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); in moea_unwire()
1041 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { in moea_unwire()
1042 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) in moea_unwire()
1044 pvo->pvo_vaddr &= ~PVO_WIRED; in moea_unwire()
1045 pm->pm_stats.wired_count--; in moea_unwire()
1072 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); in moea_copy_pages()
1076 cnt = min(cnt, PAGE_SIZE - b_pg_offset); in moea_copy_pages()
1082 xfersize -= cnt; in moea_copy_pages()
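The two clamps above keep every copy inside a single page on both sides. A userland model of the same loop over vectors of page-sized buffers:

    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Model of the moea_copy_pages() loop: copy xfersize bytes between two
     * page vectors, never letting one memcpy cross a page boundary in
     * either the source or the destination. */
    static void copy_pages(char *const ma[], uint32_t a_offset,
        char *const mb[], uint32_t b_offset, uint32_t xfersize)
    {
        uint32_t a_pg_offset, b_pg_offset, cnt;

        while (xfersize > 0) {
            a_pg_offset = a_offset & (PAGE_SIZE - 1);
            cnt = PAGE_SIZE - a_pg_offset;      /* room left in src page */
            if (cnt > xfersize)
                cnt = xfersize;
            b_pg_offset = b_offset & (PAGE_SIZE - 1);
            if (cnt > PAGE_SIZE - b_pg_offset)  /* room left in dst page */
                cnt = PAGE_SIZE - b_pg_offset;
            memcpy(mb[b_offset / PAGE_SIZE] + b_pg_offset,
                ma[a_offset / PAGE_SIZE] + a_pg_offset, cnt);
            a_offset += cnt;
            b_offset += cnt;
            xfersize -= cnt;
        }
    }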
1122 return (!LIST_EMPTY(&(m)->md.mdpg_pvoh)); in moea_page_is_mapped()
1152 VM_OBJECT_ASSERT_UNLOCKED(m->object); in moea_enter()
1176 if ((m->oflags & VPO_UNMANAGED) == 0) { in moea_enter_locked()
1180 VM_OBJECT_ASSERT_LOCKED(m->object); in moea_enter_locked()
1183 if ((m->oflags & VPO_UNMANAGED) != 0 || !moea_initialized) { in moea_enter_locked()
1198 (m->oflags & VPO_UNMANAGED) == 0) in moea_enter_locked()
1210 * Flush the real page from the instruction cache. This has to be done in moea_enter_locked()
1241 VM_OBJECT_ASSERT_LOCKED(m_start->object); in moea_enter_object()
1243 psize = atop(end - start); in moea_enter_object()
1247 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { in moea_enter_object()
1300 if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) && in moea_extract_and_hold()
1301 ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW || in moea_extract_and_hold()
1329 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea_is_referenced()
1342 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea_is_modified()
1365 rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0; in moea_is_prefaultable()
1374 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea_clear_modify()
1396 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea_remove_write()
1406 pmap = pvo->pvo_pmap; in moea_remove_write()
1408 if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) { in moea_remove_write()
1409 pt = moea_pvo_to_pte(pvo, -1); in moea_remove_write()
1410 pvo->pvo_pte.pte.pte_lo &= ~PTE_PP; in moea_remove_write()
1411 pvo->pvo_pte.pte.pte_lo |= PTE_BR; in moea_remove_write()
1413 moea_pte_synch(pt, &pvo->pvo_pte.pte); in moea_remove_write()
1414 lo |= pvo->pvo_pte.pte.pte_lo; in moea_remove_write()
1415 pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG; in moea_remove_write()
1416 moea_pte_change(pt, &pvo->pvo_pte.pte, in moea_remove_write()
1417 pvo->pvo_vaddr); in moea_remove_write()
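This pattern repeats for every mapping of the page: downgrade the protection field to read-only (PTE_BR), harvest CHG before rewriting the PTE so no dirty state is lost, and accumulate the bits in lo so the caller can dirty the vm_page once at the end. A condensed model of one iteration (bit values assumed):

    #include <stdint.h>

    #define PTE_PP  0x00000003u   /* protection field, assumed layout */
    #define PTE_BR  0x00000002u   /* read-only encoding */
    #define PTE_CHG 0x00000080u

    struct pte { uint32_t pte_hi, pte_lo; };

    /* Model of one moea_remove_write() iteration; hw may be NULL when the
     * mapping has no live PTE slot. Returns the accumulated pte_lo bits
     * so the caller can test PTE_CHG across all mappings. */
    static uint32_t downgrade_to_ro(struct pte *hw, struct pte *cached,
        uint32_t lo)
    {
        cached->pte_lo &= ~PTE_PP;
        cached->pte_lo |= PTE_BR;
        if (hw != NULL) {
            cached->pte_lo |= hw->pte_lo & PTE_CHG;  /* synch */
            lo |= cached->pte_lo;
            cached->pte_lo &= ~PTE_CHG;
            *hw = *cached;           /* models moea_pte_change() */
        }
        return (lo);
    }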
1448 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea_ts_referenced()
1468 if ((m->oflags & VPO_UNMANAGED) != 0) { in moea_page_set_memattr()
1469 m->md.mdpg_cache_attrs = ma; in moea_page_set_memattr()
1478 pmap = pvo->pvo_pmap; in moea_page_set_memattr()
1480 pt = moea_pvo_to_pte(pvo, -1); in moea_page_set_memattr()
1481 pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG; in moea_page_set_memattr()
1482 pvo->pvo_pte.pte.pte_lo |= lo; in moea_page_set_memattr()
1484 moea_pte_change(pt, &pvo->pvo_pte.pte, in moea_page_set_memattr()
1485 pvo->pvo_vaddr); in moea_page_set_memattr()
1486 if (pvo->pvo_pmap == kernel_pmap) in moea_page_set_memattr()
1492 m->md.mdpg_cache_attrs = ma; in moea_page_set_memattr()
1514 panic("moea_kenter: attempt to enter non-kernel address %#x", in moea_kenter_attr()
1542 * Allow direct mappings on 32-bit OEA in moea_kextract()
1579 l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr); in moea_map_user_ptr()
1589 /* Mark segment no-execute */ in moea_map_user_ptr()
1593 if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == vsid) in moea_map_user_ptr()
1597 curthread->td_pcb->pcb_cpu.aim.usr_segm = in moea_map_user_ptr()
1599 curthread->td_pcb->pcb_cpu.aim.usr_vsid = vsid; in moea_map_user_ptr()
1617 user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm; in moea_decode_kernel_ptr()
1634 * Architectures which can support a direct-mapped physical to virtual region
1667 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in moea_page_exists_quick()
1673 if (pvo->pvo_pmap == pmap) { in moea_page_exists_quick()
1688 m->md.mdpg_attrs = 0; in moea_page_init()
1689 m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT; in moea_page_init()
1690 LIST_INIT(&m->md.mdpg_pvoh); in moea_page_init()
1704 if ((m->oflags & VPO_UNMANAGED) != 0) in moea_page_wired_mappings()
1708 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) in moea_page_wired_mappings()
1722 RB_INIT(&pmap->pmap_pvo); in moea_pinit()
1727 if ((pmap->pmap_phys = (pmap_t)moea_kextract((vm_offset_t)pmap)) in moea_pinit()
1729 pmap->pmap_phys = pmap; in moea_pinit()
1747 hash = moea_vsidcontext & (NPMAPS - 1); in moea_pinit()
1751 mask = 1 << (hash & (VSID_NBPW - 1)); in moea_pinit()
1759 i = ffs(~moea_vsid_bitmap[n]) - 1; in moea_pinit()
1765 ("Allocating in-use VSID group %#x\n", hash)); in moea_pinit()
1768 pmap->pm_sr[i] = VSID_MAKE(i, hash); in moea_pinit()
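VSID groups are allocated by hashing a rolling context value into a bitmap of NPMAPS entries; when the first-choice bit is already taken, ffs() finds a free neighbor in the same word. A simplified userland model (the mixing constants and retry policy are assumptions, not the kernel's exact values):

    #include <stdint.h>
    #include <strings.h>            /* ffs() */

    #define NPMAPS    32768u
    #define VSID_NBPW 32u           /* bitmap bits per word */

    static uint32_t vsid_bitmap[NPMAPS / VSID_NBPW];

    /* Simplified model of the VSID-group allocator. Returns the group
     * number, or -1 when this word is exhausted (the kernel loops and
     * rehashes instead of giving up). */
    static int vsid_alloc(uint32_t *vsidcontext)
    {
        uint32_t hash, n, mask;
        int i;

        *vsidcontext = *vsidcontext * 0x1105u + 0x29u;  /* assumed mixer */
        hash = *vsidcontext & (NPMAPS - 1);
        if (hash == 0)
            return (-1);            /* zero VSID is reserved */
        n = hash / VSID_NBPW;
        mask = 1u << (hash & (VSID_NBPW - 1));
        if (vsid_bitmap[n] & mask) {
            if (vsid_bitmap[n] == 0xffffffffu)
                return (-1);        /* word full, rehash */
            i = ffs((int)~vsid_bitmap[n]) - 1;
            mask = 1u << i;
            hash = (hash & ~(VSID_NBPW - 1)) | (uint32_t)i;
        }
        vsid_bitmap[n] |= mask;
        return ((int)hash);
    }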
1786 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); in moea_pinit0()
1799 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, in moea_protect()
1810 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); in moea_protect()
1812 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); in moea_protect()
1818 pt = moea_pvo_to_pte(pvo, -1); in moea_protect()
1822 pvo->pvo_pte.pte.pte_lo &= ~PTE_PP; in moea_protect()
1823 pvo->pvo_pte.pte.pte_lo |= PTE_BR; in moea_protect()
1829 moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr); in moea_protect()
1848 while (count-- > 0) { in moea_qenter()
1865 while (count-- > 0) { in moea_qremove()
1879 if (pmap->pm_sr[0] == 0) in moea_release()
1883 idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1); in moea_release()
1901 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); in moea_remove()
1903 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); in moea_remove()
1904 moea_pvo_remove(pvo, -1); in moea_remove()
1926 pmap = pvo->pvo_pmap; in moea_remove_all()
1928 moea_pvo_remove(pvo, -1); in moea_remove_all()
1931 if ((m->a.flags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) { in moea_remove_all()
1954 managed = (pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED; in moea_mincore()
2008 phys_avail[i + 1] -= size; in moea_bootstrap_alloc()
2010 for (j = phys_avail_count * 2; j > i; j -= 2) { in moea_bootstrap_alloc()
2011 phys_avail[j] = phys_avail[j - 2]; in moea_bootstrap_alloc()
2012 phys_avail[j + 1] = phys_avail[j - 1]; in moea_bootstrap_alloc()
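moea_bootstrap_alloc() carves early allocations straight out of the phys_avail start/end pairs, either trimming a range at one end or, as in the j -= 2 loop above, splitting it and shifting the later pairs up. A minimal model of the trim-from-the-end case:

    #include <stdint.h>

    #define PHYS_AVAIL_SZ 16

    static uint32_t phys_avail[PHYS_AVAIL_SZ];  /* start/end pairs */
    static int phys_avail_count;                /* number of pairs */

    /* Simplified model: steal size bytes from the tail of the first
     * range that can hold them. The kernel version also honors the
     * requested alignment and can split a range in the middle. */
    static uint32_t bootstrap_alloc(uint32_t size)
    {
        int i;

        for (i = 0; i < phys_avail_count * 2; i += 2) {
            if (phys_avail[i + 1] - phys_avail[i] >= size) {
                phys_avail[i + 1] -= size;
                return (phys_avail[i + 1]);
            }
        }
        return (0);     /* out of early physical memory */
    }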
2051 sr = va_to_sr(pm->pm_sr, va); in moea_pvo_enter()
2060 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { in moea_pvo_enter()
2062 (pvo->pvo_pte.pte.pte_lo & PTE_PP) == in moea_pvo_enter()
2071 (pvo->pvo_vaddr & PVO_WIRED) == 0) { in moea_pvo_enter()
2072 pvo->pvo_vaddr |= PVO_WIRED; in moea_pvo_enter()
2073 pm->pm_stats.wired_count++; in moea_pvo_enter()
2075 (pvo->pvo_vaddr & PVO_WIRED) != 0) { in moea_pvo_enter()
2076 pvo->pvo_vaddr &= ~PVO_WIRED; in moea_pvo_enter()
2077 pm->pm_stats.wired_count--; in moea_pvo_enter()
2081 moea_pvo_remove(pvo, -1); in moea_pvo_enter()
2108 pvo->pvo_vaddr = va; in moea_pvo_enter()
2109 pvo->pvo_pmap = pm; in moea_pvo_enter()
2111 pvo->pvo_vaddr &= ~ADDR_POFF; in moea_pvo_enter()
2113 pvo->pvo_vaddr |= PVO_WIRED; in moea_pvo_enter()
2115 pvo->pvo_vaddr |= PVO_MANAGED; in moea_pvo_enter()
2117 pvo->pvo_vaddr |= PVO_BOOTSTRAP; in moea_pvo_enter()
2119 moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo); in moea_pvo_enter()
2124 RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo); in moea_pvo_enter()
2134 if (pvo->pvo_vaddr & PVO_WIRED) in moea_pvo_enter()
2135 pm->pm_stats.wired_count++; in moea_pvo_enter()
2136 pm->pm_stats.resident_count++; in moea_pvo_enter()
2138 i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte); in moea_pvo_enter()
2162 moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr); in moea_pvo_remove()
2166 moea_pte_overflow--; in moea_pvo_remove()
2172 pvo->pvo_pmap->pm_stats.resident_count--; in moea_pvo_remove()
2173 if (pvo->pvo_vaddr & PVO_WIRED) in moea_pvo_remove()
2174 pvo->pvo_pmap->pm_stats.wired_count--; in moea_pvo_remove()
2180 RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo); in moea_pvo_remove()
2183 * Save the REF/CHG bits into their cache if the page is managed. in moea_pvo_remove()
2186 if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) { in moea_pvo_remove()
2191 moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo & in moea_pvo_remove()
2193 if (LIST_EMPTY(&pg->md.mdpg_pvoh)) in moea_pvo_remove()
2203 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) in moea_pvo_remove()
2204 uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone : in moea_pvo_remove()
2206 moea_pvo_entries--; in moea_pvo_remove()
2221 if (pvo->pvo_pte.pte.pte_hi & PTE_HID) in moea_pvo_pte_index()
2235 sr = va_to_sr(pm->pm_sr, va); in moea_pvo_find_va()
2240 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { in moea_pvo_find_va()
2259 if (pteidx == -1) { in moea_pvo_to_pte()
2263 sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); in moea_pvo_to_pte()
2264 ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); in moea_pvo_to_pte()
2271 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { in moea_pvo_to_pte()
2276 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { in moea_pvo_to_pte()
2281 if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { in moea_pvo_to_pte()
2282 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) { in moea_pvo_to_pte()
2287 if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) & ~(PTE_CHG|PTE_REF)) in moea_pvo_to_pte()
2297 if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) { in moea_pvo_to_pte()
2299 "moea_pteg_table but valid in pvo: %8x, %8x", pvo, pt, pvo->pvo_pte.pte.pte_hi, pt->pte_hi); in moea_pvo_to_pte()
2332 pt = &pteg->pt[i]; in moea_pte_spill()
2341 moea_pte_match(&pvo->pvo_pte.pte, sr, addr, in moea_pte_spill()
2342 pvo->pvo_pte.pte.pte_hi & PTE_HID)) { in moea_pte_spill()
2347 j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte); in moea_pte_spill()
2351 moea_pte_overflow--; in moea_pte_spill()
2366 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && in moea_pte_spill()
2367 moea_pte_compare(pt, &pvo->pvo_pte.pte)) { in moea_pte_spill()
2380 if ((pt->pte_hi & PTE_HID) == 0) in moea_pte_spill()
2381 panic("moea_pte_spill: victim p-pte (%p) has no pvo" in moea_pte_spill()
2394 if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) { in moea_pte_spill()
2401 panic("moea_pte_spill: victim s-pte (%p) has no pvo" in moea_pte_spill()
2407 * though it's valid. If we don't, we lose any ref/chg bit changes in moea_pte_spill()
2410 source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID; in moea_pte_spill()
2412 moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr); in moea_pte_spill()
2413 moea_pte_set(pt, &source_pvo->pvo_pte.pte); in moea_pte_spill()
2430 if (pvo_walk->pvo_vaddr & PVO_WIRED) in moea_pte_spillable_ident()
2433 if (!(pvo_walk->pvo_pte.pte.pte_hi & PTE_VALID)) in moea_pte_spillable_ident()
2436 pt = moea_pvo_to_pte(pvo_walk, -1); in moea_pte_spillable_ident()
2444 if (!(pt->pte_lo & PTE_REF)) in moea_pte_spillable_ident()
2466 if ((pt->pte_hi & PTE_VALID) == 0) { in moea_pte_insert()
2467 pvo_pt->pte_hi &= ~PTE_HID; in moea_pte_insert()
2479 if ((pt->pte_hi & PTE_VALID) == 0) { in moea_pte_insert()
2480 pvo_pt->pte_hi |= PTE_HID; in moea_pte_insert()
2497 return (-1); in moea_pte_insert()
2503 pvo_pt->pte_hi &= ~PTE_HID; in moea_pte_insert()
2505 pvo_pt->pte_hi |= PTE_HID; in moea_pte_insert()
2514 if (pt->pte_hi != victim_pvo->pvo_pte.pte.pte_hi) in moea_pte_insert()
2515 …panic("Victim PVO doesn't match PTE! PVO: %8x, PTE: %8x", victim_pvo->pvo_pte.pte.pte_hi, pt->pte_… in moea_pte_insert()
2520 moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr); in moea_pte_insert()
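Every mapping has two candidate PTEGs: the primary group from the hash, and a secondary group at the complemented index, recorded by setting PTE_HID; only when all sixteen slots are taken does the eviction path above run. A condensed model of the probe order in moea_pte_insert() (slot bookkeeping simplified, bit values assumed):

    #include <stdint.h>

    #define NPTEGS     1024             /* assumed table size (power of 2) */
    #define PTEG_SLOTS 8
    #define PTE_VALID  0x80000000u      /* assumed bits, illustration only */
    #define PTE_HID    0x00000040u

    struct pte  { uint32_t pte_hi, pte_lo; };
    struct pteg { struct pte pt[PTEG_SLOTS]; };

    static struct pteg pteg_table[NPTEGS];
    static const uint32_t pteg_mask = NPTEGS - 1;

    /* Try the primary group first, then the secondary (index complemented
     * under the mask), tagging secondary placements with PTE_HID. */
    static int pte_insert(uint32_t ptegidx, struct pte *pvo_pt)
    {
        struct pte *pt;
        int i;

        for (i = 0; i < PTEG_SLOTS; i++) {
            pt = &pteg_table[ptegidx].pt[i];
            if ((pt->pte_hi & PTE_VALID) == 0) {
                pvo_pt->pte_hi &= ~PTE_HID;
                pvo_pt->pte_hi |= PTE_VALID;
                *pt = *pvo_pt;          /* models moea_pte_set() */
                return (i);
            }
        }
        for (i = 0; i < PTEG_SLOTS; i++) {
            pt = &pteg_table[ptegidx ^ pteg_mask].pt[i];
            if ((pt->pte_hi & PTE_VALID) == 0) {
                pvo_pt->pte_hi |= PTE_HID;
                pvo_pt->pte_hi |= PTE_VALID;
                *pt = *pvo_pt;
                return (i);
            }
        }
        return (-1);    /* both groups full: spill/evict a victim */
    }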
2543 if (pvo->pvo_pte.pte.pte_lo & ptebit) { in moea_query_bit()
2551 * themselves. Sync so that any pending REF/CHG bits are flushed to in moea_query_bit()
2558 * REF/CHG bits from the valid PTE. If the appropriate in moea_query_bit()
2561 pt = moea_pvo_to_pte(pvo, -1); in moea_query_bit()
2563 moea_pte_synch(pt, &pvo->pvo_pte.pte); in moea_query_bit()
2565 if (pvo->pvo_pte.pte.pte_lo & ptebit) { in moea_query_bit()
2590 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so in moea_clear_bit()
2594 * REF/CHG bits. in moea_clear_bit()
2604 pt = moea_pvo_to_pte(pvo, -1); in moea_clear_bit()
2606 moea_pte_synch(pt, &pvo->pvo_pte.pte); in moea_clear_bit()
2607 if (pvo->pvo_pte.pte.pte_lo & ptebit) { in moea_clear_bit()
2613 pvo->pvo_pte.pte.pte_lo &= ~ptebit; in moea_clear_bit()
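moea_clear_bit() visits every mapping of the page: it synchs the hardware bits into the cached copy, clears the requested bit in both, and reports whether any mapping had it set, which is how clear_modify and ts_referenced are built on it. A userland model over a flat array of mappings (the kernel walks a LIST of PVOs instead):

    #include <stdbool.h>
    #include <stdint.h>

    #define PTE_REF 0x00000100u   /* assumed bit positions */
    #define PTE_CHG 0x00000080u

    struct pte     { uint32_t pte_hi, pte_lo; };
    struct mapping { struct pte hw, cached; };  /* one PVO's two copies */

    /* Model of moea_clear_bit(): returns true if any mapping had ptebit
     * set, clearing it everywhere as a side effect. */
    static bool clear_bit(struct mapping maps[], int n, uint32_t ptebit)
    {
        bool was_set = false;
        int i;

        for (i = 0; i < n; i++) {
            /* synch: harvest what the MMU recorded so far */
            maps[i].cached.pte_lo |=
                maps[i].hw.pte_lo & (PTE_REF | PTE_CHG);
            if (maps[i].cached.pte_lo & ptebit) {
                was_set = true;
                maps[i].hw.pte_lo &= ~ptebit;   /* pte_clear */
            }
            maps[i].cached.pte_lo &= ~ptebit;
        }
        return (was_set);
    }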
2637 * The BAT entry must be cache-inhibited, guarded, and r/w in moea_bat_mapped()
2716 size -= PAGE_SIZE; in moea_mapdev_attr()
2754 len = MIN(lim - va, sz); in moea_sync_icache()
2761 sz -= len; in moea_sync_icache()
2799 round_page((uintptr_t)_end) - dump_map[0].pa_start; in moea_scan_init()
2802 dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr; in moea_scan_init()
2803 dump_map[1].pa_size = round_page(msgbufp->msg_size); in moea_scan_init()
2815 if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID)) in moea_scan_init()
2830 !(pvo->pvo_pte.pte.pte_hi & PTE_VALID)) in moea_scan_init()
2834 dump_map[2].pa_size = va - dump_map[2].pa_start; in moea_scan_init()