Lines Matching +full:i +full:- +full:tlb +full:- +full:sets

1 // SPDX-License-Identifier: GPL-2.0-only
3 * TLB Management (flush/create/diagnostics) for MMUv3 and MMUv4
5 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
22 unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways; member
26 * Utility Routine to erase a J-TLB entry
63 /* Locate the TLB entry for this vaddr + ASID */ in tlb_entry_erase()
82 * This also sets up PD0 (vaddr, ASID..) for final commit in tlb_entry_insert()
89 * with existing location. This will cause Write CMD to over-write in tlb_entry_insert()
95 /* setup the other half of TLB entry (pfn, rwx..) */ in tlb_entry_insert()
101 * which doesn't flush uTLBs. I'd rather be safe than sorry. in tlb_entry_insert()
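The erase and insert helpers excerpted above both revolve around the PD0/PD1 auxiliary register pair: PD0 carries vaddr plus ASID, PD1 carries pfn plus permissions, and a TLB command register commits or probes them. Below is a minimal sketch of the erase flow as it might sit in this file, assuming the aux-register helpers and command/error mnemonics (write_aux_reg, ARC_REG_TLBPD0, TLBProbe, TLB_LKUP_ERR, ...) from the ARC headers; the real function's error handling differs in detail.

	/* Sketch: probe the joint TLB for vaddr|ASID, then blank the slot if found */
	static void tlb_entry_erase_sketch(unsigned int vaddr_n_asid)
	{
		unsigned int idx;

		/* PD0 holds vaddr + ASID; Probe looks it up and latches the index */
		write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
		idx = read_aux_reg(ARC_REG_TLBINDEX);

		if (!(idx & TLB_LKUP_ERR)) {
			/* entry found: overwrite it with a blank PD0/PD1 */
			write_aux_reg(ARC_REG_TLBPD0, 0);
			write_aux_reg(ARC_REG_TLBPD1, 0);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
		}
	}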
131 * Unconditionally (without lookup) erase the entire MMU contents
139 int num_tlb = mmu->sets * mmu->ways; in local_flush_tlb_all()
152 /* write this entry to the TLB */ in local_flush_tlb_all()
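The mmu->sets * mmu->ways product above is exactly the number of linear indexes the full flush has to visit: a blank descriptor is written to every slot, index by index. A rough sketch of that loop, under the same register/command-name assumptions as the previous sketch, with interrupt masking and uTLB handling elided:

	/* Sketch: blank out every joint-TLB slot, index by index */
	static void flush_all_sketch(int sets, int ways)
	{
		int entry, num_tlb = sets * ways;

		/* load PD0/PD1 with the template for a blank entry */
		write_aux_reg(ARC_REG_TLBPD0, 0);
		write_aux_reg(ARC_REG_TLBPD1, 0);

		for (entry = 0; entry < num_tlb; entry++) {
			/* write this entry to the TLB */
			write_aux_reg(ARC_REG_TLBINDEX, entry);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
		}
	}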
185 if (atomic_read(&mm->mm_users) == 0) in local_flush_tlb_mm()
189 * - Move to a new ASID, but only if the mm is still wired in in local_flush_tlb_mm()
190 * (Android Binder ended up calling this for vma->mm != tsk->mm, in local_flush_tlb_mm()
191 * causing h/w - s/w ASID to get out of sync) in local_flush_tlb_mm()
192 * - Also the new get_new_mmu_context() implementation allocates a new in local_flush_tlb_mm()
193 * ASID only if one is not already allocated - so free the old one first in local_flush_tlb_mm()
196 if (current->mm == mm) in local_flush_tlb_mm()
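The mm-wide flush never walks the TLB at all: it retires the ASID so stale entries can no longer hit, and hands out a fresh ASID only if the mm is the one the current task is actually running. A sketch of that shape, using the destroy_context()/get_new_mmu_context() pair the comment above refers to:

	/* Sketch: flush an entire address space by recycling its ASID */
	void flush_mm_sketch(struct mm_struct *mm)
	{
		if (atomic_read(&mm->mm_users) == 0)
			return;                  /* mm already on its way out */

		destroy_context(mm);             /* forget the old ASID first */
		if (current->mm == mm)
			get_new_mmu_context(mm); /* re-wire only if still current */
	}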
201 * Flush a Range of TLB entries for userland.
204 * -Here the fastest way (if range is too large) is to move to next ASID
206 * -In case of kernel Flush, entry has to be shot down explicitly
214 /* If range @start to @end is more than 32 TLB entries deep, in local_flush_tlb_range()
221 if (unlikely((end - start) >= PAGE_SIZE * 32)) { in local_flush_tlb_range()
222 local_flush_tlb_mm(vma->vm_mm); in local_flush_tlb_range()
235 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_range()
237 tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_range()
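The 32-entry heuristic above picks between a brute-force per-page shootdown and the cheaper whole-mm ASID recycle. A sketch of the per-page branch, assuming asid_mm()/hw_pid() behave as in the surrounding excerpts; the real function additionally rounds @start down to a page boundary and saves/restores IRQ flags:

	/* Sketch: per-page shootdown for a "small" user range */
	void flush_range_sketch(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
	{
		const unsigned int cpu = smp_processor_id();

		if ((end - start) >= PAGE_SIZE * 32) {
			local_flush_tlb_mm(vma->vm_mm);	/* too big: recycle ASID */
			return;
		}

		if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
			while (start < end) {
				tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
				start += PAGE_SIZE;
			}
		}
	}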
245 /* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
247 * Interestingly, shared TLB entries can also be flushed using just
255 /* exactly same as above, except for TLB entry not taking ASID */ in local_flush_tlb_kernel_range()
257 if (unlikely((end - start) >= PAGE_SIZE * 32)) { in local_flush_tlb_kernel_range()
274 * Delete TLB entry in MMU for a given page (??? address)
275 * NOTE One TLB entry contains translation for single PAGE
284 * checking the ASID and using it to flush the TLB entry in local_flush_tlb_page()
288 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_page()
289 tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_page()
307 local_flush_tlb_page(ta->ta_vma, ta->ta_start); in ipi_flush_tlb_page()
314 local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); in ipi_flush_tlb_range()
322 local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); in ipi_flush_pmd_tlb_range()
330 local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end); in ipi_flush_tlb_kernel_range()
351 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1); in flush_tlb_page()
363 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1); in flush_tlb_range()
376 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1); in flush_pmd_tlb_range()
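On SMP the local_* routines are only half the story: the global flush_tlb_*() entry points bundle their arguments into a small struct and run the local flush on every CPU present in the mm's cpumask. A sketch of that pattern, modeled on the tlb_args struct these ipi_* helpers unpack (field names assumed from the ta_vma/ta_start/ta_end accesses shown above):

	/* Sketch: cross-CPU shootdown by IPI-ing the local flush */
	struct tlb_args {
		struct vm_area_struct *ta_vma;
		unsigned long ta_start;
		unsigned long ta_end;
	};

	static void ipi_flush_range_sketch(void *arg)
	{
		struct tlb_args *ta = arg;

		local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
	}

	void flush_range_smp_sketch(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
	{
		struct tlb_args ta = {
			.ta_vma = vma,
			.ta_start = start,
			.ta_end = end,
		};

		/* run on every CPU that may hold entries for this mm, self included */
		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_range_sketch, &ta, 1);
	}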
392 * Routine to create a TLB entry
402 * create_tlb() assumes that current->mm == vma->mm, since in create_tlb()
403 * -the ASID for the TLB entry is fetched from the MMU ASID reg (valid for curr) in create_tlb()
404 * -completes the lazy write to SASID reg (again valid for curr tsk) in create_tlb()
407 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg. in create_tlb()
408 * -More importantly it makes this handler inconsistent with fast-path in create_tlb()
409 * TLB Refill handler which always deals with "current" in create_tlb()
411 * Let's see the use cases when current->mm != vma->mm and we land here in create_tlb()
412 * 1. execve->copy_strings()->__get_user_pages->handle_mm_fault in create_tlb()
413 * Here VM wants to pre-install a TLB entry for user stack while in create_tlb()
414 * current->mm still points to pre-execve mm (hence the condition). in create_tlb()
416 * move_page_tables() tries to undo that TLB entry. in create_tlb()
417 * Thus not creating a TLB entry is not any worse. in create_tlb()
419 * 2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a in create_tlb()
420 * breakpoint in debugged task. Not creating a TLB now is not in create_tlb()
425 if (current->active_mm != vma->vm_mm) in create_tlb()
435 /* Create HW TLB(PD0,PD1) from PTE */ in create_tlb()
444 * however Linux only saves 1 set to save PTE real-estate in create_tlb()
446 * -Kernel only entries have Kr Kw Kx 0 0 0 in create_tlb()
447 * -User entries have mirrored K and U bits in create_tlb()
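Putting the guard and the conversion excerpted above together: create_tlb() bails out unless the fault is against the address space the CPU is actually running, then builds PD0 (vaddr | ASID, taken from the MMU's own ASID register) and PD1 (pfn plus permissions) and commits them. The permission conversion is the interesting part: Linux keeps one set of R/W/X bits in the PTE, the handler fans them out to the kernel-mode copies always and mirrors them into the user-mode copies only for user pages. A sketch of just that fan-out; the bit names below are made up for illustration and stand in for the real _PAGE_*/TLB_* macros:

	/* illustrative bit positions only - the real _PAGE_* / TLB_* values differ */
	#define PTE_READ   0x1
	#define PTE_WRITE  0x2
	#define PTE_EXEC   0x4
	#define TLB_KR     0x01
	#define TLB_KW     0x02
	#define TLB_KX     0x04
	#define TLB_UR     0x08
	#define TLB_UW     0x10
	#define TLB_UX     0x20

	/* Sketch: mirror 3 PTE permission bits into the 6 K/U bits of PD1 */
	static unsigned int pte_to_pd1_bits_sketch(unsigned int pte_bits, int user_page)
	{
		unsigned int pd1 = 0;

		/* kernel-mode copies are always taken from the PTE */
		if (pte_bits & PTE_READ)   pd1 |= TLB_KR;
		if (pte_bits & PTE_WRITE)  pd1 |= TLB_KW;
		if (pte_bits & PTE_EXEC)   pd1 |= TLB_KX;

		/* user-mode copies mirror them only for user entries;
		 * kernel-only entries keep Ur Uw Ux = 0 0 0
		 */
		if (user_page) {
			if (pte_bits & PTE_READ)   pd1 |= TLB_UR;
			if (pte_bits & PTE_WRITE)  pd1 |= TLB_UW;
			if (pte_bits & PTE_EXEC)   pd1 |= TLB_UX;
		}

		return pd1;
	}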
465 * -pre-install the corresponding TLB entry into MMU
466 * -Finalize the delayed D-cache flush of kernel mapping of page due to
469 * Note that flush (when done) involves both WBACK - so physical page is
470 * in sync as well as INV - so any non-congruent aliases don't remain
486 * dirty K-mapping of a code page needs to be wback+inv so that in update_mmu_cache_range()
489 if (vma->vm_flags & VM_EXEC) { in update_mmu_cache_range()
491 int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags); in update_mmu_cache_range()
495 paddr -= offset; in update_mmu_cache_range()
496 vaddr -= offset; in update_mmu_cache_range()
497 /* wback + inv dcache lines (K-mapping) */ in update_mmu_cache_range()
500 /* invalidate any existing icache lines (U-mapping) */ in update_mmu_cache_range()
501 if (vma->vm_flags & VM_EXEC) in update_mmu_cache_range()
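The cache half of update_mmu_cache_range() is separate from the TLB pre-install: for an executable mapping whose kernel-side (K) alias was written while the page was built, the D-cache lines must be written back and invalidated and any stale I-cache lines of the user (U) alias dropped, with PG_dc_clean making sure this happens only once per folio. A sketch of that decision, assuming helpers in the style of __flush_dcache_pages()/__inv_icache_pages() from the ARC cache code:

	/* Sketch: one-shot cache sync for an executable mapping of a folio */
	static void sync_exec_mapping_sketch(struct vm_area_struct *vma,
					     struct folio *folio,
					     unsigned long paddr, unsigned long vaddr,
					     unsigned int nr)
	{
		if (!(vma->vm_flags & VM_EXEC))
			return;

		/* PG_dc_clean already set => K-mapping was written back before */
		if (test_and_set_bit(PG_dc_clean, &folio->flags))
			return;

		/* wback + inv the dirty kernel-mapping dcache lines ... */
		__flush_dcache_pages(paddr, vaddr, nr);
		/* ... and drop any stale user-mapping icache lines */
		__inv_icache_pages(paddr, vaddr, nr);
	}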
513 * Normal and Super pages can co-exist (of course not overlap) in TLB with a in local_flush_pmd_tlb_range()
514 * new bit "SZ" in TLB page descriptor to distinguish between them.
519 * - MMU page size (typical 8K, RTL fixed)
520 * - software page walker address split between PGD:PTE:PFN (typical
547 if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) { in local_flush_pmd_tlb_range()
548 unsigned int asid = hw_pid(vma->vm_mm, cpu); in local_flush_pmd_tlb_range()
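local_flush_pmd_tlb_range() is the THP counterpart of the range flush: a super page occupies a single TLB entry (with the SZ bit set), so the loop strides by HPAGE_PMD_SIZE instead of PAGE_SIZE while still tagging each probe with the hardware ASID. A sketch under the same assumptions as the range-flush sketch earlier, IRQ handling elided:

	/* Sketch: shoot down the super-page entries covering a PMD range */
	void flush_pmd_range_sketch(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
	{
		const unsigned int cpu = smp_processor_id();

		if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
			unsigned int asid = hw_pid(vma->vm_mm, cpu);

			/* one entry per super page, not per base page */
			while (start < end) {
				tlb_entry_erase(start | asid);
				start += HPAGE_PMD_SIZE;
			}
		}
	}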
573 mmu->ver = (bcr >> 24); in arc_mmu_mumbojumbo()
575 if (is_isa_arcompact() && mmu->ver == 3) { in arc_mmu_mumbojumbo()
577 mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1); in arc_mmu_mumbojumbo()
578 mmu->sets = 1 << mmu3->sets; in arc_mmu_mumbojumbo()
579 mmu->ways = 1 << mmu3->ways; in arc_mmu_mumbojumbo()
580 u_dtlb = mmu3->u_dtlb; in arc_mmu_mumbojumbo()
581 u_itlb = mmu3->u_itlb; in arc_mmu_mumbojumbo()
582 sasid = mmu3->sasid; in arc_mmu_mumbojumbo()
585 mmu->pg_sz_k = 1 << (mmu4->sz0 - 1); in arc_mmu_mumbojumbo()
586 mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11); in arc_mmu_mumbojumbo()
587 mmu->sets = 64 << mmu4->n_entry; in arc_mmu_mumbojumbo()
588 mmu->ways = mmu4->n_ways * 2; in arc_mmu_mumbojumbo()
589 u_dtlb = mmu4->u_dtlb * 4; in arc_mmu_mumbojumbo()
590 u_itlb = mmu4->u_itlb * 4; in arc_mmu_mumbojumbo()
591 sasid = mmu4->sasid; in arc_mmu_mumbojumbo()
592 mmu->pae = mmu4->pae; in arc_mmu_mumbojumbo()
595 if (mmu->s_pg_sz_m) in arc_mmu_mumbojumbo()
597 mmu->s_pg_sz_m, in arc_mmu_mumbojumbo()
600 n += scnprintf(buf + n, len - n, in arc_mmu_mumbojumbo()
602 mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS, in arc_mmu_mumbojumbo()
603 mmu->sets, mmu->ways, in arc_mmu_mumbojumbo()
606 IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40)); in arc_mmu_mumbojumbo()
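The geometry printed here comes straight from the MMU Build Configuration Register fields decoded above: for MMUv4 the base page size is 1 << (sz0 - 1) KB, the super page size 1 << (sz1 - 11) MB, the set count 64 << n_entry, the way count n_ways * 2, and the micro-TLB counts scale by 4. A small standalone model of that arithmetic; the field values plugged in are illustrative examples, not read from hardware:

	#include <stdio.h>

	/* Model of the MMUv4 BCR decode shown above; example field values only */
	int main(void)
	{
		unsigned int sz0 = 4, sz1 = 12, n_entry = 2, n_ways = 2;
		unsigned int u_dtlb = 2, u_itlb = 1;

		printf("page size     : %u KB\n", 1u << (sz0 - 1));  /* 8 KB */
		printf("super page    : %u MB\n", 1u << (sz1 - 11)); /* 2 MB */
		printf("JTLB sets     : %u\n",    64u << n_entry);   /* 256  */
		printf("JTLB ways     : %u\n",    n_ways * 2);       /* 4    */
		printf("uDTLB entries : %u\n",    u_dtlb * 4);       /* 8    */
		printf("uITLB entries : %u\n",    u_itlb * 4);       /* 4    */
		return 0;
	}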
634 * - For older ARC700 cpus, only v3 supported in arc_mmu_init()
635 * - For HS cpus, v4 was baseline and v5 is backwards compatible in arc_mmu_init()
638 if (is_isa_arcompact() && mmu->ver == 3) in arc_mmu_init()
640 else if (is_isa_arcv2() && mmu->ver >= 4) in arc_mmu_init()
644 panic("MMU ver %d doesn't match kernel built for\n", mmu->ver); in arc_mmu_init()
646 if (mmu->pg_sz_k != TO_KB(PAGE_SIZE)) in arc_mmu_init()
650 mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE)) in arc_mmu_init()
654 if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae) in arc_mmu_init()
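The init-time check encodes the compatibility matrix from the comment above: an ARCompact build accepts only MMUv3, while an ARCv2 build accepts v4 or any newer, backwards-compatible revision. A condensed sketch of that gate, reusing the is_isa_arcompact()/is_isa_arcv2() predicates shown in the excerpts:

	/* Sketch: accept only MMU versions the kernel was built to drive */
	static void mmu_version_check_sketch(unsigned int ver)
	{
		int ok = 0;

		if (is_isa_arcompact() && ver == 3)
			ok = 1;                 /* ARC700 class: v3 only */
		else if (is_isa_arcv2() && ver >= 4)
			ok = 1;                 /* HS class: v4 baseline, v5 compatible */

		if (!ok)
			panic("MMU ver %d doesn't match kernel built for\n", ver);
	}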
668 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
669 * The mapping is Column-first.
670 * (ASCII diagram of the column-first layout: ways 0..3 of set 0 are linear indexes 0..3, set 1 gets 4..7, and so on up to 511 for 4 ways or 255 for 2 ways)
682 #define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way))
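The macro makes the column-first layout concrete: consecutive indexes walk the ways of one set before moving to the next set, so for a 4-way MMU {set 1, way 2} is index 6, and a 2-way MMU tops out at 255. A tiny standalone illustration of the same formula:

	#include <stdio.h>

	/* Same formula as SET_WAY_TO_IDX, with ways passed explicitly */
	static unsigned int set_way_to_idx(unsigned int set, unsigned int way,
					   unsigned int ways)
	{
		return set * ways + way;
	}

	int main(void)
	{
		printf("%u\n", set_way_to_idx(1, 2, 4));    /* set 1, way 2 -> 6   */
		printf("%u\n", set_way_to_idx(127, 3, 4));  /* last 4-way slot: 511 */
		printf("%u\n", set_way_to_idx(127, 1, 2));  /* last 2-way slot: 255 */
		return 0;
	}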
684 /* Handling of Duplicate PD (TLB entry) in MMU.
685 * -Could be due to buggy customer tapeouts or obscure kernel bugs
686 * -MMU complains not at the time of duplicate PD installation, but at the in do_tlb_overlap_fault()
688 * -Ideally these should never happen - but if they do - workaround by deleting
690 * -Knob to be verbose about it. (TODO: hook them up to debugfs) in do_tlb_overlap_fault()
699 int set, n_ways = mmu->ways; in do_tlb_overlap_fault()
702 BUG_ON(mmu->ways > 4); in do_tlb_overlap_fault()
706 /* loop thru all sets of TLB */ in do_tlb_overlap_fault()
707 for (set = 0; set < mmu->sets; set++) { in do_tlb_overlap_fault()
727 for (way = 0; way < n_ways - 1; way++) { in do_tlb_overlap_fault()
739 pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n", in do_tlb_overlap_fault()