/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel	<chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

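/*
 * Invalidate every entry in the ITLB's auto-refill ways.  The entry
 * argument encodes the way number in its low bits and the index within
 * the way at PAGE_SHIFT; the single isync at the end commits all of
 * the no-isync invalidations at once.
 */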
static inline void __flush_itlb_all(void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all(void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

void local_flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous tlb entries. If mm is someone else's user mapping,
 * we invalidate the context, so that when that user mapping is swapped in,
 * a new context will be assigned to it.
 */

void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (mm == current->active_mm) {
		unsigned long flags;
		local_irq_save(flags);
		mm->context.asid[cpu] = NO_CONTEXT;
		activate_context(mm, cpu);
		local_irq_restore(flags);
	} else {
		mm->context.asid[cpu] = NO_CONTEXT;
		mm->context.cpu = -1;
	}
}

#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif
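
/*
 * Flushing a range page by page only pays off while the range spans
 * fewer pages than the TLB has auto-refill entries; for anything
 * larger it is cheaper to drop the whole context (or, for kernel
 * ranges, the whole TLB) in one go.
 */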

void local_flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
		 (unsigned long)mm->context.asid[cpu], start, end);
	local_irq_save(flags);

	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();

		/* Make mm's ASID current so that the per-page probes
		 * below match this mm's entries, then restore the old
		 * RASID afterwards.
		 */
		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		local_flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}

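/*
 * Flush a range of kernel virtual addresses (TASK_SIZE <= vaddr <
 * PAGE_OFFSET).  Kernel mappings use the fixed kernel ASID, so no
 * RASID switching is needed; oversized or out-of-range requests fall
 * back to a full TLB flush.
 */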
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
		start &= PAGE_MASK;
		while (start < end) {
			invalidate_itlb_mapping(start);
			invalidate_dtlb_mapping(start);
			start += PAGE_SIZE;
		}
	} else {
		local_flush_tlb_all();
	}
}

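/*
 * Drop possibly stale TLB entries for @nr consecutive pages starting
 * at @address after their PTEs have been updated, by reusing the
 * ranged flush above.
 */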
void update_mmu_tlb_range(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, unsigned int nr)
{
	local_flush_tlb_range(vma, address, address + PAGE_SIZE * nr);
}

#ifdef CONFIG_DEBUG_TLB_SANITY

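/*
 * Walk the current task's page table and return the raw PTE value for
 * vaddr, or 0 if there is no mapping at any level.
 */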
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned int pteval;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none_or_clear_bad(p4d))
		return 0;
	pud = pud_offset(p4d, vaddr);
	if (pud_none_or_clear_bad(pud))
		return 0;
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	pteval = pte_val(*pte);
	pte_unmap(pte);
	return pteval;
}

enum {
	TLB_SUSPICIOUS	= 1,
	TLB_INSANE	= 2,
};

static void tlb_insane(void)
{
	BUG_ON(1);
}

static void tlb_suspicious(void)
{
	WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and that TLB entries with user ASID (>= 4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or that the
 * PTE is marked as non-present. A non-present PTE together with a page that
 * has a non-zero refcount and a zero mapcount is normal for a batched TLB
 * flush operation. A zero refcount means that the page was freed prematurely.
 * A non-zero mapcount is unusual, but does not necessarily mean an error,
 * and is thus marked as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	/* r0: the entry's VPN and ASID fields; r1: its translation (PPN) */
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned r1 = dtlb ?
		read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
				dtlb ? 'D' : 'I', w, e, vpn,
				kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	if (tlb_asid == mm_asid) {
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
				struct folio *f = page_folio(p);

				pr_err("folio refcount: %d, mapcount: %d\n",
					folio_ref_count(f), folio_mapcount(f));
				if (!folio_ref_count(f))
					rc |= TLB_INSANE;
				else if (folio_mapped(f))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}

void check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	if (bug & TLB_INSANE)
		tlb_insane();
	if (bug & TLB_SUSPICIOUS)
		tlb_suspicious();
	local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */