xref: /linux/arch/arc/mm/tlb.c (revision e724e7aaf9ca794670a4d4931af7a7e24e37fec3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * TLB Management (flush/create/diagnostics) for MMUv3 and MMUv4
4  *
5  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
6  *
7  */
8 
9 #include <linux/module.h>
10 #include <linux/bug.h>
11 #include <linux/mm_types.h>
12 
13 #include <asm/arcregs.h>
14 #include <asm/setup.h>
15 #include <asm/mmu_context.h>
16 #include <asm/mmu.h>
17 
18 /* A copy of the ASID from the PID reg is kept in asid_cache */
19 DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
20 
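/* Set during BCR decode if hardware advertises PAE40 (see read_decode_mmu_bcr()) */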
21 static int __read_mostly pae_exists;
22 
23 /*
24  * Utility Routine to erase a J-TLB entry
25  * Caller needs to set up the Index Reg (manually or via getIndex)
26  */
27 static inline void __tlb_entry_erase(void)
28 {
29 	write_aux_reg(ARC_REG_TLBPD1, 0);
30 
31 	if (is_pae40_enabled())
32 		write_aux_reg(ARC_REG_TLBPD1HI, 0);
33 
34 	write_aux_reg(ARC_REG_TLBPD0, 0);
35 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
36 }
37 
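/* Invalidate the micro ITLB and DTLB (uITLB/uDTLB) */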
38 static void utlb_invalidate(void)
39 {
40 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
41 }
42 
43 #ifdef CONFIG_ARC_MMU_V3
44 
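/*
 * Probe the J-TLB for @vaddr_n_asid: hardware leaves the matching index in
 * the Index reg; callers check TLB_LKUP_ERR in the result to detect a miss.
 */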
45 static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
46 {
47 	unsigned int idx;
48 
49 	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
50 
51 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
52 	idx = read_aux_reg(ARC_REG_TLBINDEX);
53 
54 	return idx;
55 }
56 
57 static void tlb_entry_erase(unsigned int vaddr_n_asid)
58 {
59 	unsigned int idx;
60 
61 	/* Locate the TLB entry for this vaddr + ASID */
62 	idx = tlb_entry_lkup(vaddr_n_asid);
63 
64 	/* No error means entry found, zero it out */
65 	if (likely(!(idx & TLB_LKUP_ERR))) {
66 		__tlb_entry_erase();
67 	} else {
68 		/* Duplicate entry error */
69 		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
70 					   vaddr_n_asid);
71 	}
72 }
73 
74 static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
75 {
76 	unsigned int idx;
77 
78 	/*
79 	 * First verify if entry for this vaddr+ASID already exists
80 	 * This also sets up PD0 (vaddr, ASID..) for final commit
81 	 */
82 	idx = tlb_entry_lkup(pd0);
83 
84 	/*
85 	 * If not already present, get a free slot from the MMU.
86 	 * Otherwise, the Probe will have located the entry and set the INDEX
87 	 * Reg to its existing location. This will cause the Write CMD to
88 	 * over-write the existing entry with the new PD0 and PD1
89 	 */
90 	if (likely(idx & TLB_LKUP_ERR))
91 		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
92 
93 	/* set up the other half of the TLB entry (pfn, rwx..) */
94 	write_aux_reg(ARC_REG_TLBPD1, pd1);
95 
96 	/*
97 	 * Commit the Entry to MMU
98 	 * It doesn't sound safe to use the TLBWriteNI cmd here
99 	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
100 	 */
101 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
102 }
103 
104 #else	/* MMUv4 */
105 
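/*
 * MMUv4 provides dedicated Insert/Delete TLB commands, so the explicit
 * probe-then-write sequence used for MMUv3 above is not needed.
 */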
106 static void tlb_entry_erase(unsigned int vaddr_n_asid)
107 {
108 	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
109 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
110 }
111 
112 static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
113 {
114 	write_aux_reg(ARC_REG_TLBPD0, pd0);
115 
116 	if (!is_pae40_enabled()) {
117 		write_aux_reg(ARC_REG_TLBPD1, pd1);
118 	} else {
119 		write_aux_reg(ARC_REG_TLBPD1, pd1 & 0xFFFFFFFF);
120 		write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
121 	}
122 
123 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
124 }
125 
126 #endif
127 
128 /*
129  * Unconditionally (without lookup) erase the entire MMU contents
130  */
131 
132 noinline void local_flush_tlb_all(void)
133 {
134 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
135 	unsigned long flags;
136 	unsigned int entry;
137 	int num_tlb = mmu->sets * mmu->ways;
138 
139 	local_irq_save(flags);
140 
141 	/* Load PD0 and PD1 with template for a Blank Entry */
142 	write_aux_reg(ARC_REG_TLBPD1, 0);
143 
144 	if (is_pae40_enabled())
145 		write_aux_reg(ARC_REG_TLBPD1HI, 0);
146 
147 	write_aux_reg(ARC_REG_TLBPD0, 0);
148 
149 	for (entry = 0; entry < num_tlb; entry++) {
150 		/* write this entry to the TLB */
151 		write_aux_reg(ARC_REG_TLBINDEX, entry);
152 		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
153 	}
154 
155 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
156 		const int stlb_idx = 0x800;
157 
158 		/* Blank sTLB entry */
159 		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);
160 
161 		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
162 			write_aux_reg(ARC_REG_TLBINDEX, entry);
163 			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
164 		}
165 	}
166 
167 	utlb_invalidate();
168 
169 	local_irq_restore(flags);
170 }
171 
172 /*
173  * Flush the entire MM for userland. The fastest way is to move to Next ASID
174  */
175 noinline void local_flush_tlb_mm(struct mm_struct *mm)
176 {
177 	/*
178 	 * Small optimisation courtesy of IA64:
179 	 * flush_mm is called multiple times during fork, exit, munmap etc.
180 	 * Only for fork() do we need to move the parent to a new MMU ctxt;
181 	 * all other cases are NOPs, hence this check.
182 	 */
183 	if (atomic_read(&mm->mm_users) == 0)
184 		return;
185 
186 	/*
187 	 * - Move to a new ASID, but only if the mm is still wired in
188 	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
189 	 *    causing h/w - s/w ASID to get out of sync)
190 	 * - Also, the new get_new_mmu_context() implementation allocates a new
191 	 *   ASID only if one is not already allocated - so destroy the old one first
192 	 */
193 	destroy_context(mm);
194 	if (current->mm == mm)
195 		get_new_mmu_context(mm);
196 }
197 
198 /*
199  * Flush a Range of TLB entries for userland.
200  * @start is inclusive, while @end is exclusive
201  * Difference between this and the kernel range flush:
202  *  -Here the fastest way (if the range is too large) is to move to the next
203  *      ASID without doing any explicit shootdown
204  *  -For the kernel flush, each entry has to be shot down explicitly
205  */
206 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
207 			   unsigned long end)
208 {
209 	const unsigned int cpu = smp_processor_id();
210 	unsigned long flags;
211 
212 	/* If the range @start to @end spans more than 32 TLB entries,
213 	 * it's better to move to a new ASID rather than searching for
214 	 * individual entries and then shooting them down
215 	 *
216 	 * The calc is rough and doesn't account for unaligned parts,
217 	 * since this is heuristic based anyway
218 	 */
219 	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
220 		local_flush_tlb_mm(vma->vm_mm);
221 		return;
222 	}
223 
224 	/*
225 	 * @start moved to page start: this alone suffices for checking the
226 	 * loop end condition below, w/o needing to round @end up to a page boundary
227 	 * e.g. 2000 to 4001 will loop twice anyhow
228 	 */
229 	start &= PAGE_MASK;
230 
231 	local_irq_save(flags);
232 
233 	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
234 		while (start < end) {
235 			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
236 			start += PAGE_SIZE;
237 		}
238 	}
239 
240 	local_irq_restore(flags);
241 }
242 
243 /* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
244  *  @start, @end interpreted as kvaddr
245  * Interestingly, shared TLB entries can also be flushed using just
246  * @start,@end alone (interpreted as user vaddr), although technically SASID
247  * is also needed. However, our smart TLBProbe lookup takes care of that.
248  */
249 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
250 {
251 	unsigned long flags;
252 
253 	/* exactly the same as above, except the TLB entry does not take an ASID */
254 
255 	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
256 		local_flush_tlb_all();
257 		return;
258 	}
259 
260 	start &= PAGE_MASK;
261 
262 	local_irq_save(flags);
263 	while (start < end) {
264 		tlb_entry_erase(start);
265 		start += PAGE_SIZE;
266 	}
267 
268 	local_irq_restore(flags);
269 }
270 
271 /*
272  * Delete the TLB entry in the MMU for a given page (user virtual address)
273  * NOTE: One TLB entry contains the translation for a single PAGE
274  */
275 
276 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
277 {
278 	const unsigned int cpu = smp_processor_id();
279 	unsigned long flags;
280 
281 	/* Note that it is critical that interrupts are DISABLED between
282 	 * checking the ASID and using it to flush the TLB entry
283 	 */
284 	local_irq_save(flags);
285 
286 	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
287 		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
288 	}
289 
290 	local_irq_restore(flags);
291 }
292 
293 #ifdef CONFIG_SMP
294 
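/* Argument bundle for the cross-CPU (IPI) TLB flush helpers below */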
295 struct tlb_args {
296 	struct vm_area_struct *ta_vma;
297 	unsigned long ta_start;
298 	unsigned long ta_end;
299 };
300 
301 static inline void ipi_flush_tlb_page(void *arg)
302 {
303 	struct tlb_args *ta = arg;
304 
305 	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
306 }
307 
308 static inline void ipi_flush_tlb_range(void *arg)
309 {
310 	struct tlb_args *ta = arg;
311 
312 	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
313 }
314 
315 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
316 static inline void ipi_flush_pmd_tlb_range(void *arg)
317 {
318 	struct tlb_args *ta = arg;
319 
320 	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
321 }
322 #endif
323 
324 static inline void ipi_flush_tlb_kernel_range(void *arg)
325 {
326 	struct tlb_args *ta = (struct tlb_args *)arg;
327 
328 	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
329 }
330 
331 void flush_tlb_all(void)
332 {
333 	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
334 }
335 
336 void flush_tlb_mm(struct mm_struct *mm)
337 {
338 	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
339 			 mm, 1);
340 }
341 
342 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
343 {
344 	struct tlb_args ta = {
345 		.ta_vma = vma,
346 		.ta_start = uaddr
347 	};
348 
349 	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
350 }
351 
352 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
353 		     unsigned long end)
354 {
355 	struct tlb_args ta = {
356 		.ta_vma = vma,
357 		.ta_start = start,
358 		.ta_end = end
359 	};
360 
361 	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
362 }
363 
364 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
365 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
366 			 unsigned long end)
367 {
368 	struct tlb_args ta = {
369 		.ta_vma = vma,
370 		.ta_start = start,
371 		.ta_end = end
372 	};
373 
374 	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
375 }
376 #endif
377 
378 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
379 {
380 	struct tlb_args ta = {
381 		.ta_start = start,
382 		.ta_end = end
383 	};
384 
385 	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
386 }
387 #endif
388 
389 /*
390  * Routine to create a TLB entry
391  */
392 void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
393 {
394 	unsigned long flags;
395 	unsigned int asid_or_sasid, rwx;
396 	unsigned long pd0;
397 	phys_addr_t pd1;
398 
399 	/*
400 	 * create_tlb() assumes that current->mm == vma->vm_mm, since
401 	 * -the ASID for the TLB entry is fetched from the MMU ASID reg (valid for curr)
402 	 * -it completes the lazy write to the SASID reg (again valid for curr tsk)
403 	 *
404 	 * Removing the assumption involves
405 	 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
406 	 * -More importantly it makes this handler inconsistent with fast-path
407 	 *  TLB Refill handler which always deals with "current"
408 	 *
409 	 * Let's see the use cases when current->mm != vma->vm_mm and we land here
410 	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
411 	 *     Here VM wants to pre-install a TLB entry for user stack while
412 	 *     current->mm still points to pre-execve mm (hence the condition).
413 	 *     However the stack vaddr is soon relocated (randomization) and
414 	 *     move_page_tables() tries to undo that TLB entry.
415 	 *     Thus not creating TLB entry is not any worse.
416 	 *
417 	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
418 	 *     breakpoint in debugged task. Not creating a TLB now is not
419 	 *     performance critical.
420 	 *
421 	 * Neither of the cases above is worth the code churn.
422 	 */
423 	if (current->active_mm != vma->vm_mm)
424 		return;
425 
426 	local_irq_save(flags);
427 
428 	vaddr &= PAGE_MASK;
429 
430 	/* update this PTE's credentials */
431 	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
432 
433 	/* Create HW TLB(PD0,PD1) from PTE  */
434 
435 	/* ASID for this task */
436 	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
437 
438 	pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
439 
440 	/*
441 	 * ARC MMU provides fully orthogonal access bits for K/U mode,
442 	 * however Linux only saves 1 set to save PTE real-estate
443 	 * Here we convert 3 PTE bits into 6 MMU bits:
444 	 * -Kernel only entries have Kr Kw Kx 0 0 0
445 	 * -User entries have mirrored K and U bits
446 	 */
447 	rwx = pte_val(*ptep) & PTE_BITS_RWX;
448 
449 	if (pte_val(*ptep) & _PAGE_GLOBAL)
450 		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
451 	else
452 		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */
453 
454 	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);
455 
456 	tlb_entry_insert(pd0, pd1);
457 
458 	local_irq_restore(flags);
459 }
460 
461 /*
462  * Called at the end of pagefault, for a userspace mapped page
463  * Called at the end of a page fault, for a userspace mapped page
464  *  -Finalize the delayed D-cache flush of kernel mapping of page due to
465  *  	flush_dcache_page(), copy_user_page()
466  *
467  * Note that flush (when done) involves both WBACK - so physical page is
468  * Note that the flush (when done) involves both WBACK (so the physical page
469  * is in sync) as well as INV (so any non-congruent aliases don't remain)
470 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
471 		unsigned long vaddr_unaligned, pte_t *ptep, unsigned int nr)
472 {
473 	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
474 	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
475 	struct page *page = pfn_to_page(pte_pfn(*ptep));
476 
477 	create_tlb(vma, vaddr, ptep);
478 
479 	if (page == ZERO_PAGE(0)) {
480 		return;
481 	}
482 
483 	/*
484 	 * Exec page : Independent of aliasing/page-color considerations,
485 	 *	       since icache doesn't snoop dcache on ARC, any dirty
486 	 *	       K-mapping of a code page needs to be wback+inv so that
487 	 *	       icache fetch by userspace sees code correctly.
488 	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
489 	 *	       so userspace sees the right data.
490 	 *  (Avoids the flush for Non-exec + congruent mapping case)
491 	 */
492 	if ((vma->vm_flags & VM_EXEC) ||
493 	     addr_not_cache_congruent(paddr, vaddr)) {
494 		struct folio *folio = page_folio(page);
495 		int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
496 		if (dirty) {
497 			unsigned long offset = offset_in_folio(folio, paddr);
498 			nr = folio_nr_pages(folio);
499 			paddr -= offset;
500 			vaddr -= offset;
501 			/* wback + inv dcache lines (K-mapping) */
502 			__flush_dcache_pages(paddr, paddr, nr);
503 
504 			/* invalidate any existing icache lines (U-mapping) */
505 			if (vma->vm_flags & VM_EXEC)
506 				__inv_icache_pages(paddr, vaddr, nr);
507 		}
508 	}
509 }
510 
511 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
512 
513 /*
514  * MMUv4 in HS38x cores supports Super Pages which are basis for Linux THP
515  * support.
516  *
517  * Normal and Super pages can co-exist (of course not overlap) in the TLB, with
518  * a new bit "SZ" in the TLB page descriptor to distinguish between them.
519  * Super Page size is configurable in hardware (4K to 16M), but fixed once the
520  * RTL is built.
521  *
522  * The exact THP size a Linux configuration will support is a function of:
523  *  - MMU page size (typical 8K, RTL fixed)
524  *  - software page walker address split between PGD:PTE:PFN (typical
525  *    11:8:13, but can be changed with 1 line)
526  * So for above default, THP size supported is 8K * (2^8) = 2M
527  *
528  * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
529  * reduces to 1 level (as PTE is folded into PGD and canonically referred
530  * to as PMD).
531  * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
532  */
533 
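/* THP: treat the PMD as a PTE and reuse update_mmu_cache_range() */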
534 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
535 				 pmd_t *pmd)
536 {
537 	pte_t pte = __pte(pmd_val(*pmd));
538 	update_mmu_cache_range(NULL, vma, addr, &pte, HPAGE_PMD_NR);
539 }
540 
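/* Flush the single super-page TLB entry: PD0 carries _PAGE_HW_SZ plus the ASID */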
541 void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
542 			       unsigned long end)
543 {
544 	unsigned int cpu;
545 	unsigned long flags;
546 
547 	local_irq_save(flags);
548 
549 	cpu = smp_processor_id();
550 
551 	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
552 		unsigned int asid = hw_pid(vma->vm_mm, cpu);
553 
554 		/* No need to loop here: this will always be for 1 Huge Page */
555 		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
556 	}
557 
558 	local_irq_restore(flags);
559 }
560 
561 #endif
562 
563 /* Read the MMU Build Configuration Register, decode it and save into
564  * the cpuinfo structure for later use.
565  * No validation is done here, simply read/convert the BCR
566  */
567 void read_decode_mmu_bcr(void)
568 {
569 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
570 	unsigned int tmp;
571 	struct bcr_mmu_3 {
572 #ifdef CONFIG_CPU_BIG_ENDIAN
573 	unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
574 		     u_itlb:4, u_dtlb:4;
575 #else
576 	unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
577 		     ways:4, ver:8;
578 #endif
579 	} *mmu3;
580 
581 	struct bcr_mmu_4 {
582 #ifdef CONFIG_CPU_BIG_ENDIAN
583 	unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
584 		     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
585 #else
586 	/*           DTLB      ITLB      JES        JE         JA      */
587 	unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
588 		     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
589 #endif
590 	} *mmu4;
591 
592 	tmp = read_aux_reg(ARC_REG_MMU_BCR);
593 	mmu->ver = (tmp >> 24);
594 
595 	if (is_isa_arcompact() && mmu->ver == 3) {
596 		mmu3 = (struct bcr_mmu_3 *)&tmp;
597 		mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
598 		mmu->sets = 1 << mmu3->sets;
599 		mmu->ways = 1 << mmu3->ways;
600 		mmu->u_dtlb = mmu3->u_dtlb;
601 		mmu->u_itlb = mmu3->u_itlb;
602 		mmu->sasid = mmu3->sasid;
603 	} else {
604 		mmu4 = (struct bcr_mmu_4 *)&tmp;
605 		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
606 		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
607 		mmu->sets = 64 << mmu4->n_entry;
608 		mmu->ways = mmu4->n_ways * 2;
609 		mmu->u_dtlb = mmu4->u_dtlb * 4;
610 		mmu->u_itlb = mmu4->u_itlb * 4;
611 		mmu->sasid = mmu4->sasid;
612 		pae_exists = mmu->pae = mmu4->pae;
613 	}
614 }
615 
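/* Render the MMU details into @buf; printed at boot via arc_mmu_init() */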
616 char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
617 {
618 	int n = 0;
619 	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
620 	char super_pg[64] = "";
621 
622 	if (p_mmu->s_pg_sz_m)
623 		scnprintf(super_pg, 64, "%dM Super Page %s",
624 			  p_mmu->s_pg_sz_m,
625 			  IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
626 
627 	n += scnprintf(buf + n, len - n,
628 		      "MMU [v%x]\t: %dk PAGE, %s, swalk %d lvl, JTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
629 		       p_mmu->ver, p_mmu->pg_sz_k, super_pg,  CONFIG_PGTABLE_LEVELS,
630 		       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
631 		       p_mmu->u_dtlb, p_mmu->u_itlb,
632 		       IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
633 
634 	return buf;
635 }
636 
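/* PAE40 present in hardware but not enabled for/by this kernel */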
637 int pae40_exist_but_not_enab(void)
638 {
639 	return pae_exists && !is_pae40_enabled();
640 }
641 
642 void arc_mmu_init(void)
643 {
644 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
645 	char str[256];
646 	int compat = 0;
647 
648 	pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
649 
650 	/*
651 	 * Can't be done in processor.h due to header include dependencies
652 	 */
653 	BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
654 
655 	/*
656 	 * stack top alignment sanity check;
657 	 * Can't be done in processor.h due to header include dependencies
658 	 */
659 	BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
660 
661 	/*
662 	 * Ensure that MMU features assumed by kernel exist in hardware.
663 	 *  - For older ARC700 cpus, only v3 supported
664 	 *  - For HS cpus, v4 was baseline and v5 is backwards compatible
665 	 *    (will run older software).
666 	 */
667 	if (is_isa_arcompact() && mmu->ver == 3)
668 		compat = 1;
669 	else if (is_isa_arcv2() && mmu->ver >= 4)
670 		compat = 1;
671 
672 	if (!compat)
673 		panic("MMU ver %d doesn't match kernel built for\n", mmu->ver);
674 
675 	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
676 		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
677 
678 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
679 	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
680 		panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
681 		      (unsigned long)TO_MB(HPAGE_PMD_SIZE));
682 
683 	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
684 		panic("Hardware doesn't support PAE40\n");
685 
686 	/* Enable the MMU with ASID 0 */
687 	mmu_setup_asid(NULL, 0);
688 
689 	/* cache the pgd pointer in MMU SCRATCH reg (ARCv2 only) */
690 	mmu_setup_pgd(NULL, swapper_pg_dir);
691 
692 	if (pae40_exist_but_not_enab())
693 		write_aux_reg(ARC_REG_TLBPD1HI, 0);
694 }
695 
696 /*
697  * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
698  * The mapping is Column-first.
699  *		---------------------	-----------
700  *		|way0|way1|way2|way3|	|way0|way1|
701  *		---------------------	-----------
702  * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
703  * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
704  *		~		    ~	~	  ~
705  * [set127]	| 508| 509| 510| 511|	| 254| 255|
706  *		---------------------	-----------
707  * For normal operations we don't (must not) care how the above works since
708  * the MMU cmd getIndex(vaddr) abstracts that out.
709  * However, for walking the WAYS of a SET, we need to know this
710  */
711 #define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
712 
713 /* Handling of a duplicate PD (TLB entry) in the MMU.
714  * -Could be due to buggy customer tapeouts or obscure kernel bugs
715  * -The MMU complains not at the time of duplicate PD installation, but at the
716  *      time of a lookup matching multiple ways.
717  * -Ideally these should never happen - but if they do - work around it by
718  *      deleting the duplicate entry.
719  * -Knob to be verbose about it (TODO: hook it up to debugfs)
720  */
721 volatile int dup_pd_silent; /* Be silent about it or complain (default) */
722 
723 void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
724 			  struct pt_regs *regs)
725 {
726 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
727 	unsigned long flags;
728 	int set, n_ways = mmu->ways;
729 
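	/* pd0[] below holds at most 4 ways per set */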
730 	n_ways = min(n_ways, 4);
731 	BUG_ON(mmu->ways > 4);
732 
733 	local_irq_save(flags);
734 
735 	/* loop through all sets of the TLB */
736 	for (set = 0; set < mmu->sets; set++) {
737 
738 		int is_valid, way;
739 		unsigned int pd0[4];
740 
741 		/* read out all the ways of current set */
742 		for (way = 0, is_valid = 0; way < n_ways; way++) {
743 			write_aux_reg(ARC_REG_TLBINDEX,
744 					  SET_WAY_TO_IDX(mmu, set, way));
745 			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
746 			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
747 			is_valid |= pd0[way] & _PAGE_PRESENT;
748 			pd0[way] &= PAGE_MASK;
749 		}
750 
751 		/* If all the WAYS in SET are empty, skip to next SET */
752 		if (!is_valid)
753 			continue;
754 
755 		/* Scan the set for duplicate ways: needs a nested loop */
756 		for (way = 0; way < n_ways - 1; way++) {
757 
758 			int n;
759 
760 			if (!pd0[way])
761 				continue;
762 
763 			for (n = way + 1; n < n_ways; n++) {
764 				if (pd0[way] != pd0[n])
765 					continue;
766 
767 				if (!dup_pd_silent)
768 					pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
769 						pd0[way], set, way, n);
770 
771 				/*
772 				 * clear entry @way and not @n.
773 				 * This is critical to our optimised loop
774 				 */
775 				pd0[way] = 0;
776 				write_aux_reg(ARC_REG_TLBINDEX,
777 						SET_WAY_TO_IDX(mmu, set, way));
778 				__tlb_entry_erase();
779 			}
780 		}
781 	}
782 
783 	local_irq_restore(flags);
784 }
785