// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/sparc/mm/leon_mm.c
 *
 * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research
 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
 *
 * Do the SRMMU probe in software.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/asi.h>
#include <asm/leon.h>
#include <asm/tlbflush.h>

#include "mm_32.h"

int leon_flush_during_switch = 1;
static int srmmu_swprobe_trace;
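
/*
 * Read the SRMMU context table pointer register through the LEON MMU
 * register ASI.  The register holds the context table's physical
 * address shifted right by four bits, so mask off the low bits and
 * shift back to recover the address.
 */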
static inline unsigned long leon_get_ctable_ptr(void)
{
        unsigned int retval;

        __asm__ __volatile__("lda [%1] %2, %0\n\t" :
                             "=r" (retval) :
                             "r" (SRMMU_CTXTBL_PTR),
                             "i" (ASI_LEON_MMUREGS));
        return (retval & SRMMU_CTX_PMASK) << 4;
}
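
/*
 * Walk the SRMMU page tables in software using physical (MMU bypass)
 * loads and return the PTE that maps vaddr in the current context,
 * or 0 if the walk hits an invalid entry.  If paddr is non-NULL it
 * receives the translated physical address.  The walk may terminate
 * with a PTE at any level; the trace messages count levels down from
 * 3 (a PTE directly in the context table) to 0 (a 4 KB page).
 */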
unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
{
        unsigned int ctxtbl;
        unsigned int pgd, pmd, ped;
        unsigned int ptr;
        unsigned int lvl, pte;
        unsigned int ctx;
        unsigned int paddr_calc;

        if (srmmu_swprobe_trace)
                printk(KERN_INFO "swprobe: trace on\n");

        ctxtbl = leon_get_ctable_ptr();
        if (!ctxtbl) {
                if (srmmu_swprobe_trace)
                        printk(KERN_INFO "swprobe: leon_get_ctable_ptr returned 0=>0\n");
                return 0;
        }
        if (!_pfn_valid(PFN(ctxtbl))) {
                if (srmmu_swprobe_trace)
                        printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n",
                               PFN(ctxtbl));
                return 0;
        }

        ctx = srmmu_get_context();
        if (srmmu_swprobe_trace)
                printk(KERN_INFO "swprobe: --- ctx (%x) ---\n", ctx);
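
        /*
         * The context table entry is either a PTD pointing at the
         * first-level page table, or a PTE mapping the entire
         * context.
         */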
        pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4));

        if ((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
                if (srmmu_swprobe_trace)
                        printk(KERN_INFO "swprobe: pgd is entry level 3\n");
                lvl = 3;
                pte = pgd;
                goto ready;
        }
        if ((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD) {
                if (srmmu_swprobe_trace)
                        printk(KERN_INFO "swprobe: pgd is invalid => 0\n");
                return 0;
        }

        if (srmmu_swprobe_trace)
                printk(KERN_INFO "swprobe: --- pgd (%x) ---\n", pgd);
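
        /*
         * A PTD holds the next table's physical address shifted right
         * by four bits: mask off the entry-type bits, shift back, and
         * index with the matching virtual-address field (one word per
         * entry).
         */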
        ptr = (pgd & SRMMU_PTD_PMASK) << 4;
        ptr += ((vaddr >> LEON_PGD_SH) & LEON_PGD_M) * 4;
        if (!_pfn_valid(PFN(ptr)))
                return 0;

        pmd = LEON_BYPASS_LOAD_PA(ptr);
        if ((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
                if (srmmu_swprobe_trace)
                        printk(KERN_INFO "swprobe: pmd is entry level 2\n");
                lvl = 2;
                pte = pmd;
                goto ready;
        }
        if ((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD) {
                if (srmmu_swprobe_trace)
                        printk(KERN_INFO "swprobe: pmd is invalid => 0\n");
                return 0;
        }

        if (srmmu_swprobe_trace)
                printk(KERN_INFO "swprobe: --- pmd (%x) ---\n", pmd);

        ptr = (pmd & SRMMU_PTD_PMASK) << 4;
        ptr += ((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4;
        if (!_pfn_valid(PFN(ptr))) {
                if (srmmu_swprobe_trace)
                        printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n",
                               PFN(ptr));
                return 0;
        }

        ped = LEON_BYPASS_LOAD_PA(ptr);

        if ((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
                if (srmmu_swprobe_trace)
                        printk(KERN_INFO "swprobe: ped is entry level 1\n");
                lvl = 1;
                pte = ped;
                goto ready;
        }
        if ((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD) {
                if (srmmu_swprobe_trace)
                        printk(KERN_INFO "swprobe: ped is invalid => 0\n");
                return 0;
        }

        if (srmmu_swprobe_trace)
                printk(KERN_INFO "swprobe: --- ped (%x) ---\n", ped);

        ptr = (ped & SRMMU_PTD_PMASK) << 4;
        ptr += ((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4;
        if (!_pfn_valid(PFN(ptr)))
                return 0;

        ptr = LEON_BYPASS_LOAD_PA(ptr);
        if ((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
                if (srmmu_swprobe_trace)
                        printk(KERN_INFO "swprobe: ptr is entry level 0\n");
                lvl = 0;
                pte = ptr;
                goto ready;
        }
        if (srmmu_swprobe_trace)
                printk(KERN_INFO "swprobe: ptr is invalid => 0\n");
        return 0;
ready:
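        /*
         * PTE bits 31:8 hold the physical page number (the physical
         * address shifted right by 12), so (pte & ~0xff) << 4 is the
         * physical base of the mapping.  OR in the untranslated low
         * bits of vaddr for the mapping size of the level we stopped
         * at; a level-3 hit is simply passed through as vaddr.
         */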
        switch (lvl) {
        case 0:
                paddr_calc =
                    (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4);
                break;
        case 1:
                paddr_calc =
                    (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4);
                break;
        case 2:
                paddr_calc =
                    (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4);
                break;
        default:
        case 3:
                paddr_calc = vaddr;
                break;
        }
        if (srmmu_swprobe_trace)
                printk(KERN_INFO "swprobe: paddr %x\n", paddr_calc);
        if (paddr)
                *paddr = paddr_calc;
        return pte;
}
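
/*
 * Sketch of a typical use (hypothetical caller, for illustration only):
 *
 *      unsigned long pa, pte;
 *
 *      pte = leon_swprobe(vaddr, &pa);
 *      if (pte != 0)
 *              ... vaddr is mapped and pa holds its physical address ...
 */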
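
/*
 * On LEON the "flush" instruction flushes the instruction cache, and a
 * store-alternate to the diagnostic flush ASI flushes the data cache.
 */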
void leon_flush_icache_all(void)
{
        __asm__ __volatile__(" flush ");        /* iflush */
}

void leon_flush_dcache_all(void)
{
        __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
                             "i"(ASI_LEON_DFLUSH) : "memory");
}
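
/*
 * Flush the caches for one page's mapping: the instruction cache only
 * needs flushing when the mapping is executable; the data cache is
 * always flushed.
 */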
void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
{
        if (vma->vm_flags & VM_EXEC)
                leon_flush_icache_all();
        leon_flush_dcache_all();
}

void leon_flush_cache_all(void)
{
        __asm__ __volatile__(" flush ");        /* iflush */
        __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
                             "i"(ASI_LEON_DFLUSH) : "memory");
}
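
/*
 * Flush the entire TLB.  The store-alternate address 0x400 puts 4,
 * i.e. "flush entire", in the SRMMU flush-type field (address bits
 * 11:8).  The virtually tagged caches are flushed first.
 */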
void leon_flush_tlb_all(void)
{
        leon_flush_cache_all();
        __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400),
                             "i"(ASI_LEON_MMUFLUSH) : "memory");
}

/* get all cache regs */
void leon3_getCacheRegs(struct leon3_cacheregs *regs)
{
        unsigned long ccr, iccr, dccr;

        if (!regs)
                return;
        /* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */
        __asm__ __volatile__("lda [%%g0] %3, %0\n\t"
                             "mov 0x08, %%g1\n\t"
                             "lda [%%g1] %3, %1\n\t"
                             "mov 0x0c, %%g1\n\t"
                             "lda [%%g1] %3, %2\n\t"
                             : "=r"(ccr), "=r"(iccr), "=r"(dccr) /* output */
                             : "i"(ASI_LEON_CACHEREGS)           /* input */
                             : "g1"                              /* clobber list */
        );
        regs->ccr = ccr;
        regs->iccr = iccr;
        regs->dccr = dccr;
}

/*
 * Because the cache is virtually tagged, we must inspect the cache
 * configuration to decide whether flushing on every context switch
 * can be skipped.
 *
 * LEON2 and LEON3 report their cache configuration differently.
 */
int __init leon_flush_needed(void)
{
        int flush_needed = -1;
        unsigned int ssize, sets;
        char *setStr[4] =
            { "direct mapped", "2-way associative", "3-way associative",
              "4-way associative"
        };
        /* leon 3 */
        struct leon3_cacheregs cregs;

        leon3_getCacheRegs(&cregs);
        sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24;
        /* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */
        ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20);

        printk(KERN_INFO "CACHE: %s cache, set size %dk\n",
               sets > 3 ? "unknown" : setStr[sets], ssize);
        if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) {
                /* Set size <= page size => flush on every
                   context switch not needed. */
                flush_needed = 0;
                printk(KERN_INFO "CACHE: not flushing on every context switch\n");
        }
        return flush_needed;
}
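
/*
 * Called on a context switch: the TLB is always flushed; the caches
 * are flushed only when leon_flush_needed() decided at boot that the
 * cache configuration requires it.
 */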
void leon_switch_mm(void)
{
        flush_tlb_mm((void *)0);
        if (leon_flush_during_switch)
                leon_flush_cache_all();
}
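
/*
 * sparc32 cache/TLB operations for LEON.  There are no per-page or
 * per-range flush primitives here; every operation falls back to
 * flushing the whole cache and/or TLB.
 */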
static void leon_flush_cache_mm(struct mm_struct *mm)
{
        leon_flush_cache_all();
}

static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
        leon_flush_pcache_all(vma, page);
}

static void leon_flush_cache_range(struct vm_area_struct *vma,
                                   unsigned long start,
                                   unsigned long end)
{
        leon_flush_cache_all();
}

static void leon_flush_tlb_mm(struct mm_struct *mm)
{
        leon_flush_tlb_all();
}

static void leon_flush_tlb_page(struct vm_area_struct *vma,
                                unsigned long page)
{
        leon_flush_tlb_all();
}

static void leon_flush_tlb_range(struct vm_area_struct *vma,
                                 unsigned long start,
                                 unsigned long end)
{
        leon_flush_tlb_all();
}

static void leon_flush_page_to_ram(unsigned long page)
{
        leon_flush_cache_all();
}

static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
{
        leon_flush_cache_all();
}

static void leon_flush_page_for_dma(unsigned long page)
{
        leon_flush_dcache_all();
}

void __init poke_leonsparc(void)
{
}

static const struct sparc32_cachetlb_ops leon_ops = {
        .cache_all = leon_flush_cache_all,
        .cache_mm = leon_flush_cache_mm,
        .cache_page = leon_flush_cache_page,
        .cache_range = leon_flush_cache_range,
        .tlb_all = leon_flush_tlb_all,
        .tlb_mm = leon_flush_tlb_mm,
        .tlb_page = leon_flush_tlb_page,
        .tlb_range = leon_flush_tlb_range,
        .page_to_ram = leon_flush_page_to_ram,
        .sig_insns = leon_flush_sig_insns,
        .page_for_dma = leon_flush_page_for_dma,
};
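
/*
 * Called during SRMMU setup: install the LEON cache/TLB operations
 * and decide from the cache configuration whether every context
 * switch must also flush the caches.
 */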
void __init init_leon(void)
{
        srmmu_name = "LEON";
        sparc32_cachetlb_ops = &leon_ops;
        poke_srmmu = poke_leonsparc;

        leon_flush_during_switch = leon_flush_needed();
}