xref: /linux/arch/loongarch/mm/cache.c (revision 4359a011e259a4608afc7fb3635370c9d4ba5943)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscalls.h>

#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/dma.h>
#include <asm/loongarch.h>
#include <asm/processor.h>
#include <asm/setup.h>

/*
 * LoongArch maintains ICache/DCache coherency in hardware; all we need
 * here is an "ibar" to avoid instruction hazards.
 */
void local_flush_icache_range(unsigned long start, unsigned long end)
{
	asm volatile ("\tibar 0\n"::);
}
EXPORT_SYMBOL(local_flush_icache_range);
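
/*
 * Illustrative sketch of a typical caller (dst, new_insns and len are
 * hypothetical names), e.g. code that has just patched kernel text:
 *
 *	memcpy(dst, new_insns, len);
 *	local_flush_icache_range((unsigned long)dst,
 *				 (unsigned long)dst + len);
 *
 * The start/end arguments are accepted for API compatibility but are not
 * needed here, since a single "ibar 0" synchronizes the whole instruction
 * stream.
 */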

void cache_error_setup(void)
{
	extern char __weak except_vec_cex;
	set_merr_handler(0x0, &except_vec_cex, 0x80);
}

static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long vcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

static char *way_string[] = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
	"9-way", "10-way", "11-way", "12-way",
	"13-way", "14-way", "15-way", "16-way",
};

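/*
 * CPUCFG17..CPUCFG20 describe the L1I, L1D, L2 (victim) and L3 (secondary)
 * caches with the same layout: the SIZE and SETS fields hold log2 of the
 * line size and of the number of sets, and WAYS holds the way count minus
 * one. Worked example with hypothetical L1I field values SIZE=6, SETS=8,
 * WAYS=3: 64-byte lines, 256 sets, 4 ways, i.e. 64 * 256 * 4 = 64 KiB in
 * total with a 16 KiB way size, which is what the probe functions below
 * compute.
 */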
static void probe_pcache(void)
{
	struct cpuinfo_loongarch *c = &current_cpu_data;
	unsigned int lsize, sets, ways;
	unsigned int config;

	config = read_cpucfg(LOONGARCH_CPUCFG17);
	lsize = 1 << ((config & CPUCFG17_L1I_SIZE_M) >> CPUCFG17_L1I_SIZE);
	sets  = 1 << ((config & CPUCFG17_L1I_SETS_M) >> CPUCFG17_L1I_SETS);
	ways  = ((config & CPUCFG17_L1I_WAYS_M) >> CPUCFG17_L1I_WAYS) + 1;

	c->icache.linesz = lsize;
	c->icache.sets = sets;
	c->icache.ways = ways;
	icache_size = sets * ways * lsize;
	c->icache.waysize = icache_size / c->icache.ways;

	config = read_cpucfg(LOONGARCH_CPUCFG18);
	lsize = 1 << ((config & CPUCFG18_L1D_SIZE_M) >> CPUCFG18_L1D_SIZE);
	sets  = 1 << ((config & CPUCFG18_L1D_SETS_M) >> CPUCFG18_L1D_SETS);
	ways  = ((config & CPUCFG18_L1D_WAYS_M) >> CPUCFG18_L1D_WAYS) + 1;

	c->dcache.linesz = lsize;
	c->dcache.sets = sets;
	c->dcache.ways = ways;
	dcache_size = sets * ways * lsize;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->options |= LOONGARCH_CPU_PREFETCH;

	pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
		icache_size >> 10, way_string[c->icache.ways], "VIPT", c->icache.linesz);

	pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
		dcache_size >> 10, way_string[c->dcache.ways], "VIPT", "no aliases", c->dcache.linesz);
}

static void probe_vcache(void)
{
	struct cpuinfo_loongarch *c = &current_cpu_data;
	unsigned int lsize, sets, ways;
	unsigned int config;

	config = read_cpucfg(LOONGARCH_CPUCFG19);
	lsize = 1 << ((config & CPUCFG19_L2_SIZE_M) >> CPUCFG19_L2_SIZE);
	sets  = 1 << ((config & CPUCFG19_L2_SETS_M) >> CPUCFG19_L2_SETS);
	ways  = ((config & CPUCFG19_L2_WAYS_M) >> CPUCFG19_L2_WAYS) + 1;

	c->vcache.linesz = lsize;
	c->vcache.sets = sets;
	c->vcache.ways = ways;
	vcache_size = lsize * sets * ways;
	c->vcache.waysize = vcache_size / c->vcache.ways;

	pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
		vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
}

static void probe_scache(void)
{
	struct cpuinfo_loongarch *c = &current_cpu_data;
	unsigned int lsize, sets, ways;
	unsigned int config;

	config = read_cpucfg(LOONGARCH_CPUCFG20);
	lsize = 1 << ((config & CPUCFG20_L3_SIZE_M) >> CPUCFG20_L3_SIZE);
	sets  = 1 << ((config & CPUCFG20_L3_SETS_M) >> CPUCFG20_L3_SETS);
	ways  = ((config & CPUCFG20_L3_WAYS_M) >> CPUCFG20_L3_WAYS) + 1;

	c->scache.linesz = lsize;
	c->scache.sets = sets;
	c->scache.ways = ways;
	/* The scache (L3) is shared by all (e.g. 4) cores, so this is the total size */
	scache_size = lsize * sets * ways;
	c->scache.waysize = scache_size / c->scache.ways;

	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
}

void cpu_cache_init(void)
{
	probe_pcache();
	probe_vcache();
	probe_scache();

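	/*
	 * The D-cache is reported above as having no aliases, so shared
	 * mappings need only page alignment; no cache-colouring constraint
	 * is imposed on mmap addresses.
	 */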
	shm_align_mask = PAGE_SIZE - 1;
}

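/*
 * protection_map[] translates the sixteen combinations of
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED into LoongArch PTE protection bits;
 * DECLARE_VM_GET_PAGE_PROT below emits the vm_get_page_prot() helper that
 * indexes this table. Note that private writable entries omit _PAGE_WRITE,
 * so the first write faults and is handled as copy-on-write; only shared
 * writable mappings get _PAGE_WRITE up front.
 */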
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= __pgprot(_CACHE_CC | _PAGE_USER |
								   _PAGE_PROTNONE | _PAGE_NO_EXEC |
								   _PAGE_NO_READ),
	[VM_READ]					= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_WRITE]					= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_WRITE | VM_READ]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_EXEC]					= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_EXEC | VM_READ]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_EXEC | VM_WRITE]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_EXEC | VM_WRITE | VM_READ]			= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_SHARED]					= __pgprot(_CACHE_CC | _PAGE_USER |
								   _PAGE_PROTNONE | _PAGE_NO_EXEC |
								   _PAGE_NO_READ),
	[VM_SHARED | VM_READ]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_SHARED | VM_WRITE]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC | _PAGE_WRITE),
	[VM_SHARED | VM_WRITE | VM_READ]		= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC | _PAGE_WRITE),
	[VM_SHARED | VM_EXEC]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_SHARED | VM_EXEC | VM_READ]			= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_SHARED | VM_EXEC | VM_WRITE]		= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_WRITE),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_WRITE)
};
DECLARE_VM_GET_PAGE_PROT
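
/*
 * Illustrative (simplified) use of the generated helper by generic mm code
 * when setting up a VMA:
 *
 *	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 */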