xref: /linux/arch/mips/mm/c-octeon.c (revision f11cbd74c5ff3614f6390b4de67a6ffdc614c378)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>

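/*
 * Per-core Dcache error state, saved by the low-level cache error
 * handler (except_vec2_octeon, installed below) and reported/cleared
 * in cache_parity_error_octeon().
 */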
unsigned long long cache_err_dcache[NR_CPUS];

/**
 * Octeon automatically flushes the dcache on tlb changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed.
 *
 * @addr:   Address of the page to flush (unused)
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Nothing to do */
}

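/*
 * A single synci is sufficient to invalidate the local icache on
 * Octeon, so the range-based helpers below don't iterate over lines.
 */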
static inline void octeon_local_flush_icache(void)
{
	asm volatile ("synci 0($0)");
}

/*
 * Flush the local I-cache for the specified range. The whole local
 * icache is flushed, so @start and @end are unused.
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	octeon_local_flush_icache();
}

/**
 * Flush caches as necessary for all cores affected by a
 * vma. If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

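	/*
	 * Order any newly written instructions before the local flush
	 * and the cross-core invalidate requests below.
	 */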
	mb();
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on
	 */
	if (vma)
		mask = *mm_cpumask(vma->vm_mm);
	else
		mask = cpu_online_map;
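	/* The local icache was already flushed above, so skip this core. */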
	cpu_clear(cpu, mask);
	for_each_cpu_mask(cpu, mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

	preempt_enable();
#endif
}


/**
 * Called to flush the icache on all cores
 */
static void octeon_flush_icache_all(void)
{
	octeon_flush_icache_all_cores(NULL);
}


/**
 * Called to flush all memory associated with a memory
 * context.
 *
 * @mm:     Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * According to the R4K version of this file, CPUs without
	 * dcache aliases don't need to do anything here
	 */
}


/**
 * Flush a range of kernel addresses out of the icache on all cores.
 *
 * @start:  Start of the range to flush
 * @end:    End of the range to flush
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
	octeon_flush_icache_all_cores(NULL);
}


/**
 * Flush the icache for a trampoline. These are used for interrupt
 * and exception hooking.
 *
 * @addr:   Address to flush
 */
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	octeon_flush_icache_all_cores(vma);
}


/**
 * Flush a range out of a vma
 *
 * @vma:    VMA to flush
 * @start:  Start of the range to flush
 * @end:    End of the range to flush
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}


/**
 * Flush a specific page of a vma
 *
 * @vma:    VMA to flush page for
 * @page:   Page to flush
 * @pfn:    Page frame number of the page
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
				    unsigned long page, unsigned long pfn)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}


/**
 * Probe Octeon's caches
 *
 */
static void __cpuinit probe_octeon(void)
{
	unsigned long icache_size;
	unsigned long dcache_size;
	unsigned int config1;
	struct cpuinfo_mips *c = &current_cpu_data;

	switch (c->cputype) {
	case CPU_CAVIUM_OCTEON:
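		/*
		 * Decode the I-cache geometry from the standard MIPS
		 * Config1 fields: IL (line size), IS (sets per way) and
		 * IA (associativity).
		 */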
		config1 = read_c0_config1();
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size =
			c->icache.sets * c->icache.ways * c->icache.linesz;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
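		/*
		 * The Dcache geometry is not probed: 128-byte lines, 64 ways,
		 * and one set on CN3XXX or two sets on later models.
		 */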
		c->dcache.linesz = 128;
		if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
		else
			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
		c->dcache.ways = 64;
		dcache_size =
			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		panic("Unsupported Cavium Networks CPU type\n");
		break;
	}

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	if (smp_processor_id() == 0) {
		pr_notice("Primary instruction cache %ldkB, %s, %d way, "
			  "%d sets, linesize %d bytes.\n",
			  icache_size >> 10,
			  cpu_has_vtag_icache ?
				"virtually tagged" : "physically tagged",
			  c->icache.ways, c->icache.sets, c->icache.linesz);

		pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
			  "linesize %d bytes.\n",
			  dcache_size >> 10, c->dcache.ways,
			  c->dcache.sets, c->dcache.linesz);
	}
}


/**
 * Set up the Octeon cache flush routines
 *
 */
void __cpuinit octeon_cache_init(void)
{
	extern unsigned long ebase;
	extern char except_vec2_octeon;

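	/*
	 * Install the Octeon-specific handler at the cache error vector
	 * (ebase + 0x100) and flush the icache so the new handler is
	 * visible before any cache error can occur.
	 */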
	memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80);
	octeon_flush_cache_sigtramp(ebase + 0x100);

	probe_octeon();

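	/*
	 * The caches behave as physically tagged, so there are no aliases;
	 * shared mappings only need to be page aligned.
	 */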
	shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all			= octeon_flush_icache_all;
	__flush_cache_all		= octeon_flush_icache_all;
	flush_cache_mm			= octeon_flush_cache_mm;
	flush_cache_page		= octeon_flush_cache_page;
	flush_cache_range		= octeon_flush_cache_range;
	flush_cache_sigtramp		= octeon_flush_cache_sigtramp;
	flush_icache_all		= octeon_flush_icache_all;
	flush_data_cache_page		= octeon_flush_data_cache_page;
	flush_icache_range		= octeon_flush_icache_range;
	local_flush_icache_range	= local_octeon_flush_icache_range;

	build_clear_page();
	build_copy_page();
}

/**
 * Handle a cache error exception.
 *
 * @non_recoverable: Non-zero if the error cannot be recovered from
 */
static void cache_parity_error_octeon(int non_recoverable)
{
	unsigned long coreid = cvmx_get_core_num();
	uint64_t icache_err = read_octeon_c0_icacheerr();

	pr_err("Cache error exception:\n");
	pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
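	/* The low bit of each error word flags a recorded error. */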
	if (icache_err & 1) {
		pr_err("CacheErr (Icache) == %llx\n",
		       (unsigned long long)icache_err);
		write_octeon_c0_icacheerr(0);
	}
	if (cache_err_dcache[coreid] & 1) {
		pr_err("CacheErr (Dcache) == %llx\n",
		       (unsigned long long)cache_err_dcache[coreid]);
		cache_err_dcache[coreid] = 0;
	}

	if (non_recoverable)
		panic("Can't handle cache error: nested exception");
}

/**
 * Called when the exception is recoverable.
 */
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
	cache_parity_error_octeon(0);
}

/**
 * Called when the exception is not recoverable.
 */
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	cache_parity_error_octeon(1);
}
309