xref: /linux/arch/mips/mm/c-octeon.c (revision 62597c60816967100243338421782469b831563d)
15b3b1688SDavid Daney /*
25b3b1688SDavid Daney  * This file is subject to the terms and conditions of the GNU General Public
35b3b1688SDavid Daney  * License.  See the file "COPYING" in the main directory of this archive
45b3b1688SDavid Daney  * for more details.
55b3b1688SDavid Daney  *
65b3b1688SDavid Daney  * Copyright (C) 2005-2007 Cavium Networks
75b3b1688SDavid Daney  */
8f65aad41SRalf Baechle #include <linux/export.h>
95b3b1688SDavid Daney #include <linux/init.h>
105b3b1688SDavid Daney #include <linux/kernel.h>
115b3b1688SDavid Daney #include <linux/sched.h>
12631330f5SRalf Baechle #include <linux/smp.h>
135b3b1688SDavid Daney #include <linux/mm.h>
145b3b1688SDavid Daney #include <linux/bitops.h>
155b3b1688SDavid Daney #include <linux/cpu.h>
165b3b1688SDavid Daney #include <linux/io.h>
175b3b1688SDavid Daney 
185b3b1688SDavid Daney #include <asm/bcache.h>
195b3b1688SDavid Daney #include <asm/bootinfo.h>
205b3b1688SDavid Daney #include <asm/cacheops.h>
215b3b1688SDavid Daney #include <asm/cpu-features.h>
225b3b1688SDavid Daney #include <asm/page.h>
235b3b1688SDavid Daney #include <asm/pgtable.h>
245b3b1688SDavid Daney #include <asm/r4kcache.h>
25586016ebSDavid Daney #include <asm/traps.h>
265b3b1688SDavid Daney #include <asm/mmu_context.h>
275b3b1688SDavid Daney #include <asm/war.h>
285b3b1688SDavid Daney 
295b3b1688SDavid Daney #include <asm/octeon/octeon.h>
305b3b1688SDavid Daney 
315b3b1688SDavid Daney unsigned long long cache_err_dcache[NR_CPUS];
32f65aad41SRalf Baechle EXPORT_SYMBOL_GPL(cache_err_dcache);
335b3b1688SDavid Daney 
/*
 * Octeon hardware flushes the dcache on TLB changes, so from Linux's
 * point of view it behaves like a physically tagged cache: there is
 * never anything to write back or invalidate for a single page.
 *
 * @addr: virtual address of the page (ignored)
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Hardware keeps the dcache coherent; intentionally empty. */
}
445b3b1688SDavid Daney 
/*
 * Invalidate the local core's icache with a single "synci".  Callers in
 * this file (e.g. local_octeon_flush_icache_range) rely on one synci
 * being sufficient to flush the whole local icache regardless of the
 * address range being flushed.
 */
static inline void octeon_local_flush_icache(void)
{
	asm volatile ("synci 0($0)");
}
495b3b1688SDavid Daney 
/*
 * Flush local I-cache for the specified range.
 *
 * @start and @end are deliberately ignored: the entire local icache is
 * invalidated, which covers any range (over-flushing, never under-).
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	octeon_local_flush_icache();
}
585b3b1688SDavid Daney 
/*
 * Flush the icache on every core that may hold stale lines for @vma.
 * The local core is flushed directly; remote cores are flushed via IPI.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int core;
	cpumask_t targets;
#endif

	/* Make prior stores visible before flushing, then do our core. */
	mb();
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	preempt_disable();
	core = smp_processor_id();

	/*
	 * With a vma we only need to hit the cores its mm has run on;
	 * without one, every online core gets an IPI.
	 */
	targets = vma ? *mm_cpumask(vma->vm_mm) : *cpu_online_mask;
	cpumask_clear_cpu(core, &targets);	/* local core already flushed */
	for_each_cpu(core, &targets)
		octeon_send_ipi_single(core, SMP_ICACHE_FLUSH);

	preempt_enable();
#endif
}
945b3b1688SDavid Daney 
955b3b1688SDavid Daney 
/*
 * Called to flush the icache on all cores (local core plus IPI to the
 * rest); a NULL vma means "no mm restriction" to the worker.
 */
static void octeon_flush_icache_all(void)
{
	octeon_flush_icache_all_cores(NULL);
}
1035b3b1688SDavid Daney 
1045b3b1688SDavid Daney 
/*
 * Called to flush all memory associated with a memory context.
 * Intentionally a no-op on Octeon.
 *
 * @mm:	    Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * According to the R4K version of this file, CPUs without
	 * dcache aliases don't need to do anything here
	 */
}
1185b3b1688SDavid Daney 
1195b3b1688SDavid Daney 
/*
 * Flush a range of kernel addresses out of the icache.
 *
 * The range arguments are ignored: the entire icache of every core is
 * invalidated, which is sufficient (if more than necessary) for any
 * range.
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
	octeon_flush_icache_all_cores(NULL);
}
1285b3b1688SDavid Daney 
1295b3b1688SDavid Daney 
/*
 * Flush the icache for a trampoline. These are used for interrupt
 * and exception hooking.
 *
 * @addr:   Address to flush
 */
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
	struct vm_area_struct *vma;

	/*
	 * NOTE(review): find_vma() requires the caller to hold the mm's
	 * mmap lock — verify against callers of flush_cache_sigtramp.
	 */
	vma = find_vma(current->mm, addr);
	octeon_flush_icache_all_cores(vma);
}
1435b3b1688SDavid Daney 
1445b3b1688SDavid Daney 
1455b3b1688SDavid Daney /**
1465b3b1688SDavid Daney  * Flush a range out of a vma
1475b3b1688SDavid Daney  *
1485b3b1688SDavid Daney  * @vma:    VMA to flush
1495b3b1688SDavid Daney  * @start:
1505b3b1688SDavid Daney  * @end:
1515b3b1688SDavid Daney  */
1525b3b1688SDavid Daney static void octeon_flush_cache_range(struct vm_area_struct *vma,
1535b3b1688SDavid Daney 				     unsigned long start, unsigned long end)
1545b3b1688SDavid Daney {
1555b3b1688SDavid Daney 	if (vma->vm_flags & VM_EXEC)
1565b3b1688SDavid Daney 		octeon_flush_icache_all_cores(vma);
1575b3b1688SDavid Daney }
1585b3b1688SDavid Daney 
1595b3b1688SDavid Daney 
1605b3b1688SDavid Daney /**
1615b3b1688SDavid Daney  * Flush a specific page of a vma
1625b3b1688SDavid Daney  *
1635b3b1688SDavid Daney  * @vma:    VMA to flush page for
1645b3b1688SDavid Daney  * @page:   Page to flush
1655b3b1688SDavid Daney  * @pfn:
1665b3b1688SDavid Daney  */
1675b3b1688SDavid Daney static void octeon_flush_cache_page(struct vm_area_struct *vma,
1685b3b1688SDavid Daney 				    unsigned long page, unsigned long pfn)
1695b3b1688SDavid Daney {
1705b3b1688SDavid Daney 	if (vma->vm_flags & VM_EXEC)
1715b3b1688SDavid Daney 		octeon_flush_icache_all_cores(vma);
1725b3b1688SDavid Daney }
1735b3b1688SDavid Daney 
/*
 * Installed as __flush_kernel_vmap_range but never expected to run on
 * Octeon (presumably because the dcache needs no manual maintenance —
 * see the comment on octeon_flush_data_cache_page); any call is
 * treated as a kernel bug.
 */
static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}
1785b3b1688SDavid Daney 
/*
 * Probe Octeon's caches.
 *
 * Fills in the icache/dcache geometry of current_cpu_data.  Octeon and
 * Octeon+ read the icache geometry out of the CP0 Config1 register;
 * Octeon2/Octeon3 use hard-wired values.  Panics on an unknown Cavium
 * CPU type.
 */
static void probe_octeon(void)
{
	unsigned long icache_size;
	unsigned long dcache_size;
	unsigned int config1;
	struct cpuinfo_mips *c = &current_cpu_data;

	config1 = read_c0_config1();
	switch (c->cputype) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
		/* Icache geometry from the Config1 IL/IS/IA fields */
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size =
			c->icache.sets * c->icache.ways * c->icache.linesz;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
		c->dcache.linesz = 128;
		if (c->cputype == CPU_CAVIUM_OCTEON_PLUS)
			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
		else
			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
		c->dcache.ways = 64;
		dcache_size =
			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON2:
		/* Fixed geometry except for the icache line size */
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 8;
		c->icache.ways = 37;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON3:
		/* Fully fixed geometry */
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 39;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		panic("Unsupported Cavium Networks CPU type");
		break;
	}

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	/* Only the boot core announces the geometry */
	if (smp_processor_id() == 0) {
		pr_notice("Primary instruction cache %ldkB, %s, %d way, "
			  "%d sets, linesize %d bytes.\n",
			  icache_size >> 10,
			  cpu_has_vtag_icache ?
				"virtually tagged" : "physically tagged",
			  c->icache.ways, c->icache.sets, c->icache.linesz);

		pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
			  "linesize %d bytes.\n",
			  dcache_size >> 10, c->dcache.ways,
			  c->dcache.sets, c->dcache.linesz);
	}
}
2675b3b1688SDavid Daney 
/* Install the Octeon cache-error exception vector at offset 0x100. */
static void  octeon_cache_error_setup(void)
{
	extern char except_vec2_octeon;
	set_handler(0x100, &except_vec2_octeon, 0x80);
}
2735b3b1688SDavid Daney 
/*
 * Setup the Octeon cache flush routines.
 *
 * Probes the cache geometry, then points the generic MIPS cache
 * maintenance hooks at the Octeon implementations above.
 */
void octeon_cache_init(void)
{
	probe_octeon();

	/*
	 * Page-size alignment suffices for shared mappings; the dcache
	 * has no aliases (see octeon_flush_cache_mm).
	 */
	shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all			= octeon_flush_icache_all;
	__flush_cache_all		= octeon_flush_icache_all;
	flush_cache_mm			= octeon_flush_cache_mm;
	flush_cache_page		= octeon_flush_cache_page;
	flush_cache_range		= octeon_flush_cache_range;
	flush_cache_sigtramp		= octeon_flush_cache_sigtramp;
	flush_icache_all		= octeon_flush_icache_all;
	flush_data_cache_page		= octeon_flush_data_cache_page;
	flush_icache_range		= octeon_flush_icache_range;
	local_flush_icache_range	= local_octeon_flush_icache_range;

	__flush_kernel_vmap_range	= octeon_flush_kernel_vmap_range;

	build_clear_page();
	build_copy_page();

	board_cache_error_setup = octeon_cache_error_setup;
}
3025b3b1688SDavid Daney 
303e1ced097SDavid Daney /*
3045b3b1688SDavid Daney  * Handle a cache error exception
3055b3b1688SDavid Daney  */
306f65aad41SRalf Baechle static RAW_NOTIFIER_HEAD(co_cache_error_chain);
307f65aad41SRalf Baechle 
/*
 * Register a notifier to be called on a cache error exception.  If any
 * notifier returns NOTIFY_OK the default error report is suppressed.
 */
int register_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);
3135b3b1688SDavid Daney 
/* Remove a notifier previously added by register_co_cache_error_notifier(). */
int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);
319f65aad41SRalf Baechle 
320e1ced097SDavid Daney static void co_cache_error_call_notifiers(unsigned long val)
321f65aad41SRalf Baechle {
322e1ced097SDavid Daney 	int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);
323e1ced097SDavid Daney 	if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
324e1ced097SDavid Daney 		u64 dcache_err;
325e1ced097SDavid Daney 		unsigned long coreid = cvmx_get_core_num();
326e1ced097SDavid Daney 		u64 icache_err = read_octeon_c0_icacheerr();
327e1ced097SDavid Daney 
328e1ced097SDavid Daney 		if (val) {
329e1ced097SDavid Daney 			dcache_err = cache_err_dcache[coreid];
330e1ced097SDavid Daney 			cache_err_dcache[coreid] = 0;
331e1ced097SDavid Daney 		} else {
332e1ced097SDavid Daney 			dcache_err = read_octeon_c0_dcacheerr();
3335b3b1688SDavid Daney 		}
3345b3b1688SDavid Daney 
335e1ced097SDavid Daney 		pr_err("Core%lu: Cache error exception:\n", coreid);
336e1ced097SDavid Daney 		pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
337e1ced097SDavid Daney 		if (icache_err & 1) {
338e1ced097SDavid Daney 			pr_err("CacheErr (Icache) == %llx\n",
339e1ced097SDavid Daney 			       (unsigned long long)icache_err);
340e1ced097SDavid Daney 			write_octeon_c0_icacheerr(0);
341e1ced097SDavid Daney 		}
342e1ced097SDavid Daney 		if (dcache_err & 1) {
343e1ced097SDavid Daney 			pr_err("CacheErr (Dcache) == %llx\n",
344e1ced097SDavid Daney 			       (unsigned long long)dcache_err);
345e1ced097SDavid Daney 		}
346e1ced097SDavid Daney 	}
347e1ced097SDavid Daney }
348e1ced097SDavid Daney 
349e1ced097SDavid Daney /*
3501c1a90d8SRalf Baechle  * Called when the the exception is recoverable
3515b3b1688SDavid Daney  */
352e1ced097SDavid Daney 
3535b3b1688SDavid Daney asmlinkage void cache_parity_error_octeon_recoverable(void)
3545b3b1688SDavid Daney {
355f65aad41SRalf Baechle 	co_cache_error_call_notifiers(0);
3565b3b1688SDavid Daney }
3575b3b1688SDavid Daney 
/*
 * Called when the exception is not recoverable: notify, then panic,
 * since execution cannot safely continue.
 */

asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	co_cache_error_call_notifiers(1);
	panic("Can't handle cache error: nested exception");
}
367