15b3b1688SDavid Daney /*
25b3b1688SDavid Daney * This file is subject to the terms and conditions of the GNU General Public
35b3b1688SDavid Daney * License. See the file "COPYING" in the main directory of this archive
45b3b1688SDavid Daney * for more details.
55b3b1688SDavid Daney *
65b3b1688SDavid Daney * Copyright (C) 2005-2007 Cavium Networks
75b3b1688SDavid Daney */
8f65aad41SRalf Baechle #include <linux/export.h>
95b3b1688SDavid Daney #include <linux/kernel.h>
105b3b1688SDavid Daney #include <linux/sched.h>
11631330f5SRalf Baechle #include <linux/smp.h>
125b3b1688SDavid Daney #include <linux/mm.h>
135b3b1688SDavid Daney #include <linux/bitops.h>
145b3b1688SDavid Daney #include <linux/cpu.h>
155b3b1688SDavid Daney #include <linux/io.h>
165b3b1688SDavid Daney
175b3b1688SDavid Daney #include <asm/bcache.h>
185b3b1688SDavid Daney #include <asm/bootinfo.h>
195b3b1688SDavid Daney #include <asm/cacheops.h>
205b3b1688SDavid Daney #include <asm/cpu-features.h>
2169f24d17SRalf Baechle #include <asm/cpu-type.h>
225b3b1688SDavid Daney #include <asm/page.h>
235b3b1688SDavid Daney #include <asm/r4kcache.h>
24586016ebSDavid Daney #include <asm/traps.h>
255b3b1688SDavid Daney #include <asm/mmu_context.h>
265b3b1688SDavid Daney
275b3b1688SDavid Daney #include <asm/octeon/octeon.h>
285b3b1688SDavid Daney
295b3b1688SDavid Daney unsigned long long cache_err_dcache[NR_CPUS];
30f65aad41SRalf Baechle EXPORT_SYMBOL_GPL(cache_err_dcache);
315b3b1688SDavid Daney
/*
 * octeon_flush_data_cache_page - flush the dcache for one page: a no-op.
 *
 * Octeon automatically flushes the dcache on tlb changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed.
 *
 * @addr: page address (unused)
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Nothing to do -- the hardware keeps the dcache coherent. */
}
425b3b1688SDavid Daney
/*
 * Flush this core's icache with a single "synci" instruction.
 *
 * NOTE(review): this relies on Octeon treating one synci as a
 * whole-icache invalidate rather than a single-line operation --
 * confirm against the Octeon hardware manual.
 */
static inline void octeon_local_flush_icache(void)
{
	asm volatile ("synci 0($0)");
}
475b3b1688SDavid Daney
/*
 * Flush local I-cache for the specified range.
 *
 * @start and @end are ignored: the entire local icache is invalidated,
 * which is at least as strong as flushing only the requested range.
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	octeon_local_flush_icache();
}
565b3b1688SDavid Daney
#if defined(CONFIG_SMP) && !defined(CONFIG_CAVIUM_OCTEON_SOC)
/*
 * IPI callback with the exact signature smp_call_function_many() expects.
 * Calling octeon_local_flush_icache() through a function pointer cast to
 * an incompatible type is undefined behavior and an indirect-call (CFI)
 * violation, so wrap it in a properly typed helper instead of casting.
 */
static void octeon_flush_icache_ipi(void *unused)
{
	octeon_local_flush_icache();
}
#endif

/**
 * octeon_flush_icache_all_cores - Flush caches as necessary for all cores
 * affected by a vma. If no vma is supplied, all cores are flushed.
 *
 * @vma: VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

	/* Order prior stores before flushing, then flush the local icache. */
	mb();
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on
	 */
	if (vma)
		mask = *mm_cpumask(vma->vm_mm);
	else
		mask = *cpu_online_mask;
	/* The local core was already flushed above. */
	cpumask_clear_cpu(cpu, &mask);
#ifdef CONFIG_CAVIUM_OCTEON_SOC
	for_each_cpu(cpu, &mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
#else
	smp_call_function_many(&mask, octeon_flush_icache_ipi, NULL, 1);
#endif

	preempt_enable();
#endif
}
975b3b1688SDavid Daney
985b3b1688SDavid Daney
/*
 * Called to flush the icache on all cores
 */
static void octeon_flush_icache_all(void)
{
	/* A NULL vma means every online core gets its icache flushed. */
	octeon_flush_icache_all_cores(NULL);
}
1065b3b1688SDavid Daney
1075b3b1688SDavid Daney
/**
 * octeon_flush_cache_mm - flush all memory associated with a memory context.
 *
 * @mm: Memory context to flush (unused -- this is intentionally a no-op)
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * According to the R4K version of this file, CPUs without
	 * dcache aliases don't need to do anything here
	 */
}
1205b3b1688SDavid Daney
1215b3b1688SDavid Daney
/*
 * Flush a range of kernel addresses out of the icache
 *
 * @start and @end are ignored; the whole icache is flushed on all
 * online cores.
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
	octeon_flush_icache_all_cores(NULL);
}
1305b3b1688SDavid Daney
1315b3b1688SDavid Daney
1325b3b1688SDavid Daney /**
133d2ac3a11SRandy Dunlap * octeon_flush_cache_range - Flush a range out of a vma
1345b3b1688SDavid Daney *
1355b3b1688SDavid Daney * @vma: VMA to flush
136d2ac3a11SRandy Dunlap * @start: beginning address for flush
137d2ac3a11SRandy Dunlap * @end: ending address for flush
1385b3b1688SDavid Daney */
octeon_flush_cache_range(struct vm_area_struct * vma,unsigned long start,unsigned long end)1395b3b1688SDavid Daney static void octeon_flush_cache_range(struct vm_area_struct *vma,
1405b3b1688SDavid Daney unsigned long start, unsigned long end)
1415b3b1688SDavid Daney {
1425b3b1688SDavid Daney if (vma->vm_flags & VM_EXEC)
1435b3b1688SDavid Daney octeon_flush_icache_all_cores(vma);
1445b3b1688SDavid Daney }
1455b3b1688SDavid Daney
1465b3b1688SDavid Daney
1475b3b1688SDavid Daney /**
148d2ac3a11SRandy Dunlap * octeon_flush_cache_page - Flush a specific page of a vma
1495b3b1688SDavid Daney *
1505b3b1688SDavid Daney * @vma: VMA to flush page for
1515b3b1688SDavid Daney * @page: Page to flush
152d2ac3a11SRandy Dunlap * @pfn: Page frame number
1535b3b1688SDavid Daney */
octeon_flush_cache_page(struct vm_area_struct * vma,unsigned long page,unsigned long pfn)1545b3b1688SDavid Daney static void octeon_flush_cache_page(struct vm_area_struct *vma,
1555b3b1688SDavid Daney unsigned long page, unsigned long pfn)
1565b3b1688SDavid Daney {
1575b3b1688SDavid Daney if (vma->vm_flags & VM_EXEC)
1585b3b1688SDavid Daney octeon_flush_icache_all_cores(vma);
1595b3b1688SDavid Daney }
1605b3b1688SDavid Daney
/* Not expected to ever be called on Octeon; trap loudly if it is. */
static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}
1655b3b1688SDavid Daney
166d2ac3a11SRandy Dunlap /*
1675b3b1688SDavid Daney * Probe Octeon's caches
1685b3b1688SDavid Daney *
1695b3b1688SDavid Daney */
probe_octeon(void)170078a55fcSPaul Gortmaker static void probe_octeon(void)
1715b3b1688SDavid Daney {
1725b3b1688SDavid Daney unsigned long icache_size;
1735b3b1688SDavid Daney unsigned long dcache_size;
1745b3b1688SDavid Daney unsigned int config1;
1755b3b1688SDavid Daney struct cpuinfo_mips *c = ¤t_cpu_data;
17669f24d17SRalf Baechle int cputype = current_cpu_type();
1775b3b1688SDavid Daney
178f8bf7e68SDavid Daney config1 = read_c0_config1();
17969f24d17SRalf Baechle switch (cputype) {
1805b3b1688SDavid Daney case CPU_CAVIUM_OCTEON:
1816f329468SDavid Daney case CPU_CAVIUM_OCTEON_PLUS:
1825b3b1688SDavid Daney c->icache.linesz = 2 << ((config1 >> 19) & 7);
1835b3b1688SDavid Daney c->icache.sets = 64 << ((config1 >> 22) & 7);
1845b3b1688SDavid Daney c->icache.ways = 1 + ((config1 >> 16) & 7);
1855b3b1688SDavid Daney c->icache.flags |= MIPS_CACHE_VTAG;
1865b3b1688SDavid Daney icache_size =
1875b3b1688SDavid Daney c->icache.sets * c->icache.ways * c->icache.linesz;
1885b3b1688SDavid Daney c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
1895b3b1688SDavid Daney c->dcache.linesz = 128;
19069f24d17SRalf Baechle if (cputype == CPU_CAVIUM_OCTEON_PLUS)
1915b3b1688SDavid Daney c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
1926f329468SDavid Daney else
1936f329468SDavid Daney c->dcache.sets = 1; /* CN3XXX has one Dcache set */
1945b3b1688SDavid Daney c->dcache.ways = 64;
1955b3b1688SDavid Daney dcache_size =
1965b3b1688SDavid Daney c->dcache.sets * c->dcache.ways * c->dcache.linesz;
1975b3b1688SDavid Daney c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
1985b3b1688SDavid Daney c->options |= MIPS_CPU_PREFETCH;
1995b3b1688SDavid Daney break;
2005b3b1688SDavid Daney
201f8bf7e68SDavid Daney case CPU_CAVIUM_OCTEON2:
202f8bf7e68SDavid Daney c->icache.linesz = 2 << ((config1 >> 19) & 7);
203f8bf7e68SDavid Daney c->icache.sets = 8;
204f8bf7e68SDavid Daney c->icache.ways = 37;
205f8bf7e68SDavid Daney c->icache.flags |= MIPS_CACHE_VTAG;
206f8bf7e68SDavid Daney icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
207f8bf7e68SDavid Daney
208f8bf7e68SDavid Daney c->dcache.linesz = 128;
209f8bf7e68SDavid Daney c->dcache.ways = 32;
210f8bf7e68SDavid Daney c->dcache.sets = 8;
211f8bf7e68SDavid Daney dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
212f8bf7e68SDavid Daney c->options |= MIPS_CPU_PREFETCH;
213f8bf7e68SDavid Daney break;
214f8bf7e68SDavid Daney
21562597c60SDavid Daney case CPU_CAVIUM_OCTEON3:
21662597c60SDavid Daney c->icache.linesz = 128;
21762597c60SDavid Daney c->icache.sets = 16;
21862597c60SDavid Daney c->icache.ways = 39;
21962597c60SDavid Daney c->icache.flags |= MIPS_CACHE_VTAG;
22062597c60SDavid Daney icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
22162597c60SDavid Daney
22262597c60SDavid Daney c->dcache.linesz = 128;
22362597c60SDavid Daney c->dcache.ways = 32;
22462597c60SDavid Daney c->dcache.sets = 8;
22562597c60SDavid Daney dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
22662597c60SDavid Daney c->options |= MIPS_CPU_PREFETCH;
22762597c60SDavid Daney break;
22862597c60SDavid Daney
2295b3b1688SDavid Daney default:
230ab75dc02SRalf Baechle panic("Unsupported Cavium Networks CPU type");
2315b3b1688SDavid Daney break;
2325b3b1688SDavid Daney }
2335b3b1688SDavid Daney
2345b3b1688SDavid Daney /* compute a couple of other cache variables */
2355b3b1688SDavid Daney c->icache.waysize = icache_size / c->icache.ways;
2365b3b1688SDavid Daney c->dcache.waysize = dcache_size / c->dcache.ways;
2375b3b1688SDavid Daney
2385b3b1688SDavid Daney c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
2395b3b1688SDavid Daney c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
2405b3b1688SDavid Daney
2415b3b1688SDavid Daney if (smp_processor_id() == 0) {
242bea176fbSOleksij Rempel pr_info("Primary instruction cache %ldkB, %s, %d way, "
2435b3b1688SDavid Daney "%d sets, linesize %d bytes.\n",
2445b3b1688SDavid Daney icache_size >> 10,
2455b3b1688SDavid Daney cpu_has_vtag_icache ?
2465b3b1688SDavid Daney "virtually tagged" : "physically tagged",
2475b3b1688SDavid Daney c->icache.ways, c->icache.sets, c->icache.linesz);
2485b3b1688SDavid Daney
249bea176fbSOleksij Rempel pr_info("Primary data cache %ldkB, %d-way, %d sets, "
2505b3b1688SDavid Daney "linesize %d bytes.\n",
2515b3b1688SDavid Daney dcache_size >> 10, c->dcache.ways,
2525b3b1688SDavid Daney c->dcache.sets, c->dcache.linesz);
2535b3b1688SDavid Daney }
2545b3b1688SDavid Daney }
2555b3b1688SDavid Daney
/*
 * Install the Octeon cache-error exception handler (except_vec2_octeon,
 * defined in assembly elsewhere) at vector offset 0x100, copying at most
 * 0x80 bytes.
 */
static void octeon_cache_error_setup(void)
{
	extern char except_vec2_octeon;
	set_handler(0x100, &except_vec2_octeon, 0x80);
}
2615b3b1688SDavid Daney
/*
 * Setup the Octeon cache flush routines
 *
 * Probes cache geometry, then wires all the generic MIPS cache-flush
 * hooks to the Octeon implementations above.
 */
void octeon_cache_init(void)
{
	probe_octeon();

	/* Only page alignment is required for shared mappings. */
	shm_align_mask = PAGE_SIZE - 1;

	/* Install the Octeon flush routines into the generic hooks. */
	flush_cache_all = octeon_flush_icache_all;
	__flush_cache_all = octeon_flush_icache_all;
	flush_cache_mm = octeon_flush_cache_mm;
	flush_cache_page = octeon_flush_cache_page;
	flush_cache_range = octeon_flush_cache_range;
	flush_icache_all = octeon_flush_icache_all;
	flush_data_cache_page = octeon_flush_data_cache_page;
	flush_icache_range = octeon_flush_icache_range;
	local_flush_icache_range = local_octeon_flush_icache_range;
	__flush_icache_user_range = octeon_flush_icache_range;
	__local_flush_icache_user_range = local_octeon_flush_icache_range;

	__flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;

	/* Generate the optimized clear_page/copy_page routines. */
	build_clear_page();
	build_copy_page();

	/* Hook in the cache-error exception vector installer. */
	board_cache_error_setup = octeon_cache_error_setup;
}
2915b3b1688SDavid Daney
292e1ced097SDavid Daney /*
2935b3b1688SDavid Daney * Handle a cache error exception
2945b3b1688SDavid Daney */
295f65aad41SRalf Baechle static RAW_NOTIFIER_HEAD(co_cache_error_chain);
296f65aad41SRalf Baechle
/*
 * Register a notifier to be called from the cache-error exception path.
 * Returns the raw_notifier_chain_register() result.
 */
int register_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);
3025b3b1688SDavid Daney
/*
 * Remove a previously registered cache-error notifier.
 * Returns the raw_notifier_chain_unregister() result.
 */
int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);
308f65aad41SRalf Baechle
/*
 * Run the cache-error notifier chain, falling back to printing the raw
 * error state when no notifier fully handles the event.
 *
 * @val: 0 for a recoverable error (dcache status still live in the CP0
 *	 register); nonzero for a non-recoverable one (status saved into
 *	 cache_err_dcache[] -- presumably by the low-level handler; verify
 *	 against except_vec2_octeon).
 */
static void co_cache_error_call_notifiers(unsigned long val)
{
	int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);
	/* Only report here if no notifier returned NOTIFY_OK. */
	if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
		u64 dcache_err;
		unsigned long coreid = cvmx_get_core_num();
		u64 icache_err = read_octeon_c0_icacheerr();

		if (val) {
			/* Consume (and clear) the saved dcache error word. */
			dcache_err = cache_err_dcache[coreid];
			cache_err_dcache[coreid] = 0;
		} else {
			dcache_err = read_octeon_c0_dcacheerr();
		}

		pr_err("Core%lu: Cache error exception:\n", coreid);
		pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
		/* Bit 0 of each error word flags a valid error record. */
		if (icache_err & 1) {
			pr_err("CacheErr (Icache) == %llx\n",
			       (unsigned long long)icache_err);
			/* Acknowledge the icache error. */
			write_octeon_c0_icacheerr(0);
		}
		if (dcache_err & 1) {
			pr_err("CacheErr (Dcache) == %llx\n",
			       (unsigned long long)dcache_err);
		}
	}
}
337e1ced097SDavid Daney
/*
 * Called when the exception is recoverable
 */

asmlinkage void cache_parity_error_octeon_recoverable(void)
{
	/* val == 0: dcache error status is read directly from CP0. */
	co_cache_error_call_notifiers(0);
}
3465b3b1688SDavid Daney
/*
 * Called when the exception is not recoverable
 */

asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	/* val == 1: use the saved per-core dcache error status. */
	co_cache_error_call_notifiers(1);
	panic("Can't handle cache error: nested exception");
}
356