/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/page.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>

unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);

/*
 * Octeon automatically flushes the dcache on tlb changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed.
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Nothing to do */
}

static inline void octeon_local_flush_icache(void)
{
	asm volatile ("synci 0($0)");
}

/*
 * Flush local I-cache for the specified range.
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	octeon_local_flush_icache();
}
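/*
 * Note that the range arguments above are intentionally ignored: the single
 * synci issued by octeon_local_flush_icache() is treated as invalidating the
 * whole local icache on Octeon, so there is no finer-grained per-line flush
 * to perform here.
 */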
/**
 * octeon_flush_icache_all_cores - Flush caches as necessary for all cores
 * affected by a vma. If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

	mb();
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on.
	 */
	if (vma)
		mask = *mm_cpumask(vma->vm_mm);
	else
		mask = *cpu_online_mask;
	cpumask_clear_cpu(cpu, &mask);
	for_each_cpu(cpu, &mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

	preempt_enable();
#endif
}
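/*
 * On SMP the function above only issues SMP_ICACHE_FLUSH IPIs; it does not
 * wait here for the other cores to complete the flush. Each target core is
 * expected to perform the same whole-icache synci from its mailbox/IPI
 * handler in the Octeon SMP code.
 */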
/*
 * Called to flush the icache on all cores
 */
static void octeon_flush_icache_all(void)
{
	octeon_flush_icache_all_cores(NULL);
}

/**
 * octeon_flush_cache_mm - flush all memory associated with a memory context.
 *
 * @mm:     Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * According to the R4K version of this file, CPUs without
	 * dcache aliases don't need to do anything here.
	 */
}

/*
 * Flush a range of kernel addresses out of the icache
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
	octeon_flush_icache_all_cores(NULL);
}

/**
 * octeon_flush_cache_range - Flush a range out of a vma
 *
 * @vma:    VMA to flush
 * @start:  beginning address for flush
 * @end:    ending address for flush
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}

/**
 * octeon_flush_cache_page - Flush a specific page of a vma
 *
 * @vma:    VMA to flush page for
 * @page:   Page to flush
 * @pfn:    Page frame number
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
				    unsigned long page, unsigned long pfn)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}

static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}
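/*
 * As with octeon_flush_data_cache_page(), the dcache needs no flushing on
 * Octeon, so octeon_flush_kernel_vmap_range() is never expected to be
 * reached; the BUG() above documents that assumption rather than silently
 * ignoring a call.
 */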
/*
 * Probe Octeon's caches
 */
static void probe_octeon(void)
{
	unsigned long icache_size;
	unsigned long dcache_size;
	unsigned int config1;
	struct cpuinfo_mips *c = &current_cpu_data;
	int cputype = current_cpu_type();

	config1 = read_c0_config1();
	switch (cputype) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size =
			c->icache.sets * c->icache.ways * c->icache.linesz;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
		c->dcache.linesz = 128;
		if (cputype == CPU_CAVIUM_OCTEON_PLUS)
			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
		else
			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
		c->dcache.ways = 64;
		dcache_size =
			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON2:
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 8;
		c->icache.ways = 37;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON3:
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 39;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		panic("Unsupported Cavium Networks CPU type");
		break;
	}

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
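	/*
	 * For example, a CN3XXX-class part whose Config1 reports 128-byte
	 * lines, 64 sets and 4 ways ends up with a 64 * 4 * 128 = 32 KB
	 * icache, while the fixed dcache geometry gives 1 * 64 * 128 = 8 KB
	 * (2 * 64 * 128 = 16 KB on CN5XXX with its two sets). That geometry
	 * is what gets reported below.
	 */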
	if (smp_processor_id() == 0) {
		pr_info("Primary instruction cache %ldkB, %s, %d way, "
			"%d sets, linesize %d bytes.\n",
			icache_size >> 10,
			cpu_has_vtag_icache ?
				"virtually tagged" : "physically tagged",
			c->icache.ways, c->icache.sets, c->icache.linesz);

		pr_info("Primary data cache %ldkB, %d-way, %d sets, "
			"linesize %d bytes.\n",
			dcache_size >> 10, c->dcache.ways,
			c->dcache.sets, c->dcache.linesz);
	}
}

static void octeon_cache_error_setup(void)
{
	extern char except_vec2_octeon;
	set_handler(0x100, &except_vec2_octeon, 0x80);
}

/*
 * Setup the Octeon cache flush routines
 */
void octeon_cache_init(void)
{
	probe_octeon();

	shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all = octeon_flush_icache_all;
	__flush_cache_all = octeon_flush_icache_all;
	flush_cache_mm = octeon_flush_cache_mm;
	flush_cache_page = octeon_flush_cache_page;
	flush_cache_range = octeon_flush_cache_range;
	flush_icache_all = octeon_flush_icache_all;
	flush_data_cache_page = octeon_flush_data_cache_page;
	flush_icache_range = octeon_flush_icache_range;
	local_flush_icache_range = local_octeon_flush_icache_range;
	__flush_icache_user_range = octeon_flush_icache_range;
	__local_flush_icache_user_range = local_octeon_flush_icache_range;

	__flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;

	build_clear_page();
	build_copy_page();

	board_cache_error_setup = octeon_cache_error_setup;
}

/*
 * Handle a cache error exception
 */
static RAW_NOTIFIER_HEAD(co_cache_error_chain);

int register_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);

int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);
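/*
 * A minimal sketch of how a consumer might hook these errors (the names
 * below are hypothetical, not part of this file):
 *
 *	static int my_co_cache_error_event(struct notifier_block *this,
 *					   unsigned long event, void *ptr)
 *	{
 *		/- event is 0 for a recoverable error, 1 otherwise -/
 *		return NOTIFY_STOP;
 *	}
 *
 *	static struct notifier_block my_co_cache_error_nb = {
 *		.notifier_call = my_co_cache_error_event,
 *	};
 *
 *	register_co_cache_error_notifier(&my_co_cache_error_nb);
 *
 * Returning NOTIFY_STOP (or NOTIFY_OK) suppresses the fallback error
 * reporting in co_cache_error_call_notifiers() below.
 */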
static void co_cache_error_call_notifiers(unsigned long val)
{
	int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);
	if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
		u64 dcache_err;
		unsigned long coreid = cvmx_get_core_num();
		u64 icache_err = read_octeon_c0_icacheerr();

		if (val) {
			dcache_err = cache_err_dcache[coreid];
			cache_err_dcache[coreid] = 0;
		} else {
			dcache_err = read_octeon_c0_dcacheerr();
		}

		pr_err("Core%lu: Cache error exception:\n", coreid);
		pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
		if (icache_err & 1) {
			pr_err("CacheErr (Icache) == %llx\n",
			       (unsigned long long)icache_err);
			write_octeon_c0_icacheerr(0);
		}
		if (dcache_err & 1) {
			pr_err("CacheErr (Dcache) == %llx\n",
			       (unsigned long long)dcache_err);
		}
	}
}

/*
 * Called when the exception is recoverable
 */
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
	co_cache_error_call_notifiers(0);
}

/*
 * Called when the exception is not recoverable
 */
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	co_cache_error_call_notifiers(1);
	panic("Can't handle cache error: nested exception");
}