/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>

unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);

/**
 * Octeon automatically flushes the dcache on TLB changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed.
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
        /* Nothing to do */
}

static inline void octeon_local_flush_icache(void)
{
        asm volatile ("synci 0($0)");
}

/*
 * Flush local I-cache for the specified range. The range is ignored;
 * the whole local icache is invalidated.
 */
static void local_octeon_flush_icache_range(unsigned long start,
                                            unsigned long end)
{
        octeon_local_flush_icache();
}

/**
 * Flush caches as necessary for all cores affected by a
 * vma. If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
        extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
        int cpu;
        cpumask_t mask;
#endif

        mb();
        octeon_local_flush_icache();
#ifdef CONFIG_SMP
        preempt_disable();
        cpu = smp_processor_id();

        /*
         * If we have a vma structure, we only need to worry about
         * cores it has been used on
         */
        if (vma)
                mask = *mm_cpumask(vma->vm_mm);
        else
                mask = *cpu_online_mask;
        cpumask_clear_cpu(cpu, &mask);
        for_each_cpu(cpu, &mask)
                octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

        preempt_enable();
#endif
}


/**
 * Called to flush the icache on all cores
 */
static void octeon_flush_icache_all(void)
{
        octeon_flush_icache_all_cores(NULL);
}


/**
 * Called to flush all memory associated with a memory
 * context.
 *
 * @mm:     Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
        /*
         * According to the R4K version of this file, CPUs without
         * dcache aliases don't need to do anything here
         */
}


/**
 * Flush a range of kernel addresses out of the icache
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
        octeon_flush_icache_all_cores(NULL);
}
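
/*
 * Note on the flush strategy used above: every icache flush in this
 * file, including the ranged variants, funnels into
 * octeon_flush_icache_all_cores().  That helper invalidates the local
 * icache with a SYNCI and then sends SMP_ICACHE_FLUSH IPIs to the
 * other cores in the mm's cpumask (or to all online cores when no vma
 * is supplied), so the start/end arguments are effectively ignored.
 */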

/**
 * Flush the icache for a trampoline. These are used for interrupt
 * and exception hooking.
 *
 * @addr:   Address to flush
 */
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, addr);
        octeon_flush_icache_all_cores(vma);
}


/**
 * Flush a range out of a vma
 *
 * @vma:    VMA to flush
 * @start:  Start address of the range
 * @end:    End address of the range
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end)
{
        if (vma->vm_flags & VM_EXEC)
                octeon_flush_icache_all_cores(vma);
}


/**
 * Flush a specific page of a vma
 *
 * @vma:    VMA to flush page for
 * @page:   Page to flush
 * @pfn:    Page frame number of the page
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long page, unsigned long pfn)
{
        if (vma->vm_flags & VM_EXEC)
                octeon_flush_icache_all_cores(vma);
}

static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
        BUG();
}
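
/*
 * Note on probe_octeon() below: for CN3XXX/CN5XXX the icache geometry
 * is decoded from the MIPS Config1 register (IL field -> line size of
 * 2^(IL+1) bytes, IS -> 64 * 2^IS sets per way, IA -> IA + 1 ways);
 * the OCTEON II icache sets/ways and the dcache geometry for all
 * models are hard-coded instead.
 */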

/**
 * Probe Octeon's caches
 */
static void probe_octeon(void)
{
        unsigned long icache_size;
        unsigned long dcache_size;
        unsigned int config1;
        struct cpuinfo_mips *c = &current_cpu_data;

        config1 = read_c0_config1();
        switch (c->cputype) {
        case CPU_CAVIUM_OCTEON:
        case CPU_CAVIUM_OCTEON_PLUS:
                c->icache.linesz = 2 << ((config1 >> 19) & 7);
                c->icache.sets = 64 << ((config1 >> 22) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size =
                        c->icache.sets * c->icache.ways * c->icache.linesz;
                c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
                c->dcache.linesz = 128;
                if (c->cputype == CPU_CAVIUM_OCTEON_PLUS)
                        c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
                else
                        c->dcache.sets = 1; /* CN3XXX has one Dcache set */
                c->dcache.ways = 64;
                dcache_size =
                        c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_CAVIUM_OCTEON2:
                c->icache.linesz = 2 << ((config1 >> 19) & 7);
                c->icache.sets = 8;
                c->icache.ways = 37;
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

                c->dcache.linesz = 128;
                c->dcache.ways = 32;
                c->dcache.sets = 8;
                dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        default:
                panic("Unsupported Cavium Networks CPU type");
                break;
        }

        /* compute a couple of other cache variables */
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;

        c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
        c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

        if (smp_processor_id() == 0) {
                pr_notice("Primary instruction cache %ldkB, %s, %d way, "
                          "%d sets, linesize %d bytes.\n",
                          icache_size >> 10,
                          cpu_has_vtag_icache ?
                                "virtually tagged" : "physically tagged",
                          c->icache.ways, c->icache.sets, c->icache.linesz);

                pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
                          "linesize %d bytes.\n",
                          dcache_size >> 10, c->dcache.ways,
                          c->dcache.sets, c->dcache.linesz);
        }
}
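
/*
 * Cache error handling: octeon_cache_error_setup() below copies the
 * Octeon-specific low-level handler (except_vec2_octeon, 0x80 bytes)
 * to offset 0x100 from the exception vector base, which is where the
 * MIPS cache error exception is dispatched.
 */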

static void octeon_cache_error_setup(void)
{
        extern char except_vec2_octeon;
        set_handler(0x100, &except_vec2_octeon, 0x80);
}

/**
 * Setup the Octeon cache flush routines
 */
void octeon_cache_init(void)
{
        probe_octeon();

        shm_align_mask = PAGE_SIZE - 1;

        flush_cache_all = octeon_flush_icache_all;
        __flush_cache_all = octeon_flush_icache_all;
        flush_cache_mm = octeon_flush_cache_mm;
        flush_cache_page = octeon_flush_cache_page;
        flush_cache_range = octeon_flush_cache_range;
        flush_cache_sigtramp = octeon_flush_cache_sigtramp;
        flush_icache_all = octeon_flush_icache_all;
        flush_data_cache_page = octeon_flush_data_cache_page;
        flush_icache_range = octeon_flush_icache_range;
        local_flush_icache_range = local_octeon_flush_icache_range;

        __flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;

        build_clear_page();
        build_copy_page();

        board_cache_error_setup = octeon_cache_error_setup;
}

/*
 * Handle a cache error exception
 */
static RAW_NOTIFIER_HEAD(co_cache_error_chain);

int register_co_cache_error_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);

int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);

static void co_cache_error_call_notifiers(unsigned long val)
{
        int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);
        if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
                u64 dcache_err;
                unsigned long coreid = cvmx_get_core_num();
                u64 icache_err = read_octeon_c0_icacheerr();

                if (val) {
                        dcache_err = cache_err_dcache[coreid];
                        cache_err_dcache[coreid] = 0;
                } else {
                        dcache_err = read_octeon_c0_dcacheerr();
                }

                pr_err("Core%lu: Cache error exception:\n", coreid);
                pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
                if (icache_err & 1) {
                        pr_err("CacheErr (Icache) == %llx\n",
                               (unsigned long long)icache_err);
                        write_octeon_c0_icacheerr(0);
                }
                if (dcache_err & 1) {
                        pr_err("CacheErr (Dcache) == %llx\n",
                               (unsigned long long)dcache_err);
                }
        }
}

/*
 * Called when the exception is recoverable
 */
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
        co_cache_error_call_notifiers(0);
}

/**
 * Called when the exception is not recoverable
 */
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
        co_cache_error_call_notifiers(1);
        panic("Can't handle cache error: nested exception");
}