/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>

unsigned long long cache_err_dcache[NR_CPUS];

/**
 * Octeon automatically flushes the dcache on TLB changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed.
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
        /* Nothing to do */
}

static inline void octeon_local_flush_icache(void)
{
        asm volatile ("synci 0($0)");
}

/*
 * Flush local I-cache for the specified range.
 */
static void local_octeon_flush_icache_range(unsigned long start,
                                            unsigned long end)
{
        octeon_local_flush_icache();
}
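/*
 * Note (descriptive comment, not in the original source): the start/end
 * arguments above are intentionally unused.  This file relies on the
 * single SYNCI issued by octeon_local_flush_icache() to invalidate the
 * whole local icache, so no per-line range flush is attempted.
 */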
/**
 * Flush caches as necessary for all cores affected by a
 * vma. If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
        extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
        int cpu;
        cpumask_t mask;
#endif

        mb();
        octeon_local_flush_icache();
#ifdef CONFIG_SMP
        preempt_disable();
        cpu = smp_processor_id();

        /*
         * If we have a vma structure, we only need to worry about
         * cores it has been used on.
         */
        if (vma)
                mask = *mm_cpumask(vma->vm_mm);
        else
                mask = *cpu_online_mask;
        cpumask_clear_cpu(cpu, &mask);
        for_each_cpu(cpu, &mask)
                octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

        preempt_enable();
#endif
}


/**
 * Called to flush the icache on all cores
 */
static void octeon_flush_icache_all(void)
{
        octeon_flush_icache_all_cores(NULL);
}


/**
 * Called to flush all memory associated with a memory
 * context.
 *
 * @mm:     Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
        /*
         * According to the R4K version of this file, CPUs without
         * dcache aliases don't need to do anything here.
         */
}


/**
 * Flush a range of kernel addresses out of the icache
 *
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
        octeon_flush_icache_all_cores(NULL);
}
/**
 * Flush the icache for a trampoline. These are used for interrupt
 * and exception hooking.
 *
 * @addr:   Address to flush
 */
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, addr);
        octeon_flush_icache_all_cores(vma);
}


/**
 * Flush a range out of a vma
 *
 * @vma:    VMA to flush
 * @start:  Start address of the range
 * @end:    End address of the range
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end)
{
        if (vma->vm_flags & VM_EXEC)
                octeon_flush_icache_all_cores(vma);
}


/**
 * Flush a specific page of a vma
 *
 * @vma:    VMA to flush page for
 * @page:   Page to flush
 * @pfn:    Page frame number of the page
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long page, unsigned long pfn)
{
        if (vma->vm_flags & VM_EXEC)
                octeon_flush_icache_all_cores(vma);
}

static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
        BUG();
}
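/*
 * Note on octeon_flush_kernel_vmap_range() above (descriptive comment,
 * not in the original source): the generic code is expected to call
 * __flush_kernel_vmap_range() only on CPUs with dcache aliases, which
 * Octeon does not have, so this path should never be reached; the BUG()
 * documents that assumption.
 */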
/**
 * Probe Octeon's caches
 *
 */
static void __cpuinit probe_octeon(void)
{
        unsigned long icache_size;
        unsigned long dcache_size;
        unsigned int config1;
        struct cpuinfo_mips *c = &current_cpu_data;

        config1 = read_c0_config1();
        switch (c->cputype) {
        case CPU_CAVIUM_OCTEON:
        case CPU_CAVIUM_OCTEON_PLUS:
                c->icache.linesz = 2 << ((config1 >> 19) & 7);
                c->icache.sets = 64 << ((config1 >> 22) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size =
                        c->icache.sets * c->icache.ways * c->icache.linesz;
                c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
                c->dcache.linesz = 128;
                if (c->cputype == CPU_CAVIUM_OCTEON_PLUS)
                        c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
                else
                        c->dcache.sets = 1; /* CN3XXX has one Dcache set */
                c->dcache.ways = 64;
                dcache_size =
                        c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_CAVIUM_OCTEON2:
                c->icache.linesz = 2 << ((config1 >> 19) & 7);
                c->icache.sets = 8;
                c->icache.ways = 37;
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

                c->dcache.linesz = 128;
                c->dcache.ways = 32;
                c->dcache.sets = 8;
                dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        default:
                panic("Unsupported Cavium Networks CPU type");
                break;
        }

        /* compute a couple of other cache variables */
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;

        c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
        c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

        if (smp_processor_id() == 0) {
                pr_notice("Primary instruction cache %ldkB, %s, %d way, "
                          "%d sets, linesize %d bytes.\n",
                          icache_size >> 10,
                          cpu_has_vtag_icache ?
                                "virtually tagged" : "physically tagged",
                          c->icache.ways, c->icache.sets, c->icache.linesz);

                pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
                          "linesize %d bytes.\n",
                          dcache_size >> 10, c->dcache.ways,
                          c->dcache.sets, c->dcache.linesz);
        }
}
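/*
 * Illustrative decode of the Config1 fields used by probe_octeon() above
 * (worked example, not in the original source; the register value is
 * hypothetical, real parts report their own geometry):
 *
 *      IL = (config1 >> 19) & 7        line size = 2 << IL bytes
 *      IS = (config1 >> 22) & 7        sets/way  = 64 << IS
 *      IA = (config1 >> 16) & 7        ways      = IA + 1
 *
 * For example, IL = 6, IS = 0, IA = 3 would give 128-byte lines, 64 sets
 * and 4 ways, i.e. an icache of 64 * 4 * 128 bytes = 32 KiB.
 */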
static void __cpuinit octeon_cache_error_setup(void)
{
        extern char except_vec2_octeon;
        set_handler(0x100, &except_vec2_octeon, 0x80);
}

/**
 * Setup the Octeon cache flush routines
 *
 */
void __cpuinit octeon_cache_init(void)
{
        probe_octeon();

        shm_align_mask = PAGE_SIZE - 1;

        flush_cache_all = octeon_flush_icache_all;
        __flush_cache_all = octeon_flush_icache_all;
        flush_cache_mm = octeon_flush_cache_mm;
        flush_cache_page = octeon_flush_cache_page;
        flush_cache_range = octeon_flush_cache_range;
        flush_cache_sigtramp = octeon_flush_cache_sigtramp;
        flush_icache_all = octeon_flush_icache_all;
        flush_data_cache_page = octeon_flush_data_cache_page;
        flush_icache_range = octeon_flush_icache_range;
        local_flush_icache_range = local_octeon_flush_icache_range;

        __flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;

        build_clear_page();
        build_copy_page();

        board_cache_error_setup = octeon_cache_error_setup;
}

/**
 * Handle a cache error exception
 */
static void cache_parity_error_octeon(int non_recoverable)
{
        unsigned long coreid = cvmx_get_core_num();
        uint64_t icache_err = read_octeon_c0_icacheerr();

        pr_err("Cache error exception:\n");
        pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
        if (icache_err & 1) {
                pr_err("CacheErr (Icache) == %llx\n",
                       (unsigned long long)icache_err);
                write_octeon_c0_icacheerr(0);
        }
        if (cache_err_dcache[coreid] & 1) {
                pr_err("CacheErr (Dcache) == %llx\n",
                       (unsigned long long)cache_err_dcache[coreid]);
                cache_err_dcache[coreid] = 0;
        }

        if (non_recoverable)
                panic("Can't handle cache error: nested exception");
}

/**
 * Called when the exception is recoverable
 */
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
        cache_parity_error_octeon(0);
}

/**
 * Called when the exception is not recoverable
 */
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
        cache_parity_error_octeon(1);
}