/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

#include "proc-macros.S"

/* ARMv6 has separate (Harvard) instruction and data caches. */
#define HARVARD_CACHE
#define CACHE_LINE_SIZE		32	/* I-cache line size, in bytes */
#define D_CACHE_LINE_SIZE	32	/* D-cache line size, in bytes */
#define BTB_FLUSH_SIZE		8

#ifdef CONFIG_ARM_ERRATA_411920
/*
 * Invalidate the entire I cache (this code is a workaround for the ARM1136
 * erratum 411920 - Invalidate Instruction Cache operation can fail. This
 * erratum is present in 1136, 1156 and 1176. It does not affect the MPCore.
 *
 * Registers:
 *	r0 - set to 0
 *	r1 - corrupted
 */
ENTRY(v6_icache_inval_all)
	mov	r0, #0
	mrs	r1, cpsr			@ save interrupt state
	cpsid	ifa				@ disable interrupts
	@ The invalidate is issued four times with interrupts masked, as
	@ required by the erratum 411920 workaround sequence.
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	msr	cpsr_cx, r1			@ restore interrupts
	.rept	11				@ ARM Ltd recommends at least
	nop					@ 11 NOPs
	.endr
	mov	pc, lr
#endif

/*
 *	v6_flush_kern_cache_all()
 *
 *	Flush the entire cache.
 *
 *	It is assumed that:
 */
ENTRY(v6_flush_kern_cache_all)
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_icache_inval_all		@ tail call; returns to caller
#endif
#else
	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
#endif
	mov	pc, lr

/*
 *	v6_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space
 *
 *	- mm    - mm_struct describing address space
 */
ENTRY(v6_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v6_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- flags - vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
ENTRY(v6_flush_user_cache_range)
	mov	pc, lr				@ no-op: a VIPT cache needs no per-space flush

/*
 *	v6_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v6_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
 UNWIND(.fnstart		)
#ifdef HARVARD_CACHE
	bic	r0, r0, #CACHE_LINE_SIZE - 1	@ align start down to a cache line
1:
 USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line (may fault -> 9001)
	add	r0, r0, #CACHE_LINE_SIZE
2:
	cmp	r0, r1
	blo	1b
#endif
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_icache_inval_all		@ tail call; returns to caller
#endif
#else
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
#endif
	mov	pc, lr

/*
 * Fault handling for the cache operation above. If the virtual address in r0
 * isn't mapped, just try the next page.
 */
9001:
	mov	r0, r0, lsr #12			@ round r0 down to its page base...
	mov	r0, r0, lsl #12
	add	r0, r0, #4096			@ ...and skip to the next page
	b	2b
 UNWIND(.fnend		)
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)

/*
 *	v6_flush_kern_dcache_page(kaddr)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- kaddr   - kernel address (guaranteed to be page aligned)
 */
ENTRY(v6_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ		@ r1 = end of page
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#ifdef HARVARD_CACHE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#endif
	mov	pc, lr


/*
 *	v6_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v6_dma_inv_range)
	@ If start is mid-line, clean that line first so the partial data
	@ outside the DMA region is not lost by the invalidate below.
	tst	r0, #D_CACHE_LINE_SIZE - 1
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	@ Likewise clean+invalidate a partial line at the end of the region.
	tst	r1, #D_CACHE_LINE_SIZE - 1
	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
#else
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
#else
	mcr	p15, 0, r0, c7, c7, 1		@ invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	v6_dma_clean_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v6_dma_clean_range)
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1	@ align start down to a cache line
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcr	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	v6_dma_flush_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v6_dma_flush_range)
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1	@ align start down to a cache line
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

	__INITDATA

	@ Function-pointer table consumed by the generic cache glue; the
	@ entry order must match struct cpu_cache_fns (asm/cacheflush.h) —
	@ do not reorder.
	.type	v6_cache_fns, #object
ENTRY(v6_cache_fns)
	.long	v6_flush_kern_cache_all
	.long	v6_flush_user_cache_all
	.long	v6_flush_user_cache_range
	.long	v6_coherent_kern_range
	.long	v6_coherent_user_range
	.long	v6_flush_kern_dcache_page
	.long	v6_dma_inv_range
	.long	v6_dma_clean_range
	.long	v6_dma_flush_range
	.size	v6_cache_fns, . - v6_cache_fns