/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

#include "proc-macros.S"

#define HARVARD_CACHE			/* separate I and D caches (vs. unified) */
#define CACHE_LINE_SIZE		32	/* I-cache line size in bytes */
#define D_CACHE_LINE_SIZE	32	/* D-cache line size in bytes */
#define BTB_FLUSH_SIZE		8

#ifdef CONFIG_ARM_ERRATA_411920
/*
 * Invalidate the entire I cache (this code is a workaround for the ARM1136
 * erratum 411920 - Invalidate Instruction Cache operation can fail. This
 * erratum is present in 1136, 1156 and 1176. It does not affect the MPCore.
 *
 * Registers:
 *   r0 - set to 0
 *   r1 - corrupted
 *
 * The workaround: with interrupts masked, issue the "invalidate entire
 * I-cache" operation four times, then pad with NOPs before returning.
 */
ENTRY(v6_icache_inval_all)
	mov	r0, #0
	mrs	r1, cpsr			@ save interrupt state
	cpsid	ifa				@ disable interrupts
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	msr	cpsr_cx, r1			@ restore interrupts
	.rept	11				@ ARM Ltd recommends at least
	nop					@ 11 NOPs
	.endr
	mov	pc, lr
#endif

/*
 *	v6_flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 *
 *	With CONFIG_ARM_ERRATA_411920 the I-cache invalidate is done by
 *	tail-calling v6_icache_inval_all, which returns to our caller.
 */
ENTRY(v6_flush_kern_cache_all)
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_icache_inval_all		@ tail-call; returns to caller
#endif
#else
	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
#endif
	mov	pc, lr

/*
 *	v6_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space.
 *
 *	- mm	- mm_struct describing address space
 *
 *	Falls through to v6_flush_user_cache_range, which is a no-op here
 *	(see below).
 */
ENTRY(v6_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v6_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- flags - vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache, so no maintenance is needed for user
 *	  address-space flushes; this routine simply returns.
 */
ENTRY(v6_flush_user_cache_range)
	mov	pc, lr

/*
 *	v6_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v6_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
 UNWIND(.fnstart		)
#ifdef HARVARD_CACHE
	bic	r0, r0, #CACHE_LINE_SIZE - 1	@ align start down to a line
1:
 USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line (may fault; fixup at 9001)
	add	r0, r0, #CACHE_LINE_SIZE
2:						@ re-entry point from the fault fixup below
	cmp	r0, r1
	blo	1b
#endif
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_icache_inval_all		@ tail-call; returns to caller
#endif
#else
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
#endif
	mov	pc, lr

/*
 * Fault handling for the cache operation above.  If the virtual address in r0
 * isn't mapped, just try the next page: round r0 down to its page base,
 * advance one page, and rejoin the loop at label 2.
 */
9001:
	mov	r0, r0, lsr #12			@ r0 = page base of faulting
	mov	r0, r0, lsl #12			@ address (clear low 12 bits)
	add	r0, r0, #4096			@ skip to the next page
	b	2b
 UNWIND(.fnend		)
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)

/*
 *	v6_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v6_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#ifdef HARVARD_CACHE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#endif
	mov	pc, lr


/*
 *	v6_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	Partial lines at an unaligned start or end are cleaned (not just
 *	invalidated) so that unrelated data sharing the line is not lost.
 */
v6_dma_inv_range:
	tst	r0, #D_CACHE_LINE_SIZE - 1	@ start unaligned?
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	tst	r1, #D_CACHE_LINE_SIZE - 1	@ end unaligned?
	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
#else
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
#endif
1:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldr	r2, [r0]			@ read for ownership
	str	r2, [r0]			@ write for ownership
#endif
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
#else
	mcr	p15, 0, r0, c7, c7, 1		@ invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	v6_dma_clean_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v6_dma_clean_range:
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldr	r2, [r0]			@ read for ownership
#endif
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcr	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	v6_dma_flush_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v6_dma_flush_range)
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldr	r2, [r0]			@ read for ownership
	str	r2, [r0]			@ write for ownership
#endif
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 *
 *	Dispatches (as tail-calls) to the inv/clean/flush helpers above.
 */
ENTRY(v6_dma_map_area)
	add	r1, r1, r0			@ r1 = end address
	teq	r2, #DMA_FROM_DEVICE
	beq	v6_dma_inv_range
#ifndef CONFIG_DMA_CACHE_RWFO
	b	v6_dma_clean_range
#else
	teq	r2, #DMA_TO_DEVICE
	beq	v6_dma_clean_range
	b	v6_dma_flush_range
#endif
ENDPROC(v6_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 *
 *	With CONFIG_DMA_CACHE_RWFO the map side already flushed, so this
 *	is a no-op; otherwise non-TO_DEVICE buffers are invalidated.
 */
ENTRY(v6_dma_unmap_area)
#ifndef CONFIG_DMA_CACHE_RWFO
	add	r1, r1, r0			@ r1 = end address
	teq	r2, #DMA_TO_DEVICE
	bne	v6_dma_inv_range		@ tail-call; returns to caller
#endif
	mov	pc, lr
ENDPROC(v6_dma_unmap_area)

	__INITDATA

	/*
	 * Function-pointer table for the ARMv6 cache operations.
	 * NOTE(review): slot order must match struct cpu_cache_fns —
	 * confirm against asm/cacheflush.h before reordering anything.
	 */
	.type	v6_cache_fns, #object
ENTRY(v6_cache_fns)
	.long	v6_flush_kern_cache_all
	.long	v6_flush_user_cache_all
	.long	v6_flush_user_cache_range
	.long	v6_coherent_kern_range
	.long	v6_coherent_user_range
	.long	v6_flush_kern_dcache_area
	.long	v6_dma_map_area
	.long	v6_dma_unmap_area
	.long	v6_dma_flush_range
	.size	v6_cache_fns, . - v6_cache_fns