/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>

#include "proc-macros.S"

#define HARVARD_CACHE
#define CACHE_LINE_SIZE		32
#define D_CACHE_LINE_SIZE	32
#define BTB_FLUSH_SIZE		8

.arch armv6

/*
 *	v6_flush_icache_all()
 *
 *	Flush the whole I-cache.
 *
 *	ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
 *	This erratum is present in 1136, 1156 and 1176.  It does not affect the
 *	MPCore.
 *
 *	Registers:
 *	r0 - set to 0
 *	r1 - corrupted
 */
ENTRY(v6_flush_icache_all)
	mov	r0, #0
#ifdef CONFIG_ARM_ERRATA_411920
	mrs	r1, cpsr
	cpsid	ifa				@ disable interrupts
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	msr	cpsr_cx, r1			@ restore interrupts
	.rept	11				@ ARM Ltd recommends at least
	nop					@ 11 NOPs
	.endr
#else
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I-cache
#endif
	ret	lr
ENDPROC(v6_flush_icache_all)

/*
 *	v6_flush_kern_cache_all()
 *
 *	Flush the entire cache.
 */
ENTRY(v6_flush_kern_cache_all)
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_flush_icache_all
#endif
#else
	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
#endif
	ret	lr

/*
 *	v6_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v6_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v6_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
ENTRY(v6_flush_user_cache_range)
	ret	lr
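
/*
 * Note: the two "coherent" entry points below are typically reached via
 * flush_icache_range() and the userspace cacheflush() path after new
 * instructions have been written to memory.
 */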

/*
 *	v6_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v6_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
 UNWIND(.fnstart		)
#ifdef HARVARD_CACHE
	bic	r0, r0, #CACHE_LINE_SIZE - 1
1:
 USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line
	add	r0, r0, #CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#endif
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_flush_icache_all
#endif
#else
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
#endif
	ret	lr

/*
 * Fault handling for the cache operation above. If the virtual address in r0
 * isn't mapped, fail with -EFAULT.
 */
9001:
	mov	r0, #-EFAULT
	ret	lr
 UNWIND(.fnend		)
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)

/*
 *	v6_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v6_flush_kern_dcache_area)
	add	r1, r0, r1
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#ifdef HARVARD_CACHE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#endif
	ret	lr
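
/*
 * Note on the DMA range operations below: before invalidating, any cache
 * line that is only partially covered by the buffer (an unaligned start or
 * end address) is first cleaned, so that dirty data belonging to adjacent
 * objects sharing that line is not discarded.
 */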

/*
 *	v6_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v6_dma_inv_range:
	tst	r0, #D_CACHE_LINE_SIZE - 1
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	tst	r1, #D_CACHE_LINE_SIZE - 1
	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
#else
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
#else
	mcr	p15, 0, r0, c7, c7, 1		@ invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	v6_dma_clean_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v6_dma_clean_range:
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcr	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	v6_dma_flush_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v6_dma_flush_range)
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v6_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_FROM_DEVICE
	beq	v6_dma_inv_range
	b	v6_dma_clean_range
ENDPROC(v6_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v6_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v6_dma_inv_range
	ret	lr
ENDPROC(v6_dma_unmap_area)

	.globl	v6_flush_kern_cache_louis
	.equ	v6_flush_kern_cache_louis, v6_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v6
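
	@ For reference, define_cache_functions emits the v6_cache_fns
	@ entry-point table that is plugged into struct cpu_cache_fns.
	@ A rough sketch of the C side (see <asm/cacheflush.h> for the
	@ authoritative layout):
	@
	@	struct cpu_cache_fns {
	@		void (*flush_icache_all)(void);
	@		void (*flush_kern_all)(void);
	@		...
	@		void (*dma_flush_range)(const void *, const void *);
	@	};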