/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wb.S
 *
 *  Copyright (C) 1997-2002 Russell King
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE	16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE	8192
#else
# error Unknown cache size
#endif

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 *    Size	Clean (ticks)	Dirty (ticks)
 *    4096	 21  20  21	 53  55  54
 *    8192	 40  41  40	106 100 102
 *   16384	 77  77  76	140 140 138
 *   32768	150 149 150	214 216 212 <---
 *   65536	296 297 296	351 358 361
 *  131072	591 591 591	656 657 651
 *   Whole	132 136 132	221 217 207 <---
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)

	.data
	.align	2
flush_base:
	.long	FLUSH_BASE
	.text

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(v4wb_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(v4wb_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular address
 * space.
 */
SYM_FUNC_ALIAS(v4wb_flush_user_cache_all, v4wb_flush_kern_cache_all)

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(v4wb_flush_kern_cache_all)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
__flush_whole_cache:
	ldr	r3, =flush_base
	ldr	r1, [r3, #0]
	eor	r1, r1, #CACHE_DSIZE		@ alternate flush area halves
	str	r1, [r3, #0]
	add	r2, r1, #CACHE_DSIZE
1:	ldr	r3, [r1], #32			@ read to allocate, evicting a line
	cmp	r1, r2
	blo	1b
#ifdef FLUSH_BASE_MINICACHE
	add	r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
	sub	r1, r2, #512			@ only 512 bytes
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr
SYM_FUNC_END(v4wb_flush_kern_cache_all)

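/*
 * A note on the loop above: StrongARM provides no single "clean entire
 * D cache" coprocessor operation, so the whole cache is cleaned by
 * reading CACHE_DSIZE bytes of reserved, cacheable memory at the
 * address held in flush_base, one load per 32-byte line; each load
 * allocates a clean line and thereby evicts (writing back, if dirty)
 * whatever was resident.  The eor toggles flush_base between the two
 * halves of a doubly-sized flush window, so consecutive flushes never
 * simply hit lines left behind by the previous pass.  On SA-1100 the
 * 512-byte minicache is swept the same way via FLUSH_BASE_MINICACHE.
 */
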
/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive, page aligned)
 * - end   - end address (exclusive, page aligned)
 * - flags - vm_area_struct flags describing address space
 */
SYM_TYPED_FUNC_START(v4wb_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache

	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache

1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr
SYM_FUNC_END(v4wb_flush_user_cache_range)

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - kernel address
 * - size - region size
 */
SYM_TYPED_FUNC_START(v4wb_flush_kern_dcache_area)
	add	r1, r0, r1
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	v4wb_coherent_user_range
#endif
SYM_FUNC_END(v4wb_flush_kern_dcache_area)

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
SYM_TYPED_FUNC_START(v4wb_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	v4wb_coherent_user_range
#endif
SYM_FUNC_END(v4wb_coherent_kern_range)

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
SYM_TYPED_FUNC_START(v4wb_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(v4wb_coherent_user_range)

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
v4wb_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * Clean (write back) the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
v4wb_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * This is actually the same as v4wb_coherent_kern_range()
 */
	.globl	v4wb_dma_flush_range
	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range

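/*
 * A note on the direction test in v4wb_dma_map_area below: the
 * dma_data_direction values are DMA_BIDIRECTIONAL = 0,
 * DMA_TO_DEVICE = 1 and DMA_FROM_DEVICE = 2, so a single unsigned
 * compare against DMA_TO_DEVICE picks the right operation: equal
 * means clean only, higher (DMA_FROM_DEVICE) means invalidate only,
 * and the remaining lower case (DMA_BIDIRECTIONAL) falls through to
 * clean and invalidate.
 */
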
/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
SYM_TYPED_FUNC_START(v4wb_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	v4wb_dma_clean_range
	bcs	v4wb_dma_inv_range
	b	v4wb_dma_flush_range
SYM_FUNC_END(v4wb_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
SYM_TYPED_FUNC_START(v4wb_dma_unmap_area)
	ret	lr
SYM_FUNC_END(v4wb_dma_unmap_area)

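/*
 * Nothing needs doing at unmap time above: all cache maintenance for
 * every direction is performed up front in v4wb_dma_map_area(), and on
 * these cores that should suffice, as the StrongARM does not
 * speculatively fetch data into the cache while the device owns the
 * buffer.
 */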