1/* 2 * linux/arch/arm/mm/cache-v7.S 3 * 4 * Copyright (C) 2001 Deep Blue Solutions Ltd. 5 * Copyright (C) 2005 ARM Ltd. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 * 11 * This is the "shell" of the ARMv7 processor support. 12 */ 13#include <linux/linkage.h> 14#include <linux/init.h> 15#include <asm/assembler.h> 16 17#include "proc-macros.S" 18 19/* 20 * v7_flush_dcache_all() 21 * 22 * Flush the whole D-cache. 23 * 24 * Corrupted registers: r0-r5, r7, r9-r11 25 * 26 * - mm - mm_struct describing address space 27 */ 28ENTRY(v7_flush_dcache_all) 29 mrc p15, 1, r0, c0, c0, 1 @ read clidr 30 ands r3, r0, #0x7000000 @ extract loc from clidr 31 mov r3, r3, lsr #23 @ left align loc bit field 32 beq finished @ if loc is 0, then no need to clean 33 mov r10, #0 @ start clean at cache level 0 34loop1: 35 add r2, r10, r10, lsr #1 @ work out 3x current cache level 36 mov r1, r0, lsr r2 @ extract cache type bits from clidr 37 and r1, r1, #7 @ mask of the bits for current cache only 38 cmp r1, #2 @ see what cache we have at this level 39 blt skip @ skip if no cache, or just i-cache 40 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr 41 isb @ isb to sych the new cssr&csidr 42 mrc p15, 1, r1, c0, c0, 0 @ read the new csidr 43 and r2, r1, #7 @ extract the length of the cache lines 44 add r2, r2, #4 @ add 4 (line length offset) 45 ldr r4, =0x3ff 46 ands r4, r4, r1, lsr #3 @ find maximum number on the way size 47 clz r5, r4 @ find bit position of way size increment 48 ldr r7, =0x7fff 49 ands r7, r7, r1, lsr #13 @ extract max number of the index size 50loop2: 51 mov r9, r4 @ create working copy of max way size 52loop3: 53 orr r11, r10, r9, lsl r5 @ factor way and cache number into r11 54 orr r11, r11, r7, lsl r2 @ factor index number into r11 55 mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way 56 subs r9, r9, #1 @ decrement the way 57 bge loop3 58 subs r7, r7, #1 @ decrement the index 59 bge loop2 60skip: 61 add r10, r10, #2 @ increment cache number 62 cmp r3, r10 63 bgt loop1 64finished: 65 mov r10, #0 @ swith back to cache level 0 66 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr 67 isb 68 mov pc, lr 69ENDPROC(v7_flush_dcache_all) 70 71/* 72 * v7_flush_cache_all() 73 * 74 * Flush the entire cache system. 75 * The data cache flush is now achieved using atomic clean / invalidates 76 * working outwards from L1 cache. This is done using Set/Way based cache 77 * maintainance instructions. 78 * The instruction cache can still be invalidated back to the point of 79 * unification in a single instruction. 80 * 81 */ 82ENTRY(v7_flush_kern_cache_all) 83 stmfd sp!, {r4-r5, r7, r9-r11, lr} 84 bl v7_flush_dcache_all 85 mov r0, #0 86 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate 87 ldmfd sp!, {r4-r5, r7, r9-r11, lr} 88 mov pc, lr 89ENDPROC(v7_flush_kern_cache_all) 90 91/* 92 * v7_flush_cache_all() 93 * 94 * Flush all TLB entries in a particular address space 95 * 96 * - mm - mm_struct describing address space 97 */ 98ENTRY(v7_flush_user_cache_all) 99 /*FALLTHROUGH*/ 100 101/* 102 * v7_flush_cache_range(start, end, flags) 103 * 104 * Flush a range of TLB entries in the specified address space. 105 * 106 * - start - start address (may not be aligned) 107 * - end - end address (exclusive, may not be aligned) 108 * - flags - vm_area_struct flags describing address space 109 * 110 * It is assumed that: 111 * - we have a VIPT cache. 112 */ 113ENTRY(v7_flush_user_cache_range) 114 mov pc, lr 115ENDPROC(v7_flush_user_cache_all) 116ENDPROC(v7_flush_user_cache_range) 117 118/* 119 * v7_coherent_kern_range(start,end) 120 * 121 * Ensure that the I and D caches are coherent within specified 122 * region. This is typically used when code has been written to 123 * a memory region, and will be executed. 124 * 125 * - start - virtual start address of region 126 * - end - virtual end address of region 127 * 128 * It is assumed that: 129 * - the Icache does not read data from the write buffer 130 */ 131ENTRY(v7_coherent_kern_range) 132 /* FALLTHROUGH */ 133 134/* 135 * v7_coherent_user_range(start,end) 136 * 137 * Ensure that the I and D caches are coherent within specified 138 * region. This is typically used when code has been written to 139 * a memory region, and will be executed. 140 * 141 * - start - virtual start address of region 142 * - end - virtual end address of region 143 * 144 * It is assumed that: 145 * - the Icache does not read data from the write buffer 146 */ 147ENTRY(v7_coherent_user_range) 148 dcache_line_size r2, r3 149 sub r3, r2, #1 150 bic r0, r0, r3 1511: mcr p15, 0, r0, c7, c11, 1 @ clean D line to the point of unification 152 dsb 153 mcr p15, 0, r0, c7, c5, 1 @ invalidate I line 154 add r0, r0, r2 155 cmp r0, r1 156 blo 1b 157 mov r0, #0 158 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB 159 dsb 160 isb 161 mov pc, lr 162ENDPROC(v7_coherent_kern_range) 163ENDPROC(v7_coherent_user_range) 164 165/* 166 * v7_flush_kern_dcache_page(kaddr) 167 * 168 * Ensure that the data held in the page kaddr is written back 169 * to the page in question. 170 * 171 * - kaddr - kernel address (guaranteed to be page aligned) 172 */ 173ENTRY(v7_flush_kern_dcache_page) 174 dcache_line_size r2, r3 175 add r1, r0, #PAGE_SZ 1761: 177 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line 178 add r0, r0, r2 179 cmp r0, r1 180 blo 1b 181 dsb 182 mov pc, lr 183ENDPROC(v7_flush_kern_dcache_page) 184 185/* 186 * v7_dma_inv_range(start,end) 187 * 188 * Invalidate the data cache within the specified region; we will 189 * be performing a DMA operation in this region and we want to 190 * purge old data in the cache. 191 * 192 * - start - virtual start address of region 193 * - end - virtual end address of region 194 */ 195ENTRY(v7_dma_inv_range) 196 dcache_line_size r2, r3 197 sub r3, r2, #1 198 tst r0, r3 199 bic r0, r0, r3 200 mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line 201 202 tst r1, r3 203 bic r1, r1, r3 204 mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line 2051: 206 mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line 207 add r0, r0, r2 208 cmp r0, r1 209 blo 1b 210 dsb 211 mov pc, lr 212ENDPROC(v7_dma_inv_range) 213 214/* 215 * v7_dma_clean_range(start,end) 216 * - start - virtual start address of region 217 * - end - virtual end address of region 218 */ 219ENTRY(v7_dma_clean_range) 220 dcache_line_size r2, r3 221 sub r3, r2, #1 222 bic r0, r0, r3 2231: 224 mcr p15, 0, r0, c7, c10, 1 @ clean D / U line 225 add r0, r0, r2 226 cmp r0, r1 227 blo 1b 228 dsb 229 mov pc, lr 230ENDPROC(v7_dma_clean_range) 231 232/* 233 * v7_dma_flush_range(start,end) 234 * - start - virtual start address of region 235 * - end - virtual end address of region 236 */ 237ENTRY(v7_dma_flush_range) 238 dcache_line_size r2, r3 239 sub r3, r2, #1 240 bic r0, r0, r3 2411: 242 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line 243 add r0, r0, r2 244 cmp r0, r1 245 blo 1b 246 dsb 247 mov pc, lr 248ENDPROC(v7_dma_flush_range) 249 250 __INITDATA 251 252 .type v7_cache_fns, #object 253ENTRY(v7_cache_fns) 254 .long v7_flush_kern_cache_all 255 .long v7_flush_user_cache_all 256 .long v7_flush_user_cache_range 257 .long v7_coherent_kern_range 258 .long v7_coherent_user_range 259 .long v7_flush_kern_dcache_page 260 .long v7_dma_inv_range 261 .long v7_dma_clean_range 262 .long v7_dma_flush_range 263 .size v7_cache_fns, . - v7_cache_fns 264