/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-uaccess.h>

/*
 * flush_icache_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region.  This is typically used when code has been written to a
 * memory region and will be executed.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
ENTRY(flush_icache_range)
	/* FALLTHROUGH */

/*
 * __flush_cache_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region.  This is typically used when code has been written to a
 * memory region and will be executed.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3, x4
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst			// CTR_EL0.IDC: no D-cache clean to PoU needed
	b	7f
alternative_else_nop_endif
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
user_alt 9f, "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish

7:
alternative_if ARM64_HAS_CACHE_DIC
	isb				// CTR_EL0.DIC: no I-cache invalidation needed
	b	8f
alternative_else_nop_endif
	invalidate_icache_by_line x0, x1, x2, x3, 9f
8:	mov	x0, #0
1:
	uaccess_ttbr0_disable x1, x2
	ret
9:
	mov	x0, #-EFAULT
	b	1b
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)

/*
 * invalidate_icache_range(start,end)
 *
 * Ensure that the I cache is invalid within the specified region.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
ENTRY(invalidate_icache_range)
alternative_if ARM64_HAS_CACHE_DIC
	mov	x0, xzr
	isb				// CTR_EL0.DIC: no I-cache invalidation needed
	ret
alternative_else_nop_endif

	uaccess_ttbr0_enable x2, x3, x4

	invalidate_icache_by_line x0, x1, x2, x3, 2f
	mov	x0, xzr
1:
	uaccess_ttbr0_disable x1, x2
	ret
2:
	mov	x0, #-EFAULT
	b	1b
ENDPROC(invalidate_icache_range)

/*
 * __flush_dcache_area(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned and invalidated to the PoC.
 *
 * - kaddr   - kernel address
 * - size    - size in question
 */
ENTRY(__flush_dcache_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__flush_dcache_area)
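/*
 * Illustrative only, not part of this file: the routines above are
 * reached from C after writing code or data that must become visible.
 * A minimal sketch of the self-modifying-code contract, assuming a
 * hypothetical buffer addr/len:
 *
 *	#include <asm/cacheflush.h>
 *
 *	memcpy(addr, new_insns, len);
 *	flush_icache_range((unsigned long)addr,
 *			   (unsigned long)addr + len);
 *
 * Similarly, __flush_dcache_area(addr, len) would be used before the
 * memory is handed to an agent that is not coherent with the CPU
 * caches; the exact call sites depend on the subsystem.
 */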
/*
 * __clean_dcache_area_pou(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned to the PoU.
 *
 * - kaddr   - kernel address
 * - size    - size in question
 */
ENTRY(__clean_dcache_area_pou)
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst			// CTR_EL0.IDC: no D-cache clean to PoU needed
	ret
alternative_else_nop_endif
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
ENDPROC(__clean_dcache_area_pou)

/*
 * __inval_dcache_area(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are invalidated.  Any partial lines at the ends of the interval are
 * also cleaned to PoC to prevent data loss.
 *
 * - kaddr   - kernel address
 * - size    - size in question
 */
ENTRY(__inval_dcache_area)
	/* FALLTHROUGH */

/*
 * __dma_inv_area(start, size)
 * - start   - virtual start address of region
 * - size    - size in question
 */
__dma_inv_area:
	add	x1, x1, x0
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3			// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1		// clean & invalidate D / U line
1:	tst	x0, x3			// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0		// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0		// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
ENDPIPROC(__inval_dcache_area)
ENDPROC(__dma_inv_area)

/*
 * __clean_dcache_area_poc(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned to the PoC.
 *
 * - kaddr   - kernel address
 * - size    - size in question
 */
ENTRY(__clean_dcache_area_poc)
	/* FALLTHROUGH */

/*
 * __dma_clean_area(start, size)
 * - start   - virtual start address of region
 * - size    - size in question
 */
__dma_clean_area:
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_poc)
ENDPROC(__dma_clean_area)

/*
 * __clean_dcache_area_pop(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned to the PoP.
 *
 * - kaddr   - kernel address
 * - size    - size in question
 */
ENTRY(__clean_dcache_area_pop)
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_pop)

/*
 * __dma_flush_area(start, size)
 *
 * clean & invalidate D / U line
 *
 * - start   - virtual start address of region
 * - size    - size in question
 */
ENTRY(__dma_flush_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__dma_flush_area)

/*
 * __dma_map_area(start, size, dir)
 * - start   - kernel virtual start address
 * - size    - size of region
 * - dir     - DMA direction
 */
ENTRY(__dma_map_area)
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_area		// device will write: drop stale lines
	b	__dma_clean_area	// CPU wrote: clean to PoC
ENDPIPROC(__dma_map_area)

/*
 * __dma_unmap_area(start, size, dir)
 * - start   - kernel virtual start address
 * - size    - size of region
 * - dir     - DMA direction
 */
ENTRY(__dma_unmap_area)
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_area		// device may have written: invalidate
	ret				// DMA_TO_DEVICE: nothing to do
ENDPIPROC(__dma_unmap_area)
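/*
 * Illustrative only, not part of this file: the direction handling in
 * __dma_map_area/__dma_unmap_area above amounts to the following C
 * sketch.  The sketch_* names are hypothetical; the real callers are
 * the arch streaming-DMA mapping ops.
 *
 *	static void sketch_dma_map_area(void *start, size_t size, int dir)
 *	{
 *		if (dir == DMA_FROM_DEVICE)
 *			__dma_inv_area(start, size);	// device writes: invalidate
 *		else
 *			__dma_clean_area(start, size);	// CPU wrote: clean to PoC
 *	}
 *
 *	static void sketch_dma_unmap_area(void *start, size_t size, int dir)
 *	{
 *		if (dir != DMA_TO_DEVICE)
 *			__dma_inv_area(start, size);	// device may have written
 *	}
 */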