/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4.S
 *
 *  Copyright (C) 1997-2002 Russell King
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(v4_flush_icache_all)
	ret	lr
SYM_FUNC_END(v4_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 *
 *	- mm	- mm_struct describing address space
 */
SYM_FUNC_ALIAS(v4_flush_user_cache_all, v4_flush_kern_cache_all)

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(v4_flush_kern_cache_all)
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
	ret	lr
#else
	ret	lr
#endif
SYM_FUNC_END(v4_flush_kern_cache_all)

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start	- start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
SYM_TYPED_FUNC_START(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ flush ID cache
	ret	lr
#else
	ret	lr
#endif
SYM_FUNC_END(v4_flush_user_cache_range)

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
SYM_TYPED_FUNC_START(v4_coherent_kern_range)
	ret	lr
SYM_FUNC_END(v4_coherent_kern_range)

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
SYM_TYPED_FUNC_START(v4_coherent_user_range)
	mov	r0, #0
	ret	lr
SYM_FUNC_END(v4_coherent_user_range)

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
SYM_TYPED_FUNC_START(v4_flush_kern_dcache_area)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	v4_dma_flush_range
#endif
SYM_FUNC_END(v4_flush_kern_dcache_area)
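
/*
 * Editor's note on the DMA entry points below: these CPUs provide no
 * selective clean or invalidate operation, so dma_flush_range can only
 * invalidate the whole I+D cache, dma_unmap_area does so whenever the
 * device may have written to memory (any direction other than
 * DMA_TO_DEVICE), and dma_map_area is a no-op.  The no-op map relies on
 * the data cache of these cores being writethrough (or absent), so there
 * is never dirty data that would need cleaning before DMA to the device.
 */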

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
SYM_TYPED_FUNC_START(v4_dma_flush_range)
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
#endif
	ret	lr
SYM_FUNC_END(v4_dma_flush_range)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(v4_dma_unmap_area)
	teq	r2, #DMA_TO_DEVICE
	bne	v4_dma_flush_range
	ret	lr
SYM_FUNC_END(v4_dma_unmap_area)

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(v4_dma_map_area)
	ret	lr
SYM_FUNC_END(v4_dma_map_area)
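
/*
 * Editor's note: kernels built without CONFIG_CPU_CP15 (cores with no
 * system control coprocessor, and hence no cache to maintain) take the
 * #else paths above, so every operation in this file reduces to a plain
 * return.
 */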