/*
 *  linux/arch/arm/mm/cache-v4.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v4_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4_flush_kern_cache_all)
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
	mov	pc, lr
#else
	/* FALLTHROUGH */
#endif

/*
 * flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
	mov	ip, #0
	@ Must be unconditional: this is an ENTRY point called through the
	@ cpu_cache_fns table from C, so the condition flags are undefined
	@ here (the old "mcreq" only flushed when the caller happened to
	@ leave the Z flag set).
	mcr	p15, 0, ip, c7, c7, 0		@ flush ID cache
	mov	pc, lr
#else
	/* FALLTHROUGH */
#endif

/*
 * coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_user_range)
	mov	pc, lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4_flush_kern_dcache_area)
	/* FALLTHROUGH */

/*
 * dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_dma_flush_range)
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
#endif
	mov	pc, lr

/*
 * dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4_dma_unmap_area)
	teq	r2, #DMA_TO_DEVICE
	bne	v4_dma_flush_range
	/* FALLTHROUGH */

/*
 * dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4_dma_map_area)
	mov	pc, lr
ENDPROC(v4_dma_unmap_area)
ENDPROC(v4_dma_map_area)

	__INITDATA

	.type	v4_cache_fns, #object
ENTRY(v4_cache_fns)
	.long	v4_flush_kern_cache_all
	.long	v4_flush_user_cache_all
	.long	v4_flush_user_cache_range
	.long	v4_coherent_kern_range
	.long	v4_coherent_user_range
	.long	v4_flush_kern_dcache_area
	.long	v4_dma_map_area
	.long	v4_dma_unmap_area
	.long	v4_dma_flush_range
	.size	v4_cache_fns, . - v4_cache_fns