/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARMv4 write-through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384
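
/*
 * For reference: CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE
 * = 8 * 64 * 32 = 16384 bytes, so CACHE_DLIMIT corresponds to a range
 * the size of the entire D cache described by the constants above.
 */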

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(v4wt_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
SYM_FUNC_ALIAS(v4wt_flush_user_cache_all, v4wt_flush_kern_cache_all)

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC			@ request I cache invalidation too
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	ret	lr
SYM_FUNC_END(v4wt_flush_kern_cache_all)
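
/*
 * Note: a plain invalidate of both caches is sufficient here; a
 * write-through D cache never holds dirty data, so there is nothing to
 * clean before invalidating.
 */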

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
SYM_TYPED_FUNC_START(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache		@ large range: flush everything

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE	@ next cache line
	cmp	r0, r1
	blo	1b
	ret	lr
SYM_FUNC_END(v4wt_flush_user_cache_range)
99
100/*
101 *	coherent_kern_range(start, end)
102 *
103 *	Ensure coherency between the Icache and the Dcache in the
104 *	region described by start.  If you have non-snooping
105 *	Harvard caches, you need to implement this function.
106 *
107 *	- start  - virtual start address
108 *	- end	 - virtual end address
109 */
110SYM_TYPED_FUNC_START(v4wt_coherent_kern_range)
111#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
112	b	v4wt_coherent_user_range
113#endif
114SYM_FUNC_END(v4wt_coherent_kern_range)
115
/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
SYM_TYPED_FUNC_START(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align to a cache line boundary
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0				@ return success
	ret	lr
SYM_FUNC_END(v4wt_coherent_user_range)
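
/*
 * Note: only the I cache needs invalidating above.  The D cache is
 * write-through (and the write buffer is assumed to be off, see the file
 * header), so newly written instructions are already visible in memory;
 * discarding the stale I cache lines is enough to make the range coherent.
 */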

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
SYM_TYPED_FUNC_START(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1			@ r1 = end address
	b	v4wt_dma_inv_range		@ invalidate the D cache lines
SYM_FUNC_END(v4wt_flush_kern_dcache_area)

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.  (Not an issue here: a write-through cache never has
 *	anything to write back.)
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wt_dma_inv_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start to a cache line
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
SYM_TYPED_FUNC_START(v4wt_dma_flush_range)
	b	v4wt_dma_inv_range		@ write-through: clean is a no-op
SYM_FUNC_END(v4wt_dma_flush_range)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(v4wt_dma_unmap_area)
	add	r1, r1, r0			@ r1 = end address
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range		@ invalidate unless DMA was to device
	ret	lr
SYM_FUNC_END(v4wt_dma_unmap_area)
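
/*
 * Note: after a DMA_FROM_DEVICE or DMA_BIDIRECTIONAL transfer the D cache
 * may still hold stale copies of the buffer, so those lines are discarded
 * above; for DMA_TO_DEVICE the write-through cache already matches memory
 * and nothing needs to be done.
 */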

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(v4wt_dma_map_area)
	ret	lr				@ nothing to do at map time
SYM_FUNC_END(v4wt_dma_map_area)
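
/*
 * Rough usage sketch (illustrative only, not part of this file): the
 * streaming DMA API reaches the two entry points above through the CPU's
 * cache function table, e.g.
 *
 *	dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *		-> v4wt_dma_map_area		(nothing to clean on a
 *						 write-through cache)
 *	... the device writes into buf ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *		-> v4wt_dma_unmap_area
 *		-> v4wt_dma_inv_range		(discard stale lines so the
 *						 CPU sees the DMA'd data)
 */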