xref: /linux/arch/arm/mm/proc-arm922.S (revision 5e8d780d745c1619aba81fe7166c5a4b5cad2b84)
1/*
2 *  linux/arch/arm/mm/proc-arm922.S: MMU functions for ARM922
3 *
4 *  Copyright (C) 1999,2000 ARM Limited
5 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
6 *  Copyright (C) 2001 Altera Corporation
7 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
22 *
23 *
24 * These are the low level assembler for performing cache and TLB
25 * functions on the arm922.
26 *
27 *  CONFIG_CPU_ARM922_CPU_IDLE -> nohlt
28 */
29#include <linux/linkage.h>
30#include <linux/config.h>
31#include <linux/init.h>
32#include <asm/assembler.h>
33#include <asm/pgtable-hwdef.h>
34#include <asm/pgtable.h>
35#include <asm/procinfo.h>
36#include <asm/page.h>
37#include <asm/ptrace.h>
38#include "proc-macros.S"
39
/*
 * The size of one data cache line, in bytes.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	4

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size (in bytes) at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.  (I think this should
 * be 32768).
 */
#define CACHE_DLIMIT	8192
62
63
	.text
/*
 * cpu_arm922_proc_init()
 *
 * Nothing to do at per-CPU init time on ARM922.
 */
ENTRY(cpu_arm922_proc_init)
	mov	pc, lr
70
/*
 * cpu_arm922_proc_fin()
 *
 * Shut the caches down cleanly before a reboot/kexec-style handover:
 * mask interrupts, flush the caches, then turn off the I-cache,
 * D-cache and write buffer via the control register.
 */
ENTRY(cpu_arm922_proc_fin)
	stmfd	sp!, {lr}
	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr	cpsr_c, ip			@ disable IRQ and FIQ
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	bl	arm922_flush_kern_cache_all
#else
	bl	v4wt_flush_kern_cache_all
#endif
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}
88
/*
 * cpu_arm922_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset (passed in r0)
 */
	.align	5
ENTRY(cpu_arm922_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ MMU/caches off; now flat-mapped
	mov	pc, r0				@ jump to reset location
111
/*
 * cpu_arm922_do_idle()
 *
 * Idle the CPU until the next interrupt (CP15 wait-for-interrupt).
 */
	.align	5
ENTRY(cpu_arm922_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr
119
120
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.  Falls through: on ARM922 this is the same
 *	operation as flushing the whole cache.
 */
ENTRY(arm922_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache, using index/segment
 *	operations so no address mapping is required.
 */
ENTRY(arm922_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 3 to 0
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
152
/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.  Ranges of CACHE_DLIMIT bytes or
 *	more are handled by flushing the whole cache instead.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space (VM_EXEC => I-cache too)
 */
ENTRY(arm922_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
178
/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm922_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end: clean each D line out to
 *	memory and invalidate the corresponding I line.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm922_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
211
/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.  Cleans+invalidates one page of D-cache lines,
 *	then invalidates the whole I cache.
 *
 *	- addr	- page aligned address
 */
ENTRY(arm922_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ		@ r1 = end of page
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
230
/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those partial lines are cleaned
 *	first so adjacent data outside the range is not lost.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm922_dma_inv_range)
	tst	r0, #CACHE_DLINESIZE - 1	@ unaligned start?
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1	@ unaligned end?
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
256
/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back, without invalidating) the specified
 *	virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm922_dma_clean_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
275
/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm922_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a line
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
292
/*
 * Function-pointer table for the ARM922 cache operations.
 * NOTE(review): entry order presumably matches struct cpu_cache_fns —
 * confirm against asm/cacheflush.h before reordering.
 */
ENTRY(arm922_cache_fns)
	.long	arm922_flush_kern_cache_all
	.long	arm922_flush_user_cache_all
	.long	arm922_flush_user_cache_range
	.long	arm922_coherent_kern_range
	.long	arm922_coherent_user_range
	.long	arm922_flush_kern_dcache_page
	.long	arm922_dma_inv_range
	.long	arm922_dma_clean_range
	.long	arm922_dma_flush_range

#endif
305
306
/*
 * cpu_arm922_dcache_clean_area(addr, size)
 *
 * Clean (write back) 'size' bytes of D-cache starting at 'addr'.
 * A no-op when the D-cache is write-through, since there is then
 * nothing dirty to write back.
 *
 * - r0 = virtual start address
 * - r1 = size in bytes
 */
ENTRY(cpu_arm922_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mov	pc, lr
315
/* =============================== PageTable ============================== */

/*
 * cpu_arm922_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 * The caches are flushed first because they are virtually indexed
 * and would otherwise hold stale entries from the old mapping.
 *
 * pgd: physical address of the new page tables (in r0)
 */
	.align	5
ENTRY(cpu_arm922_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip

	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mov	pc, lr
350
/*
 * cpu_arm922_set_pte(ptep, pte)
 *
 * Set a PTE and flush it out.
 *
 * r0 = pointer to the Linux-view PTE; the hardware-view PTE is
 *      stored 2048 bytes below it (see the post-decrement store).
 * r1 = Linux PTE value
 */
	.align	5
ENTRY(cpu_arm922_set_pte)
#ifdef CONFIG_MMU
	str	r1, [r0], #-2048		@ linux version; r0 -> hw slot

	@ Toggle these bits so the tst's below take the NE path when the
	@ bit was *clear* in the original pte value.
	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	bic	r2, r1, #PTE_SMALL_AP_MASK
	bic	r2, r2, #PTE_TYPE_MASK
	orr	r2, r2, #PTE_TYPE_SMALL

	tst	r1, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW

	tst	r1, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW

	tst	r1, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0				@ not both set -> fault entry

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	eor	r3, r2, #0x0a			@ C & small page?
	tst	r3, #0x0b
	biceq	r2, r2, #4			@ clear B bit for write-through
#endif
	str	r2, [r0]			@ hardware version
	mov	r0, r0				@ nop (pipeline spacer)
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif /* CONFIG_MMU */
	mov	pc, lr
387
	__INIT

/*
 * __arm922_setup
 *
 * Boot-time CPU initialisation: invalidate the caches, drain the
 * write buffer, invalidate the TLBs, then compute the control
 * register value (current value masked by arm922_cr1_clear, OR'd
 * with arm922_cr1_set) and return it in r0 for the caller to write.
 * Clobbers r0 and r5.
 */
	.type	__arm922_setup, #function
__arm922_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	ldr	r5, arm922_cr1_clear
	bic	r0, r0, r5			@ clear unwanted control bits
	ldr	r5, arm922_cr1_set
	orr	r0, r0, r5			@ set required control bits
	mov	pc, lr
	.size	__arm922_setup, . - __arm922_setup

	/*
	 * Control register bit layout:
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 0001 ..11 0101
	 */
	.type	arm922_cr1_clear, #object
	.type	arm922_cr1_set, #object
arm922_cr1_clear:
	.word	0x3f3f
arm922_cr1_set:
	.word	0x3135
418
	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm922_processor_functions, #object
arm922_processor_functions:
	.word	v4t_early_abort
	.word	cpu_arm922_proc_init
	.word	cpu_arm922_proc_fin
	.word	cpu_arm922_reset
	.word   cpu_arm922_do_idle
	.word	cpu_arm922_dcache_clean_area
	.word	cpu_arm922_switch_mm
	.word	cpu_arm922_set_pte
	.size	arm922_processor_functions, . - arm922_processor_functions
436
	.section ".rodata"

/*
 * Human-readable CPU identification strings.  The cache-policy
 * suffixes on the ARM922T name are assembled from the configured
 * I/D cache options at build time.
 */
	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv4t"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v4"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_arm922_name, #object
cpu_arm922_name:
	.ascii	"ARM922T"
#ifndef CONFIG_CPU_ICACHE_DISABLE
	.ascii	"i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
	.ascii	"d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	.ascii	"(wt)"
#else
	.ascii	"(wb)"
#endif
#endif
	.ascii	"\0"
	.size	cpu_arm922_name, . - cpu_arm922_name

	.align
467
	.section ".proc.info.init", #alloc, #execinstr

/*
 * Processor matching record: the boot code compares the CPU ID
 * (masked) against this entry to select the ARM922T setup routine
 * and per-CPU function tables.
 */
	.type	__arm922_proc_info,#object
__arm922_proc_info:
	.long	0x41009220			@ CPU ID value
	.long	0xff00fff0			@ CPU ID mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm922_setup			@ boot-time setup entry
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm922_name
	.long	arm922_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	.long	arm922_cache_fns
#else
	.long	v4wt_cache_fns
#endif
	.size	__arm922_proc_info, . - __arm922_proc_info
494