xref: /linux/arch/arm/mm/proc-arm1020.S (revision 5e8d780d745c1619aba81fe7166c5a4b5cad2b84)
/*
 *  linux/arch/arm/mm/proc-arm1020.S: MMU functions for ARM1020
 *
 *  Copyright (C) 2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * These are the low level assembler for performing cache and TLB
 * functions on the arm1020.
 *
 *  CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
 */
#include <linux/linkage.h>
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define MAX_AREA_SIZE	32768

/*
 * The size of one data cache line, in bytes.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	16

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	32768

	.text
/*
 * cpu_arm1020_proc_init()
 *
 * Nothing to initialise on this CPU — return immediately.
 */
ENTRY(cpu_arm1020_proc_init)
	mov	pc, lr

/*
 * cpu_arm1020_proc_fin()
 *
 * Shut the caches down cleanly: mask IRQ/FIQ in SVC mode, clean and
 * invalidate the whole cache, then clear the I, W, C and A bits in
 * the CP15 control register.
 */
ENTRY(cpu_arm1020_proc_fin)
	stmfd	sp!, {lr}
	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr	cpsr_c, ip			@ disable interrupts
	bl	arm1020_flush_kern_cache_all
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000 		@ ...i............
	bic	r0, r0, #0x000e 		@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}

/*
 * cpu_arm1020_reset(loc)
 *
 * Perform a soft reset of the system.	Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_arm1020_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f 		@ ............wcam
	bic	ip, ip, #0x1100 		@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0				@ branch to loc

/*
 * cpu_arm1020_do_idle()
 *
 * Idle the processor until the next interrupt arrives.
 */
	.align	5
ENTRY(cpu_arm1020_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr

/* ================================= CACHE ================================ */

	.align	5
/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(arm1020_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(arm1020_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 16 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 15 to 0
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.  Ranges of CACHE_DLIMIT bytes or more fall
 *	back to flushing the whole cache.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags for this space
 */
ENTRY(arm1020_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm1020_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm1020_coherent_user_range)
	mov	ip, #0
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a line
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.  Cleans+invalidates one page of D-cache lines.
 *
 *	- page	- page aligned address
 */
ENTRY(arm1020_flush_kern_dcache_page)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	add	r1, r0, #PAGE_SZ		@ r1 = end of page
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm1020_dma_inv_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	tst	r0, #CACHE_DLINESIZE - 1	@ start unaligned?
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	tst	r1, #CACHE_DLINESIZE - 1	@ end unaligned?
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm1020_dma_clean_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm1020_dma_flush_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * Cache operation vector for the ARM1020, referenced from the
 * proc_info record below.
 */
ENTRY(arm1020_cache_fns)
	.long	arm1020_flush_kern_cache_all
	.long	arm1020_flush_user_cache_all
	.long	arm1020_flush_user_cache_range
	.long	arm1020_coherent_kern_range
	.long	arm1020_coherent_user_range
	.long	arm1020_flush_kern_dcache_page
	.long	arm1020_dma_inv_range
	.long	arm1020_dma_clean_range
	.long	arm1020_dma_flush_range

342	.align	5
343ENTRY(cpu_arm1020_dcache_clean_area)
344#ifndef CONFIG_CPU_DCACHE_DISABLE
345	mov	ip, #0
3461:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
347	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
348	add	r0, r0, #CACHE_DLINESIZE
349	subs	r1, r1, #CACHE_DLINESIZE
350	bhi	1b
351#endif
352	mov	pc, lr
353
/* =============================== PageTable ============================== */

/*
 * cpu_arm1020_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 * Cleans+invalidates the whole D-cache by set/way, invalidates the
 * I-cache, then loads the new page table base and flushes the TLBs.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm1020_switch_mm)
#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
	mov	r1, #0xF			@ 16 segments
1:	mov	r3, #0x3F			@ 64 entries
2:	mov	ip, r3, LSL #26 		@ shift up entry
	orr	ip, ip, r1, LSL #5		@ shift in/up index
	mcr	p15, 0, ip, c7, c14, 2		@ Clean & Inval DCache entry
	mov	ip, #0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	subs	r3, r3, #1
	cmp	r3, #0
	bge	2b				@ entries 3F to 0
	subs	r1, r1, #1
	cmp	r1, #0
	bge	1b				@ segments 15 to 0

#endif
	mov	r1, #0
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r1, c7, c5, 0		@ invalidate I cache
#endif
	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
#endif /* CONFIG_MMU */
	mov	pc, lr

/*
 * cpu_arm1020_set_pte(ptep, pte)
 *
 * Set a PTE and flush it out.  Stores the Linux view of the PTE at
 * r0, then derives and stores the hardware small-page descriptor
 * 2048 bytes below it, and cleans the line out of the D-cache.
 */
	.align	5
ENTRY(cpu_arm1020_set_pte)
#ifdef CONFIG_MMU
	str	r1, [r0], #-2048		@ linux version

	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	bic	r2, r1, #PTE_SMALL_AP_MASK
	bic	r2, r2, #PTE_TYPE_MASK
	orr	r2, r2, #PTE_TYPE_SMALL

	tst	r1, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW

	tst	r1, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW

	tst	r1, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	eor	r3, r1, #0x0a			@ C & small page?
	tst	r3, #0x0b
	biceq	r2, r2, #4
#endif
	str	r2, [r0]			@ hardware version
	mov	r0, r0				@ no-op
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif /* CONFIG_MMU */
	mov	pc, lr

433	__INIT
434
435	.type	__arm1020_setup, #function
436__arm1020_setup:
437	mov	r0, #0
438	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
439	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
440#ifdef CONFIG_MMU
441	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
442#endif
443	mrc	p15, 0, r0, c1, c0		@ get control register v4
444	ldr	r5, arm1020_cr1_clear
445	bic	r0, r0, r5
446	ldr	r5, arm1020_cr1_set
447	orr	r0, r0, r5
448#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
449	orr	r0, r0, #0x4000 		@ .R.. .... .... ....
450#endif
451	mov	pc, lr
452	.size	__arm1020_setup, . - __arm1020_setup
453
454	/*
455	 *  R
456	 * .RVI ZFRS BLDP WCAM
457	 * .011 1001 ..11 0101
458	 */
459	.type	arm1020_cr1_clear, #object
460	.type	arm1020_cr1_set, #object
461arm1020_cr1_clear:
462	.word	0x593f
463arm1020_cr1_set:
464	.word	0x3935
465
466	__INITDATA
467
468/*
469 * Purpose : Function pointers used to access above functions - all calls
470 *	     come through these
471 */
472	.type	arm1020_processor_functions, #object
473arm1020_processor_functions:
474	.word	v4t_early_abort
475	.word	cpu_arm1020_proc_init
476	.word	cpu_arm1020_proc_fin
477	.word	cpu_arm1020_reset
478	.word	cpu_arm1020_do_idle
479	.word	cpu_arm1020_dcache_clean_area
480	.word	cpu_arm1020_switch_mm
481	.word	cpu_arm1020_set_pte
482	.size	arm1020_processor_functions, . - arm1020_processor_functions
483
484	.section ".rodata"
485
486	.type	cpu_arch_name, #object
487cpu_arch_name:
488	.asciz	"armv5t"
489	.size	cpu_arch_name, . - cpu_arch_name
490
491	.type	cpu_elf_name, #object
492cpu_elf_name:
493	.asciz	"v5"
494	.size	cpu_elf_name, . - cpu_elf_name
495
496	.type	cpu_arm1020_name, #object
497cpu_arm1020_name:
498	.ascii	"ARM1020"
499#ifndef CONFIG_CPU_ICACHE_DISABLE
500	.ascii	"i"
501#endif
502#ifndef CONFIG_CPU_DCACHE_DISABLE
503	.ascii	"d"
504#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
505	.ascii	"(wt)"
506#else
507	.ascii	"(wb)"
508#endif
509#endif
510#ifndef CONFIG_CPU_BPREDICT_DISABLE
511	.ascii	"B"
512#endif
513#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
514	.ascii	"RR"
515#endif
516	.ascii	"\0"
517	.size	cpu_arm1020_name, . - cpu_arm1020_name
518
519	.align
520
521	.section ".proc.info.init", #alloc, #execinstr
522
523	.type	__arm1020_proc_info,#object
524__arm1020_proc_info:
525	.long	0x4104a200			@ ARM 1020T (Architecture v5T)
526	.long	0xff0ffff0
527	.long   PMD_TYPE_SECT | \
528		PMD_SECT_AP_WRITE | \
529		PMD_SECT_AP_READ
530	b	__arm1020_setup
531	.long	cpu_arch_name
532	.long	cpu_elf_name
533	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
534	.long	cpu_arm1020_name
535	.long	arm1020_processor_functions
536	.long	v4wbi_tlb_fns
537	.long	v4wb_user_fns
538	.long	arm1020_cache_fns
539	.size	__arm1020_proc_info, . - __arm1020_proc_info
540