xref: /linux/arch/arm/mm/proc-v6.S (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
1/*
2 *  linux/arch/arm/mm/proc-v6.S
3 *
4 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 *  This is the "shell" of the ARMv6 processor support.
11 */
12#include <linux/linkage.h>
13#include <asm/assembler.h>
14#include <asm/asm-offsets.h>
15#include <asm/hardware/arm_scu.h>
16#include <asm/procinfo.h>
17#include <asm/pgtable-hwdef.h>
18#include <asm/pgtable.h>
19
20#include "proc-macros.S"
21
22#define D_CACHE_LINE_SIZE	32	/* D-cache line size in bytes used by the clean loop below */
23
/*
 * Translation Table Base register (TTBR) attribute bits, ORed into the
 * page-table physical address to control how hardware table walks are
 * cached/shared (see ARMv6 ARM, CP15 c2 description).
 */
24#define TTB_C		(1 << 0)	/* inner cacheable table walks */
25#define TTB_S		(1 << 1)	/* table walk to shared memory */
26#define TTB_IMP		(1 << 2)	/* implementation defined */
27#define TTB_RGN_NC	(0 << 3)	/* outer non-cacheable */
28#define TTB_RGN_WBWA	(1 << 3)	/* outer write-back, write-allocate */
29#define TTB_RGN_WT	(2 << 3)	/* outer write-through */
30#define TTB_RGN_WB	(3 << 3)	/* outer write-back, no write-allocate */
31
/*
 * cpsie \flags - Change Processor State, Interrupt Enable.
 *
 * Emits the ARMv6 CPSIE instruction as a hand-encoded word so the file
 * assembles even with toolchains that do not know the CPS mnemonic.
 * \flags selects which mask bits to clear (i.e. which interrupts to
 * enable): "f" (FIQ), "i" (IRQ) or "if" (both).  Any other argument is
 * rejected at assembly time via .err.
 */
32	.macro	cpsie, flags
33	.ifc \flags, f
34	.long	0xf1080040		@ cpsie f  (enable FIQ)
35	.exitm
36	.endif
37	.ifc \flags, i
38	.long	0xf1080080		@ cpsie i  (enable IRQ)
39	.exitm
40	.endif
41	.ifc \flags, if
42	.long	0xf10800c0		@ cpsie if (enable IRQ+FIQ)
43	.exitm
44	.endif
45	.err				@ unsupported \flags combination
46	.endm
47
/*
 * cpsid \flags - Change Processor State, Interrupt Disable.
 *
 * Hand-encoded ARMv6 CPSID, mirroring cpsie above: sets the selected
 * mask bits, i.e. disables the named interrupts.  \flags is "f", "i"
 * or "if"; anything else fails the build via .err.
 */
48	.macro	cpsid, flags
49	.ifc \flags, f
50	.long	0xf10c0040		@ cpsid f  (disable FIQ)
51	.exitm
52	.endif
53	.ifc \flags, i
54	.long	0xf10c0080		@ cpsid i  (disable IRQ)
55	.exitm
56	.endif
57	.ifc \flags, if
58	.long	0xf10c00c0		@ cpsid if (disable IRQ+FIQ)
59	.exitm
60	.endif
61	.err				@ unsupported \flags combination
62	.endm
63
/*
 *	cpu_v6_proc_init()
 *
 *	Per-CPU initialisation hook: nothing to do on ARMv6, so just
 *	return to the caller.
 */
64ENTRY(cpu_v6_proc_init)
65	mov	pc, lr
66
/*
 *	cpu_v6_proc_fin()
 *
 *	Quiesce the CPU (e.g. prior to reset): disable IRQ+FIQ, flush
 *	the kernel caches, then turn off the I-cache, D-cache and
 *	alignment checking in the CP15 control register.
 */
67ENTRY(cpu_v6_proc_fin)
68	stmfd	sp!, {lr}
69	cpsid	if				@ disable interrupts
70	bl	v6_flush_kern_cache_all
71	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
72	bic	r0, r0, #0x1000			@ ...i............ clear bit 12 (I-cache enable)
73	bic	r0, r0, #0x0006			@ .............ca. clear bits 2,1 (D-cache, alignment)
74	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
75	ldmfd	sp!, {pc}			@ restore lr and return
76
77/*
78 *	cpu_v6_reset(loc)
79 *
80 *	Perform a soft reset of the system.  Put the CPU into the
81 *	same state as it would be if it had been reset, and branch
82 *	to what would be the reset vector.
83 *
84 *	- loc   - location to jump to for soft reset
85 *
86 *	It is assumed that:
 *	- the caller has already placed the system in a state where a
 *	  plain jump to loc is safe (NOTE(review): this list was left
 *	  empty in the original; confirm caller obligations, e.g.
 *	  caches flushed / loc identity-mapped)
87 */
88	.align	5
89ENTRY(cpu_v6_reset)
90	mov	pc, r0				@ jump to the reset location
91
92/*
93 *	cpu_v6_do_idle()
94 *
95 *	Idle the processor (eg, wait for interrupt) via the CP15
96 *	"wait for interrupt" operation (c7, c0, 4).
97 *
98 *	IRQs are already disabled.
 *
 *	NOTE(review): r1 is passed uninitialised; presumably the
 *	transferred register value is ignored by this CP15 op — confirm
 *	against the ARMv6 ARM (convention elsewhere is to write 0).
99 */
100ENTRY(cpu_v6_do_idle)
101	mcr	p15, 0, r1, c7, c0, 4		@ wait for interrupt
102	mov	pc, lr
102
/*
 *	cpu_v6_dcache_clean_area(addr, size)
 *
 *	Clean (write back) the D-cache lines covering [r0, r0 + r1).
 *
 *	- r0 = virtual start address
 *	- r1 = size in bytes
 *
 *	The loop is skipped entirely when the TLB can fetch page-table
 *	entries directly from the L1 cache (no clean needed then).
 */
103ENTRY(cpu_v6_dcache_clean_area)
104#ifndef TLB_CAN_READ_FROM_L1_CACHE
1051:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry (by MVA)
106	add	r0, r0, #D_CACHE_LINE_SIZE
107	subs	r1, r1, #D_CACHE_LINE_SIZE
108	bhi	1b				@ repeat while bytes remain
109#endif
110	mov	pc, lr
111
112/*
113 *	cpu_v6_switch_mm(pgd_phys, mm)
114 *
115 *	Set the translation table base pointer to be pgd_phys
116 *
117 *	- pgd_phys - physical address of new TTB
 *	- r1       - pointer from which mm->context.id is loaded
 *	             (offset MM_CONTEXT_ID)
118 *
119 *	It is assumed that:
120 *	- we are not using split page tables
 *
 *	(NOTE(review): the original header was mislabelled
 *	"cpu_arm926_switch_mm" — this is the ARMv6 implementation.)
121 */
122ENTRY(cpu_v6_switch_mm)
123	mov	r2, #0
124	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
125#ifdef CONFIG_SMP
126	orr	r0, r0, #TTB_RGN_WBWA|TTB_S	@ mark PTWs shared, outer cacheable
127#endif
128	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
129	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
130	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
131	mcr	p15, 0, r1, c13, c0, 1		@ set context ID (ASID)
132	mov	pc, lr
133
134/*
135 *	cpu_v6_set_pte(ptep, pte)
136 *
137 *	Set a level 2 translation table entry.
138 *
139 *	- ptep  - pointer to level 2 translation table entry
140 *		  (hardware version is stored at -2048 bytes; the code
 *		   below post-decrements r0 by 2048)
141 *	- pte   - PTE value to store
142 *
143 *	Permissions (Linux Young/User/Write/Dirty bits vs the hardware
 *	APX/AP bits generated below):
144 *	  YUWD  APX AP1 AP0	SVC	User
145 *	  0xxx   0   0   0	no acc	no acc
146 *	  100x   1   0   1	r/o	no acc
147 *	  10x0   1   0   1	r/o	no acc
148 *	  1011   0   0   1	r/w	no acc
149 *	  110x   0   1   0	r/w	r/o
150 *	  11x0   0   1   0	r/w	r/o
151 *	  1111   0   1   1	r/w	r/w
152 */
153ENTRY(cpu_v6_set_pte)
154	str	r1, [r0], #-2048		@ linux version; r0 now -> hardware PTE
155
156	bic	r2, r1, #0x000003f0		@ strip Linux-only status bits
157	bic	r2, r2, #0x00000003		@ strip type bits
158	orr	r2, r2, #PTE_EXT_AP0 | 2	@ small page, kernel-accessible
159
160	tst	r1, #L_PTE_WRITE		@ writable...
161	tstne	r1, #L_PTE_DIRTY		@ ...and dirty?
162	orreq	r2, r2, #PTE_EXT_APX		@ no: make it read-only (APX)
163
164	tst	r1, #L_PTE_USER			@ user-accessible?
165	orrne	r2, r2, #PTE_EXT_AP1		@ yes: grant user access
166	tstne	r2, #PTE_EXT_APX		@ user + read-only combination?
167	bicne	r2, r2, #PTE_EXT_APX | PTE_EXT_AP0	@ use AP1-only encoding (r/w|r/o row)
168
169	tst	r1, #L_PTE_YOUNG		@ not young:
170	biceq	r2, r2, #PTE_EXT_APX | PTE_EXT_AP_MASK	@ remove all access -> fault to mark accessed
171
172	tst	r1, #L_PTE_EXEC			@ not executable:
173	orreq	r2, r2, #PTE_EXT_XN		@ set execute-never
174
175	tst	r1, #L_PTE_PRESENT		@ not present:
176	moveq	r2, #0				@ write an invalid (fault) entry
177
178	str	r2, [r0]			@ store hardware PTE
179	mcr	p15, 0, r0, c7, c10, 1 @ flush_pte (clean D line so the walker sees it)
180	mov	pc, lr
181
182
183
184
/* Human-readable CPU name reported via the proc_info table below. */
185cpu_v6_name:
186	.asciz	"Some Random V6 Processor"
187	.align
188
189	.section ".text.init", #alloc, #execinstr
190
191/*
192 *	__v6_setup
193 *
194 *	Initialise TLB, Caches, and MMU state ready to switch the MMU
195 *	on.  Return in r0 the new CP15 C1 control register setting.
196 *
197 *	We automatically detect if we have a Harvard cache, and use the
198 *	Harvard cache control instructions instead of the unified cache
199 *	control instructions.
200 *
201 *	This should be able to cover all ARMv6 cores.
202 *
203 *	It is assumed that:
204 *	- cache type register is implemented
 *	- r4 holds the value to load into TTB1 on entry
 *	  (NOTE(review): r4 is used below without being set here —
 *	  presumably preloaded by head.S; confirm)
205 */
206__v6_setup:
207#ifdef CONFIG_SMP
208	/* Set up the SCU on core 0 only */
209	mrc	p15, 0, r0, c0, c0, 5		@ CPU core number (MPIDR-style)
210	ands	r0, r0, #15			@ core 0? (Z set if so)
211	moveq	r0, #0x10000000 @ SCU_BASE
212	orreq	r0, r0, #0x00100000		@ r0 = 0x10100000, SCU base address
213	ldreq	r5, [r0, #SCU_CTRL]
214	orreq	r5, r5, #1			@ set SCU enable bit
215	streq	r5, [r0, #SCU_CTRL]
216
217#ifndef CONFIG_CPU_DCACHE_DISABLE
218	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
219	orr	r0, r0, #0x20			@ aux ctrl: SMP bit
220	mcr	p15, 0, r0, c1, c0, 1
221#endif
222#endif
223
224	mov	r0, #0
225	mcr	p15, 0, r0, c7, c14, 0		@ clean+invalidate D cache
226	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
227	mcr	p15, 0, r0, c7, c15, 0		@ clean+invalidate cache
228	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
229	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
230	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register (use TTBR0 only)
231#ifdef CONFIG_SMP
232	orr	r4, r4, #TTB_RGN_WBWA|TTB_S	@ mark PTWs shared, outer cacheable
233#endif
234	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
235#ifdef CONFIG_VFP
236	mrc	p15, 0, r0, c1, c0, 2
237	orr	r0, r0, #(0xf << 20)		@ CP10/CP11 full access
238	mcr	p15, 0, r0, c1, c0, 2		@ Enable full access to VFP
239#endif
240	mrc	p15, 0, r0, c1, c0, 0		@ read control register
241	ldr	r5, v6_cr1_clear		@ get mask for bits to clear
242	bic	r0, r0, r5			@ clear the masked bits
243	ldr	r5, v6_cr1_set			@ get mask for bits to set
244	orr	r0, r0, r5			@ set them
245	mov	pc, lr				@ return to head.S:__ret
246
247	/*
248	 *         V X F   I D LR
249	 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
250	 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
251	 *         0 110       0011 1.00 .111 1101 < we want
252	 */
253	.type	v6_cr1_clear, #object
254	.type	v6_cr1_set, #object
255v6_cr1_clear:
256	.word	0x01e0fb7f
257v6_cr1_set:
258	.word	0x00c0387d
259
	/*
	 * struct processor function table for ARMv6 cores.
	 * NOTE(review): slot order must match the struct processor
	 * definition elsewhere in the tree — keep in sync.
	 */
260	.type	v6_processor_functions, #object
261ENTRY(v6_processor_functions)
262	.word	v6_early_abort			@ data abort handler
263	.word	cpu_v6_proc_init		@ per-CPU init (no-op)
264	.word	cpu_v6_proc_fin			@ shutdown: caches off
265	.word	cpu_v6_reset			@ soft reset
266	.word	cpu_v6_do_idle			@ wait for interrupt
267	.word	cpu_v6_dcache_clean_area	@ clean D-cache range
268	.word	cpu_v6_switch_mm		@ switch address space
269	.word	cpu_v6_set_pte			@ write a level-2 PTE
270	.size	v6_processor_functions, . - v6_processor_functions
271
	/* Architecture name strings referenced from __v6_proc_info. */
272	.type	cpu_arch_name, #object
273cpu_arch_name:
274	.asciz	"armv6"
275	.size	cpu_arch_name, . - cpu_arch_name
276
277	.type	cpu_elf_name, #object
278cpu_elf_name:
279	.asciz	"v6"				@ ELF platform string
280	.size	cpu_elf_name, . - cpu_elf_name
281	.align
282
283	.section ".proc.info.init", #alloc, #execinstr

	/*
	 * Match any ARMv6 processor core.
	 *
	 * proc_info_list entry: the boot code scans this section and
	 * matches (CPU id & mask) == value, then tail-calls the setup
	 * stub and uses the tables referenced below.
	 * NOTE(review): field order must match struct proc_info_list —
	 * keep in sync with its definition.
	 */
288	.type	__v6_proc_info, #object
289__v6_proc_info:
290	.long	0x0007b000			@ CPU id match value
291	.long	0x0007f000			@ CPU id match mask
	/* Section-mapping PMD flags used before the real page tables exist: */
292	.long   PMD_TYPE_SECT | \
293		PMD_SECT_BUFFERABLE | \
294		PMD_SECT_CACHEABLE | \
295		PMD_SECT_AP_WRITE | \
296		PMD_SECT_AP_READ
297	b	__v6_setup			@ CPU setup stub (returns CR1 value in r0)
298	.long	cpu_arch_name			@ "armv6"
299	.long	cpu_elf_name			@ "v6"
300	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA
301	.long	cpu_v6_name			@ human-readable CPU name
302	.long	v6_processor_functions		@ processor function table
303	.long	v6wbi_tlb_fns			@ TLB maintenance functions
304	.long	v6_user_fns			@ user-page (copy/clear) functions
305	.long	v6_cache_fns			@ cache maintenance functions
306	.size	__v6_proc_info, . - __v6_proc_info
307