/*
 *  linux/arch/arm/mm/proc-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *  Modified by Catalin Marinas for noMMU support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/elf.h>
#include <asm/hardware/arm_scu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include "proc-macros.S"

#define D_CACHE_LINE_SIZE	32

#define TTB_C		(1 << 0)
#define TTB_S		(1 << 1)
#define TTB_IMP		(1 << 2)
#define TTB_RGN_NC	(0 << 3)
#define TTB_RGN_WBWA	(1 << 3)
#define TTB_RGN_WT	(2 << 3)
#define TTB_RGN_WB	(3 << 3)

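/*
 * Illustration only, not assembled: a rough C sketch of how the TTB_*
 * flags above combine with a page table's physical address to form the
 * value written to TTBR0.  With the default TTBCR setting the table base
 * occupies bits [31:14], so the low bits are free to carry the walk
 * attributes.  The helper name make_ttbr0() is hypothetical.
 *
 *	#include <stdint.h>
 *
 *	#define TTB_S		(1 << 1)
 *	#define TTB_RGN_WBWA	(1 << 3)
 *
 *	static inline uint32_t make_ttbr0(uint32_t pgd_phys, int smp)
 *	{
 *		uint32_t ttbr = pgd_phys;	// 16KB-aligned table base
 *
 *		if (smp)			// shared, outer write-back walks
 *			ttbr |= TTB_RGN_WBWA | TTB_S;
 *		return ttbr;
 *	}
 */
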
ENTRY(cpu_v6_proc_init)
	mov	pc, lr

ENTRY(cpu_v6_proc_fin)
	stmfd	sp!, {lr}
	cpsid	if				@ disable interrupts
	bl	v6_flush_kern_cache_all
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x0006			@ .............ca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}

/*
 *	cpu_v6_reset(loc)
 *
 *	Perform a soft reset of the system.  Put the CPU into the
 *	same state as it would be if it had been reset, and branch
 *	to what would be the reset vector.
 *
 *	- loc   - location to jump to for soft reset
 *
 *	It is assumed that:
 */
	.align	5
ENTRY(cpu_v6_reset)
	mov	pc, r0


/*
 *	cpu_v6_do_idle()
 *
 *	Idle the processor (eg, wait for interrupt).
 *
 *	IRQs are already disabled.
 */
ENTRY(cpu_v6_do_idle)
	mcr	p15, 0, r1, c7, c0, 4		@ wait for interrupt
	mov	pc, lr
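
/*
 * Illustration only, not assembled: the "wait for interrupt" operation
 * above expressed as GCC inline assembly.  On ARMv6 this is the CP15
 * c7, c0, 4 operation; the architecture documents the source register as
 * should-be-zero, and the routine above simply passes whatever r1 holds.
 *
 *	static inline void v6_wait_for_interrupt(void)
 *	{
 *		asm volatile("mcr p15, 0, %0, c7, c0, 4" : : "r" (0) : "memory");
 *	}
 */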

ENTRY(cpu_v6_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #D_CACHE_LINE_SIZE
	subs	r1, r1, #D_CACHE_LINE_SIZE
	bhi	1b
#endif
	mov	pc, lr
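
/*
 * Illustration only, not assembled: what the loop above does, restated as
 * C with inline assembly for the "clean D-cache line by MVA" operation
 * (CP15 c7, c10, 1).  The function name and parameters are hypothetical;
 * r0/r1 in the routine above correspond to addr/size here.
 *
 *	#include <stddef.h>
 *
 *	#define D_CACHE_LINE_SIZE	32
 *
 *	static void v6_dcache_clean_area(void *addr, ptrdiff_t size)
 *	{
 *		char *p = addr;
 *
 *		do {
 *			// clean one D-cache line back to memory
 *			asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (p));
 *			p += D_CACHE_LINE_SIZE;
 *			size -= D_CACHE_LINE_SIZE;
 *		} while (size > 0);
 *	}
 */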

/*
 *	cpu_v6_switch_mm(pgd_phys, tsk)
 *
 *	Set the translation table base pointer to be pgd_phys
 *
 *	- pgd_phys - physical address of new TTB
 *
 *	It is assumed that:
 *	- we are not using split page tables
 */
ENTRY(cpu_v6_switch_mm)
#ifdef CONFIG_MMU
	mov	r2, #0
	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
#ifdef CONFIG_SMP
	orr	r0, r0, #TTB_RGN_WBWA|TTB_S	@ mark PTWs shared, outer cacheable
#endif
	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
#endif
	mov	pc, lr
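
/*
 * Illustration only, not assembled: the same context-switch sequence as a
 * C sketch, using inline assembly for the CP15 accesses.  The function
 * name is hypothetical; context_id stands in for mm->context.id.
 *
 *	#include <stdint.h>
 *
 *	#define TTB_S		(1 << 1)
 *	#define TTB_RGN_WBWA	(1 << 3)
 *
 *	static void v6_switch_mm(uint32_t pgd_phys, uint32_t context_id)
 *	{
 *	#ifdef CONFIG_SMP
 *		pgd_phys |= TTB_RGN_WBWA | TTB_S;	// shared, outer cacheable PTWs
 *	#endif
 *		asm volatile("mcr p15, 0, %0, c7, c5, 6"  : : "r" (0));	// flush BTAC/BTB
 *		asm volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));	// drain write buffer
 *		asm volatile("mcr p15, 0, %0, c2, c0, 0"  : : "r" (pgd_phys));	// set TTB 0
 *		asm volatile("mcr p15, 0, %0, c13, c0, 1" : : "r" (context_id));	// set context ID
 *	}
 */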

/*
 *	cpu_v6_set_pte_ext(ptep, pte, ext)
 *
 *	Set a level 2 translation table entry.
 *
 *	- ptep  - pointer to level 2 translation table entry
 *		  (hardware version is stored at -2048 bytes)
 *	- pte   - PTE value to store
 *	- ext	- value for extended PTE bits
 *
 *	Permissions:
 *	  YUWD  APX AP1 AP0	SVC	User
 *	  0xxx   0   0   0	no acc	no acc
 *	  100x   1   0   1	r/o	no acc
 *	  10x0   1   0   1	r/o	no acc
 *	  1011   0   0   1	r/w	no acc
 *	  110x   0   1   0	r/w	r/o
 *	  11x0   0   1   0	r/w	r/o
 *	  1111   0   1   1	r/w	r/w
 */
ENTRY(cpu_v6_set_pte_ext)
#ifdef CONFIG_MMU
	str	r1, [r0], #-2048		@ linux version

	bic	r3, r1, #0x000003f0
	bic	r3, r3, #0x00000003
	orr	r3, r3, r2
	orr	r3, r3, #PTE_EXT_AP0 | 2

	tst	r1, #L_PTE_WRITE
	tstne	r1, #L_PTE_DIRTY
	orreq	r3, r3, #PTE_EXT_APX

	tst	r1, #L_PTE_USER
	orrne	r3, r3, #PTE_EXT_AP1
	tstne	r3, #PTE_EXT_APX
	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0

	tst	r1, #L_PTE_YOUNG
	biceq	r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK

	tst	r1, #L_PTE_EXEC
	orreq	r3, r3, #PTE_EXT_XN

	tst	r1, #L_PTE_PRESENT
	moveq	r3, #0

	str	r3, [r0]
	mcr	p15, 0, r0, c7, c10, 1 @ flush_pte
#endif
	mov	pc, lr
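
/*
 * Illustration only, not assembled: the access-permission derivation the
 * instruction sequence above performs, restated as C.  It relies on the
 * kernel's own L_PTE_* and PTE_EXT_* definitions (<asm/pgtable.h> and
 * <asm/pgtable-hwdef.h>); the function name is hypothetical.
 *
 *	static inline unsigned long v6_mk_hw_pte(unsigned long pte,
 *						 unsigned long ext)
 *	{
 *		// drop the software bits, add ext bits and small-page type
 *		unsigned long hw = (pte & ~0x3f3UL) | ext | PTE_EXT_AP0 | 2;
 *
 *		if (!((pte & L_PTE_WRITE) && (pte & L_PTE_DIRTY)))
 *			hw |= PTE_EXT_APX;		// kernel read-only
 *
 *		if (pte & L_PTE_USER) {
 *			hw |= PTE_EXT_AP1;		// grant user access
 *			if (hw & PTE_EXT_APX)		// user + read-only case
 *				hw &= ~(PTE_EXT_APX | PTE_EXT_AP0);
 *		}
 *
 *		if (!(pte & L_PTE_YOUNG))		// not young: no access
 *			hw &= ~(PTE_EXT_APX | PTE_EXT_AP_MASK);
 *
 *		if (!(pte & L_PTE_EXEC))
 *			hw |= PTE_EXT_XN;
 *
 *		if (!(pte & L_PTE_PRESENT))
 *			hw = 0;
 *
 *		return hw;
 *	}
 */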

cpu_v6_name:
	.asciz	"ARMv6-compatible processor"
	.align

	.section ".text.init", #alloc, #execinstr

/*
 *	__v6_setup
 *
 *	Initialise TLB, Caches, and MMU state ready to switch the MMU
 *	on.  Return in r0 the new CP15 C1 control register setting.
 *
 *	We automatically detect if we have a Harvard cache, and use the
 *	Harvard cache control instructions instead of the unified cache
 *	control instructions.
 *
 *	This should be able to cover all ARMv6 cores.
 *
 *	It is assumed that:
 *	- cache type register is implemented
 */
__v6_setup:
#ifdef CONFIG_SMP
	/* Set up the SCU on core 0 only */
	mrc	p15, 0, r0, c0, c0, 5		@ CPU core number
	ands	r0, r0, #15
	moveq	r0, #0x10000000 @ SCU_BASE
	orreq	r0, r0, #0x00100000
	ldreq	r5, [r0, #SCU_CTRL]
	orreq	r5, r5, #1
	streq	r5, [r0, #SCU_CTRL]

#ifndef CONFIG_CPU_DCACHE_DISABLE
	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
	orr	r0, r0, #0x20
	mcr	p15, 0, r0, c1, c0, 1
#endif
#endif

	mov	r0, #0
	mcr	p15, 0, r0, c7, c14, 0		@ clean+invalidate D cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c15, 0		@ clean+invalidate cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
#ifdef CONFIG_SMP
	orr	r4, r4, #TTB_RGN_WBWA|TTB_S	@ mark PTWs shared, outer cacheable
#endif
	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
#endif /* CONFIG_MMU */
	adr	r5, v6_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ read control register
	bic	r0, r0, r5			@ clear bits
	orr	r0, r0, r6			@ set them
	mov	pc, lr				@ return to head.S:__ret

	/*
	 *         V X F   I D LR
	 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
	 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
	 *         0 110       0011 1.00 .111 1101 < we want
	 */
	.type	v6_crval, #object
v6_crval:
	crval	clear=0x01e0fb7f, mmuset=0x00c0387d, ucset=0x00c0187c
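
/*
 * Illustration only, not assembled: how __v6_setup applies the crval pair
 * above (here the MMU case).  The first word is a mask of control-register
 * bits to force clear, the second the bits to force set; the adjusted value
 * is returned in r0 for head.S to write into CP15 c1.
 *
 *	#include <stdint.h>
 *
 *	static inline uint32_t v6_adjust_cr(uint32_t cr)
 *	{
 *		const uint32_t clear  = 0x01e0fb7f;	// bits we never keep
 *		const uint32_t mmuset = 0x00c0387d;	// bits we always want
 *
 *		return (cr & ~clear) | mmuset;
 *	}
 */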

	.type	v6_processor_functions, #object
ENTRY(v6_processor_functions)
	.word	v6_early_abort
	.word	cpu_v6_proc_init
	.word	cpu_v6_proc_fin
	.word	cpu_v6_reset
	.word	cpu_v6_do_idle
	.word	cpu_v6_dcache_clean_area
	.word	cpu_v6_switch_mm
	.word	cpu_v6_set_pte_ext
	.size	v6_processor_functions, . - v6_processor_functions

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv6"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v6"
	.size	cpu_elf_name, . - cpu_elf_name
	.align

	.section ".proc.info.init", #alloc, #execinstr

	/*
	 * Match any ARMv6 processor core.
	 */
	.type	__v6_proc_info, #object
__v6_proc_info:
	.long	0x0007b000
	.long	0x0007f000
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_XN | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__v6_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
	.long	cpu_v6_name
	.long	v6_processor_functions
	.long	v6wbi_tlb_fns
	.long	v6_user_fns
	.long	v6_cache_fns
	.size	__v6_proc_info, . - __v6_proc_info
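
/*
 * Illustration only, not assembled: how the first two words of the record
 * above are used when the kernel looks up a processor type.  The main ID
 * register is masked with the second word and compared against the first,
 * so the entry is selected whenever (id & mask) == value.  read_main_id()
 * is a hypothetical stand-in for the MRC that lookup_processor_type does.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static inline uint32_t read_main_id(void)
 *	{
 *		uint32_t id;
 *		asm("mrc p15, 0, %0, c0, c0, 0" : "=r" (id));	// Main ID register
 *		return id;
 *	}
 *
 *	static bool v6_proc_info_matches(void)
 *	{
 *		const uint32_t cpu_val  = 0x0007b000;
 *		const uint32_t cpu_mask = 0x0007f000;
 *
 *		return (read_main_id() & cpu_mask) == cpu_val;
 *	}
 */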