/*
 *  linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core
 *
 *  PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core.
 *
 *  Heavily based on proc-arm926.S and proc-xsc3.S
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define CACHE_DLIMIT	32768

/*
 * The cache line size of the L1 D cache.
 */
#define CACHE_DLINESIZE	32

/*
 * cpu_mohawk_proc_init()
 */
ENTRY(cpu_mohawk_proc_init)
	ret	lr

/*
 * cpu_mohawk_proc_fin()
 */
ENTRY(cpu_mohawk_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...iz...........
	bic	r0, r0, #0x0006			@ .............ca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_mohawk_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * (same as arm926)
 */
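/*
 * Note: the reset code below sits in .idmap.text because it turns the
 * MMU off; it must run from an identity-mapped page while it does so.
 */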
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_mohawk_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x0007			@ .............cam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_mohawk_reset)
	.popsection

/*
 * cpu_mohawk_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	5
ENTRY(cpu_mohawk_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ wait for interrupt
	ret	lr

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(mohawk_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(mohawk_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.
 */
ENTRY(mohawk_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(mohawk_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 0		@ drain write buffer
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 *
 * (same as arm926)
 */
ENTRY(mohawk_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
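	@ the loop below is unrolled by two, so each pass covers
	@ 2 * CACHE_DLINESIZE bytes of the range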
1:	tst	r2, #VM_EXEC
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(mohawk_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as arm926)
 */
ENTRY(mohawk_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
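	@ r0 = 0 is the success return value expected from coherent_user_range()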
	mov	r0, #0
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(mohawk_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
mohawk_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
mohawk_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(mohawk_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(mohawk_dma_map_area)
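	@ dispatch on the DMA direction: DMA_TO_DEVICE needs only a clean,
	@ DMA_FROM_DEVICE (caught by bcs, being greater than DMA_TO_DEVICE)
	@ needs only an invalidate, and DMA_BIDIRECTIONAL falls through to
	@ the full clean+invalidate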
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	mohawk_dma_clean_range
	bcs	mohawk_dma_inv_range
	b	mohawk_dma_flush_range
ENDPROC(mohawk_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(mohawk_dma_unmap_area)
	ret	lr
ENDPROC(mohawk_dma_unmap_area)

	.globl	mohawk_flush_kern_cache_louis
	.equ	mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all
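	@ flush_kern_cache_louis (flush to the Level of Unification Inner
	@ Shareable) is simply aliased to the full cache flush on this core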

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions mohawk

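/*
 * cpu_mohawk_dcache_clean_area(addr, size)
 *
 * Clean the D cache within the given region.
 *
 * - addr	- kernel start address
 * - size	- size of region in bytes
 */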
ENTRY(cpu_mohawk_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * cpu_mohawk_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_mohawk_switch_mm)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ret	lr

/*
 * cpu_mohawk_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_mohawk_set_pte_ext)
	armv3_set_pte_ext
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

.globl	cpu_mohawk_suspend_size
.equ	cpu_mohawk_suspend_size, 4 * 6
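@ six words: the r4-r9 state saved by cpu_mohawk_do_suspend below
@ (clock config, CP access, PID, domain ID, auxiliary control, control reg)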
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_mohawk_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0	@ PID
	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0	@ control reg
	bic	r4, r4, #2		@ clear frequency change bit
	stmia	r0, {r4 - r9}		@ store cp regs
	ldmia	sp!, {r4 - r9, pc}
ENDPROC(cpu_mohawk_do_suspend)

ENTRY(cpu_mohawk_do_resume)
	ldmia	r0, {r4 - r9}		@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
	mcr	p15, 0, ip, c7, c5, 4	@ flush prefetch buffer
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mcr	p14, 0, r4, c6, c0, 0	@ clock configuration, turbo mode.
	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0	@ PID
	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
	orr	r1, r1, #0x18		@ cache the page table in L2
	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mov	r0, r9			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_mohawk_do_resume)
#endif

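/*
 * __mohawk_setup
 *
 * Initialise the processor during boot.  Following the usual proc-*.S
 * convention, r4 is expected to hold the page table pointer on entry,
 * and the value to be written to the control register is returned in
 * r0, built from the mohawk_crval clear/set masks below.
 */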
	.type	__mohawk_setup, #function
__mohawk_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #0				@ don't allow CP access
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	adr	r5, mohawk_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr

	.size	__mohawk_setup, . - __mohawk_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..00 0101
	 *
	 */
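	/*
	 * crval (see proc-macros.S): "clear" is the mask of control register
	 * bits to clear, "mmuset" the bits to set on MMU kernels, and "ucset"
	 * the bits to set on nommu kernels.
	 */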
	.type	mohawk_crval, #object
mohawk_crval:
	crval	clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_mohawk_name, "Marvell 88SV331x"

	.align

	.section ".proc.info.init", #alloc

	.type	__88sv331x_proc_info,#object
__88sv331x_proc_info:
	.long	0x56158000			@ Marvell 88SV331x (MOHAWK)
	.long	0xfffff000
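	@ the two words above are the CPU ID value and mask that the
	@ boot-time processor lookup matches against the main ID register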
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__mohawk_setup, __88sv331x_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_mohawk_name
	.long	mohawk_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	mohawk_cache_fns
	.size	__88sv331x_proc_info, . - __88sv331x_proc_info