/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/

/*-
 * Copyright 2011 Semihalf
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "assym.s"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/pte.h>

__FBSDID("$FreeBSD$");

/* What size should this really be?  It is only used by initarm(). */
#define INIT_ARM_STACK_SIZE	(2048 * 4)

#define	CPWAIT_BRANCH							 \
	sub	pc, pc, #4

#define	CPWAIT(tmp)							 \
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */
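
/*
 * A note on CPWAIT_BRANCH: in ARM state a read of pc returns the address
 * of the current instruction plus 8, so "sub pc, pc, #4" loads pc with
 * (. + 8) - 4, the address of the next instruction.  The effect is a
 * branch-to-next-instruction that drains the pipeline after the CP15
 * access, the classic way to wait for a coprocessor write to take effect
 * on older (pre-ARMv6) cores.
 */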

/*
 * This is for kvm_mkdb, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 */
	.text
	.align	0
.globl kernbase
.set kernbase,KERNBASE
.globl physaddr
.set physaddr,PHYSADDR

/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - pointer to a tagged list or dtb image (phys addr) (passed as arg1 to initarm)
 *
 * For both types of boot we gather up the args, put them in a
 * struct arm_boot_params and pass that to initarm.
 */
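
/*
 * For reference, the 20-byte block built on the stack at virt_done below
 * is what initarm receives as its struct arm_boot_params argument.  The
 * layout sketched here is reconstructed from the stores below; the
 * authoritative definition lives in the machine headers.
 *
 *	offset  0: size of the structure (20)
 *	offset  4: r0 from the boot loader
 *	offset  8: r1 from the boot loader
 *	offset 12: r2 from the boot loader
 *	offset 16: r3 from the boot loader
 */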
ENTRY_NP(btext)
ASENTRY_NP(_start)
	mov	r9, r0		/* 0 or boot mode from boot2 */
	mov	r8, r1		/* Save Machine type */
	mov	ip, r2		/* Save meta data */
	mov	fp, r3		/* Future expansion */

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7

#if defined (FLASHADDR) && defined(LOADERRAMADDR)
	/* Check if we're running from flash. */
	ldr	r7, =FLASHADDR
	/*
	 * If we're running with MMU disabled, test against the
	 * physical address instead.
	 */
	mrc	p15, 0, r2, c1, c0, 0
	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
	ldreq	r6, =PHYSADDR
	ldrne	r6, =LOADERRAMADDR
	cmp	r7, r6
	bls	flash_lower
	cmp	r7, pc
	bhi	from_ram
	b	do_copy

flash_lower:
	cmp	r6, pc
	bls	from_ram
do_copy:
	ldr	r7, =KERNBASE
	adr	r1, _start
	ldr	r0, Lreal_start
	ldr	r2, Lend
	sub	r2, r2, r0
	sub	r0, r0, r7
	add	r0, r0, r6
	mov	r4, r0
	bl	memcpy
	ldr	r0, Lram_offset
	add	pc, r4, r0
Lram_offset:	.word from_ram-_C_LABEL(_start)
from_ram:
	nop
#endif
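
/*
 * A note on the flash copy above (compiled in when FLASHADDR and
 * LOADERRAMADDR are defined): r1 (the memcpy source) is the pc-relative
 * address of _start, i.e. where we are executing right now (flash); r2 is
 * the image size, _edata - _start; and r0 (the memcpy destination) is
 * (_start - KERNBASE) + r6, i.e. _start rebased onto the RAM load address
 * held in r6.  Once memcpy returns, "add pc, r4, r0" jumps into the fresh
 * copy, using the saved destination base in r4 plus the precomputed offset
 * of from_ram within the image.
 */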
	adr	r7, Lunmapped
	bic	r7, r7, #0xf0000000
	orr	r7, r7, #PHYSADDR

disable_mmu:
	/* Disable MMU for a while */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0

	nop
	nop
	nop
	mov	pc, r7
Lunmapped:
#ifdef STARTUP_PAGETABLE_ADDR
	/* build page table from scratch */
	ldr	r0, Lstartup_pagetable
	adr	r4, mmu_init_table
	b	3f

	/* write r1 1MB section descriptors, starting at table offset r2 */
2:
	str	r3, [r0, r2]
	add	r2, r2, #4
	add	r3, r3, #(L1_S_SIZE)
	adds	r1, r1, #-1
	bhi	2b
3:
	ldmia	r4!, {r1,r2,r3}   /* # of sections, VA, PA|attr */
	cmp	r1, #0
	adrne	r5, 2b
	bicne	r5, r5, #0xf0000000
	orrne	r5, r5, #PHYSADDR
	movne	pc, r5

#if defined(SMP)
	orr	r0, r0, #2		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM11) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	/* Set the Domain Access register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
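
	/*
	 * For reference: the DACR gives each of the 16 MMU domains a 2-bit
	 * field, and DOMAIN_CLIENT (0b01) means accesses in that domain are
	 * checked against the page-table permission bits.  The value above
	 * makes domain 0 and the kernel pmap's domain clients; a domain left
	 * at 0b00 (no access) would fault as soon as the MMU is enabled and
	 * something touches a mapping in it.
	 */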
	/* Enable MMU */
	mrc	p15, 0, r0, c1, c0, 0
#if defined(CPU_ARM11) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B)
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

#endif
mmu_done:
	nop
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	sub	r2, r2, r1		/* get size of the zero init (bss) area */
	mov	r3, #0
.L1:
	str	r3, [r1], #0x0004	/* zero the bss */
	subs	r2, r2, #4
	bgt	.L1
	ldr	pc, .Lvirt_done

virt_done:
	mov	r1, #20			/* loader info size is 20 bytes, also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	str	r1, [r0]		/* Store length of loader info */
	str	r9, [r0, #4]		/* Store r0 from boot loader */
	str	r8, [r0, #8]		/* Store r1 from boot loader */
	str	ip, [r0, #12]		/* store r2 from boot loader */
	str	fp, [r0, #16]		/* store r3 from boot loader */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* initarm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)		/* call mi_startup()! */

	adr	r0, .Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
#ifdef STARTUP_PAGETABLE_ADDR
#define MMU_INIT(va,pa,n_sec,attr) \
	.word	n_sec					    ; \
	.word	4*((va)>>L1_S_SHIFT)			    ; \
	.word	(pa)|(attr)				    ;
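
/*
 * A worked example of one MMU_INIT entry: with L1_S_SHIFT being 20 and
 * L1_S_SIZE being 1 MB (one ARM L1 "section" maps a megabyte),
 * MMU_INIT(KERNBASE, PHYSADDR, 64, ...) emits three words: a count of 64,
 * the byte offset 4*(KERNBASE >> 20) of the first descriptor in the L1
 * table, and PHYSADDR combined with the section attributes.  The loop at
 * 2: above then writes 64 consecutive section descriptors, mapping the
 * 64 MB at KERNBASE onto the 64 MB of RAM starting at PHYSADDR.
 */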

Lvirtaddr:
	.word	KERNVIRTADDR
Lphysaddr:
	.word	KERNPHYSADDR
Lreal_start:
	.word	_start
Lend:
	.word	_edata
Lstartup_pagetable:
	.word	STARTUP_PAGETABLE_ADDR
#ifdef SMP
Lstartup_pagetable_secondary:
	.word	temp_pagetable
#endif
mmu_init_table:
	/* fill all table VA==PA */
	/* map SDRAM VA==PA, WT cacheable */
#if !defined(SMP)
	MMU_INIT(PHYSADDR, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
	/* map VA 0xc0000000..0xc3ffffff to PA */
	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#else
	MMU_INIT(PHYSADDR, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
	/* map VA 0xc0000000..0xc3ffffff to PA */
	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
	MMU_INIT(0x48000000, 0x48000000, 1, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
#endif
	.word 0	/* end of table */
#endif
.Lstart:
	.word	_edata
	.word	_end
	.word	svcstk + INIT_ARM_STACK_SIZE

.Lvirt_done:
	.word	virt_done
#if defined(SMP)
.Lmpvirt_done:
	.word	mpvirt_done
#endif

.Lmainreturned:
	.asciz	"main() returned"
	.align	0

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE

	.text
	.align	0

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#if defined(SMP)
Lsramaddr:
	.word	0xffff0080

#if 0
#define	AP_DEBUG(tmp)			\
	mrc	p15, 0, r1, c0, c0, 5;	\
	ldr	r0, Lsramaddr;		\
	add	r0, r1, lsl #2;		\
	mov	r1, tmp;		\
	str	r1, [r0], #0x0000;
#else
#define AP_DEBUG(tmp)
#endif

ASENTRY_NP(mptramp)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0	/* invalidate I+D caches */

	AP_DEBUG(#1)

	mrs	r3, cpsr_all
	bic	r3, r3, #(PSR_MODE)
	orr	r3, r3, #(PSR_SVC32_MODE)
	msr	cpsr_all, r3

	mrc	p15, 0, r0, c0, c0, 5
	and	r0, #0x0f		/* Get CPU ID */

	/* Read boot address for CPU */
	mov	r1, #0x100
	mul	r2, r0, r1
	ldr	r1, Lpmureg
	add	r0, r2, r1
	ldr	r1, [r0], #0x00

	mov	pc, r1

Lpmureg:
	.word	0xd0022124
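
/*
 * In other words, mptramp computes Lpmureg + 0x100 * CPU_ID and jumps to
 * the address stored there, so each secondary core picks up its own boot
 * vector from a per-CPU slot (CPU 1 reads the word at Lpmureg + 0x100,
 * CPU 2 at Lpmureg + 0x200, and so on).  The slot base is a
 * platform-specific register area.
 */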

ASENTRY_NP(mpentry)

	AP_DEBUG(#2)

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7

	adr	r7, Ltag
	bic	r7, r7, #0xf0000000
	orr	r7, r7, #PHYSADDR

	/* Disable MMU for a while */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0

	nop
	nop
	nop

	AP_DEBUG(#3)

Ltag:
	ldr	r0, Lstartup_pagetable_secondary
	bic	r0, r0, #0xf0000000
	orr	r0, r0, #PHYSADDR
	ldr	r0, [r0]
#if defined(SMP)
	orr	r0, r0, #0		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM11) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	AP_DEBUG(#4)

	/* Set the Domain Access register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/* Enable MMU */
	mrc	p15, 0, r0, c1, c0, 0
#if defined(CPU_ARM11) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

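	/*
	 * Carve a per-CPU stack out of the initial stack: sp starts at
	 * svcstk + INIT_ARM_STACK_SIZE, the CPU ID is taken from the low
	 * bits of CP15 c0, c0, 5 (MPIDR), and CPU n then runs with
	 * sp = svcstk + INIT_ARM_STACK_SIZE - n * 2048, i.e. each secondary
	 * core gets its own 2 KB slice below the top used during early boot.
	 */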
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	mrc	p15, 0, r0, c0, c0, 5
	and	r0, r0, #15
	mov	r1, #2048
	mul	r2, r1, r0
	sub	sp, sp, r2
	str	r1, [sp]
	ldr	pc, .Lmpvirt_done

mpvirt_done:

	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	0
#endif

ENTRY_NP(cpu_halt)
	mrs	r2, cpsr
	bic	r2, r2, #(PSR_MODE)
	orr	r2, r2, #(PSR_SVC32_MODE)
	orr	r2, r2, #(I32_bit | F32_bit)
	msr	cpsr_all, r2

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]

	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
	mov	lr, pc
	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]

	/*
	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if the
	 * v4 MMU-disable sequence is necessary.
	 */

	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
	ldr	r1, [r1]
	cmp	r1, #0
	mov	r2, #0

	/*
	 * MMU & IDC off, 32 bit program & data space
	 * Hurl ourselves into the ROM
	 */
	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	mcrne	p15, 0, r2, c8, c7, 0 	/* nail I+D TLB on ARMv4 and greater */
	mov	pc, r4

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off.
	 * This variable is provided by the hardware specific code.
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)

	/*
	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
	 * v4 MMU disable instruction needs executing... it is an illegal
	 * instruction on e.g. ARM6/7 that locks up the computer in an endless
	 * illegal instruction / data-abort / reset loop.
	 */
.Lcpu_reset_needs_v4_MMU_disable:
	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)


/*
 * setjmp + longjmp
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET

ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET

	.data
	.global _C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)

ENTRY_NP(abort)
	b	_C_LABEL(abort)

ENTRY_NP(sigcode)
	mov	r0, sp
	swi	SYS_sigreturn

	/* Well, if that failed we'd better exit quickly! */

	swi	SYS_exit
	b	. - 8
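
	/*
	 * sigcode is the userland signal trampoline: a handler returns here
	 * with sp pointing at the saved signal frame, so sp is passed to
	 * SYS_sigreturn to restore the interrupted context.  If sigreturn
	 * somehow fails, SYS_exit is issued, and "b . - 8" branches back to
	 * the sigreturn swi so the sequence never runs off the end of the
	 * trampoline.
	 */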

	.align	0
	.global _C_LABEL(esigcode)
_C_LABEL(esigcode):

	.data
	.global szsigcode
szsigcode:
	.long esigcode-sigcode
/* End of locore.S */