xref: /freebsd/sys/arm/arm/locore.S (revision f1f890804985a1043da42a5def13c79dc005f5e9)
1/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/
2
3/*-
4 * Copyright 2011 Semihalf
5 * Copyright (C) 1994-1997 Mark Brinicombe
6 * Copyright (C) 1994 Brini
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Brini.
20 * 4. The name of Brini may not be used to endorse or promote products
21 *    derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36#include "assym.s"
37#include <sys/syscall.h>
38#include <machine/asm.h>
39#include <machine/armreg.h>
40#include <machine/pte.h>
41
42__FBSDID("$FreeBSD$");
43
44/* What size should this really be ? It is only used by initarm() */
45#define INIT_ARM_STACK_SIZE	(2048 * 4)
46
/*
 * CPWAIT: canonical sequence to wait for a CP15 operation to take
 * effect -- an arbitrary CP15 read, a dependency on the result, then
 * "sub pc, pc, #4", which drains the pipeline and continues at the
 * next instruction.
 */
47#define	CPWAIT_BRANCH							 \
48	sub	pc, pc, #4
49
50#define	CPWAIT(tmp)							 \
51	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
52	mov	tmp, tmp		/* wait for it to complete */	;\
53	CPWAIT_BRANCH			/* branch to next insn */
54
55/*
56 * This is for kvm_mkdb, and should be the address of the beginning
57 * of the kernel text segment (not necessarily the same as kernbase).
58 */
59	.text
60	.align	0
/* Export the configured kernel virtual base and physical load address. */
61.globl kernbase
62.set kernbase,KERNBASE
63.globl physaddr
64.set physaddr,PHYSADDR
65
66/*
67 * On entry for FreeBSD boot ABI:
68 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
69 *	r1 - if (r0 == 0) then metadata pointer
70 * On entry for Linux boot ABI:
71 *	r0 - 0
72 *	r1 - machine type (passed as arg2 to initarm)
73 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
74 *
75 * For both types of boot we gather up the args, put them in a struct arm_boot_params
76 * structure and pass that to initarm.
77 */
78ENTRY_NP(btext)
79ASENTRY_NP(_start)
	/* Stash the four boot-loader registers until virt_done stores them */
80	mov	r9, r0		/* 0 or boot mode from boot2 */
81	mov	r8, r1		/* Save Machine type */
82	mov	ip, r2		/* Save meta data */
83	mov	fp, r3		/* Future expansion */
84
85	/* Make sure interrupts are disabled. */
86	mrs	r7, cpsr
87	orr	r7, r7, #(I32_bit|F32_bit)
88	msr	cpsr_c, r7
89
90#if defined (FLASHADDR) && defined(LOADERRAMADDR)
91	/* Check if we're running from flash. */
92	ldr	r7, =FLASHADDR
93	/*
94	 * If we're running with MMU disabled, test against the
95	 * physical address instead.
96	 */
97	mrc     p15, 0, r2, c1, c0, 0
98	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
99	ldreq	r6, =PHYSADDR
100	ldrne	r6, =LOADERRAMADDR
101	cmp	r7, r6
102	bls 	flash_lower
103	cmp	r7, pc
104	bhi	from_ram
105	b	do_copy
106
107flash_lower:
108	cmp	r6, pc
109	bls	from_ram
110do_copy:
	/*
	 * Running from flash: copy the image (current _start up to
	 * _edata) into RAM, then jump to from_ram inside the RAM copy.
	 * memcpy args: r0 = RAM destination, r1 = flash source, r2 = len.
	 */
111	ldr	r7, =KERNBASE
112	adr	r1, _start
113	ldr	r0, Lreal_start
114	ldr	r2, Lend
115	sub	r2, r2, r0
116	sub	r0, r0, r7
117	add	r0, r0, r6
118	mov	r4, r0			/* r4 (callee-saved) survives memcpy */
119	bl	memcpy
120	ldr	r0, Lram_offset
121	add	pc, r4, r0		/* jump to from_ram in the RAM copy */
122Lram_offset:	.word from_ram-_C_LABEL(_start)
123from_ram:
124	nop
125#endif
	/* Physical address of Lunmapped, jumped to once the MMU is off */
126	adr	r7, Lunmapped
127	bic     r7, r7, #0xf0000000
128	orr     r7, r7, #PHYSADDR
129
130
131disable_mmu:
132	/* Disable MMU for a while */
133	mrc     p15, 0, r2, c1, c0, 0
134	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
135	    CPU_CONTROL_WBUF_ENABLE)
136	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
137	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
138	mcr     p15, 0, r2, c1, c0, 0
139
140	nop
141	nop
142	nop
143	mov	pc, r7			/* continue at Lunmapped via its phys addr */
144Lunmapped:
145#ifdef STARTUP_PAGETABLE_ADDR
146	/* build page table from scratch */
	/*
	 * Walk mmu_init_table: each entry is three words -- number of 1MB
	 * sections, byte offset into the L1 table for the VA, and PA merged
	 * with section attributes (see MMU_INIT).  The loop at 2: fills
	 * consecutive L1 section descriptors; a count of 0 ends the table.
	 */
147	ldr	r0, Lstartup_pagetable
148	adr	r4, mmu_init_table
149	b	3f
150
1512:
152	str	r3, [r0, r2]		/* store one section descriptor */
153	add	r2, r2, #4		/* advance to next L1 slot */
154	add	r3, r3, #(L1_S_SIZE)	/* advance PA by one section */
155	adds	r1, r1, #-1
156	bhi	2b
1573:
158	ldmia	r4!, {r1,r2,r3}   /* # of sections, VA, PA|attr */
159	cmp	r1, #0
160	adrne	r5, 2b
161	bicne	r5, r5, #0xf0000000
162	orrne	r5, r5, #PHYSADDR
163	movne	pc, r5			/* more entries: loop via phys addr of 2b */
164
165#if defined(SMP)
166	orr 	r0, r0, #2		/* Set TTB shared memory flag */
167#endif
168	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
169	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */
170
171#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B)
172	mov	r0, #0
173	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
174#endif
175
176	/* Set the Domain Access register.  Very important! */
177	mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
178	mcr	p15, 0, r0, c3, c0, 0
179	/* Enable MMU */
180	mrc	p15, 0, r0, c1, c0, 0
181#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B)
182	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
183#endif
184	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
185	mcr	p15, 0, r0, c1, c0, 0
186	nop
187	nop
188	nop
189	CPWAIT(r0)
190
191#endif
192mmu_done:
193	nop
	/* Zero the BSS (.Lstart holds _edata, _end, initial sp) */
194	adr	r1, .Lstart
195	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
196	sub	r2, r2, r1		/* get zero init data */
197	mov	r3, #0
198.L1:
199	str	r3, [r1], #0x0004	/* get zero init data */
200	subs	r2, r2, #4
201	bgt	.L1
202	ldr	pc, .Lvirt_done		/* continue at virt_done at its mapped VA */
203
204virt_done:
	/*
	 * Build a 20-byte struct arm_boot_params on the stack from the
	 * registers saved at _start and hand it to initarm().
	 */
205	mov	r1, #20			/* loader info size is 20 bytes also second arg */
206	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
207	bic	sp, sp, #7		/* align stack to 8 bytes */
208	mov	r0, sp			/* loader info pointer is first arg */
209	str	r1, [r0]		/* Store length of loader info */
210	str	r9, [r0, #4]		/* Store r0 from boot loader */
211	str	r8, [r0, #8]		/* Store r1 from boot loader */
212	str	ip, [r0, #12]		/* store r2 from boot loader */
213	str	fp, [r0, #16]		/* store r3 from boot loader */
214	mov	fp, #0			/* trace back starts here */
215	bl	_C_LABEL(initarm)	/* Off we go */
216
217	/* init arm will return the new stack pointer. */
218	mov	sp, r0
219
220	bl	_C_LABEL(mi_startup)		/* call mi_startup()! */
221
	/* mi_startup() must never return; panic if it does */
222	adr	r0, .Lmainreturned
223	b	_C_LABEL(panic)
224	/* NOTREACHED */
225#ifdef STARTUP_PAGETABLE_ADDR
/*
 * MMU_INIT emits one bootstrap-mapping entry consumed by the loop at
 * Lunmapped: section count, byte offset into the L1 table for the VA,
 * and the PA merged with its L1 section attributes.
 */
226#define MMU_INIT(va,pa,n_sec,attr) \
227	.word	n_sec					    ; \
228	.word	4*((va)>>L1_S_SHIFT)			    ; \
229	.word	(pa)|(attr)				    ;
230
/* Literal pool for the bootstrap page-table code above */
231Lvirtaddr:
232	.word	KERNVIRTADDR
233Lphysaddr:
234	.word	KERNPHYSADDR
235Lreal_start:
236	.word	_start
237Lend:
238	.word	_edata
239Lstartup_pagetable:
240	.word	STARTUP_PAGETABLE_ADDR
241#ifdef SMP
242Lstartup_pagetable_secondary:
243	.word	temp_pagetable
244#endif
245mmu_init_table:
246	/* fill all table VA==PA */
247	/* map SDRAM VA==PA, WT cacheable */
248#if !defined(SMP)
249	MMU_INIT(PHYSADDR, PHYSADDR , 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
250	/* map VA 0xc0000000..0xc3ffffff to PA */
251	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
252#else
253	MMU_INIT(PHYSADDR, PHYSADDR , 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
254	/* map VA 0xc0000000..0xc3ffffff to PA */
255	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
256	MMU_INIT(0x48000000, 0x48000000, 1, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
257#endif
258	.word 0	/* end of table */
259#endif
/* BSS bounds and top of the initial kernel stack (loaded by ldmia) */
260.Lstart:
261	.word	_edata
262	.word	_end
263	.word	svcstk + INIT_ARM_STACK_SIZE
264
265.Lvirt_done:
266	.word	virt_done
267#if defined(SMP)
268.Lmpvirt_done:
269	.word	mpvirt_done
270#endif
271
272.Lmainreturned:
273	.asciz	"main() returned"
274	.align	0
275
/* Bootstrap kernel stack, used until initarm() returns the real one */
276	.bss
277svcstk:
278	.space	INIT_ARM_STACK_SIZE
279
280	.text
281	.align	0
282
/* Address of the cpufuncs dispatch table (used by cpu_halt below) */
283.Lcpufuncs:
284	.word	_C_LABEL(cpufuncs)
285
286#if defined(SMP)
287Lsramaddr:
288	.word	0xffff0080
289
/*
 * AP_DEBUG: when enabled (the "#if 0" variant), stores a progress
 * marker at Lsramaddr indexed by this CPU's ID so early AP boot can be
 * traced without a console.  Compiled out by default.
 */
290#if 0
291#define	AP_DEBUG(tmp)			\
292	mrc	p15, 0, r1, c0, c0, 5;	\
293	ldr	r0, Lsramaddr;		\
294	add	r0, r1, lsl #2;		\
295	mov	r1, tmp;		\
296	str	r1, [r0], #0x0000;
297#else
298#define AP_DEBUG(tmp)
299#endif
300
301
/*
 * mptramp: first code run by a secondary (application) CPU.
 * Invalidates the caches, switches to SVC32 mode, reads this CPU's ID,
 * then loads its boot address from a per-CPU register bank (Lpmureg,
 * board-specific -- presumably the platform's PMU boot registers) and
 * jumps to it.
 */
302ASENTRY_NP(mptramp)
303	mov	r0, #0
304	mcr	p15, 0, r0, c7, c7, 0
305
306	AP_DEBUG(#1)
307
308	mrs	r3, cpsr_all
309	bic	r3, r3, #(PSR_MODE)
310	orr	r3, r3, #(PSR_SVC32_MODE)
311        msr	cpsr_all, r3
312
313	mrc	p15, 0, r0, c0, c0, 5
314	and	r0, #0x0f		/* Get CPU ID */
315
316	/* Read boot address for CPU */
317	mov	r1, #0x100
318	mul	r2, r0, r1
319	ldr	r1, Lpmureg
320	add	r0, r2, r1		/* r0 = Lpmureg + 0x100 * cpuid */
321	ldr	r1, [r0], #0x00
322
323	mov pc, r1
324
325Lpmureg:
326        .word   0xd0022124
327
/*
 * mpentry: secondary-CPU entry point proper.  Mirrors the primary
 * path: mask interrupts, disable the MMU, install the secondary
 * bootstrap page table, re-enable the MMU (with D-cache), carve a
 * per-CPU stack below the primary's, then enter init_secondary().
 */
328ASENTRY_NP(mpentry)
329
330	AP_DEBUG(#2)
331
332	/* Make sure interrupts are disabled. */
333	mrs	r7, cpsr
334	orr	r7, r7, #(I32_bit|F32_bit)
335	msr	cpsr_c, r7
336
337
	/*
	 * NOTE(review): r7 (phys addr of Ltag) is computed here but,
	 * unlike the primary path, never jumped through -- execution
	 * simply falls through to Ltag.  Looks like a leftover; confirm.
	 */
338	adr     r7, Ltag
339	bic     r7, r7, #0xf0000000
340	orr     r7, r7, #PHYSADDR
341
342	/* Disable MMU for a while */
343	mrc	p15, 0, r2, c1, c0, 0
344	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
345	    CPU_CONTROL_WBUF_ENABLE)
346	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
347	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
348	mcr	p15, 0, r2, c1, c0, 0
349
350	nop
351	nop
352	nop
353
354	AP_DEBUG(#3)
355
356Ltag:
357	ldr	r0, Lstartup_pagetable_secondary
358	bic	r0, r0, #0xf0000000
359	orr	r0, r0, #PHYSADDR
360	ldr	r0, [r0]		/* r0 = temp_pagetable value */
361#if defined(SMP)
	/*
	 * NOTE(review): "orr r0, r0, #0" is a no-op, so no TTB shared
	 * flag is actually set here, unlike the primary path which ORs
	 * in #2.  Presumably temp_pagetable already carries the needed
	 * bits -- confirm against the code that populates it.
	 */
362	orr 	r0, r0, #0		/* Set TTB shared memory flag */
363#endif
364	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
365	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */
366
367#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
368	mov	r0, #0
369	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
370#endif
371
372	AP_DEBUG(#4)
373
374	/* Set the Domain Access register.  Very important! */
375	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
376	mcr	p15, 0, r0, c3, c0, 0
377	/* Enable MMU */
378	mrc	p15, 0, r0, c1, c0, 0
379#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
380	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
381#endif
382	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE)
383	mcr	p15, 0, r0, c1, c0, 0
384	nop
385	nop
386	nop
387	CPWAIT(r0)
388
	/* Per-CPU stack: 2048 bytes below the primary stack per CPU ID */
389	adr	r1, .Lstart
390	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
391	mrc	p15, 0, r0, c0, c0, 5
392	and	r0, r0, #15		/* CPU ID */
393	mov	r1, #2048
394	mul	r2, r1, r0
395	sub	sp, sp, r2
396	str	r1, [sp]
397	ldr	pc, .Lmpvirt_done
398
399mpvirt_done:
400
401	mov	fp, #0			/* trace back starts here */
402	bl	_C_LABEL(init_secondary)	/* Off we go */
403
404	adr	r0, .Lmpreturned
405	b	_C_LABEL(panic)
406	/* NOTREACHED */
407
408.Lmpreturned:
409	.asciz	"main() returned"
410	.align	0
411#endif
412
/*
 * cpu_halt: enter SVC mode with interrupts masked, write back and
 * invalidate the caches via cpufuncs, optionally disable the MMU, and
 * jump to the board-supplied cpu_reset_address.  Does not return.
 */
413ENTRY_NP(cpu_halt)
414	mrs     r2, cpsr
415	bic	r2, r2, #(PSR_MODE)
416	orr     r2, r2, #(PSR_SVC32_MODE)
417	orr	r2, r2, #(I32_bit | F32_bit)
418	msr     cpsr_all, r2
419
420	ldr	r4, .Lcpu_reset_address
421	ldr	r4, [r4]		/* r4 = target of the final jump */
422
	/* Write back and invalidate L1 and L2 caches via cpufuncs */
423	ldr	r0, .Lcpufuncs
424	mov	lr, pc
425	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
426	mov	lr, pc
427	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]
428
429	/*
430	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
431	 * necessary.
432	 */
433
434	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
435	ldr	r1, [r1]
436	cmp	r1, #0			/* flags consumed by mcrne below */
437	mov	r2, #0
438
439	/*
440	 * MMU & IDC off, 32 bit program & data space
441	 * Hurl ourselves into the ROM
442	 */
443	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
444	mcr     15, 0, r0, c1, c0, 0
445	mcrne   15, 0, r2, c8, c7, 0 	/* nail I+D TLB on ARMv4 and greater */
446	mov     pc, r4
447
448	/*
449	 * _cpu_reset_address contains the address to branch to, to complete
450	 * the cpu reset after turning the MMU off
451	 * This variable is provided by the hardware specific code
452	 */
453.Lcpu_reset_address:
454	.word	_C_LABEL(cpu_reset_address)
455
456	/*
457	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
458	 * v4 MMU disable instruction needs executing... it is an illegal instruction
459	 * on f.e. ARM6/7 that locks up the computer in an endless illegal
460	 * instruction / data-abort / reset loop.
461	 */
462.Lcpu_reset_needs_v4_MMU_disable:
463	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
464
465
466/*
467 * setjmp + longjmp
468 */
/*
 * int setjmp(jmp_buf): save r4-r14 (callee-saved regs, sp, lr) into
 * the buffer at r0 and return 0.
 */
469ENTRY(setjmp)
470	stmia	r0, {r4-r14}
471	mov	r0, #0x00000000
472	RET

/*
 * longjmp(jmp_buf): restore r4-r14 saved by setjmp (lr included, so
 * RET resumes at the original setjmp call site) and return 1.
 */
473ENTRY(longjmp)
474	ldmia	r0, {r4-r14}
475	mov	r0, #0x00000001
476	RET
478
478	.data
479	.global _C_LABEL(esym)
/* esym: end-of-symbol-table pointer, initially the kernel's end */
480_C_LABEL(esym):	.word	_C_LABEL(end)
481
/* abort: stub that branches to itself forever (should be unreachable) */
482ENTRY_NP(abort)
483	b	_C_LABEL(abort)
485
/*
 * sigcode: signal trampoline copied out to user processes.  Passes the
 * frame at sp to SYS_sigreturn; if that fails, tries SYS_exit, then
 * branches back to retry.  The syscall numbers live as literals right
 * after the code so that szsigcode (esigcode - sigcode) covers them.
 */
486ENTRY_NP(sigcode)
487	mov	r0, sp
488
489	/*
490	 * Call the sigreturn system call.
491	 *
492	 * We have to load r7 manually rather than using
493	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
494	 * correct. Using the alternative places esigcode at the address
495	 * of the data rather than the address one past the data.
496	 */
497
498	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
499	swi	SYS_sigreturn
500
501	/* Well if that failed we better exit quick ! */
502
503	ldr	r7, [pc, #8]	/* Load SYS_exit */
504	swi	SYS_exit
505
506	/* Branch back to retry SYS_sigreturn */
507	b	. - 16
508
509	.word	SYS_sigreturn
510	.word	SYS_exit
511
512	.align	0
513	.global _C_LABEL(esigcode)
514		_C_LABEL(esigcode):
515
516	.data
517	.global szsigcode
/* szsigcode: byte length of the trampoline copied to user space */
518szsigcode:
519	.long esigcode-sigcode
520/* End of locore.S */
521