/* xref: /freebsd/sys/arm/arm/locore.S (revision 87c1627502a5dde91e5284118eec8682b60f27a2) */
1/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/
2
3/*-
4 * Copyright 2011 Semihalf
5 * Copyright (C) 1994-1997 Mark Brinicombe
6 * Copyright (C) 1994 Brini
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Brini.
20 * 4. The name of Brini may not be used to endorse or promote products
21 *    derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36#include "assym.s"
37#include <sys/syscall.h>
38#include <machine/asm.h>
39#include <machine/armreg.h>
40#include <machine/pte.h>
41
42__FBSDID("$FreeBSD$");
43
44/* What size should this really be ? It is only used by initarm() */
45#define INIT_ARM_STACK_SIZE	(2048 * 4)
46
/*
 * CPWAIT: classic CP15 write-serialization idiom for older ARM cores.
 * The arbitrary CP15 read (of the TTB register, c2) stalls until prior
 * CP15 writes have taken effect, the register-to-register mov creates a
 * data dependency on that read, and CPWAIT_BRANCH flushes the prefetch
 * queue: in ARM state pc reads as "." + 8, so "sub pc, pc, #4" is a
 * branch to the immediately following instruction.
 */
47#define	CPWAIT_BRANCH							 \
48	sub	pc, pc, #4
49
50#define	CPWAIT(tmp)							 \
51	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
52	mov	tmp, tmp		/* wait for it to complete */	;\
53	CPWAIT_BRANCH			/* branch to next insn */
54
55/*
56 * This is for kvm_mkdb, and should be the address of the beginning
57 * of the kernel text segment (not necessarily the same as kernbase).
58 */
59	.text
60	.align	0
/* Absolute symbols exporting the configured kernel VA/PA bases. */
61.globl kernbase
62.set kernbase,KERNBASE
63.globl physaddr
64.set physaddr,PHYSADDR
65
66/*
67 * On entry for FreeBSD boot ABI:
68 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
69 *	r1 - if (r0 == 0) then metadata pointer
70 * On entry for Linux boot ABI:
71 *	r0 - 0
72 *	r1 - machine type (passed as arg2 to initarm)
73 * r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 to initarm)
74 *
75 * For both types of boot we gather up the args, put them in a struct arm_boot_params
76 * structure and pass that to initarm.
77 */
/*
 * Kernel entry point (boot CPU).  Stashes the four bootloader-supplied
 * registers, optionally relocates the kernel out of flash, builds a
 * temporary section-granularity page table, turns the MMU on, zeroes
 * the BSS, then builds a struct arm_boot_params on the stack and calls
 * initarm()/mi_startup().  Never returns.
 */
78ENTRY_NP(btext)
79ASENTRY_NP(_start)
80	STOP_UNWINDING		/* Can't unwind into the bootloader! */
81
	/* Preserve bootloader args in registers untouched below. */
82	mov	r9, r0		/* 0 or boot mode from boot2 */
83	mov	r8, r1		/* Save Machine type */
84	mov	ip, r2		/* Save meta data */
85	mov	fp, r3		/* Future expansion */
86
87	/* Make sure interrupts are disabled. */
88	mrs	r7, cpsr
89	orr	r7, r7, #(I32_bit|F32_bit)
90	msr	cpsr_c, r7
91
92#if defined (FLASHADDR) && defined(LOADERRAMADDR)
93	/* Check if we're running from flash. */
94	ldr	r7, =FLASHADDR
95	/*
96	 * If we're running with MMU disabled, test against the
97	 * physical address instead.
98	 */
99	mrc     p15, 0, r2, c1, c0, 0
100	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
101	ldreq	r6, =PHYSADDR
102	ldrne	r6, =LOADERRAMADDR
103	cmp	r7, r6
104	bls 	flash_lower
105	cmp	r7, pc
106	bhi	from_ram
107	b	do_copy
108
109flash_lower:
110	cmp	r6, pc
111	bls	from_ram
	/*
	 * Running from flash: copy the whole image (_start.._edata) into
	 * RAM at its link-time load offset, then jump to the RAM copy.
	 */
112do_copy:
113	ldr	r7, =KERNBASE
114	adr	r1, _start	/* src = where we are executing now */
115	ldr	r0, Lreal_start
116	ldr	r2, Lend
117	sub	r2, r2, r0	/* len = _edata - _start (link addrs) */
118	sub	r0, r0, r7	/* dst = link offset ... */
119	add	r0, r0, r6	/* ... rebased onto the RAM base in r6 */
120	mov	r4, r0		/* remember dst across memcpy */
121	bl	memcpy
122	ldr	r0, Lram_offset
123	add	pc, r4, r0	/* jump to from_ram in the RAM copy */
124Lram_offset:	.word from_ram-_C_LABEL(_start)
125from_ram:
126	nop
127#endif
	/* Compute the physical address of Lunmapped for the MMU-off jump. */
128	adr	r7, Lunmapped
129	bic     r7, r7, #0xf0000000
130	orr     r7, r7, #PHYSADDR
131
132
133disable_mmu:
	/* Disable MMU for a while */
135	mrc     p15, 0, r2, c1, c0, 0
136	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
137	    CPU_CONTROL_WBUF_ENABLE)
138	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
139	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
140	mcr     p15, 0, r2, c1, c0, 0
141
142	nop
143	nop
144	nop
145	mov	pc, r7		/* continue at Lunmapped, MMU off */
146Lunmapped:
147#ifdef STARTUP_PAGETABLE_ADDR
	/*
	 * Build a temporary L1 page table from the mmu_init_table entry
	 * list below.  Outer loop (3:) fetches one (n_sec, table offset,
	 * PA|attr) triplet; inner loop (2:) writes n_sec consecutive 1MB
	 * section descriptors.  A zero section count terminates.
	 */
148	/* build page table from scratch */
149	ldr	r0, Lstartup_pagetable
150	adr	r4, mmu_init_table
151	b	3f
152
1532:
154	str	r3, [r0, r2]		/* write one section descriptor */
155	add	r2, r2, #4
156	add	r3, r3, #(L1_S_SIZE)	/* next 1MB of physical space */
157	adds	r1, r1, #-1
158	bhi	2b
1593:
160	ldmia	r4!, {r1,r2,r3}   /* # of sections, VA, PA|attr */
161	cmp	r1, #0
	/* Loop back via the physical alias of label 2: (MMU is off). */
162	adrne	r5, 2b
163	bicne	r5, r5, #0xf0000000
164	orrne	r5, r5, #PHYSADDR
165	movne	pc, r5
166
167#if defined(SMP)
168	orr 	r0, r0, #2		/* Set TTB shared memory flag */
169#endif
170	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
171	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */
172
173#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B)
174	mov	r0, #0
175	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
176#endif
177
178	/* Set the Domain Access register.  Very important! */
179	mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
180	mcr	p15, 0, r0, c3, c0, 0
181	/*
182	 * Enable MMU.
183	 * On armv6 enable extended page tables, and set alignment checking
184	 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
185	 * instructions emitted by clang.
186	 */
187	mrc	p15, 0, r0, c1, c0, 0
188#ifdef _ARM_ARCH_6
189	orr	r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
	/*
	 * NOTE(review): this ORs AFLT into r2, but r2 is dead here -- only
	 * r0 is written back to the control register below, so the A bit is
	 * never actually set.  Looks like leftover code or a typo for r0;
	 * confirm intent (AFLT would conflict with the UNAL_ENABLE comment
	 * above) before changing.
	 */
190	orr	r2, r2, #(CPU_CONTROL_AFLT_ENABLE)
191#endif
192	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
193	mcr	p15, 0, r0, c1, c0, 0
194	nop
195	nop
196	nop
197	CPWAIT(r0)
198
199#endif
200mmu_done:
201	nop
	/* r1 = _edata, r2 = _end, sp = top of svcstk (from .Lstart pool). */
202	adr	r1, .Lstart
203	ldmia	r1, {r1, r2, sp}	/* Set initial stack pointer */
204	sub	r2, r2, r1		/* r2 = BSS size in bytes */
205	mov	r3, #0
206.L1:
207	str	r3, [r1], #0x0004	/* zero one word of BSS */
208	subs	r2, r2, #4
209	bgt	.L1
	/* Absolute jump so we continue at the kernel's virtual address. */
210	ldr	pc, .Lvirt_done
211
212virt_done:
213	mov	r1, #20			/* sizeof(struct arm_boot_params); also passed as the length field */
214	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
215	bic	sp, sp, #7		/* align stack to 8 bytes */
216	mov	r0, sp			/* loader info pointer is first arg */
217	str	r1, [r0]		/* Store length of loader info */
218	str	r9, [r0, #4]		/* Store r0 from boot loader */
219	str	r8, [r0, #8]		/* Store r1 from boot loader */
220	str	ip, [r0, #12]		/* store r2 from boot loader */
221	str	fp, [r0, #16]		/* store r3 from boot loader */
222	mov	fp, #0			/* trace back starts here */
223	bl	_C_LABEL(initarm)	/* Off we go */
224
225	/* init arm will return the new stack pointer. */
226	mov	sp, r0
227
228	bl	_C_LABEL(mi_startup)		/* call mi_startup()! */
229
	/* mi_startup() must never return: panic if it does. */
230	adr	r0, .Lmainreturned
231	b	_C_LABEL(panic)
232	/* NOTREACHED */
233#ifdef STARTUP_PAGETABLE_ADDR
/*
 * MMU_INIT emits one mmu_init_table triplet: section count, byte offset
 * of the first L1 descriptor for va, and the descriptor value pa|attr.
 */
234#define MMU_INIT(va,pa,n_sec,attr) \
235	.word	n_sec					    ; \
236	.word	4*((va)>>L1_S_SHIFT)			    ; \
237	.word	(pa)|(attr)				    ;

/* Literal pool used by the flash-copy and page-table code above. */
239Lvirtaddr:
240	.word	KERNVIRTADDR
241Lphysaddr:
242	.word	KERNPHYSADDR
243Lreal_start:
244	.word	_start
245Lend:
246	.word	_edata
247Lstartup_pagetable:
248	.word	STARTUP_PAGETABLE_ADDR
249#ifdef SMP
250Lstartup_pagetable_secondary:
251	.word	temp_pagetable
252#endif
253END(btext)
254END(_start)
255
/*
 * Bootstrap mapping table consumed by the descriptor-writing loop in
 * _start: a list of MMU_INIT triplets terminated by a zero word.
 */
256mmu_init_table:
257	/* fill all table VA==PA */
258	/* map SDRAM VA==PA, WT cacheable */
259#if !defined(SMP)
260	MMU_INIT(PHYSADDR, PHYSADDR , 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
261	/* map VA 0xc0000000..0xc3ffffff to PA */
262	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
263#else
	/* SMP variants add the L1_SHARED attribute on every mapping. */
264	MMU_INIT(PHYSADDR, PHYSADDR , 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
265	/* map VA 0xc0000000..0xc3ffffff to PA */
266	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
267	MMU_INIT(0x48000000, 0x48000000, 1, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
268#if defined(CPU_MV_PJ4B)
269	/* map VA 0xf1000000..0xf1100000 to PA 0xd0000000 */
270	MMU_INIT(0xf1000000, 0xd0000000, 1, L1_TYPE_S|L1_SHARED|L1_S_B|L1_S_AP(AP_KRW))
271#endif /* CPU_MV_PJ4B */
272#endif /* SMP */
273	.word 0	/* end of table */
274#endif
/*
 * .Lstart pool, loaded by the ldmia in mmu_done/mpentry:
 * word 0 = _edata (BSS start), word 1 = _end (BSS end),
 * word 2 = initial stack pointer (top of svcstk).
 */
275.Lstart:
276	.word	_edata
277	.word	_end
278	.word	svcstk + INIT_ARM_STACK_SIZE

/* Virtual-address continuation targets for the post-MMU-enable jumps. */
280.Lvirt_done:
281	.word	virt_done
282#if defined(SMP)
283.Lmpvirt_done:
284	.word	mpvirt_done
285#endif

287.Lmainreturned:
288	.asciz	"main() returned"
289	.align	0

/* Bootstrap (initarm) stack for the boot CPU. */
291	.bss
292svcstk:
293	.space	INIT_ARM_STACK_SIZE

295	.text
296	.align	0

298.Lcpufuncs:
299	.word	_C_LABEL(cpufuncs)

301#if defined(SMP)
302Lsramaddr:
303	.word	0xffff0080
304
/*
 * AP_DEBUG: disabled (#if 0) debug aid that would store a progress
 * marker into per-CPU SRAM slots at Lsramaddr, indexed by MPIDR.
 * Compiles to nothing in the normal build.
 */
305#if 0
306#define	AP_DEBUG(tmp)			\
307	mrc	p15, 0, r1, c0, c0, 5;	\
308	ldr	r0, Lsramaddr;		\
309	add	r0, r1, lsl #2;		\
310	mov	r1, tmp;		\
311	str	r1, [r0], #0x0000;
312#else
313#define AP_DEBUG(tmp)
314#endif
315
316
/*
 * Secondary-CPU trampoline.  Invalidates caches, forces SVC32 mode,
 * reads this CPU's id from MPIDR, fetches its boot address from a
 * per-CPU register bank (0x100 bytes apart, based at Lpmureg --
 * presumably a Marvell PMU boot-address register; board-specific) and
 * jumps there.
 */
317ASENTRY_NP(mptramp)
318	mov	r0, #0
319	mcr	p15, 0, r0, c7, c7, 0	/* invalidate I+D caches */
320
321	AP_DEBUG(#1)
322
	/* Switch to SVC32 mode (cpsr_all is the legacy GAS spelling). */
323	mrs	r3, cpsr_all
324	bic	r3, r3, #(PSR_MODE)
325	orr	r3, r3, #(PSR_SVC32_MODE)
326        msr	cpsr_all, r3
327
328	mrc	p15, 0, r0, c0, c0, 5	/* MPIDR */
329	and	r0, #0x0f		/* Get CPU ID */
330
331	/* Read boot address for CPU */
332	mov	r1, #0x100
333	mul	r2, r0, r1		/* r2 = cpuid * 0x100 */
334	ldr	r1, Lpmureg
335	add	r0, r2, r1		/* r0 = &bootreg[cpuid] */
336	ldr	r1, [r0], #0x00		/* post-index by 0: plain load */
337
338	mov pc, r1			/* jump to the boot address */
339
340Lpmureg:
341        .word   0xd0022124
342END(mptramp)
343
/*
 * Secondary-CPU kernel entry.  Mirrors the boot CPU's _start path:
 * disable interrupts and the MMU, install the shared startup page
 * table, re-enable the MMU, carve a per-CPU 2KB slice off the top of
 * svcstk, then call init_secondary().  Never returns.
 */
344ASENTRY_NP(mpentry)

345	AP_DEBUG(#2)

348	/* Make sure interrupts are disabled. */
349	mrs	r7, cpsr
350	orr	r7, r7, #(I32_bit|F32_bit)
351	msr	cpsr_c, r7


	/* Physical alias of Ltag, to continue at once the MMU is off. */
354	adr     r7, Ltag
355	bic     r7, r7, #0xf0000000
356	orr     r7, r7, #PHYSADDR

358	/* Disable MMU for a while */
359	mrc	p15, 0, r2, c1, c0, 0
360	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
361	    CPU_CONTROL_WBUF_ENABLE)
362	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
363	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
364	mcr	p15, 0, r2, c1, c0, 0

366	nop
367	nop
368	nop

370	AP_DEBUG(#3)

372Ltag:
	/* Load the page-table address published by the boot CPU. */
373	ldr	r0, Lstartup_pagetable_secondary
374	bic	r0, r0, #0xf0000000
375	orr	r0, r0, #PHYSADDR
376	ldr	r0, [r0]
377#if defined(SMP)
	/*
	 * NOTE(review): "orr r0, r0, #0" is a no-op -- the boot-CPU path
	 * uses #2 for this same "shared" TTB flag.  Likely a bug (the flag
	 * is never actually set here); confirm against the TTBR bit layout.
	 */
378	orr 	r0, r0, #0		/* Set TTB shared memory flag */
379#endif
380	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
381	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

383#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
384	mov	r0, #0
385	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
386#endif

388	AP_DEBUG(#4)

390	/* Set the Domain Access register.  Very important! */
391	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
392	mcr	p15, 0, r0, c3, c0, 0
393	/* Enable MMU */
394	mrc	p15, 0, r0, c1, c0, 0
395#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
396	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
397#endif
398	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE)
399	mcr	p15, 0, r0, c1, c0, 0
400	nop
401	nop
402	nop
403	CPWAIT(r0)

	/* Per-CPU stack: top of svcstk minus cpuid * 2048 bytes. */
405	adr	r1, .Lstart
406	ldmia	r1, {r1, r2, sp}	/* Set initial stack pointer */
407	mrc	p15, 0, r0, c0, c0, 5	/* MPIDR */
408	and	r0, r0, #15		/* r0 = cpuid */
409	mov	r1, #2048
410	mul	r2, r1, r0
411	sub	sp, sp, r2
	/*
	 * NOTE(review): stores the literal 2048 at the new stack top;
	 * purpose is unclear from this file -- verify against consumers.
	 */
412	str	r1, [sp]
	/* Absolute jump to the virtual-address continuation. */
413	ldr	pc, .Lmpvirt_done

415mpvirt_done:

417	mov	fp, #0			/* trace back starts here */
418	bl	_C_LABEL(init_secondary)	/* Off we go */

	/* init_secondary() must never return: panic if it does. */
420	adr	r0, .Lmpreturned
421	b	_C_LABEL(panic)
422	/* NOTREACHED */

424.Lmpreturned:
	/* NOTE(review): message says "main" but this path is init_secondary. */
425	.asciz	"main() returned"
426	.align	0
427END(mpentry)
428#endif
429
/*
 * cpu_halt: despite the name, this implements the reset path -- it
 * masks interrupts in SVC32 mode, writes back and invalidates the
 * caches, optionally disables the MMU (pre-v4-safe gating via the
 * cpu_reset_needs_v4_MMU_disable flag) and jumps to the board-supplied
 * cpu_reset_address.  Does not return.
 */
430ENTRY_NP(cpu_halt)
431	mrs     r2, cpsr
432	bic	r2, r2, #(PSR_MODE)
433	orr     r2, r2, #(PSR_SVC32_MODE)
434	orr	r2, r2, #(I32_bit | F32_bit)
435	msr     cpsr_all, r2

	/* r4 = *cpu_reset_address: where to jump once the MMU is off. */
437	ldr	r4, .Lcpu_reset_address
438	ldr	r4, [r4]

	/*
	 * Call through the cpufuncs dispatch table; "mov lr, pc" sets the
	 * return address to the instruction after the following ldr
	 * (pc reads as "." + 8 in ARM state).
	 */
440	ldr	r0, .Lcpufuncs
441	mov	lr, pc
442	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
443	mov	lr, pc
444	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]

446	/*
447	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
448	 * necessary.
449	 */

451	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
452	ldr	r1, [r1]
	/* Flags from this cmp survive to the mcrne below: the intervening
	 * mov/mcr instructions do not write the condition flags. */
453	cmp	r1, #0
454	mov	r2, #0

456	/*
457	 * MMU & IDC off, 32 bit program & data space
458	 * Hurl ourselves into the ROM
459	 */
460	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
461	mcr     15, 0, r0, c1, c0, 0
462	mcrne   15, 0, r2, c8, c7, 0 	/* nail I+D TLB on ARMv4 and greater */
463	mov     pc, r4

465	/*
466	 * _cpu_reset_address contains the address to branch to, to complete
467	 * the cpu reset after turning the MMU off
468	 * This variable is provided by the hardware specific code
469	 */
470.Lcpu_reset_address:
471	.word	_C_LABEL(cpu_reset_address)

473	/*
474	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
475	 * v4 MMU disable instruction needs executing... it is an illegal instruction
476	 * on f.e. ARM6/7 that locks up the computer in an endless illegal
477	 * instruction / data-abort / reset loop.
478	 */
479.Lcpu_reset_needs_v4_MMU_disable:
480	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
481END(cpu_halt)
482
483
484/*
485 * setjump + longjmp
486 */
/*
 * int setjmp(jmp_buf env)
 *
 * Saves the callee context -- r4-r12, sp (r13) and lr (r14) -- into
 * the 11-word buffer at r0 and returns 0 on this, the direct, path.
 * A subsequent longjmp() on the same buffer resumes at this call site
 * returning 1.
 */
487ENTRY(setjmp)
488	stmia	r0, {r4-r14}	/* dump r4-r12, sp, lr into *r0 */
489	mov	r0, #0x00000000	/* direct call returns 0 */
490	RET
491END(setjmp)
492
/*
 * void longjmp(jmp_buf env)
 *
 * Restores r4-r12, sp and lr from the buffer written by setjmp() and
 * returns 1 to setjmp()'s caller via the restored lr.
 */
493ENTRY(longjmp)
494	ldmia	r0, {r4-r14}	/* reload r4-r12, sp, lr from *r0 */
495	mov	r0, #0x00000001	/* resumed setjmp() returns 1 */
496	RET
497END(longjmp)
498
499	.data
500	.global _C_LABEL(esym)
/* esym: end-of-symbols pointer, statically initialized to &end. */
501_C_LABEL(esym):	.word	_C_LABEL(end)
502
/* abort: should never be reached; spins forever branching to itself. */
503ENTRY_NP(abort)
504	b	_C_LABEL(abort)
505END(abort)
506
/*
 * Signal trampoline copied onto the user stack.  Invokes SYS_sigreturn
 * on the signal frame at sp; on failure falls through to SYS_exit and,
 * failing that, retries forever.  Layout is offset-critical: the two
 * "ldr r7, [pc, #N]" loads reach the .word constants below via pc+8
 * semantics (first ldr: pc+8+12 -> SYS_sigreturn word; second ldr:
 * pc+8+8 -> SYS_exit word), and "b . - 16" lands back on the first ldr.
 * Do not insert or remove instructions here without recomputing them.
 */
507ENTRY_NP(sigcode)
508	mov	r0, sp		/* arg0 = signal frame */

510	/*
511	 * Call the sigreturn system call.
512	 *
513	 * We have to load r7 manually rather than using
514	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
515	 * correct. Using the alternative places esigcode at the address
516	 * of the data rather than the address one past the data.
517	 */

519	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
520	swi	SYS_sigreturn

522	/* Well if that failed we better exit quick ! */

524	ldr	r7, [pc, #8]	/* Load SYS_exit */
525	swi	SYS_exit

527	/* Branch back to retry SYS_sigreturn */
528	b	. - 16

530	.word	SYS_sigreturn
531	.word	SYS_exit

533	.align	0
534	.global _C_LABEL(esigcode)
535		_C_LABEL(esigcode):

/* szsigcode: byte length of the trampoline, used when copying it out. */
537	.data
538	.global szsigcode
539szsigcode:
540	.long esigcode-sigcode
541END(sigcode)
542/* End of locore.S */
543