/* xref: /freebsd/sys/arm/arm/locore.S (revision 8d20be1e22095c27faf8fe8b2f0d089739cc742e) */
/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/

/*-
 * Copyright 2011 Semihalf
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "assym.s"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/pte.h>

__FBSDID("$FreeBSD$");

/* What size should this really be? It is only used by initarm() */
#define INIT_ARM_STACK_SIZE	(2048 * 4)

#define	CPWAIT_BRANCH							 \
	sub	pc, pc, #4

#define	CPWAIT(tmp)							 \
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */
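/*
 * CPWAIT is the conventional "wait for CP15 to settle" sequence: an
 * arbitrary CP15 read, a mov that depends on its result to stall until
 * the read completes, and a branch to the next instruction to flush the
 * prefetch queue, so later instructions see the new coprocessor state.
 */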

/*
 * This is for kvm_mkdb, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 */
	.text
	.align	0
.globl kernbase
.set kernbase,KERNBASE
.globl physaddr
.set physaddr,PHYSADDR

/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 to initarm)
 *
 * For both types of boot we gather up the args, put them in a struct
 * arm_boot_params and pass that to initarm; the layout of that block is
 * sketched below.
 */
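/*
 * A minimal sketch of the 20-byte argument block that virt_done below
 * builds on the stack.  The abp_* names are the conventional field names
 * on the C side and are shown here for illustration only:
 *
 *	offset  0: abp_size	size of this block (20)
 *	offset  4: abp_r0	r0 from the boot loader
 *	offset  8: abp_r1	r1 from the boot loader
 *	offset 12: abp_r2	r2 from the boot loader
 *	offset 16: abp_r3	r3 from the boot loader
 */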
ENTRY_NP(btext)
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	mov	r9, r0		/* 0 or boot mode from boot2 */
	mov	r8, r1		/* Save Machine type */
	mov	ip, r2		/* Save meta data */
	mov	fp, r3		/* Future expansion */

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7

#if defined (FLASHADDR) && defined(LOADERRAMADDR)
	/* Check if we're running from flash. */
	ldr	r7, =FLASHADDR
	/*
	 * If we're running with MMU disabled, test against the
	 * physical address instead.
	 */
	mrc     p15, 0, r2, c1, c0, 0
	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
	ldreq	r6, =PHYSADDR
	ldrne	r6, =LOADERRAMADDR
	cmp	r7, r6
	bls 	flash_lower
	cmp	r7, pc
	bhi	from_ram
	b	do_copy

flash_lower:
	cmp	r6, pc
	bls	from_ram
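/*
 * do_copy relocates the kernel image from flash into RAM and continues
 * from the copy: r0 (dest) is _start's link address rebased from KERNBASE
 * onto the RAM base held in r6, r1 (src) is _start's current PC-relative
 * address, and r2 (len) is _edata - _start.  After memcpy, the
 * "add pc, r4, r0" jumps to from_ram inside the RAM copy.
 */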
do_copy:
	ldr	r7, =KERNBASE
	adr	r1, _start
	ldr	r0, Lreal_start
	ldr	r2, Lend
	sub	r2, r2, r0
	sub	r0, r0, r7
	add	r0, r0, r6
	mov	r4, r0
	bl	memcpy
	ldr	r0, Lram_offset
	add	pc, r4, r0
Lram_offset:	.word from_ram-_C_LABEL(_start)
from_ram:
	nop
#endif
	adr	r7, Lunmapped
	bic     r7, r7, #0xf0000000
	orr     r7, r7, #PHYSADDR


disable_mmu:
	/* Disable MMU for a while */
	mrc     p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr     p15, 0, r2, c1, c0, 0

	nop
	nop
	nop
	mov	pc, r7
Lunmapped:
#ifdef STARTUP_PAGETABLE_ADDR
	/* build page table from scratch */
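	/*
	 * mmu_init_table (below) is consumed three words at a time: a count
	 * of 1 MB sections, the byte offset of the first L1 entry to write
	 * (4 * (va >> L1_S_SHIFT)), and the section descriptor (pa | attrs).
	 * The loop at 2: stores that many consecutive section entries,
	 * advancing the descriptor by L1_S_SIZE each time; a count of zero
	 * terminates the table.
	 */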
	ldr	r0, Lstartup_pagetable
	adr	r4, mmu_init_table
	b	3f

2:
	str	r3, [r0, r2]
	add	r2, r2, #4
	add	r3, r3, #(L1_S_SIZE)
	adds	r1, r1, #-1
	bhi	2b
3:
	ldmia	r4!, {r1,r2,r3}   /* # of sections, VA, PA|attr */
	cmp	r1, #0
	adrne	r5, 2b
	bicne	r5, r5, #0xf0000000
	orrne	r5, r5, #PHYSADDR
	movne	pc, r5

#if defined(SMP)
	orr 	r0, r0, #2		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	/* Set the Domain Access register.  Very important! */
	mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
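	/*
	 * Assuming the usual definitions (PMAP_DOMAIN_KERNEL == 0 and
	 * DOMAIN_CLIENT == 1), the value written above is simply 0x1:
	 * domain 0 in client mode, so the access permission bits in the
	 * page table entries are enforced.
	 */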
	/*
	 * Enable MMU.
	 * On armv6 enable extended page tables, and set alignment checking
	 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
	 * instructions emitted by clang.
	 */
	mrc	p15, 0, r0, c1, c0, 0
#ifdef _ARM_ARCH_6
	orr	r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_AFLT_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_AF_ENABLE)
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

#endif
mmu_done:
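	/*
	 * .Lstart holds { _edata, _end, svcstk + INIT_ARM_STACK_SIZE }.
	 * The loop at .L1 zeroes the region from _edata to _end (the bss)
	 * one word at a time, then the "ldr pc, .Lvirt_done" continues at
	 * virt_done via its linked virtual address.
	 */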
	nop
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
.L1:
	str	r3, [r1], #0x0004	/* get zero init data */
	subs	r2, r2, #4
	bgt	.L1
	ldr	pc, .Lvirt_done

virt_done:
	mov	r1, #20			/* loader info size is 20 bytes also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	mov	r0, sp			/* loader info pointer is first arg */
	str	r1, [r0]		/* Store length of loader info */
	str	r9, [r0, #4]		/* Store r0 from boot loader */
	str	r8, [r0, #8]		/* Store r1 from boot loader */
	str	ip, [r0, #12]		/* store r2 from boot loader */
	str	fp, [r0, #16]		/* store r3 from boot loader */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* initarm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)		/* call mi_startup()! */

	adr	r0, .Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
#ifdef STARTUP_PAGETABLE_ADDR
#define MMU_INIT(va,pa,n_sec,attr) \
	.word	n_sec					    ; \
	.word	4*((va)>>L1_S_SHIFT)			    ; \
	.word	(pa)|(attr)				    ;
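/*
 * For example, MMU_INIT(KERNBASE, PHYSADDR, 64, attrs) emits the three
 * words 64, 4*(KERNBASE >> L1_S_SHIFT) and (PHYSADDR | attrs), which the
 * loop at Lunmapped above turns into 64 consecutive 1 MB section mappings
 * starting at KERNBASE.
 */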

Lvirtaddr:
	.word	KERNVIRTADDR
Lphysaddr:
	.word	KERNPHYSADDR
Lreal_start:
	.word	_start
Lend:
	.word	_edata
Lstartup_pagetable:
	.word	STARTUP_PAGETABLE_ADDR
#ifdef SMP
Lstartup_pagetable_secondary:
	.word	temp_pagetable
#endif
END(btext)
END(_start)

mmu_init_table:
	/* fill all table VA==PA */
	/* map SDRAM VA==PA, WT cacheable */
#if !defined(SMP)
	MMU_INIT(PHYSADDR, PHYSADDR , 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
	/* map VA 0xc0000000..0xc3ffffff to PA */
	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#else
	MMU_INIT(PHYSADDR, PHYSADDR , 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
	/* map VA 0xc0000000..0xc3ffffff to PA */
	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
	MMU_INIT(0x48000000, 0x48000000, 1, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
#endif /* SMP */
	.word 0	/* end of table */
#endif
.Lstart:
	.word	_edata
	.word	_end
	.word	svcstk + INIT_ARM_STACK_SIZE

.Lvirt_done:
	.word	virt_done
#if defined(SMP)
.Lmpvirt_done:
	.word	mpvirt_done
#endif

.Lmainreturned:
	.asciz	"main() returned"
	.align	0

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE

	.text
	.align	0

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#if defined(SMP)
Lsramaddr:
	.word	0xffff0080

#if 0
#define	AP_DEBUG(tmp)			\
	mrc	p15, 0, r1, c0, c0, 5;	\
	ldr	r0, Lsramaddr;		\
	add	r0, r1, lsl #2;		\
	mov	r1, tmp;		\
	str	r1, [r0], #0x0000;
#else
#define AP_DEBUG(tmp)
#endif
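/*
 * AP_DEBUG is a crude progress trace for the secondary CPUs: when the
 * "#if 0" above is flipped on, it reads the MPIDR and stores the given
 * marker value at an offset derived from it (4 bytes per CPU) past the
 * fixed address named by Lsramaddr, so each core leaves its own
 * breadcrumb.  As committed, it expands to nothing.
 */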


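/*
 * mptramp is the first kernel code the secondary CPUs execute.  It drops
 * into SVC32 mode, reads this core's CPU number from the MPIDR, loads the
 * core's real start address from the per-CPU register at Lpmureg +
 * 0x100 * cpuid (treating that as a platform-specific boot-address
 * register is an assumption here), and jumps to it, which the platform
 * code is expected to point at mpentry below.
 */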
ASENTRY_NP(mptramp)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0

	AP_DEBUG(#1)

	mrs	r3, cpsr_all
	bic	r3, r3, #(PSR_MODE)
	orr	r3, r3, #(PSR_SVC32_MODE)
        msr	cpsr_all, r3

	mrc	p15, 0, r0, c0, c0, 5
	and	r0, #0x0f		/* Get CPU ID */

	/* Read boot address for CPU */
	mov	r1, #0x100
	mul	r2, r0, r1
	ldr	r1, Lpmureg
	add	r0, r2, r1
	ldr	r1, [r0], #0x00

	mov pc, r1

Lpmureg:
        .word   0xd0022124
END(mptramp)

ASENTRY_NP(mpentry)

	AP_DEBUG(#2)

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7


	adr     r7, Ltag
	bic     r7, r7, #0xf0000000
	orr     r7, r7, #PHYSADDR

	/* Disable MMU for a while */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0

	nop
	nop
	nop

	AP_DEBUG(#3)

Ltag:
	ldr	r0, Lstartup_pagetable_secondary
	bic	r0, r0, #0xf0000000
	orr	r0, r0, #PHYSADDR
	ldr	r0, [r0]
#if defined(SMP)
	orr 	r0, r0, #0		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	AP_DEBUG(#4)

	/* Set the Domain Access register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/* Enable MMU */
	mrc	p15, 0, r0, c1, c0, 0
#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA)
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

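	/*
	 * Give each secondary CPU its own 2 KB slice of the initial stack:
	 * sp = (svcstk + INIT_ARM_STACK_SIZE) - 2048 * cpuid, with cpuid
	 * taken from the low four bits of the MPIDR.
	 */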
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	mrc	p15, 0, r0, c0, c0, 5
	and	r0, r0, #15
	mov	r1, #2048
	mul	r2, r1, r0
	sub	sp, sp, r2
	str	r1, [sp]
	ldr	pc, .Lmpvirt_done

mpvirt_done:

	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	0
END(mpentry)
#endif

ENTRY_NP(cpu_halt)
	mrs     r2, cpsr
	bic	r2, r2, #(PSR_MODE)
	orr     r2, r2, #(PSR_SVC32_MODE)
	orr	r2, r2, #(I32_bit | F32_bit)
	msr     cpsr_all, r2

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]

	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
	mov	lr, pc
	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]
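	/*
	 * The "mov lr, pc" / "ldr pc, [r0, #CF_*]" pairs above are indirect
	 * calls through the cpufuncs method table (the CF_* offsets come
	 * from assym.s): write back and invalidate the I/D caches and then
	 * the L2 cache before control is handed to the reset code.
	 */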

	/*
	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if the
	 * MMU-disable step below is needed.
	 */

	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
	ldr	r1, [r1]
	cmp	r1, #0
	mov	r2, #0

	/*
	 * MMU & IDC off, 32 bit program & data space
	 * Hurl ourselves into the ROM
	 */
	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
	mcr     15, 0, r0, c1, c0, 0
	mcrne   15, 0, r2, c8, c7, 0 	/* nail I+D TLB on ARMv4 and greater */
	mov     pc, r4

	/*
	 * _cpu_reset_address contains the address to branch to in order to
	 * complete the cpu reset after turning the MMU off.
	 * This variable is provided by the hardware-specific code.
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)

	/*
	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
	 * v4 MMU disable instruction needs executing; it is an illegal
	 * instruction on e.g. ARM6/7 that locks up the computer in an endless
	 * illegal instruction / data-abort / reset loop.
	 */
.Lcpu_reset_needs_v4_MMU_disable:
	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
END(cpu_halt)


/*
 * setjmp + longjmp
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
END(setjmp)

ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)
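/*
 * Minimal kernel setjmp/longjmp: setjmp saves r4-r14 (the callee-saved
 * registers plus sp and lr) into the buffer pointed to by r0 and returns
 * 0; longjmp reloads them, so the RET through the restored lr resumes at
 * the original setjmp call site, this time returning 1.
 */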

	.data
	.global _C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)

ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)

ENTRY_NP(sigcode)
	mov	r0, sp

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

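	/*
	 * Reading pc in ARM state yields the current instruction's address
	 * plus 8, so the "[pc, #12]" and "[pc, #8]" loads below reach the
	 * two .word literals that follow the code, and "b . - 16" branches
	 * back to the first ldr to retry the sigreturn.
	 */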
	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16

	.word	SYS_sigreturn
	.word	SYS_exit

	.align	0
	.global _C_LABEL(esigcode)
		_C_LABEL(esigcode):

	.data
	.global szsigcode
szsigcode:
	.long esigcode-sigcode
END(sigcode)
/* End of locore.S */
