xref: /freebsd/sys/arm/arm/locore.S (revision 71099ec5097cff9b4a566e5474b7f214bd539e8a)
/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/

/*-
 * Copyright 2011 Semihalf
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "assym.s"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/pte.h>

__FBSDID("$FreeBSD$");

/* What size should this really be?  It is only used by initarm(). */
#define INIT_ARM_STACK_SIZE	(2048 * 4)

#define	CPWAIT_BRANCH							 \
	sub	pc, pc, #4

#define	CPWAIT(tmp)							 \
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */
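
/*
 * CPWAIT is the traditional pre-ARMv6 (notably XScale) idiom for waiting
 * until a CP15 write has taken effect: an arbitrary CP15 read stalls until
 * the write completes, the register-to-register mov waits for that read,
 * and "sub pc, pc, #4" is a branch to the next instruction that discards
 * any instructions prefetched under the old CP15 state.
 */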

/*
 * This is for kvm_mkdb, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 */
	.text
	.align	0
.globl kernbase
.set kernbase,KERNBASE
.globl physaddr
.set physaddr,PHYSADDR

/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - pointer to a tagged list or dtb image (phys addr) (passed as arg1 to initarm)
 *
 * For both types of boot we gather up the args, put them in a
 * struct arm_boot_params and pass that to initarm (see the layout
 * note below).
 */
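/*
 * The code at virt_done below builds that boot argument block on the stack
 * with the following layout (24 bytes, one word per field; the exact field
 * names in struct arm_boot_params may differ):
 *
 *	offset  0: size of this structure (24)
 *	offset  4: r0 from the boot loader
 *	offset  8: r1 from the boot loader
 *	offset 12: r2 from the boot loader
 *	offset 16: r3 from the boot loader
 *	offset 20: kernel physical load address (KERNPHYSADDR)
 */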
ENTRY_NP(btext)
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	mov	r9, r0		/* 0 or boot mode from boot2 */
	mov	r8, r1		/* Save machine type */
	mov	ip, r2		/* Save metadata pointer */
	mov	fp, r3		/* Future expansion */

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7

#if defined(FLASHADDR) && defined(LOADERRAMADDR)
	/* Check if we're running from flash. */
	ldr	r7, =FLASHADDR
	/*
	 * If we're running with MMU disabled, test against the
	 * physical address instead.
	 */
	mrc	p15, 0, r2, c1, c0, 0
	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
	ldreq	r6, =PHYSADDR
	ldrne	r6, =LOADERRAMADDR
	cmp	r7, r6
	bls	flash_lower
	cmp	r7, pc
	bhi	from_ram
	b	do_copy

flash_lower:
	cmp	r6, pc
	bls	from_ram
do_copy:
	ldr	r7, =KERNBASE
	adr	r1, _start
	ldr	r0, Lreal_start
	ldr	r2, Lend
	sub	r2, r2, r0
	sub	r0, r0, r7
	add	r0, r0, r6
	mov	r4, r0
	bl	memcpy
	ldr	r0, Lram_offset
	add	pc, r4, r0
Lram_offset:	.word from_ram-_C_LABEL(_start)
from_ram:
	nop
#endif
	adr	r7, Lunmapped		/* Compute the physical address of Lunmapped */
	bic	r7, r7, #0xf0000000
	orr	r7, r7, #PHYSADDR

disable_mmu:
	/* Disable MMU for a while */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0

	nop
	nop
	nop
	mov	pc, r7
Lunmapped:
	/*
	 * Build page table from scratch.
	 */
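	/*
	 * In outline: the loop below walks mmu_init_table, whose entries are
	 * emitted by the MMU_INIT() macro as three words each:
	 *	word 0: number of 1 MB sections to map (0 terminates the table)
	 *	word 1: byte offset into the L1 table, i.e. 4 * (va >> L1_S_SHIFT)
	 *	word 2: initial section descriptor, i.e. (pa | attributes)
	 * For each entry it stores "count" consecutive L1 section descriptors,
	 * advancing the descriptor by L1_S_SIZE (1 MB) per slot.
	 */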

	/* Load the page table's physical address */
	ldr	r1, Lstartup_pagetable
	ldr	r2, =(KERNVIRTADDR - KERNPHYSADDR)
	sub	r0, r1, r2

	adr	r4, mmu_init_table
	b	3f

2:
	str	r3, [r0, r2]
	add	r2, r2, #4
	add	r3, r3, #(L1_S_SIZE)
	adds	r1, r1, #-1
	bhi	2b
3:
	ldmia	r4!, {r1,r2,r3}   /* # of sections, VA, PA|attr */
	cmp	r1, #0
	adrne	r5, 2b			/* more to do: branch to the store */
	bicne	r5, r5, #0xf0000000	/* loop via its physical address */
	orrne	r5, r5, #PHYSADDR
	movne	pc, r5

#if defined(SMP)
	orr	r0, r0, #2		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	/* Set the Domain Access Control register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
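	/*
	 * The DACR value above marks domain 0 and the kernel's pmap domain
	 * (PMAP_DOMAIN_KERNEL) as "client", so accesses in those domains are
	 * checked against the page-table permission bits; every other domain
	 * is left as "no access".
	 */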
	/*
	 * Enable MMU.
	 * On armv6 enable extended page tables, and set alignment checking
	 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
	 * instructions emitted by clang.
	 */
	mrc	p15, 0, r0, c1, c0, 0
#ifdef _ARM_ARCH_6
	orr	r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_AFLT_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_AF_ENABLE)
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

mmu_done:
	nop
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* r1 = _edata, r2 = _ebss, set initial stack */
	sub	r2, r2, r1		/* length of BSS to zero */
	mov	r3, #0
.L1:
	str	r3, [r1], #0x0004	/* clear BSS word by word */
	subs	r2, r2, #4
	bgt	.L1
	ldr	pc, .Lvirt_done

virt_done:
	mov	r1, #24			/* loader info size (24 bytes), also the second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	mov	r0, sp			/* loader info pointer is first arg */
	str	r1, [r0]		/* store length of loader info */
	str	r9, [r0, #4]		/* store r0 from boot loader */
	str	r8, [r0, #8]		/* store r1 from boot loader */
	str	ip, [r0, #12]		/* store r2 from boot loader */
	str	fp, [r0, #16]		/* store r3 from boot loader */
	ldr	r5, =KERNPHYSADDR	/* load KERNPHYSADDR as the physical address */
	str	r5, [r0, #20]		/* store the physical address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* initarm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	adr	r0, .Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
#define MMU_INIT(va,pa,n_sec,attr) \
	.word	n_sec					    ; \
	.word	4*((va)>>L1_S_SHIFT)			    ; \
	.word	(pa)|(attr)				    ;
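
/*
 * For example, with the usual KERNBASE of 0xc0000000,
 * MMU_INIT(KERNBASE, PHYSADDR, 64, attr) emits a count of 64, an L1 table
 * offset of 4 * (0xc0000000 >> 20) = 0x3000, and an initial descriptor of
 * PHYSADDR | attr, i.e. it maps the 64 MB at VA 0xc0000000 onto the 64 MB
 * of physical memory starting at PHYSADDR.
 */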

Lvirtaddr:
	.word	KERNVIRTADDR
Lphysaddr:
	.word	KERNPHYSADDR
Lreal_start:
	.word	_start
Lend:
	.word	_edata
Lstartup_pagetable:
	.word	pagetable
#ifdef SMP
Lstartup_pagetable_secondary:
	.word	temp_pagetable
#endif
END(btext)
END(_start)

mmu_init_table:
	/* fill all table VA==PA */
	/* map SDRAM VA==PA, WT cacheable */
#if !defined(SMP)
	MMU_INIT(PHYSADDR, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
	/* map VA 0xc0000000..0xc3ffffff to PA */
	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	/* Map in 0x04000000 worth of the SoC's devices for bootstrap debugging */
	MMU_INIT(SOCDEV_VA, SOCDEV_PA, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#endif
#else
	MMU_INIT(PHYSADDR, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
	/* map VA 0xc0000000..0xc3ffffff to PA */
	MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
	MMU_INIT(0x48000000, 0x48000000, 1, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW))
#endif /* SMP */
	.word 0	/* end of table */
.Lstart:
	.word	_edata
	.word	_ebss
	.word	svcstk + INIT_ARM_STACK_SIZE

.Lvirt_done:
	.word	virt_done
#if defined(SMP)
.Lmpvirt_done:
	.word	mpvirt_done
#endif

.Lmainreturned:
	.asciz	"main() returned"
	.align	0

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE

/*
 * Memory for the initial pagetable. We cannot place this in .bss, as
 * .bss is cleared only after this table is already in use by the MMU.
 */
	.section ".init_pagetable"
	.align	14 /* 16KiB aligned */
pagetable:
	.space	L1_TABLE_SIZE
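
/*
 * The L1 translation table holds 4096 four-byte entries (L1_TABLE_SIZE is
 * 16 KiB), each covering 1 MB of virtual address space when section
 * mappings are used; the TTB register requires the 16 KiB alignment above.
 */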

	.text
	.align	0

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#if defined(SMP)
Lsramaddr:
	.word	0xffff0080

#if 0
#define	AP_DEBUG(tmp)			\
	mrc	p15, 0, r1, c0, c0, 5;	\
	ldr	r0, Lsramaddr;		\
	add	r0, r1, lsl #2;		\
	mov	r1, tmp;		\
	str	r1, [r0], #0x0000;
#else
#define AP_DEBUG(tmp)
#endif

ASENTRY_NP(mptramp)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0	/* Invalidate I+D caches */

	AP_DEBUG(#1)

	mrs	r3, cpsr		/* Switch to SVC32 mode */
	bic	r3, r3, #(PSR_MODE)
	orr	r3, r3, #(PSR_SVC32_MODE)
	msr	cpsr_fsxc, r3

	mrc	p15, 0, r0, c0, c0, 5
	and	r0, #0x0f		/* Get CPU ID */

	/* Read boot address for CPU */
	mov	r1, #0x100
	mul	r2, r0, r1
	ldr	r1, Lpmureg
	add	r0, r2, r1
	ldr	r1, [r0], #0x00

	mov	pc, r1

Lpmureg:
	.word	0xd0022124
END(mptramp)

ASENTRY_NP(mpentry)

	AP_DEBUG(#2)

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7

	adr	r7, Ltag
	bic	r7, r7, #0xf0000000
	orr	r7, r7, #PHYSADDR

	/* Disable MMU for a while */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0

	nop
	nop
	nop

	AP_DEBUG(#3)

Ltag:
	ldr	r0, Lstartup_pagetable_secondary
	bic	r0, r0, #0xf0000000
	orr	r0, r0, #PHYSADDR
	ldr	r0, [r0]
#if defined(SMP)
	orr	r0, r0, #0		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	AP_DEBUG(#4)

	/* Set the Domain Access Control register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/* Enable MMU */
	mrc	p15, 0, r0, c1, c0, 0
#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
	orr	r0, r0, #CPU_CONTROL_AF_ENABLE
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack */
	mrc	p15, 0, r0, c0, c0, 5	/* Get CPU ID */
	and	r0, r0, #15
	mov	r1, #2048		/* Each AP takes a 2 KB slice of */
	mul	r2, r1, r0		/* svcstk, offset by its CPU ID */
	sub	sp, sp, r2
	str	r1, [sp]
	ldr	pc, .Lmpvirt_done

mpvirt_done:

	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	0
END(mpentry)
#endif

ENTRY_NP(cpu_halt)
	mrs	r2, cpsr
	bic	r2, r2, #(PSR_MODE)
	orr	r2, r2, #(PSR_SVC32_MODE)
	orr	r2, r2, #(I32_bit | F32_bit)
	msr	cpsr_fsxc, r2

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]

	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
	mov	lr, pc
	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]

	/*
	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine
	 * whether the v4 MMU-disable sequence below is necessary.
	 */

	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
	ldr	r1, [r1]
	cmp	r1, #0
	mov	r2, #0

	/*
	 * MMU & IDC off, 32 bit program & data space.
	 * Hurl ourselves into the ROM.
	 */
	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	mcrne	p15, 0, r2, c8, c7, 0	/* nail I+D TLB on ARMv4 and greater */
	mov	pc, r4

	/*
	 * _cpu_reset_address contains the address to branch to in order to
	 * complete the cpu reset after turning the MMU off.
	 * This variable is provided by the hardware-specific code.
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)

	/*
	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals whether
	 * the v4 MMU-disable instruction needs executing: it is an illegal
	 * instruction on e.g. ARM6/7, where it locks up the machine in an
	 * endless illegal-instruction / data-abort / reset loop.
	 */
.Lcpu_reset_needs_v4_MMU_disable:
	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
END(cpu_halt)


/*
 * setjmp + longjmp
 */
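/*
 * These kernel-internal versions save and restore r4-r14 (the callee-saved
 * registers plus sp and lr) in an 11-word buffer passed in r0.  setjmp
 * returns 0 directly; longjmp reloads the saved registers and, by returning
 * through the restored lr, resumes at the original setjmp call site with a
 * return value of 1.
 */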
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
END(setjmp)

ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)

	.data
	.global _C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)

ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)

ENTRY_NP(sigcode)
	mov	r0, sp

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */
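
	/*
	 * Note on the pc-relative offsets: in ARM mode pc reads as the
	 * current instruction's address plus 8, so the first ldr (at
	 * sigcode+4) loads from sigcode+4+8+12 = sigcode+24, the
	 * ".word SYS_sigreturn" below, and the second ldr (at sigcode+12)
	 * loads from sigcode+12+8+8 = sigcode+28, the ".word SYS_exit".
	 */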

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well, if that failed we had better exit quickly! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16

	.word	SYS_sigreturn
	.word	SYS_exit

	.align	0
	.global _C_LABEL(esigcode)
_C_LABEL(esigcode):

	.data
	.global szsigcode
szsigcode:
	.long esigcode-sigcode
END(sigcode)
/* End of locore.S */