xref: /freebsd/sys/arm/arm/locore.S (revision a0e793cbf1951d07fc47a0d9ea389d7dacba5213)
1/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/
2
3/*-
4 * Copyright 2011 Semihalf
5 * Copyright (C) 1994-1997 Mark Brinicombe
6 * Copyright (C) 1994 Brini
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Brini.
20 * 4. The name of Brini may not be used to endorse or promote products
21 *    derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36#include "assym.s"
37#include <sys/syscall.h>
38#include <machine/asm.h>
39#include <machine/armreg.h>
40#include <machine/pte.h>
41
42__FBSDID("$FreeBSD$");
43
44/* What size should this really be ? It is only used by initarm() */
45#define INIT_ARM_STACK_SIZE	(2048 * 4)
46
/*
 * CPWAIT: drain the CP15 operation pipeline before relying on the
 * effect of a just-issued MMU/cache control write.  The arbitrary
 * CP15 read stalls until prior CP15 operations complete, the
 * register move waits for the read, and the branch flushes the
 * prefetch queue.
 */
47#define	CPWAIT_BRANCH							 \
48	sub	pc, pc, #4
49
50#define	CPWAIT(tmp)							 \
51	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
52	mov	tmp, tmp		/* wait for it to complete */	;\
53	CPWAIT_BRANCH			/* branch to next insn */
54
55/*
56 * This is for kvm_mkdb, and should be the address of the beginning
57 * of the kernel text segment (not necessarily the same as kernbase).
58 */
59	.text
60	.align	0
/* kernbase: kernel virtual base address, exported for tools. */
61.globl kernbase
62.set kernbase,KERNBASE
/* physaddr: kernel physical load address, exported for tools. */
63.globl physaddr
64.set physaddr,PHYSADDR
65
66/*
67 * On entry for FreeBSD boot ABI:
68 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
69 *	r1 - if (r0 == 0) then metadata pointer
70 * On entry for Linux boot ABI:
71 *	r0 - 0
72 *	r1 - machine type (passed as arg2 to initarm)
73 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
74 *
75 * For both types of boot we gather up the args, put them in a struct arm_boot_params
76 * structure and pass that to initarm.
77 */
78	.globl	btext
79btext:
80ASENTRY_NP(_start)
81	STOP_UNWINDING		/* Can't unwind into the bootloader! */
82
83	mov	r9, r0		/* 0 or boot mode from boot2 */
84	mov	r8, r1		/* Save Machine type */
85	mov	ip, r2		/* Save meta data */
86	mov	fp, r3		/* Future expansion */
87
88	/* Make sure interrupts are disabled. */
89	mrs	r7, cpsr
90	orr	r7, r7, #(I32_bit|F32_bit)
91	msr	cpsr_c, r7
92
93#if defined (FLASHADDR) && defined(LOADERRAMADDR)
94	/* Check if we're running from flash. */
95	ldr	r7, =FLASHADDR
96	/*
97	 * If we're running with MMU disabled, test against the
98	 * physical address instead.
99	 */
100	mrc     p15, 0, r2, c1, c0, 0
101	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
102	ldreq	r6, =PHYSADDR
103	ldrne	r6, =LOADERRAMADDR
104	cmp	r7, r6
105	bls 	flash_lower
106	cmp	r7, pc
107	bhi	from_ram
108	b	do_copy
109
110flash_lower:
111	cmp	r6, pc
112	bls	from_ram
	/*
	 * Copy the kernel image out of flash into RAM and continue at
	 * the relocated from_ram label:
	 *   r1 = current (flash) address of _start
	 *   r0 = RAM destination = _start - KERNBASE + RAM base (r6)
	 *   r2 = image size = _edata - _start
	 * r4 keeps the destination so we can jump into the copy.
	 */
113do_copy:
114	ldr	r7, =KERNBASE
115	adr	r1, _start
116	ldr	r0, Lreal_start
117	ldr	r2, Lend
118	sub	r2, r2, r0
119	sub	r0, r0, r7
120	add	r0, r0, r6
121	mov	r4, r0
122	bl	memcpy
123	ldr	r0, Lram_offset
124	add	pc, r4, r0
125Lram_offset:	.word from_ram-_C_LABEL(_start)
126from_ram:
127	nop
128#endif
	/*
	 * Compute the physical address of Lunmapped so we can run it
	 * with the MMU off: clear the top nibble of the PC-relative
	 * address and substitute PHYSADDR.
	 * NOTE(review): this assumes VA and PA differ only in the top
	 * 4 address bits -- confirm when porting to a new platform.
	 */
129	adr	r7, Lunmapped
130	bic     r7, r7, #0xf0000000
131	orr     r7, r7, #PHYSADDR
132
133
134disable_mmu:
135	/* Disable MMU for a while */
136	mrc     p15, 0, r2, c1, c0, 0
137	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
138	    CPU_CONTROL_WBUF_ENABLE)
139	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
140	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
141	mcr     p15, 0, r2, c1, c0, 0
142
143	nop
144	nop
145	nop
146	mov	pc, r7
147Lunmapped:
148	/*
149	 * Build page table from scratch.
150	 */
151
152	/* Find the delta between VA and PA */
153	adr	r0, Lpagetable
154	ldr	r1, [r0]
155	sub	r2, r1, r0
156	/* At this point: r2 = VA - PA */
157
158	/*
159	 * Find the physical address of the table. After these two
160	 * instructions:
161	 * r1 = va(pagetable)
162	 *
163	 * r0 = va(pagetable) - (VA - PA)
164	 *    = va(pagetable) - VA + PA
165	 *    = pa(pagetable)
166	 */
167	ldr	r1, [r0, #4]
168	sub	r0, r1, r2
169
170#ifndef _ARM_ARCH_6
171	/*
172	 * Some of the older ports (the various XScale, mostly) assume
173	 * that the memory before the kernel is mapped, and use it for
174	 * the various stacks, page tables, etc. For those CPUs, map the
175	 * 64 first MB of RAM, as it used to be.
176	 */
177	/*
178	 * Map PA == VA
179	 */
180	ldr     r5, =PHYSADDR
181	mov     r1, r5
182	mov     r2, r5
183	/* Map 64MiB, preserved over calls to build_pagetables */
184	mov     r3, #64
185	bl      build_pagetables
186
187	/* Create the kernel map to jump to */
188	mov     r1, r5
189	ldr     r2, =(KERNBASE)
190	bl      build_pagetables
191	ldr	r5, =(KERNPHYSADDR)
192#else
193	/*
194	 * Map PA == VA
195	 */
196	/* Find the kernel's load address, rounded down to a 1MiB section */
197	adr	r5, _start
198	ldr	r2, =(L1_S_OFFSET)
199	bic	r5, r2
200	mov	r1, r5
201	mov	r2, r5
202	/* Map 64MiB, preserved over calls to build_pagetables */
203	mov	r3, #64
204	bl	build_pagetables
205
206	/* Create the kernel map to jump to */
207	mov	r1, r5
208	ldr	r2, =(KERNVIRTADDR)
209	bl	build_pagetables
210#endif
211
212#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
213	/* Create the custom map */
214	ldr	r1, =SOCDEV_PA
215	ldr	r2, =SOCDEV_VA
216	bl	build_pagetables
217#endif
218
219#if defined(SMP)
220	orr 	r0, r0, #2		/* Set TTB shared memory flag */
221#endif
222	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
223	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */
224
225#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
226	mov	r0, #0
227	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
228#endif
229
230	/* Set the Domain Access register.  Very important! */
231	mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
232	mcr	p15, 0, r0, c3, c0, 0
233	/*
234	 * Enable MMU.
235	 * On armv6 enable extended page tables, and set alignment checking
236	 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
237	 * instructions emitted by clang.
238	 */
239	mrc	p15, 0, r0, c1, c0, 0
240#ifdef _ARM_ARCH_6
241	orr	r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
242	orr	r0, r0, #(CPU_CONTROL_AFLT_ENABLE)
243	orr	r0, r0, #(CPU_CONTROL_AF_ENABLE)
244#endif
245	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
246	mcr	p15, 0, r0, c1, c0, 0
247	nop
248	nop
249	nop
250	CPWAIT(r0)
251
252mmu_done:
253	nop
	/* Zero the BSS before entering C code. */
254	adr	r1, .Lstart
255	ldmia	r1, {r1, r2, sp}	/* r1 = _edata, r2 = _ebss, sp = top of svcstk */
256	sub	r2, r2, r1		/* r2 = number of BSS bytes to clear */
257	mov	r3, #0
258.L1:
259	str	r3, [r1], #0x0004	/* clear the BSS a word at a time */
260	subs	r2, r2, #4
261	bgt	.L1
262	ldr	pc, .Lvirt_done
263
264virt_done:
265	mov	r1, #28			/* loader info size is 28 bytes also second arg */
266	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
267	mov	r0, sp			/* loader info pointer is first arg */
268	bic	sp, sp, #7		/* align stack to 8 bytes */
269	str	r1, [r0]		/* Store length of loader info */
270	str	r9, [r0, #4]		/* Store r0 from boot loader */
271	str	r8, [r0, #8]		/* Store r1 from boot loader */
272	str	ip, [r0, #12]		/* store r2 from boot loader */
273	str	fp, [r0, #16]		/* store r3 from boot loader */
274	str	r5, [r0, #20]		/* store the physical address */
275	adr	r4, Lpagetable		/* load the pagetable address */
276	ldr	r5, [r4, #4]
277	str	r5, [r0, #24]		/* store the pagetable address */
278	mov	fp, #0			/* trace back starts here */
279	bl	_C_LABEL(initarm)	/* Off we go */
280
281	/* init arm will return the new stack pointer. */
282	mov	sp, r0
283
284	bl	_C_LABEL(mi_startup)		/* call mi_startup()! */
285
286	adr	r0, .Lmainreturned
287	b	_C_LABEL(panic)
288	/* NOTREACHED */
289END(_start)
290
291/*
292 * Builds the page table
293 * r0 - The table base address
294 * r1 - The physical address (trashed)
295 * r2 - The virtual address (trashed)
296 * r3 - The number of 1MiB sections
297 * r4 - Trashed
298 *
299 * Addresses must be 1MiB aligned
300 */
301build_pagetables:
302	/* Set the required page attributes */
303	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
304#if defined(SMP)
305	orr	r4, #(L1_SHARED)
306#endif
307	orr	r1, r4
308
	/* r1 is now a complete L1 section descriptor for the first MiB. */
309	/* Move the virtual address to the correct bit location */
310	lsr	r2, #(L1_S_SHIFT - 2)
311
	/* r2 is now the byte offset of the first L1 entry in the table. */
312	mov	r4, r3
	/* Store one section descriptor per MiB, r4 counts sections left. */
3131:
314	str	r1, [r0, r2]
315	add	r2, r2, #4
316	add	r1, r1, #(L1_S_SIZE)
317	adds	r4, r4, #-1
318	bhi	1b
319
320	RET
321
/*
 * Literal pool for _start.  Lpagetable holds the link-time address of
 * this very word (".") followed by the pagetable address; the pair is
 * used above to compute the VA - PA delta at run time.
 */
322Lpagetable:
323	.word	.
324	.word	pagetable
325
326Lvirtaddr:
327	.word	KERNVIRTADDR
328Lphysaddr:
329	.word	KERNPHYSADDR
330Lreal_start:
331	.word	_start
332Lend:
333	.word	_edata
334
/* Loaded with ldmia at mmu_done: BSS start, BSS end, initial sp. */
335.Lstart:
336	.word	_edata
337	.word	_ebss
338	.word	svcstk + INIT_ARM_STACK_SIZE
339
/* Virtual address jumped to once the MMU is on. */
340.Lvirt_done:
341	.word	virt_done
342
/* Panic string used if mi_startup() ever returns. */
343.Lmainreturned:
344	.asciz	"main() returned"
345	.align	0
346
/* Initial SVC-mode bootstrap stack used until initarm() provides one. */
347	.bss
348svcstk:
349	.space	INIT_ARM_STACK_SIZE
350
351/*
352 * Memory for the initial pagetable. We are unable to place this in
353 * the bss as this will be cleared after the table is loaded.
354 */
355	.section ".init_pagetable"
356	.align	14 /* 16KiB aligned */
357pagetable:
358	.space	L1_TABLE_SIZE
359
360	.text
361	.align	0
362
/* Address of the cpufuncs dispatch table used by cpu_halt below. */
363.Lcpufuncs:
364	.word	_C_LABEL(cpufuncs)
365
366#if defined(SMP)
367
368.Lmpvirt_done:
369	.word	mpvirt_done
/* Pointer to the temporary pagetable used to bring secondary CPUs up. */
370Lstartup_pagetable_secondary:
371	.word	temp_pagetable
372
/*
 * Secondary-CPU entry point (SMP).  Mirrors the _start MMU bring-up:
 * mask interrupts, disable and re-enable the MMU on the shared
 * temporary pagetable, carve out a per-CPU slice of the bootstrap
 * stack, then call init_secondary().
 */
373ASENTRY_NP(mpentry)
374
375	/* Make sure interrupts are disabled. */
376	mrs	r7, cpsr
377	orr	r7, r7, #(I32_bit|F32_bit)
378	msr	cpsr_c, r7
379
380	/* Disable MMU.  It should be disabled already, but make sure. */
381	mrc	p15, 0, r2, c1, c0, 0
382	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
383	    CPU_CONTROL_WBUF_ENABLE)
384	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
385	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
386	mcr	p15, 0, r2, c1, c0, 0
387	nop
388	nop
389	nop
390	CPWAIT(r0)
391
392#if defined(ARM_MMU_V6)
393	bl	armv6_idcache_inv_all	/* Modifies r0 only */
394#elif defined(ARM_MMU_V7)
395	bl	armv7_idcache_inv_all	/* Modifies r0-r3, ip */
396#endif
397
	/*
	 * Convert the literal's virtual address to physical by masking
	 * the top nibble and substituting PHYSADDR (same trick as
	 * Lunmapped in _start), then load the pagetable pointer.
	 */
398	ldr	r0, Lstartup_pagetable_secondary
399	bic	r0, r0, #0xf0000000
400	orr	r0, r0, #PHYSADDR
401	ldr	r0, [r0]
402	orr 	r0, r0, #2		/* Set TTB shared memory flag */
403	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
404	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */
405
406	mov	r0, #0
407	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
408
409	/* Set the Domain Access register.  Very important! */
410	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
411	mcr	p15, 0, r0, c3, c0, 0
412	/* Enable MMU */
413	mrc	p15, 0, r0, c1, c0, 0
414	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
415	orr	r0, r0, #CPU_CONTROL_AF_ENABLE
416	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
417	    CPU_CONTROL_WBUF_ENABLE)
418	orr	r0, r0, #(CPU_CONTROL_IC_ENABLE)
419	orr	r0, r0, #(CPU_CONTROL_BPRD_ENABLE)
420	mcr	p15, 0, r0, c1, c0, 0
421	nop
422	nop
423	nop
424	CPWAIT(r0)
425
	/*
	 * Give each core its own 2KiB slice of the bootstrap stack:
	 * read the CPU id (low 4 bits of the c0,c0,5 ID register) and
	 * move sp down by id * 2048.
	 * NOTE(review): the final "str r1, [sp]" writes 2048 at the new
	 * stack top -- confirm init_secondary() expects that value there.
	 */
426	adr	r1, .Lstart
427	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
428	mrc	p15, 0, r0, c0, c0, 5
429	and	r0, r0, #15
430	mov	r1, #2048
431	mul	r2, r1, r0
432	sub	sp, sp, r2
433	str	r1, [sp]
434	ldr	pc, .Lmpvirt_done
435
436mpvirt_done:
437
438	mov	fp, #0			/* trace back starts here */
439	bl	_C_LABEL(init_secondary)	/* Off we go */
440
441	adr	r0, .Lmpreturned
442	b	_C_LABEL(panic)
443	/* NOTREACHED */
444
445.Lmpreturned:
446	.asciz	"init_secondary() returned"
447	.align	0
448END(mpentry)
449#endif
450
/*
 * cpu_halt: force SVC32 mode with IRQ/FIQ masked, write back and
 * invalidate the caches via the cpufuncs table, optionally disable the
 * MMU (on CPUs flagged by cpu_reset_needs_v4_MMU_disable), then jump
 * to the platform-supplied *cpu_reset_address.  Never returns.
 */
451ENTRY_NP(cpu_halt)
452	mrs     r2, cpsr
453	bic	r2, r2, #(PSR_MODE)
454	orr     r2, r2, #(PSR_SVC32_MODE)
455	orr	r2, r2, #(I32_bit | F32_bit)
456	msr     cpsr_fsxc, r2
457
458	ldr	r4, .Lcpu_reset_address
459	ldr	r4, [r4]
460
	/*
	 * Indirect calls through the cpufuncs table: "mov lr, pc"
	 * followed by "ldr pc, [...]" is the pre-BLX call idiom.
	 */
461	ldr	r0, .Lcpufuncs
462	mov	lr, pc
463	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
464	mov	lr, pc
465	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]
466
467	/*
468	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
469	 * necessary.
470	 */
471
472	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
473	ldr	r1, [r1]
474	cmp	r1, #0
475	mov	r2, #0
476
477	/*
478	 * MMU & IDC off, 32 bit program & data space
479	 * Hurl ourselves into the ROM
480	 */
481	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
482	mcr     15, 0, r0, c1, c0, 0
	/* Condition flags still hold the cmp above: only on flag != 0. */
483	mcrne   15, 0, r2, c8, c7, 0 	/* nail I+D TLB on ARMv4 and greater */
484	mov     pc, r4
485
486	/*
487	 * _cpu_reset_address contains the address to branch to, to complete
488	 * the cpu reset after turning the MMU off
489	 * This variable is provided by the hardware specific code
490	 */
491.Lcpu_reset_address:
492	.word	_C_LABEL(cpu_reset_address)
493
494	/*
495	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
496	 * v4 MMU disable instruction needs executing... it is an illegal instruction
497	 * on f.e. ARM6/7 that locks up the computer in an endless illegal
498	 * instruction / data-abort / reset loop.
499	 */
500.Lcpu_reset_needs_v4_MMU_disable:
501	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
502END(cpu_halt)
503
504
505/*
506 * setjump + longjmp
507 */
/*
 * setjmp(env): save r4-r14 (callee-saved regs, sp and lr) into the
 * buffer at r0 and return 0.  Restored by longjmp below.
 */
508ENTRY(setjmp)
509	stmia	r0, {r4-r14}
510	mov	r0, #0x00000000
511	RET
512END(setjmp)
513
/*
 * longjmp(env): reload r4-r14 saved by setjmp from the buffer at r0
 * and return 1 at the restored call site (lr is part of the reload).
 */
514ENTRY(longjmp)
515	ldmia	r0, {r4-r14}
516	mov	r0, #0x00000001
517	RET
518END(longjmp)
519
/* esym: end-of-symbols pointer, initialized to the kernel's end. */
520	.data
521	.global _C_LABEL(esym)
522_C_LABEL(esym):	.word	_C_LABEL(end)
523
/* abort: spin forever -- the branch targets its own label. */
524ENTRY_NP(abort)
525	b	_C_LABEL(abort)
526END(abort)
527
/*
 * Signal trampoline: calls sigreturn(2) with the ucontext found at
 * sp + SIGF_UC.  The pc-relative loads below reach the two .word
 * syscall numbers placed immediately after END(sigcode); do not move
 * or reorder these instructions, the #12/#8 offsets depend on them.
 */
528ENTRY_NP(sigcode)
529	mov	r0, sp
530	add	r0, r0, #SIGF_UC
531
532	/*
533	 * Call the sigreturn system call.
534	 *
535	 * We have to load r7 manually rather than using
536	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
537	 * correct. Using the alternative places esigcode at the address
538	 * of the data rather than the address one past the data.
539	 */
540
541	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
542	swi	SYS_sigreturn
543
544	/* Well if that failed we better exit quick ! */
545
546	ldr	r7, [pc, #8]	/* Load SYS_exit */
547	swi	SYS_exit
548
549	/* Branch back to retry SYS_sigreturn */
550	b	. - 16
551END(sigcode)
/*
 * Syscall numbers read pc-relatively by sigcode above; they sit
 * before esigcode so they are copied out with the trampoline.
 */
552	.word	SYS_sigreturn
553	.word	SYS_exit
554
555	.align	0
556	.global _C_LABEL(esigcode)
557		_C_LABEL(esigcode):
558
/* szsigcode: byte length of the trampoline (sigcode..esigcode). */
559	.data
560	.global szsigcode
561szsigcode:
562	.long esigcode-sigcode
563
564/* End of locore.S */
565