/* xref: /freebsd/sys/arm/arm/locore.S (revision 0572ccaa4543b0abef8ef81e384c1d04de9f3da1) */
1/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/
2
3/*-
4 * Copyright 2011 Semihalf
5 * Copyright (C) 1994-1997 Mark Brinicombe
6 * Copyright (C) 1994 Brini
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Brini.
20 * 4. The name of Brini may not be used to endorse or promote products
21 *    derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36#include "assym.s"
37#include <sys/syscall.h>
38#include <machine/asm.h>
39#include <machine/armreg.h>
40#include <machine/pte.h>
41
42__FBSDID("$FreeBSD$");
43
/* What size should this really be ? It is only used by initarm() */
#define INIT_ARM_STACK_SIZE	(2048 * 4)

/*
 * CPWAIT: wait for a CP15 write to take effect before executing the
 * next instruction: an arbitrary CP15 read, a dependent mov to stall
 * until the read completes, then a branch-to-next-insn to flush the
 * pipeline.
 */
#define	CPWAIT_BRANCH							 \
	sub	pc, pc, #4

#define	CPWAIT(tmp)							 \
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */
54
55/*
56 * This is for kvm_mkdb, and should be the address of the beginning
57 * of the kernel text segment (not necessarily the same as kernbase).
58 */
59	.text
60	.align	0
61.globl kernbase
62.set kernbase,KERNBASE
63.globl physaddr
64.set physaddr,PHYSADDR
65
66/*
67 * On entry for FreeBSD boot ABI:
68 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
69 *	r1 - if (r0 == 0) then metadata pointer
70 * On entry for Linux boot ABI:
71 *	r0 - 0
72 *	r1 - machine type (passed as arg2 to initarm)
73 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
74 *
75 * For both types of boot we gather up the args, put them in a struct arm_boot_params
76 * structure and pass that to initarm.
77 */
78ENTRY_NP(btext)
79ASENTRY_NP(_start)
80	STOP_UNWINDING		/* Can't unwind into the bootloader! */
81
82	mov	r9, r0		/* 0 or boot mode from boot2 */
83	mov	r8, r1		/* Save Machine type */
84	mov	ip, r2		/* Save meta data */
85	mov	fp, r3		/* Future expansion */
86
87	/* Make sure interrupts are disabled. */
88	mrs	r7, cpsr
89	orr	r7, r7, #(I32_bit|F32_bit)
90	msr	cpsr_c, r7
91
92#if defined (FLASHADDR) && defined(LOADERRAMADDR)
93	/* Check if we're running from flash. */
94	ldr	r7, =FLASHADDR
95	/*
96	 * If we're running with MMU disabled, test against the
97	 * physical address instead.
98	 */
99	mrc     p15, 0, r2, c1, c0, 0
100	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
101	ldreq	r6, =PHYSADDR
102	ldrne	r6, =LOADERRAMADDR
103	cmp	r7, r6
104	bls 	flash_lower
105	cmp	r7, pc
106	bhi	from_ram
107	b	do_copy
108
109flash_lower:
110	cmp	r6, pc
111	bls	from_ram
112do_copy:
113	ldr	r7, =KERNBASE
114	adr	r1, _start
115	ldr	r0, Lreal_start
116	ldr	r2, Lend
117	sub	r2, r2, r0
118	sub	r0, r0, r7
119	add	r0, r0, r6
120	mov	r4, r0
121	bl	memcpy
122	ldr	r0, Lram_offset
123	add	pc, r4, r0
124Lram_offset:	.word from_ram-_C_LABEL(_start)
125from_ram:
126	nop
127#endif
128	adr	r7, Lunmapped
129	bic     r7, r7, #0xf0000000
130	orr     r7, r7, #PHYSADDR
131
132
133disable_mmu:
134	/* Disable MMU for a while */
135	mrc     p15, 0, r2, c1, c0, 0
136	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
137	    CPU_CONTROL_WBUF_ENABLE)
138	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
139	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
140	mcr     p15, 0, r2, c1, c0, 0
141
142	nop
143	nop
144	nop
145	mov	pc, r7
146Lunmapped:
147	/*
148	 * Build page table from scratch.
149	 */
150
151	/* Find the delta between VA and PA */
152	adr	r0, Lpagetable
153	ldr	r1, [r0]
154	sub	r2, r1, r0
155	/* At this point: r2 = VA - PA */
156
157	/*
158	 * Find the physical address of the table. After these two
159	 * instructions:
160	 * r1 = va(pagetable)
161	 *
162	 * r0 = va(pagetable) - (VA - PA)
163	 *    = va(pagetable) - VA + PA
164	 *    = pa(pagetable)
165	 */
166	ldr	r1, [r0, #4]
167	sub	r0, r1, r2
168
169	/*
170	 * Map PA == VA
171	 */
172	/* Find the start kernels load address */
173	adr	r5, _start
174	ldr	r2, =(L1_S_OFFSET)
175	bic	r5, r2
176	mov	r1, r5
177	mov	r2, r5
178	/* Map 64MiB, preserved over calls to build_pagetables */
179	mov	r3, #64
180	bl	build_pagetables
181
182	/* Create the kernel map to jump to */
183	mov	r1, r5
184	ldr	r2, =(KERNVIRTADDR)
185	bl	build_pagetables
186
187#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
188	/* Create the custom map */
189	ldr	r1, =SOCDEV_PA
190	ldr	r2, =SOCDEV_VA
191	bl	build_pagetables
192#endif
193
194#if defined(SMP)
195	orr 	r0, r0, #2		/* Set TTB shared memory flag */
196#endif
197	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
198	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */
199
200#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
201	mov	r0, #0
202	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
203#endif
204
205	/* Set the Domain Access register.  Very important! */
206	mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
207	mcr	p15, 0, r0, c3, c0, 0
208	/*
209	 * Enable MMU.
210	 * On armv6 enable extended page tables, and set alignment checking
211	 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
212	 * instructions emitted by clang.
213	 */
214	mrc	p15, 0, r0, c1, c0, 0
215#ifdef _ARM_ARCH_6
216	orr	r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
217	orr	r0, r0, #(CPU_CONTROL_AFLT_ENABLE)
218	orr	r0, r0, #(CPU_CONTROL_AF_ENABLE)
219#endif
220	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
221	mcr	p15, 0, r0, c1, c0, 0
222	nop
223	nop
224	nop
225	CPWAIT(r0)
226
227mmu_done:
228	nop
229	adr	r1, .Lstart
230	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
231	sub	r2, r2, r1		/* get zero init data */
232	mov	r3, #0
233.L1:
234	str	r3, [r1], #0x0004	/* get zero init data */
235	subs	r2, r2, #4
236	bgt	.L1
237	ldr	pc, .Lvirt_done
238
239virt_done:
240	mov	r1, #28			/* loader info size is 28 bytes also second arg */
241	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
242	mov	r0, sp			/* loader info pointer is first arg */
243	bic	sp, sp, #7		/* align stack to 8 bytes */
244	str	r1, [r0]		/* Store length of loader info */
245	str	r9, [r0, #4]		/* Store r0 from boot loader */
246	str	r8, [r0, #8]		/* Store r1 from boot loader */
247	str	ip, [r0, #12]		/* store r2 from boot loader */
248	str	fp, [r0, #16]		/* store r3 from boot loader */
249	str	r5, [r0, #20]		/* store the physical address */
250	adr	r4, Lpagetable		/* load the pagetable address */
251	ldr	r5, [r4, #4]
252	str	r5, [r0, #24]		/* store the pagetable address */
253	mov	fp, #0			/* trace back starts here */
254	bl	_C_LABEL(initarm)	/* Off we go */
255
256	/* init arm will return the new stack pointer. */
257	mov	sp, r0
258
259	bl	_C_LABEL(mi_startup)		/* call mi_startup()! */
260
261	adr	r0, .Lmainreturned
262	b	_C_LABEL(panic)
263	/* NOTREACHED */
264END(btext)
265END(_start)
266
267/*
268 * Builds the page table
269 * r0 - The table base address
270 * r1 - The physical address (trashed)
271 * r2 - The virtual address (trashed)
272 * r3 - The number of 1MiB sections
273 * r4 - Trashed
274 *
275 * Addresses must be 1MiB aligned
276 */
277build_pagetables:
278	/* Set the required page attributed */
279	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
280#if defined(SMP)
281	orr	r4, #(L1_SHARED)
282#endif
283	orr	r1, r4
284
285	/* Move the virtual address to the correct bit location */
286	lsr	r2, #(L1_S_SHIFT - 2)
287
288	mov	r4, r3
2891:
290	str	r1, [r0, r2]
291	add	r2, r2, #4
292	add	r1, r1, #(L1_S_SIZE)
293	adds	r4, r4, #-1
294	bhi	1b
295
296	RET
297
/*
 * Lpagetable is a pair of words: the link-time ("virtual") address of
 * this spot, followed by the virtual address of the pagetable.  The
 * startup code subtracts its own run-time address from the first word
 * to obtain the VA - PA delta.
 */
Lpagetable:
	.word	.
	.word	pagetable

Lvirtaddr:
	.word	KERNVIRTADDR
Lphysaddr:
	.word	KERNPHYSADDR
Lreal_start:
	.word	_start
Lend:
	.word	_edata

/* Used by mmu_done/mpentry: start of bss, end of bss, initial stack top. */
.Lstart:
	.word	_edata
	.word	_ebss
	.word	svcstk + INIT_ARM_STACK_SIZE

/* Virtual address to continue at once the MMU is on. */
.Lvirt_done:
	.word	virt_done

.Lmainreturned:
	.asciz	"main() returned"
	.align	0
322
	.bss
/* Initial kernel stack, used by initarm() (see INIT_ARM_STACK_SIZE). */
svcstk:
	.space	INIT_ARM_STACK_SIZE

/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable"
	.align	14 /* 16KiB aligned */
pagetable:
	.space	L1_TABLE_SIZE

	.text
	.align	0

/* Pointer to the cpufuncs dispatch table; used by cpu_halt below. */
.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)
341
#if defined(SMP)

.Lmpvirt_done:
	.word	mpvirt_done
Lstartup_pagetable_secondary:
	.word	temp_pagetable

/*
 * Entry point for secondary (application) processors.  Disables
 * interrupts and the MMU, invalidates the caches, installs the
 * temporary page table, enables the MMU, carves out a per-CPU slice
 * of the startup stack, and calls init_secondary().
 */
ASENTRY_NP(mpentry)

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7

	/* Disable MMU.  It should be disabled already, but make sure. */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

#if defined(ARM_MMU_V6)
	bl	armv6_idcache_inv_all	/* Modifies r0 only */
#elif defined(ARM_MMU_V7)
	bl	armv7_idcache_inv_all	/* Modifies r0-r3, ip */
#endif

	/*
	 * Convert Lstartup_pagetable_secondary's address to physical
	 * (replace the top nibble with the physical base), then load
	 * the page table pointer it holds.
	 */
	ldr	r0, Lstartup_pagetable_secondary
	bic	r0, r0, #0xf0000000
	orr	r0, r0, #PHYSADDR
	ldr	r0, [r0]
	orr 	r0, r0, #2		/* Set TTB shared memory flag */
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */

	/* Set the Domain Access register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/* Enable MMU */
	mrc	p15, 0, r0, c1, c0, 0
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
	orr	r0, r0, #CPU_CONTROL_AF_ENABLE
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_IC_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

	/*
	 * Give this CPU its own 2KiB slice below the boot stack top,
	 * indexed by the low 4 bits of the CPU ID register (MPIDR).
	 */
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	mrc	p15, 0, r0, c0, c0, 5	/* read CPU ID register */
	and	r0, r0, #15		/* r0 = CPU number */
	mov	r1, #2048		/* per-CPU startup stack size */
	mul	r2, r1, r0
	sub	sp, sp, r2
	str	r1, [sp]
	ldr	pc, .Lmpvirt_done	/* jump to mpvirt_done at its VA */

mpvirt_done:

	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go */

	/* init_secondary() must never return; panic if it does. */
	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	0
END(mpentry)
#endif
426
/*
 * cpu_halt: reset the machine.
 *
 * Switches to SVC32 mode with interrupts disabled, writes back and
 * invalidates the caches via the cpufuncs table, turns the MMU off,
 * and jumps to *cpu_reset_address.  Does not return.
 */
ENTRY_NP(cpu_halt)
	mrs     r2, cpsr
	bic	r2, r2, #(PSR_MODE)
	orr     r2, r2, #(PSR_SVC32_MODE)
	orr	r2, r2, #(I32_bit | F32_bit)
	msr     cpsr_fsxc, r2

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]		/* r4 = *cpu_reset_address */

	/* Write back and invalidate I/D and L2 caches via cpufuncs. */
	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
	mov	lr, pc
	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]

	/*
	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
	 * necessary.
	 */

	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
	ldr	r1, [r1]
	cmp	r1, #0			/* condition tested by mcrne below */
	mov	r2, #0

	/*
	 * MMU & IDC off, 32 bit program & data space
	 * Hurl ourselves into the ROM
	 */
	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
	mcr     15, 0, r0, c1, c0, 0
	mcrne   15, 0, r2, c8, c7, 0 	/* nail I+D TLB on ARMv4 and greater */
	mov     pc, r4

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off
	 * This variable is provided by the hardware specific code
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)

	/*
	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
	 * v4 MMU disable instruction needs executing... it is an illegal instruction
	 * on f.e. ARM6/7 that locks up the computer in an endless illegal
	 * instruction / data-abort / reset loop.
	 */
.Lcpu_reset_needs_v4_MMU_disable:
	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
END(cpu_halt)
479
480
481/*
482 * setjump + longjmp
483 */
484ENTRY(setjmp)
485	stmia	r0, {r4-r14}
486	mov	r0, #0x00000000
487	RET
488END(setjmp)
489
/*
 * longjmp(env): restore r4-r14 saved by setjmp from the buffer at r0
 * and return 1 (to setjmp's caller, via the restored lr).
 */
ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)
495
	.data
	/* esym: initialized to the end-of-kernel-image symbol "end". */
	.global _C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)
499
/* abort: spin forever (branch to self). */
ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)
503
/*
 * Signal trampoline: invoked on return from a signal handler.
 * Passes the ucontext (at sp + SIGF_UC) to the sigreturn syscall.
 * The literal words at the end are addressed pc-relatively, so the
 * layout of this code must not change.
 */
ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC	/* r0 = pointer to the ucontext */

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16

	/* Literal pool read by the pc-relative loads above. */
	.word	SYS_sigreturn
	.word	SYS_exit

	.align	0
	.global _C_LABEL(esigcode)
		_C_LABEL(esigcode):

	.data
	/* szsigcode: size in bytes of the trampoline, copied to user stacks. */
	.global szsigcode
szsigcode:
	.long esigcode-sigcode
END(sigcode)
540/* End of locore.S */
541