/* xref: /freebsd/sys/arm/arm/locore.S (revision 57718be8fa0bd5edc11ab9a72e68cc71982939a6) */
/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/

/*-
 * Copyright 2011 Semihalf
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include "assym.s"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/cpuconf.h>
#include <machine/pte.h>

__FBSDID("$FreeBSD$");
/* What size should this really be ? It is only used by initarm() */
#define INIT_ARM_STACK_SIZE	(2048 * 4)

/*
 * CPWAIT: ensure a preceding CP15 (system coprocessor) write has taken
 * effect before the next instruction executes.  The read/mov pair waits
 * for the coprocessor transfer to complete, and the pc-relative branch
 * ("sub pc, pc, #4" lands on the next instruction) flushes the prefetch.
 */
#define	CPWAIT_BRANCH							 \
	sub	pc, pc, #4

#define	CPWAIT(tmp)							 \
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */
/*
 * This is for kvm_mkdb, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 */
	.text
	.align	0
.globl kernbase
.set kernbase,KERNBASE		/* kernel virtual base address */
.globl physaddr
.set physaddr,PHYSADDR		/* kernel physical load address */
/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
 *
 * For both types of boot we gather up the args, put them in a struct arm_boot_params
 * structure and pass that to initarm.
 *
 * Flow: save bootloader registers, mask interrupts, optionally relocate
 * the kernel from flash to RAM, disable the MMU, build an initial L1
 * table mapping the kernel both at its physical (identity) and virtual
 * addresses, enable the MMU, zero .bss, then build the boot-params
 * struct on the stack and call initarm() followed by mi_startup().
 */
	.globl	btext
btext:
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	mov	r9, r0		/* 0 or boot mode from boot2 */
	mov	r8, r1		/* Save Machine type */
	mov	ip, r2		/* Save meta data */
	mov	fp, r3		/* Future expansion */

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(PSR_I | PSR_F)
	msr	cpsr_c, r7

#if defined (FLASHADDR) && defined(LOADERRAMADDR)
	/* Check if we're running from flash. */
	ldr	r7, =FLASHADDR
	/*
	 * If we're running with MMU disabled, test against the
	 * physical address instead.
	 */
	mrc     p15, 0, r2, c1, c0, 0
	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
	ldreq	r6, =PHYSADDR
	ldrne	r6, =LOADERRAMADDR
	cmp	r7, r6
	bls 	flash_lower
	cmp	r7, pc
	bhi	from_ram
	b	do_copy

flash_lower:
	cmp	r6, pc
	bls	from_ram
do_copy:
	/* Copy the kernel image into RAM and continue executing there. */
	ldr	r7, =KERNBASE
	adr	r1, _start		/* src: where we are executing now */
	ldr	r0, Lreal_start
	ldr	r2, Lend
	sub	r2, r2, r0		/* len = _edata - _start (link addrs) */
	sub	r0, r0, r7		/* offset of _start from KERNBASE */
	add	r0, r0, r6		/* dst = RAM base + offset */
	mov	r4, r0			/* keep dst; memcpy trashes r0-r3 */
	bl	memcpy
	ldr	r0, Lram_offset
	add	pc, r4, r0		/* jump to from_ram in the RAM copy */
Lram_offset:	.word from_ram-_C_LABEL(_start)
from_ram:
	nop
#endif

disable_mmu:
	/* Disable MMU for a while */
	mrc     p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr     p15, 0, r2, c1, c0, 0

	nop
	nop
	nop
	CPWAIT(r0)

Lunmapped:
	/*
	 * Build page table from scratch.
	 */

	/* Find the delta between VA and PA; r0 = pa(pagetable) on return */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

#ifndef _ARM_ARCH_6
	/*
	 * Some of the older ports (the various XScale, mostly) assume
	 * that the memory before the kernel is mapped, and use it for
	 * the various stacks, page tables, etc. For those CPUs, map the
	 * 64 first MB of RAM, as it used to be.
	 */
	/*
	 * Map PA == VA
	 */
	ldr     r5, =PHYSADDR
	mov     r1, r5
	mov     r2, r5
	/* Map 64MiB, preserved over calls to build_pagetables */
	mov     r3, #64
	bl      build_pagetables

	/* Create the kernel map to jump to */
	mov     r1, r5
	ldr     r2, =(KERNBASE)
	bl      build_pagetables
	ldr	r5, =(KERNPHYSADDR)	/* r5 = phys addr, saved into boot params below */
#else
	/*
	 * Map PA == VA
	 */
	/* Find the start kernels load address */
	adr	r5, _start
	ldr	r2, =(L1_S_OFFSET)
	bic	r5, r2			/* round down to a 1MiB section boundary */
	mov	r1, r5
	mov	r2, r5
	/* Map 64MiB, preserved over calls to build_pagetables */
	mov	r3, #64
	bl	build_pagetables

	/* Create the kernel map to jump to */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	bl	build_pagetables
#endif

#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	/* Create the custom map */
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	bl	build_pagetables
#endif

#if defined(SMP)
	orr 	r0, r0, #2		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	/* Set the Domain Access register.  Very important! */
	mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/*
	 * Enable MMU.
	 * On armv6 enable extended page tables, and set alignment checking
	 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
	 * instructions emitted by clang.
	 */
	mrc	p15, 0, r0, c1, c0, 0
#ifdef _ARM_ARCH_6
	orr	r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_AFLT_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_AF_ENABLE)
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

mmu_done:
	nop
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
.L1:
	str	r3, [r1], #0x0004	/* zero .bss: from _edata up to _ebss */
	subs	r2, r2, #4
	bgt	.L1
	ldr	pc, .Lvirt_done		/* jump to virt_done via its virtual address */

virt_done:
	/* Build the struct arm_boot_params on the stack for initarm(). */
	mov	r1, #28			/* loader info size is 28 bytes also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r9, [r0, #4]		/* Store r0 from boot loader */
	str	r8, [r0, #8]		/* Store r1 from boot loader */
	str	ip, [r0, #12]		/* store r2 from boot loader */
	str	fp, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]		/* word 1 = link-time (virtual) addr of pagetable */
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* init arm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)		/* call mi_startup()! */

	adr	r0, .Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)
/*
 * VA_TO_PA_POINTER(name, table): emit a two-word descriptor for use with
 * translate_va_to_pa.  Word 0 is the link-time address of the descriptor
 * itself ("."); word 1 is the link-time address of 'table'.  Comparing
 * word 0 with the run-time ("adr") address of 'name' yields the VA->PA
 * delta even when executing before the MMU is enabled.
 */
#define VA_TO_PA_POINTER(name, table)	 \
name:					;\
	.word	.			;\
	.word	table
/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr  r0, Lpagetable
 *            bl   translate_va_to_pa
 *            r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 */
translate_va_to_pa:
	ldr	r1, [r0]	/* r1 = link-time (virtual) address of the pointer */
	sub	r2, r1, r0	/* r0 held its run-time (adr-derived) address */
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	RET
/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 */
build_pagetables:
	/* Set the required page attributed */
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))	/* section, cached, kernel r/w */
#if defined(SMP)
	orr	r4, #(L1_SHARED)
#endif
	orr	r1, r4			/* r1 = first L1 section descriptor */

	/* Move the virtual address to the correct bit location */
	lsr	r2, #(L1_S_SHIFT - 2)	/* r2 = byte offset of the L1 entry in the table */

	mov	r4, r3			/* r4 = remaining section count */
1:
	str	r1, [r0, r2]		/* store one 1MiB section descriptor */
	add	r2, r2, #4		/* next L1 entry */
	add	r1, r1, #(L1_S_SIZE)	/* next 1MiB of physical memory */
	adds	r4, r4, #-1		/* assembles as subs: decrement and set flags */
	bhi	1b

	RET
VA_TO_PA_POINTER(Lpagetable, pagetable)

/* Link-time addresses used by the flash-to-RAM copy code in _start. */
Lreal_start:
	.word	_start
Lend:
	.word	_edata

/* Zero-fill range (start, end) and initial stack top, loaded at mmu_done. */
.Lstart:
	.word	_edata
	.word	_ebss
	.word	svcstk + INIT_ARM_STACK_SIZE

.Lvirt_done:
	.word	virt_done	/* virtual address to jump to once the MMU is on */

.Lmainreturned:
	.asciz	"main() returned"
	.align	0
	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE	/* boot-time SVC-mode stack used by initarm() */

/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable"
	.align	14 /* 16KiB aligned */
pagetable:
	.space	L1_TABLE_SIZE
	.text
	.align	0

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)	/* CPU function table, indexed via CF_* in cpu_halt */

#if defined(SMP)

.Lmpvirt_done:
	.word	mpvirt_done	/* virtual address APs jump to once their MMU is on */
VA_TO_PA_POINTER(Lstartup_pagetable_secondary, temp_pagetable)
/*
 * Secondary CPU (AP) entry point.  Entered with the MMU expected to be
 * off: masks interrupts, invalidates caches, installs the temporary
 * secondary page table, enables the MMU and caches, carves out a
 * per-CPU stack below the boot stack based on the CPU id, then calls
 * init_secondary().
 */
ASENTRY_NP(mpentry)

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(PSR_I | PSR_F)
	msr	cpsr_c, r7

	/* Disable MMU.  It should be disabled already, but make sure. */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

#if ARM_MMU_V6
	bl	armv6_idcache_inv_all	/* Modifies r0 only */
#elif ARM_MMU_V7
	bl	armv7_idcache_inv_all	/* Modifies r0-r3, ip */
#endif

	/* Load the page table physical address */
	adr	r0, Lstartup_pagetable_secondary
	bl	translate_va_to_pa
	/* Load the address the secondary page table */
	ldr	r0, [r0]

	orr 	r0, r0, #2		/* Set TTB shared memory flag */
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */

	/* Set the Domain Access register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/* Enable MMU */
	mrc	p15, 0, r0, c1, c0, 0
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
	orr	r0, r0, #CPU_CONTROL_AF_ENABLE
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_IC_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	mrc	p15, 0, r0, c0, c0, 5	/* read MPIDR */
	and	r0, r0, #15		/* r0 = CPU id (low 4 bits) */
	mov	r1, #2048		/* per-CPU stack stride */
	mul	r2, r1, r0
	sub	sp, sp, r2		/* sp = boot stack top - 2048 * cpuid */
	str	r1, [sp]
	ldr	pc, .Lmpvirt_done	/* jump to mpvirt_done via its virtual address */

mpvirt_done:

	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	0
END(mpentry)
#endif
/*
 * cpu_halt: flush the caches, optionally disable the MMU, and branch
 * to the board-specific reset address.  Does not return.
 */
ENTRY_NP(cpu_halt)
	/* Force SVC32 mode with IRQ and FIQ masked. */
	mrs     r2, cpsr
	bic	r2, r2, #(PSR_MODE)
	orr     r2, r2, #(PSR_SVC32_MODE)
	orr	r2, r2, #(PSR_I | PSR_F)
	msr     cpsr_fsxc, r2

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]		/* r4 = *cpu_reset_address */

	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]	/* write back + invalidate I/D caches */
	mov	lr, pc
	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]	/* write back + invalidate L2 cache */

	/*
	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
	 * necessary.
	 */

	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
	ldr	r1, [r1]
	cmp	r1, #0			/* flags consumed by mcrne below */
	mov	r2, #0

	/*
	 * MMU & IDC off, 32 bit program & data space
	 * Hurl ourselves into the ROM
	 */
	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
	mcr     15, 0, r0, c1, c0, 0
	mcrne   15, 0, r2, c8, c7, 0 	/* nail I+D TLB on ARMv4 and greater */
	mov     pc, r4

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off
	 * This variable is provided by the hardware specific code
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)

	/*
	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
	 * v4 MMU disable instruction needs executing... it is an illegal instruction
	 * on f.e. ARM6/7 that locks up the computer in an endless illegal
	 * instruction / data-abort / reset loop.
	 */
.Lcpu_reset_needs_v4_MMU_disable:
	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
END(cpu_halt)
/*
 * setjump + longjmp
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}	/* save r4-r12, sp and lr into the jmp_buf at r0 */
	mov	r0, #0x00000000	/* direct invocation returns 0 */
	RET
END(setjmp)
ENTRY(longjmp)
	ldmia	r0, {r4-r14}	/* restore r4-r12, sp and lr saved by setjmp */
	mov	r0, #0x00000001	/* make the resumed setjmp call return 1 */
	RET
END(longjmp)
	.data
	.global _C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)	/* end-of-symbols pointer; starts at linker 'end' */
ENTRY_NP(abort)
	b	_C_LABEL(abort)	/* branch-to-self: spin forever */
END(abort)
/*
 * Signal trampoline; its size is exported below via szsigcode.
 * Invokes sigreturn(2) with the saved ucontext after a handler returns.
 */
ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC	/* r0 = sp + SIGF_UC: the saved ucontext */

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16
END(sigcode)
	.word	SYS_sigreturn
	.word	SYS_exit
	.align	0
	.global _C_LABEL(esigcode)
		_C_LABEL(esigcode):		/* first address past the sigcode data */

	.data
	.global szsigcode
szsigcode:
	.long esigcode-sigcode	/* size of the signal trampoline in bytes */

/* End of locore.S */