xref: /freebsd/sys/arm/arm/locore.S (revision 864c53ead899f7838cd2e1cca3b485a4a82f5cdc)
1/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/
2
3/*-
4 * Copyright 2011 Semihalf
5 * Copyright (C) 1994-1997 Mark Brinicombe
6 * Copyright (C) 1994 Brini
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Brini.
20 * 4. The name of Brini may not be used to endorse or promote products
21 *    derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36#include "assym.s"
37#include <sys/syscall.h>
38#include <machine/asm.h>
39#include <machine/armreg.h>
40#include <machine/pte.h>
41
42__FBSDID("$FreeBSD$");
43
44/* What size should this really be ? It is only used by initarm() */
45#define INIT_ARM_STACK_SIZE	(2048 * 4)
46
/*
 * CPWAIT: drain the CP15 (system control coprocessor) pipeline so a
 * preceding mcr has taken effect before the next instruction runs.
 * The read-back / register-move / pc-relative-branch triple is the
 * classic pre-ARMv6 serialization idiom (no architected barrier on
 * those cores).  "tmp" is clobbered.
 */
47#define	CPWAIT_BRANCH							 \
48	sub	pc, pc, #4
49
50#define	CPWAIT(tmp)							 \
51	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
52	mov	tmp, tmp		/* wait for it to complete */	;\
53	CPWAIT_BRANCH			/* branch to next insn */
54
55/*
56 * This is for kvm_mkdb, and should be the address of the beginning
57 * of the kernel text segment (not necessarily the same as kernbase).
58 */
59	.text
60	.align	0
/*
 * Export the configured kernel virtual base and physical load address
 * as absolute symbols for tools that inspect the kernel image.
 */
61.globl kernbase
62.set kernbase,KERNBASE
63.globl physaddr
64.set physaddr,PHYSADDR
65
66/*
67 * On entry for FreeBSD boot ABI:
68 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
69 *	r1 - if (r0 == 0) then metadata pointer
70 * On entry for Linux boot ABI:
71 *	r0 - 0
72 *	r1 - machine type (passed as arg2 to initarm)
73 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
74 *
75 * For both types of boot we gather up the args, put them in a struct arm_boot_params
76 * structure and pass that to initarm.
77 *
78 * Register usage in this entry code (values must survive until
79 * virt_done stores them into the arm_boot_params on the stack):
80 *	r9 - saved bootloader r0,  r8 - saved bootloader r1,
81 *	ip - saved bootloader r2,  fp - saved bootloader r3.
82 */
78ENTRY_NP(btext)
79ASENTRY_NP(_start)
80	STOP_UNWINDING		/* Can't unwind into the bootloader! */
81
82	mov	r9, r0		/* 0 or boot mode from boot2 */
83	mov	r8, r1		/* Save Machine type */
84	mov	ip, r2		/* Save meta data */
85	mov	fp, r3		/* Future expansion */
86
87	/* Make sure interrupts are disabled. */
88	mrs	r7, cpsr
89	orr	r7, r7, #(I32_bit|F32_bit)
90	msr	cpsr_c, r7
91
92#if defined (FLASHADDR) && defined(LOADERRAMADDR)
93	/* Check if we're running from flash. */
94	ldr	r7, =FLASHADDR
95	/*
96	 * If we're running with MMU disabled, test against the
97	 * physical address instead.
98	 */
99	mrc     p15, 0, r2, c1, c0, 0
100	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
101	ldreq	r6, =PHYSADDR
102	ldrne	r6, =LOADERRAMADDR
	/*
	 * r7 = flash base, r6 = RAM base.  Decide from the current pc
	 * whether we are executing out of the flash window or the RAM
	 * window; only the flash case needs the copy below.
	 */
103	cmp	r7, r6
104	bls 	flash_lower
105	cmp	r7, pc
106	bhi	from_ram
107	b	do_copy
108
109flash_lower:
110	cmp	r6, pc
111	bls	from_ram
	/*
	 * Copy the kernel image from flash into RAM and jump to the copy:
	 *   r1 = current (flash) address of _start
	 *   r0 = link address of _start (Lreal_start) - KERNBASE + RAM base
	 *      = physical RAM destination
	 *   r2 = _edata - _start (bytes to copy, per Lend/Lreal_start)
	 * r4 preserves the destination across the memcpy call, then the
	 * add-to-pc re-enters this code at from_ram inside the RAM copy.
	 */
112do_copy:
113	ldr	r7, =KERNBASE
114	adr	r1, _start
115	ldr	r0, Lreal_start
116	ldr	r2, Lend
117	sub	r2, r2, r0
118	sub	r0, r0, r7
119	add	r0, r0, r6
120	mov	r4, r0
121	bl	memcpy
122	ldr	r0, Lram_offset
123	add	pc, r4, r0
124Lram_offset:	.word from_ram-_C_LABEL(_start)
125from_ram:
126	nop
127#endif
	/*
	 * Compute the physical address of Lunmapped by replacing the top
	 * nibble of its current address with PHYSADDR's.  NOTE(review):
	 * this assumes PHYSADDR is 256MiB-aligned and the image lies
	 * within one 256MiB window — TODO confirm for new platforms.
	 */
128	adr	r7, Lunmapped
129	bic     r7, r7, #0xf0000000
130	orr     r7, r7, #PHYSADDR
131
132
133disable_mmu:
	/* Disable MMU for a while (also caches, write buffer, branch
	 * prediction) so we can rebuild the translation tables below. */
134	/* Disable MMU for a while */
135	mrc     p15, 0, r2, c1, c0, 0
136	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
137	    CPU_CONTROL_WBUF_ENABLE)
138	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
139	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
140	mcr     p15, 0, r2, c1, c0, 0
141
142	nop
143	nop
144	nop
145	mov	pc, r7			/* jump to Lunmapped at its phys addr */
146Lunmapped:
147	/*
148	 * Build page table from scratch.
149	 */
150
151	/* Find the delta between VA and PA */
152	adr	r0, Lpagetable
153	ldr	r1, [r0]
154	sub	r2, r1, r0
155	/* At this point: r2 = VA - PA */
156
157	/*
158	 * Find the physical address of the table. After these two
159	 * instructions:
160	 * r1 = va(pagetable)
161	 *
162	 * r0 = va(pagetable) - (VA - PA)
163	 *    = va(pagetable) - VA + PA
164	 *    = pa(pagetable)
165	 */
166	ldr	r1, [r0, #4]
167	sub	r0, r1, r2
	/* r0 = pa(pagetable): first argument to build_pagetables below. */
168
169#ifndef _ARM_ARCH_6
170	/*
171	 * Some of the older ports (the various XScale, mostly) assume
172	 * that the memory before the kernel is mapped, and use it for
173	 * the various stacks, page tables, etc. For those CPUs, map the
174	 * 64 first MB of RAM, as it used to be.
175	 */
176	/*
177	 * Map PA == VA
178	 */
179	ldr     r5, =PHYSADDR
180	mov     r1, r5
181	mov     r2, r5
182	/* Map 64MiB, preserved over calls to build_pagetables */
183	mov     r3, #64
184	bl      build_pagetables
185
186	/* Create the kernel map to jump to */
187	mov     r1, r5
188	ldr     r2, =(KERNBASE)
189	bl      build_pagetables
190	ldr	r5, =(KERNPHYSADDR)
191#else
192	/*
193	 * Map PA == VA
194	 */
195	/* Find the start kernels load address, rounded down to a 1MiB
196	 * section boundary (build_pagetables needs 1MiB alignment). */
196	adr	r5, _start
197	ldr	r2, =(L1_S_OFFSET)
198	bic	r5, r2
199	mov	r1, r5
200	mov	r2, r5
201	/* Map 64MiB, preserved over calls to build_pagetables */
202	mov	r3, #64
203	bl	build_pagetables
204
205	/* Create the kernel map to jump to */
206	mov	r1, r5
207	ldr	r2, =(KERNVIRTADDR)
208	bl	build_pagetables
209#endif
210
211#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	/* Create the custom map (early SoC device window for debugging;
	 * r3 section count is preserved from the calls above). */
212	/* Create the custom map */
213	ldr	r1, =SOCDEV_PA
214	ldr	r2, =SOCDEV_VA
215	bl	build_pagetables
216#endif
217
218#if defined(SMP)
219	orr 	r0, r0, #2		/* Set TTB shared memory flag */
220#endif
221	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
222	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */
223
224#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
225	mov	r0, #0
226	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
227#endif
228
229	/* Set the Domain Access register.  Very important! */
230	mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
231	mcr	p15, 0, r0, c3, c0, 0
232	/*
233	 * Enable MMU.
234	 * On armv6 enable extended page tables, and set alignment checking
235	 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
236	 * instructions emitted by clang.
237	 */
238	mrc	p15, 0, r0, c1, c0, 0
239#ifdef _ARM_ARCH_6
240	orr	r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
241	orr	r0, r0, #(CPU_CONTROL_AFLT_ENABLE)
242	orr	r0, r0, #(CPU_CONTROL_AF_ENABLE)
243#endif
244	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
245	mcr	p15, 0, r0, c1, c0, 0
	/* nops + CPWAIT: let the MMU-enable drain before relying on
	 * translated fetches. */
246	nop
247	nop
248	nop
249	CPWAIT(r0)
250
251mmu_done:
252	nop
	/*
	 * .Lstart holds three words: _edata, _ebss, and the top of the
	 * initial SVC stack.  The ldm below sets r1=_edata, r2=_ebss,
	 * sp=svcstk+INIT_ARM_STACK_SIZE, then the loop zeroes the BSS
	 * (_edata .. _ebss) one word at a time.
	 */
253	adr	r1, .Lstart
254	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
255	sub	r2, r2, r1		/* get zero init data */
256	mov	r3, #0
257.L1:
258	str	r3, [r1], #0x0004	/* get zero init data */
259	subs	r2, r2, #4
260	bgt	.L1
261	ldr	pc, .Lvirt_done		/* absolute jump: switch pc to KVA */
262
263virt_done:
	/*
	 * Build a struct arm_boot_params on the stack from the registers
	 * saved at entry, then call initarm(&abp, 28).
	 */
264	mov	r1, #28			/* loader info size is 28 bytes also second arg */
265	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
266	mov	r0, sp			/* loader info pointer is first arg */
267	bic	sp, sp, #7		/* align stack to 8 bytes */
268	str	r1, [r0]		/* Store length of loader info */
269	str	r9, [r0, #4]		/* Store r0 from boot loader */
270	str	r8, [r0, #8]		/* Store r1 from boot loader */
271	str	ip, [r0, #12]		/* store r2 from boot loader */
272	str	fp, [r0, #16]		/* store r3 from boot loader */
273	str	r5, [r0, #20]		/* store the physical address */
274	adr	r4, Lpagetable		/* load the pagetable address */
275	ldr	r5, [r4, #4]
276	str	r5, [r0, #24]		/* store the pagetable address */
277	mov	fp, #0			/* trace back starts here */
278	bl	_C_LABEL(initarm)	/* Off we go */
279
280	/* init arm will return the new stack pointer. */
281	mov	sp, r0
282
283	bl	_C_LABEL(mi_startup)		/* call mi_startup()! */
284
	/* mi_startup() must not return; panic with a diagnostic if it does. */
285	adr	r0, .Lmainreturned
286	b	_C_LABEL(panic)
287	/* NOTREACHED */
288END(btext)
289END(_start)
290
291/*
292 * Builds the page table
293 * r0 - The table base address
294 * r1 - The physical address (trashed)
295 * r2 - The virtual address (trashed)
296 * r3 - The number of 1MiB sections
297 * r4 - Trashed
298 *
299 * Addresses must be 1MiB aligned
300 *
301 * Fills r3 consecutive L1 section descriptors mapping VA r2.. to
302 * PA r1.. (cacheable, kernel read/write; shared when SMP).
303 */
301build_pagetables:
	/* Set the required section attributes (OR'd into the PA). */
302	/* Set the required page attributed */
303	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
304#if defined(SMP)
305	orr	r4, #(L1_SHARED)
306#endif
307	orr	r1, r4
308
	/* Convert the VA to a byte offset into the L1 table: one 4-byte
	 * entry per 1MiB section, hence the (L1_S_SHIFT - 2) shift. */
309	/* Move the virtual address to the correct bit location */
310	lsr	r2, #(L1_S_SHIFT - 2)
311
312	mov	r4, r3
3131:
314	str	r1, [r0, r2]
315	add	r2, r2, #4
316	add	r1, r1, #(L1_S_SIZE)
	/* adds #-1 sets carry except on 0->0xffffffff wrap, so bhi
	 * (C set and Z clear) loops until the count reaches zero. */
317	adds	r4, r4, #-1
318	bhi	1b
319
320	RET
321
/*
 * Literal pool for the startup code.  The Lpagetable pair lets the
 * early code compute the VA/PA delta: the first word is the link-time
 * (virtual) address of Lpagetable itself, read via pc-relative adr at
 * its current (possibly physical) address.
 */
322Lpagetable:
323	.word	.
324	.word	pagetable
325
/* NOTE(review): Lvirtaddr/Lphysaddr are not referenced in this file —
 * presumably kept for external tools or historic reasons; confirm
 * before removing. */
326Lvirtaddr:
327	.word	KERNVIRTADDR
328Lphysaddr:
329	.word	KERNPHYSADDR
330Lreal_start:
331	.word	_start
332Lend:
333	.word	_edata
334
/* Consumed by ldmia in mmu_done/mpentry: _edata, _ebss, initial sp. */
335.Lstart:
336	.word	_edata
337	.word	_ebss
338	.word	svcstk + INIT_ARM_STACK_SIZE
339
340.Lvirt_done:
341	.word	virt_done
342
343.Lmainreturned:
344	.asciz	"main() returned"
345	.align	0
346
	/* Initial SVC-mode stack, used until initarm() provides one. */
347	.bss
348svcstk:
349	.space	INIT_ARM_STACK_SIZE
350
351/*
352 * Memory for the initial pagetable. We are unable to place this in
353 * the bss as this will be cleared after the table is loaded.
354 */
355	.section ".init_pagetable"
356	.align	14 /* 16KiB aligned */
357pagetable:
358	.space	L1_TABLE_SIZE
359
360	.text
361	.align	0
362
/* Pointer to the cpufuncs dispatch table (used by cpu_halt below). */
363.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)
365
366#if defined(SMP)
367
368.Lmpvirt_done:
369	.word	mpvirt_done
370Lstartup_pagetable_secondary:
371	.word	temp_pagetable
372
/*
 * Entry point for secondary (AP) cores: disable interrupts and the
 * MMU, invalidate caches, install the temporary page table prepared
 * by the boot CPU, enable the MMU, carve a per-CPU stack, then call
 * init_secondary().  Never returns.
 */
373ASENTRY_NP(mpentry)
374
375	/* Make sure interrupts are disabled. */
376	mrs	r7, cpsr
377	orr	r7, r7, #(I32_bit|F32_bit)
378	msr	cpsr_c, r7
379
380	/* Disable MMU.  It should be disabled already, but make sure. */
381	mrc	p15, 0, r2, c1, c0, 0
382	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
383	    CPU_CONTROL_WBUF_ENABLE)
384	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
385	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
386	mcr	p15, 0, r2, c1, c0, 0
387	nop
388	nop
389	nop
390	CPWAIT(r0)
391
392#if defined(ARM_MMU_V6)
393	bl	armv6_idcache_inv_all	/* Modifies r0 only */
394#elif defined(ARM_MMU_V7)
395	bl	armv7_idcache_inv_all	/* Modifies r0-r3, ip */
396#endif
397
	/*
	 * Convert &temp_pagetable to a physical address (same top-nibble
	 * substitution as the boot CPU uses — assumes a 256MiB window),
	 * then load the table pointer stored there.
	 */
398	ldr	r0, Lstartup_pagetable_secondary
399	bic	r0, r0, #0xf0000000
400	orr	r0, r0, #PHYSADDR
401	ldr	r0, [r0]
402	orr 	r0, r0, #2		/* Set TTB shared memory flag */
403	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
404	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */
405
406	mov	r0, #0
407	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
408
409	/* Set the Domain Access register.  Very important! */
410	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
411	mcr	p15, 0, r0, c3, c0, 0
412	/* Enable MMU (plus caches, write buffer and branch prediction) */
413	mrc	p15, 0, r0, c1, c0, 0
414	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
415	orr	r0, r0, #CPU_CONTROL_AF_ENABLE
416	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
417	    CPU_CONTROL_WBUF_ENABLE)
418	orr	r0, r0, #(CPU_CONTROL_IC_ENABLE)
419	orr	r0, r0, #(CPU_CONTROL_BPRD_ENABLE)
420	mcr	p15, 0, r0, c1, c0, 0
421	nop
422	nop
423	nop
424	CPWAIT(r0)
425
	/*
	 * Carve this core's 2048-byte slice off the initial stack:
	 * r0 = CPU id (low 4 bits of MPIDR, CP15 c0/c0/5), sp -= 2048*id.
	 * NOTE(review): the str of 2048 at [sp] looks like leftover debug
	 * state — confirm nothing reads it before trusting that word.
	 */
426	adr	r1, .Lstart
427	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
428	mrc	p15, 0, r0, c0, c0, 5
429	and	r0, r0, #15
430	mov	r1, #2048
431	mul	r2, r1, r0
432	sub	sp, sp, r2
433	str	r1, [sp]
434	ldr	pc, .Lmpvirt_done	/* absolute jump: switch pc to KVA */
435
436mpvirt_done:
437
438	mov	fp, #0			/* trace back starts here */
439	bl	_C_LABEL(init_secondary)	/* Off we go */
440
	/* init_secondary() must not return; panic if it does. */
441	adr	r0, .Lmpreturned
442	b	_C_LABEL(panic)
443	/* NOTREACHED */
444
445.Lmpreturned:
446	.asciz	"init_secondary() returned"
447	.align	0
448END(mpentry)
449#endif
450
/*
 * cpu_halt: switch to SVC mode with interrupts masked, write back and
 * invalidate the caches, optionally disable the MMU (pre-v4 CPUs must
 * skip that instruction), and jump to *cpu_reset_address to complete
 * the platform reset.  Never returns.
 */
451ENTRY_NP(cpu_halt)
452	mrs     r2, cpsr
453	bic	r2, r2, #(PSR_MODE)
454	orr     r2, r2, #(PSR_SVC32_MODE)
455	orr	r2, r2, #(I32_bit | F32_bit)
456	msr     cpsr_fsxc, r2
457
458	ldr	r4, .Lcpu_reset_address
459	ldr	r4, [r4]
460
	/* Indirect calls through the cpufuncs table: mov lr, pc saves
	 * the return address (pc reads as insn+8), then ldr pc jumps. */
461	ldr	r0, .Lcpufuncs
462	mov	lr, pc
463	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
464	mov	lr, pc
465	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]
466
467	/*
468	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
469	 * necessary.  The cmp result is consumed by the mcrne below.
470	 */
471
472	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
473	ldr	r1, [r1]
474	cmp	r1, #0
475	mov	r2, #0
476
477	/*
478	 * MMU & IDC off, 32 bit program & data space
479	 * Hurl ourselves into the ROM
480	 */
481	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
482	mcr     15, 0, r0, c1, c0, 0
483	mcrne   15, 0, r2, c8, c7, 0 	/* nail I+D TLB on ARMv4 and greater */
484	mov     pc, r4
485
486	/*
487	 * _cpu_reset_address contains the address to branch to, to complete
488	 * the cpu reset after turning the MMU off
489	 * This variable is provided by the hardware specific code
490	 */
491.Lcpu_reset_address:
492	.word	_C_LABEL(cpu_reset_address)
493
494	/*
495	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
496	 * v4 MMU disable instruction needs executing... it is an illegal instruction
497	 * on f.e. ARM6/7 that locks up the computer in an endless illegal
498	 * instruction / data-abort / reset loop.
499	 */
500.Lcpu_reset_needs_v4_MMU_disable:
501	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
502END(cpu_halt)
503
504
505/*
506 * setjump + longjmp
507 *
508 * setjmp(buf): save callee context r4-r14 (incl. sp, lr) into the
509 * 11-word buffer at r0; returns 0 on the direct call path.
510 */
508ENTRY(setjmp)
509	stmia	r0, {r4-r14}
510	mov	r0, #0x00000000
511	RET
512END(setjmp)
513
/*
 * longjmp(buf): restore r4-r14 saved by setjmp (restoring sp and lr
 * resumes at setjmp's caller) and return 1 so the setjmp site can
 * distinguish the longjmp path.
 */
514ENTRY(longjmp)
515	ldmia	r0, {r4-r14}
516	mov	r0, #0x00000001
517	RET
518END(longjmp)
519
	/* esym: pointer to the end of the symbol table; initialized to
	 * the image end, may be updated at runtime by other code. */
520	.data
521	.global _C_LABEL(esym)
522_C_LABEL(esym):	.word	_C_LABEL(end)
523
/* abort(): deliberate infinite self-branch — spins forever. */
524ENTRY_NP(abort)
525	b	_C_LABEL(abort)
526END(abort)
527
/*
 * Signal trampoline copied onto the user stack: point r0 at the
 * ucontext (sp + SIGF_UC) and invoke SYS_sigreturn; on failure fall
 * through to SYS_exit, then loop retrying sigreturn.
 */
528ENTRY_NP(sigcode)
529	mov	r0, sp
530	add	r0, r0, #SIGF_UC
531
532	/*
533	 * Call the sigreturn system call.
534	 *
535	 * We have to load r7 manually rather than using
536	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
537	 * correct. Using the alternative places esigcode at the address
538	 * of the data rather than the address one past the data.
539	 *
540	 * The [pc, #N] offsets rely on pc reading as insn+8: they address
541	 * the two .word literals below.  Keep the instruction layout
542	 * fixed or these offsets (and the "b . - 16") break.
543	 */
544
541	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
542	swi	SYS_sigreturn
543
544	/* Well if that failed we better exit quick ! */
545
546	ldr	r7, [pc, #8]	/* Load SYS_exit */
547	swi	SYS_exit
548
549	/* Branch back to retry SYS_sigreturn (4 insns back = the ldr) */
550	b	. - 16
551
552	.word	SYS_sigreturn
553	.word	SYS_exit
554
555	.align	0
556	.global _C_LABEL(esigcode)
557		_C_LABEL(esigcode):
558
	/* Size of the trampoline, exported for the kernel's sendsig. */
559	.data
560	.global szsigcode
561szsigcode:
562	.long esigcode-sigcode
563END(sigcode)
564/* End of locore.S */
565