/* xref: /freebsd/sys/arm/arm/locore.S (revision d29771a722acf17b3d2693e237c0da7ce866997f) */
1/*-
2 * Copyright 2004-2014 Olivier Houchard <cognet@FreeBSD.org>
3 * Copyright 2012-2014 Ian Lepore <ian@FreeBSD.org>
4 * Copyright 2013-2014 Andrew Turner <andrew@FreeBSD.org>
5 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
6 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31#include "assym.inc"
32#include <sys/syscall.h>
33#include <machine/asm.h>
34#include <machine/asmacros.h>
35#include <machine/armreg.h>
36#include <machine/sysreg.h>
37#include <machine/pte.h>
/* We map 64MB of kernel unless overridden in assym.inc by the kernel option. */
#ifndef LOCORE_MAP_MB
#define	LOCORE_MAP_MB	64
#endif

#if defined(__ARM_ARCH_7VE__) || defined(__clang__)
/*
 * HYP support is in binutils >= 2.21 and gcc >= 4.9 defines __ARM_ARCH_7VE__
 * when enabled. llvm >= 3.6 supports it too.
 */
.arch_extension virt
#endif

/* A small statically-allocated stack used only during initarm() and AP startup. */
#define	INIT_ARM_STACK_SIZE	2048
53
	.text
	.align	2

	/* Export the kernel's link-time virtual base address as a symbol. */
	.globl kernbase
	.set kernbase,KERNVIRTADDR
59
/*
 * HANDLE_HYP: if the CPU entered the kernel in HYP (hypervisor) mode,
 * install the hypervisor stub exception vectors, record HYP availability
 * in hypmode_enabled (0 = entered in HYP, -1 = not), and drop to SVC
 * mode with IRQ/FIQ/aborts masked via an exception return (ERET).
 * Clobbers r0, r1, lr and spsr.
 */
#define	HANDLE_HYP							\
	/* Leave HYP mode */						;\
	mrs	r0, cpsr						;\
	and	r0, r0, #(PSR_MODE)   /* Mode is in the low 5 bits of CPSR */ ;\
	teq	r0, #(PSR_HYP32_MODE) /* Hyp Mode? */			;\
	bne	1f							;\
	/* Install Hypervisor Stub Exception Vector */			;\
	bl hypervisor_stub_vect_install					;\
	/* Record that we booted in HYP mode (0 = HYP available) */	;\
	mov	r0, 0							;\
	adr	r1, hypmode_enabled					;\
	str	r0, [r1]						;\
	/* Ensure that IRQ, FIQ and Aborts will be disabled after eret */ ;\
	mrs	r0, cpsr						;\
	bic	r0, r0, #(PSR_MODE)					;\
	orr	r0, r0, #(PSR_SVC32_MODE)				;\
	orr	r0, r0, #(PSR_I | PSR_F | PSR_A)			;\
	msr	spsr_cxsf, r0						;\
	/* Exit hypervisor mode */					;\
	adr	lr, 2f							;\
	MSR_ELR_HYP(14)							;\
	ERET								;\
1:									;\
	/* Not in HYP mode: record -1 (HYP not available) */		;\
	mov	r0, -1							;\
	adr	r1, hypmode_enabled					;\
	str	r0, [r1]						;\
2:
86
/*
 * Kernel entry point.
 *
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
 * For both types of boot we gather up the args, put them in a struct arm_boot_params
 * structure and pass that to initarm.
 */
	.globl	btext
btext:
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	/* Make sure interrupts (IRQ/FIQ) and async aborts are disabled. */
	cpsid	ifa

	/* Stash the bootloader-supplied registers in high registers so the
	 * cache/MMU setup below can freely clobber r0-r3. */
	mov	r8, r0		/* 0 or boot mode from boot2 */
	mov	r9, r1		/* Save Machine type */
	mov	r10, r2		/* Save meta data */
	mov	r11, r3		/* Future expansion */

	# If HYP-MODE is active, install an exception vector stub
	HANDLE_HYP

	/*
	 * Check whether data cache is enabled.  If it is, then we know
	 * current tags are valid (not power-on garbage values) and there
	 * might be dirty lines that need cleaning.  Disable cache to prevent
	 * new lines being allocated, then call wbinv_poc_all to clean it.
	 */
	mrc	CP15_SCTLR(r7)
	tst	r7, #CPU_CONTROL_DC_ENABLE
	blne	dcache_wbinv_poc_all

	/* ! Do not write to memory between wbinv and disabling cache ! */

	/*
	 * Now there are no dirty lines, but there may still be lines marked
	 * valid.  Disable all caches and the MMU, and invalidate everything
	 * before setting up new page tables and re-enabling the mmu.
	 */
1:
	bic	r7, #CPU_CONTROL_DC_ENABLE
	bic	r7, #CPU_CONTROL_AFLT_ENABLE
	bic	r7, #CPU_CONTROL_MMU_ENABLE
	bic	r7, #CPU_CONTROL_IC_ENABLE
	bic	r7, #CPU_CONTROL_BPRD_ENABLE
	bic	r7, #CPU_CONTROL_SW_ENABLE
	orr	r7, #CPU_CONTROL_UNAL_ENABLE
	orr	r7, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r7)
	DSB
	ISB
	bl	dcache_inv_poc_all
	mcr	CP15_ICIALLU		/* Invalidate instruction cache */
	DSB
	ISB

	/*
	 * Build page table from scratch.
	 */

	/*
	 * Figure out the physical address we're loaded at by assuming this
	 * entry point code is in the first L1 section and so if we clear the
	 * offset bits of the pc that will give us the section-aligned load
	 * address, which remains in r5 throughout all the following code.
	 */
	ldr	r2, =(L1_S_OFFSET)
	bic	r5, pc, r2

	/* Find the delta between VA and PA, result stays in r0 throughout. */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	/*
	 * First map the entire 4GB address space as VA=PA.  It's mapped as
	 * normal (cached) memory because it's for things like accessing the
	 * parameters passed in from the bootloader, which might be at any
	 * physical address, different for every platform.
	 */
	mov	r1, #0
	mov	r2, #0
	mov	r3, #4096		/* 4096 x 1MiB sections = 4GiB */
	bl	build_pagetables

	/*
	 * Next we map the kernel starting at the physical load address, mapped
	 * to the VA the kernel is linked for.  The default size we map is 64MiB
	 * but it can be overridden with a kernel option.
	 */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	ldr	r3, =(LOCORE_MAP_MB)
	bl	build_pagetables

	/* Create a device mapping for early_printf if specified. */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	mov	r3, #1			/* one 1MiB section is enough */
	bl	build_device_pagetables
#endif
	bl	init_mmu

	/* Transition the PC from physical to virtual addressing. */
	ldr	pc, =1f
1:

	/* Setup stack, clear BSS */
	ldr	r1, =.Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	add	sp, sp,	#INIT_ARM_STACK_SIZE
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
2:
	str	r3, [r1], #0x0004	/* zero the bss, one word at a time */
	subs	r2, r2, #4
	bgt	2b

	/* Build a struct arm_boot_params on the stack and call initarm(). */
	mov	r1, #28			/* loader info size is 28 bytes also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r8, [r0, #4]		/* Store r0 from boot loader */
	str	r9, [r0, #8]		/* Store r1 from boot loader */
	str	r10, [r0, #12]		/* store r2 from boot loader */
	str	r11, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* init arm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	/* mi_startup() must never return; panic if it does. */
	ldr	r0, =.Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)
235
/*
 * VA_TO_PA_POINTER(name, table): emit a two-word record:
 *   word 0: the link-time address of the record itself (".")
 *   word 1: the link-time address of 'table'
 * translate_va_to_pa compares word 0 with the record's run-time (adr)
 * address to derive the VA->PA delta.
 */
#define VA_TO_PA_POINTER(name, table)	 \
name:					;\
	.word	.			;\
	.word	table
240
/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr  r0, Lpagetable
 *            bl   translate_va_to_pa
 *            r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 */
translate_va_to_pa:
	ldr	r1, [r0]		/* r1 = link-time (virtual) address of record */
	sub	r2, r1, r0		/* r0 = run-time (physical) address of record */
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	mov	pc, lr
270
/*
 * Init MMU
 * r0 - the table base address (physical address of the L1 page table)
 *
 * Programs TTBR0/DACR/TTBCR and the TEX remap registers, then enables
 * the MMU.  Trashes r0; returns via lr.
 */

ASENTRY_NP(init_mmu)

	/* Setup TLB and MMU registers */
	mcr	CP15_TTBR0(r0)		/* Set TTB */
	mov	r0, #0
	mcr	CP15_CONTEXTIDR(r0)	/* Set ASID to 0 */

	/* Set the Domain Access register */
	mov	r0, #DOMAIN_CLIENT	/* Only domain #0 is used */
	mcr	CP15_DACR(r0)

	/*
	 * Ensure that LPAE is disabled and that TTBR0 is used for translation,
	 * use a 16KB translation table
	 */
	mov	r0, #0
	mcr	CP15_TTBCR(r0)

	/*
	 * Set TEX remap registers
	 *  - All is set to uncacheable memory
	 * (PRRR = 0xAAAAA maps every TEX class to Normal memory; NMRR = 0
	 * makes that Normal memory non-cacheable during early boot.)
	 */
	ldr	r0, =0xAAAAA
	mcr	CP15_PRRR(r0)
	mov	r0, #0
	mcr	CP15_NMRR(r0)
	mcr	CP15_TLBIALL		/* Flush TLB */
	DSB
	ISB

	/* Enable MMU */
	mrc	CP15_SCTLR(r0)
	orr	r0, r0,	#CPU_CONTROL_MMU_ENABLE
	orr	r0, r0,	#CPU_CONTROL_V6_EXTPAGE
	orr	r0, r0,	#CPU_CONTROL_TR_ENABLE
	orr	r0, r0,	#CPU_CONTROL_AF_ENABLE
	mcr	CP15_SCTLR(r0)
	DSB
	ISB
	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

	mov	pc, lr
END(init_mmu)
322
323
/*
 * Init SMP coherent mode, enable caching and switch to final MMU table.
 * Called with disabled caches
 * r0 - The table base address
 * r1 - clear bits for aux register
 * r2 - set bits for aux register
 *
 * Preserves r4-r11 (saved/restored on the stack); returns via saved lr.
 */
ASENTRY_NP(reinit_mmu)
	push	{r4-r11, lr}
	mov	r4, r0			/* r4 = new table base */
	mov	r5, r1			/* r5 = ACTLR bits to clear */
	mov	r6, r2			/* r6 = ACTLR bits to set */

	/* !! Be very paranoid here !! */
	/* !! We cannot write single bit here !! */

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl 	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Set auxiliary register */
	mrc	CP15_ACTLR(r7)
	bic	r8, r7, r5		/* Mask bits */
	eor 	r8, r8, r6		/* Set bits */
	teq 	r7, r8
	mcrne 	CP15_ACTLR(r8)		/* Write only if the value changed */
	DSB
	ISB

	/* Enable caches. */
	mrc	CP15_SCTLR(r7)
	orr	r7, #CPU_CONTROL_DC_ENABLE
	orr	r7, #CPU_CONTROL_IC_ENABLE
	orr	r7, #CPU_CONTROL_BPRD_ENABLE
	mcr	CP15_SCTLR(r7)
	DSB

	mcr	CP15_TTBR0(r4)		/* Set new TTB */
	DSB
	ISB

	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

#if 0 /* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl 	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	pop	{r4-r11, pc}
END(reinit_mmu)
388
389
/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 */
build_device_pagetables:
	/*
	 * Device mappings must use the Device-memory TEX class
	 * (TEX1_CLASS_2), not the Normal class, so accesses to mapped
	 * peripherals (e.g. the SOCDEV early_printf window) are never
	 * cached or gathered.  Using TEX1_CLASS_0 here would make this
	 * entry point identical to build_pagetables.
	 */
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_2
	b	1f
build_pagetables:
	/* Set the required page attributes: valid, accessed, kernel RW, Normal. */
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
1:
	orr	r1, r4			/* merge attributes into the phys addr */

	/* Move the virtual address to the correct bit location */
	lsr	r2, #(PTE1_SHIFT - 2)	/* VA -> byte offset of its L1 slot */

	mov	r4, r3			/* r4 = remaining section count */
2:
	str	r1, [r0, r2]		/* write one 1MiB section descriptor */
	add	r2, r2, #4		/* advance to the next L1 slot */
	add	r1, r1, #(PTE1_SIZE)	/* advance phys addr by 1MiB */
	adds	r4, r4, #-1
	bhi	2b			/* loop while count remains nonzero */

	mov	pc, lr
421
/* Magic VA->PA record for the boot L1 page table (see translate_va_to_pa). */
VA_TO_PA_POINTER(Lpagetable, boot_pt1)

	/* Set by HANDLE_HYP: 0 if the CPU entered in HYP mode, -1 otherwise. */
	.global _C_LABEL(hypmode_enabled)
_C_LABEL(hypmode_enabled):
	.word 0
427
/* Startup data consumed by _start and mpentry: bss start/end, stack base. */
.Lstart:
	.word	_edata			/* Note that these three items are */
	.word	_ebss			/* loaded with a single ldmia and */
	.word	svcstk			/* must remain in order together. */

.Lmainreturned:
	.asciz	"main() returned"	/* panic message used by _start */
	.align	2

	.bss
/* Early startup stacks: one INIT_ARM_STACK_SIZE slot per CPU. */
svcstk:
	.space	INIT_ARM_STACK_SIZE * MAXCPU
440
/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable", "aw", %nobits
	.align	14 /* 16KiB aligned, as required for an ARM L1 table */
	.globl	boot_pt1
boot_pt1:
	.space	L1_TABLE_SIZE

	.text
	.align	2
453
#if defined(SMP)

/*
 * Secondary-CPU (AP) entry point.  Mirrors _start: mask interrupts,
 * leave HYP mode if needed, disable caches/MMU, enable the MMU with the
 * shared boot page table, pick a per-CPU startup stack out of svcstk,
 * then call init_secondary() with the CPU id in r0.
 */
ASENTRY_NP(mpentry)
	/* Make sure interrupts are disabled. */
	cpsid	ifa

	HANDLE_HYP

	/* Setup core, disable all caches. */
	mrc	CP15_SCTLR(r0)
	bic	r0, #CPU_CONTROL_MMU_ENABLE
	bic	r0, #CPU_CONTROL_AFLT_ENABLE
	bic	r0, #CPU_CONTROL_DC_ENABLE
	bic	r0, #CPU_CONTROL_IC_ENABLE
	bic	r0, #CPU_CONTROL_BPRD_ENABLE
	bic	r0, #CPU_CONTROL_SW_ENABLE
	orr	r0, #CPU_CONTROL_UNAL_ENABLE
	orr	r0, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r0)
	DSB
	ISB

	/* Invalidate L1 cache I+D cache */
	bl	dcache_inv_pou_all
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Find the delta between VA and PA */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	bl	init_mmu

	/* Select this core's startup stack based on its MPIDR core id. */
	adr	r1, .Lstart+8		/* Get initstack pointer from */
	ldr	sp, [r1]		/* startup data. */
	mrc	CP15_MPIDR(r0)		/* Get processor id number. */
	and	r0, r0,	#0x0f
	mov	r1, #INIT_ARM_STACK_SIZE
	mul	r2, r1,	r0		/* Point sp to initstack */
	add	sp, sp,	r2		/* area for this processor. */

	/* Switch to virtual addresses. */
	ldr	pc, =1f
1:
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)/* Off we go, cpu id in r0. */

	/* init_secondary() must never return; panic if it does. */
	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(mpentry)

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	2
#endif
511
/*
 * Halt the CPU: write back caches, then either jump to the platform's
 * reset routine (if cpu_reset_address is set) or idle forever in WFI.
 */
ENTRY_NP(cpu_halt)

	/* XXX re-implement !!! */
	cpsid	ifa
	bl	dcache_wbinv_poc_all

	/* If the platform provided a reset address, branch to it. */
	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]
	teq	r4, #0
	movne	pc, r4
1:
	/* Otherwise wait for interrupts (which stay masked) forever. */
	WFI
	b	1b

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off
	 * This variable is provided by the hardware specific code
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)
END(cpu_halt)
534
535
/*
 * setjump + longjmp
 *
 * The register list {r4-r14} (callee-saved regs plus sp and lr) must be
 * identical in both routines: longjmp restores exactly what setjmp saved.
 */

/* Save r4-r14 into the jmp_buf at r0; return 0. */
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
END(setjmp)

/* Restore r4-r14 from the jmp_buf at r0; return 1 at the saved lr. */
ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)
550
	.data
	/* esym: end-of-symbol-table pointer; initialized to the kernel's end. */
	.global	_C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)
554
/* abort(): branch to itself forever — there is no recovery path here. */
ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)
558
/*
 * User-mode signal trampoline.  Issues the sigreturn syscall with a
 * pointer to the saved ucontext; falls back to exit if that fails.
 * The pc-relative loads below rely on the exact instruction layout
 * (ARM pc reads as the current instruction + 8) to reach the literal
 * words placed just past END(sigcode).
 */
ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC	/* r0 = &ucontext on the signal frame */

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16
END(sigcode)
	/* Literal pool read by the pc-relative loads above. */
	.word	SYS_sigreturn
	.word	SYS_exit
585
	.align	2
	/* esigcode marks the first byte past the sigcode trampoline. */
	.global _C_LABEL(esigcode)
		_C_LABEL(esigcode):

	.data
	/* szsigcode: byte length of the sigcode trampoline (esigcode - sigcode). */
	.global szsigcode
szsigcode:
	.long esigcode-sigcode
594
595/* End of locore.S */
596