xref: /freebsd/sys/arm/arm/locore.S (revision 7554a5b611fd8e9a97cb6937a1110079568d4fd3)
/*-
 * Copyright 2004-2014 Olivier Houchard <cognet@FreeBSD.org>
 * Copyright 2012-2014 Ian Lepore <ian@FreeBSD.org>
 * Copyright 2013-2014 Andrew Turner <andrew@FreeBSD.org>
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
30
#include "assym.inc"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/asmacros.h>
#include <machine/armreg.h>
#include <machine/sysreg.h>
#include <machine/pte.h>
/* We map 64MB of kernel unless overridden in assym.inc by the kernel option. */
#ifndef LOCORE_MAP_MB
#define	LOCORE_MAP_MB	64
#endif

#if defined(__ARM_ARCH_7VE__) || defined(__clang__)
/*
 * HYP support is in binutils >= 2.21, and gcc >= 4.9 defines __ARM_ARCH_7VE__
 * when enabled.  llvm >= 3.6 supports it too.
 */
.arch_extension virt
#endif

/* A small statically-allocated stack used only during initarm() and AP startup. */
#define	INIT_ARM_STACK_SIZE	2048
53
	.text
	.align	2

	/* Export the kernel's link-time virtual base address as "kernbase". */
	.globl kernbase
	.set kernbase,KERNVIRTADDR
59
/*
 * HANDLE_HYP: if the CPU entered the kernel in HYP mode, install the
 * hypervisor stub exception vector, record the result in hypmode_enabled
 * (0 when the CPU was in HYP mode, -1 when it was not), and drop to SVC32
 * mode with IRQ/FIQ/aborts masked via an exception return.  Clobbers
 * r0/r1 and uses local labels 1/2.
 */
#define	HANDLE_HYP							\
	/* Leave HYP mode */						;\
	mrs	r0, cpsr						;\
	and	r0, r0, #(PSR_MODE)   /* Mode is in the low 5 bits of CPSR */ ;\
	teq	r0, #(PSR_HYP32_MODE) /* Hyp Mode? */			;\
	bne	1f							;\
	/* Install Hypervisor Stub Exception Vector */			;\
	bl hypervisor_stub_vect_install					;\
	mov	r0, 0							;\
	adr	r1, hypmode_enabled					;\
	str	r0, [r1]						;\
	/* Ensure that IRQ, FIQ and Aborts will be disabled after eret */ ;\
	mrs	r0, cpsr						;\
	bic	r0, r0, #(PSR_MODE)					;\
	orr	r0, r0, #(PSR_SVC32_MODE)				;\
	orr	r0, r0, #(PSR_I | PSR_F | PSR_A)			;\
	msr	spsr_cxsf, r0						;\
	/* Exit hypervisor mode */					;\
	adr	lr, 2f							;\
	MSR_ELR_HYP(14)							;\
	ERET								;\
1:									;\
	mov	r0, -1							;\
	adr	r1, hypmode_enabled					;\
	str	r0, [r1]						;\
2:
86
/*
 * Kernel entry point (btext / _start).
 *
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
 * For both types of boot we gather up the args, put them in a struct arm_boot_params
 * structure and pass that to initarm.
 */
	.globl	btext
btext:
ASENTRY_NP(_start)
	.rept 8			/* zImage header: eight no-op instructions */
	mov r0, r0
	.endr

	b start

	.word 0x016f2818	/* zImage magic number */
	.word 0                 /* absolute load/run zImage address or 0 or PIC */
	.word (_edata - btext)	/* zImage size */
start:
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	/* Make sure interrupts are disabled. */
	cpsid	ifa

	mov	r8, r0		/* 0 or boot mode from boot2 */
	mov	r9, r1		/* Save Machine type */
	mov	r10, r2		/* Save meta data */
	mov	r11, r3		/* Future expansion */

	# If HYP-MODE is active, install an exception vector stub
	HANDLE_HYP

	/*
	 * Check whether data cache is enabled.  If it is, then we know
	 * current tags are valid (not power-on garbage values) and there
	 * might be dirty lines that need cleaning.  Disable cache to prevent
	 * new lines being allocated, then call wbinv_poc_all to clean it.
	 */
	mrc	CP15_SCTLR(r7)
	tst	r7, #CPU_CONTROL_DC_ENABLE
	blne	dcache_wbinv_poc_all

	/* ! Do not write to memory between wbinv and disabling cache ! */

	/*
	 * Now there are no dirty lines, but there may still be lines marked
	 * valid.  Disable all caches and the MMU, and invalidate everything
	 * before setting up new page tables and re-enabling the mmu.
	 */
1:
	bic	r7, #CPU_CONTROL_DC_ENABLE
	bic	r7, #CPU_CONTROL_AFLT_ENABLE
	bic	r7, #CPU_CONTROL_MMU_ENABLE
	bic	r7, #CPU_CONTROL_IC_ENABLE
	bic	r7, #CPU_CONTROL_BPRD_ENABLE
	bic	r7, #CPU_CONTROL_SW_ENABLE
	orr	r7, #CPU_CONTROL_UNAL_ENABLE
	orr	r7, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r7)
	DSB
	ISB
	bl	dcache_inv_poc_all
	mcr	CP15_ICIALLU
	DSB
	ISB

	/*
	 * Build page table from scratch.
	 */

	/*
	 * Figure out the physical address we're loaded at by assuming this
	 * entry point code is in the first L1 section and so if we clear the
	 * offset bits of the pc that will give us the section-aligned load
	 * address, which remains in r5 throughout all the following code.
	 */
	ldr	r2, =(L1_S_OFFSET)
	bic	r5, pc, r2

	/* Find the delta between VA and PA, result stays in r0 throughout. */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	/*
	 * First map the entire 4GB address space as VA=PA.  It's mapped as
	 * normal (cached) memory because it's for things like accessing the
	 * parameters passed in from the bootloader, which might be at any
	 * physical address, different for every platform.
	 */
	mov	r1, #0
	mov	r2, #0
	mov	r3, #4096		/* 4096 x 1MiB sections = 4GiB */
	bl	build_pagetables

	/*
	 * Next we map the kernel starting at the physical load address, mapped
	 * to the VA the kernel is linked for.  The default size we map is 64MiB
	 * but it can be overridden with a kernel option.
	 */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	ldr	r3, =(LOCORE_MAP_MB)
	bl	build_pagetables

	/* Create a device mapping for early_printf if specified. */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	mov	r3, #1
	bl	build_device_pagetables
#endif
	bl	init_mmu

	/* Transition the PC from physical to virtual addressing. */
	ldr	pc, =1f
1:

	/* Setup stack, clear BSS */
	ldr	r1, =.Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	add	sp, sp,	#INIT_ARM_STACK_SIZE
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
2:
	str	r3, [r1], #0x0004	/* clear bss one word at a time */
	subs	r2, r2, #4
	bgt	2b

	mov	r1, #28			/* loader info size is 28 bytes also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r8, [r0, #4]		/* Store r0 from boot loader */
	str	r9, [r0, #8]		/* Store r1 from boot loader */
	str	r10, [r0, #12]		/* store r2 from boot loader */
	str	r11, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]		/* second word = va of boot_pt1 */
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* init arm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	ldr	r0, =.Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)
245
/*
 * VA_TO_PA_POINTER(name, table): emit a two-word descriptor for
 * translate_va_to_pa.  The first word is the link-time (virtual)
 * address of the descriptor itself ("."), the second is the virtual
 * address of table.
 */
#define VA_TO_PA_POINTER(name, table)	 \
name:					;\
	.word	.			;\
	.word	table
250
/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr  r0, Lpagetable
 *            bl   translate_va_to_pa
 *            r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 */
translate_va_to_pa:
	ldr	r1, [r0]	/* r1 = link-time (virtual) addr of pointer */
	sub	r2, r1, r0	/* r0 = its run-time (physical) addr via adr */
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	mov	pc, lr
280
/*
 * Init MMU
 * r0 - the table base address
 *
 * Loads TTBR0 with the given L1 table, resets the ASID and domain access
 * control, disables LPAE via TTBCR, programs the TEX remap registers so
 * all memory resolves as uncacheable, flushes the TLB, then enables the
 * MMU (with extended pages, TEX remap and the access flag).  Caches are
 * NOT enabled here; see reinit_mmu.  Clobbers r0 and flags.
 */

ASENTRY_NP(init_mmu)

	/* Setup TLB and MMU registers */
	mcr	CP15_TTBR0(r0)		/* Set TTB */
	mov	r0, #0
	mcr	CP15_CONTEXTIDR(r0)	/* Set ASID to 0 */

	/* Set the Domain Access register */
	mov	r0, #DOMAIN_CLIENT	/* Only domain #0 is used */
	mcr	CP15_DACR(r0)

	/*
	 * Ensure that LPAE is disabled and that TTBR0 is used for translation,
	 * use a 16KB translation table
	 */
	mov	r0, #0
	mcr	CP15_TTBCR(r0)

	/*
	 * Set TEX remap registers
	 *  - All is set to uncacheable memory
	 */
	ldr	r0, =0xAAAAA
	mcr	CP15_PRRR(r0)
	mov	r0, #0
	mcr	CP15_NMRR(r0)
	mcr	CP15_TLBIALL		/* Flush TLB */
	DSB
	ISB

	/* Enable MMU */
	mrc	CP15_SCTLR(r0)
	orr	r0, r0,	#CPU_CONTROL_MMU_ENABLE
	orr	r0, r0,	#CPU_CONTROL_V6_EXTPAGE
	orr	r0, r0,	#CPU_CONTROL_TR_ENABLE
	orr	r0, r0,	#CPU_CONTROL_AF_ENABLE
	mcr	CP15_SCTLR(r0)
	DSB
	ISB
	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

	mov	pc, lr
END(init_mmu)
332
333
/*
 * Init SMP coherent mode, enable caching and switch to final MMU table.
 * Called with disabled caches
 * r0 - The table base address
 * r1 - clear bits for aux register
 * r2 - set bits for aux register
 */
ASENTRY_NP(reinit_mmu)
	push	{r4-r11, lr}
	mov	r4, r0			/* stash args in callee-saved regs */
	mov	r5, r1
	mov	r6, r2

	/* !! Be very paranoid here !! */
	/* !! We cannot write single bit here !! */

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl 	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Set auxiliary register */
	mrc	CP15_ACTLR(r7)
	bic	r8, r7, r5		/* Mask bits */
	eor 	r8, r8, r6		/* Set bits (eor toggles; assumes set-bits lie in cleared mask) */
	teq 	r7, r8
	mcrne 	CP15_ACTLR(r8)		/* write only if the value changed */
	DSB
	ISB

	/* Enable caches. */
	mrc	CP15_SCTLR(r7)
	orr	r7, #CPU_CONTROL_DC_ENABLE
	orr	r7, #CPU_CONTROL_IC_ENABLE
	orr	r7, #CPU_CONTROL_BPRD_ENABLE
	mcr	CP15_SCTLR(r7)
	DSB

	mcr	CP15_TTBR0(r4)		/* Set new TTB */
	DSB
	ISB

	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

#if 0 /* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl 	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	pop	{r4-r11, pc}
END(reinit_mmu)
398
399
/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 */
build_device_pagetables:
	/*
	 * NOTE(review): the device variant currently loads the same
	 * attributes as the normal mapping (TEX1_CLASS_0) — confirm the
	 * intended memory class for device mappings.
	 */
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
	b	1f
build_pagetables:
	/* Set the required page attributes */
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
1:
	orr	r1, r4

	/* Move the virtual address to the correct bit location */
	lsr	r2, #(PTE1_SHIFT - 2)

	mov	r4, r3
2:
	str	r1, [r0, r2]		/* store one L1 section entry */
	add	r2, r2, #4		/* next table slot */
	add	r1, r1, #(PTE1_SIZE)	/* next 1MiB of physical memory */
	adds	r4, r4, #-1		/* loop until all sections written */
	bhi	2b

	mov	pc, lr
431
/*
 * Two-word descriptor pairing its own link-time address with the virtual
 * address of boot_pt1; translate_va_to_pa uses it to find the physical
 * address of the initial page table.
 */
VA_TO_PA_POINTER(Lpagetable, boot_pt1)

	/* Written by HANDLE_HYP: 0 if the CPU entered in HYP mode, -1 if not. */
	.global _C_LABEL(hypmode_enabled)
_C_LABEL(hypmode_enabled):
	.word 0

.Lstart:
	.word	_edata			/* Note that these three items are */
	.word	_ebss			/* loaded with a single ldmia and */
	.word	svcstk			/* must remain in order together. */

.Lmainreturned:
	.asciz	"main() returned"
	.align	2
446
	.bss
	/* Bootstrap stacks: one INIT_ARM_STACK_SIZE region per CPU. */
svcstk:
	.space	INIT_ARM_STACK_SIZE * MAXCPU

/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable", "aw", %nobits
	.align	14 /* 16KiB aligned */
	.globl	boot_pt1
boot_pt1:
	.space	L1_TABLE_SIZE
460
	.text
	.align	2

#if defined(SMP)

/*
 * mpentry: entry point for secondary (AP) cores.  Mirrors the _start
 * path: mask interrupts, leave HYP mode if necessary, disable MMU and
 * caches, invalidate, enable the MMU with the boot page table already
 * built by the boot processor, select a per-CPU bootstrap stack and
 * call init_secondary() with the cpu id in r0.
 */
ASENTRY_NP(mpentry)
	/* Make sure interrupts are disabled. */
	cpsid	ifa

	HANDLE_HYP

	/* Setup core, disable all caches. */
	mrc	CP15_SCTLR(r0)
	bic	r0, #CPU_CONTROL_MMU_ENABLE
	bic	r0, #CPU_CONTROL_AFLT_ENABLE
	bic	r0, #CPU_CONTROL_DC_ENABLE
	bic	r0, #CPU_CONTROL_IC_ENABLE
	bic	r0, #CPU_CONTROL_BPRD_ENABLE
	bic	r0, #CPU_CONTROL_SW_ENABLE
	orr	r0, #CPU_CONTROL_UNAL_ENABLE
	orr	r0, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r0)
	DSB
	ISB

	/* Invalidate L1 cache I+D cache */
	bl	dcache_inv_pou_all
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Find the delta between VA and PA */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	bl	init_mmu

	adr	r1, .Lstart+8		/* Get initstack pointer from */
	ldr	sp, [r1]		/* startup data. */
	mrc	CP15_MPIDR(r0)		/* Get processor id number. */
	and	r0, r0,	#0x0f
	mov	r1, #INIT_ARM_STACK_SIZE
	mul	r2, r1,	r0		/* Point sp to initstack */
	add	sp, sp,	r2		/* area for this processor. */

	/* Switch to virtual addresses. */
	ldr	pc, =1f
1:
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)/* Off we go, cpu id in r0. */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(mpentry)

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	2
#endif
521
/*
 * cpu_halt: disable interrupts, write back and invalidate the data
 * caches, then jump to the platform's cpu_reset_address if one is set;
 * otherwise sleep forever in a WFI loop.
 */
ENTRY_NP(cpu_halt)

	/* XXX re-implement !!! */
	cpsid	ifa
	bl	dcache_wbinv_poc_all

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]
	teq	r4, #0			/* reset address provided? */
	movne	pc, r4			/* yes: jump to it */
1:
	WFI				/* no: wait for interrupt, forever */
	b	1b

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off
	 * This variable is provided by the hardware specific code
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)
END(cpu_halt)
544
545
/*
 * setjump + longjmp
 *
 * setjmp(buf):  save r4-r14 (callee-saved regs, sp and lr) into the
 *               11-word buffer at r0 and return 0.
 * longjmp(buf): restore r4-r14 from the buffer at r0 and return 1 to
 *               the saved lr.  The two register lists must stay in sync.
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}		/* save r4-r12, sp, lr */
	mov	r0, #0x00000000		/* direct invocation returns 0 */
	RET
END(setjmp)

ENTRY(longjmp)
	ldmia	r0, {r4-r14}		/* restore regs saved by setjmp */
	mov	r0, #0x00000001		/* resumed setjmp appears to return 1 */
	RET
END(longjmp)
560
	.data
	/* esym: initialized to the linker-provided "end" symbol. */
	.global	_C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)
564
/* abort(): deliberate infinite loop — branches to itself forever. */
ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)
568
/*
 * sigcode: signal trampoline.  Signal handlers return here; this code
 * invokes the sigreturn syscall with the ucontext located at sp + SIGF_UC,
 * and falls back to SYS_exit if that fails.  The pc-relative literal
 * offsets below depend on this exact instruction layout — do not reorder.
 */
ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16
END(sigcode)
	/* pc-relative literals read above; must stay at these offsets. */
	.word	SYS_sigreturn
	.word	SYS_exit

	.align	2
	.global _C_LABEL(esigcode)
		_C_LABEL(esigcode):

	.data
	/* szsigcode: size of the trampoline copied out to user space. */
	.global szsigcode
szsigcode:
	.long esigcode-sigcode
604
605/* End of locore.S */
606