/*-
 * Copyright (c) 2012-2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "assym.inc"
#include "opt_kstack_pages.h"
#include <sys/elf_common.h>
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/hypervisor.h>
#include <machine/param.h>
#include <machine/pte.h>
#include <machine/vm.h>
#include <machine/vmparam.h>

#define	VIRT_BITS	48
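/*
 * With VIRT_BITS == 48, the tcr value in start_mmu programs
 * TCR_EL1.T0SZ/T1SZ to 64 - 48 = 16, i.e. 48-bit virtual address
 * spaces for both TTBR0 and TTBR1.
 */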

#if PAGE_SIZE == PAGE_SIZE_16K
/*
 * The number of level 3 tables to create. 32 will allow for 1G of address
 * space, the same as a single level 2 table with 4k pages.
 */
#define	L3_PAGE_COUNT	32
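/*
 * An informal check of that figure: a 16k granule table holds 2048
 * 8-byte entries, so one L3 table maps 2048 * 16KiB = 32MiB and 32 of
 * them map 1GiB, matching the 512 * 2MiB = 1GiB covered by a single
 * 4k-granule L2 table.
 */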
#endif

/*
 * The size of our bootstrap stack.
 */
#define	BOOT_STACK_SIZE	(KSTACK_PAGES * PAGE_SIZE)
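/*
 * KSTACK_PAGES comes from opt_kstack_pages.h; as an example, a value
 * of 4 with 4K pages gives a 16KiB bootstrap stack.
 */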

	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * We assume:
 *  MMU      on with an identity map, or off
 *  D-Cache: off
 *  I-Cache: on or off
 *  We are loaded at a 2MiB aligned address
 */
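
/*
 * Register usage in the code below (see also the "At this point"
 * comments in _start):
 *  x28 = our physical load address           (get_load_phys_addr)
 *  x27 = TTBR0 L0 table                      (create_pagetables)
 *  x26 = kernel L1 table                     (create_pagetables)
 *  x24 = TTBR1 L0 table                      (create_pagetables)
 *  x23 = exception level we were entered at  (enter_kernel_el)
 *  x4  = HCR_EL2 value, when entered in EL2  (enter_kernel_el)
 */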

ENTRY(_start)
	/* Enter the kernel exception level */
	bl	enter_kernel_el

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Get the virt -> phys offset */
	bl	get_load_phys_addr

	/*
	 * At this point:
	 * x28 = Our physical load address
	 */

	/* Create the page tables */
	bl	create_pagetables

	/*
	 * At this point:
	 * x27 = TTBR0 table
	 * x26 = Kernel L1 table
	 * x24 = TTBR1 table
	 */

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, .Lvirtdone
	br	x15

virtdone:
	BTI_J

	/* Set up the stack */
	adrp	x25, initstack_end
	add	x25, x25, :lo12:initstack_end
	sub	sp, x25, #PCB_SIZE

	/* Zero the BSS */
	ldr	x15, .Lbss
	ldr	x14, .Lend
1:
	str	xzr, [x15], #8
	cmp	x15, x14
	b.lo	1b

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Backup the module pointer */
	mov	x1, x0

	sub	sp, sp, #BOOTPARAMS_SIZE
	mov	x0, sp

	str	x1,  [x0, #BP_MODULEP]
	adrp	x25, initstack
	add	x25, x25, :lo12:initstack
	str	x25, [x0, #BP_KERN_STACK]
	str	x27, [x0, #BP_KERN_TTBR0]
	str	x23, [x0, #BP_BOOT_EL]
	str	x4,  [x0, #BP_HCR_EL2]
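	/*
	 * The stores above fill in the boot parameters later consumed by
	 * initarm().  Loosely, the C-side view is a structure along the
	 * lines of
	 *
	 *	struct arm64_bootparams {
	 *		vm_offset_t	modulep;
	 *		vm_offset_t	kern_stack;
	 *		vm_paddr_t	kern_ttbr0;
	 *		int		boot_el;
	 *		uint64_t	hcr_el2;
	 *	};
	 *
	 * with the BP_* offsets generated into assym.inc being the
	 * authoritative layout.
	 */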

#ifdef KASAN
	/* Save bootparams */
	mov	x19, x0

	/* Bootstrap an early shadow map for the boot stack. */
	ldr	x0, [x0, #BP_KERN_STACK]
	ldr	x1, =BOOT_STACK_SIZE
	bl	kasan_init_early

	/* Restore bootparams */
	mov	x0, x19
#endif

	/* trace back starts here */
	mov	fp, #0
	/* Branch to C code */
	bl	initarm
	/* We are done with the boot params */
	add	sp, sp, #BOOTPARAMS_SIZE

	/*
	 * Enable pointer authentication in the kernel. We set the keys for
	 * thread0 in initarm so we have to wait until it returns to enable it.
	 * If we were to enable it in initarm then any authentication when
	 * returning would fail as it was called with pointer authentication
	 * disabled.
	 */
	bl	ptrauth_start

	bl	mi_startup

	/* We should not get here */
	brk	0

	.align 3
.Lvirtdone:
	.quad	virtdone
.Lbss:
	.quad	__bss_start
.Lend:
	.quad	__bss_end
END(_start)

#ifdef SMP
/*
 * void
 * mpentry_psci(unsigned long)
 *
 * Called by a core when it is being brought online with psci.
 * The data in x0 is passed straight to init_secondary.
 */
ENTRY(mpentry_psci)
	mov	x26, xzr
	b	mpentry_common
END(mpentry_psci)

/*
 * void
 * mpentry_spintable(void)
 *
 * Called by a core when it is being brought online with a spin-table.
 * Reads the new CPU ID and passes this to init_secondary.
 */
ENTRY(mpentry_spintable)
	ldr	x26, =spintable_wait
	b	mpentry_common
END(mpentry_spintable)

/* Wait for the current CPU to be released */
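/*
 * The release handshake implemented below: each waiting CPU spins until the
 * value published in ap_cpuid matches its own mpidr_el1 affinity, then
 * clears ap_cpuid, issues a dsb and an sev, and returns so mpentry_common
 * can continue bringing the CPU up.
 */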
LENTRY(spintable_wait)
	/* Read the affinity bits from mpidr_el1 */
	mrs	x1, mpidr_el1
	ldr	x2, =CPU_AFF_MASK
	and	x1, x1, x2

	adrp	x2, ap_cpuid
1:
	ldr	x0, [x2, :lo12:ap_cpuid]
	cmp	x0, x1
	b.ne	1b

	str	xzr, [x2, :lo12:ap_cpuid]
	dsb	sy
	sev

	ret
LEND(spintable_wait)

LENTRY(mpentry_common)
	/* Disable interrupts */
	msr	daifset, #DAIF_INTR

	/* Enter the kernel exception level */
	bl	enter_kernel_el

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Load the kernel page table */
	adrp	x24, pagetable_l0_ttbr1
	add	x24, x24, :lo12:pagetable_l0_ttbr1
	/* Load the identity page table */
	adrp	x27, pagetable_l0_ttbr0_bootstrap
	add	x27, x27, :lo12:pagetable_l0_ttbr0_bootstrap

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, =mp_virtdone
	br	x15

mp_virtdone:
	BTI_J

	/*
	 * Allow this CPU to wait until the kernel is ready for it,
	 * e.g. when using a spin-table where every CPU shares the same
	 * release address.
	 */
	cbz	x26, 1f
	blr	x26
1:

	/* Start using the AP boot stack */
	adrp	x4, bootstack
	ldr	x4, [x4, :lo12:bootstack]
	mov	sp, x4

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Load the kernel ttbr0 pagetable */
	msr	ttbr0_el1, x27
	isb

	/* Invalidate the TLB */
	tlbi	vmalle1
	dsb	sy
	isb

	/*
	 * Initialize the per-CPU pointer before calling into C code, for the
	 * benefit of kernel sanitizers.
	 */
	adrp	x18, bootpcpu
	ldr	x18, [x18, :lo12:bootpcpu]
	msr	tpidr_el1, x18

	b	init_secondary
LEND(mpentry_common)
#endif

/*
 * If we are started in EL2, configure the required hypervisor
 * registers and drop to EL1.
 */
LENTRY(enter_kernel_el)
#define	INIT_SCTLR_EL1	(SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_EIS | \
    SCTLR_TSCXT | SCTLR_EOS)
	mrs	x23, CurrentEL
	and	x23, x23, #(CURRENTEL_EL_MASK)
	cmp	x23, #(CURRENTEL_EL_EL2)
	b.eq	1f

	ldr	x2, =INIT_SCTLR_EL1
	msr	sctlr_el1, x2
	/* SCTLR_EOS is set so eret is a context synchronizing event so we
	 * need an isb here to ensure it's observed by later instructions,
	 * but don't need it in the eret below.
	 */
	isb

	/* Ensure SPSR_EL1 and pstate are in sync. The only way to set the
	 * latter is to set the former and return from an exception with eret.
	 */
	mov	x2, #(PSR_DAIF | PSR_M_EL1h)
	msr	spsr_el1, x2
	msr	elr_el1, lr
	eret

1:
	dsb	sy
	/*
	 * Set just the reserved bits in sctlr_el2. This will disable the
	 * MMU which may have broken the kernel if we enter the kernel in
	 * EL2, e.g. when using VHE.
	 */
	ldr	x2, =(SCTLR_EL2_RES1 | SCTLR_EL2_EIS | SCTLR_EL2_EOS)
	msr	sctlr_el2, x2
	isb

	/* Configure the Hypervisor */
	ldr	x2, =(HCR_RW | HCR_APK | HCR_API)
	msr	hcr_el2, x2

	/* Stash value of HCR_EL2 for later */
	isb
	mrs	x4, hcr_el2

	/* Load the Virtualization Process ID Register */
	mrs	x2, midr_el1
	msr	vpidr_el2, x2

	/* Load the Virtualization Multiprocessor ID Register */
	mrs	x2, mpidr_el1
	msr	vmpidr_el2, x2

	/* Set the initial sctlr_el1 */
	ldr	x2, =INIT_SCTLR_EL1
	msr	sctlr_el1, x2

	/*
	 * On some hardware, e.g., Apple M1, we can't clear E2H, so make sure we
	 * don't trap to EL2 for SIMD register usage to have at least a
	 * minimally usable system.
	 */
	tst	x4, #HCR_E2H
	mov	x3, #CPTR_RES1	/* HCR_E2H == 0 */
	mov	x5, #CPTR_FPEN	/* HCR_E2H == 1 */
	csel	x2, x3, x5, eq
	msr	cptr_el2, x2

	/* Don't trap to EL2 for CP15 traps */
	msr	hstr_el2, xzr

	/* Enable access to the physical timers at EL1 */
	tst	x4, #HCR_E2H
	ldr	x3, =(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
	ldr	x5, =(CNTHCTL_E2H_EL1PCTEN | CNTHCTL_E2H_EL1PTEN)
	csel	x2, x3, x5, eq
	msr	cnthctl_el2, x2

	/* Set the counter offset to a known value */
	msr	cntvoff_el2, xzr

	/* Hypervisor trap functions */
	adrp	x2, hyp_stub_vectors
	add	x2, x2, :lo12:hyp_stub_vectors
	msr	vbar_el2, x2

	/* Zero vttbr_el2 so a hypervisor can tell the host and guest apart */
	msr	vttbr_el2, xzr

	mov	x2, #(PSR_DAIF | PSR_M_EL1h)
	msr	spsr_el2, x2

	/* Configure GICv3 CPU interface */
	mrs	x2, id_aa64pfr0_el1
	/* Extract GIC bits from the register */
	ubfx	x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
	/* GIC[3:0] != 0000 - GIC CPU interface via system registers supported */
	cbz	x2, 2f

	mrs	x2, icc_sre_el2
	orr	x2, x2, #ICC_SRE_EL2_EN	/* Enable access from non-secure EL1 */
	orr	x2, x2, #ICC_SRE_EL2_SRE	/* Enable system registers */
	msr	icc_sre_el2, x2
2:

	/* Set the exception return address to our return address */
	msr	elr_el2, x30
	isb

	eret
#undef INIT_SCTLR_EL1
LEND(enter_kernel_el)

/*
 * Get the physical address the kernel was loaded at.
 */
LENTRY(get_load_phys_addr)
	/* Load the offset of get_load_phys_addr from KERNBASE */
	ldr	x28, =(get_load_phys_addr - KERNBASE)
	/* Load the physical address of get_load_phys_addr */
	adr	x29, get_load_phys_addr
	/* Find the physical address of KERNBASE, i.e. our load address */
	sub	x28, x29, x28
	ret
LEND(get_load_phys_addr)
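
/*
 * A worked example with a made-up load address: if get_load_phys_addr is
 * linked at KERNBASE + 0x1000 and the kernel was loaded at physical
 * 0x40200000, then x28 = 0x1000, the adr yields x29 = 0x40201000, and the
 * subtraction leaves x28 = 0x40200000, the physical address of KERNBASE.
 */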

/*
 * This builds the page tables containing the identity map, and the kernel
 * virtual map.
 *
 * It relies on:
 *  We were loaded to an address that is on a 2MiB boundary
 *  All the memory must not cross a 1GiB boundary
 *  x28 contains the physical address we were loaded from
 *
 *  There are 7 or 8 pages before that address for the page tables
 *   The pages used are:
 *    - The Kernel L3 tables (only for 16k kernel)
 *    - The Kernel L2 table
 *    - The Kernel L1 table
 *    - The Kernel L0 table             (TTBR1)
 *    - The identity (PA = VA) L2 table
 *    - The identity (PA = VA) L1 table
 *    - The identity (PA = VA) L0 table (Early TTBR0)
 *    - The Kernel empty L0 table       (Late TTBR0)
 */
LENTRY(create_pagetables)
	/* Save the Link register */
	mov	x5, x30

	/* Clean the page table */
	adrp	x6, pagetable
	add	x6, x6, :lo12:pagetable
	mov	x26, x6
	adrp	x27, pagetable_end
	add	x27, x27, :lo12:pagetable_end
1:
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	cmp	x6, x27
	b.lo	1b

	/*
	 * Build the TTBR1 maps.
	 */

	/* Find the size of the kernel */
	mov	x6, #(KERNBASE)

#if defined(LINUX_BOOT_ABI)
	/* X19 is used as 'map FDT data' flag */
	mov	x19, xzr

	/* No modules or FDT pointer ? */
	cbz	x0, booti_no_fdt

	/*
	 * Test if x0 points to a modules descriptor (virtual address) or
	 * to FDT (physical address)
	 */
	cmp	x0, x6		/* x6 is #(KERNBASE) */
	b.lo	booti_fdt
#endif

	/* Booted with modules pointer */
	/* Find modulep - begin */
	sub	x8, x0, x6
	/*
	 * Add space for the module data. When PAGE_SIZE is 4k this will
	 * add at least 2 level 2 blocks (2 * 2MiB). When PAGE_SIZE is
	 * larger it will be at least as large, as we use smaller level 3
	 * pages.
	 */
	ldr	x7, =((6 * 1024 * 1024) - 1)
	add	x8, x8, x7
	b	common

#if defined(LINUX_BOOT_ABI)
booti_fdt:
	/* Booted by U-Boot booti with FDT data */
	/* Set 'map FDT data' flag */
	mov	x19, #1

booti_no_fdt:
	/* Booted by U-Boot booti without FDT data */
	/* Find the end - begin */
	ldr     x7, .Lend
	sub     x8, x7, x6

	/*
	 * Add one 2MiB page for copy of FDT data (maximum FDT size),
	 * one for metadata and round up
	 */
	ldr	x7, =(3 * L2_SIZE - 1)
	add	x8, x8, x7
#endif

common:
#if PAGE_SIZE != PAGE_SIZE_4K
	/*
	 * Create L3 and L3C pages. The kernel will be loaded at a 2M aligned
	 * address, enabling the creation of L3C pages. However, when the page
	 * size is larger than 4k, L2 blocks are too large to map the kernel
	 * with 2M alignment.
	 */
#define	PTE_SHIFT	L3_SHIFT
#define	BUILD_PTE_FUNC	build_l3_page_pagetable
#else
#define	PTE_SHIFT	L2_SHIFT
#define	BUILD_PTE_FUNC	build_l2_block_pagetable
#endif

	/* Get the number of blocks/pages to allocate, rounded down */
	lsr	x10, x8, #(PTE_SHIFT)

	/* Create the kernel space PTE table */
	mov	x6, x26
	mov	x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x8, #(KERNBASE)
	mov	x9, x28
	bl	BUILD_PTE_FUNC

#undef PTE_SHIFT
#undef BUILD_PTE_FUNC

#if PAGE_SIZE != PAGE_SIZE_4K
	/* Move to the l2 table */
	ldr	x9, =(PAGE_SIZE * L3_PAGE_COUNT)
	add	x26, x26, x9

	/* Link the l2 -> l3 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l2_pagetable
#endif

	/* Move to the l1 table */
	add	x26, x26, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x24, x26, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x24
	mov	x10, #1
	bl	link_l0_pagetable

	/*
	 * Build the TTBR0 maps.  As TTBR0 maps, they must specify ATTR_S1_nG.
	 * They are only needed early on, before the kernel switches to its
	 * own TTBR0 table.
	 */
	add	x27, x24, #PAGE_SIZE

	mov	x6, x27		/* The initial page table */

	/* Create the VA = PA map */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	adrp	x16, _start
	and	x16, x16, #(~L2_OFFSET)
	mov	x9, x16		/* PA start */
	mov	x8, x16		/* VA start (== PA start) */
	mov	x10, #1
	bl	build_l2_block_pagetable

#if defined(SOCDEV_PA)
	/* Create a table for the UART */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16

	/* Store the socdev virtual address */
	add	x17, x8, #(SOCDEV_PA & L2_OFFSET)
	adrp	x9, socdev_va
	str	x17, [x9, :lo12:socdev_va]

	mov	x9, #(SOCDEV_PA & ~L2_OFFSET)	/* PA start */
	mov	x10, #1
	bl	build_l2_block_pagetable
#endif

#if defined(LINUX_BOOT_ABI)
	/* Map FDT data ? */
	cbz	x19, 1f

	/* Create the mapping for FDT data (2 MiB max) */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16
	mov	x9, x0			/* PA start */
	/* Update the module pointer to point at the allocated memory */
	and	x0, x0, #(L2_OFFSET)	/* Keep the lower bits */
	add	x0, x0, x8		/* Add the aligned virtual address */

	mov	x10, #1
	bl	build_l2_block_pagetable

1:
#endif

	/* Move to the l1 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x27
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x27
	mov	x10, #1
	bl	link_l0_pagetable

	/* Restore the Link register */
	mov	x30, x5
	ret
LEND(create_pagetables)

/*
 * Builds an L0 -> L1 table descriptor
 *
 *  x6  = L0 table
 *  x8  = Virtual Address
 *  x9  = L1 PA (trashed)
 *  x10 = Entry count (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(link_l0_pagetable)
	/*
	 * Link an L0 -> L1 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L0_SHIFT
	and	x11, x11, #L0_ADDR_MASK

	/* Build the L0 table entry */
	mov	x12, #L0_TABLE
	orr	x12, x12, #(TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0)

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
1:	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(link_l0_pagetable)

/*
 * Builds an L1 -> L2 table descriptor
 *
 *  x6  = L1 table
 *  x8  = Virtual Address
 *  x9  = L2 PA (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(link_l1_pagetable)
	/*
	 * Link an L1 -> L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L1_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L1 table entry */
	mov	x12, #L1_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l1_pagetable)

/*
 * Builds count 2 MiB page table entries
 *  x6  = L2 table
 *  x7  = Block attributes
 *  x8  = VA start
 *  x9  = PA start (trashed)
 *  x10 = Entry count (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(build_l2_block_pagetable)
	/*
	 * Build the L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 block entry */
	orr	x12, x7, #L2_BLOCK
	orr	x12, x12, #(ATTR_DEFAULT)
	orr	x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
	orr	x12, x12, #(ATTR_S1_GP)
#endif

	/* Only use the output address bits */
	lsr	x9, x9, #L2_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L2_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l2_block_pagetable)
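
/*
 * In other words, for each of the count entries the loop above computes
 *   index      = (VA >> L2_SHIFT) & Ln_ADDR_MASK
 *   descriptor = attrs | L2_BLOCK | (PA & ~L2_OFFSET)
 * and stores the descriptor at table[index], advancing the index by one
 * and the physical address by one 2MiB block per iteration.
 */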

#if PAGE_SIZE != PAGE_SIZE_4K
/*
 * Builds an L2 -> L3 table descriptor
 *
 *  x6  = L2 table
 *  x8  = Virtual Address
 *  x9  = L3 PA (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(link_l2_pagetable)
	/*
	 * Link an L2 -> L3 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 table entry */
	mov	x12, #L2_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l2_pagetable)

/*
 * Builds count level 3 page table entries. Uses ATTR_CONTIGUOUS to create
 * large page (L3C) mappings when the current VA and remaining count allow
 * it.
 *  x6  = L3 table
 *  x7  = Block attributes
 *  x8  = VA start
 *  x9  = PA start (trashed)
 *  x10 = Entry count (trashed)
 *  x11, x12 and x13 are trashed
 *
 * VA start (x8) modulo L3C_SIZE must equal PA start (x9) modulo L3C_SIZE.
 */
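/*
 * The contiguous-hint logic below re-evaluates ATTR_CONTIGUOUS at each
 * L3C_ENTRIES-aligned index: the bit is set when at least L3C_ENTRIES
 * entries remain and cleared otherwise, and the entries within a run
 * inherit the decision made at the start of that run.
 */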
LENTRY(build_l3_page_pagetable)
	/*
	 * Build the L3 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L3_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L3 page entry */
	orr	x12, x7, #L3_PAGE
	orr	x12, x12, #(ATTR_DEFAULT)
	orr	x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
	orr	x12, x12, #(ATTR_S1_GP)
#endif

	/* Only use the output address bits */
	lsr	x9, x9, #L3_SHIFT

	/* Check if an ATTR_CONTIGUOUS mapping is possible */
1:	tst	x11, #(L3C_ENTRIES - 1)
	b.ne	2f
	cmp	x10, #L3C_ENTRIES
	b.lo	3f
	orr	x12, x12, #(ATTR_CONTIGUOUS)
	b	2f
3:	and	x12, x12, #(~ATTR_CONTIGUOUS)

	/* Set the physical address for this virtual address */
2:	orr	x13, x12, x9, lsl #L3_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l3_page_pagetable)
#endif

LENTRY(start_mmu)
	dsb	sy

	/* Load the exception vectors */
	ldr	x2, =exception_vectors
	msr	vbar_el1, x2

	/* Load ttbr0 and ttbr1 */
	msr	ttbr0_el1, x27
	msr	ttbr1_el1, x24
	isb

	/* Clear the Monitor Debug System control register */
	msr	mdscr_el1, xzr

	/* Invalidate the TLB */
	tlbi	vmalle1is
	dsb	ish
	isb

	ldr	x2, mair
	msr	mair_el1, x2

	/*
	 * Setup TCR according to the PARange and ASIDBits fields
	 * from ID_AA64MMFR0_EL1 and the HAFDBS field from the
	 * ID_AA64MMFR1_EL1.  More precisely, set TCR_EL1.AS
	 * to 1 only if the ASIDBits field equals 0b0010.
	 */
	ldr	x2, tcr
	mrs	x3, id_aa64mmfr0_el1

	/* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
	bfi	x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH)
	and	x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK)

	/* Check if the HW supports 16 bit ASIDS */
	cmp	x3, #(ID_AA64MMFR0_ASIDBits_16)
	/* If so x3 == 1, else x3 == 0 */
	cset	x3, eq
	/* Set TCR.AS with x3 */
	bfi	x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH)

	/*
	 * Check if the HW supports access flag and dirty state updates,
	 * and set TCR_EL1.HA and TCR_EL1.HD accordingly.
	 */
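	/*
	 * The HAFDBS field decodes as: 1 = hardware access flag updates
	 * only (set TCR_HA), 2 = access flag and dirty state updates (set
	 * TCR_HA and TCR_HD); any other value leaves both bits clear.
	 */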
	mrs	x3, id_aa64mmfr1_el1
	and	x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK)
	cmp	x3, #1
	b.ne	1f
	orr	x2, x2, #(TCR_HA)
	b	2f
1:
	cmp	x3, #2
	b.ne	2f
	orr	x2, x2, #(TCR_HA | TCR_HD)
2:
	msr	tcr_el1, x2

	/*
	 * Setup SCTLR.
	 */
	ldr	x2, sctlr_set
	ldr	x3, sctlr_clear
	mrs	x1, sctlr_el1
	bic	x1, x1, x3	/* Clear the required bits */
	orr	x1, x1, x2	/* Set the required bits */
	msr	sctlr_el1, x1
	isb

	ret

	.align 3
mair:
	.quad	MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE_nGnRnE) | \
		MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE)   |	\
		MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK)    |	\
		MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH) |	\
		MAIR_ATTR(MAIR_DEVICE_nGnRE, VM_MEMATTR_DEVICE_nGnRE)
tcr:
#if PAGE_SIZE == PAGE_SIZE_4K
#define	TCR_TG	(TCR_TG1_4K | TCR_TG0_4K)
#elif PAGE_SIZE == PAGE_SIZE_16K
#define	TCR_TG	(TCR_TG1_16K | TCR_TG0_16K)
#else
#error Unsupported page size
#endif

	.quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG | \
	    TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
sctlr_set:
	/* Bits to set */
	.quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
	    SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
	    SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \
	    SCTLR_M | SCTLR_CP15BEN | SCTLR_BT1 | SCTLR_BT0)
sctlr_clear:
	/* Bits to clear */
	.quad (SCTLR_EE | SCTLR_E0E | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
	    SCTLR_ITD | SCTLR_A)
LEND(start_mmu)

ENTRY(abort)
	b abort
END(abort)

.bss
	.align	PAGE_SHIFT
initstack:
	.space	BOOT_STACK_SIZE
initstack_end:

	.section .init_pagetable, "aw", %nobits
	.align PAGE_SHIFT
	/*
	 * 7 initial tables (in the following order):
	 *           L2 for kernel (High addresses)
	 *           L1 for kernel
	 *           L0 for kernel
	 *           L2 bootstrap for user   (Low addresses)
	 *           L1 bootstrap for user
	 *           L0 bootstrap for user
	 *           L0 for user
	 */
	.globl pagetable_l0_ttbr1
pagetable:
#if PAGE_SIZE != PAGE_SIZE_4K
	.space	(PAGE_SIZE * L3_PAGE_COUNT)
pagetable_l2_ttbr1:
#endif
	.space	PAGE_SIZE
pagetable_l1_ttbr1:
	.space	PAGE_SIZE
pagetable_l0_ttbr1:
	.space	PAGE_SIZE
pagetable_l2_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l1_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0:
	.space	PAGE_SIZE
pagetable_end:

el2_pagetable:
	.space	PAGE_SIZE

	.section .rodata, "a", %progbits
	.globl	aarch32_sigcode
	.align 2
aarch32_sigcode:
	.word 0xe1a0000d	// mov r0, sp
	.word 0xe2800040	// add r0, r0, #SIGF_UC
	.word 0xe59f700c	// ldr r7, [pc, #12]
	.word 0xef000000	// swi #0
	.word 0xe59f7008	// ldr r7, [pc, #8]
	.word 0xef000000	// swi #0
	.word 0xeafffffa	// b . - 16
	.word SYS_sigreturn
	.word SYS_exit
	.align	3
	.size aarch32_sigcode, . - aarch32_sigcode
aarch32_esigcode:
	.data
	.global sz_aarch32_sigcode
sz_aarch32_sigcode:
	.quad aarch32_esigcode - aarch32_sigcode

GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)