/*-
 * Copyright (c) 2012-2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "assym.inc"
#include "opt_kstack_pages.h"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/hypervisor.h>
#include <machine/param.h>
#include <machine/pte.h>
#include <machine/vm.h>
#include <machine/vmparam.h>

#define	VIRT_BITS	48

#if PAGE_SIZE == PAGE_SIZE_16K
/*
 * The number of level 3 tables to create. 32 tables will allow for 1GiB of
 * address space, the same as a single level 2 table maps with 4k pages.
 */
#define	L3_PAGE_COUNT	32
#endif
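
/*
 * A quick check of the arithmetic above: with 16k pages each level 3 table
 * holds 2048 (16k / 8) entries, each mapping one 16KiB page, so one table
 * covers 32MiB and 32 tables cover 1GiB. With 4k pages a single level 2
 * table holds 512 entries of 2MiB blocks, which is also 1GiB.
 */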

	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * We assume:
 *  MMU      on with an identity map, or off
 *  D-Cache: off
 *  I-Cache: on or off
 *  We are loaded at a 2MiB aligned address
 */

ENTRY(_start)
	/* Drop to EL1 */
	bl	drop_to_el1

	/*
	 * Disable the MMU. We may have entered the kernel with it on and
	 * will need to update the tables later. If the MMU was set up with
	 * anything other than a VA == PA map then this will fail, but in
	 * that case the code to find where we are running from would have
	 * also failed.
	 */
	dsb	sy
	mrs	x2, sctlr_el1
	bic	x2, x2, SCTLR_M
	msr	sctlr_el1, x2
	isb

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Get the virt -> phys offset */
	bl	get_load_phys_addr

	/*
	 * At this point:
	 * x28 = Our physical load address
	 */

	/* Create the page tables */
	bl	create_pagetables

	/*
	 * At this point:
	 * x27 = TTBR0 table
	 * x26 = Kernel L1 table
	 * x24 = TTBR1 table
	 */

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, .Lvirtdone
	br	x15
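
	/*
	 * A note on the pair above: .Lvirtdone is a 64-bit literal that the
	 * linker filled with the link-time (virtual) address of virtdone,
	 * so this absolute branch is what moves the PC from the identity
	 * map into the kernel's virtual address space; a PC-relative
	 * adrp/add would only have produced our current physical address.
	 */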

virtdone:
	BTI_J

	/* Set up the stack */
	adrp	x25, initstack_end
	add	x25, x25, :lo12:initstack_end
	sub	sp, x25, #PCB_SIZE

	/* Zero the BSS */
	ldr	x15, .Lbss
	ldr	x14, .Lend
1:
	str	xzr, [x15], #8
	cmp	x15, x14
	b.lo	1b

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Back up the module pointer */
	mov	x1, x0

	sub	sp, sp, #BOOTPARAMS_SIZE
	mov	x0, sp

	str	x1,  [x0, #BP_MODULEP]
	adrp	x25, initstack
	add	x25, x25, :lo12:initstack
	str	x25, [x0, #BP_KERN_STACK]
	str	x27, [x0, #BP_KERN_TTBR0]
	str	x23, [x0, #BP_BOOT_EL]
	str	x4,  [x0, #BP_HCR_EL2]

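	/*
	 * The stores above fill in the boot parameters later handed to
	 * initarm. A sketch of the corresponding structure, with field
	 * names inferred from the BP_* offsets (the authoritative
	 * definition lives in the machine headers, so treat this as
	 * illustrative only):
	 *
	 *	struct arm64_bootparams {
	 *		vm_offset_t	modulep;
	 *		vm_offset_t	kern_stack;
	 *		vm_paddr_t	kern_ttbr0;
	 *		uint64_t	boot_el;
	 *		uint64_t	hcr_el2;
	 *	};
	 */
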
#ifdef KASAN
	/* Save bootparams */
	mov	x19, x0

	/* Bootstrap an early shadow map for the boot stack. */
	bl	pmap_san_bootstrap

	/* Restore bootparams */
	mov	x0, x19
#endif

	/* The traceback starts here */
	mov	fp, #0
	/* Branch to C code */
	bl	initarm
	/* We are done with the boot params */
	add	sp, sp, #BOOTPARAMS_SIZE

	/*
	 * Enable pointer authentication in the kernel. We set the keys for
	 * thread0 in initarm so we have to wait until it returns before
	 * enabling it. If we were to enable it in initarm then any
	 * authentication when returning would fail, as initarm was called
	 * with pointer authentication disabled.
	 */
	bl	ptrauth_start

	bl	mi_startup

	/* We should not get here */
	brk	0

	.align 3
.Lvirtdone:
	.quad	virtdone
.Lbss:
	.quad	__bss_start
.Lend:
	.quad	__bss_end
END(_start)

#ifdef SMP
/*
 * mpentry(unsigned long)
 *
 * Called by a core when it is being brought online.
 * The data in x0 is passed straight to init_secondary.
 */
ENTRY(mpentry)
	/* Disable interrupts */
	msr	daifset, #DAIF_INTR

	/* Drop to EL1 */
	bl	drop_to_el1

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Load the kernel page table */
	adrp	x24, pagetable_l0_ttbr1
	add	x24, x24, :lo12:pagetable_l0_ttbr1
	/* Load the identity page table */
	adrp	x27, pagetable_l0_ttbr0_bootstrap
	add	x27, x27, :lo12:pagetable_l0_ttbr0_bootstrap

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adrp	x27, pagetable_l0_ttbr0
	add	x27, x27, :lo12:pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, =mp_virtdone
	br	x15

mp_virtdone:
	BTI_J

	/* Start using the AP boot stack */
	adrp	x4, bootstack
	ldr	x4, [x4, :lo12:bootstack]
	mov	sp, x4

#if defined(PERTHREAD_SSP)
	/* Set sp_el0 to the boot canary for early per-thread SSP to work */
	adrp	x15, boot_canary
	add	x15, x15, :lo12:boot_canary
	msr	sp_el0, x15
#endif

	/* Load the kernel ttbr0 pagetable */
	msr	ttbr0_el1, x27
	isb

	/* Invalidate the TLB */
	tlbi	vmalle1
	dsb	sy
	isb

	/*
	 * Initialize the per-CPU pointer before calling into C code, for the
	 * benefit of kernel sanitizers.
	 */
	adrp	x18, bootpcpu
	ldr	x18, [x18, :lo12:bootpcpu]
	msr	tpidr_el1, x18

	b	init_secondary
END(mpentry)
#endif

/*
 * If we are started in EL2, configure the required hypervisor
 * registers and drop to EL1.
 */
LENTRY(drop_to_el1)
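	/*
	 * CurrentEL encodes the exception level in bits [3:2], so shift it
	 * down and compare with 2: if we booted in EL2 we need to configure
	 * it, otherwise (EL1) there is nothing to do.
	 */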
	mrs	x23, CurrentEL
	lsr	x23, x23, #2
	cmp	x23, #0x2
	b.eq	1f
	ret
1:
	/*
	 * Disable the MMU. If the HCR_EL2.E2H field is set we will clear
	 * it, which may break address translation.
	 */
	dsb	sy
	mrs	x2, sctlr_el2
	bic	x2, x2, SCTLR_M
	msr	sctlr_el2, x2
	isb

	/* Configure the Hypervisor */
	ldr	x2, =(HCR_RW | HCR_APK | HCR_API)
	msr	hcr_el2, x2

	/* Stash the value of HCR_EL2 for later */
	isb
	mrs	x4, hcr_el2

	/* Load the Virtualization Process ID Register */
	mrs	x2, midr_el1
	msr	vpidr_el2, x2

	/* Load the Virtualization Multiprocess ID Register */
	mrs	x2, mpidr_el1
	msr	vmpidr_el2, x2

	/* Set the bits that need to be 1 in sctlr_el1 */
	ldr	x2, .Lsctlr_res1
	msr	sctlr_el1, x2

	/*
	 * On some hardware, e.g., Apple M1, we can't clear E2H, so make sure
	 * we don't trap to EL2 for SIMD register usage to have at least a
	 * minimally usable system.
	 */
	tst	x4, #HCR_E2H
	mov	x3, #CPTR_RES1	/* HCR_E2H == 0 */
	mov	x5, #CPTR_FPEN	/* HCR_E2H == 1 */
	csel	x2, x3, x5, eq
	msr	cptr_el2, x2

	/* Don't trap to EL2 for CP15 traps */
	msr	hstr_el2, xzr

	/* Enable access to the physical timers at EL1 */
	mrs	x2, cnthctl_el2
	orr	x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
	msr	cnthctl_el2, x2

	/* Set the counter offset to a known value */
	msr	cntvoff_el2, xzr

	/* Hypervisor trap functions */
	adrp	x2, hyp_stub_vectors
	add	x2, x2, :lo12:hyp_stub_vectors
	msr	vbar_el2, x2

	/* Zero vttbr_el2 so a hypervisor can tell the host and guest apart */
	msr	vttbr_el2, xzr

	mov	x2, #(PSR_DAIF | PSR_M_EL1h)
	msr	spsr_el2, x2

	/* Configure the GICv3 CPU interface */
	mrs	x2, id_aa64pfr0_el1
	/* Extract the GIC bits from the register */
	ubfx	x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
	/* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
	cmp	x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
	b.ne	2f

	mrs	x2, icc_sre_el2
	orr	x2, x2, #ICC_SRE_EL2_EN	/* Enable access from non-secure EL1 */
	orr	x2, x2, #ICC_SRE_EL2_SRE	/* Enable system registers */
	msr	icc_sre_el2, x2
2:

	/* Set the exception return address to our return address */
	msr	elr_el2, x30
	isb

	eret

	.align 3
.Lsctlr_res1:
	.quad SCTLR_RES1
LEND(drop_to_el1)

/*
 * Get the physical address the kernel was loaded at.
 */
LENTRY(get_load_phys_addr)
	/* Load the offset of get_load_phys_addr from KERNBASE */
	ldr	x28, =(get_load_phys_addr - KERNBASE)
	/* Load the physical address of get_load_phys_addr */
	adr	x29, get_load_phys_addr
	/* Find the physical address of KERNBASE, i.e. our load address */
	sub	x28, x29, x28
	ret
LEND(get_load_phys_addr)
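
/*
 * The arithmetic above, spelled out: the literal holds the link-time
 * offset of this routine from KERNBASE, while adr yields the routine's
 * current (physical) address, so
 *
 *	x28 = pc_of(get_load_phys_addr)
 *	    - (get_load_phys_addr - KERNBASE)
 *	    = physical address of KERNBASE, i.e. the load address.
 */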

/*
 * This builds the page tables containing the identity map, and the kernel
 * virtual map.
 *
 * It relies on:
 *  We were loaded to an address that is on a 2MiB boundary
 *  All the memory must not cross a 1GiB boundary
 *  x28 contains the physical address we were loaded from
 *
 * TODO: This is out of date.
 *  There are at least 5 pages before that address for the page tables
 *   The pages used are:
 *    - The Kernel L2 table
 *    - The Kernel L1 table
 *    - The Kernel L0 table             (TTBR1)
 *    - The identity (PA = VA) L1 table
 *    - The identity (PA = VA) L0 table (TTBR0)
 */
LENTRY(create_pagetables)
	/* Save the Link register */
	mov	x5, x30

	/* Clean the page table */
	adrp	x6, pagetable
	add	x6, x6, :lo12:pagetable
	mov	x26, x6
	adrp	x27, pagetable_end
	add	x27, x27, :lo12:pagetable_end
1:
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	cmp	x6, x27
	b.lo	1b

	/*
	 * Build the TTBR1 maps.
	 */

	/* Find the size of the kernel */
	mov	x6, #(KERNBASE)

#if defined(LINUX_BOOT_ABI)
	/* X19 is used as the 'map FDT data' flag */
	mov	x19, xzr

	/* No modules or FDT pointer? */
	cbz	x0, booti_no_fdt

	/*
	 * Test if x0 points to a modules descriptor (virtual address) or
	 * to an FDT (physical address)
	 */
	cmp	x0, x6		/* x6 is #(KERNBASE) */
	b.lo	booti_fdt
#endif

	/* Booted with a modules pointer */
	/* Find modulep - begin */
	sub	x8, x0, x6
	/*
	 * Add space for the module data. When PAGE_SIZE is 4k this will
	 * add at least 2 level 2 blocks (2 * 2MiB). When PAGE_SIZE is
	 * larger the reservation is at least as large, as smaller level 3
	 * pages are used.
	 */
	ldr	x7, =((6 * 1024 * 1024) - 1)
	add	x8, x8, x7
	b	common

#if defined(LINUX_BOOT_ABI)
booti_fdt:
	/* Booted by U-Boot booti with FDT data */
	/* Set the 'map FDT data' flag */
	mov	x19, #1

booti_no_fdt:
	/* Booted by U-Boot booti without FDT data */
	/* Find the end - begin */
	ldr	x7, .Lend
	sub	x8, x7, x6

	/*
	 * Add one 2MiB page for a copy of the FDT data (maximum FDT size),
	 * one for metadata, and round up
	 */
	ldr	x7, =(3 * L2_SIZE - 1)
	add	x8, x8, x7
#endif

common:
#if PAGE_SIZE != PAGE_SIZE_4K
	/*
	 * Create L3 pages. The kernel will be loaded at a 2M aligned
	 * address, but when the page size is larger than 4k, L2 blocks
	 * are too large to map the kernel with only that alignment.
	 */

	/* Get the number of l3 pages to allocate, rounded down */
	lsr	x10, x8, #(L3_SHIFT)

	/* Create the kernel space L2 table */
	mov	x6, x26
	mov	x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x8, #(KERNBASE)
	mov	x9, x28
	bl	build_l3_page_pagetable

	/* Move to the l2 table */
	ldr	x9, =(PAGE_SIZE * L3_PAGE_COUNT)
	add	x26, x26, x9

	/* Link the l2 -> l3 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l2_pagetable
#else
	/* Get the number of l2 pages to allocate, rounded down */
	lsr	x10, x8, #(L2_SHIFT)

	/* Create the kernel space L2 table */
	mov	x6, x26
	mov	x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x8, #(KERNBASE)
	mov	x9, x28
	bl	build_l2_block_pagetable
#endif

	/* Move to the l1 table */
	add	x26, x26, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x24, x26, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x24
	mov	x10, #1
	bl	link_l0_pagetable

	/*
	 * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG.
	 * They are only needed early on, until the kernel is running purely
	 * from its virtual address space.
	 */
	add	x27, x24, #PAGE_SIZE

	mov	x6, x27		/* The initial page table */

	/* Create the VA = PA map */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	adrp	x16, _start
	and	x16, x16, #(~L2_OFFSET)
	mov	x9, x16		/* PA start */
	mov	x8, x16		/* VA start (== PA start) */
	mov	x10, #1
	bl	build_l2_block_pagetable

#if defined(SOCDEV_PA)
	/* Create a table for the UART */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16

	/* Store the socdev virtual address */
	add	x17, x8, #(SOCDEV_PA & L2_OFFSET)
	adrp	x9, socdev_va
	str	x17, [x9, :lo12:socdev_va]

	mov	x9, #(SOCDEV_PA & ~L2_OFFSET)	/* PA start */
	mov	x10, #1
	bl	build_l2_block_pagetable
#endif

#if defined(LINUX_BOOT_ABI)
	/* Map the FDT data? */
	cbz	x19, 1f

	/* Create the mapping for the FDT data (2 MiB max) */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	ldr	x9, =(L2_SIZE)
	add	x16, x16, x9	/* VA start */
	mov	x8, x16
	mov	x9, x0			/* PA start */
	/* Update the module pointer to point at the allocated memory */
	and	x0, x0, #(L2_OFFSET)	/* Keep the lower bits */
	add	x0, x0, x8		/* Add the aligned virtual address */

	mov	x10, #1
	bl	build_l2_block_pagetable

1:
#endif

	/* Move to the l1 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x27
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x27
	mov	x10, #1
	bl	link_l0_pagetable

	/* Restore the Link register */
	mov	x30, x5
	ret
LEND(create_pagetables)
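
/*
 * A summary of the result (not authoritative, just restating the code
 * above): the TTBR1 chain is L0 -> L1 -> L2 block entries covering the
 * kernel, with an extra L2 -> L3 level of pages when PAGE_SIZE != 4k;
 * the TTBR0 bootstrap chain is L0 -> L1 -> L2 block entries covering
 * the identity (VA == PA) map plus the optional SOCDEV and FDT
 * mappings.
 */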

/*
 * Builds an L0 -> L1 table descriptor
 *
 *  x6  = L0 table
 *  x8  = Virtual Address
 *  x9  = L1 PA (trashed)
 *  x10 = Entry count (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(link_l0_pagetable)
	/*
	 * Link an L0 -> L1 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L0_SHIFT
	and	x11, x11, #L0_ADDR_MASK

	/* Build the L0 table entry */
	mov	x12, #L0_TABLE
	orr	x12, x12, #(TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0)

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
1:	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(link_l0_pagetable)
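
/*
 * Roughly equivalent C, as an illustrative sketch (it assumes the L1
 * table's physical address in x9 is page aligned, as it is here):
 *
 *	for (i = 0; i < count; i++) {
 *		idx = (va >> L0_SHIFT) & L0_ADDR_MASK;
 *		l0[idx + i] = (l1_pa + i * PAGE_SIZE) | L0_TABLE |
 *		    TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0;
 *	}
 */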

/*
 * Builds an L1 -> L2 table descriptor
 *
 *  x6  = L1 table
 *  x8  = Virtual Address
 *  x9  = L2 PA (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(link_l1_pagetable)
	/*
	 * Link an L1 -> L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L1_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L1 table entry */
	mov	x12, #L1_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l1_pagetable)

/*
 * Builds count 2 MiB block page table entries
 *  x6  = L2 table
 *  x7  = Block attributes
 *  x8  = VA start
 *  x9  = PA start (trashed)
 *  x10 = Entry count (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(build_l2_block_pagetable)
	/*
	 * Build the L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 block entry */
	orr	x12, x7, #L2_BLOCK
	orr	x12, x12, #(ATTR_DEFAULT)
	orr	x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
	orr	x12, x12, #(ATTR_S1_GP)
#endif

	/* Only use the output address bits */
	lsr	x9, x9, #L2_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L2_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l2_block_pagetable)
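
/*
 * The loop above in illustrative C (a sketch, not the kernel's code):
 * each entry maps a 2MiB block, so successive entries advance the
 * output address by L2_SIZE.
 *
 *	for (i = 0; i < count; i++) {
 *		idx = (va >> L2_SHIFT) & Ln_ADDR_MASK;
 *		l2[idx + i] = (((pa >> L2_SHIFT) + i) << L2_SHIFT) |
 *		    attr | L2_BLOCK | ATTR_DEFAULT | ATTR_S1_UXN;
 *	}
 */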

#if PAGE_SIZE != PAGE_SIZE_4K
/*
 * Builds an L2 -> L3 table descriptor
 *
 *  x6  = L2 table
 *  x8  = Virtual Address
 *  x9  = L3 PA (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(link_l2_pagetable)
	/*
	 * Link an L2 -> L3 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 table entry */
	mov	x12, #L2_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret
LEND(link_l2_pagetable)

/*
 * Builds count level 3 page table entries
 *  x6  = L3 table
 *  x7  = Block attributes
 *  x8  = VA start
 *  x9  = PA start (trashed)
 *  x10 = Entry count (trashed)
 *  x11, x12 and x13 are trashed
 */
LENTRY(build_l3_page_pagetable)
	/*
	 * Build the L3 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L3_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L3 page entry */
	orr	x12, x7, #L3_PAGE
	orr	x12, x12, #(ATTR_DEFAULT)
	orr	x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
	orr	x12, x12, #(ATTR_S1_GP)
#endif

	/* Only use the output address bits */
	lsr	x9, x9, #L3_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L3_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret
LEND(build_l3_page_pagetable)
#endif

LENTRY(start_mmu)
	dsb	sy

	/* Load the exception vectors */
	ldr	x2, =exception_vectors
	msr	vbar_el1, x2

	/* Load ttbr0 and ttbr1 */
	msr	ttbr0_el1, x27
	msr	ttbr1_el1, x24
	isb

	/* Clear the Monitor Debug System control register */
	msr	mdscr_el1, xzr

	/* Invalidate the TLB */
	tlbi	vmalle1is
	dsb	ish
	isb

	ldr	x2, mair
	msr	mair_el1, x2

	/*
	 * Set up TCR according to the PARange and ASIDBits fields
	 * from ID_AA64MMFR0_EL1 and the HAFDBS field from
	 * ID_AA64MMFR1_EL1. More precisely, set TCR_EL1.AS
	 * to 1 only if the ASIDBits field equals 0b0010.
	 */
	ldr	x2, tcr
	mrs	x3, id_aa64mmfr0_el1

	/* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
	bfi	x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH)
	and	x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK)

	/* Check if the HW supports 16 bit ASIDs */
	cmp	x3, #(ID_AA64MMFR0_ASIDBits_16)
	/* If so x3 == 1, else x3 == 0 */
	cset	x3, eq
	/* Set TCR.AS with x3 */
	bfi	x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH)

	/*
	 * Check if the HW supports access flag and dirty state updates,
	 * and set TCR_EL1.HA and TCR_EL1.HD accordingly.
	 */
	mrs	x3, id_aa64mmfr1_el1
	and	x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK)
	cmp	x3, #1
	b.ne	1f
	orr	x2, x2, #(TCR_HA)
	b	2f
1:
	cmp	x3, #2
	b.ne	2f
	orr	x2, x2, #(TCR_HA | TCR_HD)
2:
	msr	tcr_el1, x2

	/*
	 * Set up SCTLR.
	 */
	ldr	x2, sctlr_set
	ldr	x3, sctlr_clear
	mrs	x1, sctlr_el1
	bic	x1, x1, x3	/* Clear the required bits */
	orr	x1, x1, x2	/* Set the required bits */
	msr	sctlr_el1, x1
	isb

	ret

	.align 3
mair:
	.quad	MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE_nGnRnE) | \
		MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE)   |	\
		MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK)    |	\
		MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH) |	\
		MAIR_ATTR(MAIR_DEVICE_nGnRE, VM_MEMATTR_DEVICE_nGnRE)
tcr:
#if PAGE_SIZE == PAGE_SIZE_4K
#define	TCR_TG	(TCR_TG1_4K | TCR_TG0_4K)
#elif PAGE_SIZE == PAGE_SIZE_16K
#define	TCR_TG	(TCR_TG1_16K | TCR_TG0_16K)
#else
#error Unsupported page size
#endif

	.quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG | \
	    TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
sctlr_set:
	/* Bits to set */
	.quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
	    SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
	    SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \
	    SCTLR_M | SCTLR_CP15BEN | SCTLR_BT1 | SCTLR_BT0)
sctlr_clear:
	/* Bits to clear */
	.quad (SCTLR_EE | SCTLR_E0E | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
	    SCTLR_ITD | SCTLR_A)
LEND(start_mmu)
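
/*
 * A worked example of the tcr value above (a sanity check only): with
 * 4k pages and VIRT_BITS == 48, TCR_TxSZ(64 - 48) sets both T0SZ and
 * T1SZ to 16, i.e. 48-bit virtual address spaces for both TTBR0 and
 * TTBR1, and TCR_TG selects 4k granules for both.
 */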

ENTRY(abort)
	b abort
END(abort)

.bss
	.align	PAGE_SHIFT
initstack:
	.space	(PAGE_SIZE * KSTACK_PAGES)
initstack_end:

	.section .init_pagetable, "aw", %nobits
	.align PAGE_SHIFT
	/*
	 * 7 initial tables (in the following order):
	 *           L2 for kernel (High addresses)
	 *           L1 for kernel
	 *           L0 for kernel
	 *           L2 bootstrap for user   (Low addresses)
	 *           L1 bootstrap for user
	 *           L0 bootstrap for user
	 *           L0 for user
	 */
	.globl pagetable_l0_ttbr1
pagetable:
#if PAGE_SIZE != PAGE_SIZE_4K
	.space	(PAGE_SIZE * L3_PAGE_COUNT)
pagetable_l2_ttbr1:
#endif
	.space	PAGE_SIZE
pagetable_l1_ttbr1:
	.space	PAGE_SIZE
pagetable_l0_ttbr1:
	.space	PAGE_SIZE
pagetable_l2_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l1_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0:
	.space	PAGE_SIZE
pagetable_end:

el2_pagetable:
	.space	PAGE_SIZE

	.section .rodata, "a", %progbits
	.globl	aarch32_sigcode
	.align 2
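/*
 * In rough C terms, this trampoline does the following (a sketch;
 * SIGF_UC is the offset of the ucontext within the signal frame on
 * the stack, and the loop only matters if a syscall unexpectedly
 * returns):
 *
 *	for (;;) {
 *		sigreturn(sp + SIGF_UC);
 *		exit(...);
 *	}
 */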
aarch32_sigcode:
	.word 0xe1a0000d	// mov r0, sp
	.word 0xe2800040	// add r0, r0, #SIGF_UC
	.word 0xe59f700c	// ldr r7, [pc, #12]
	.word 0xef000000	// swi #0
	.word 0xe59f7008	// ldr r7, [pc, #8]
	.word 0xef000000	// swi #0
	.word 0xeafffffa	// b . - 16
	.word SYS_sigreturn
	.word SYS_exit
	.align	3
	.size aarch32_sigcode, . - aarch32_sigcode
aarch32_esigcode:
	.data
	.global sz_aarch32_sigcode
sz_aarch32_sigcode:
	.quad aarch32_esigcode - aarch32_sigcode
