/*-
 * Copyright (c) 2012-2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.inc"
#include "opt_kstack_pages.h"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/hypervisor.h>
#include <machine/param.h>
#include <machine/pte.h>
#include <machine/vm.h>
#include <machine/vmparam.h>

#define	VIRT_BITS	48
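/* One L0 entry, and hence one L1 table page, per 512GiB of DMAP space */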
#define	DMAP_TABLES	((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)

	.globl	kernbase
	.set	kernbase, KERNBASE

/* U-Boot booti related constants. */
#if defined(LINUX_BOOT_ABI)
#ifndef UBOOT_IMAGE_OFFSET
#define	UBOOT_IMAGE_OFFSET	0		/* Image offset from start of */
#endif						/*  2 MiB page */

#ifndef UBOOT_IMAGE_SIZE			/* Total size of image */
#define	UBOOT_IMAGE_SIZE	_end - _start
#endif

#ifndef UBOOT_IMAGE_FLAGS
#define	UBOOT_IMAGE_FLAGS	0		/* LE kernel, unspecified */
#endif						/*  page size */
#endif /* defined(LINUX_BOOT_ABI) */

/*
 * We assume:
 *  MMU      on with an identity map, or off
 *  D-Cache: off
 *  I-Cache: on or off
 *  We are loaded at a 2MiB aligned address
 */

	.text
	.globl _start
_start:
#if defined(LINUX_BOOT_ABI)
	/* U-boot image header */
	b	1f			/* code 0 */
	.long	0			/* code 1 */
	.quad	UBOOT_IMAGE_OFFSET	/* Image offset in 2 MiB page, LE */
	.quad	UBOOT_IMAGE_SIZE	/* Image size, LE */
	.quad	UBOOT_IMAGE_FLAGS	/* Flags for kernel. LE */
	.quad	0			/* Reserved */
	.quad	0			/* Reserved */
	.quad	0			/* Reserved */
	.long	0x644d5241		/* Magic "ARM\x64", LE */
	.long	0			/* Reserved for PE COFF offset */
1:
#endif /* defined(LINUX_BOOT_ABI) */

	/* Drop to EL1 */
	bl	drop_to_el1

	/*
	 * Disable the MMU. We may have entered the kernel with it on and
	 * will need to update the tables later. If this has been set up
	 * with anything other than a VA == PA map then this will fail,
	 * but in this case the code to find where we are running from
	 * would have also failed.
	 */
	dsb	sy
	mrs	x2, sctlr_el1
	bic	x2, x2, SCTLR_M
	msr	sctlr_el1, x2
	isb

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Get the virt -> phys offset */
	bl	get_virt_delta

	/*
	 * At this point:
	 * x29 = PA - VA
	 * x28 = Our physical load address
	 */

	/* Create the page tables */
	bl	create_pagetables

	/*
	 * At this point:
	 * x27 = TTBR0 table
	 * x26 = Kernel L1 table
	 * x24 = TTBR1 table
	 */

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adr	x27, pagetable_l0_ttbr0

	/*
	 * Jump to the virtual address space. .Lvirtdone holds the linked
	 * (virtual) address of virtdone, so this indirect branch, unlike a
	 * PC-relative b, lands in the kernel's VA range.
	 */
	ldr	x15, .Lvirtdone
	br	x15

virtdone:
	/* Set up the stack */
	adr	x25, initstack_end
	mov	sp, x25
	sub	sp, sp, #PCB_SIZE

	/* Zero the BSS */
	ldr	x15, .Lbss
	ldr	x14, .Lend
1:
	str	xzr, [x15], #8
	cmp	x15, x14
	b.lo	1b

	/* Backup the module pointer */
	mov	x1, x0

	/* Make the page table base a virtual address */
	sub	x26, x26, x29
	sub	x24, x24, x29

	sub	sp, sp, #BOOTPARAMS_SIZE
	mov	x0, sp

	/* Negate the delta so it is VA -> PA */
	neg	x29, x29

	str	x1,  [x0, #BP_MODULEP]
	str	x26, [x0, #BP_KERN_L1PT]
	str	x29, [x0, #BP_KERN_DELTA]
	adr	x25, initstack
	str	x25, [x0, #BP_KERN_STACK]
	str	x24, [x0, #BP_KERN_L0PT]
	str	x27, [x0, #BP_KERN_TTBR0]
	str	x23, [x0, #BP_BOOT_EL]

	/* trace back starts here */
	mov	fp, #0
	/* Branch to C code */
	bl	initarm
	/* We are done with the boot params */
	add	sp, sp, #BOOTPARAMS_SIZE
	bl	mi_startup

	/* We should not get here */
	brk	0

	.align 3
.Lvirtdone:
	.quad	virtdone
.Lbss:
	.quad	__bss_start
.Lend:
	.quad	_end

#ifdef SMP
/*
 * mpentry(unsigned long)
 *
 * Called by a core when it is being brought online.
 * The data in x0 is passed straight to init_secondary.
 */
ENTRY(mpentry)
	/* Disable interrupts */
	msr	daifset, #2

	/* Drop to EL1 */
	bl	drop_to_el1

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Load the kernel page table */
	adr	x24, pagetable_l0_ttbr1
	/* Load the identity page table */
	adr	x27, pagetable_l0_ttbr0_bootstrap

	/* Enable the mmu */
	bl	start_mmu

	/* Load the new ttbr0 pagetable */
	adr	x27, pagetable_l0_ttbr0

	/* Jump to the virtual address space */
	ldr	x15, =mp_virtdone
	br	x15

mp_virtdone:
	/* Start using the AP boot stack */
	ldr	x4, =bootstack
	ldr	x4, [x4]
	mov	sp, x4

	/* Load the kernel ttbr0 pagetable */
	msr	ttbr0_el1, x27
	isb

	/* Invalidate the TLB */
	tlbi	vmalle1
	dsb	sy
	isb

	b	init_secondary
END(mpentry)
#endif

/*
 * If we are started in EL2, configure the required hypervisor
 * registers and drop to EL1.
 */
drop_to_el1:
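	/* CurrentEL reports the current exception level in bits [3:2] */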
	mrs	x23, CurrentEL
	lsr	x23, x23, #2
	cmp	x23, #0x2
	b.eq	1f
	ret
1:
	/* Configure the Hypervisor */
	mov	x2, #(HCR_RW)
	msr	hcr_el2, x2

	/* Load the Virtualization Process ID Register */
	mrs	x2, midr_el1
	msr	vpidr_el2, x2

	/* Load the Virtualization Multiprocess ID Register */
	mrs	x2, mpidr_el1
	msr	vmpidr_el2, x2

	/* Set the bits that need to be 1 in sctlr_el1 */
	ldr	x2, .Lsctlr_res1
	msr	sctlr_el1, x2

	/* Don't trap to EL2 for exceptions */
	mov	x2, #CPTR_RES1
	msr	cptr_el2, x2

	/* Don't trap to EL2 for CP15 traps */
	msr	hstr_el2, xzr

	/* Enable access to the physical timers at EL1 */
	mrs	x2, cnthctl_el2
	orr	x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
	msr	cnthctl_el2, x2

	/* Set the counter offset to a known value */
	msr	cntvoff_el2, xzr

	/* Hypervisor trap functions */
	adr	x2, hyp_vectors
	msr	vbar_el2, x2

	/* Mask exceptions and select EL1h for when we eret below */
	mov	x2, #(PSR_F | PSR_I | PSR_A | PSR_D | PSR_M_EL1h)
	msr	spsr_el2, x2

	/* Configure GICv3 CPU interface */
	mrs	x2, id_aa64pfr0_el1
	/* Extract GIC bits from the register */
	ubfx	x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
	/* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
	cmp	x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
	b.ne	2f

	mrs	x2, icc_sre_el2
	orr	x2, x2, #ICC_SRE_EL2_EN	/* Enable access from non-secure EL1 */
	orr	x2, x2, #ICC_SRE_EL2_SRE	/* Enable system registers */
	msr	icc_sre_el2, x2
2:

	/* Set the exception return address; eret returns to our caller at EL1 */
	msr	elr_el2, x30
	isb

	eret

	.align 3
.Lsctlr_res1:
	.quad SCTLR_RES1

#define	VECT_EMPTY	\
	.align 7;	\
	1:	b	1b
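/* Each vector table slot is 2^7 = 128 bytes, hence the .align 7 above */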

	.align 11
hyp_vectors:
	VECT_EMPTY	/* Synchronous EL2t */
	VECT_EMPTY	/* IRQ EL2t */
	VECT_EMPTY	/* FIQ EL2t */
	VECT_EMPTY	/* Error EL2t */

	VECT_EMPTY	/* Synchronous EL2h */
	VECT_EMPTY	/* IRQ EL2h */
	VECT_EMPTY	/* FIQ EL2h */
	VECT_EMPTY	/* Error EL2h */

	VECT_EMPTY	/* Synchronous 64-bit EL1 */
	VECT_EMPTY	/* IRQ 64-bit EL1 */
	VECT_EMPTY	/* FIQ 64-bit EL1 */
	VECT_EMPTY	/* Error 64-bit EL1 */

	VECT_EMPTY	/* Synchronous 32-bit EL1 */
	VECT_EMPTY	/* IRQ 32-bit EL1 */
	VECT_EMPTY	/* FIQ 32-bit EL1 */
	VECT_EMPTY	/* Error 32-bit EL1 */
/*
 * Get the delta between the physical address we were loaded to and the
 * virtual address we expect to run from. This is used when building the
 * initial page table.
 */
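/*
 * A worked example with made-up addresses: if virt_map below is linked
 * at VA 0xffff000000000800 but adr reads its run-time location as
 * PA 0x0000000040000800, then x29 = PA - VA, and any linked virtual
 * address VA' can be rebased with PA' = VA' + x29; in particular the
 * kernel's load address is x28 = KERNBASE + x29.
 */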
get_virt_delta:
	/* Load the physical address of virt_map */
	adr	x29, virt_map
	/* Load the virtual address of virt_map stored in virt_map */
	ldr	x28, [x29]
	/* Find PA - VA as PA' = VA' - VA + PA = VA' + (PA - VA) = VA' + x29 */
	sub	x29, x29, x28
	/* Find the load address for the kernel */
	mov	x28, #(KERNBASE)
	add	x28, x28, x29
	ret

	.align 3
virt_map:
	.quad	virt_map

/*
 * This builds the page tables containing the identity map, and the kernel
 * virtual map.
 *
 * It relies on:
 *  We were loaded to an address that is on a 2MiB boundary
 *  All the memory must not cross a 1GiB boundary
 *  x28 contains the physical address we were loaded from
 *
 * TODO: This is out of date.
 *  There are at least 5 pages before that address for the page tables
 *   The pages used are:
 *    - The Kernel L2 table
 *    - The Kernel L1 table
 *    - The Kernel L0 table             (TTBR1)
 *    - The identity (PA = VA) L1 table
 *    - The identity (PA = VA) L0 table (TTBR0)
 *    - The DMAP L1 tables
 */
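/*
 * For reference, with 4KiB granules a 48-bit VA is decoded as:
 *   VA[47:39] = L0 index, VA[38:30] = L1 index, VA[29:21] = L2 index,
 *   VA[20:0]  = offset within a 2MiB L2 block,
 * so an L2 block entry maps 2MiB, an L1 entry 1GiB and an L0 entry
 * 512GiB.
 */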
create_pagetables:
	/* Save the Link register */
	mov	x5, x30

	/* Clean the page table */
	adr	x6, pagetable
	mov	x26, x6
	adr	x27, pagetable_end
1:
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	cmp	x6, x27
	b.lo	1b

	/*
	 * Build the TTBR1 maps.
	 */

	/* Find the size of the kernel */
	mov	x6, #(KERNBASE)

#if defined(LINUX_BOOT_ABI)
	/* X19 is used as 'map FDT data' flag */
	mov	x19, xzr

	/* No modules or FDT pointer ? */
	cbz	x0, booti_no_fdt

	/*
	 * Test if x0 points to modules descriptor (virtual address) or
	 * to FDT (physical address)
	 */
	cmp	x0, x6		/* x6 is #(KERNBASE) */
	b.lo	booti_fdt
#endif

	/* Booted with modules pointer */
	/* Find modulep - begin */
	sub	x8, x0, x6
	/* Add two 2MiB pages for the module data and round up */
	ldr	x7, =(3 * L2_SIZE - 1)
	add	x8, x8, x7
	b	common

#if defined(LINUX_BOOT_ABI)
booti_fdt:
	/* Booted by U-Boot booti with FDT data */
	/* Set 'map FDT data' flag */
	mov	x19, #1

booti_no_fdt:
	/* Booted by U-Boot booti without FDT data */
	/* Find the end - begin */
	ldr	x7, .Lend
	sub	x8, x7, x6

	/*
	 * Add one 2MiB page for copy of FDT data (maximum FDT size),
	 * one for metadata and round up
	 */
	ldr	x7, =(3 * L2_SIZE - 1)
	add	x8, x8, x7
#endif

common:
	/* Get the number of l2 pages to allocate, rounded down */
	lsr	x10, x8, #(L2_SHIFT)

	/* Create the kernel space L2 table */
	mov	x6, x26
	mov	x7, #VM_MEMATTR_WRITE_BACK
	mov	x8, #(KERNBASE & L2_BLOCK_MASK)
	mov	x9, x28
	bl	build_l2_block_pagetable

	/* Move to the l1 table */
	add	x26, x26, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x24, x26, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x24
	mov	x10, #1
	bl	link_l0_pagetable

	/* Link the DMAP tables */
	ldr	x8, =DMAP_MIN_ADDRESS
	adr	x9, pagetable_dmap
	mov	x10, #DMAP_TABLES
	bl	link_l0_pagetable

	/*
	 * Build the TTBR0 maps.  As TTBR0 maps, they must specify ATTR_S1_nG.
	 * They are only needed early on, while we transition to the kernel's
	 * virtual address space.
	 */
	add	x27, x24, #PAGE_SIZE

	mov	x6, x27		/* The initial page table */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	/* Create a table for the UART */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE))
	mov	x8, #(SOCDEV_VA)	/* VA start */
	mov	x9, #(SOCDEV_PA)	/* PA start */
	mov	x10, #1
	bl	build_l1_block_pagetable
#endif

#if defined(LINUX_BOOT_ABI)
	/* Map FDT data ? */
	cbz	x19, 1f

	/* Create the identity mapping for FDT data (2 MiB max) */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x9, x0
	mov	x8, x0		/* VA start (== PA start) */
	mov	x10, #1
	bl	build_l1_block_pagetable

1:
#endif

	/* Create the VA = PA map */
	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK))
	mov	x9, x27
	mov	x8, x9		/* VA start (== PA start) */
	mov	x10, #1
	bl	build_l1_block_pagetable

	/* Move to the l0 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x27
	mov	x10, #1
	bl	link_l0_pagetable

	/* Restore the Link register */
	mov	x30, x5
	ret

/*
 * Builds an L0 -> L1 table descriptor
 *
 * This is a link for a 512GiB block of memory with up to 1GiB regions mapped
 * within it by build_l1_block_pagetable.
 *
 *  x6  = L0 table
 *  x8  = Virtual Address
 *  x9  = L1 PA (trashed)
 *  x10 = Entry count
 *  x11, x12 and x13 are trashed
 */
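/*
 * Descriptor sketch: a next-level table entry is the L1 table's page
 * aligned physical address with the low two bits set to 0b11 (L0_TABLE),
 * stored at index VA[47:39] of the L0 table.
 */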
link_l0_pagetable:
	/*
	 * Link an L0 -> L1 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L0_SHIFT
	and	x11, x11, #L0_ADDR_MASK

	/* Build the L0 block entry */
	mov	x12, #L0_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
1:	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret

/*
 * Builds an L1 -> L2 table descriptor
 *
 * This is a link for a 1GiB block of memory with up to 2MiB regions mapped
 * within it by build_l2_block_pagetable.
 *
 *  x6  = L1 table
 *  x8  = Virtual Address
 *  x9  = L2 PA (trashed)
 *  x11, x12 and x13 are trashed
 */
link_l1_pagetable:
	/*
	 * Link an L1 -> L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L1_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L1 block entry */
	mov	x12, #L1_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret

/*
 * Builds count (x10) 1 GiB block page table entries
 *  x6  = L1 table
 *  x7  = Variable lower block attributes
 *  x8  = VA start
 *  x9  = PA start (trashed)
 *  x10 = Entry count
 *  x11, x12 and x13 are trashed
 */
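/*
 * Descriptor sketch: a 1GiB block entry is the 1GiB-aligned output
 * address merged with L1_BLOCK (low bits 0b01), ATTR_DEFAULT and the
 * caller's attributes in x7, stored at index VA[38:30] of the L1 table.
 */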
build_l1_block_pagetable:
	/*
	 * Build the L1 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L1_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L1 block entry */
	orr	x12, x7, #L1_BLOCK
	orr	x12, x12, #(ATTR_DEFAULT)

	/* Only use the output address bits */
	lsr	x9, x9, #L1_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L1_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret

/*
 * Builds count (x10) 2 MiB block page table entries
 *  x6  = L2 table
 *  x7  = Memory attribute index (a VM_MEMATTR_* value)
 *  x8  = VA start
 *  x9  = PA start (trashed)
 *  x10 = Entry count
 *  x11, x12 and x13 are trashed
 */
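/*
 * Same shape as build_l1_block_pagetable but at 2MiB granularity; x7 is
 * shifted into the descriptor's AttrIndx field, and the blocks also get
 * ATTR_S1_UXN so kernel mappings are never executable from EL0.
 */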
build_l2_block_pagetable:
	/*
	 * Build the L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 block entry */
	lsl	x12, x7, #2
	orr	x12, x12, #L2_BLOCK
	orr	x12, x12, #(ATTR_DEFAULT)
	orr	x12, x12, #(ATTR_S1_UXN)

	/* Only use the output address bits */
	lsr	x9, x9, #L2_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L2_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret

start_mmu:
	dsb	sy

	/* Load the exception vectors */
	ldr	x2, =exception_vectors
	msr	vbar_el1, x2

	/* Load ttbr0 and ttbr1 */
	msr	ttbr0_el1, x27
	msr	ttbr1_el1, x24
	isb

	/* Clear the Monitor Debug System control register */
	msr	mdscr_el1, xzr

	/* Invalidate the TLB */
	tlbi	vmalle1is
	dsb	ish
	isb

	ldr	x2, mair
	msr	mair_el1, x2

	/*
	 * Setup TCR according to the PARange and ASIDBits fields
	 * from ID_AA64MMFR0_EL1 and the HAFDBS field from the
	 * ID_AA64MMFR1_EL1.  More precisely, set TCR_EL1.AS
	 * to 1 only if the ASIDBits field equals 0b0010.
	 */
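	/*
	 * For example, TCR_EL1.IPS uses the same encoding as PARange, so a
	 * CPU reporting PARange = 0b0101 (48-bit PA) gets IPS = 0b101 from
	 * the bfi below.
	 */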
	ldr	x2, tcr
	mrs	x3, id_aa64mmfr0_el1

	/* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */
	bfi	x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH)
	and	x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK)

	/* Check if the HW supports 16 bit ASIDS */
	cmp	x3, #(ID_AA64MMFR0_ASIDBits_16)
	/* If so x3 == 1, else x3 == 0 */
	cset	x3, eq
	/* Set TCR.AS with x3 */
	bfi	x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH)

	/*
	 * Check if the HW supports access flag and dirty state updates,
	 * and set TCR_EL1.HA and TCR_EL1.HD accordingly.
	 */
	mrs	x3, id_aa64mmfr1_el1
	and	x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK)
	cmp	x3, #1
	b.ne	1f
	orr	x2, x2, #(TCR_HA)
	b	2f
1:
	cmp	x3, #2
	b.ne	2f
	orr	x2, x2, #(TCR_HA | TCR_HD)
2:
	msr	tcr_el1, x2

	/*
	 * Setup SCTLR.
	 */
	ldr	x2, sctlr_set
	ldr	x3, sctlr_clear
	mrs	x1, sctlr_el1
	bic	x1, x1, x3	/* Clear the required bits */
	orr	x1, x1, x2	/* Set the required bits */
	msr	sctlr_el1, x1
	isb

	ret

	.align 3
mair:
	.quad	MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE)    |	\
		MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE)   |	\
		MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK)    |	\
		MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH)
tcr:
	.quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG1_4K | \
	    TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
sctlr_set:
	/* Bits to set */
	.quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
	    SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
	    SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \
	    SCTLR_M | SCTLR_CP15BEN)
sctlr_clear:
	/* Bits to clear */
	.quad (SCTLR_EE | SCTLR_EOE | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
	    SCTLR_ITD | SCTLR_A)

	.globl abort
abort:
	b abort

	//.section .init_pagetable
	.align 12 /* 4KiB aligned */
	/*
	 * 6 initial tables (in the following order):
	 *           L2 for kernel (High addresses)
	 *           L1 for kernel
	 *           L0 for kernel
	 *           L1 bootstrap for user   (Low addresses)
	 *           L0 bootstrap for user
	 *           L0 for user
	 */
pagetable:
	.space	PAGE_SIZE
pagetable_l1_ttbr1:
	.space	PAGE_SIZE
pagetable_l0_ttbr1:
	.space	PAGE_SIZE
pagetable_l1_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0_bootstrap:
	.space	PAGE_SIZE
pagetable_l0_ttbr0:
	.space	PAGE_SIZE

	.globl pagetable_dmap
pagetable_dmap:
	.space	PAGE_SIZE * DMAP_TABLES
pagetable_end:

el2_pagetable:
	.space	PAGE_SIZE

	.globl init_pt_va
init_pt_va:
	.quad pagetable		/* XXX: Keep page tables VA */

	.align	4
initstack:
	.space	(PAGE_SIZE * KSTACK_PAGES)
initstack_end:

ENTRY(sigcode)
	mov	x0, sp
	add	x0, x0, #SF_UC

1:
	mov	x8, #SYS_sigreturn
	svc	0

	/* sigreturn failed, exit */
	mov	x8, #SYS_exit
	svc	0

	b	1b
END(sigcode)
	/* This may be copied to the stack, keep it 16-byte aligned */
	.align	3
esigcode:

	.data
	.align	3
	.global	szsigcode
szsigcode:
	.quad	esigcode - sigcode

ENTRY(aarch32_sigcode)
	.word 0xe1a0000d	// mov r0, sp
	.word 0xe2800040	// add r0, r0, #SIGF_UC
	.word 0xe59f700c	// ldr r7, [pc, #12]
	.word 0xef000000	// swi #0
	.word 0xe59f7008	// ldr r7, [pc, #8]
	.word 0xef000000	// swi #0
	.word 0xeafffffa	// b . - 16
END(aarch32_sigcode)
	.word SYS_sigreturn
	.word SYS_exit
	.align	3
aarch32_esigcode:
	.data
	.global sz_aarch32_sigcode
sz_aarch32_sigcode:
	.quad aarch32_esigcode - aarch32_sigcode
855