/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2018 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 * Copyright (c) 2019-2021 Mitchell Horne <mhorne@FreeBSD.org>
 * Copyright (c) 2022-2024 The FreeBSD Foundation
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by the University of Cambridge
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
 *
 * Portions of this software were developed by Mitchell Horne
 * <mhorne@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "assym.inc"

#include <machine/asm.h>
#include <machine/cpu.h>
#include <machine/param.h>
#include <machine/pte.h>
#include <machine/riscvreg.h>
#include <machine/sbi.h>

	.globl	kernbase
	.set	kernbase, KERNBASE

	.text
/*
 * Alternate entry point. Used when booting via SBI firmware. It must be placed
 * at the beginning of the .text section. Arguments are as follows:
 *  - a0 = hart ID
 *  - a1 = dtbp
 *
 * Multiple CPUs might enter from this point, so we perform a hart lottery and
 * send the losers to mpentry.
 */
	.globl _alt_start
_alt_start:
	/* Set the global pointer */
.option push
.option norelax
	lla	gp, __global_pointer$
.option pop

	/* Pick a hart to run the boot process. */
	lla	t0, hart_lottery
	li	t1, 1
	amoadd.w t0, t1, 0(t0)
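	/* amoadd.w returns the old value: zero only for the winning hart. */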

	/*
	 * We must jump to mpentry in the non-BSP case because the offset is
	 * too large to fit in a 12-bit branch immediate.
	 */
	beqz	t0, 1f
	j	mpentry
1:
	/* Store the boot hart */
	lla	t0, boot_hart
	sw	a0, 0(t0)

	/*
	 * Stash the DTB pointer in the callee-saved register s4, and zero s3
	 * to indicate that we have no loader metadata.
	 */
	mv	s4, a1
	mv	s3, zero
	j	pagetables

/*
 * Main entry point. This routine is marked as the ELF entry, and is where
 * loader(8) will enter the kernel. Arguments are as follows:
 *  - a0 = modulep
 *  - a1 = ???
 *
 * It is expected that only a single CPU will enter here.
 */
	.globl _start
_start:
	/* Set the global pointer */
.option push
.option norelax
	lla	gp, __global_pointer$
.option pop

	/*
	 * Stash modulep in the callee-saved register s3, and zero s4 to
	 * indicate that we have no DTB pointer. It is already included in the
	 * loader(8) metadata.
	 */
	mv	s3, a0
	mv	s4, zero

	/*
	 * Set up page tables: Our goal is to enable virtual memory, doing the
	 * minimum amount of work in assembly; just what is required to
	 * bootstrap. We will construct the real page tables in C code, in
	 * pmap_bootstrap().
	 *
	 * Here we map a 1GB region starting at KERNBASE using 2MB superpages,
	 * starting from the first 2MB physical page into which the kernel was
	 * loaded.
	 *
	 * We also use an L1 entry to create a 1GB identity map (1:1 PA->VA).
	 * This is useful for two reasons:
	 *  - handling the DTB pointer passed from SBI firmware (physical addr)
	 *  - simpler construction of pagetables in pmap_bootstrap()
	 *
	 * Implementations are required to provide Sv39 mode, so we use that
	 * here and will conditionally enable Sv48 (or higher) later.
	 *
	 * We arrive here with:
	 *  s3 - modulep or zero
	 *  s4 - zero or dtbp
	 */
pagetables:
	/* Get the kernel's load address (kernstart) in s9 */
	jal	get_physmem

	/* Get PTE attribute bits in s8 */
	jal	get_pte_fixup_bits

	/* Construct 1GB Identity Map (1:1 PA->VA) */
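	/*
	 * A single L1 leaf PTE maps 1GB. kernstart >> L1_SHIFT yields both
	 * the L1 table index (its low nine bits) and, shifted into PPN[2],
	 * the PTE's physical page number.
	 */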
	lla	s1, bootstrap_pt_l1

	srli	s2, s9, L1_SHIFT	/* kernstart >> L1_SHIFT */
	andi	a5, s2, Ln_ADDR_MASK	/* & Ln_ADDR_MASK */
	li	t4, (PTE_KERN)
	or	t4, t4, s8		/* t4 |= pte bits */
	slli	s2, s2, PTE_PPN2_S	/* (s2 << PTE_PPN2_S) */
	or	t6, t4, s2

	/* Store L1 PTE entry to position */
	li	a6, PTE_SIZE
	mulw	a5, a5, a6		/* calculate L1 slot */
	add	t0, s1, a5
	sd	t6, (t0)		/* Store new PTE */

	/* Construct the virtual address space at KERNBASE */

	/* Add L1 entry for kernel */
	lla	s1, bootstrap_pt_l1
	lla	s2, bootstrap_pt_l2	/* Link to next level PN */
	srli	s2, s2, PAGE_SHIFT

	li	a5, KERNBASE
	srli	a5, a5, L1_SHIFT	/* >> L1_SHIFT */
	andi	a5, a5, Ln_ADDR_MASK	/* & Ln_ADDR_MASK */
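	/* A PTE with only PTE_V set (no R/W/X) points to the next level. */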
	li	t4, PTE_V
	slli	t5, s2, PTE_PPN0_S	/* (s2 << PTE_PPN0_S) */
	or	t6, t4, t5

	/* Store L1 PTE entry to position */
	li	a6, PTE_SIZE
	mulw	a5, a5, a6
	add	t0, s1, a5
	sd	t6, (t0)

	/* Level 2 superpages (512 x 2MiB) */
	lla	s1, bootstrap_pt_l2
	srli	t4, s9, L2_SHIFT	/* Div physmem base by 2 MiB */
	li	t2, Ln_ENTRIES		/* Build 512 entries */
	add	t3, t4, t2
	li	t0, (PTE_KERN | PTE_X)
	or	t0, t0, s8		/* t0 |= pte bits */
1:
	slli	t2, t4, PTE_PPN1_S	/* << PTE_PPN1_S */
	or	t5, t0, t2
	sd	t5, (s1)		/* Store PTE entry to position */
	addi	s1, s1, PTE_SIZE

	addi	t4, t4, 1
	bltu	t4, t3, 1b

	/* Page tables END */

	/*
	 * Set the supervisor trap vector temporarily. Enabling virtual memory
	 * may generate a page fault. We simply wish to continue onwards, so
	 * have the trap deliver us to 'va'.
	 */
	lla	t0, va
	sub	t0, t0, s9
	li	t1, KERNBASE
	add	t0, t0, t1
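	/*
	 * t0 = KERNBASE + (va - kernstart), the address 'va' will have once
	 * translation is enabled.
	 */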
	csrw	stvec, t0

	/* Set page tables base register */
	lla	s2, bootstrap_pt_l1
	srli	s2, s2, PAGE_SHIFT
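	/* satp takes the root table's PPN, not its address, hence the shift. */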
	li	t0, SATP_MODE_SV39
	or	s2, s2, t0
	sfence.vma
	csrw	satp, s2

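	/* stvec targets must be 4-byte aligned; its low two bits hold the mode. */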
	.align 2
va:
	/* Set the global pointer again, this time with the virtual address. */
.option push
.option norelax
	lla	gp, __global_pointer$
.option pop

	/* Set the trap vector to the real handler. */
	la	t0, cpu_exception_handler
	csrw	stvec, t0

	/* Ensure sscratch is zero */
	li	t0, 0
	csrw	sscratch, t0

	/* Initialize stack pointer */
	la	sp, initstack_end

	/* Clear frame pointer */
	mv	s0, zero

	/* Allocate space for thread0 PCB and riscv_bootparams */
	addi	sp, sp, -(PCB_SIZE + RISCV_BOOTPARAMS_SIZE) & ~STACKALIGNBYTES
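	/* The mask keeps sp 16-byte aligned, as the psABI requires. */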

	/* Clear BSS */
	la	t0, _C_LABEL(__bss_start)
	la	t1, _C_LABEL(_end)
1:
	sd	zero, 0(t0)
	addi	t0, t0, 8
	bltu	t0, t1, 1b

	/* Fill riscv_bootparams */
	sd	s9, RISCV_BOOTPARAMS_KERN_PHYS(sp)

	la	t0, initstack
	sd	t0, RISCV_BOOTPARAMS_KERN_STACK(sp)
	sd	s4, RISCV_BOOTPARAMS_DTBP_PHYS(sp)
	sd	s3, RISCV_BOOTPARAMS_MODULEP(sp)

	mv	a0, sp
	call	_C_LABEL(initriscv)	/* Off we go */
	call	_C_LABEL(mi_startup)

	/* We should never reach here, but if so just hang. */
2:
	wfi
	j	2b

/*
 * Get the physical address the kernel is loaded to. Returned in s9.
 */
get_physmem:
	lla	t0, virt_map	/* physical address of virt_map */
	ld	t1, 0(t0)	/* virtual address of virt_map */
	sub	t1, t1, t0	/* calculate phys->virt delta */
	li	t2, KERNBASE
	sub	s9, t2, t1	/* s9 = physmem base */
	ret

/*
 * T-HEAD CPUs implement an alternate scheme for PTE attributes that is
 * incompatible with the RISC-V PTE specification (see the definitions in
 * pte.h). Worse, it defines a non-zero value for "main" memory, and this must
 * be set in order to proceed with our new page tables.
 *
 * Therefore, we are forced to check the CPU identity here, which is both
 * inconvenient and fragile.
 *
 * Return the required attribute bits in s8. For sane implementations this is
 * zero.
 */
get_pte_fixup_bits:
	mv	s8, zero
	SBI_CALL(SBI_EXT_ID_BASE, SBI_BASE_GET_MVENDORID)
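	/* SBI returns (error, value) in (a0, a1); a1 holds the mvendorid. */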
	li	t0, MVENDORID_THEAD
	xor	t0, t0, a1
	bnez	t0, 1f		/* branch if a1 != t0 */
	li	s8, PTE_THEAD_MA_NONE
1:
	ret

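	/* .align 4 gives 16-byte alignment, matching the psABI stack alignment. */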
	.align  4
initstack:
	.space  (PAGE_SIZE * KSTACK_PAGES)
initstack_end:

/*
 * Static space for the bootstrap page tables. Unused after pmap_bootstrap().
 */
	.balign	PAGE_SIZE
bootstrap_pt_l1:
	.space	PAGE_SIZE
bootstrap_pt_l2:
	.space	PAGE_SIZE

	.align 3
virt_map:
	.quad   virt_map
hart_lottery:
	.space	4

#ifndef SMP
ENTRY(mpentry)
1:
	wfi
	j	1b
END(mpentry)
#else
/*
 * mpentry(unsigned long)
 *
 * Called by a core when it is being brought online.
 */
ENTRY(mpentry)
	/*
	 * Calculate the offset to __riscv_boot_ap for the current core;
	 * cpuid is in a0.
	 */
	li	t1, 4
	mulw	t1, t1, a0
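	/* t1 = cpuid * sizeof(uint32_t), the offset of this CPU's flag. */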
	/* Get the pointer */
	lla	t0, __riscv_boot_ap
	add	t0, t0, t1

1:
	/* Wait for the kernel to be ready */
	lw	t1, 0(t0)
	beqz	t1, 1b

	/* Set up the stack pointer */
	lla	t0, bootstack
	ld	sp, 0(t0)

	/* Get the kernel's load address */
	jal	get_physmem

	/*
	 * Set the supervisor trap vector temporarily. Enabling virtual memory
	 * may generate a page fault. We simply wish to continue onwards, so
	 * have the trap deliver us to 'mpva'.
	 */
	lla	t0, mpva
	sub	t0, t0, s9
	li	t1, KERNBASE
	add	t0, t0, t1
	csrw	stvec, t0

	/* Set page tables base register */
	lla	t2, kernel_pmap_store
	ld	s2, PM_SATP(t2)
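	/* pm_satp already encodes the paging mode and the root table's PPN. */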
	sfence.vma
	csrw	satp, s2

	.align 2
mpva:
	/* Set the global pointer again, this time with the virtual address. */
.option push
.option norelax
	lla	gp, __global_pointer$
.option pop

	/* Set the trap vector to the real handler. */
	la	t0, cpu_exception_handler
	csrw	stvec, t0

	/* Ensure sscratch is zero */
	li	t0, 0
	csrw	sscratch, t0

	call	init_secondary
END(mpentry)
#endif