/*
 * xref: /linux/arch/riscv/kernel/head.S
 * (revision 37744feebc086908fd89760650f458ab19071750)
 */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/image.h>

__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!!
	 */
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
	.balign 8
#if __riscv_xlen == 64
	/* Image load offset(2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset(4MB) from start of RAM */
	.dword 0x400000
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	/* Kernel feature flags, see asm/image.h */
	.dword __HEAD_FLAGS
	/* Header version number */
	.word RISCV_HEADER_VERSION
	/* Reserved */
	.word 0
	/* Reserved */
	.dword 0
	/* Magic number 1 */
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	/* Magic number 2 */
	.ascii RISCV_IMAGE_MAGIC2
	/* Reserved */
	.word 0

.align 2
#ifdef CONFIG_MMU
/*
 * relocate: turn on the MMU and continue execution at the kernel's
 * virtual addresses.
 *
 * In:       a0 = physical address of the page directory to install
 *           ra = physical return address (rewritten to virtual here)
 * Out:      returns to the caller at its virtual address, with satp
 *           pointing at the page table passed in a0
 * Clobbers: a0, a1, a2, stvec, satp
 */
relocate:
	/* Relocate return address: add (VA - PA) of the kernel image */
	li a1, PAGE_OFFSET
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	li a1, SATP_MODE
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA.  We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer (now a virtual address) */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables.  A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage.  Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	.global secondary_start_sbi
/*
 * Entry point for secondary harts brought up through SBI.
 * In: a0 = hartid (used below to index the per-hart bring-up arrays
 *     __cpu_up_stack_pointer / __cpu_up_task_pointer)
 */
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
		la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* Load this hart's stack and task pointers, published by the boot hart */
	slli a3, a0, LGREG
	la a4, __cpu_up_stack_pointer
	la a5, __cpu_up_task_pointer
	add a4, a3, a4
	add a5, a3, a5
	REG_L sp, (a4)
	REG_L tp, (a5)

	.global secondary_start_common
secondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	call relocate
#endif
	tail smp_callin
#endif /* CONFIG_SMP */

.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)
150
151	__INIT
152ENTRY(_start_kernel)
153	/* Mask all interrupts */
154	csrw CSR_IE, zero
155	csrw CSR_IP, zero
156
157#ifdef CONFIG_RISCV_M_MODE
158	/* flush the instruction cache */
159	fence.i
160
161	/* Reset all registers except ra, a0, a1 */
162	call reset_regs
163
164	/* Setup a PMP to permit access to all of memory. */
165	li a0, -1
166	csrw CSR_PMPADDR0, a0
167	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
168	csrw CSR_PMPCFG0, a0
169
170	/*
171	 * The hartid in a0 is expected later on, and we have no firmware
172	 * to hand it to us.
173	 */
174	csrr a0, CSR_MHARTID
175#endif /* CONFIG_RISCV_M_MODE */
176
177	/* Load the global pointer */
178.option push
179.option norelax
180	la gp, __global_pointer$
181.option pop
182
183	/*
184	 * Disable FPU to detect illegal usage of
185	 * floating point in kernel space
186	 */
187	li t0, SR_FS
188	csrc CSR_STATUS, t0
189
190#ifdef CONFIG_SMP
191	li t0, CONFIG_NR_CPUS
192	blt a0, t0, .Lgood_cores
193	tail .Lsecondary_park
194.Lgood_cores:
195#endif
196
197	/* Pick one hart to run the main boot sequence */
198	la a3, hart_lottery
199	li a2, 1
200	amoadd.w a3, a2, (a3)
201	bnez a3, .Lsecondary_start
202
203	/* Clear BSS for flat non-ELF images */
204	la a3, __bss_start
205	la a4, __bss_stop
206	ble a4, a3, clear_bss_done
207clear_bss:
208	REG_S zero, (a3)
209	add a3, a3, RISCV_SZPTR
210	blt a3, a4, clear_bss
211clear_bss_done:
212
213	/* Save hart ID and DTB physical address */
214	mv s0, a0
215	mv s1, a1
216	la a2, boot_cpu_hartid
217	REG_S a0, (a2)
218
219	/* Initialize page tables and relocate to virtual addresses */
220	la sp, init_thread_union + THREAD_SIZE
221	mv a0, s1
222	call setup_vm
223#ifdef CONFIG_MMU
224	la a0, early_pg_dir
225	call relocate
226#endif /* CONFIG_MMU */
227
228	/* Restore C environment */
229	la tp, init_task
230	sw zero, TASK_TI_CPU(tp)
231	la sp, init_thread_union + THREAD_SIZE
232
233#ifdef CONFIG_KASAN
234	call kasan_early_init
235#endif
236	/* Start the kernel */
237	call soc_early_init
238	call parse_dtb
239	tail start_kernel
240
241.Lsecondary_start:
242#ifdef CONFIG_SMP
243	/* Set trap vector to spin forever to help debug */
244	la a3, .Lsecondary_park
245	csrw CSR_TVEC, a3
246
247	slli a3, a0, LGREG
248	la a1, __cpu_up_stack_pointer
249	la a2, __cpu_up_task_pointer
250	add a1, a3, a1
251	add a2, a3, a2
252
253	/*
254	 * This hart didn't win the lottery, so we wait for the winning hart to
255	 * get far enough along the boot process that it should continue.
256	 */
257.Lwait_for_cpu_up:
258	/* FIXME: We should WFI to save some energy here. */
259	REG_L sp, (a1)
260	REG_L tp, (a2)
261	beqz sp, .Lwait_for_cpu_up
262	beqz tp, .Lwait_for_cpu_up
263	fence
264
265	tail secondary_start_common
266#endif
267
268END(_start_kernel)
269
#ifdef CONFIG_RISCV_M_MODE
/*
 * reset_regs: zero the integer register file (and the FP register file
 * when the F/D extensions are present) for a firmware-less M-mode boot.
 * Preserves: ra, a0, a1 (return address and the hartid/DTB handover).
 * Note: if the FP path runs, SR_FS is left set on return — the caller
 * must clear SR_FS itself (see the note at the end of the FP block).
 */
ENTRY(reset_regs)
	li	sp, 0
	li	gp, 0
	li	tp, 0
	li	t0, 0
	li	t1, 0
	li	t2, 0
	li	s0, 0
	li	s1, 0
	li	a2, 0
	li	a3, 0
	li	a4, 0
	li	a5, 0
	li	a6, 0
	li	a7, 0
	li	s2, 0
	li	s3, 0
	li	s4, 0
	li	s5, 0
	li	s6, 0
	li	s7, 0
	li	s8, 0
	li	s9, 0
	li	s10, 0
	li	s11, 0
	li	t3, 0
	li	t4, 0
	li	t5, 0
	li	t6, 0
	csrw	CSR_SCRATCH, 0

#ifdef CONFIG_FPU
	/* Skip the FP clear entirely if the ISA has neither F nor D */
	csrr	t0, CSR_MISA
	andi	t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz	t0, .Lreset_regs_done

	/* Enable FP access so the fmv/fcsr writes below don't trap */
	li	t1, SR_FS
	csrs	CSR_STATUS, t1
	fmv.s.x	f0, zero
	fmv.s.x	f1, zero
	fmv.s.x	f2, zero
	fmv.s.x	f3, zero
	fmv.s.x	f4, zero
	fmv.s.x	f5, zero
	fmv.s.x	f6, zero
	fmv.s.x	f7, zero
	fmv.s.x	f8, zero
	fmv.s.x	f9, zero
	fmv.s.x	f10, zero
	fmv.s.x	f11, zero
	fmv.s.x	f12, zero
	fmv.s.x	f13, zero
	fmv.s.x	f14, zero
	fmv.s.x	f15, zero
	fmv.s.x	f16, zero
	fmv.s.x	f17, zero
	fmv.s.x	f18, zero
	fmv.s.x	f19, zero
	fmv.s.x	f20, zero
	fmv.s.x	f21, zero
	fmv.s.x	f22, zero
	fmv.s.x	f23, zero
	fmv.s.x	f24, zero
	fmv.s.x	f25, zero
	fmv.s.x	f26, zero
	fmv.s.x	f27, zero
	fmv.s.x	f28, zero
	fmv.s.x	f29, zero
	fmv.s.x	f30, zero
	fmv.s.x	f31, zero
	csrw	fcsr, 0
	/* note that the caller must clear SR_FS */
#endif /* CONFIG_FPU */
.Lreset_regs_done:
	ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */
348
__PAGE_ALIGNED_BSS
	/* Empty zero page */
	.balign PAGE_SIZE
