xref: /linux/arch/riscv/kernel/entry.S (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative-macros.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>

	.section .irqentry.text, "ax"

.macro new_vmalloc_check
	REG_S	a0, TASK_TI_A0(tp)
	csrr	a0, CSR_CAUSE
	/* Exclude IRQs */
	blt	a0, zero, .Lnew_vmalloc_restore_context_a0

	REG_S	a1, TASK_TI_A1(tp)
	/* Only check new_vmalloc if we are in a page/protection fault */
	li	a1, EXC_LOAD_PAGE_FAULT
	beq	a0, a1, .Lnew_vmalloc_kernel_address
	li	a1, EXC_STORE_PAGE_FAULT
	beq	a0, a1, .Lnew_vmalloc_kernel_address
	li	a1, EXC_INST_PAGE_FAULT
	bne	a0, a1, .Lnew_vmalloc_restore_context_a1

.Lnew_vmalloc_kernel_address:
	/* Is it a kernel address? */
	csrr	a0, CSR_TVAL
	bge	a0, zero, .Lnew_vmalloc_restore_context_a1

	/* Check if a new vmalloc mapping appeared that could explain the trap */
	REG_S	a2, TASK_TI_A2(tp)
	/*
	 * Computes:
	 * a0 = &new_vmalloc[BIT_WORD(cpu)]
	 * a1 = BIT_MASK(cpu)
	 * (see the C sketch after this macro)
	 */
	lw	a2, TASK_TI_CPU(tp)
	/*
	 * Compute the new_vmalloc element position:
	 * (cpu / 64) * 8 = (cpu >> 6) << 3
	 */
	srli	a1, a2, 6
	slli	a1, a1, 3
	la	a0, new_vmalloc
	add	a0, a0, a1
	/*
	 * Compute the bit position in the new_vmalloc element:
	 * bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - ((cpu >> 6) << 6)
	 *         = cpu - (((cpu >> 6) << 3) << 3)
	 */
	slli	a1, a1, 3
	sub	a1, a2, a1
	/* Compute the mask for this cpu's bit: 1 << bit_pos */
	li	a2, 1
	sll	a1, a2, a1

	/* Check the value of new_vmalloc for this cpu */
	REG_L	a2, 0(a0)
	and	a2, a2, a1
	beq	a2, zero, .Lnew_vmalloc_restore_context

	/* Atomically reset the current cpu bit in new_vmalloc */
	amoxor.d	a0, a1, (a0)

	/* Only emit a sfence.vma if the uarch caches invalid entries */
	ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1)

	REG_L	a0, TASK_TI_A0(tp)
	REG_L	a1, TASK_TI_A1(tp)
	REG_L	a2, TASK_TI_A2(tp)
	csrw	CSR_SCRATCH, x0
	sret

.Lnew_vmalloc_restore_context:
	REG_L	a2, TASK_TI_A2(tp)
.Lnew_vmalloc_restore_context_a1:
	REG_L	a1, TASK_TI_A1(tp)
.Lnew_vmalloc_restore_context_a0:
	REG_L	a0, TASK_TI_A0(tp)
.endm
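
/*
 * For readers of this file: a rough C sketch of what new_vmalloc_check does
 * (illustrative only, not part of the build; the condition helpers are
 * pseudo-names standing in for the branches above, not real kernel APIs):
 *
 *	unsigned long *word = &new_vmalloc[BIT_WORD(cpu)];
 *	unsigned long mask = BIT_MASK(cpu);
 *
 *	if (cause_is_page_fault && address_is_kernel && (*word & mask)) {
 *		*word &= ~mask;			// done atomically: amoxor.d
 *		if (!svvptc)			// uarch caches invalid entries
 *			sfence_vma();		// flush them
 *		sret();				// just retry the faulting access
 *	}
 *	// otherwise fall through to the regular trap handling below
 */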

/*
 * If the previous mode was U, capture the shadow stack pointer and save it
 * away.  Zero CSR_SSP at the same time for sanitization.
 */
.macro save_userssp tmp, status
	ALTERNATIVE("nops(4)",
		__stringify(				\
		andi \tmp, \status, SR_SPP;		\
		bnez \tmp, skip_ssp_save;		\
		csrrw \tmp, CSR_SSP, x0;		\
		REG_S \tmp, TASK_TI_USER_SSP(tp);	\
		skip_ssp_save:),
		0,
		RISCV_ISA_EXT_ZICFISS,
		CONFIG_RISCV_USER_CFI)
.endm

.macro restore_userssp tmp, status
	ALTERNATIVE("nops(4)",
		__stringify(				\
		andi \tmp, \status, SR_SPP;		\
		bnez \tmp, skip_ssp_restore;		\
		REG_L \tmp, TASK_TI_USER_SSP(tp);	\
		csrw CSR_SSP, \tmp;			\
		skip_ssp_restore:),
		0,
		RISCV_ISA_EXT_ZICFISS,
		CONFIG_RISCV_USER_CFI)
.endm
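
/*
 * Rough C sketch of the two macros above (illustrative only, not part of the
 * build; the thread_info field name is inferred from the TASK_TI_USER_SSP
 * offset and may not match the real struct member):
 *
 *	// save_userssp: SR_SPP clear means the trap came from U-mode
 *	if (!(status & SR_SPP))
 *		ti->user_ssp = csr_swap(CSR_SSP, 0);
 *
 *	// restore_userssp: about to return to U-mode
 *	if (!(status & SR_SPP))
 *		csr_write(CSR_SSP, ti->user_ssp);
 */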

SYM_CODE_START(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer.  If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
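	/*
	 * Illustrative C-like sketch of the csrrw/bnez sequence below (not
	 * part of the build):
	 *
	 *	tp = csr_swap(CSR_SCRATCH, tp);	// scratch <- old tp, tp <- old scratch
	 *	if (!tp)			// scratch was 0: trap came from the kernel
	 *		tp = csr_read(CSR_SCRATCH); // recover the tp we just parked there
	 */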
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, .Lsave_context

.Lrestore_kernel_tpsp:
	csrr tp, CSR_SCRATCH

#ifdef CONFIG_64BIT
	/*
	 * The RISC-V kernel does not eagerly emit a sfence.vma after each
	 * new vmalloc mapping, which may result in exceptions:
	 * - if the uarch caches invalid entries, the new mapping would not be
	 *   observed by the page table walker and an invalidation is needed.
	 * - if the uarch does not cache invalid entries, a reordered access
	 *   could "miss" the new mapping and trap: in that case, we only need
	 *   to retry the access, no sfence.vma is required.
	 */
	new_vmalloc_check
#endif

	REG_S sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
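	/*
	 * Overflow check, assuming THREAD_ALIGN is 2 * THREAD_SIZE under
	 * CONFIG_VMAP_STACK: every valid task stack address then has bit
	 * THREAD_SHIFT clear, so if that bit is set after making room for
	 * pt_regs the stack has been overrun.  Roughly:
	 *
	 *	if (((sp - PT_SIZE_ON_STACK) >> THREAD_SHIFT) & 1)
	 *		handle_kernel_stack_overflow();
	 */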
	addi sp, sp, -(PT_SIZE_ON_STACK)
	srli sp, sp, THREAD_SHIFT
	andi sp, sp, 0x1
	bnez sp, handle_kernel_stack_overflow
	REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

.Lsave_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1,  PT_RA(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x5,  PT_T0(sp)
	save_from_x6_to_x31

	/*
	 * Disable user-mode memory access (SUM), as it should only be enabled
	 * in the actual user copy routines.
	 *
	 * Disable the FPU/Vector units to detect illegal usage of floating
	 * point or vector in kernel space.
	 */
	li t0, SR_SUM | SR_FS_VS
#ifdef CONFIG_64BIT
	li t1, SR_ELP
	or t0, t0, t1
#endif

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	save_userssp s2, s1
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
	load_global_pointer

	/* Load the kernel shadow call stack pointer if coming from userspace */
	scs_load_current_if_task_changed s5

#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move a0, sp
	call riscv_v_context_nesting_start
#endif
	move a0, sp /* pt_regs */

	/*
	 * The MSB of the cause register differentiates between interrupts
	 * and exceptions.
	 */
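	/*
	 * Rough C sketch of the dispatch below (illustrative only, not part
	 * of the build; nr_table_entries stands for the bound checked via
	 * excp_vect_table_end):
	 *
	 *	if ((long)cause < 0)
	 *		do_irq(regs);			// interrupt
	 *	else if (cause < nr_table_entries)
	 *		excp_vect_table[cause](regs);	// exception handler
	 *	else
	 *		do_trap_unknown(regs);
	 *	ret_from_exception();
	 */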
	bge s4, zero, 1f

	/* Handle interrupts */
	call do_irq
	j ret_from_exception
1:
	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	add t0, t1, t0
	/* Check if the exception code lies within bounds */
	bgeu t0, t2, 3f
	REG_L t1, 0(t0)
2:	jalr t1
	j ret_from_exception
3:

	la t1, do_trap_unknown
	j 2b
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)

/*
 * ret_from_exception must be called with interrupts disabled.  Here is the
 * caller list:
 *  - handle_exception
 *  - ret_from_fork_kernel_asm / ret_from_fork_user_asm
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
	REG_L s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
	/* The MPP value is too large to be used as an immediate arg for andi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, 1f

#ifdef CONFIG_KSTACK_ERASE
	call	stackleak_erase_on_task_stack
#endif

	/* Save the unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/* Save the kernel shadow call stack pointer */
	scs_save_current

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp
1:
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move a0, sp
	call riscv_v_context_nesting_end
#endif
	REG_L a0, PT_STATUS(sp)
	restore_userssp s3, a0
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts.  We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS.  As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context.  While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
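	/*
	 * Sketch of the trick used below (illustrative only): executing any
	 * SC clears the hart's reservation whether or not it succeeds, so
	 * storing the just-loaded EPC back into its own slot is harmless and
	 * is guaranteed to drop a dangling reservation.  On rv64 this expands
	 * to roughly:
	 *
	 *	ld	a2, PT_EPC(sp)
	 *	sc.d	x0, a2, PT_EPC(sp)	# success code discarded via x0
	 */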
	REG_L  a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1,  PT_RA(sp)
	REG_L x3,  PT_GP(sp)
	REG_L x4,  PT_TP(sp)
	REG_L x5,  PT_T0(sp)
	restore_from_x6_to_x31

	REG_L x2,  PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
SYM_INNER_LABEL(ret_from_exception_end, SYM_L_GLOBAL)
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
	/* We reach here from kernel context, so sscratch must be 0 */
	csrrw x31, CSR_SCRATCH, x31
	asm_per_cpu sp, overflow_stack, x31
	li x31, OVERFLOW_STACK_SIZE
	add sp, sp, x31
	/* Zero x31 so the swap clears CSR_SCRATCH again, and restore x31 */
	xor x31, x31, x31
	csrrw x31, CSR_SCRATCH, x31

	addi sp, sp, -(PT_SIZE_ON_STACK)

	/* Save the context to the overflow stack */
	REG_S x1,  PT_RA(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x5,  PT_T0(sp)
	save_from_x6_to_x31

	REG_L s0, TASK_TI_KERNEL_SP(tp)
	csrr s1, CSR_STATUS
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	move a0, sp
	tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif

SYM_CODE_START(ret_from_fork_kernel_asm)
	call schedule_tail
	move a0, s1 /* fn_arg */
	move a1, s0 /* fn */
	move a2, sp /* pt_regs */
	call ret_from_fork_kernel
	j ret_from_exception
SYM_CODE_END(ret_from_fork_kernel_asm)

SYM_CODE_START(ret_from_fork_user_asm)
	call schedule_tail
	move a0, sp /* pt_regs */
	call ret_from_fork_user
	j ret_from_exception
SYM_CODE_END(ret_from_fork_user_asm)

#ifdef CONFIG_IRQ_STACKS
/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *			  void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using the per-CPU IRQ stack.
 */
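/*
 * Illustrative usage sketch (simplified; the real call site lives in the C
 * IRQ handling code and may differ):
 *
 *	if (on_thread_stack())
 *		call_on_irq_stack(regs, handle_riscv_irq);
 *	else
 *		handle_riscv_irq(regs);
 */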
SYM_FUNC_START(call_on_irq_stack)
	/* Create a frame record to save ra and s0 (fp) */
	addi	sp, sp, -STACKFRAME_SIZE_ON_STACK
	REG_S	ra, STACKFRAME_RA(sp)
	REG_S	s0, STACKFRAME_FP(sp)
	addi	s0, sp, STACKFRAME_SIZE_ON_STACK

	/* Switch to the per-CPU shadow call stack */
	scs_save_current
	scs_load_irq_stack t0

	/* Switch to the per-CPU IRQ stack and call the handler */
	load_per_cpu t0, irq_stack_ptr, t1
	li	t1, IRQ_STACK_SIZE
	add	sp, t0, t1
	jalr	a1

	/* Switch back to the thread shadow call stack */
	scs_load_current

	/* Switch back to the thread stack and restore ra and s0 */
	addi	sp, s0, -STACKFRAME_SIZE_ON_STACK
	REG_L	ra, STACKFRAME_RA(sp)
	REG_L	s0, STACKFRAME_FP(sp)
	addi	sp, sp, STACKFRAME_SIZE_ON_STACK

	ret
SYM_FUNC_END(call_on_irq_stack)
#endif /* CONFIG_IRQ_STACKS */

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The values of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
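/*
 * Rough sketch of the contract as seen from C (illustrative only, not the
 * kernel's actual declaration):
 *
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct task_struct *next);
 *
 *	last = __switch_to(prev, next);	// resumes on next's kernel stack,
 *					// with a0/prev preserved for the caller
 */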
SYM_FUNC_START(__switch_to)
	/* Save context into prev->thread */
	li    a4,  TASK_THREAD_RA
	add   a3, a0, a4
	add   a4, a1, a4
	REG_S ra,  TASK_THREAD_RA_RA(a3)
	REG_S sp,  TASK_THREAD_SP_RA(a3)
	REG_S s0,  TASK_THREAD_S0_RA(a3)
	REG_S s1,  TASK_THREAD_S1_RA(a3)
	REG_S s2,  TASK_THREAD_S2_RA(a3)
	REG_S s3,  TASK_THREAD_S3_RA(a3)
	REG_S s4,  TASK_THREAD_S4_RA(a3)
	REG_S s5,  TASK_THREAD_S5_RA(a3)
	REG_S s6,  TASK_THREAD_S6_RA(a3)
	REG_S s7,  TASK_THREAD_S7_RA(a3)
	REG_S s8,  TASK_THREAD_S8_RA(a3)
	REG_S s9,  TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)

	/* Save the user space access flag */
	csrr  s0, CSR_STATUS
	REG_S s0, TASK_THREAD_SUM_RA(a3)

	/* Save the kernel shadow call stack pointer */
	scs_save_current
	/* Restore context from next->thread */
	REG_L s0,  TASK_THREAD_SUM_RA(a4)
	li    s1,  SR_SUM
	and   s0,  s0, s1
	csrs  CSR_STATUS, s0
	REG_L ra,  TASK_THREAD_RA_RA(a4)
	REG_L sp,  TASK_THREAD_SP_RA(a4)
	REG_L s0,  TASK_THREAD_S0_RA(a4)
	REG_L s1,  TASK_THREAD_S1_RA(a4)
	REG_L s2,  TASK_THREAD_S2_RA(a4)
	REG_L s3,  TASK_THREAD_S3_RA(a4)
	REG_L s4,  TASK_THREAD_S4_RA(a4)
	REG_L s5,  TASK_THREAD_S5_RA(a4)
	REG_L s6,  TASK_THREAD_S6_RA(a4)
	REG_L s7,  TASK_THREAD_S7_RA(a4)
	REG_L s8,  TASK_THREAD_S8_RA(a4)
	REG_L s9,  TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	/* Switch to the next shadow call stack */
	scs_load_current
	ret
SYM_FUNC_END(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
SYM_DATA_START_LOCAL(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
	RISCV_PTR do_trap_unknown /* cause=16 */
	RISCV_PTR do_trap_unknown /* cause=17 */
	RISCV_PTR do_trap_software_check /* cause=18 is sw check exception */
SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)

#ifndef CONFIG_MMU
SYM_DATA_START(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	ecall
SYM_DATA_END(__user_rt_sigreturn)
#endif