/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>

	.section .irqentry.text, "ax"

SYM_CODE_START(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, .Lsave_context

.Lrestore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
	addi sp, sp, -(PT_SIZE_ON_STACK)
	srli sp, sp, THREAD_SHIFT
	andi sp, sp, 0x1
	bnez sp, handle_kernel_stack_overflow
	REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

.Lsave_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU/Vector to detect illegal usage of floating point
	 * or vector in kernel space.
	 */
	li t0, SR_SUM | SR_FS_VS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
	load_global_pointer

	/* Load the kernel shadow call stack pointer if coming from userspace */
	scs_load_current_if_task_changed s5

#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move a0, sp
	call riscv_v_context_nesting_start
#endif
	move a0, sp /* pt_regs */

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	call do_irq
	j ret_from_exception
1:
	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 3f
	REG_L t1, 0(t0)
2:	jalr t1
	j ret_from_exception
3:

	la t1, do_trap_unknown
	j 2b
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)
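
/*
 * Informative example of the dispatch above (editor's note, not part of the
 * original flow): a load page fault reports scause = 13, which is
 * non-negative, so the exception path is taken; scaling 13 by the pointer
 * size (slli with RISCV_LGPTR) selects entry 13 of excp_vect_table, i.e.
 * do_page_fault, which is then called with a0 already pointing at the saved
 * pt_regs. Any cause code whose slot lies at or beyond excp_vect_table_end
 * falls back to do_trap_unknown.
 */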

/*
 * ret_from_exception must be called with interrupts disabled. Here is the
 * caller list:
 *  - handle_exception
 *  - ret_from_fork
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
	REG_L s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, 1f

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call stackleak_erase_on_task_stack
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/* Save the kernel shadow call stack pointer */
	scs_save_current

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp
1:
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move a0, sp
	call riscv_v_context_nesting_end
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	restore_from_x6_to_x31

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)
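
/*
 * Kernel stack overflow path (CONFIG_VMAP_STACK): reached from
 * handle_exception when the stack-overflow check there trips. We switch to
 * the per-CPU overflow stack, save the trapping context on it, and hand off
 * to handle_bad_stack(); the tail call below means control never comes back
 * here.
 */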
#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
	/* we reach here from kernel context, sscratch must be 0 */
	csrrw x31, CSR_SCRATCH, x31
	asm_per_cpu sp, overflow_stack, x31
	li x31, OVERFLOW_STACK_SIZE
	add sp, sp, x31
	/* zero out x31 again and restore x31 */
	xor x31, x31, x31
	csrrw x31, CSR_SCRATCH, x31

	addi sp, sp, -(PT_SIZE_ON_STACK)

	/* Save context to the overflow stack */
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31

	REG_L s0, TASK_TI_KERNEL_SP(tp)
	csrr s1, CSR_STATUS
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	move a0, sp
	tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif

SYM_CODE_START(ret_from_fork)
	call schedule_tail
	beqz s0, 1f	/* not from kernel thread */
	/* Call fn(arg) */
	move a0, s1
	jalr s0
1:
	move a0, sp /* pt_regs */
	la ra, ret_from_exception
	tail syscall_exit_to_user_mode
SYM_CODE_END(ret_from_fork)

#ifdef CONFIG_IRQ_STACKS
/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *			  void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using the per-CPU IRQ stack.
 */
SYM_FUNC_START(call_on_irq_stack)
	/* Create a frame record to save ra and s0 (fp) */
	addi sp, sp, -STACKFRAME_SIZE_ON_STACK
	REG_S ra, STACKFRAME_RA(sp)
	REG_S s0, STACKFRAME_FP(sp)
	addi s0, sp, STACKFRAME_SIZE_ON_STACK

	/* Switch to the per-CPU shadow call stack */
	scs_save_current
	scs_load_irq_stack t0

	/* Switch to the per-CPU IRQ stack and call the handler */
	load_per_cpu t0, irq_stack_ptr, t1
	li t1, IRQ_STACK_SIZE
	add sp, t0, t1
	jalr a1

	/* Switch back to the thread shadow call stack */
	scs_load_current

	/* Switch back to the thread stack and restore ra and s0 */
	addi sp, s0, -STACKFRAME_SIZE_ON_STACK
	REG_L ra, STACKFRAME_RA(sp)
	REG_L s0, STACKFRAME_FP(sp)
	addi sp, sp, STACKFRAME_SIZE_ON_STACK

	ret
SYM_FUNC_END(call_on_irq_stack)
#endif /* CONFIG_IRQ_STACKS */
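
/*
 * Usage note (editor's assumption, not enforced here): call_on_irq_stack()
 * above is intended for the C interrupt entry path (e.g. do_irq(), invoked
 * from handle_exception) so that the bulk of interrupt handling runs on the
 * per-CPU IRQ stack rather than on the interrupted task's kernel stack.
 */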

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 * a0: previous task_struct (must be preserved across the switch)
 * a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
SYM_FUNC_START(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Save the kernel shadow call stack pointer */
	scs_save_current
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	/* Switch to the next shadow call stack */
	scs_load_current
	ret
SYM_FUNC_END(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
SYM_DATA_START_LOCAL(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault /* store page fault */
SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)

#ifndef CONFIG_MMU
SYM_DATA_START(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	ecall
SYM_DATA_END(__user_rt_sigreturn)
#endif