/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>

SYM_CODE_START(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
	addi sp, sp, -(PT_SIZE_ON_STACK)
	srli sp, sp, THREAD_SHIFT
	andi sp, sp, 0x1
	bnez sp, handle_kernel_stack_overflow
	REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31

	/*
	 * Disable user-mode memory access, as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU/Vector to detect illegal usage of floating point
	 * or vector in kernel space.
	 */
	li t0, SR_SUM | SR_FS_VS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop
	move a0, sp /* pt_regs */
	la ra, ret_from_exception

	/*
	 * The MSB of the cause differentiates between
	 * interrupts and exceptions.
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	tail do_irq
1:
	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	add t0, t1, t0
	/* Check if the exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown
SYM_CODE_END(handle_exception)
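
/*
 * Illustrative sketch only, not part of the build: the cause-based dispatch
 * at the end of handle_exception above behaves roughly like the following C,
 * where "nr_excp" is a hypothetical name for the number of entries in
 * excp_vect_table:
 *
 *	if ((long)cause < 0)
 *		do_irq(regs);			// MSB set: interrupt
 *	else if (cause < nr_excp)
 *		excp_vect_table[cause](regs);	// in-bounds exception code
 *	else
 *		do_trap_unknown(regs);		// anything else is unknown
 *
 * In all cases ra has already been pointed at ret_from_exception, so the
 * handlers are tail-called and return straight into the exit path.
 */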

/*
 * ret_from_exception must be called with interrupts disabled. Here is the
 * caller list:
 *  - handle_exception
 *  - ret_from_fork
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
	REG_L s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, 1f

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp
1:
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts.  We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS.  As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context.  While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	restore_from_x6_to_x31

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
SYM_CODE_END(ret_from_exception)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
	/*
	 * Take the pseudo-spinlock for the shadow stack, in case multiple
	 * harts are concurrently overflowing their kernel stacks.  We could
	 * store any value here, but since we're overflowing the kernel stack
	 * already we only have SP to use as a scratch register.  So we just
	 * swap in the address of the spinlock, as that's definitely non-zero.
	 *
	 * Pairs with a store_release in handle_bad_stack().
	 */
1:	la sp, spin_shadow_stack
	REG_AMOSWAP_AQ sp, sp, (sp)
	bnez sp, 1b

	la sp, shadow_stack
	addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE

	/* Save the caller registers to the shadow stack */
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	la ra, restore_caller_reg
	tail get_overflow_stack

restore_caller_reg:
	/* Save the per-CPU overflow stack pointer */
	REG_S a0, -8(sp)
	/* Restore the caller registers from the shadow stack */
	REG_L x1, PT_RA(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	/* Load the per-CPU overflow stack pointer */
	REG_L sp, -8(sp)
	addi sp, sp, -(PT_SIZE_ON_STACK)

	/* Save the context to the overflow stack */
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31

	REG_L s0, TASK_TI_KERNEL_SP(tp)
	csrr s1, CSR_STATUS
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	move a0, sp
	tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
#endif
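
/*
 * Reference sketch only, not part of the build: the comment in
 * handle_kernel_stack_overflow above notes that the pseudo-spinlock acquire
 * pairs with a store_release in handle_bad_stack().  The release side in the
 * C handler looks roughly like the following (assuming spin_shadow_stack is
 * the lock word declared elsewhere):
 *
 *	// We're done with the shadow stack once we're running on the
 *	// per-CPU overflow stack, so let the next overflowing hart in.
 *	smp_store_release(&spin_shadow_stack, 0);
 */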

SYM_CODE_START(ret_from_fork)
	call schedule_tail
	beqz s0, 1f	/* not from kernel thread */
	/* Call fn(arg) */
	move a0, s1
	jalr s0
1:
	move a0, sp /* pt_regs */
	la ra, ret_from_exception
	tail syscall_exit_to_user_mode
SYM_CODE_END(ret_from_fork)

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *  a0: previous task_struct (must be preserved across the switch)
 *  a1: next task_struct
 *
 * The values of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
SYM_FUNC_START(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
SYM_FUNC_END(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
SYM_CODE_START(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end:
SYM_CODE_END(excp_vect_table)

#ifndef CONFIG_MMU
SYM_CODE_START(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	ecall
SYM_CODE_END(__user_rt_sigreturn)
#endif
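
/*
 * Reference note, not part of the build: for NOMMU kernels the
 * __user_rt_sigreturn sequence above is copied into the user signal frame by
 * the signal setup code and used as the signal handler's return address, so
 * returning from a handler executes roughly the equivalent of:
 *
 *	syscall(__NR_rt_sigreturn);
 *
 * With CONFIG_MMU the equivalent trampoline is provided by the vDSO instead,
 * which is why this stub is only assembled for !CONFIG_MMU.
 */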