/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1,  PT_RA(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x5,  PT_T0(sp)
	REG_S x6,  PT_T1(sp)
	REG_S x7,  PT_T2(sp)
	REG_S x8,  PT_S0(sp)
	REG_S x9,  PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	la ra, ret_from_exception
	/*
	 * The MSB of the cause register differentiates between interrupts
	 * and exceptions.
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	move a0, sp /* pt_regs */
	tail do_IRQ
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
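	/*
	 * Note: s1 still holds the CSR_STATUS value captured at entry (see
	 * the csrrc above), so the SR_PIE test below reflects whether
	 * interrupts were enabled at the point the exception was taken.
	 */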
	andi t0, s1, SR_PIE
	beqz t0, 1f
	csrs CSR_STATUS, SR_IE

1:
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
	/* Save the initial a0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * The syscall number is held in a7. If it is above the allowed
	 * range, redirect to sys_ni_syscall.
	 */
	bge a7, t0, 1f
	/*
	 * Check if the syscall was rejected by the tracer, i.e. a7 == -1.
	 * If so, pretend it was executed.
	 */
	li t1, -1
	beq a7, t1, ret_from_syscall_rejected
	blt a7, t1, 1f
	/* Call syscall */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We didn't execute the actual syscall.
	 * Seccomp already set return value for the current task pt_regs.
	 * (If it was configured with SECCOMP_RET_ERRNO/TRACE)
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_RISCV_M_MODE
	/* The MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
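	/*
	 * For illustration, a dangling reservation can be left behind by a
	 * taken branch around an SC in a hypothetical compare-and-swap
	 * sequence like the following (not part of this file):
	 *
	 *	1:	lr.w	t0, (a0)	# acquire reservation on (a0)
	 *		bne	t0, a1, 2f	# taken branch skips the SC ...
	 *		sc.w	t1, a2, (a0)
	 *		bnez	t1, 1b
	 *	2:				# ... leaving the reservation live
	 */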
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1,  PT_RA(sp)
	REG_L x3,  PT_GP(sp)
	REG_L x4,  PT_TP(sp)
	REG_L x5,  PT_T0(sp)
	REG_L x6,  PT_T1(sp)
	REG_L x7,  PT_T2(sp)
	REG_L x8,  PT_S0(sp)
	REG_L x9,  PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2,  PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

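/*
 * Kernel threads land here the first time the scheduler picks them. By the
 * convention of __switch_to below, s0 and s1 survive the context switch;
 * thread creation (see copy_thread()) is expected to have staged the thread
 * function in s0 and its argument in s1.
 */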
ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)


/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *  a0: previous task_struct (must be preserved across the switch)
 *  a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li    a4, TASK_THREAD_RA
	add   a3, a0, a4
	add   a4, a1, a4
	REG_S ra,  TASK_THREAD_RA_RA(a3)
	REG_S sp,  TASK_THREAD_SP_RA(a3)
	REG_S s0,  TASK_THREAD_S0_RA(a3)
	REG_S s1,  TASK_THREAD_S1_RA(a3)
	REG_S s2,  TASK_THREAD_S2_RA(a3)
	REG_S s3,  TASK_THREAD_S3_RA(a3)
	REG_S s4,  TASK_THREAD_S4_RA(a3)
	REG_S s5,  TASK_THREAD_S5_RA(a3)
	REG_S s6,  TASK_THREAD_S6_RA(a3)
	REG_S s7,  TASK_THREAD_S7_RA(a3)
	REG_S s8,  TASK_THREAD_S8_RA(a3)
	REG_S s9,  TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra,  TASK_THREAD_RA_RA(a4)
	REG_L sp,  TASK_THREAD_SP_RA(a4)
	REG_L s0,  TASK_THREAD_S0_RA(a4)
	REG_L s1,  TASK_THREAD_S1_RA(a4)
	REG_L s2,  TASK_THREAD_S2_RA(a4)
	REG_L s3,  TASK_THREAD_S3_RA(a4)
	REG_L s4,  TASK_THREAD_S4_RA(a4)
	REG_L s5,  TASK_THREAD_S5_RA(a4)
	REG_L s6,  TASK_THREAD_S6_RA(a4)
	REG_L s7,  TASK_THREAD_S7_RA(a4)
	REG_L s8,  TASK_THREAD_S8_RA(a4)
	REG_L s9,  TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
#if TASK_TI != 0
#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
	addi tp, a1, TASK_TI
#else
	move tp, a1
#endif
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault   /* instruction page fault */
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif
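/*
 * For reference, the user-side entry into the syscall path above is a plain
 * ecall with the syscall number in a7 and arguments in a0-a5; the return
 * value comes back in a0. An illustrative userspace sketch (not part of this
 * file's build; __NR_getpid is just an example number):
 *
 *	li	a7, __NR_getpid
 *	ecall			# traps into handle_exception -> handle_syscall
 *				# a0 now holds the result
 */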