/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x8, PT_S0(sp)
	REG_S x9, PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)
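
	/*
	 * Note: x2 (sp) and x4 (tp) are intentionally absent from the block
	 * above. The user sp was already stashed in TASK_TI_USER_SP and the
	 * user tp is sitting in CSR_SCRATCH; both are written out to PT_SP
	 * and PT_TP a few instructions below.
	 */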

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif

#ifdef CONFIG_CONTEXT_TRACKING
	/* If previous state is in user mode, call context_tracking_user_exit. */
	li a0, SR_PP
	and a0, s1, a0
	bnez a0, skip_context_tracking
	call context_tracking_user_exit
skip_context_tracking:
#endif

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	la ra, ret_from_exception

	/* Handle interrupts */
	move a0, sp /* pt_regs */
	la a1, handle_arch_irq
	REG_L a1, (a1)
	jr a1
1:
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_on
#endif
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
	csrs CSR_STATUS, SR_IE

1:
	la ra, ret_from_exception
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
	/* Recover a0 - a7 for system calls */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
#endif
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
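	/*
	 * The fixed +4 above is sufficient: ecall has no compressed encoding,
	 * so the trapping instruction is always 4 bytes wide.
	 */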
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * Syscall number held in a7.
	 * If syscall number is above allowed value, redirect to ni_syscall.
	 */
	bge a7, t0, 1f
	/*
	 * Check if syscall is rejected by tracer, i.e., a7 == -1.
	 * If yes, we pretend it was executed.
	 */
	li t1, -1
	beq a7, t1, ret_from_syscall_rejected
	blt a7, t1, 1f
	/* Call syscall */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We didn't execute the actual syscall.
	 * Seccomp already set return value for the current task pt_regs.
	 * (If it was configured with SECCOMP_RET_ERRNO/TRACE)
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

#ifdef CONFIG_CONTEXT_TRACKING
	call context_tracking_user_enter
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS
	REG_L s1, PT_STATUS(sp)
	andi t0, s1, SR_PIE
	beqz t0, 1f
	call trace_hardirqs_on
	j 2f
1:
	call trace_hardirqs_off
2:
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)
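	/*
	 * The store-conditional above exists only for its side effect of
	 * killing any outstanding reservation. It is harmless if it actually
	 * completes, since it just writes back the value PT_EPC already
	 * holds, and the success flag is discarded into x0.
	 */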

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x8, PT_S0(sp)
	REG_L x9, PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 * a0: previous task_struct (must be preserved across the switch)
 * a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault   /* instruction page fault */
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif