/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level trap entry/exit and context switch for RISC-V.
 *
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>

#if !IS_ENABLED(CONFIG_PREEMPTION)
/*
 * Without kernel preemption there is no need-resched check when returning
 * to kernel mode, so fold resume_kernel straight into restore_all.
 */
.set resume_kernel, restore_all
#endif

/*
 * Common entry point for all traps (interrupts and exceptions).
 *
 * Protocol: while running in user mode, CSR_SCRATCH holds the kernel TP;
 * while in kernel mode it holds 0 (set below before calling out to C).
 * A pt_regs frame is built on the kernel stack and s1-s5 carry the trapped
 * context's CSR state into the dispatch code.
 */
ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	/* Trapped from kernel: recover TP and republish the current SP. */
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	/* Carve a struct pt_regs frame out of the kernel stack. */
	addi sp, sp, -(PT_SIZE_ON_STACK)
	/*
	 * Save all GPRs except x2 (sp) and x4 (tp); their pre-trap values
	 * are stored below from TASK_TI_USER_SP and CSR_SCRATCH.
	 */
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x8, PT_S0(sp)
	REG_S x9, PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	/*
	 * Register roles from here on (callee-saved, so they survive the C
	 * calls below):
	 *   s0 = pre-trap sp         s1 = pre-trap status (csrrc also clears
	 *   s2 = trap pc (epc)            SUM/FS in the live CSR)
	 *   s3 = trap value (tval)   s4 = trap cause
	 *   s5 = pre-trap tp (left in CSR_SCRATCH by the csrrw at entry)
	 */
	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer (norelax so this isn't relaxed to a no-op gp-relative form) */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif

#ifdef CONFIG_CONTEXT_TRACKING
	/* If previous state is in user mode, call context_tracking_user_exit. */
	li a0, SR_PP
	and a0, s1, a0
	bnez a0, skip_context_tracking
	call context_tracking_user_exit
skip_context_tracking:
#endif

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	/* All handlers below return through ret_from_exception. */
	la ra, ret_from_exception

	/* Handle interrupts: tail-call the registered irq handler. */
	move a0, sp /* pt_regs */
	la a1, handle_arch_irq
	REG_L a1, (a1)
	jr a1
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus: re-enable only if they were on when
	 * the trap was taken.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_on
#endif
	csrs CSR_STATUS, SR_IE

1:
	la ra, ret_from_exception
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions: dispatch via excp_vect_table[cause]. */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
#ifdef CONFIG_RISCV_M_MODE
	/*
	 * When running in M-Mode (no MMU config), MPIE does not get set.
	 * As a result, we need to force enable interrupts here because
	 * handle_exception did not set SR_IE as it always sees SR_PIE
	 * being cleared.
	 */
	csrs CSR_STATUS, SR_IE
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
	/* Recover a0 - a7 for system calls (clobbered by the C calls above) */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
#endif
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * Syscall number held in a7.
	 * If syscall number is above allowed value, redirect to ni_syscall
	 * (s0 keeps its sys_ni_syscall default in that case).
	 */
	bgeu a7, t0, 1f
	/* Call syscall */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We didn't execute the actual syscall.
	 * Seccomp already set return value for the current task pt_regs.
	 * (If it was configured with SECCOMP_RET_ERRNO/TRACE)
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	/* Disable interrupts so the flag checks below are atomic. */
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for andi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	/* Previous-privilege bit set => trap came from kernel mode. */
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

#ifdef CONFIG_CONTEXT_TRACKING
	call context_tracking_user_enter
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Tell lockdep/irq tracing what state sret/mret will restore. */
	REG_L s1, PT_STATUS(sp)
	andi t0, s1, SR_PIE
	beqz t0, 1f
	call trace_hardirqs_on
	j 2f
1:
	call trace_hardirqs_off
2:
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts.  We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS.  As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context.  While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x8, PT_S0(sp)
	REG_L x9, PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	/* Restore sp last; this discards the pt_regs frame. */
	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
	/* Preempt only when preempt_count == 0 and a resched is pending. */
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	/* The tracer may have modified the syscall args: reload them. */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	/* Non-zero return means the syscall was rejected: skip the call. */
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)

/*
 * Return path for a newly forked user task; runs schedule_tail() and then
 * falls through to ret_from_exception via ra.
 */
ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

/*
 * First code run by a new kernel thread: finish the switch, then invoke
 * the thread function (s0) with its argument (s1), returning to
 * ret_from_exception when fn returns.
 */
ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)


/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 * a0: previous task_struct (must be preserved across the switch)
 * a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4 /* a3 = &prev->thread (TASK_THREAD_*_RA offsets are relative to it) */
	add a4, a1, a4 /* a4 = &next->thread */
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
/* No MMU: page-fault causes have no handler; treat them as unknown traps. */
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	/* Exception vector table, indexed by scause (one RISCV_PTR per cause). */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
/* NOMMU signal-return trampoline executed in user mode. */
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif