entry.S (old: 8389a7b909f252e74ea92b2794de8d660cfee96e) | entry.S (new: 6bd33e1ece528f67646db33bf97406b747dafda0) |
---|---|
1/* SPDX-License-Identifier: GPL-2.0-only */ 2/* 3 * Copyright (C) 2012 Regents of the University of California 4 * Copyright (C) 2017 SiFive 5 */ 6 7#include <linux/init.h> 8#include <linux/linkage.h> --- 12 unchanged lines hidden (view full) --- 21 * stack. 22 */ 23 .macro SAVE_ALL 24 LOCAL _restore_kernel_tpsp 25 LOCAL _save_context 26 27 /* 28 * If coming from userspace, preserve the user thread pointer and load | 1/* SPDX-License-Identifier: GPL-2.0-only */ 2/* 3 * Copyright (C) 2012 Regents of the University of California 4 * Copyright (C) 2017 SiFive 5 */ 6 7#include <linux/init.h> 8#include <linux/linkage.h> --- 12 unchanged lines hidden (view full) --- 21 * stack. 22 */ 23 .macro SAVE_ALL 24 LOCAL _restore_kernel_tpsp 25 LOCAL _save_context 26 27 /* 28 * If coming from userspace, preserve the user thread pointer and load |
29 * the kernel thread pointer. If we came from the kernel, sscratch 30 * will contain 0, and we should continue on the current TP. | 29 * the kernel thread pointer. If we came from the kernel, the scratch 30 * register will contain 0, and we should continue on the current TP. |
31 */ | 31 */ |
32 csrrw tp, CSR_SSCRATCH, tp | 32 csrrw tp, CSR_SCRATCH, tp |
33 bnez tp, _save_context 34 35_restore_kernel_tpsp: | 33 bnez tp, _save_context 34 35_restore_kernel_tpsp: |
36 csrr tp, CSR_SSCRATCH | 36 csrr tp, CSR_SCRATCH |
37 REG_S sp, TASK_TI_KERNEL_SP(tp) 38_save_context: 39 REG_S sp, TASK_TI_USER_SP(tp) 40 REG_L sp, TASK_TI_KERNEL_SP(tp) 41 addi sp, sp, -(PT_SIZE_ON_STACK) 42 REG_S x1, PT_RA(sp) 43 REG_S x3, PT_GP(sp) 44 REG_S x5, PT_T0(sp) --- 29 unchanged lines hidden (view full) --- 74 * actual user copy routines. 75 * 76 * Disable the FPU to detect illegal usage of floating point in kernel 77 * space. 78 */ 79 li t0, SR_SUM | SR_FS 80 81 REG_L s0, TASK_TI_USER_SP(tp) | 37 REG_S sp, TASK_TI_KERNEL_SP(tp) 38_save_context: 39 REG_S sp, TASK_TI_USER_SP(tp) 40 REG_L sp, TASK_TI_KERNEL_SP(tp) 41 addi sp, sp, -(PT_SIZE_ON_STACK) 42 REG_S x1, PT_RA(sp) 43 REG_S x3, PT_GP(sp) 44 REG_S x5, PT_T0(sp) --- 29 unchanged lines hidden (view full) --- 74 * actual user copy routines. 75 * 76 * Disable the FPU to detect illegal usage of floating point in kernel 77 * space. 78 */ 79 li t0, SR_SUM | SR_FS 80 81 REG_L s0, TASK_TI_USER_SP(tp) |
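The csrrw/bnez sequence at the top of SAVE_ALL is the standard RISC-V trap-entry idiom, relying on the invariant the file's comments describe. Restated as a minimal sketch (nothing here beyond what the code above already does):

    # Invariant: CSR_SCRATCH holds the kernel tp of the current task while
    # user code runs, and 0 while the kernel itself runs.
    csrrw tp, CSR_SCRATCH, tp    # atomic swap: tp <-> scratch
    bnez  tp, _save_context      # nonzero: trapped from user space, tp is
                                 #   now the kernel thread pointer
    csrr  tp, CSR_SCRATCH        # zero: trapped from the kernel; the swap
                                 #   parked our live tp in scratch, take it back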
82 csrrc s1, CSR_SSTATUS, t0 83 csrr s2, CSR_SEPC 84 csrr s3, CSR_STVAL 85 csrr s4, CSR_SCAUSE 86 csrr s5, CSR_SSCRATCH | 82 csrrc s1, CSR_STATUS, t0 83 csrr s2, CSR_EPC 84 csrr s3, CSR_TVAL 85 csrr s4, CSR_CAUSE 86 csrr s5, CSR_SCRATCH |
87 REG_S s0, PT_SP(sp) | 87 REG_S s0, PT_SP(sp) |
88 REG_S s1, PT_SSTATUS(sp) 89 REG_S s2, PT_SEPC(sp) 90 REG_S s3, PT_SBADADDR(sp) 91 REG_S s4, PT_SCAUSE(sp) | 88 REG_S s1, PT_STATUS(sp) 89 REG_S s2, PT_EPC(sp) 90 REG_S s3, PT_BADADDR(sp) 91 REG_S s4, PT_CAUSE(sp) |
92 REG_S s5, PT_TP(sp) 93 .endm 94 95/* 96 * Prepares to return from a system call or exception by restoring all 97 * registers from the stack. 98 */ 99 .macro RESTORE_ALL | 92 REG_S s5, PT_TP(sp) 93 .endm 94 95/* 96 * Prepares to return from a system call or exception by restoring all 97 * registers from the stack. 98 */ 99 .macro RESTORE_ALL |
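The systematic rename running through SAVE_ALL (CSR_SSTATUS -> CSR_STATUS, CSR_SEPC -> CSR_EPC, PT_SSTATUS -> PT_STATUS, and so on, continuing in RESTORE_ALL below) is the point of the whole diff: one entry.S now serves both supervisor-mode and machine-mode (nommu) kernels. The aliases themselves presumably come from a companion asm/csr.h change that is not part of this hunk; a sketch of the shape those definitions would take:

    #ifdef CONFIG_RISCV_M_MODE
    # define CSR_STATUS   CSR_MSTATUS    /* M-mode (nommu) build */
    # define CSR_EPC      CSR_MEPC
    # define CSR_SCRATCH  CSR_MSCRATCH
    #else
    # define CSR_STATUS   CSR_SSTATUS    /* usual S-mode build */
    # define CSR_EPC      CSR_SEPC
    # define CSR_SCRATCH  CSR_SSCRATCH
    #endif

The SR_IE and SR_PIE bit masks used further down would be aliased the same way, to the MIE/MPIE or SIE/SPIE bits of the status register.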
100 REG_L a0, PT_SSTATUS(sp) | 100 REG_L a0, PT_STATUS(sp) |
101 /* 102 * The current load reservation is effectively part of the processor's 103 * state, in the sense that load reservations cannot be shared between 104 * different hart contexts. We can't actually save and restore a load 105 * reservation, so instead here we clear any existing reservation -- 106 * it's always legal for implementations to clear load reservations at 107 * any point (as long as the forward progress guarantee is kept, but 108 * we'll ignore that here). 109 * 110 * Dangling load reservations can be the result of taking a trap in the 111 * middle of an LR/SC sequence, but can also be the result of a taken 112 * forward branch around an SC -- which is how we implement CAS. As a 113 * result we need to clear reservations between the last CAS and the 114 * jump back to the new context. While it is unlikely the store 115 * completes, implementations are allowed to expand reservations to be 116 * arbitrarily large. 117 */ | 101 /* 102 * The current load reservation is effectively part of the processor's 103 * state, in the sense that load reservations cannot be shared between 104 * different hart contexts. We can't actually save and restore a load 105 * reservation, so instead here we clear any existing reservation -- 106 * it's always legal for implementations to clear load reservations at 107 * any point (as long as the forward progress guarantee is kept, but 108 * we'll ignore that here). 109 * 110 * Dangling load reservations can be the result of taking a trap in the 111 * middle of an LR/SC sequence, but can also be the result of a taken 112 * forward branch around an SC -- which is how we implement CAS. As a 113 * result we need to clear reservations between the last CAS and the 114 * jump back to the new context. While it is unlikely the store 115 * completes, implementations are allowed to expand reservations to be 116 * arbitrarily large. 117 */ |
118 REG_L a2, PT_SEPC(sp) 119 REG_SC x0, a2, PT_SEPC(sp) | 118 REG_L a2, PT_EPC(sp) 119 REG_SC x0, a2, PT_EPC(sp) |
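The "taken forward branch around an SC" case from the comment is easiest to see in a compare-and-swap built from LR/SC; a minimal sketch (generic code, not taken from this file):

    # cas: a0 = address, a1 = expected old value, a2 = new value
    1:  lr.w  t0, (a0)       # load-reserved: acquires a reservation on (a0)
        bne   t0, a1, 2f     # mismatch: branch PAST the sc.w ...
        sc.w  t1, a2, (a0)   # store-conditional: writes only if still reserved
        bnez  t1, 1b         # nonzero result means the sc failed; retry
    2:                       # ... leaving the reservation dangling

The REG_SC to x0 above is the cheap way to kill such a dangling reservation: a store-conditional always clears the hart's reservation whether or not it succeeds, the value written back is the one just loaded from PT_EPC (so memory is unchanged either way), and the success flag is discarded into x0.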
120 | 120 |
121 csrw CSR_SSTATUS, a0 122 csrw CSR_SEPC, a2 | 121 csrw CSR_STATUS, a0 122 csrw CSR_EPC, a2 |
123 124 REG_L x1, PT_RA(sp) 125 REG_L x3, PT_GP(sp) 126 REG_L x4, PT_TP(sp) 127 REG_L x5, PT_T0(sp) 128 REG_L x6, PT_T1(sp) 129 REG_L x7, PT_T2(sp) 130 REG_L x8, PT_S0(sp) --- 27 unchanged lines hidden (view full) --- 158#if !IS_ENABLED(CONFIG_PREEMPT) 159.set resume_kernel, restore_all 160#endif 161 162ENTRY(handle_exception) 163 SAVE_ALL 164 165 /* | 123 124 REG_L x1, PT_RA(sp) 125 REG_L x3, PT_GP(sp) 126 REG_L x4, PT_TP(sp) 127 REG_L x5, PT_T0(sp) 128 REG_L x6, PT_T1(sp) 129 REG_L x7, PT_T2(sp) 130 REG_L x8, PT_S0(sp) --- 27 unchanged lines hidden (view full) --- 158#if !IS_ENABLED(CONFIG_PREEMPT) 159.set resume_kernel, restore_all 160#endif 161 162ENTRY(handle_exception) 163 SAVE_ALL 164 165 /* |
166 * Set sscratch register to 0, so that if a recursive exception | 166 * Set the scratch register to 0, so that if a recursive exception |
167 * occurs, the exception vector knows it came from the kernel 168 */ | 167 * occurs, the exception vector knows it came from the kernel 168 */ |
169 csrw CSR_SSCRATCH, x0 | 169 csrw CSR_SCRATCH, x0 |
170 171 /* Load the global pointer */ 172.option push 173.option norelax 174 la gp, __global_pointer$ 175.option pop 176 177 la ra, ret_from_exception 178 /* 179 * MSB of cause differentiates between 180 * interrupts and exceptions 181 */ 182 bge s4, zero, 1f 183 184 /* Handle interrupts */ 185 move a0, sp /* pt_regs */ 186 tail do_IRQ 1871: | 170 171 /* Load the global pointer */ 172.option push 173.option norelax 174 la gp, __global_pointer$ 175.option pop 176 177 la ra, ret_from_exception 178 /* 179 * MSB of cause differentiates between 180 * interrupts and exceptions 181 */ 182 bge s4, zero, 1f 183 184 /* Handle interrupts */ 185 move a0, sp /* pt_regs */ 186 tail do_IRQ 1871: |
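The bge against zero works because the cause CSR (scause or mcause, saved in s4) sets its most-significant bit for interrupts, so read as a signed value every interrupt cause is negative and a single signed branch classifies the trap:

    # m/scause layout:
    #   bit  XLEN-1    : 1 = interrupt, 0 = synchronous exception
    #   bits XLEN-2..0 : cause code (for exceptions, the excp_vect_table index)
    bge  s4, zero, 1f    # s4 >= 0 signed: synchronous exception, skip ahead
                         # s4 <  0 signed: interrupt, fall through to do_IRQ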
188 /* Exceptions run with interrupts enabled or disabled 189 depending on the state of sstatus.SR_SPIE */ 190 andi t0, s1, SR_SPIE | 188 /* 189 * Exceptions run with interrupts enabled or disabled depending on the 190 * state of SR_PIE in m/sstatus. 191 */ 192 andi t0, s1, SR_PIE |
191 beqz t0, 1f | 193 beqz t0, 1f |
192 csrs CSR_SSTATUS, SR_SIE | 194 csrs CSR_STATUS, SR_IE |
193 1941: 195 /* Handle syscalls */ 196 li t0, EXC_SYSCALL 197 beq s4, t0, handle_syscall 198 199 /* Handle other exceptions */ 200 slli t0, s4, RISCV_LGPTR --- 11 unchanged lines hidden (view full) --- 212handle_syscall: 213 /* save the initial A0 value (needed in signal handlers) */ 214 REG_S a0, PT_ORIG_A0(sp) 215 /* 216 * Advance SEPC to avoid executing the original 217 * scall instruction on sret 218 */ 219 addi s2, s2, 0x4 | 195 1961: 197 /* Handle syscalls */ 198 li t0, EXC_SYSCALL 199 beq s4, t0, handle_syscall 200 201 /* Handle other exceptions */ 202 slli t0, s4, RISCV_LGPTR --- 11 unchanged lines hidden (view full) --- 214handle_syscall: 215 /* save the initial A0 value (needed in signal handlers) */ 216 REG_S a0, PT_ORIG_A0(sp) 217 /* 218 * Advance SEPC to avoid executing the original 219 * scall instruction on sret 220 */ 221 addi s2, s2, 0x4 |
220 REG_S s2, PT_SEPC(sp) | 222 REG_S s2, PT_EPC(sp) |
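Advancing by a constant 4 is safe because ecall (spelled scall by older binutils, as in the comment above) has no 2-byte compressed encoding, so the trapping syscall instruction is always exactly 4 bytes and no instruction-length decode is needed:

    addi  s2, s2, 0x4        # skip the 4-byte ecall/scall
    REG_S s2, PT_EPC(sp)     # so mret/sret resumes at the next instruction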
221 /* Trace syscalls, but only if requested by the user. */ 222 REG_L t0, TASK_TI_FLAGS(tp) 223 andi t0, t0, _TIF_SYSCALL_WORK 224 bnez t0, handle_syscall_trace_enter 225check_syscall_nr: 226 /* Check to make sure we don't jump to a bogus syscall number. */ 227 li t0, __NR_syscalls 228 la s0, sys_ni_syscall --- 10 unchanged lines hidden (view full) --- 239 /* Set user a0 to kernel a0 */ 240 REG_S a0, PT_A0(sp) 241 /* Trace syscalls, but only if requested by the user. */ 242 REG_L t0, TASK_TI_FLAGS(tp) 243 andi t0, t0, _TIF_SYSCALL_WORK 244 bnez t0, handle_syscall_trace_exit 245 246ret_from_exception: | 223 /* Trace syscalls, but only if requested by the user. */ 224 REG_L t0, TASK_TI_FLAGS(tp) 225 andi t0, t0, _TIF_SYSCALL_WORK 226 bnez t0, handle_syscall_trace_enter 227check_syscall_nr: 228 /* Check to make sure we don't jump to a bogus syscall number. */ 229 li t0, __NR_syscalls 230 la s0, sys_ni_syscall --- 10 unchanged lines hidden (view full) --- 241 /* Set user a0 to kernel a0 */ 242 REG_S a0, PT_A0(sp) 243 /* Trace syscalls, but only if requested by the user. */ 244 REG_L t0, TASK_TI_FLAGS(tp) 245 andi t0, t0, _TIF_SYSCALL_WORK 246 bnez t0, handle_syscall_trace_exit 247 248ret_from_exception: |
247 REG_L s0, PT_SSTATUS(sp) 248 csrc CSR_SSTATUS, SR_SIE | 249 REG_L s0, PT_STATUS(sp) 250 csrc CSR_STATUS, SR_IE 251#ifdef CONFIG_RISCV_M_MODE 252 /* the MPP value is too large to be used as an immediate arg for andi */ 253 li t0, SR_MPP 254 and s0, s0, t0 255#else
249 andi s0, s0, SR_SPP | 256 andi s0, s0, SR_SPP |
257#endif |
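The #ifdef above exists purely because of immediate encoding: andi takes a 12-bit sign-extended immediate, so only masks in the range -2048..2047 can be encoded directly. SR_SPP (0x100) fits; SR_MPP (0x1800, the two-bit MPP field at bits 12:11 of mstatus) does not, hence the li/and pair. In sketch form (mask values as commonly defined in asm/csr.h; worth double-checking there):

    andi s0, s0, SR_SPP    # SR_SPP = 0x0100 <= 2047: encodes in the immediate
    li   t0, SR_MPP        # SR_MPP = 0x1800 >  2047: materialize in a register
    and  s0, s0, t0        # ... and use the register-register AND instead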
|
250 bnez s0, resume_kernel 251 252resume_userspace: 253 /* Interrupts must be disabled here so flags are checked atomically */ 254 REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */ 255 andi s1, s0, _TIF_WORK_MASK 256 bnez s1, work_pending 257 258 /* Save unwound kernel stack pointer in thread_info */ 259 addi s0, sp, PT_SIZE_ON_STACK 260 REG_S s0, TASK_TI_KERNEL_SP(tp) 261 262 /* | 258 bnez s0, resume_kernel 259 260resume_userspace: 261 /* Interrupts must be disabled here so flags are checked atomically */ 262 REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */ 263 andi s1, s0, _TIF_WORK_MASK 264 bnez s1, work_pending 265 266 /* Save unwound kernel stack pointer in thread_info */ 267 addi s0, sp, PT_SIZE_ON_STACK 268 REG_S s0, TASK_TI_KERNEL_SP(tp) 269 270 /* |
263 * Save TP into sscratch, so we can find the kernel data structures 264 * again. | 271 * Save TP into the scratch register, so we can find the kernel data 272 * structures again. |
265 */ | 273 */ |
266 csrw CSR_SSCRATCH, tp | 274 csrw CSR_SCRATCH, tp |
267 268restore_all: 269 RESTORE_ALL | 275 276restore_all: 277 RESTORE_ALL |
278#ifdef CONFIG_RISCV_M_MODE 279 mret 280#else |
|
270 sret | 281 sret |
282#endif |
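mret and sret perform the same architectural return sequence, each on its own privilege level's copy of the state; summarizing what either instruction does at this point (per the RISC-V privileged spec, shown as comments rather than new code):

    # pc        <- CSR_EPC       (mepc/sepc, written back by RESTORE_ALL)
    # privilege <- MPP/SPP       (field of the status register)
    # MIE/SIE   <- MPIE/SPIE     (interrupt enable as saved at trap entry)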
|
271 272#if IS_ENABLED(CONFIG_PREEMPT) 273resume_kernel: 274 REG_L s0, TASK_TI_PREEMPT_COUNT(tp) 275 bnez s0, restore_all 276 REG_L s0, TASK_TI_FLAGS(tp) 277 andi s0, s0, _TIF_NEED_RESCHED 278 beqz s0, restore_all 279 call preempt_schedule_irq 280 j restore_all 281#endif 282 283work_pending: 284 /* Enter slow path for supplementary processing */ 285 la ra, ret_from_exception 286 andi s1, s0, _TIF_NEED_RESCHED 287 bnez s1, work_resched 288work_notifysig: 289 /* Handle pending signals and notify-resume requests */ | 283 284#if IS_ENABLED(CONFIG_PREEMPT) 285resume_kernel: 286 REG_L s0, TASK_TI_PREEMPT_COUNT(tp) 287 bnez s0, restore_all 288 REG_L s0, TASK_TI_FLAGS(tp) 289 andi s0, s0, _TIF_NEED_RESCHED 290 beqz s0, restore_all 291 call preempt_schedule_irq 292 j restore_all 293#endif 294 295work_pending: 296 /* Enter slow path for supplementary processing */ 297 la ra, ret_from_exception 298 andi s1, s0, _TIF_NEED_RESCHED 299 bnez s1, work_resched 300work_notifysig: 301 /* Handle pending signals and notify-resume requests */ |
290 csrs CSR_SSTATUS, SR_SIE /* Enable interrupts for do_notify_resume() */ | 302 csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */ |
291 move a0, sp /* pt_regs */ 292 move a1, s0 /* current_thread_info->flags */ 293 tail do_notify_resume 294work_resched: 295 tail schedule 296 297/* Slow paths for ptrace. */ 298handle_syscall_trace_enter: --- 82 unchanged lines hidden (view full) --- 381#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work." 382 addi tp, a1, TASK_TI 383#else 384 move tp, a1 385#endif 386 ret 387ENDPROC(__switch_to) 388 | 303 move a0, sp /* pt_regs */ 304 move a1, s0 /* current_thread_info->flags */ 305 tail do_notify_resume 306work_resched: 307 tail schedule 308 309/* Slow paths for ptrace. */ 310handle_syscall_trace_enter: --- 82 unchanged lines hidden (view full) --- 393#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work." 394 addi tp, a1, TASK_TI 395#else 396 move tp, a1 397#endif 398 ret 399ENDPROC(__switch_to) 400 |
401#ifndef CONFIG_MMU 402#define do_page_fault do_trap_unknown 403#endif 404 |
|
389 .section ".rodata" 390 /* Exception vector table */ 391ENTRY(excp_vect_table) 392 RISCV_PTR do_trap_insn_misaligned 393 RISCV_PTR do_trap_insn_fault 394 RISCV_PTR do_trap_insn_illegal 395 RISCV_PTR do_trap_break 396 RISCV_PTR do_trap_load_misaligned --- 5 unchanged lines hidden (view full) --- 402 RISCV_PTR do_trap_unknown 403 RISCV_PTR do_trap_ecall_m 404 RISCV_PTR do_page_fault /* instruction page fault */ 405 RISCV_PTR do_page_fault /* load page fault */ 406 RISCV_PTR do_trap_unknown 407 RISCV_PTR do_page_fault /* store page fault */ 408excp_vect_table_end: 409END(excp_vect_table) | 405 .section ".rodata" 406 /* Exception vector table */ 407ENTRY(excp_vect_table) 408 RISCV_PTR do_trap_insn_misaligned 409 RISCV_PTR do_trap_insn_fault 410 RISCV_PTR do_trap_insn_illegal 411 RISCV_PTR do_trap_break 412 RISCV_PTR do_trap_load_misaligned --- 5 unchanged lines hidden (view full) --- 418 RISCV_PTR do_trap_unknown 419 RISCV_PTR do_trap_ecall_m 420 RISCV_PTR do_page_fault /* instruction page fault */ 421 RISCV_PTR do_page_fault /* load page fault */ 422 RISCV_PTR do_trap_unknown 423 RISCV_PTR do_page_fault /* store page fault */ 424excp_vect_table_end: 425END(excp_vect_table) |
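Three slots of this table dispatch to do_page_fault (instruction, load, and store page faults, per the inline comments). On a nommu build there is no paging and no page-fault handler to link against, which is what the small #define block added above is for; the table still assembles, and a stray page-fault cause is reported as an unknown trap:

    #ifndef CONFIG_MMU
    #define do_page_fault do_trap_unknown   /* no MMU: treat as unknown trap */
    #endif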
426 427#ifndef CONFIG_MMU 428ENTRY(__user_rt_sigreturn) 429 li a7, __NR_rt_sigreturn 430 scall 431END(__user_rt_sigreturn) 432#endif |
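__user_rt_sigreturn is the sigreturn trampoline for nommu builds. With an MMU the equivalent stub lives in the vdso; without one, the signal-setup code presumably has to place these two instructions somewhere user-accessible (e.g. on the signal frame) and point the handler's return address at them (an assumption about the companion signal.c change, which is not shown here). scall is the older binutils spelling of ecall, so the stub simply re-enters handle_exception as the rt_sigreturn system call:

    li    a7, __NR_rt_sigreturn   # syscall number goes in a7 per the RISC-V ABI
    scall                         # == ecall: trap back into the kernel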
|