// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/randomize_kstack.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/uprobes.h>
#include <asm/uprobes.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/entry-common.h>

#include <asm/asm-prototypes.h>
#include <asm/bug.h>
#include <asm/cfi.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/vector.h>
#include <asm/irq_stack.h>

/* When non-zero, unhandled fatal user signals are logged (rate-limited). */
int show_unhandled_signals = 1;

/* Serializes oops output from die() across CPUs. */
static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * Read one 16-bit instruction parcel at @insns without risking a fault.
 * Kernel-mode PCs are fetched with get_kernel_nofault(); user-mode PCs
 * are only readable for the current task, via copy_from_user_nofault().
 * Returns 0 on success, a negative error code otherwise.
 */
static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
{
	const void __user *uaddr = (__force const void __user *)insns;

	if (!user_mode(regs))
		return get_kernel_nofault(*val, insns);

	/* The user space code from other tasks cannot be accessed. */
	if (regs != task_pt_regs(current))
		return -EPERM;

	return copy_from_user_nofault(val, uaddr, sizeof(*val));
}

/*
 * Dump the instruction stream around the faulting PC: ten 16-bit parcels
 * before it and two starting at it, with the parcel at the PC itself
 * wrapped in parentheses.  Bails out with a diagnostic if any parcel
 * cannot be read.
 */
static void dump_instr(const char *loglvl, struct pt_regs *regs)
{
	/* 12 parcels of "xxxx ", two parentheses, and a trailing NUL. */
	char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
	const u16 *insns = (u16 *)instruction_pointer(regs);
	long bad;
	u16 val;
	int i;

	for (i = -10; i < 2; i++) {
		bad = copy_code(regs, &val, &insns[i]);
		if (!bad) {
			p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
		} else {
			printk("%sCode: Unable to access instruction at 0x%px.\n",
			       loglvl, &insns[i]);
			return;
		}
	}
	printk("%sCode: %s\n", loglvl, str);
}

/*
 * Report a fatal kernel error (oops): print registers, modules and the
 * instruction stream, notify the die chain, optionally crash-kexec, and
 * kill the current task unless a notifier claimed the event.
 * @regs may be NULL when no register context is available.
 */
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;
	int ret;
	long cause;
	unsigned long flags;

	oops_enter();

	/* One CPU at a time writes the oops report. */
	raw_spin_lock_irqsave(&die_lock, flags);
	console_verbose();
	bust_spinlocks(1);

	pr_emerg("%s [#%d]\n", str, ++die_counter);
	print_modules();
	if (regs) {
		show_regs(regs);
		dump_instr(KERN_EMERG, regs);
	}

	cause = regs ? regs->cause : -1;
	ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);

	if (kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irqrestore(&die_lock, flags);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}

/*
 * Deliver a fault signal to the current task for a user-mode trap,
 * logging a rate-limited diagnostic first if the signal is unhandled
 * and show_unhandled_signals is set.
 */
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
{
	struct task_struct *tsk = current;

	if (show_unhandled_signals && unhandled_signal(tsk, signo)
	    && printk_ratelimit()) {
		pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
			tsk->comm, task_pid_nr(tsk), signo, code, addr);
		print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
		pr_cont("\n");
		__show_regs(regs);
		dump_instr(KERN_INFO, regs);
	}

	force_sig_fault(signo, code, (void __user *)addr);
}

/*
 * Common error path: signal the task for user-mode traps; for
 * kernel-mode traps try an exception fixup, and die() if none applies.
 */
static void do_trap_error(struct pt_regs *regs, int signo, int code,
			  unsigned long addr, const char *str)
{
	current->thread.bad_cause = regs->cause;

	if (user_mode(regs)) {
		do_trap(regs, signo, code, addr);
	} else {
		if (!fixup_exception(regs))
			die(regs, str);
	}
}

#define __trap_section noinstr

/*
 * Generate a trap handler named @name that raises @signo/@code with
 * message "Oops - @str".  User-mode traps go through the normal
 * irqentry path with interrupts enabled; kernel-mode traps are treated
 * as NMI-like so they are safe from any context.
 */
#define DO_ERROR_INFO(name, signo, code, str)					\
asmlinkage __visible __trap_section void name(struct pt_regs *regs)		\
{										\
	if (user_mode(regs)) {							\
		irqentry_enter_from_user_mode(regs);				\
		local_irq_enable();						\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		local_irq_disable();						\
		irqentry_exit_to_user_mode(regs);				\
	} else {								\
		irqentry_state_t state = irqentry_nmi_enter(regs);		\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		irqentry_nmi_exit(regs, state);					\
	}									\
}

DO_ERROR_INFO(do_trap_unknown,
	SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_hardware_error,
	SIGBUS, BUS_MCEERR_AR, "hardware error");
DO_ERROR_INFO(do_trap_insn_misaligned,
	SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
	SIGSEGV, SEGV_ACCERR, "instruction access fault");

/*
 * Illegal-instruction trap.  For user mode, first give the vector
 * first-use handler a chance (an illegal-instruction trap may be the
 * task's first vector instruction); only signal SIGILL if it declines.
 */
asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *regs)
{
	bool handled;

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		local_irq_enable();

		handled = riscv_v_first_use_handler(regs);
		if (!handled)
			do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
				      "Oops - illegal instruction");

		local_irq_disable();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
			      "Oops - illegal instruction");

		irqentry_nmi_exit(regs, state);
	}
}

DO_ERROR_INFO(do_trap_load_fault,
	SIGSEGV, SEGV_ACCERR, "load access fault");

enum misaligned_access_type {
	MISALIGNED_STORE,
	MISALIGNED_LOAD,
};

/* Per-direction emulation handler and oops string for misaligned traps. */
static const struct {
	const char *type_str;
	int (*handler)(struct pt_regs *regs);
} misaligned_handler[] = {
	[MISALIGNED_STORE] = {
		.type_str = "Oops - store (or AMO) address misaligned",
		.handler = handle_misaligned_store,
	},
	[MISALIGNED_LOAD] = {
		.type_str = "Oops - load address misaligned",
		.handler = handle_misaligned_load,
	},
};

/*
 * Misaligned load/store trap.  Tries the emulation handler first and
 * only raises SIGBUS/BUS_ADRALN (or a kernel oops) if emulation fails.
 * NOTE: @state is only written on the kernel-mode path and only read on
 * the matching exit branch, so it is never used uninitialized.
 */
static void do_trap_misaligned(struct pt_regs *regs, enum misaligned_access_type type)
{
	irqentry_state_t state;

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		local_irq_enable();
	} else {
		state = irqentry_nmi_enter(regs);
	}

	if (misaligned_handler[type].handler(regs))
		do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
			      misaligned_handler[type].type_str);

	if (user_mode(regs)) {
		local_irq_disable();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_nmi_exit(regs, state);
	}
}

asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
	do_trap_misaligned(regs, MISALIGNED_LOAD);
}

asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
{
	do_trap_misaligned(regs, MISALIGNED_STORE);
}

DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,
	SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
	SIGILL, ILL_ILLTRP, "environment call from M-mode");

/*
 * Length in bytes of the break instruction at @pc (2 for compressed,
 * 4 for standard encoding), or 0 if the instruction cannot be read.
 */
static inline unsigned long get_break_insn_length(unsigned long pc)
{
	bug_insn_t insn;

	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;

	return GET_INSN_LENGTH(insn);
}

/* Dispatch a single-step event to uprobes (user) or kprobes (kernel). */
static bool probe_single_step_handler(struct pt_regs *regs)
{
	bool user = user_mode(regs);

	return user ? uprobe_single_step_handler(regs) : kprobe_single_step_handler(regs);
}

/* Dispatch a breakpoint event to uprobes (user) or kprobes (kernel). */
static bool probe_breakpoint_handler(struct pt_regs *regs)
{
	bool user = user_mode(regs);

	return user ? uprobe_breakpoint_handler(regs) : kprobe_breakpoint_handler(regs);
}

/*
 * Core ebreak handling: probes first, then SIGTRAP for user mode;
 * for kernel mode consult KGDB, then BUG()/CFI reporting (stepping
 * over the break insn on a WARN), and finally die() for a real BUG.
 */
void handle_break(struct pt_regs *regs)
{
	if (probe_single_step_handler(regs))
		return;

	if (probe_breakpoint_handler(regs))
		return;

	current->thread.bad_cause = regs->cause;

	if (user_mode(regs))
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
#ifdef CONFIG_KGDB
	else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
		 == NOTIFY_STOP)
		return;
#endif
	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN ||
		 handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN)
		regs->epc += get_break_insn_length(regs->epc);
	else
		die(regs, "Kernel BUG");
}

/* Breakpoint (ebreak) trap entry: wraps handle_break() in irqentry. */
asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		local_irq_enable();

		handle_break(regs);

		local_irq_disable();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		handle_break(regs);

		irqentry_nmi_exit(regs, state);
	}
}

/*
 * Environment call from U-mode: the system call entry point.
 * An ecall taken while already in kernel mode is a bug and is reported
 * as an illegal trap.  Stack protector is disabled because the stack
 * offset is randomized before the frame is fully set up.
 */
asmlinkage __visible __trap_section __no_stack_protector
void do_trap_ecall_u(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		long syscall = regs->a7;

		/* Skip the 4-byte ecall so sret returns past it. */
		regs->epc += 4;
		/* Preserve a0 for restart; default return is -ENOSYS. */
		regs->orig_a0 = regs->a0;
		regs->a0 = -ENOSYS;

		riscv_v_vstate_discard(regs);

		syscall = syscall_enter_from_user_mode(regs, syscall);

		add_random_kstack_offset();

		if (syscall >= 0 && syscall < NR_syscalls) {
			/* Clamp under speculation before the table index. */
			syscall = array_index_nospec(syscall, NR_syscalls);
			syscall_handler(regs, syscall);
		}

		syscall_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->epc,
			      "Oops - environment call from U-mode");

		irqentry_nmi_exit(regs, state);
	}

}

/* xtval codes reported for software-check (CFI) exceptions. */
#define CFI_TVAL_FCFI_CODE	2
#define CFI_TVAL_BCFI_CODE	3

/* handle cfi violations */
bool handle_user_cfi_violation(struct pt_regs *regs)
{
	unsigned long tval = csr_read(CSR_TVAL);
	bool is_fcfi = (tval == CFI_TVAL_FCFI_CODE && cpu_supports_indirect_br_lp_instr());
	bool is_bcfi = (tval == CFI_TVAL_BCFI_CODE && cpu_supports_shadow_stack());

	/*
	 * Handle uprobe event first. The probe point can be a valid target
	 * of indirect jumps or calls, in this case, forward cfi violation
	 * will be triggered instead of breakpoint exception. Clear ELP flag
	 * on sstatus image as well to avoid recurring fault.
	 */
	if (is_fcfi && probe_breakpoint_handler(regs)) {
		regs->status &= ~SR_ELP;
		return true;
	}

	if (is_fcfi || is_bcfi) {
		do_trap_error(regs, SIGSEGV, SEGV_CPERR, regs->epc,
			      "Oops - control flow violation");
		return true;
	}

	return false;
}

/*
 * software check exception is defined with risc-v cfi spec. Software check
 * exception is raised when:
 * a) An indirect branch doesn't land on 4 byte aligned PC or `lpad`
 *    instruction or `label` value programmed in `lpad` instr doesn't
 *    match with value setup in `x7`. reported code in `xtval` is 2.
 * b) `sspopchk` instruction finds a mismatch between top of shadow stack (ssp)
 *    and x1/x5. reported code in `xtval` is 3.
 */
asmlinkage __visible __trap_section void do_trap_software_check(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		/* not a cfi violation, then merge into flow of unknown trap handler */
		if (!handle_user_cfi_violation(regs))
			do_trap_unknown(regs);

		irqentry_exit_to_user_mode(regs);
	} else {
		/* sw check exception coming from kernel is a bug in kernel */
		die(regs, "Kernel BUG");
	}
}

#ifdef CONFIG_MMU
/*
 * Page-fault trap entry.  handle_page_fault() may enable interrupts;
 * they are disabled again before irqentry_exit() restores entry state.
 */
asmlinkage __visible noinstr void do_page_fault(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	handle_page_fault(regs);

	local_irq_disable();

	irqentry_exit(regs, state);
}
#endif

/* Run the architecture IRQ handler inside the irq_enter/exit_rcu bracket. */
static void noinstr handle_riscv_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

/*
 * Interrupt trap entry: switches to the per-CPU IRQ stack when the trap
 * arrived on the task stack and IRQ stacks are enabled.
 */
asmlinkage void noinstr do_irq(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
		call_on_irq_stack(regs, handle_riscv_irq);
	else
		handle_riscv_irq(regs);

	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
/*
 * True iff @pc points at a BUG trap instruction (32-bit or compressed
 * encoding).  Addresses below VMALLOC_START are rejected outright.
 */
int is_valid_bugaddr(unsigned long pc)
{
	bug_insn_t insn;

	if (pc < VMALLOC_START)
		return 0;
	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;
	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
		return (insn == __BUG_INSN_32);
	else
		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */

#ifdef CONFIG_VMAP_STACK
/* Per-CPU emergency stack used when the kernel stack overflows. */
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
		overflow_stack)__aligned(16);

/*
 * Called (on the overflow stack) when a trap cannot be handled on the
 * task stack.  Reports both stack ranges and panics; the trailing loop
 * is a last-resort halt in case panic() ever returns.
 */
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	console_verbose();

	pr_emerg("Insufficient stack space to handle exception!\n");
	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);
	panic("Kernel stack overflow");

	for (;;)
		wait_for_interrupt();
}
#endif