// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */

/*
 * RISC-V trap and exception handling: oops/die plumbing, per-cause trap
 * entry points (illegal instruction, misaligned access, breakpoints,
 * syscalls, CFI violations), IRQ dispatch, and stack-overflow reporting.
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/randomize_kstack.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/uprobes.h>
#include <asm/uprobes.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/entry-common.h>

#include <asm/asm-prototypes.h>
#include <asm/bug.h>
#include <asm/cfi.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/vector.h>
#include <asm/irq_stack.h>

int show_unhandled_signals = 1;

/* Serializes concurrent die() calls so oops output is not interleaved. */
static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * Fetch one 16-bit instruction parcel at *insns into *val, using the
 * fault-tolerant accessor appropriate for where the trap came from.
 * Returns 0 on success, negative error on inaccessible memory.
 */
static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
{
	const void __user *uaddr = (__force const void __user *)insns;

	if (!user_mode(regs))
		return get_kernel_nofault(*val, insns);

	/* The user space code from other tasks cannot be accessed. */
	if (regs != task_pt_regs(current))
		return -EPERM;

	return copy_from_user_nofault(val, uaddr, sizeof(*val));
}

/*
 * Dump the code surrounding the faulting PC: 10 parcels before and 2 at/after
 * the instruction pointer, with the parcel at the PC wrapped in parentheses.
 */
static void dump_instr(const char *loglvl, struct pt_regs *regs)
{
	/* 12 parcels of "xxxx " plus "()" around the PC parcel plus NUL. */
	char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
	const u16 *insns = (u16 *)instruction_pointer(regs);
	long bad;
	u16 val;
	int i;

	for (i = -10; i < 2; i++) {
		bad = copy_code(regs, &val, &insns[i]);
		if (!bad) {
			p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
		} else {
			printk("%sCode: Unable to access instruction at 0x%px.\n",
			       loglvl, &insns[i]);
			return;
		}
	}
	printk("%sCode: %s\n", loglvl, str);
}

/*
 * Print an oops (registers, modules, surrounding code), notify the die chain,
 * optionally kexec into a crash kernel, and kill the task unless a notifier
 * claimed the event. regs may be NULL when no register state is available.
 */
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;
	int ret;
	long cause;
	unsigned long flags;

	oops_enter();

	raw_spin_lock_irqsave(&die_lock, flags);
	console_verbose();
	bust_spinlocks(1);

	pr_emerg("%s [#%d]\n", str, ++die_counter);
	print_modules();
	if (regs) {
		show_regs(regs);
		dump_instr(KERN_EMERG, regs);
	}

	cause = regs ? regs->cause : -1;
	ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);

	if (kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irqrestore(&die_lock, flags);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}

/*
 * Deliver a fault signal (signo/code at fault address addr) to current,
 * optionally logging a rate-limited diagnostic for unhandled signals.
 */
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
{
	struct task_struct *tsk = current;

	if (show_unhandled_signals && unhandled_signal(tsk, signo)
	    && printk_ratelimit()) {
		pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
			tsk->comm, task_pid_nr(tsk), signo, code, addr);
		print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
		pr_cont("\n");
		__show_regs(regs);
		dump_instr(KERN_INFO, regs);
	}

	force_sig_fault(signo, code, (void __user *)addr);
}

/*
 * Common trap-error path: signal user tasks, or for kernel-mode faults try
 * an exception-table fixup before declaring an oops.
 */
static void do_trap_error(struct pt_regs *regs, int signo, int code,
			  unsigned long addr, const char *str)
{
	current->thread.bad_cause = regs->cause;

	if (user_mode(regs)) {
		do_trap(regs, signo, code, addr);
	} else {
		if (!fixup_exception(regs))
			die(regs, str);
	}
}

/*
 * On XIP kernels with alternatives, trap handlers must live in a RAM-resident
 * section (".xip.traps"); otherwise plain noinstr suffices.
 */
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section __noinstr_section(".xip.traps")
#else
#define __trap_section noinstr
#endif

/*
 * Generate a trap entry point that routes to do_trap_error() with the proper
 * generic-entry bracketing: user traps take the irqentry user path with IRQs
 * enabled across the handler; kernel traps are treated as NMI-like so they
 * are safe even from non-preemptible or IRQ-disabled contexts.
 */
#define DO_ERROR_INFO(name, signo, code, str)					\
asmlinkage __visible __trap_section void name(struct pt_regs *regs)		\
{										\
	if (user_mode(regs)) {							\
		irqentry_enter_from_user_mode(regs);				\
		local_irq_enable();						\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		local_irq_disable();						\
		irqentry_exit_to_user_mode(regs);				\
	} else {								\
		irqentry_state_t state = irqentry_nmi_enter(regs);		\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		irqentry_nmi_exit(regs, state);					\
	}									\
}

DO_ERROR_INFO(do_trap_unknown,
	SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_insn_misaligned,
	SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
	SIGSEGV, SEGV_ACCERR, "instruction access fault");

/*
 * Illegal-instruction trap. For user mode, first give the vector first-use
 * handler a chance (lazy V-extension enabling); only signal if unhandled.
 * Kernel-mode illegal instructions go straight to the error path.
 */
asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *regs)
{
	bool handled;

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		local_irq_enable();

		handled = riscv_v_first_use_handler(regs);
		if (!handled)
			do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
				      "Oops - illegal instruction");

		local_irq_disable();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
			      "Oops - illegal instruction");

		irqentry_nmi_exit(regs, state);
	}
}

DO_ERROR_INFO(do_trap_load_fault,
	SIGSEGV, SEGV_ACCERR, "load access fault");

enum misaligned_access_type {
	MISALIGNED_STORE,
	MISALIGNED_LOAD,
};

/* Per-type oops string and software-emulation handler for misaligned access. */
static const struct {
	const char *type_str;
	int (*handler)(struct pt_regs *regs);
} misaligned_handler[] = {
	[MISALIGNED_STORE] = {
		.type_str = "Oops - store (or AMO) address misaligned",
		.handler = handle_misaligned_store,
	},
	[MISALIGNED_LOAD] = {
		.type_str = "Oops - load address misaligned",
		.handler = handle_misaligned_load,
	},
};

/*
 * Shared body of the misaligned load/store traps: try software emulation
 * first; only raise SIGBUS/oops if the emulation handler reports failure.
 * Entry/exit bracketing mirrors DO_ERROR_INFO.
 */
static void do_trap_misaligned(struct pt_regs *regs, enum misaligned_access_type type)
{
	irqentry_state_t state;

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		local_irq_enable();
	} else {
		state = irqentry_nmi_enter(regs);
	}

	if (misaligned_handler[type].handler(regs))
		do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
			      misaligned_handler[type].type_str);

	if (user_mode(regs)) {
		local_irq_disable();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_nmi_exit(regs, state);
	}
}

asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
	do_trap_misaligned(regs, MISALIGNED_LOAD);
}

asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
{
	do_trap_misaligned(regs, MISALIGNED_STORE);
}

DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,
	SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
	SIGILL, ILL_ILLTRP, "environment call from M-mode");

/*
 * Length in bytes of the (compressed or full-size) instruction at pc,
 * or 0 if the instruction cannot be read.
 */
static inline unsigned long get_break_insn_length(unsigned long pc)
{
	bug_insn_t insn;

	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;

	return GET_INSN_LENGTH(insn);
}

/* Dispatch a single-step event to uprobes (user) or kprobes (kernel). */
static bool probe_single_step_handler(struct pt_regs *regs)
{
	bool user = user_mode(regs);

	return user ? uprobe_single_step_handler(regs) : kprobe_single_step_handler(regs);
}

/* Dispatch a breakpoint event to uprobes (user) or kprobes (kernel). */
static bool probe_breakpoint_handler(struct pt_regs *regs)
{
	bool user = user_mode(regs);

	return user ? uprobe_breakpoint_handler(regs) : kprobe_breakpoint_handler(regs);
}

/*
 * EBREAK handling, in priority order: probe single-step, probe breakpoint,
 * user SIGTRAP, KGDB notifier, then kernel WARN/CFI (skip the break insn and
 * continue) and finally BUG (die).
 */
void handle_break(struct pt_regs *regs)
{
	if (probe_single_step_handler(regs))
		return;

	if (probe_breakpoint_handler(regs))
		return;

	current->thread.bad_cause = regs->cause;

	if (user_mode(regs))
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
#ifdef CONFIG_KGDB
	else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
								== NOTIFY_STOP)
		return;
#endif
	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN ||
		 handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN)
		/* Warning only: step past the break instruction and resume. */
		regs->epc += get_break_insn_length(regs->epc);
	else
		die(regs, "Kernel BUG");
}

/* Breakpoint trap entry point; bracketing mirrors DO_ERROR_INFO. */
asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		local_irq_enable();

		handle_break(regs);

		local_irq_disable();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		handle_break(regs);

		irqentry_nmi_exit(regs, state);
	}
}

/*
 * Environment call from U-mode: the system call entry point. An ECALL taken
 * from kernel mode with this cause is a bug and is treated as an unknown trap.
 * __no_stack_protector because the random kstack offset changes the stack
 * layout under the canary.
 */
asmlinkage __visible __trap_section  __no_stack_protector
void do_trap_ecall_u(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		long syscall = regs->a7;

		/* ECALL is always 4 bytes; resume after it on return. */
		regs->epc += 4;
		/* Preserve a0 for restart; preset -ENOSYS for bad numbers. */
		regs->orig_a0 = regs->a0;
		regs->a0 = -ENOSYS;

		riscv_v_vstate_discard(regs);

		syscall = syscall_enter_from_user_mode(regs, syscall);

		add_random_kstack_offset();

		if (syscall >= 0 && syscall < NR_syscalls) {
			/* Clamp under speculation before table indexing. */
			syscall = array_index_nospec(syscall, NR_syscalls);
			syscall_handler(regs, syscall);
		}

		/*
		 * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
		 * so the maximum stack offset is 1k bytes (10 bits).
		 *
		 * The actual entropy will be further reduced by the compiler when
		 * applying stack alignment constraints: 16-byte (i.e. 4-bit) aligned
		 * for RV32I or RV64I.
		 *
		 * The resulting 6 bits of entropy is seen in SP[9:4].
		 */
		choose_random_kstack_offset(get_random_u16());

		syscall_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->epc,
			      "Oops - environment call from U-mode");

		irqentry_nmi_exit(regs, state);
	}

}

/* xtval codes reported for software-check (CFI) exceptions. */
#define CFI_TVAL_FCFI_CODE	2
#define CFI_TVAL_BCFI_CODE	3
/* handle cfi violations */
bool handle_user_cfi_violation(struct pt_regs *regs)
{
	unsigned long tval = csr_read(CSR_TVAL);
	bool is_fcfi = (tval == CFI_TVAL_FCFI_CODE && cpu_supports_indirect_br_lp_instr());
	bool is_bcfi = (tval == CFI_TVAL_BCFI_CODE && cpu_supports_shadow_stack());

	/*
	 * Handle uprobe event first. The probe point can be a valid target
	 * of indirect jumps or calls, in this case, forward cfi violation
	 * will be triggered instead of breakpoint exception. Clear ELP flag
	 * on sstatus image as well to avoid recurring fault.
	 */
	if (is_fcfi && probe_breakpoint_handler(regs)) {
		regs->status &= ~SR_ELP;
		return true;
	}

	if (is_fcfi || is_bcfi) {
		do_trap_error(regs, SIGSEGV, SEGV_CPERR, regs->epc,
			      "Oops - control flow violation");
		return true;
	}

	return false;
}

/*
 * software check exception is defined with risc-v cfi spec. Software check
 * exception is raised when:
 * a) An indirect branch doesn't land on 4 byte aligned PC or `lpad`
 *    instruction or `label` value programmed in `lpad` instr doesn't
 *    match with value setup in `x7`. reported code in `xtval` is 2.
 * b) `sspopchk` instruction finds a mismatch between top of shadow stack (ssp)
 *    and x1/x5. reported code in `xtval` is 3.
 */
asmlinkage __visible __trap_section void do_trap_software_check(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		/* not a cfi violation, then merge into flow of unknown trap handler */
		if (!handle_user_cfi_violation(regs))
			do_trap_unknown(regs);

		irqentry_exit_to_user_mode(regs);
	} else {
		/* sw check exception coming from kernel is a bug in kernel */
		die(regs, "Kernel BUG");
	}
}

#ifdef CONFIG_MMU
/* Page-fault trap entry: delegates to the MM fault handler in fault.c. */
asmlinkage __visible noinstr void do_page_fault(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	handle_page_fault(regs);

	/* handle_page_fault() may enable IRQs; exit path requires them off. */
	local_irq_disable();

	irqentry_exit(regs, state);
}
#endif

/* Run the architecture IRQ handler inside the generic IRQ/RCU bookkeeping. */
static void noinstr handle_riscv_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

/*
 * Interrupt trap entry: switch to the dedicated IRQ stack when still on the
 * task stack (avoids deep nesting on the thread stack), then dispatch.
 */
asmlinkage void noinstr do_irq(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
		call_on_irq_stack(regs, handle_riscv_irq);
	else
		handle_riscv_irq(regs);

	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
/*
 * True iff pc plausibly points at a BUG trap instruction: must be a kernel
 * address (>= VMALLOC_START) holding either the 32-bit or the compressed
 * 16-bit EBREAK encoding used by BUG().
 */
int is_valid_bugaddr(unsigned long pc)
{
	bug_insn_t insn;

	if (pc < VMALLOC_START)
		return 0;
	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;
	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
		return (insn == __BUG_INSN_32);
	else
		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */

#ifdef CONFIG_VMAP_STACK
/*
 * Per-CPU emergency stack used when the VMAP'd task stack overflows, so the
 * overflow report itself has a valid stack to run on.
 */
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
		overflow_stack)__aligned(16);

/*
 * Called (on the overflow stack) when a stack overflow is detected on trap
 * entry: report both stack ranges and panic. Does not return.
 */
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	console_verbose();

	pr_emerg("Insufficient stack space to handle exception!\n");
	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
			tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
			ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);
	panic("Kernel stack overflow");

	/* Unreachable after panic(); belt-and-braces halt. */
	for (;;)
		wait_for_interrupt();
}
#endif