// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/uprobes.h>
#include <asm/uprobes.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/entry-common.h>

#include <asm/asm-prototypes.h>
#include <asm/bug.h>
#include <asm/cfi.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/vector.h>
#include <asm/irq_stack.h>

int show_unhandled_signals = 1;

static DEFINE_SPINLOCK(die_lock);

static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
{
	const void __user *uaddr = (__force const void __user *)insns;

	if (!user_mode(regs))
		return get_kernel_nofault(*val, insns);

	/* The user space code from other tasks cannot be accessed. */
	if (regs != task_pt_regs(current))
		return -EPERM;

	return copy_from_user_nofault(val, uaddr, sizeof(*val));
}

static void dump_instr(const char *loglvl, struct pt_regs *regs)
{
	char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
	const u16 *insns = (u16 *)instruction_pointer(regs);
	long bad;
	u16 val;
	int i;

	for (i = -10; i < 2; i++) {
		bad = copy_code(regs, &val, &insns[i]);
		if (!bad) {
			p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
		} else {
			printk("%sCode: Unable to access instruction at 0x%px.\n",
			       loglvl, &insns[i]);
			return;
		}
	}
	printk("%sCode: %s\n", loglvl, str);
}

void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;
	int ret;
	long cause;
	unsigned long flags;

	oops_enter();

	spin_lock_irqsave(&die_lock, flags);
	console_verbose();
	bust_spinlocks(1);

	pr_emerg("%s [#%d]\n", str, ++die_counter);
	print_modules();
	if (regs) {
		show_regs(regs);
		dump_instr(KERN_EMERG, regs);
	}

	cause = regs ? regs->cause : -1;
	ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);

	if (kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&die_lock, flags);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}

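/*
 * Deliver a fault signal to the current task and, when the signal would
 * otherwise go unhandled, log the faulting context (registers and code
 * bytes) at a rate-limited pace.
 */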
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
{
	struct task_struct *tsk = current;

	if (show_unhandled_signals && unhandled_signal(tsk, signo)
	    && printk_ratelimit()) {
		pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
			tsk->comm, task_pid_nr(tsk), signo, code, addr);
		print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
		pr_cont("\n");
		__show_regs(regs);
		dump_instr(KERN_EMERG, regs);
	}

	force_sig_fault(signo, code, (void __user *)addr);
}

static void do_trap_error(struct pt_regs *regs, int signo, int code,
			  unsigned long addr, const char *str)
{
	current->thread.bad_cause = regs->cause;

	if (user_mode(regs)) {
		do_trap(regs, signo, code, addr);
	} else {
		if (!fixup_exception(regs))
			die(regs, str);
	}
}

#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section __noinstr_section(".xip.traps")
#else
#define __trap_section noinstr
#endif
#define DO_ERROR_INFO(name, signo, code, str)					\
asmlinkage __visible __trap_section void name(struct pt_regs *regs)		\
{										\
	if (user_mode(regs)) {							\
		irqentry_enter_from_user_mode(regs);				\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		irqentry_exit_to_user_mode(regs);				\
	} else {								\
		irqentry_state_t state = irqentry_nmi_enter(regs);		\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		irqentry_nmi_exit(regs, state);					\
	}									\
}

DO_ERROR_INFO(do_trap_unknown,
	SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_insn_misaligned,
	SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
	SIGSEGV, SEGV_ACCERR, "instruction access fault");

asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *regs)
{
	bool handled;

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		local_irq_enable();

		handled = riscv_v_first_use_handler(regs);

		local_irq_disable();

		if (!handled)
			do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
				      "Oops - illegal instruction");

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
			      "Oops - illegal instruction");

		irqentry_nmi_exit(regs, state);
	}
}

DO_ERROR_INFO(do_trap_load_fault,
	SIGSEGV, SEGV_ACCERR, "load access fault");

asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		if (handle_misaligned_load(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
				      "Oops - load address misaligned");

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		if (handle_misaligned_load(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
				      "Oops - load address misaligned");

		irqentry_nmi_exit(regs, state);
	}
}

asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		if (handle_misaligned_store(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
				      "Oops - store (or AMO) address misaligned");

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		if (handle_misaligned_store(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
				      "Oops - store (or AMO) address misaligned");

		irqentry_nmi_exit(regs, state);
	}
}
DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,
	SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
	SIGILL, ILL_ILLTRP, "environment call from M-mode");

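/*
 * The ebreak instruction is shared by several consumers: kprobe/uprobe
 * single-step and breakpoint handling, user-space breakpoints (SIGTRAP),
 * KGDB, and the BUG()/WARN()/CFI traps.  handle_break() tries them in that
 * order and falls back to die() for an unrecognised kernel breakpoint.
 */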
static inline unsigned long get_break_insn_length(unsigned long pc)
{
	bug_insn_t insn;

	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;

	return GET_INSN_LENGTH(insn);
}

static bool probe_single_step_handler(struct pt_regs *regs)
{
	bool user = user_mode(regs);

	return user ? uprobe_single_step_handler(regs) : kprobe_single_step_handler(regs);
}

static bool probe_breakpoint_handler(struct pt_regs *regs)
{
	bool user = user_mode(regs);

	return user ? uprobe_breakpoint_handler(regs) : kprobe_breakpoint_handler(regs);
}

void handle_break(struct pt_regs *regs)
{
	if (probe_single_step_handler(regs))
		return;

	if (probe_breakpoint_handler(regs))
		return;

	current->thread.bad_cause = regs->cause;

	if (user_mode(regs))
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
#ifdef CONFIG_KGDB
	else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
								== NOTIFY_STOP)
		return;
#endif
	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN ||
		 handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN)
		regs->epc += get_break_insn_length(regs->epc);
	else
		die(regs, "Kernel BUG");
}

asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		handle_break(regs);

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		handle_break(regs);

		irqentry_nmi_exit(regs, state);
	}
}

asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		long syscall = regs->a7;

		regs->epc += 4;
		regs->orig_a0 = regs->a0;

		riscv_v_vstate_discard(regs);

		syscall = syscall_enter_from_user_mode(regs, syscall);

		if (syscall >= 0 && syscall < NR_syscalls)
			syscall_handler(regs, syscall);
		else if (syscall != -1)
			regs->a0 = -ENOSYS;

		syscall_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->epc,
			      "Oops - environment call from U-mode");

		irqentry_nmi_exit(regs, state);
	}

}

#ifdef CONFIG_MMU
asmlinkage __visible noinstr void do_page_fault(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	handle_page_fault(regs);

	local_irq_disable();

	irqentry_exit(regs, state);
}
#endif

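/*
 * Interrupt entry: do_irq() runs the handler on the separate IRQ stack when
 * one is configured and we are still on the task stack, otherwise it handles
 * the interrupt in place via handle_riscv_irq().
 */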
static void noinstr handle_riscv_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

asmlinkage void noinstr do_irq(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
		call_on_irq_stack(regs, handle_riscv_irq);
	else
		handle_riscv_irq(regs);

	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long pc)
{
	bug_insn_t insn;

	if (pc < VMALLOC_START)
		return 0;
	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;
	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
		return (insn == __BUG_INSN_32);
	else
		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
		overflow_stack)__aligned(16);

asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	console_verbose();

	pr_emerg("Insufficient stack space to handle exception!\n");
	pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);
	panic("Kernel stack overflow");

	for (;;)
		wait_for_interrupt();
}
#endif