1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * S390 version 4 * Copyright IBM Corp. 1999 5 * Author(s): Hartmut Penner (hp@de.ibm.com) 6 * Ulrich Weigand (uweigand@de.ibm.com) 7 * 8 * Derived from "arch/i386/mm/fault.c" 9 * Copyright (C) 1995 Linus Torvalds 10 */ 11 12 #include <linux/kernel_stat.h> 13 #include <linux/mmu_context.h> 14 #include <linux/cpufeature.h> 15 #include <linux/perf_event.h> 16 #include <linux/signal.h> 17 #include <linux/sched.h> 18 #include <linux/sched/debug.h> 19 #include <linux/kernel.h> 20 #include <linux/errno.h> 21 #include <linux/string.h> 22 #include <linux/types.h> 23 #include <linux/ptrace.h> 24 #include <linux/mman.h> 25 #include <linux/mm.h> 26 #include <linux/compat.h> 27 #include <linux/smp.h> 28 #include <linux/kdebug.h> 29 #include <linux/init.h> 30 #include <linux/console.h> 31 #include <linux/extable.h> 32 #include <linux/hardirq.h> 33 #include <linux/kprobes.h> 34 #include <linux/uaccess.h> 35 #include <linux/hugetlb.h> 36 #include <linux/kfence.h> 37 #include <linux/pagewalk.h> 38 #include <asm/asm-extable.h> 39 #include <asm/asm-offsets.h> 40 #include <asm/ptrace.h> 41 #include <asm/fault.h> 42 #include <asm/diag.h> 43 #include <asm/gmap.h> 44 #include <asm/irq.h> 45 #include <asm/facility.h> 46 #include <asm/uv.h> 47 #include "../kernel/entry.h" 48 49 /* 50 * Find out which address space caused the exception. 51 */ 52 static bool is_kernel_fault(struct pt_regs *regs) 53 { 54 union teid teid = { .val = regs->int_parm_long }; 55 56 if (user_mode(regs)) 57 return false; 58 if (teid.as == PSW_BITS_AS_SECONDARY) 59 return false; 60 return true; 61 } 62 63 static unsigned long get_fault_address(struct pt_regs *regs) 64 { 65 union teid teid = { .val = regs->int_parm_long }; 66 67 return teid.addr * PAGE_SIZE; 68 } 69 70 static __always_inline bool fault_is_write(struct pt_regs *regs) 71 { 72 union teid teid = { .val = regs->int_parm_long }; 73 74 if (test_facility(75)) 75 return teid.fsi == TEID_FSI_STORE; 76 return false; 77 } 78 79 static void dump_pagetable(unsigned long asce, unsigned long address) 80 { 81 unsigned long entry, *table = __va(asce & _ASCE_ORIGIN); 82 83 pr_alert("AS:%016lx ", asce); 84 switch (asce & _ASCE_TYPE_MASK) { 85 case _ASCE_TYPE_REGION1: 86 table += (address & _REGION1_INDEX) >> _REGION1_SHIFT; 87 if (get_kernel_nofault(entry, table)) 88 goto bad; 89 pr_cont("R1:%016lx ", entry); 90 if (entry & _REGION_ENTRY_INVALID) 91 goto out; 92 table = __va(entry & _REGION_ENTRY_ORIGIN); 93 fallthrough; 94 case _ASCE_TYPE_REGION2: 95 table += (address & _REGION2_INDEX) >> _REGION2_SHIFT; 96 if (get_kernel_nofault(entry, table)) 97 goto bad; 98 pr_cont("R2:%016lx ", entry); 99 if (entry & _REGION_ENTRY_INVALID) 100 goto out; 101 table = __va(entry & _REGION_ENTRY_ORIGIN); 102 fallthrough; 103 case _ASCE_TYPE_REGION3: 104 table += (address & _REGION3_INDEX) >> _REGION3_SHIFT; 105 if (get_kernel_nofault(entry, table)) 106 goto bad; 107 pr_cont("R3:%016lx ", entry); 108 if (entry & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE)) 109 goto out; 110 table = __va(entry & _REGION_ENTRY_ORIGIN); 111 fallthrough; 112 case _ASCE_TYPE_SEGMENT: 113 table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT; 114 if (get_kernel_nofault(entry, table)) 115 goto bad; 116 pr_cont("S:%016lx ", entry); 117 if (entry & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE)) 118 goto out; 119 table = __va(entry & _SEGMENT_ENTRY_ORIGIN); 120 } 121 table += (address & _PAGE_INDEX) >> PAGE_SHIFT; 122 if (get_kernel_nofault(entry, table)) 123 goto bad; 124 pr_cont("P:%016lx ", entry); 125 out: 126 pr_cont("\n"); 127 return; 128 bad: 129 pr_cont("BAD\n"); 130 } 131 132 static void dump_fault_info(struct pt_regs *regs) 133 { 134 union teid teid = { .val = regs->int_parm_long }; 135 unsigned long asce; 136 137 pr_alert("Failing address: %016lx TEID: %016lx\n", 138 get_fault_address(regs), teid.val); 139 pr_alert("Fault in "); 140 switch (teid.as) { 141 case PSW_BITS_AS_HOME: 142 pr_cont("home space "); 143 break; 144 case PSW_BITS_AS_SECONDARY: 145 pr_cont("secondary space "); 146 break; 147 case PSW_BITS_AS_ACCREG: 148 pr_cont("access register "); 149 break; 150 case PSW_BITS_AS_PRIMARY: 151 pr_cont("primary space "); 152 break; 153 } 154 pr_cont("mode while using "); 155 if (is_kernel_fault(regs)) { 156 asce = get_lowcore()->kernel_asce.val; 157 pr_cont("kernel "); 158 } else { 159 asce = get_lowcore()->user_asce.val; 160 pr_cont("user "); 161 } 162 pr_cont("ASCE.\n"); 163 dump_pagetable(asce, get_fault_address(regs)); 164 } 165 166 int show_unhandled_signals = 1; 167 168 static const struct ctl_table s390_fault_sysctl_table[] = { 169 { 170 .procname = "userprocess_debug", 171 .data = &show_unhandled_signals, 172 .maxlen = sizeof(int), 173 .mode = 0644, 174 .proc_handler = proc_dointvec, 175 }, 176 }; 177 178 static int __init init_s390_fault_sysctls(void) 179 { 180 register_sysctl_init("kernel", s390_fault_sysctl_table); 181 return 0; 182 } 183 arch_initcall(init_s390_fault_sysctls); 184 185 void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault) 186 { 187 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); 188 189 if ((task_pid_nr(current) > 1) && !show_unhandled_signals) 190 return; 191 if (!unhandled_signal(current, signr)) 192 return; 193 if (!__ratelimit(&rs)) 194 return; 195 pr_alert("User process fault: interruption code %04x ilc:%d ", 196 regs->int_code & 0xffff, regs->int_code >> 17); 197 print_vma_addr(KERN_CONT "in ", regs->psw.addr); 198 pr_cont("\n"); 199 if (is_mm_fault) 200 dump_fault_info(regs); 201 show_regs(regs); 202 } 203 204 static void do_sigsegv(struct pt_regs *regs, int si_code) 205 { 206 report_user_fault(regs, SIGSEGV, 1); 207 force_sig_fault(SIGSEGV, si_code, (void __user *)get_fault_address(regs)); 208 } 209 210 static void handle_fault_error_nolock(struct pt_regs *regs, int si_code) 211 { 212 unsigned long address; 213 bool is_write; 214 215 if (user_mode(regs)) { 216 if (WARN_ON_ONCE(!si_code)) 217 si_code = SEGV_MAPERR; 218 return do_sigsegv(regs, si_code); 219 } 220 if (fixup_exception(regs)) 221 return; 222 if (is_kernel_fault(regs)) { 223 address = get_fault_address(regs); 224 is_write = fault_is_write(regs); 225 if (kfence_handle_page_fault(address, is_write, regs)) 226 return; 227 pr_alert("Unable to handle kernel pointer dereference in virtual kernel address space\n"); 228 } else { 229 pr_alert("Unable to handle kernel paging request in virtual user address space\n"); 230 } 231 dump_fault_info(regs); 232 die(regs, "Oops"); 233 } 234 235 static void handle_fault_error(struct pt_regs *regs, int si_code) 236 { 237 struct mm_struct *mm = current->mm; 238 239 mmap_read_unlock(mm); 240 handle_fault_error_nolock(regs, si_code); 241 } 242 243 static void do_sigbus(struct pt_regs *regs) 244 { 245 force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)get_fault_address(regs)); 246 } 247 248 /* 249 * This routine handles page faults. It determines the address, 250 * and the problem, and then passes it off to one of the appropriate 251 * routines. 252 * 253 * interruption code (int_code): 254 * 04 Protection -> Write-Protection (suppression) 255 * 10 Segment translation -> Not present (nullification) 256 * 11 Page translation -> Not present (nullification) 257 * 3b Region third trans. -> Not present (nullification) 258 */ 259 static void do_exception(struct pt_regs *regs, int access) 260 { 261 struct vm_area_struct *vma; 262 unsigned long address; 263 struct mm_struct *mm; 264 unsigned int flags; 265 vm_fault_t fault; 266 bool is_write; 267 268 /* 269 * The instruction that caused the program check has 270 * been nullified. Don't signal single step via SIGTRAP. 271 */ 272 clear_thread_flag(TIF_PER_TRAP); 273 if (kprobe_page_fault(regs, 14)) 274 return; 275 mm = current->mm; 276 address = get_fault_address(regs); 277 is_write = fault_is_write(regs); 278 if (is_kernel_fault(regs) || faulthandler_disabled() || !mm) 279 return handle_fault_error_nolock(regs, 0); 280 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); 281 flags = FAULT_FLAG_DEFAULT; 282 if (user_mode(regs)) 283 flags |= FAULT_FLAG_USER; 284 if (is_write) 285 access = VM_WRITE; 286 if (access == VM_WRITE) 287 flags |= FAULT_FLAG_WRITE; 288 if (!(flags & FAULT_FLAG_USER)) 289 goto lock_mmap; 290 vma = lock_vma_under_rcu(mm, address); 291 if (!vma) 292 goto lock_mmap; 293 if (!(vma->vm_flags & access)) { 294 vma_end_read(vma); 295 count_vm_vma_lock_event(VMA_LOCK_SUCCESS); 296 return handle_fault_error_nolock(regs, SEGV_ACCERR); 297 } 298 fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); 299 if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) 300 vma_end_read(vma); 301 if (!(fault & VM_FAULT_RETRY)) { 302 count_vm_vma_lock_event(VMA_LOCK_SUCCESS); 303 goto done; 304 } 305 count_vm_vma_lock_event(VMA_LOCK_RETRY); 306 if (fault & VM_FAULT_MAJOR) 307 flags |= FAULT_FLAG_TRIED; 308 /* Quick path to respond to signals */ 309 if (fault_signal_pending(fault, regs)) { 310 if (!user_mode(regs)) 311 handle_fault_error_nolock(regs, 0); 312 return; 313 } 314 lock_mmap: 315 retry: 316 vma = lock_mm_and_find_vma(mm, address, regs); 317 if (!vma) 318 return handle_fault_error_nolock(regs, SEGV_MAPERR); 319 if (unlikely(!(vma->vm_flags & access))) 320 return handle_fault_error(regs, SEGV_ACCERR); 321 fault = handle_mm_fault(vma, address, flags, regs); 322 if (fault_signal_pending(fault, regs)) { 323 if (!user_mode(regs)) 324 handle_fault_error_nolock(regs, 0); 325 return; 326 } 327 /* The fault is fully completed (including releasing mmap lock) */ 328 if (fault & VM_FAULT_COMPLETED) 329 return; 330 if (fault & VM_FAULT_RETRY) { 331 flags |= FAULT_FLAG_TRIED; 332 goto retry; 333 } 334 mmap_read_unlock(mm); 335 done: 336 if (!(fault & VM_FAULT_ERROR)) 337 return; 338 if (fault & VM_FAULT_OOM) { 339 if (!user_mode(regs)) 340 handle_fault_error_nolock(regs, 0); 341 else 342 pagefault_out_of_memory(); 343 } else if (fault & VM_FAULT_SIGSEGV) { 344 if (!user_mode(regs)) 345 handle_fault_error_nolock(regs, 0); 346 else 347 do_sigsegv(regs, SEGV_MAPERR); 348 } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | 349 VM_FAULT_HWPOISON_LARGE)) { 350 if (!user_mode(regs)) 351 handle_fault_error_nolock(regs, 0); 352 else 353 do_sigbus(regs); 354 } else { 355 pr_emerg("Unexpected fault flags: %08x\n", fault); 356 BUG(); 357 } 358 } 359 360 void do_protection_exception(struct pt_regs *regs) 361 { 362 union teid teid = { .val = regs->int_parm_long }; 363 364 /* 365 * Protection exceptions are suppressing, decrement psw address. 366 * The exception to this rule are aborted transactions, for these 367 * the PSW already points to the correct location. 368 */ 369 if (!(regs->int_code & 0x200)) 370 regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); 371 /* 372 * Check for low-address protection. This needs to be treated 373 * as a special case because the translation exception code 374 * field is not guaranteed to contain valid data in this case. 375 */ 376 if (unlikely(!teid.b61)) { 377 if (user_mode(regs)) { 378 /* Low-address protection in user mode: cannot happen */ 379 dump_fault_info(regs); 380 die(regs, "Low-address protection"); 381 } 382 /* 383 * Low-address protection in kernel mode means 384 * NULL pointer write access in kernel mode. 385 */ 386 return handle_fault_error_nolock(regs, 0); 387 } 388 if (unlikely(cpu_has_nx() && teid.b56)) { 389 regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK); 390 return handle_fault_error_nolock(regs, SEGV_ACCERR); 391 } 392 do_exception(regs, VM_WRITE); 393 } 394 NOKPROBE_SYMBOL(do_protection_exception); 395 396 void do_dat_exception(struct pt_regs *regs) 397 { 398 do_exception(regs, VM_ACCESS_FLAGS); 399 } 400 NOKPROBE_SYMBOL(do_dat_exception); 401 402 #if IS_ENABLED(CONFIG_PGSTE) 403 404 void do_secure_storage_access(struct pt_regs *regs) 405 { 406 union teid teid = { .val = regs->int_parm_long }; 407 unsigned long addr = get_fault_address(regs); 408 struct vm_area_struct *vma; 409 struct folio_walk fw; 410 struct mm_struct *mm; 411 struct folio *folio; 412 int rc; 413 414 /* 415 * Bit 61 indicates if the address is valid, if it is not the 416 * kernel should be stopped or SIGSEGV should be sent to the 417 * process. Bit 61 is not reliable without the misc UV feature, 418 * therefore this needs to be checked too. 419 */ 420 if (uv_has_feature(BIT_UV_FEAT_MISC) && !teid.b61) { 421 /* 422 * When this happens, userspace did something that it 423 * was not supposed to do, e.g. branching into secure 424 * memory. Trigger a segmentation fault. 425 */ 426 if (user_mode(regs)) { 427 send_sig(SIGSEGV, current, 0); 428 return; 429 } 430 /* 431 * The kernel should never run into this case and 432 * there is no way out of this situation. 433 */ 434 panic("Unexpected PGM 0x3d with TEID bit 61=0"); 435 } 436 if (is_kernel_fault(regs)) { 437 folio = phys_to_folio(addr); 438 if (unlikely(!folio_try_get(folio))) 439 return; 440 rc = arch_make_folio_accessible(folio); 441 folio_put(folio); 442 if (rc) 443 BUG(); 444 } else { 445 mm = current->mm; 446 mmap_read_lock(mm); 447 vma = find_vma(mm, addr); 448 if (!vma) 449 return handle_fault_error(regs, SEGV_MAPERR); 450 folio = folio_walk_start(&fw, vma, addr, 0); 451 if (!folio) { 452 mmap_read_unlock(mm); 453 return; 454 } 455 /* arch_make_folio_accessible() needs a raised refcount. */ 456 folio_get(folio); 457 rc = arch_make_folio_accessible(folio); 458 folio_put(folio); 459 folio_walk_end(&fw, vma); 460 if (rc) 461 send_sig(SIGSEGV, current, 0); 462 mmap_read_unlock(mm); 463 } 464 } 465 NOKPROBE_SYMBOL(do_secure_storage_access); 466 467 #endif /* CONFIG_PGSTE */ 468