// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>
#include <linux/kfence.h>
#include <linux/pkeys.h>

#include <asm/firmware.h>
#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
#include <asm/kup.h>
#include <asm/inst.h>


/*
 * do_page_fault error handling helpers
 */

static int
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception(SIGSEGV, regs, si_code, address);

	return 0;
}

static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
{
	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
}

static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code,
		      struct mm_struct *mm, struct vm_area_struct *vma)
{
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	if (mm)
		mmap_read_unlock(mm);
	else
		vma_end_read(vma);

	return __bad_area_nosemaphore(regs, address, si_code);
}

static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
				    struct mm_struct *mm,
				    struct vm_area_struct *vma)
{
	int pkey;

	/*
	 * We don't try to fetch the pkey from page table because reading
	 * page table without locking doesn't guarantee stable pte value.
	 * Hence the pkey value that we return to userspace can be different
	 * from the pkey that actually caused access error.
	 *
	 * It does *not* guarantee that the VMA we find here
	 * was the one that we faulted on.
	 *
	 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
	 * 2. T1   : set AMR to deny access to pkey=4, touches page
	 * 3. T1   : faults...
	 * 4. T2   : mprotect_key(foo, PAGE_SIZE, pkey=5);
	 * 5. T1   : enters fault handler, takes mmap_lock, etc...
	 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
	 *	     faulted on a pte with its pkey=4.
	 */
	pkey = vma_pkey(vma);

	if (mm)
		mmap_read_unlock(mm);
	else
		vma_end_read(vma);

	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception_pkey(regs, address, pkey);

	return 0;
}

static noinline int bad_access(struct pt_regs *regs, unsigned long address,
			       struct mm_struct *mm, struct vm_area_struct *vma)
{
	return __bad_area(regs, address, SEGV_ACCERR, mm, vma);
}

static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     vm_fault_t fault)
{
	if (!user_mode(regs))
		return SIGBUS;

	current->thread.trap_nr = BUS_ADRERR;
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		unsigned int lsb = 0; /* shutup gcc */

		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
		       current->comm, current->pid, address);

		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;

		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return 0;
	}

#endif
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
	return 0;
}

static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
			  vm_fault_t fault)
{
	/*
	 * Kernel page fault interrupted by SIGKILL. We have no reason to
	 * continue processing.
	 */
	if (fatal_signal_pending(current) && !user_mode(regs))
		return SIGKILL;

	/* Out of memory */
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, or some other thing happened to us that
		 * made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return SIGSEGV;
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			return do_sigbus(regs, addr, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			return bad_area_nosemaphore(regs, addr);
		else
			BUG();
	}
	return 0;
}

/* Is this a bad kernel fault ? */
static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
			     unsigned long address, bool is_write)
{
	int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;

	if (is_exec) {
		pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
				    address >= TASK_SIZE ? "exec-protected" : "user",
				    address,
				    from_kuid(&init_user_ns, current_uid()));

		// Kernel exec fault is always bad
		return true;
	}

	// Kernel fault on kernel address is bad
	if (address >= TASK_SIZE)
		return true;

	// Read/write fault blocked by KUAP is bad, it can never succeed.
	if (bad_kuap_fault(regs, address, is_write)) {
		pr_crit_ratelimited("Kernel attempted to %s user page (%lx) - exploit attempt? (uid: %d)\n",
				    str_write_read(is_write), address,
				    from_kuid(&init_user_ns, current_uid()));

		// Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad
		if (!search_exception_tables(regs->nip))
			return true;

		// Read/write fault in a valid region (the exception table search passed
		// above), but blocked by KUAP is bad, it can never succeed.
		return WARN(true, "Bug: %s fault blocked by KUAP!", is_write ? "Write" : "Read");
	}

	// What's left? Kernel fault on user and allowed by KUAP in the faulting context.
	return false;
}

static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey,
			      struct vm_area_struct *vma)
{
	/*
	 * Make sure to check the VMA so that we do not perform
	 * faults just to hit a pkey fault as soon as we fill in a
	 * page. Only called for current mm, hence foreign == 0
	 */
	if (!arch_vma_access_permitted(vma, is_write, is_exec, 0))
		return true;

	return false;
}

static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma)
{
	/*
	 * Allow execution from readable areas if the MMU does not
	 * provide separate controls over reading and executing.
	 *
	 * Note: That code used to not be enabled for 4xx/BookE.
	 * It is now as I/D cache coherency for these is done at
	 * set_pte_at() time and I see no reason why the test
	 * below wouldn't be valid on those processors. This -may-
	 * break programs compiled with a really old ABI though.
	 */
	if (is_exec) {
		return !(vma->vm_flags & VM_EXEC) &&
			(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
	}

	if (is_write) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return true;
		return false;
	}

	/*
	 * VM_READ, VM_WRITE and VM_EXEC may imply read permissions, as
	 * defined in protection_map[]. In that case Read faults can only be
	 * caused by a PROT_NONE mapping. However a non exec access on a
	 * VM_EXEC only mapping is invalid anyway, so report it as such.
	 */
	if (unlikely(!vma_is_accessible(vma)))
		return true;

	if ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)
		return true;

	/*
	 * We should ideally do the vma pkey access check here. But in the
	 * fault path, handle_mm_fault() also does the same check. To avoid
	 * these multiple checks, we skip it here and handle access error due
	 * to pkeys later.
	 */
	return false;
}

#ifdef CONFIG_PPC_SMLPAR
static inline void cmo_account_page_fault(void)
{
	if (firmware_has_feature(FW_FEATURE_CMO)) {
		u32 page_ins;

		preempt_disable();
		page_ins = be32_to_cpu(get_lppaca()->page_ins);
		page_ins += 1 << PAGE_FACTOR;
		get_lppaca()->page_ins = cpu_to_be32(page_ins);
		preempt_enable();
	}
}
#else
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */

static void sanity_check_fault(bool is_write, bool is_user,
			       unsigned long error_code, unsigned long address)
{
	/*
	 * Userspace trying to access kernel address, we get PROTFAULT for that.
	 */
	if (is_user && address >= TASK_SIZE) {
		if ((long)address == -1)
			return;

		pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
				    current->comm, current->pid, address,
				    from_kuid(&init_user_ns, current_uid()));
		return;
	}

	if (!IS_ENABLED(CONFIG_PPC_BOOK3S))
		return;

	/*
	 * For hash translation mode, we should never get a
	 * PROTFAULT. Any update to pte to reduce access will result in us
	 * removing the hash page table entry, thus resulting in a DSISR_NOHPTE
	 * fault instead of DSISR_PROTFAULT.
	 *
	 * A pte update to relax the access will not result in a hash page table
	 * entry invalidate and hence can result in DSISR_PROTFAULT.
	 * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
	 * the special !is_write in the below conditional.
	 *
	 * For platforms that don't support a coherent icache but do support a
	 * per page noexec bit, we set things up such that the D/I cache sync
	 * is done via a fault. But that is handled by the low level hash
	 * fault code (hash_page_do_lazy_icache()) and we should not reach
	 * here in that case.
	 *
	 * For wrong accesses that can result in PROTFAULT, the vma->vm_flags
	 * check above should handle those and hence we fall through to the
	 * bad_area handling correctly.
	 *
	 * For embedded platforms with per page exec support that don't have a
	 * coherent icache, we do get PROTFAULT and handle that D/I cache sync
	 * in set_pte_at() while taking the noexec/prot fault. Hence this
	 * WARN_ON is conditional on the server MMU.
	 *
	 * For radix, we can get a prot fault in the autonuma case, because the
	 * radix page table will have the pages marked noaccess for user.
	 */
	if (radix_enabled() || is_write)
		return;

	WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}

/*
 * Define the correct "is_write" bit in error_code based
 * on the processor family
 */
#ifdef CONFIG_BOOKE
#define page_fault_is_write(__err)	((__err) & ESR_DST)
#else
#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
#endif

#ifdef CONFIG_BOOKE
#define page_fault_is_bad(__err)	(0)
#elif defined(CONFIG_PPC_8xx)
#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
#elif defined(CONFIG_PPC64)
static int page_fault_is_bad(unsigned long err)
{
	unsigned long flag = DSISR_BAD_FAULT_64S;

	/*
	 * PAPR+ v2.11 § 14.15.3.4.1 (unreleased)
	 * If byte 0, bit 3 of pi-attribute-specifier-type in
	 * ibm,pi-features property is defined, ignore the DSI error
	 * which is caused by the paste instruction on the
	 * suspended NX window.
	 */
	if (mmu_has_feature(MMU_FTR_NX_DSI))
		flag &= ~DSISR_BAD_COPYPASTE;

	return err & flag;
}
#else
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
#endif

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault.
 * For 400-family processors the error_code parameter is ESR for a data fault,
 * 0 for an instruction fault.
 * For 64-bit processors, the error_code parameter is DSISR for a data access
 * fault, SRR1 & 0x08000000 for an instruction access fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
			    unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;
	int is_user = user_mode(regs);
	int is_write = page_fault_is_write(error_code);
	vm_fault_t fault, major = 0;
	bool kprobe_fault = kprobe_page_fault(regs, 11);

	if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
		return 0;

	if (unlikely(page_fault_is_bad(error_code))) {
		if (is_user) {
			_exception(SIGBUS, regs, BUS_OBJERR, address);
			return 0;
		}
		return SIGBUS;
	}

	/* Additional sanity check(s) */
	sanity_check_fault(is_write, is_user, error_code, address);

	/*
	 * The kernel should never take an execute fault nor should it
	 * take a page fault to a kernel address or a page fault to a user
	 * address outside of dedicated places.
	 *
	 * Rather than kfence directly reporting false negatives, search whether
	 * the NIP belongs to the fixup table for cases where fault could come
	 * from functions like copy_from_kernel_nofault().
	 */
	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) {
		if (is_kfence_address((void *)address) &&
		    !search_exception_tables(instruction_pointer(regs)) &&
		    kfence_handle_page_fault(address, is_write, regs))
			return 0;

		return SIGSEGV;
	}

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled then we must not take the fault
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		if (is_user)
			printk_ratelimited(KERN_ERR "Page fault in user mode"
					   " with faulthandler_disabled()=%d"
					   " mm=%p\n",
					   faulthandler_disabled(), mm);
		return bad_area_nosemaphore(regs, address);
	}

	interrupt_cond_local_irq_enable(regs);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * We want to do this outside mmap_lock, because reading code around nip
	 * can result in fault, which will cause a deadlock when called with
	 * mmap_lock held
	 */
	if (is_user)
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;
	if (is_exec)
		flags |= FAULT_FLAG_INSTRUCTION;

	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;

	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;

	if (unlikely(access_pkey_error(is_write, is_exec,
				       (error_code & DSISR_KEYFAULT), vma))) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		return bad_access_pkey(regs, address, NULL, vma);
	}

	if (unlikely(access_error(is_write, is_exec, vma))) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		return bad_access(regs, address, NULL, vma);
	}

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	if (fault & VM_FAULT_MAJOR)
		flags |= FAULT_FLAG_TRIED;

	if (fault_signal_pending(fault, regs))
		return user_mode(regs) ? 0 : SIGBUS;

lock_mmap:

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.
	 * All other faults represent errors in the kernel and should
	 * generate an OOPS. Unfortunately, in the case of an erroneous
	 * fault occurring in a code path which already holds mmap_lock
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table. lock_mm_and_find_vma() handles that logic.
	 */
retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (unlikely(!vma))
		return bad_area_nosemaphore(regs, address);

	if (unlikely(access_pkey_error(is_write, is_exec,
				       (error_code & DSISR_KEYFAULT), vma)))
		return bad_access_pkey(regs, address, mm, vma);

	if (unlikely(access_error(is_write, is_exec, vma)))
		return bad_access(regs, address, mm, vma);

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	major |= fault & VM_FAULT_MAJOR;

	if (fault_signal_pending(fault, regs))
		return user_mode(regs) ? 0 : SIGBUS;

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		goto out;

	/*
	 * Handle the retry right now, the mmap_lock has been released in that
	 * case.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	mmap_read_unlock(current->mm);

done:
	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

out:
	/*
	 * Major/minor page fault accounting.
	 */
	if (major)
		cmo_account_page_fault();

	return 0;
}
NOKPROBE_SYMBOL(___do_page_fault);

static __always_inline void __do_page_fault(struct pt_regs *regs)
{
	long err;

	err = ___do_page_fault(regs, regs->dar, regs->dsisr);
	if (unlikely(err))
		bad_page_fault(regs, err);
}

DEFINE_INTERRUPT_HANDLER(do_page_fault)
{
	__do_page_fault(regs);
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Same as do_page_fault but interrupt entry has already run in do_hash_fault */
void hash__do_page_fault(struct pt_regs *regs)
{
	__do_page_fault(regs);
}
NOKPROBE_SYMBOL(hash__do_page_fault);
#endif

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
static void __bad_page_fault(struct pt_regs *regs, int sig)
{
	int is_write = page_fault_is_write(regs->dsisr);
	const char *msg;

	/* kernel has accessed a bad area */

	if (regs->dar < PAGE_SIZE)
		msg = "Kernel NULL pointer dereference";
	else
		msg = "Unable to handle kernel data access";

	switch (TRAP(regs)) {
	case INTERRUPT_DATA_STORAGE:
	case INTERRUPT_H_DATA_STORAGE:
		pr_alert("BUG: %s on %s at 0x%08lx\n", msg,
			 str_write_read(is_write), regs->dar);
		break;
	case INTERRUPT_DATA_SEGMENT:
		pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar);
		break;
	case INTERRUPT_INST_STORAGE:
	case INTERRUPT_INST_SEGMENT:
		pr_alert("BUG: Unable to handle kernel instruction fetch%s",
			 regs->nip < PAGE_SIZE ?
" (NULL pointer?)\n" : "\n"); 638 break; 639 case INTERRUPT_ALIGNMENT: 640 pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n", 641 regs->dar); 642 break; 643 default: 644 pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n", 645 regs->dar); 646 break; 647 } 648 printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n", 649 regs->nip); 650 651 if (task_stack_end_corrupted(current)) 652 printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); 653 654 die("Kernel access of bad area", regs, sig); 655 } 656 657 void bad_page_fault(struct pt_regs *regs, int sig) 658 { 659 const struct exception_table_entry *entry; 660 661 /* Are we prepared to handle this fault? */ 662 entry = search_exception_tables(instruction_pointer(regs)); 663 if (entry) 664 instruction_pointer_set(regs, extable_fixup(entry)); 665 else 666 __bad_page_fault(regs, sig); 667 } 668 669 #ifdef CONFIG_PPC_BOOK3S_64 670 DEFINE_INTERRUPT_HANDLER(do_bad_page_fault_segv) 671 { 672 bad_page_fault(regs, SIGSEGV); 673 } 674 675 /* 676 * In radix, segment interrupts indicate the EA is not addressable by the 677 * page table geometry, so they are always sent here. 678 * 679 * In hash, this is called if do_slb_fault returns error. Typically it is 680 * because the EA was outside the region allowed by software. 681 */ 682 DEFINE_INTERRUPT_HANDLER(do_bad_segment_interrupt) 683 { 684 int err = regs->result; 685 686 if (err == -EFAULT) { 687 if (user_mode(regs)) 688 _exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar); 689 else 690 bad_page_fault(regs, SIGSEGV); 691 } else if (err == -EINVAL) { 692 unrecoverable_exception(regs); 693 } else { 694 BUG(); 695 } 696 } 697 #endif 698