// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */

#include <asm/pgtable.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>

/*
 * Decode of hardware exception sends us to one of several
 * entry points.  At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */
#define FLT_IFETCH     -1
#define FLT_LOAD        0
#define FLT_STORE       1


/*
 * Canonical page fault handler
 */
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int si_signo;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	const struct exception_table_entry *fixup;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/*
	 * If we're in an interrupt or have no user context,
	 * then must not take the fault.
	 */
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

	local_irq_enable();

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	/*
	 * Taken again if handle_mm_fault() returns VM_FAULT_RETRY after
	 * dropping mmap_sem.
	 */
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	/* Address space is OK.  Now check access rights. */
	si_code = SEGV_ACCERR;

	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

	fault = handle_mm_fault(vma, address, flags);

	if (fault_signal_pending(fault, regs))
		return;

	/* The most common case -- we are done. */
	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			if (fault & VM_FAULT_MAJOR)
				current->maj_flt++;
			else
				current->min_flt++;
			if (fault & VM_FAULT_RETRY) {
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}

		up_read(&mm->mmap_sem);
		return;
	}

	up_read(&mm->mmap_sem);

	/* Handle copyin/out exception cases */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	/* User-mode address is in the memory map, but we are
	 * unable to fix up the page fault.
	 */
	if (fault & VM_FAULT_SIGBUS) {
		si_signo = SIGBUS;
		si_code = BUS_ADRERR;
	}
	/* Address is not in the memory map */
	else {
		si_signo = SIGSEGV;
		si_code = SEGV_ACCERR;
	}
	force_sig_fault(si_signo, si_code, (void __user *)address);
	return;

bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}
	/* Kernel-mode fault falls through */

no_context:
	fixup = search_exception_tables(pt_elr(regs));
	if (fixup) {
		pt_set_elr(regs, fixup->fixup);
		return;
	}

	/* Things are looking very, very bad now */
	bust_spinlocks(1);
	printk(KERN_EMERG "Unable to handle kernel paging request at "
		"virtual address 0x%08lx, regs %p\n", address, regs);
	die("Bad Kernel VA", regs, SIGKILL);
}


void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}

void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}

void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}