// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
#include <asm/current.h>
#include <asm/tlbflush.h>
#include <arch.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>

/*
 * Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
 * segv().
 */
int handle_page_fault(unsigned long address, unsigned long ip,
		      int is_write, int is_user, int *code_out)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	pmd_t *pmd;
	pte_t *pte;
	int err = -EFAULT;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	*code_out = SEGV_MAPERR;

	/*
	 * If the fault happened with pagefaults disabled, don't take the
	 * fault, just fail.
	 */
	if (faulthandler_disabled())
		goto out_nosemaphore;

	if (is_user)
		flags |= FAULT_FLAG_USER;
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto out;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto out;
	if (is_user && !ARCH_IS_STACKGROW(address))
		goto out;
	vma = expand_stack(mm, address);
	if (!vma)
		goto out_nosemaphore;

good_area:
	*code_out = SEGV_ACCERR;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto out;
		flags |= FAULT_FLAG_WRITE;
	} else {
		/* Don't require VM_READ|VM_EXEC for write faults! */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto out;
	}

	do {
		vm_fault_t fault;

		fault = handle_mm_fault(vma, address, flags, NULL);

		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
			goto out_nosemaphore;

		/* The fault is fully completed (including releasing mmap lock) */
		if (fault & VM_FAULT_COMPLETED)
			return 0;

		if (unlikely(fault & VM_FAULT_ERROR)) {
			if (fault & VM_FAULT_OOM) {
				goto out_of_memory;
			} else if (fault & VM_FAULT_SIGSEGV) {
				goto out;
			} else if (fault & VM_FAULT_SIGBUS) {
				err = -EACCES;
				goto out;
			}
			BUG();
		}
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			goto retry;
		}

		pmd = pmd_off(mm, address);
		pte = pte_offset_kernel(pmd, address);
	} while (!pte_present(*pte));
	err = 0;
	/*
	 * The below warning was added in place of
	 *	pte_mkyoung(); if (is_write) pte_mkdirty();
	 * If it's triggered, we'd normally see a hang here (a clean pte is
	 * marked read-only to emulate the dirty bit).
	 * However, the generic code can mark a PTE writable but clean on a
	 * concurrent read fault, triggering this harmlessly. So comment it out.
	 */
#if 0
	WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
#endif
	flush_tlb_page(vma, address);
out:
	mmap_read_unlock(mm);
out_nosemaphore:
	return err;

out_of_memory:
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
	mmap_read_unlock(mm);
	if (!is_user)
		goto out_nosemaphore;
	pagefault_out_of_memory();
	return 0;
}

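/*
 * Print a rate-limited "segfault at ..." diagnostic (faulting address, IP,
 * SP, error code and the VMA the IP falls in) for a SIGSEGV that the task
 * is not going to handle itself.
 */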
static void show_segv_info(struct uml_pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct faultinfo *fi = UPT_FAULTINFO(regs);

	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %x",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
		(void *)UPT_IP(regs), (void *)UPT_SP(regs),
		fi->error_code);

	print_vma_addr(KERN_CONT " in ", UPT_IP(regs));
	printk(KERN_CONT "\n");
}

static void bad_segv(struct faultinfo fi, unsigned long ip)
{
	current->thread.arch.faultinfo = fi;
	force_sig_fault(SIGSEGV, SEGV_ACCERR, (void __user *) FAULT_ADDRESS(fi));
}

void fatal_sigsegv(void)
{
	force_fatal_sig(SIGSEGV);
	do_signal(&current->thread.regs);
	/*
	 * This is to tell gcc that we're not returning - do_signal
	 * can, in general, return, but in this case it won't, since
	 * we just queued a fatal SIGSEGV.
	 */
	os_dump_core();
}

/**
 * segv_handler() - the SIGSEGV handler
 * @sig: the signal number
 * @unused_si: the signal info struct; unused in this handler
 * @regs: the ptrace register information
 *
 * The handler first extracts the faultinfo from the UML ptrace regs struct.
 * If the fault did not happen in a UML userspace process, bad_segv is called.
 * Otherwise the signal happened in a cloned userspace process, so it is
 * handled by segv().
 */
void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	struct faultinfo *fi = UPT_FAULTINFO(regs);

	if (UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)) {
		show_segv_info(regs);
		bad_segv(*fi, UPT_IP(regs));
		return;
	}
	segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs);
}

/*
 * We give a *copy* of the faultinfo in the regs to segv.
 * This must be done, since nesting SEGVs could overwrite
 * the info in the regs. A pointer to the info then would
 * give us bad data!
 */
unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
		   struct uml_pt_regs *regs)
{
	jmp_buf *catcher;
	int si_code;
	int err;
	int is_write = FAULT_WRITE(fi);
	unsigned long address = FAULT_ADDRESS(fi);

	if (!is_user && regs)
		current->thread.segv_regs = container_of(regs, struct pt_regs, regs);

	if (!is_user && (address >= start_vm) && (address < end_vm)) {
		flush_tlb_kernel_vm();
		goto out;
	}
	else if (current->mm == NULL) {
		show_regs(container_of(regs, struct pt_regs, regs));
		panic("Segfault with no mm");
	}
	else if (!is_user && address > PAGE_SIZE && address < TASK_SIZE) {
		show_regs(container_of(regs, struct pt_regs, regs));
		panic("Kernel tried to access user memory at addr 0x%lx, ip 0x%lx",
		      address, ip);
	}

	if (SEGV_IS_FIXABLE(&fi))
		err = handle_page_fault(address, ip, is_write, is_user,
					&si_code);
	else {
		err = -EFAULT;
		/*
		 * A thread accessed NULL; we get a fault, but CR2 is invalid.
		 * This code was used by __do_copy_from_user() in TT mode.
		 * XXX tt mode is gone, so maybe this isn't needed any more
		 */
		address = 0;
	}

	catcher = current->thread.fault_catcher;
	if (!err)
		goto out;
	else if (catcher != NULL) {
		current->thread.fault_addr = (void *) address;
		UML_LONGJMP(catcher, 1);
	}
	else if (current->thread.fault_addr != NULL)
		panic("fault_addr set but no fault catcher");
	else if (!is_user && arch_fixup(ip, regs))
		goto out;

	if (!is_user) {
		show_regs(container_of(regs, struct pt_regs, regs));
		panic("Kernel mode fault at addr 0x%lx, ip 0x%lx",
		      address, ip);
	}

	show_segv_info(regs);

	if (err == -EACCES) {
		current->thread.arch.faultinfo = fi;
		force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
	} else {
		BUG_ON(err != -EFAULT);
		current->thread.arch.faultinfo = fi;
		force_sig_fault(SIGSEGV, si_code, (void __user *) address);
	}

out:
	if (regs)
		current->thread.segv_regs = NULL;

	return 0;
}

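/*
 * Relay a host signal to the current userspace process. Signals that
 * arrive in kernel mode are fatal. For userspace, the siginfo is rebuilt
 * from the faultinfo when the layout is a known fault layout and si_errno
 * is zero; otherwise the signal is forced without relaying any of the
 * host-provided siginfo fields.
 */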
void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
{
	int code, err;

	if (!UPT_IS_USER(regs)) {
		if (sig == SIGBUS)
			printk(KERN_ERR "Bus error - the host /dev/shm or /tmp "
			       "mount likely just ran out of space\n");
		panic("Kernel mode signal %d", sig);
	}

	arch_examine_signal(sig, regs);

	/*
	 * Is the signal layout for the signal known?
	 * Signal data must be scrubbed to prevent information leaks.
	 */
	code = si->si_code;
	err = si->si_errno;
	if ((err == 0) && (siginfo_layout(sig, code) == SIL_FAULT)) {
		struct faultinfo *fi = UPT_FAULTINFO(regs);

		current->thread.arch.faultinfo = *fi;
		force_sig_fault(sig, code, (void __user *)FAULT_ADDRESS(*fi));
	} else {
		printk(KERN_ERR "Attempted to relay unknown signal %d (si_code = %d) with errno %d\n",
		       sig, code, err);
		force_sig(sig);
	}
}

void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs)
{
	if (current->thread.fault_catcher != NULL)
		UML_LONGJMP(current->thread.fault_catcher, 1);
	else
		relay_signal(sig, si, regs);
}

void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	do_IRQ(WINCH_IRQ, regs);
}