/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <arch.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>

/*
 * Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
 * segv().
 */
int handle_page_fault(unsigned long address, unsigned long ip,
                      int is_write, int is_user, int *code_out)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int err = -EFAULT;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                             (is_write ? FAULT_FLAG_WRITE : 0);

        *code_out = SEGV_MAPERR;

        /*
         * If the fault happened in an atomic context, don't take the fault;
         * just fail.
         */
        if (in_atomic())
                goto out_nosemaphore;

retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
                goto out;
        else if (vma->vm_start <= address)
                goto good_area;
        else if (!(vma->vm_flags & VM_GROWSDOWN))
                goto out;
        else if (is_user && !ARCH_IS_STACKGROW(address))
                goto out;
        else if (expand_stack(vma, address))
                goto out;

good_area:
        *code_out = SEGV_ACCERR;
        if (is_write && !(vma->vm_flags & VM_WRITE))
                goto out;

        /* Don't require VM_READ|VM_EXEC for write faults! */
        if (!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
                goto out;

        do {
                int fault;

                fault = handle_mm_fault(mm, vma, address, flags);

                if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                        goto out_nosemaphore;

                if (unlikely(fault & VM_FAULT_ERROR)) {
                        if (fault & VM_FAULT_OOM) {
                                goto out_of_memory;
                        } else if (fault & VM_FAULT_SIGBUS) {
                                err = -EACCES;
                                goto out;
                        }
                        BUG();
                }
                if (flags & FAULT_FLAG_ALLOW_RETRY) {
                        if (fault & VM_FAULT_MAJOR)
                                current->maj_flt++;
                        else
                                current->min_flt++;
                        if (fault & VM_FAULT_RETRY) {
                                flags &= ~FAULT_FLAG_ALLOW_RETRY;
                                flags |= FAULT_FLAG_TRIED;

                                goto retry;
                        }
                }

                pgd = pgd_offset(mm, address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                pte = pte_offset_kernel(pmd, address);
        } while (!pte_present(*pte));
        err = 0;
        /*
         * The below warning was added in place of
         *      pte_mkyoung(); if (is_write) pte_mkdirty();
         * If it triggered, we'd normally see a hang here (a clean pte is
         * marked read-only to emulate the dirty bit).
         * However, the generic code can mark a PTE writable but clean on a
         * concurrent read fault, triggering this harmlessly. So comment it out.
         */
#if 0
        WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
#endif
        flush_tlb_page(vma, address);
out:
        up_read(&mm->mmap_sem);
out_nosemaphore:
        return err;

out_of_memory:
        /*
         * We ran out of memory, call the OOM killer, and return to userspace
         * (which will retry the fault, or kill us if we got oom-killed).
         */
        up_read(&mm->mmap_sem);
        pagefault_out_of_memory();
        return 0;
}
EXPORT_SYMBOL(handle_page_fault);

static void show_segv_info(struct uml_pt_regs *regs)
{
        struct task_struct *tsk = current;
        struct faultinfo *fi = UPT_FAULTINFO(regs);

        if (!unhandled_signal(tsk, SIGSEGV))
                return;

        if (!printk_ratelimit())
                return;

        printk("%s%s[%d]: segfault at %lx ip %p sp %p error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
               (void *)UPT_IP(regs), (void *)UPT_SP(regs),
               fi->error_code);

        print_vma_addr(KERN_CONT " in ", UPT_IP(regs));
        printk(KERN_CONT "\n");
}

static void bad_segv(struct faultinfo fi, unsigned long ip)
{
        struct siginfo si;

        si.si_signo = SIGSEGV;
        si.si_code = SEGV_ACCERR;
        si.si_addr = (void __user *) FAULT_ADDRESS(fi);
        current->thread.arch.faultinfo = fi;
        force_sig_info(SIGSEGV, &si, current);
}

void fatal_sigsegv(void)
{
        force_sigsegv(SIGSEGV, current);
        do_signal();
        /*
         * This is to tell gcc that we're not returning - do_signal
         * can, in general, return, but in this case it won't, since
         * we just queued a fatal SIGSEGV.
         */
        os_dump_core();
}

/* SIGSEGV from the host: try to fix up the fault, otherwise deliver a signal. */
void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
        struct faultinfo *fi = UPT_FAULTINFO(regs);

        if (UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)) {
                show_segv_info(regs);
                bad_segv(*fi, UPT_IP(regs));
                return;
        }
        segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs);
}

/*
 * We give a *copy* of the faultinfo in the regs to segv.
 * This must be done, since nesting SEGVs could overwrite
 * the info in the regs. A pointer to the info then would
 * give us bad data!
 */
unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
                   struct uml_pt_regs *regs)
{
        struct siginfo si;
        jmp_buf *catcher;
        int err;
        int is_write = FAULT_WRITE(fi);
        unsigned long address = FAULT_ADDRESS(fi);

        if (!is_user && (address >= start_vm) && (address < end_vm)) {
                flush_tlb_kernel_vm();
                return 0;
        }
        else if (current->mm == NULL) {
                show_regs(container_of(regs, struct pt_regs, regs));
                panic("Segfault with no mm");
        }

        if (SEGV_IS_FIXABLE(&fi) || SEGV_MAYBE_FIXABLE(&fi))
                err = handle_page_fault(address, ip, is_write, is_user,
                                        &si.si_code);
        else {
                err = -EFAULT;
                /*
                 * A thread accessed NULL; we get a fault, but CR2 is invalid.
                 * This path was used by __do_copy_from_user() in TT mode.
                 * XXX TT mode is gone, so this may not be needed any more.
                 */
                address = 0;
        }

        catcher = current->thread.fault_catcher;
        if (!err)
                return 0;
        else if (catcher != NULL) {
                current->thread.fault_addr = (void *) address;
                UML_LONGJMP(catcher, 1);
        }
        else if (current->thread.fault_addr != NULL)
                panic("fault_addr set but no fault catcher");
        else if (!is_user && arch_fixup(ip, regs))
                return 0;

        if (!is_user) {
                show_regs(container_of(regs, struct pt_regs, regs));
                panic("Kernel mode fault at addr 0x%lx, ip 0x%lx",
                      address, ip);
        }

        show_segv_info(regs);

        if (err == -EACCES) {
                si.si_signo = SIGBUS;
                si.si_errno = 0;
                si.si_code = BUS_ADRERR;
                si.si_addr = (void __user *) address;
                current->thread.arch.faultinfo = fi;
                force_sig_info(SIGBUS, &si, current);
        } else {
                BUG_ON(err != -EFAULT);
                si.si_signo = SIGSEGV;
                si.si_addr = (void __user *) address;
                current->thread.arch.faultinfo = fi;
                force_sig_info(SIGSEGV, &si, current);
        }
        return 0;
}

void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
{
        struct faultinfo *fi;
        struct siginfo clean_si;

        if (!UPT_IS_USER(regs)) {
                if (sig == SIGBUS)
                        printk(KERN_ERR "Bus error - the host /dev/shm or /tmp "
                               "mount likely just ran out of space\n");
                panic("Kernel mode signal %d", sig);
        }

        arch_examine_signal(sig, regs);

        memset(&clean_si, 0, sizeof(clean_si));
        clean_si.si_signo = si->si_signo;
        clean_si.si_errno = si->si_errno;
        clean_si.si_code = si->si_code;
        switch (sig) {
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
        case SIGTRAP:
                fi = UPT_FAULTINFO(regs);
                clean_si.si_addr = (void __user *) FAULT_ADDRESS(*fi);
                current->thread.arch.faultinfo = *fi;
#ifdef __ARCH_SI_TRAPNO
                clean_si.si_trapno = si->si_trapno;
#endif
                break;
        default:
                printk(KERN_ERR "Attempted to relay unknown signal %d (si_code = %d)\n",
                       sig, si->si_code);
        }

        force_sig_info(sig, &clean_si, current);
}

/* Give any registered fault catcher a chance before relaying SIGBUS. */
void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs)
{
        if (current->thread.fault_catcher != NULL)
                UML_LONGJMP(current->thread.fault_catcher, 1);
        else
                relay_signal(sig, si, regs);
}

/* A host SIGWINCH (console window size change) is delivered as WINCH_IRQ. */
void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
        do_IRQ(WINCH_IRQ, regs);
}

/* Generic arch hook; there is nothing to set up for UML. */
void trap_init(void)
{
}
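
#if 0
/*
 * Illustrative sketch only, not part of the original file: one plausible way
 * kernel-side code can arm current->thread.fault_catcher so that a fault is
 * caught by segv() above (which longjmps back and records fault_addr) rather
 * than being turned into a signal. probe_user_byte() and its direct
 * dereference are hypothetical, and UML_SETJMP() is assumed to come from the
 * same header that provides UML_LONGJMP().
 */
static int probe_user_byte(const char __user *addr, char *out)
{
        jmp_buf jbuf;
        int faulted;

        current->thread.fault_catcher = &jbuf;
        faulted = UML_SETJMP(&jbuf);
        if (faulted == 0)
                /* May fault; segv() then longjmps back here with value 1. */
                *out = *(const char __force *) addr;

        current->thread.fault_catcher = NULL;
        current->thread.fault_addr = NULL;

        return faulted ? -EFAULT : 0;
}
#endif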