/*
 * Copyright (C) 2000, 2001 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/kernel.h"
#include "asm/errno.h"
#include "linux/sched.h"
#include "linux/mm.h"
#include "linux/spinlock.h"
#include "linux/init.h"
#include "linux/ptrace.h"
#include "asm/semaphore.h"
#include "asm/pgtable.h"
#include "asm/pgalloc.h"
#include "asm/tlbflush.h"
#include "asm/a.out.h"
#include "asm/current.h"
#include "asm/irq.h"
#include "sysdep/sigcontext.h"
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"
#include "chan_kern.h"
#include "mconsole_kern.h"
#include "mem.h"
#include "mem_kern.h"
#include "sysdep/ptrace.h"
#include "os.h"
#ifdef CONFIG_MODE_SKAS
#include "skas.h"
#endif

/* Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by segv(). */
int handle_page_fault(unsigned long address, unsigned long ip,
		      int is_write, int is_user, int *code_out)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int err = -EFAULT;

	*code_out = SEGV_MAPERR;

	/* If the fault happened in atomic context, don't take the fault, just
	 * fail. */
	if (in_atomic())
		goto out_nosemaphore;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if(!vma)
		goto out;
	else if(vma->vm_start <= address)
		goto good_area;
	else if(!(vma->vm_flags & VM_GROWSDOWN))
		goto out;
	else if(is_user && !ARCH_IS_STACKGROW(address))
		goto out;
	else if(expand_stack(vma, address))
		goto out;

good_area:
	*code_out = SEGV_ACCERR;
	if(is_write && !(vma->vm_flags & VM_WRITE))
		goto out;

	/* Don't require VM_READ|VM_EXEC for write faults! */
	if(!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
		goto out;

	do {
survive:
		switch (handle_mm_fault(mm, vma, address, is_write)){
		case VM_FAULT_MINOR:
			current->min_flt++;
			break;
		case VM_FAULT_MAJOR:
			current->maj_flt++;
			break;
		case VM_FAULT_SIGBUS:
			err = -EACCES;
			goto out;
		case VM_FAULT_OOM:
			err = -ENOMEM;
			goto out_of_memory;
		default:
			BUG();
		}
		pgd = pgd_offset(mm, address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
	} while(!pte_present(*pte));
	err = 0;
	/* The warning below was added in place of
	 *	pte_mkyoung(); if (is_write) pte_mkdirty();
	 * If it triggered, we would normally see a hang here (a clean pte is
	 * marked read-only to emulate the dirty bit).
	 * However, the generic code can mark a PTE writable but clean on a
	 * concurrent read fault, which would trigger it harmlessly, so it is
	 * commented out.
	 */
#if 0
	WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
#endif
	flush_tlb_page(vma, address);
out:
	up_read(&mm->mmap_sem);
out_nosemaphore:
	return(err);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	if (is_init(current)) {
		up_read(&mm->mmap_sem);
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	goto out;
}
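
/*
 * bad_segv() handles the faults which segv_handler() below decides cannot be
 * fixed up (a userspace fault for which SEGV_IS_FIXABLE() is false): it just
 * forces a SIGSEGV with SEGV_ACCERR on the current process.
 */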
static void bad_segv(struct faultinfo fi, unsigned long ip)
{
	struct siginfo si;

	si.si_signo = SIGSEGV;
	si.si_code = SEGV_ACCERR;
	si.si_addr = (void __user *) FAULT_ADDRESS(fi);
	current->thread.arch.faultinfo = fi;
	force_sig_info(SIGSEGV, &si, current);
}

static void segv_handler(int sig, union uml_pt_regs *regs)
{
	struct faultinfo *fi = UPT_FAULTINFO(regs);

	if(UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)){
		bad_segv(*fi, UPT_IP(regs));
		return;
	}
	segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs);
}

/*
 * We give a *copy* of the faultinfo in the regs to segv.
 * This must be done, since nesting SEGVs could overwrite
 * the info in the regs. A pointer to the info then would
 * give us bad data!
 */
unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
{
	struct siginfo si;
	void *catcher;
	int err;
	int is_write = FAULT_WRITE(fi);
	unsigned long address = FAULT_ADDRESS(fi);

	if(!is_user && (address >= start_vm) && (address < end_vm)){
		flush_tlb_kernel_vm();
		return(0);
	}
	else if(current->mm == NULL)
		panic("Segfault with no mm");

	if (SEGV_IS_FIXABLE(&fi) || SEGV_MAYBE_FIXABLE(&fi))
		err = handle_page_fault(address, ip, is_write, is_user,
					&si.si_code);
	else {
		err = -EFAULT;
		/* A thread accessed NULL and we got a fault, but CR2 is
		 * invalid.  This code is used in __do_copy_from_user() of
		 * TT mode. */
		address = 0;
	}

	catcher = current->thread.fault_catcher;
	if(!err)
		return(0);
	else if(catcher != NULL){
		current->thread.fault_addr = (void *) address;
		do_longjmp(catcher, 1);
	}
	else if(current->thread.fault_addr != NULL)
		panic("fault_addr set but no fault catcher");
	else if(!is_user && arch_fixup(ip, sc))
		return(0);

	if(!is_user)
		panic("Kernel mode fault at addr 0x%lx, ip 0x%lx",
		      address, ip);

	if (err == -EACCES) {
		si.si_signo = SIGBUS;
		si.si_errno = 0;
		si.si_code = BUS_ADRERR;
		si.si_addr = (void __user *) address;
		current->thread.arch.faultinfo = fi;
		force_sig_info(SIGBUS, &si, current);
	} else if (err == -ENOMEM) {
		printk("VM: killing process %s\n", current->comm);
		do_exit(SIGKILL);
	} else {
		BUG_ON(err != -EFAULT);
		si.si_signo = SIGSEGV;
		si.si_addr = (void __user *) address;
		current->thread.arch.faultinfo = fi;
		force_sig_info(SIGSEGV, &si, current);
	}
	return(0);
}

void relay_signal(int sig, union uml_pt_regs *regs)
{
	if(arch_handle_signal(sig, regs))
		return;

	if(!UPT_IS_USER(regs)){
		if(sig == SIGBUS)
			printk("Bus error - the /dev/shm or /tmp mount likely "
			       "just ran out of space\n");
		panic("Kernel mode signal %d", sig);
	}

	current->thread.arch.faultinfo = *UPT_FAULTINFO(regs);
	force_sig(sig, current);
}

static void bus_handler(int sig, union uml_pt_regs *regs)
{
	if(current->thread.fault_catcher != NULL)
		do_longjmp(current->thread.fault_catcher, 1);
	else
		relay_signal(sig, regs);
}

static void winch(int sig, union uml_pt_regs *regs)
{
	do_IRQ(WINCH_IRQ, regs);
}

const struct kern_handlers handlinfo_kern = {
	.relay_signal = relay_signal,
	.winch = winch,
	.bus_handler = bus_handler,
	.page_fault = segv_handler,
	.sigio_handler = sigio_handler,
	.timer_handler = timer_handler
};
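
/*
 * trap_init() is called by the generic boot code; UML has nothing to set up
 * here, since faults and other traps arrive as host signals and are
 * dispatched through the handlers registered in handlinfo_kern above, so
 * this stays an empty stub.
 */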
void trap_init(void)
{
}