/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/mman.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/unistd.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#include <asm/signal.h>
#include <asm/sim.h>
#include <asm/shmparam.h>
#include <asm/sysmips.h>

/*
 * For historic reasons the pipe(2) syscall on MIPS has an unusual calling
 * convention.  It returns results in registers $v0 / $v1 which means there
 * is no need for it to verify the validity of a userspace pointer
 * argument.  Historically that used to be expensive in Linux.  These days
 * the performance advantage is negligible.
 */
asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs)
{
	int fd[2];
	int error, res;

	error = do_pipe_flags(fd, 0);
	if (error) {
		res = error;
		goto out;
	}
	/* The second descriptor travels back to userspace in $v1 ($3). */
	regs.regs[3] = fd[1];
	res = fd[0];
out:
	return res;
}

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */

EXPORT_SYMBOL(shm_align_mask);

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
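/*
 * Worked example (illustrative values, not taken from any particular
 * CPU): with a hypothetical shm_align_mask of 0xffff (64 KB aliasing
 * distance), PAGE_SHIFT of 12, addr of 0x12345 and pgoff of 3,
 * COLOUR_ALIGN() first rounds addr up to 0x20000 and then adds
 * (3 << 12) & 0xffff = 0x3000, giving 0x23000.  The chosen address
 * thus shares its cache colour (the low shm_align_mask bits) with the
 * file offset being mapped, so virtual aliases of the same data index
 * into the same cache lines.
 */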
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;
	int do_color_align;
	unsigned long task_size;

#ifdef CONFIG_32BIT
	task_size = TASK_SIZE;
#else /* Must be CONFIG_64BIT */
	task_size = test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE;
#endif

	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within task_size. */
		if (task_size - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vmm = find_vma(current->mm, addr);
		if (task_size - len >= addr &&
		    (!vmm || addr + len <= vmm->vm_start))
			return addr;
	}
	addr = TASK_UNMAPPED_BASE;
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (task_size - len < addr)
			return -ENOMEM;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags, unsigned long,
	fd, off_t, offset)
{
	unsigned long result;

	result = -EINVAL;
	if (offset & ~PAGE_MASK)
		goto out;

	result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);

out:
	return result;
}

SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags, unsigned long, fd,
	unsigned long, pgoff)
{
	/*
	 * mmap2 takes its offset in units of 4096 bytes; reject it
	 * unless it is also a multiple of PAGE_SIZE.
	 */
	if (pgoff & (~PAGE_MASK >> 12))
		return -EINVAL;

	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
}

save_static_function(sys_fork);
static int __used noinline
_sys_fork(nabi_no_regargs struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL);
}

save_static_function(sys_clone);
static int __used noinline
_sys_clone(nabi_no_regargs struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.regs[4];
	newsp = regs.regs[5];
	if (!newsp)
		newsp = regs.regs[29];
	parent_tidptr = (int __user *) regs.regs[6];
#ifdef CONFIG_32BIT
	/* We need to fetch the fifth argument off the stack. */
	child_tidptr = NULL;
	if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) {
		int __user *__user *usp = (int __user *__user *) regs.regs[29];
		if (regs.regs[2] == __NR_syscall) {
			if (get_user(child_tidptr, &usp[5]))
				return -EFAULT;
		} else if (get_user(child_tidptr, &usp[4]))
			return -EFAULT;
	}
#else
	child_tidptr = (int __user *) regs.regs[8];
#endif
	return do_fork(clone_flags, newsp, &regs, 0,
		       parent_tidptr, child_tidptr);
}
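/*
 * Why _sys_clone() digs into the user stack above: the o32 ABI passes
 * only the first four arguments in registers $4..$7, so a fifth
 * argument such as child_tidptr lives in the caller's outgoing
 * argument area at usp[4].  When clone is reached indirectly as
 * syscall(__NR_clone, ...), the real arguments all shift one slot up
 * the stack, which is why child_tidptr is fetched from usp[5] in that
 * case.
 */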
/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) (long)regs.regs[4]);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, (char __user *__user *) (long)regs.regs[5],
			  (char __user *__user *) (long)regs.regs[6], &regs);
	putname(filename);

out:
	return error;
}

SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
{
	struct thread_info *ti = task_thread_info(current);

	ti->tp_value = addr;
	if (cpu_has_userlocal)
		write_c0_userlocal(addr);

	return 0;
}

static inline int mips_atomic_set(struct pt_regs *regs,
				  unsigned long addr, unsigned long new)
{
	unsigned long old, tmp;
	unsigned int err;

	if (unlikely(addr & 3))
		return -EINVAL;

	if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
		return -EINVAL;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set	mips3				\n"
		"	li	%[err], 0			\n"
		"1:	ll	%[old], (%[addr])		\n"
		"	move	%[tmp], %[new]			\n"
		"2:	sc	%[tmp], (%[addr])		\n"
		"	beqzl	%[tmp], 1b			\n"
		"3:						\n"
		"	.section .fixup,\"ax\"			\n"
		"4:	li	%[err], %[efault]		\n"
		"	j	3b				\n"
		"	.previous				\n"
		"	.section __ex_table,\"a\"		\n"
		"	"STR(PTR)"	1b, 4b			\n"
		"	"STR(PTR)"	2b, 4b			\n"
		"	.previous				\n"
		"	.set	mips0				\n"
		: [old] "=&r" (old),
		  [err] "=&r" (err),
		  [tmp] "=&r" (tmp)
		: [addr] "r" (addr),
		  [new] "r" (new),
		  [efault] "i" (-EFAULT)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__ (
		"	.set	mips3				\n"
		"	li	%[err], 0			\n"
		"1:	ll	%[old], (%[addr])		\n"
		"	move	%[tmp], %[new]			\n"
		"2:	sc	%[tmp], (%[addr])		\n"
		"	bnez	%[tmp], 4f			\n"
		"3:						\n"
		"	.subsection 2				\n"
		"4:	b	1b				\n"
		"	.previous				\n"
		"						\n"
		"	.section .fixup,\"ax\"			\n"
		"5:	li	%[err], %[efault]		\n"
		"	j	3b				\n"
		"	.previous				\n"
		"	.section __ex_table,\"a\"		\n"
		"	"STR(PTR)"	1b, 5b			\n"
		"	"STR(PTR)"	2b, 5b			\n"
		"	.previous				\n"
		"	.set	mips0				\n"
		: [old] "=&r" (old),
		  [err] "=&r" (err),
		  [tmp] "=&r" (tmp)
		: [addr] "r" (addr),
		  [new] "r" (new),
		  [efault] "i" (-EFAULT)
		: "memory");
	} else {
		/*
		 * No hardware ll/sc: cooperate with the kernel's ll/sc
		 * emulation state (ll_bit/ll_task) and retry if
		 * atomicity was lost while we touched userspace.
		 */
		do {
			preempt_disable();
			ll_bit = 1;
			ll_task = current;
			preempt_enable();

			err = __get_user(old, (unsigned int *) addr);
			err |= __put_user(new, (unsigned int *) addr);
			if (err)
				break;
			rmb();
		} while (!ll_bit);
	}

	if (unlikely(err))
		return err;

	regs->regs[2] = old;
	regs->regs[7] = 0;	/* No error */

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
	"	move	$29, %0					\n"
	"	j	syscall_exit				\n"
	: /* no outputs */
	: "r" (regs));

	/* unreached.  Honestly. */
	while (1);
}
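/*
 * Userspace view of the helper above (illustrative sketch only, issued
 * here through the generic syscall(2) wrapper):
 *
 *	old = syscall(__NR_sysmips, MIPS_ATOMIC_SET, (long) &word, new);
 *
 * atomically stores `new' into `word' and returns the previous value,
 * which is how userland on pre-ll/sc (MIPS I) hardware could build
 * test-and-set style locks.
 */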
save_static_function(sys_sysmips);
static int __used noinline
_sys_sysmips(nabi_no_regargs struct pt_regs regs)
{
	long cmd, arg1, arg2, arg3;

	cmd = regs.regs[4];
	arg1 = regs.regs[5];
	arg2 = regs.regs[6];
	arg3 = regs.regs[7];

	switch (cmd) {
	case MIPS_ATOMIC_SET:
		return mips_atomic_set(&regs, arg1, arg2);

	case MIPS_FIXADE:
		if (arg1 & ~3)
			return -EINVAL;

		if (arg1 & 1)
			set_thread_flag(TIF_FIXADE);
		else
			clear_thread_flag(TIF_FIXADE);
		if (arg1 & 2)
			set_thread_flag(TIF_LOGADE);
		else
			clear_thread_flag(TIF_LOGADE);

		return 0;

	case FLUSH_CACHE:
		__flush_cache_all();
		return 0;
	}

	return -EINVAL;
}

/*
 * Not implemented yet ...
 */
SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
{
	return -ENOSYS;
}

/*
 * If we ever come here the user sp is bad.  Zap the process right away.
 * Due to the bad stack, signaling wouldn't work.
 */
asmlinkage void bad_stack(void)
{
	do_exit(SIGSEGV);
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register unsigned long __a0 asm("$4") = (unsigned long) filename;
	register unsigned long __a1 asm("$5") = (unsigned long) argv;
	register unsigned long __a2 asm("$6") = (unsigned long) envp;
	register unsigned long __a3 asm("$7");
	unsigned long __v0;

	__asm__ volatile ("					\n"
	"	.set	noreorder				\n"
	"	li	$2, %5		# __NR_execve		\n"
	"	syscall						\n"
	"	move	%0, $2					\n"
	"	.set	reorder					\n"
	: "=&r" (__v0), "=r" (__a3)
	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24",
	  "memory");

	if (__a3 == 0)
		return __v0;

	return -__v0;
}
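/*
 * The tail of kernel_execve() relies on the MIPS syscall return
 * convention: on return from `syscall' the kernel clears $a3 ($7) on
 * success and sets it non-zero on failure, with $v0 ($2) carrying
 * either the result or the positive error number.  Hence:
 *
 *	if (__a3 == 0)		success: return $v0 as-is
 *	return -__v0;		failure: negate the errno
 */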