/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/mman.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/unistd.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/elf.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#include <asm/signal.h>
#include <asm/sim.h>
#include <asm/shmparam.h>
#include <asm/sysmips.h>
#include <asm/uaccess.h>

/*
 * For historic reasons the pipe(2) syscall on MIPS has an unusual calling
 * convention.  It returns results in registers $v0 / $v1 which means there
 * is no need for it to verify the validity of a userspace pointer
 * argument.  Historically that used to be expensive in Linux.  These days
 * the performance advantage is negligible.
 */
asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs)
{
        int fd[2];
        int error, res;

        error = do_pipe_flags(fd, 0);
        if (error) {
                res = error;
                goto out;
        }
        regs.regs[3] = fd[1];
        res = fd[0];
out:
        return res;
}

unsigned long shm_align_mask = PAGE_SIZE - 1;   /* Sane caches */

EXPORT_SYMBOL(shm_align_mask);

#define COLOUR_ALIGN(addr, pgoff)                               \
        ((((addr) + shm_align_mask) & ~shm_align_mask) +        \
         (((pgoff) << PAGE_SHIFT) & shm_align_mask))

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;
        int do_color_align;

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Even MAP_FIXED mappings must reside within TASK_SIZE. */
                if (TASK_SIZE - len < addr)
                        return -EINVAL;

                /*
                 * We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vmm = find_vma(current->mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vmm || addr + len <= vmm->vm_start))
                        return addr;
        }
        addr = current->mm->mmap_base;
        if (do_color_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point:  (!vmm || addr < vmm->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
                if (!vmm || addr + len <= vmm->vm_start)
                        return addr;
                addr = vmm->vm_end;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
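/*
 * A worked example of the colouring rule above (illustrative numbers,
 * assuming 4K pages and a hypothetical shm_align_mask of 0xffff, i.e. a
 * 64K aliasing distance): a MAP_SHARED mapping with pgoff == 1 is fine
 * at 0x411000, since (0x411000 - (1 << 12)) & 0xffff == 0, but would be
 * refused at 0x412000, since (0x412000 - (1 << 12)) & 0xffff == 0x1000.
 * Every shared mapping of a given page therefore lands on the same
 * cache colour.
 */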
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                random_factor = random_factor << PAGE_SHIFT;
                if (TASK_IS_32BIT_ADDR)
                        random_factor &= 0xfffffful;
                else
                        random_factor &= 0xffffffful;
        }

        mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
        mm->get_unmapped_area = arch_get_unmapped_area;
        mm->unmap_area = arch_unmap_area;
}

static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = get_random_int();

        rnd = rnd << PAGE_SHIFT;
        /* 8MB for 32bit, 256MB for 64bit */
        if (TASK_IS_32BIT_ADDR)
                rnd = rnd & 0x7ffffful;
        else
                rnd = rnd & 0xffffffful;

        return rnd;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}

SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
        unsigned long, prot, unsigned long, flags, unsigned long,
        fd, off_t, offset)
{
        unsigned long result;

        result = -EINVAL;
        if (offset & ~PAGE_MASK)
                goto out;

        result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);

out:
        return result;
}

SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
        unsigned long, prot, unsigned long, flags, unsigned long, fd,
        unsigned long, pgoff)
{
        if (pgoff & (~PAGE_MASK >> 12))
                return -EINVAL;

        return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
}
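/*
 * A quick sanity check of the arithmetic above (illustrative, assuming a
 * hypothetical 16K kernel page size): mmap2(2) always passes its offset
 * in fixed 4096-byte units, so with PAGE_SHIFT == 14 we get
 * ~PAGE_MASK >> 12 == 0x3, meaning only pgoff values that are a multiple
 * of 4 (i.e. 16K-aligned file offsets) pass the check, and the
 * conversion to kernel-sized pages is pgoff >> 2.  With 4K pages both
 * the check and the shift degenerate to no-ops.
 */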
save_static_function(sys_fork);
static int __used noinline
_sys_fork(nabi_no_regargs struct pt_regs regs)
{
        return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL);
}

save_static_function(sys_clone);
static int __used noinline
_sys_clone(nabi_no_regargs struct pt_regs regs)
{
        unsigned long clone_flags;
        unsigned long newsp;
        int __user *parent_tidptr, *child_tidptr;

        clone_flags = regs.regs[4];
        newsp = regs.regs[5];
        if (!newsp)
                newsp = regs.regs[29];
        parent_tidptr = (int __user *) regs.regs[6];
#ifdef CONFIG_32BIT
        /* We need to fetch the fifth argument off the stack. */
        child_tidptr = NULL;
        if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) {
                int __user *__user *usp = (int __user *__user *) regs.regs[29];
                if (regs.regs[2] == __NR_syscall) {
                        if (get_user(child_tidptr, &usp[5]))
                                return -EFAULT;
                }
                else if (get_user(child_tidptr, &usp[4]))
                        return -EFAULT;
        }
#else
        child_tidptr = (int __user *) regs.regs[8];
#endif
        return do_fork(clone_flags, newsp, &regs, 0,
                       parent_tidptr, child_tidptr);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
{
        int error;
        char *filename;

        filename = getname((const char __user *) (long)regs.regs[4]);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename,
                          (const char __user *const __user *) (long)regs.regs[5],
                          (const char __user *const __user *) (long)regs.regs[6],
                          &regs);
        putname(filename);

out:
        return error;
}

SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
{
        struct thread_info *ti = task_thread_info(current);

        ti->tp_value = addr;
        if (cpu_has_userlocal)
                write_c0_userlocal(addr);

        return 0;
}
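/*
 * Userspace view of the above (a sketch, assuming RDHWR is usable): once
 * the UserLocal register has been written, a thread can fetch its TLS
 * pointer without a syscall:
 *
 *      rdhwr   $3, $29         # $3 = UserLocal = tp_value
 *
 * On cores without UserLocal the instruction traps and the kernel
 * emulates it from thread_info->tp_value instead.
 */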
static inline int mips_atomic_set(struct pt_regs *regs,
        unsigned long addr, unsigned long new)
{
        unsigned long old, tmp;
        unsigned int err;

        if (unlikely(addr & 3))
                return -EINVAL;

        if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
                return -EINVAL;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__ (
                "       .set    mips3                           \n"
                "       li      %[err], 0                       \n"
                "1:     ll      %[old], (%[addr])               \n"
                "       move    %[tmp], %[new]                  \n"
                "2:     sc      %[tmp], (%[addr])               \n"
                "       beqzl   %[tmp], 1b                      \n"
                "3:                                             \n"
                "       .section .fixup,\"ax\"                  \n"
                "4:     li      %[err], %[efault]               \n"
                "       j       3b                              \n"
                "       .previous                               \n"
                "       .section __ex_table,\"a\"               \n"
                "       "STR(PTR)"      1b, 4b                  \n"
                "       "STR(PTR)"      2b, 4b                  \n"
                "       .previous                               \n"
                "       .set    mips0                           \n"
                : [old] "=&r" (old),
                  [err] "=&r" (err),
                  [tmp] "=&r" (tmp)
                : [addr] "r" (addr),
                  [new] "r" (new),
                  [efault] "i" (-EFAULT)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__ (
                "       .set    mips3                           \n"
                "       li      %[err], 0                       \n"
                "1:     ll      %[old], (%[addr])               \n"
                "       move    %[tmp], %[new]                  \n"
                "2:     sc      %[tmp], (%[addr])               \n"
                "       bnez    %[tmp], 4f                      \n"
                "3:                                             \n"
                "       .subsection 2                           \n"
                "4:     b       1b                              \n"
                "       .previous                               \n"
                "                                               \n"
                "       .section .fixup,\"ax\"                  \n"
                "5:     li      %[err], %[efault]               \n"
                "       j       3b                              \n"
                "       .previous                               \n"
                "       .section __ex_table,\"a\"               \n"
                "       "STR(PTR)"      1b, 5b                  \n"
                "       "STR(PTR)"      2b, 5b                  \n"
                "       .previous                               \n"
                "       .set    mips0                           \n"
                : [old] "=&r" (old),
                  [err] "=&r" (err),
                  [tmp] "=&r" (tmp)
                : [addr] "r" (addr),
                  [new] "r" (new),
                  [efault] "i" (-EFAULT)
                : "memory");
        } else {
                /*
                 * CPU without ll/sc: emulate the exchange with the
                 * kernel's software ll_bit, which is cleared if we are
                 * scheduled away, and retry until the load/store pair
                 * went through without interruption.
                 */
                do {
                        preempt_disable();
                        ll_bit = 1;
                        ll_task = current;
                        preempt_enable();

                        err = __get_user(old, (unsigned int *) addr);
                        err |= __put_user(new, (unsigned int *) addr);
                        if (err)
                                break;
                        rmb();
                } while (!ll_bit);
        }

        if (unlikely(err))
                return err;

        regs->regs[2] = old;
        regs->regs[7] = 0;      /* No error */

        /*
         * Don't let your children do this ...
         */
        __asm__ __volatile__(
        "       move    $29, %0                                 \n"
        "       j       syscall_exit                            \n"
        : /* no outputs */
        : "r" (regs));

        /* unreached.  Honestly.  */
        while (1);
}

save_static_function(sys_sysmips);
static int __used noinline
_sys_sysmips(nabi_no_regargs struct pt_regs regs)
{
        long cmd, arg1, arg2;

        cmd = regs.regs[4];
        arg1 = regs.regs[5];
        arg2 = regs.regs[6];

        switch (cmd) {
        case MIPS_ATOMIC_SET:
                return mips_atomic_set(&regs, arg1, arg2);

        case MIPS_FIXADE:
                if (arg1 & ~3)
                        return -EINVAL;

                if (arg1 & 1)
                        set_thread_flag(TIF_FIXADE);
                else
                        clear_thread_flag(TIF_FIXADE);
                if (arg1 & 2)
                        set_thread_flag(TIF_LOGADE);
                else
                        clear_thread_flag(TIF_LOGADE);

                return 0;

        case FLUSH_CACHE:
                __flush_cache_all();
                return 0;
        }

        return -EINVAL;
}

/*
 * Not implemented yet ...
 */
SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
{
        return -ENOSYS;
}

/*
 * If we ever come here the user sp is bad.  Zap the process right away.
 * Due to the bad stack, signaling wouldn't work.
 */
asmlinkage void bad_stack(void)
{
        do_exit(SIGSEGV);
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
                  const char *const argv[],
                  const char *const envp[])
{
        register unsigned long __a0 asm("$4") = (unsigned long) filename;
        register unsigned long __a1 asm("$5") = (unsigned long) argv;
        register unsigned long __a2 asm("$6") = (unsigned long) envp;
        register unsigned long __a3 asm("$7");
        unsigned long __v0;

        __asm__ volatile ("                                     \n"
        "       .set    noreorder                               \n"
        "       li      $2, %5          # __NR_execve           \n"
        "       syscall                                         \n"
        "       move    %0, $2                                  \n"
        "       .set    reorder                                 \n"
        : "=&r" (__v0), "=r" (__a3)
        : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
        : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24",
          "memory");

        if (__a3 == 0)
                return __v0;

        return -__v0;
}
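/*
 * For reference, the syscall return convention relied upon above: on
 * return from "syscall", $a3 is zero on success with the result in $v0,
 * and non-zero on failure with a positive errno in $v0, hence the
 * __a3 == 0 test and the -__v0 on the error path.
 */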