/*
 *  linux/arch/m68k/kernel/sys_m68k.c
 *
 *  This file contains various random system calls that
 *  have a non-standard calling sequence on the Linux/m68k
 *  platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/elf.h>
#include <asm/tlb.h>

asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
                             unsigned long error_code);

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
                          unsigned long prot, unsigned long flags,
                          unsigned long fd, unsigned long pgoff)
{
        /*
         * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
         * so we need to shift the argument down by 1; m68k mmap64(3)
         * (in libc) expects the last argument of mmap2 in 4Kb units.
         */
        return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing..
 */

struct mmap_arg_struct {
        unsigned long addr;
        unsigned long len;
        unsigned long prot;
        unsigned long flags;
        unsigned long fd;
        unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
        struct mmap_arg_struct a;
        int error = -EFAULT;

        if (copy_from_user(&a, arg, sizeof(a)))
                goto out;

        error = -EINVAL;
        if (a.offset & ~PAGE_MASK)
                goto out;

        error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
                               a.offset >> PAGE_SHIFT);
out:
        return error;
}
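/*
 * Illustration (hypothetical userspace, not part of this file): the
 * legacy entry point above takes a byte offset inside its argument
 * block, while mmap2 expects the offset pre-shifted into 4Kb units,
 * so mapping the same 8Kb file offset would look like:
 *
 *      struct mmap_arg_struct a = {
 *              .addr = 0, .len = 4096, .prot = PROT_READ,
 *              .flags = MAP_SHARED, .fd = fd, .offset = 8192   (bytes)
 *      };
 *      old_mmap(&a);                                   (old mmap trap)
 *      mmap2(0, 4096, PROT_READ, MAP_SHARED, fd, 2);   (8192 >> 12)
 */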
struct sel_arg_struct {
        unsigned long n;
        fd_set __user *inp, *outp, *exp;
        struct timeval __user *tvp;
};

asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
        struct sel_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        /* sys_select() does the appropriate kernel locking */
        return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
                        int third, void __user *ptr, long fifth)
{
        int version, ret;

        version = call >> 16; /* hack for backward compatibility */
        call &= 0xffff;

        if (call <= SEMCTL)
                switch (call) {
                case SEMOP:
                        return sys_semop (first, ptr, second);
                case SEMGET:
                        return sys_semget (first, second, third);
                case SEMCTL: {
                        union semun fourth;
                        if (!ptr)
                                return -EINVAL;
                        if (get_user(fourth.__pad, (void __user *__user *) ptr))
                                return -EFAULT;
                        return sys_semctl (first, second, third, fourth);
                }
                default:
                        return -ENOSYS;
                }
        if (call <= MSGCTL)
                switch (call) {
                case MSGSND:
                        return sys_msgsnd (first, ptr, second, third);
                case MSGRCV:
                        switch (version) {
                        case 0: {
                                struct ipc_kludge tmp;
                                if (!ptr)
                                        return -EINVAL;
                                if (copy_from_user (&tmp, ptr, sizeof (tmp)))
                                        return -EFAULT;
                                return sys_msgrcv (first, tmp.msgp, second,
                                                   tmp.msgtyp, third);
                        }
                        default:
                                return sys_msgrcv (first, ptr,
                                                   second, fifth, third);
                        }
                case MSGGET:
                        return sys_msgget ((key_t) first, second);
                case MSGCTL:
                        return sys_msgctl (first, second, ptr);
                default:
                        return -ENOSYS;
                }
        if (call <= SHMCTL)
                switch (call) {
                case SHMAT:
                        switch (version) {
                        default: {
                                ulong raddr;
                                ret = do_shmat (first, ptr, second, &raddr);
                                if (ret)
                                        return ret;
                                return put_user (raddr, (ulong __user *) third);
                        }
                        }
                case SHMDT:
                        return sys_shmdt (ptr);
                case SHMGET:
                        return sys_shmget (first, second, third);
                case SHMCTL:
                        return sys_shmctl (first, second, ptr);
                default:
                        return -ENOSYS;
                }

        return -EINVAL;
}
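/*
 * Illustration (hypothetical libc-side sketch, not part of this file):
 * userspace funnels every SysV IPC call through the single ipc() trap
 * handled above, packing the sub-call into the low 16 bits of 'call'
 * and the interface version into the high 16 bits.  A semop() wrapper
 * might boil down to:
 *
 *      int semop(int semid, struct sembuf *sops, size_t nsops)
 *      {
 *              return ipc(SEMOP, semid, nsops, 0, sops, 0);
 *      }
 */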
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)                                         \
({                                                                      \
  unsigned long _mmusr, _paddr;                                         \
                                                                        \
  __asm__ __volatile__ (".chip 68040\n\t"                               \
                        "ptestr (%1)\n\t"                               \
                        "movec %%mmusr,%0\n\t"                          \
                        ".chip 68k"                                     \
                        : "=r" (_mmusr)                                 \
                        : "a" (vaddr));                                 \
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;             \
  _paddr;                                                               \
})

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          /* This nop is needed for some broken versions of the 68040.  */
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
        paddr += addr & ~(PAGE_MASK | 15);
        len = (len + (addr & 15) + 15) >> 4;
      } else {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_040(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
        len = (len + 15) >> 4;
      }
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * No need to page align here since it is done by
               * virt_to_phys_040().
               */
              addr += PAGE_SIZE;
              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary.  */
              for (;;)
                {
                  if ((paddr = virt_to_phys_040(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_040(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}

#define virt_to_phys_060(vaddr)                         \
({                                                      \
  unsigned long paddr;                                  \
  __asm__ __volatile__ (".chip 68060\n\t"               \
                        "plpar (%0)\n\t"                \
                        ".chip 68k"                     \
                        : "=a" (paddr)                  \
                        : "0" (vaddr));                 \
  (paddr); /* XXX */                                    \
})

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_060(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
      }
      len = (len + 15) >> 4;
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * We just want to jump to the first cache line
               * in the next page.
               */
              addr += PAGE_SIZE;
              addr &= PAGE_MASK;

              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary.  */
              for (;;)
                {
                  if ((paddr = virt_to_phys_060(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;        /* Workaround for bug in some
                                   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_060(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}
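/*
 * Worked example of the cache-line arithmetic used by both helpers
 * above (illustration only): for addr = 0x1003 and len = 0x11, the
 * byte range [0x1003, 0x1014) touches the 16-byte lines at 0x1000 and
 * 0x1010, and indeed (0x11 + (0x1003 & 15) + 15) >> 4 == 2 lines.
 */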
/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
        struct vm_area_struct *vma;
        int ret = -EINVAL;

        lock_kernel();
        if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
            cache & ~FLUSH_CACHE_BOTH)
                goto out;

        if (scope == FLUSH_SCOPE_ALL) {
                /* Only the superuser may explicitly flush the whole cache. */
                ret = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out;
        } else {
                /*
                 * Verify that the specified address region actually belongs
                 * to this process.
                 */
                vma = find_vma (current->mm, addr);
                ret = -EINVAL;
                /* Check for overflow.  */
                if (addr + len < addr)
                        goto out;
                if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
                        goto out;
        }

        if (CPU_IS_020_OR_030) {
                if (scope == FLUSH_SCOPE_LINE && len < 256) {
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 4;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x400;
                        len >>= 2;
                        while (len--) {
                                __asm__ __volatile__ ("movec %1, %%caar\n\t"
                                                      "movec %0, %%cacr"
                                                      : /* no outputs */
                                                      : "r" (cacr), "r" (addr));
                                addr += 4;
                        }
                } else {
                        /* Flush the whole cache, even if page granularity requested. */
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 8;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x800;
                        __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
                }
                ret = 0;
                goto out;
        } else {
                /*
                 * 040 or 060: don't blindly trust 'scope', someone could
                 * try to flush a few megs of memory.
                 */

                if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
                        scope = FLUSH_SCOPE_PAGE;
                if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
                        scope = FLUSH_SCOPE_ALL;
                if (CPU_IS_040) {
                        ret = cache_flush_040 (addr, scope, cache, len);
                } else if (CPU_IS_060) {
                        ret = cache_flush_060 (addr, scope, cache, len);
                }
        }
out:
        unlock_kernel();
        return ret;
}
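/*
 * Illustration (hypothetical userspace, not part of this file): a JIT
 * that has just written instructions into 'buf' would push the stale
 * data lines and invalidate the instruction cache before jumping to
 * the new code.  Assuming a libc wrapper named cacheflush() with the
 * same argument order as the syscall above:
 *
 *      cacheflush((unsigned long)buf, FLUSH_SCOPE_LINE,
 *                 FLUSH_CACHE_BOTH, buflen);
 */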
asmlinkage int sys_getpagesize(void)
{
        return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
        register long __res asm ("%d0") = __NR_execve;
        register long __a asm ("%d1") = (long)(filename);
        register long __b asm ("%d2") = (long)(argv);
        register long __c asm ("%d3") = (long)(envp);
        asm volatile ("trap #0" : "+d" (__res)
                        : "d" (__a), "d" (__b), "d" (__c));
        return __res;
}

asmlinkage unsigned long sys_get_thread_area(void)
{
        return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
        current_thread_info()->tp_value = tp;
        return 0;
}

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
                      unsigned long __user * mem)
{
        /* This was borrowed from ARM's implementation.  */
        for (;;) {
                struct mm_struct *mm = current->mm;
                pgd_t *pgd;
                pmd_t *pmd;
                pte_t *pte;
                spinlock_t *ptl;
                unsigned long mem_value;

                down_read(&mm->mmap_sem);
                pgd = pgd_offset(mm, (unsigned long)mem);
                if (!pgd_present(*pgd))
                        goto bad_access;
                pmd = pmd_offset(pgd, (unsigned long)mem);
                if (!pmd_present(*pmd))
                        goto bad_access;
                pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
                if (!pte_present(*pte) || !pte_dirty(*pte)
                    || !pte_write(*pte)) {
                        pte_unmap_unlock(pte, ptl);
                        goto bad_access;
                }

                mem_value = *mem;
                if (mem_value == oldval)
                        *mem = newval;

                pte_unmap_unlock(pte, ptl);
                up_read(&mm->mmap_sem);
                return mem_value;

        bad_access:
                up_read(&mm->mmap_sem);
                /* This is not necessarily a bad access; we can get here
                   if the memory we're trying to write to should be
                   copied-on-write.  Make the kernel do the necessary
                   page stuff, then re-iterate.  Simulate a write access
                   fault to do that.  */
                {
                        /* The first argument of the function corresponds to
                           D1, which is the first field of struct pt_regs.  */
                        struct pt_regs *fp = (struct pt_regs *)&newval;

                        /* '3' is an RMW flag.  */
                        if (do_page_fault(fp, (unsigned long)mem, 3))
                                /* If the do_page_fault() failed, we don't
                                   have anything meaningful to return.
                                   There should be a SIGSEGV pending for
                                   the process.  */
                                return 0xdeadbeef;
                }
        }
}
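/*
 * Illustration (hypothetical userspace, not part of this file): on
 * m68k CPUs lacking a usable CAS instruction, a wrapper around this
 * trap can implement lock-free updates by retrying until the observed
 * value matches the expected one.  The wrapper name and argument
 * order below are assumptions, not a real libc API:
 *
 *      unsigned long old, seen;
 *      do {
 *              old = *counter;
 *              seen = atomic_cmpxchg_32(counter, old, old + 1);
 *      } while (seen != old);     (seen is the value the kernel read)
 */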
asmlinkage int sys_atomic_barrier(void)
{
        /* no code needed for uniprocs */
        return 0;
}