1 // SPDX-License-Identifier: GPL-2.0 2 /* linux/arch/sparc64/kernel/sys_sparc.c 3 * 4 * This file contains various random system calls that 5 * have a non-standard calling sequence on the Linux/sparc 6 * platform. 7 */ 8 9 #include <linux/errno.h> 10 #include <linux/types.h> 11 #include <linux/sched/signal.h> 12 #include <linux/sched/mm.h> 13 #include <linux/sched/debug.h> 14 #include <linux/fs.h> 15 #include <linux/file.h> 16 #include <linux/mm.h> 17 #include <linux/sem.h> 18 #include <linux/msg.h> 19 #include <linux/shm.h> 20 #include <linux/stat.h> 21 #include <linux/mman.h> 22 #include <linux/utsname.h> 23 #include <linux/smp.h> 24 #include <linux/slab.h> 25 #include <linux/syscalls.h> 26 #include <linux/ipc.h> 27 #include <linux/personality.h> 28 #include <linux/random.h> 29 #include <linux/export.h> 30 #include <linux/context_tracking.h> 31 #include <linux/timex.h> 32 #include <linux/uaccess.h> 33 34 #include <asm/utrap.h> 35 #include <asm/unistd.h> 36 37 #include "entry.h" 38 #include "kernel.h" 39 #include "systbls.h" 40 41 /* #define DEBUG_UNIMP_SYSCALL */ 42 43 SYSCALL_DEFINE0(getpagesize) 44 { 45 return PAGE_SIZE; 46 } 47 48 /* Does addr --> addr+len fall within 4GB of the VA-space hole or 49 * overflow past the end of the 64-bit address space? 50 */ 51 static inline int invalid_64bit_range(unsigned long addr, unsigned long len) 52 { 53 unsigned long va_exclude_start, va_exclude_end; 54 55 va_exclude_start = VA_EXCLUDE_START; 56 va_exclude_end = VA_EXCLUDE_END; 57 58 if (unlikely(len >= va_exclude_start)) 59 return 1; 60 61 if (unlikely((addr + len) < addr)) 62 return 1; 63 64 if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) || 65 ((addr + len) >= va_exclude_start && 66 (addr + len) < va_exclude_end))) 67 return 1; 68 69 return 0; 70 } 71 72 /* These functions differ from the default implementations in 73 * mm/mmap.c in two ways: 74 * 75 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align, 76 * for fixed such mappings we just validate what the user gave us. 77 * 2) For 64-bit tasks we avoid mapping anything within 4GB of 78 * the spitfire/niagara VA-hole. 79 */ 80 81 static inline unsigned long COLOR_ALIGN(unsigned long addr, 82 unsigned long pgoff) 83 { 84 unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1); 85 unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1); 86 87 return base + off; 88 } 89 90 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) 91 { 92 struct mm_struct *mm = current->mm; 93 struct vm_area_struct * vma; 94 unsigned long task_size = TASK_SIZE; 95 int do_color_align; 96 struct vm_unmapped_area_info info; 97 98 if (flags & MAP_FIXED) { 99 /* We do not accept a shared mapping if it would violate 100 * cache aliasing constraints. 101 */ 102 if ((flags & MAP_SHARED) && 103 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) 104 return -EINVAL; 105 return addr; 106 } 107 108 if (test_thread_flag(TIF_32BIT)) 109 task_size = STACK_TOP32; 110 if (unlikely(len > task_size || len >= VA_EXCLUDE_START)) 111 return -ENOMEM; 112 113 do_color_align = 0; 114 if (filp || (flags & MAP_SHARED)) 115 do_color_align = 1; 116 117 if (addr) { 118 if (do_color_align) 119 addr = COLOR_ALIGN(addr, pgoff); 120 else 121 addr = PAGE_ALIGN(addr); 122 123 vma = find_vma(mm, addr); 124 if (task_size - len >= addr && 125 (!vma || addr + len <= vm_start_gap(vma))) 126 return addr; 127 } 128 129 info.flags = 0; 130 info.length = len; 131 info.low_limit = TASK_UNMAPPED_BASE; 132 info.high_limit = min(task_size, VA_EXCLUDE_START); 133 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; 134 info.align_offset = pgoff << PAGE_SHIFT; 135 addr = vm_unmapped_area(&info); 136 137 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { 138 VM_BUG_ON(addr != -ENOMEM); 139 info.low_limit = VA_EXCLUDE_END; 140 info.high_limit = task_size; 141 addr = vm_unmapped_area(&info); 142 } 143 144 return addr; 145 } 146 147 unsigned long 148 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, 149 const unsigned long len, const unsigned long pgoff, 150 const unsigned long flags) 151 { 152 struct vm_area_struct *vma; 153 struct mm_struct *mm = current->mm; 154 unsigned long task_size = STACK_TOP32; 155 unsigned long addr = addr0; 156 int do_color_align; 157 struct vm_unmapped_area_info info; 158 159 /* This should only ever run for 32-bit processes. */ 160 BUG_ON(!test_thread_flag(TIF_32BIT)); 161 162 if (flags & MAP_FIXED) { 163 /* We do not accept a shared mapping if it would violate 164 * cache aliasing constraints. 165 */ 166 if ((flags & MAP_SHARED) && 167 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) 168 return -EINVAL; 169 return addr; 170 } 171 172 if (unlikely(len > task_size)) 173 return -ENOMEM; 174 175 do_color_align = 0; 176 if (filp || (flags & MAP_SHARED)) 177 do_color_align = 1; 178 179 /* requesting a specific address */ 180 if (addr) { 181 if (do_color_align) 182 addr = COLOR_ALIGN(addr, pgoff); 183 else 184 addr = PAGE_ALIGN(addr); 185 186 vma = find_vma(mm, addr); 187 if (task_size - len >= addr && 188 (!vma || addr + len <= vm_start_gap(vma))) 189 return addr; 190 } 191 192 info.flags = VM_UNMAPPED_AREA_TOPDOWN; 193 info.length = len; 194 info.low_limit = PAGE_SIZE; 195 info.high_limit = mm->mmap_base; 196 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; 197 info.align_offset = pgoff << PAGE_SHIFT; 198 addr = vm_unmapped_area(&info); 199 200 /* 201 * A failed mmap() very likely causes application failure, 202 * so fall back to the bottom-up function here. This scenario 203 * can happen with large stack limits and large mmap() 204 * allocations. 205 */ 206 if (addr & ~PAGE_MASK) { 207 VM_BUG_ON(addr != -ENOMEM); 208 info.flags = 0; 209 info.low_limit = TASK_UNMAPPED_BASE; 210 info.high_limit = STACK_TOP32; 211 addr = vm_unmapped_area(&info); 212 } 213 214 return addr; 215 } 216 217 /* Try to align mapping such that we align it as much as possible. */ 218 unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) 219 { 220 unsigned long align_goal, addr = -ENOMEM; 221 unsigned long (*get_area)(struct file *, unsigned long, 222 unsigned long, unsigned long, unsigned long); 223 224 get_area = current->mm->get_unmapped_area; 225 226 if (flags & MAP_FIXED) { 227 /* Ok, don't mess with it. */ 228 return get_area(NULL, orig_addr, len, pgoff, flags); 229 } 230 flags &= ~MAP_SHARED; 231 232 align_goal = PAGE_SIZE; 233 if (len >= (4UL * 1024 * 1024)) 234 align_goal = (4UL * 1024 * 1024); 235 else if (len >= (512UL * 1024)) 236 align_goal = (512UL * 1024); 237 else if (len >= (64UL * 1024)) 238 align_goal = (64UL * 1024); 239 240 do { 241 addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags); 242 if (!(addr & ~PAGE_MASK)) { 243 addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL); 244 break; 245 } 246 247 if (align_goal == (4UL * 1024 * 1024)) 248 align_goal = (512UL * 1024); 249 else if (align_goal == (512UL * 1024)) 250 align_goal = (64UL * 1024); 251 else 252 align_goal = PAGE_SIZE; 253 } while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE); 254 255 /* Mapping is smaller than 64K or larger areas could not 256 * be obtained. 257 */ 258 if (addr & ~PAGE_MASK) 259 addr = get_area(NULL, orig_addr, len, pgoff, flags); 260 261 return addr; 262 } 263 EXPORT_SYMBOL(get_fb_unmapped_area); 264 265 /* Essentially the same as PowerPC. */ 266 static unsigned long mmap_rnd(void) 267 { 268 unsigned long rnd = 0UL; 269 270 if (current->flags & PF_RANDOMIZE) { 271 unsigned long val = get_random_long(); 272 if (test_thread_flag(TIF_32BIT)) 273 rnd = (val % (1UL << (23UL-PAGE_SHIFT))); 274 else 275 rnd = (val % (1UL << (30UL-PAGE_SHIFT))); 276 } 277 return rnd << PAGE_SHIFT; 278 } 279 280 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) 281 { 282 unsigned long random_factor = mmap_rnd(); 283 unsigned long gap; 284 285 /* 286 * Fall back to the standard layout if the personality 287 * bit is set, or if the expected stack growth is unlimited: 288 */ 289 gap = rlim_stack->rlim_cur; 290 if (!test_thread_flag(TIF_32BIT) || 291 (current->personality & ADDR_COMPAT_LAYOUT) || 292 gap == RLIM_INFINITY || 293 sysctl_legacy_va_layout) { 294 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; 295 mm->get_unmapped_area = arch_get_unmapped_area; 296 } else { 297 /* We know it's 32-bit */ 298 unsigned long task_size = STACK_TOP32; 299 300 if (gap < 128 * 1024 * 1024) 301 gap = 128 * 1024 * 1024; 302 if (gap > (task_size / 6 * 5)) 303 gap = (task_size / 6 * 5); 304 305 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); 306 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 307 } 308 } 309 310 /* 311 * sys_pipe() is the normal C calling standard for creating 312 * a pipe. It's not the way unix traditionally does this, though. 313 */ 314 SYSCALL_DEFINE0(sparc_pipe) 315 { 316 int fd[2]; 317 int error; 318 319 error = do_pipe_flags(fd, 0); 320 if (error) 321 goto out; 322 current_pt_regs()->u_regs[UREG_I1] = fd[1]; 323 error = fd[0]; 324 out: 325 return error; 326 } 327 328 /* 329 * sys_ipc() is the de-multiplexer for the SysV IPC calls.. 330 * 331 * This is really horribly ugly. 332 */ 333 334 SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second, 335 unsigned long, third, void __user *, ptr, long, fifth) 336 { 337 long err; 338 339 /* No need for backward compatibility. We can start fresh... */ 340 if (call <= SEMTIMEDOP) { 341 switch (call) { 342 case SEMOP: 343 err = sys_semtimedop(first, ptr, 344 (unsigned int)second, NULL); 345 goto out; 346 case SEMTIMEDOP: 347 err = sys_semtimedop(first, ptr, (unsigned int)second, 348 (const struct __kernel_timespec __user *) 349 (unsigned long) fifth); 350 goto out; 351 case SEMGET: 352 err = sys_semget(first, (int)second, (int)third); 353 goto out; 354 case SEMCTL: { 355 err = sys_semctl(first, second, 356 (int)third | IPC_64, 357 (unsigned long) ptr); 358 goto out; 359 } 360 default: 361 err = -ENOSYS; 362 goto out; 363 } 364 } 365 if (call <= MSGCTL) { 366 switch (call) { 367 case MSGSND: 368 err = sys_msgsnd(first, ptr, (size_t)second, 369 (int)third); 370 goto out; 371 case MSGRCV: 372 err = sys_msgrcv(first, ptr, (size_t)second, fifth, 373 (int)third); 374 goto out; 375 case MSGGET: 376 err = sys_msgget((key_t)first, (int)second); 377 goto out; 378 case MSGCTL: 379 err = sys_msgctl(first, (int)second | IPC_64, ptr); 380 goto out; 381 default: 382 err = -ENOSYS; 383 goto out; 384 } 385 } 386 if (call <= SHMCTL) { 387 switch (call) { 388 case SHMAT: { 389 ulong raddr; 390 err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA); 391 if (!err) { 392 if (put_user(raddr, 393 (ulong __user *) third)) 394 err = -EFAULT; 395 } 396 goto out; 397 } 398 case SHMDT: 399 err = sys_shmdt(ptr); 400 goto out; 401 case SHMGET: 402 err = sys_shmget(first, (size_t)second, (int)third); 403 goto out; 404 case SHMCTL: 405 err = sys_shmctl(first, (int)second | IPC_64, ptr); 406 goto out; 407 default: 408 err = -ENOSYS; 409 goto out; 410 } 411 } else { 412 err = -ENOSYS; 413 } 414 out: 415 return err; 416 } 417 418 SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) 419 { 420 long ret; 421 422 if (personality(current->personality) == PER_LINUX32 && 423 personality(personality) == PER_LINUX) 424 personality |= PER_LINUX32; 425 ret = sys_personality(personality); 426 if (personality(ret) == PER_LINUX32) 427 ret &= ~PER_LINUX32; 428 429 return ret; 430 } 431 432 int sparc_mmap_check(unsigned long addr, unsigned long len) 433 { 434 if (test_thread_flag(TIF_32BIT)) { 435 if (len >= STACK_TOP32) 436 return -EINVAL; 437 438 if (addr > STACK_TOP32 - len) 439 return -EINVAL; 440 } else { 441 if (len >= VA_EXCLUDE_START) 442 return -EINVAL; 443 444 if (invalid_64bit_range(addr, len)) 445 return -EINVAL; 446 } 447 448 return 0; 449 } 450 451 /* Linux version of mmap */ 452 SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, 453 unsigned long, prot, unsigned long, flags, unsigned long, fd, 454 unsigned long, off) 455 { 456 unsigned long retval = -EINVAL; 457 458 if ((off + PAGE_ALIGN(len)) < off) 459 goto out; 460 if (off & ~PAGE_MASK) 461 goto out; 462 retval = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); 463 out: 464 return retval; 465 } 466 467 SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len) 468 { 469 if (invalid_64bit_range(addr, len)) 470 return -EINVAL; 471 472 return vm_munmap(addr, len); 473 } 474 475 SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len, 476 unsigned long, new_len, unsigned long, flags, 477 unsigned long, new_addr) 478 { 479 if (test_thread_flag(TIF_32BIT)) 480 return -EINVAL; 481 return sys_mremap(addr, old_len, new_len, flags, new_addr); 482 } 483 484 SYSCALL_DEFINE0(nis_syscall) 485 { 486 static int count; 487 struct pt_regs *regs = current_pt_regs(); 488 489 /* Don't make the system unusable, if someone goes stuck */ 490 if (count++ > 5) 491 return -ENOSYS; 492 493 printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]); 494 #ifdef DEBUG_UNIMP_SYSCALL 495 show_regs (regs); 496 #endif 497 498 return -ENOSYS; 499 } 500 501 /* #define DEBUG_SPARC_BREAKPOINT */ 502 503 asmlinkage void sparc_breakpoint(struct pt_regs *regs) 504 { 505 enum ctx_state prev_state = exception_enter(); 506 507 if (test_thread_flag(TIF_32BIT)) { 508 regs->tpc &= 0xffffffff; 509 regs->tnpc &= 0xffffffff; 510 } 511 #ifdef DEBUG_SPARC_BREAKPOINT 512 printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc); 513 #endif 514 force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->tpc, 0, current); 515 #ifdef DEBUG_SPARC_BREAKPOINT 516 printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc); 517 #endif 518 exception_exit(prev_state); 519 } 520 521 SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len) 522 { 523 int nlen, err; 524 char tmp[__NEW_UTS_LEN + 1]; 525 526 if (len < 0) 527 return -EINVAL; 528 529 down_read(&uts_sem); 530 531 nlen = strlen(utsname()->domainname) + 1; 532 err = -EINVAL; 533 if (nlen > len) 534 goto out_unlock; 535 memcpy(tmp, utsname()->domainname, nlen); 536 537 up_read(&uts_sem); 538 539 if (copy_to_user(name, tmp, nlen)) 540 return -EFAULT; 541 return 0; 542 543 out_unlock: 544 up_read(&uts_sem); 545 return err; 546 } 547 548 SYSCALL_DEFINE1(sparc_adjtimex, struct timex __user *, txc_p) 549 { 550 struct timex txc; /* Local copy of parameter */ 551 struct __kernel_timex *kt = (void *)&txc; 552 int ret; 553 554 /* Copy the user data space into the kernel copy 555 * structure. But bear in mind that the structures 556 * may change 557 */ 558 if (copy_from_user(&txc, txc_p, sizeof(struct timex))) 559 return -EFAULT; 560 561 /* 562 * override for sparc64 specific timeval type: tv_usec 563 * is 32 bit wide instead of 64-bit in __kernel_timex 564 */ 565 kt->time.tv_usec = txc.time.tv_usec; 566 ret = do_adjtimex(kt); 567 txc.time.tv_usec = kt->time.tv_usec; 568 569 return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret; 570 } 571 572 SYSCALL_DEFINE2(sparc_clock_adjtime, const clockid_t, which_clock,struct timex __user *, txc_p) 573 { 574 struct timex txc; /* Local copy of parameter */ 575 struct __kernel_timex *kt = (void *)&txc; 576 int ret; 577 578 if (!IS_ENABLED(CONFIG_POSIX_TIMERS)) { 579 pr_err_once("process %d (%s) attempted a POSIX timer syscall " 580 "while CONFIG_POSIX_TIMERS is not set\n", 581 current->pid, current->comm); 582 583 return -ENOSYS; 584 } 585 586 /* Copy the user data space into the kernel copy 587 * structure. But bear in mind that the structures 588 * may change 589 */ 590 if (copy_from_user(&txc, txc_p, sizeof(struct timex))) 591 return -EFAULT; 592 593 /* 594 * override for sparc64 specific timeval type: tv_usec 595 * is 32 bit wide instead of 64-bit in __kernel_timex 596 */ 597 kt->time.tv_usec = txc.time.tv_usec; 598 ret = do_clock_adjtime(which_clock, kt); 599 txc.time.tv_usec = kt->time.tv_usec; 600 601 return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret; 602 } 603 604 SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type, 605 utrap_handler_t, new_p, utrap_handler_t, new_d, 606 utrap_handler_t __user *, old_p, 607 utrap_handler_t __user *, old_d) 608 { 609 if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31) 610 return -EINVAL; 611 if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) { 612 if (old_p) { 613 if (!current_thread_info()->utraps) { 614 if (put_user(NULL, old_p)) 615 return -EFAULT; 616 } else { 617 if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p)) 618 return -EFAULT; 619 } 620 } 621 if (old_d) { 622 if (put_user(NULL, old_d)) 623 return -EFAULT; 624 } 625 return 0; 626 } 627 if (!current_thread_info()->utraps) { 628 current_thread_info()->utraps = 629 kcalloc(UT_TRAP_INSTRUCTION_31 + 1, sizeof(long), 630 GFP_KERNEL); 631 if (!current_thread_info()->utraps) 632 return -ENOMEM; 633 current_thread_info()->utraps[0] = 1; 634 } else { 635 if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p && 636 current_thread_info()->utraps[0] > 1) { 637 unsigned long *p = current_thread_info()->utraps; 638 639 current_thread_info()->utraps = 640 kmalloc_array(UT_TRAP_INSTRUCTION_31 + 1, 641 sizeof(long), 642 GFP_KERNEL); 643 if (!current_thread_info()->utraps) { 644 current_thread_info()->utraps = p; 645 return -ENOMEM; 646 } 647 p[0]--; 648 current_thread_info()->utraps[0] = 1; 649 memcpy(current_thread_info()->utraps+1, p+1, 650 UT_TRAP_INSTRUCTION_31*sizeof(long)); 651 } 652 } 653 if (old_p) { 654 if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p)) 655 return -EFAULT; 656 } 657 if (old_d) { 658 if (put_user(NULL, old_d)) 659 return -EFAULT; 660 } 661 current_thread_info()->utraps[type] = (long)new_p; 662 663 return 0; 664 } 665 666 SYSCALL_DEFINE1(memory_ordering, unsigned long, model) 667 { 668 struct pt_regs *regs = current_pt_regs(); 669 if (model >= 3) 670 return -EINVAL; 671 regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14); 672 return 0; 673 } 674 675 SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act, 676 struct sigaction __user *, oact, void __user *, restorer, 677 size_t, sigsetsize) 678 { 679 struct k_sigaction new_ka, old_ka; 680 int ret; 681 682 /* XXX: Don't preclude handling different sized sigset_t's. */ 683 if (sigsetsize != sizeof(sigset_t)) 684 return -EINVAL; 685 686 if (act) { 687 new_ka.ka_restorer = restorer; 688 if (copy_from_user(&new_ka.sa, act, sizeof(*act))) 689 return -EFAULT; 690 } 691 692 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 693 694 if (!ret && oact) { 695 if (copy_to_user(oact, &old_ka.sa, sizeof(*oact))) 696 return -EFAULT; 697 } 698 699 return ret; 700 } 701 702 SYSCALL_DEFINE0(kern_features) 703 { 704 return KERN_FEATURE_MIXED_MODE_STACK; 705 } 706