/*-
 * Copyright (c) 2000 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/stdint.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/unistd.h>

#include <machine/frame.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/sysarch.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <i386/linux/linux.h>
#include <i386/linux/linux_proto.h>
#include <compat/linux/linux_ipc.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>

struct l_descriptor {
	l_uint		entry_number;
	l_ulong		base_addr;
	l_uint		limit;
	l_uint		seg_32bit:1;
	l_uint		contents:2;
	l_uint		read_exec_only:1;
	l_uint		limit_in_pages:1;
	l_uint		seg_not_present:1;
	l_uint		useable:1;
};

struct l_old_select_argv {
	l_int		nfds;
	l_fd_set	*readfds;
	l_fd_set	*writefds;
	l_fd_set	*exceptfds;
	struct l_timeval	*timeout;
};

int
linux_to_bsd_sigaltstack(int lsa)
{
	int bsa = 0;

	if (lsa & LINUX_SS_DISABLE)
		bsa |= SS_DISABLE;
	if (lsa & LINUX_SS_ONSTACK)
		bsa |= SS_ONSTACK;
	return (bsa);
}

int
bsd_to_linux_sigaltstack(int bsa)
{
	int lsa = 0;

	if (bsa & SS_DISABLE)
		lsa |= LINUX_SS_DISABLE;
	if (bsa & SS_ONSTACK)
		lsa |= LINUX_SS_ONSTACK;
	return (lsa);
}

int
linux_execve(struct thread *td, struct linux_execve_args *args)
{
	struct execve_args bsd;
	caddr_t sg;

	sg = stackgap_init();
	CHECKALTEXIST(td, &sg, args->path);

#ifdef DEBUG
	if (ldebug(execve))
		printf(ARGS(execve, "%s"), args->path);
#endif

	bsd.fname = args->path;
	bsd.argv = args->argp;
	bsd.envv = args->envp;
	return (execve(td, &bsd));
}

struct l_ipc_kludge {
	struct l_msgbuf *msgp;
	l_long msgtyp;
};

int
linux_ipc(struct thread *td, struct linux_ipc_args *args)
{

	switch (args->what & 0xFFFF) {
	case LINUX_SEMOP: {
		struct linux_semop_args a;

		a.semid = args->arg1;
		a.tsops = args->ptr;
		a.nsops = args->arg2;
		return (linux_semop(td, &a));
	}
	case LINUX_SEMGET: {
		struct linux_semget_args a;

		a.key = args->arg1;
		a.nsems = args->arg2;
		a.semflg = args->arg3;
		return (linux_semget(td, &a));
	}
	case LINUX_SEMCTL: {
		struct linux_semctl_args a;
		int error;

		a.semid = args->arg1;
		a.semnum = args->arg2;
		a.cmd = args->arg3;
		error = copyin(args->ptr, &a.arg, sizeof(a.arg));
		if (error)
			return (error);
		return (linux_semctl(td, &a));
	}
	case LINUX_MSGSND: {
		struct linux_msgsnd_args a;

		a.msqid = args->arg1;
		a.msgp = args->ptr;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		return (linux_msgsnd(td, &a));
	}
	case LINUX_MSGRCV: {
		struct linux_msgrcv_args a;

		a.msqid = args->arg1;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		if ((args->what >> 16) == 0) {
			struct l_ipc_kludge tmp;
			int error;

			if (args->ptr == NULL)
				return (EINVAL);
			error = copyin(args->ptr, &tmp, sizeof(tmp));
			if (error)
				return (error);
			a.msgp = tmp.msgp;
			a.msgtyp = tmp.msgtyp;
		} else {
			a.msgp = args->ptr;
			a.msgtyp = args->arg5;
		}
		return (linux_msgrcv(td, &a));
	}
	case LINUX_MSGGET: {
		struct linux_msgget_args a;

		a.key = args->arg1;
		a.msgflg = args->arg2;
		return (linux_msgget(td, &a));
	}
	case LINUX_MSGCTL: {
		struct linux_msgctl_args a;

		a.msqid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_msgctl(td, &a));
	}
	case LINUX_SHMAT: {
		struct linux_shmat_args a;

		a.shmid = args->arg1;
		a.shmaddr = args->ptr;
		a.shmflg = args->arg2;
		a.raddr = (l_ulong *)args->arg3;
		return (linux_shmat(td, &a));
	}
	case LINUX_SHMDT: {
		struct linux_shmdt_args a;

		a.shmaddr = args->ptr;
		return (linux_shmdt(td, &a));
	}
	case LINUX_SHMGET: {
		struct linux_shmget_args a;

		a.key = args->arg1;
		a.size = args->arg2;
		a.shmflg = args->arg3;
		return (linux_shmget(td, &a));
	}
	case LINUX_SHMCTL: {
		struct linux_shmctl_args a;

		a.shmid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_shmctl(td, &a));
	}
	default:
		break;
	}

	return (EINVAL);
}

int
linux_old_select(struct thread *td, struct linux_old_select_args *args)
{
	struct l_old_select_argv linux_args;
	struct linux_select_args newsel;
	int error;

#ifdef DEBUG
	if (ldebug(old_select))
		printf(ARGS(old_select, "%p"), args->ptr);
#endif

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

	newsel.nfds = linux_args.nfds;
	newsel.readfds = linux_args.readfds;
	newsel.writefds = linux_args.writefds;
	newsel.exceptfds = linux_args.exceptfds;
	newsel.timeout = linux_args.timeout;
	return (linux_select(td, &newsel));
}

int
linux_fork(struct thread *td, struct linux_fork_args *args)
{
	int error;

#ifdef DEBUG
	if (ldebug(fork))
		printf(ARGS(fork, ""));
#endif

	if ((error = fork(td, (struct fork_args *)args)) != 0)
		return (error);

	if (td->td_retval[1] == 1)
		td->td_retval[0] = 0;
	return (0);
}

int
linux_vfork(struct thread *td, struct linux_vfork_args *args)
{
	int error;

#ifdef DEBUG
	if (ldebug(vfork))
		printf(ARGS(vfork, ""));
#endif

	if ((error = vfork(td, (struct vfork_args *)args)) != 0)
		return (error);
	/* Are we the child? */
	if (td->td_retval[1] == 1)
		td->td_retval[0] = 0;
	return (0);
}

#define	CLONE_VM	0x100
#define	CLONE_FS	0x200
#define	CLONE_FILES	0x400
#define	CLONE_SIGHAND	0x800
#define	CLONE_PID	0x1000

int
linux_clone(struct thread *td, struct linux_clone_args *args)
{
	int error, ff = RFPROC | RFSTOPPED;
	struct proc *p2;
	int exit_signal;

#ifdef DEBUG
	if (ldebug(clone)) {
		printf(ARGS(clone, "flags %x, stack %x"),
		    (unsigned int)args->flags, (unsigned int)args->stack);
		if (args->flags & CLONE_PID)
			printf(LMSG("CLONE_PID not yet supported"));
	}
#endif

	if (!args->stack)
		return (EINVAL);

	exit_signal = args->flags & 0x000000ff;
	if (exit_signal >= LINUX_NSIG)
		return (EINVAL);

	if (exit_signal <= LINUX_SIGTBLSZ)
		exit_signal = linux_to_bsd_signal[_SIG_IDX(exit_signal)];

	if (args->flags & CLONE_VM)
		ff |= RFMEM;
	if (args->flags & CLONE_SIGHAND)
		ff |= RFSIGSHARE;
	if (!(args->flags & CLONE_FILES))
		ff |= RFFDG;

	mtx_lock(&Giant);
	error = fork1(td, ff, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;

		PROC_LOCK(p2);
		p2->p_sigparent = exit_signal;
		FIRST_THREAD_IN_PROC(p2)->td_frame->tf_esp =
		    (unsigned int)args->stack;

#ifdef DEBUG
		if (ldebug(clone))
			printf(LMSG("clone: successful rfork to %ld"),
			    (long)p2->p_pid);
#endif

		/*
		 * Make this runnable after we are finished with it.
		 */
		mtx_lock_spin(&sched_lock);
		TD_SET_CAN_RUN(FIRST_THREAD_IN_PROC(p2));
		setrunqueue(FIRST_THREAD_IN_PROC(p2));
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p2);
	}
	mtx_unlock(&Giant);

	return (error);
}

/* XXX move */
struct l_mmap_argv {
	l_caddr_t	addr;
	l_int		len;
	l_int		prot;
	l_int		flags;
	l_int		fd;
	l_int		pos;
};

#define	STACK_SIZE	(2 * 1024 * 1024)
#define	GUARD_SIZE	(4 * PAGE_SIZE)

static int linux_mmap_common(struct thread *, struct l_mmap_argv *);

int
linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
{
	struct l_mmap_argv linux_args;

#ifdef DEBUG
	if (ldebug(mmap2))
		printf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)args->addr, args->len, args->prot,
		    args->flags, args->fd, args->pgoff);
#endif

	linux_args.addr = (l_caddr_t)args->addr;
	linux_args.len = args->len;
	linux_args.prot = args->prot;
	linux_args.flags = args->flags;
	linux_args.fd = args->fd;
	linux_args.pos = args->pgoff * PAGE_SIZE;

	return (linux_mmap_common(td, &linux_args));
}

int
linux_mmap(struct thread *td, struct linux_mmap_args *args)
{
	int error;
	struct l_mmap_argv linux_args;

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(mmap))
		printf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)linux_args.addr, linux_args.len, linux_args.prot,
		    linux_args.flags, linux_args.fd, linux_args.pos);
#endif

	return (linux_mmap_common(td, &linux_args));
}

static int
linux_mmap_common(struct thread *td, struct l_mmap_argv *linux_args)
{
	struct proc *p = td->td_proc;
	struct mmap_args /* {
		caddr_t addr;
		size_t len;
		int prot;
		int flags;
		int fd;
		long pad;
		off_t pos;
	} */ bsd_args;

	bsd_args.flags = 0;
	if (linux_args->flags & LINUX_MAP_SHARED)
		bsd_args.flags |= MAP_SHARED;
	if (linux_args->flags & LINUX_MAP_PRIVATE)
		bsd_args.flags |= MAP_PRIVATE;
	if (linux_args->flags & LINUX_MAP_FIXED)
		bsd_args.flags |= MAP_FIXED;
	if (linux_args->flags & LINUX_MAP_ANON)
		bsd_args.flags |= MAP_ANON;
	else
		bsd_args.flags |= MAP_NOSYNC;
	if (linux_args->flags & LINUX_MAP_GROWSDOWN) {
		bsd_args.flags |= MAP_STACK;

		/* The Linux MAP_GROWSDOWN option does not limit auto
		 * growth of the region.  Linux mmap with this option
		 * takes as addr the initial BOS, and as len, the initial
		 * region size.  It can then grow down from addr without
		 * limit.  However, Linux threads have an implicit internal
		 * limit on stack size of STACK_SIZE; it is just not
		 * enforced explicitly in Linux.  Here, however, we impose
		 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
		 * region, since we can do this with our mmap.
		 *
		 * Our mmap with MAP_STACK takes addr as the maximum
		 * downsize limit on BOS, and as len the max size of
		 * the region.  It then maps the top SGROWSIZ bytes,
		 * and autogrows the region down, up to the limit
		 * in addr.
		 *
		 * If we don't use the MAP_STACK option, the effect
		 * of this code is to allocate a stack region of a
		 * fixed size of (STACK_SIZE - GUARD_SIZE).
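		 *
		 * A rough worked example (illustrative only, not from the
		 * original comment; it assumes 4KB pages, so GUARD_SIZE is
		 * 16KB): a Linux request with addr = A and len = 128KB has
		 * its TOS at A + 128KB; since 128KB is less than
		 * STACK_SIZE - GUARD_SIZE, the region is sized up to
		 * 2MB - 16KB, and the BOS handed to mmap becomes
		 * (A + 128KB) - (2MB - 16KB).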
		 */

		/* This gives us TOS */
		bsd_args.addr = linux_args->addr + linux_args->len;

		if (bsd_args.addr > p->p_vmspace->vm_maxsaddr) {
			/* Some linux apps will attempt to mmap
			 * thread stacks near the top of their
			 * address space.  If their TOS is greater
			 * than vm_maxsaddr, vm_map_growstack()
			 * will confuse the thread stack with the
			 * process stack and deliver a SEGV if they
			 * attempt to grow the thread stack past their
			 * current stacksize rlimit.  To avoid this,
			 * adjust vm_maxsaddr upwards to reflect
			 * the current stacksize rlimit rather
			 * than the maximum possible stacksize.
			 * It would be better to adjust the
			 * mmap'ed region, but some apps do not check
			 * mmap's return value.
			 */
			mtx_assert(&Giant, MA_OWNED);
			p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
			    p->p_rlimit[RLIMIT_STACK].rlim_cur;
		}

		/* This gives us our maximum stack size */
		if (linux_args->len > STACK_SIZE - GUARD_SIZE)
			bsd_args.len = linux_args->len;
		else
			bsd_args.len = STACK_SIZE - GUARD_SIZE;

		/* This gives us a new BOS.  If we're using VM_STACK, then
		 * mmap will just map the top SGROWSIZ bytes, and let
		 * the stack grow down to the limit at BOS.  If we're
		 * not using VM_STACK we map the full stack, since we
		 * don't have a way to autogrow it.
		 */
		bsd_args.addr -= bsd_args.len;
	} else {
		bsd_args.addr = linux_args->addr;
		bsd_args.len = linux_args->len;
	}

	bsd_args.prot = linux_args->prot | PROT_READ;	/* always required */
	if (linux_args->flags & LINUX_MAP_ANON)
		bsd_args.fd = -1;
	else
		bsd_args.fd = linux_args->fd;
	bsd_args.pos = linux_args->pos;
	bsd_args.pad = 0;

#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> (%p, %d, %d, 0x%08x, %d, %d)\n",
		    (void *)bsd_args.addr, bsd_args.len, bsd_args.prot,
		    bsd_args.flags, bsd_args.fd, (int)bsd_args.pos);
#endif

	return (mmap(td, &bsd_args));
}

int
linux_pipe(struct thread *td, struct linux_pipe_args *args)
{
	int error;
	int reg_edx;

#ifdef DEBUG
	if (ldebug(pipe))
		printf(ARGS(pipe, "*"));
#endif

	/*
	 * The native pipe() returns the two descriptors in td_retval[0]
	 * and td_retval[1]; Linux expects them only in the user array,
	 * so save td_retval[1] (returned in %edx) and restore it on
	 * every exit path.
	 */
	reg_edx = td->td_retval[1];
	error = pipe(td, 0);
	if (error) {
		td->td_retval[1] = reg_edx;
		return (error);
	}

	error = copyout(td->td_retval, args->pipefds, 2*sizeof(int));
	if (error) {
		td->td_retval[1] = reg_edx;
		return (error);
	}

	td->td_retval[1] = reg_edx;
	td->td_retval[0] = 0;
	return (0);
}

int
linux_ioperm(struct thread *td, struct linux_ioperm_args *args)
{
	struct sysarch_args sa;
	struct i386_ioperm_args *iia;
	caddr_t sg;

	sg = stackgap_init();
	iia = stackgap_alloc(&sg, sizeof(struct i386_ioperm_args));
	iia->start = args->start;
	iia->length = args->length;
	iia->enable = args->enable;
	sa.op = I386_SET_IOPERM;
	sa.parms = (char *)iia;
	return (sysarch(td, &sa));
}

int
linux_iopl(struct thread *td, struct linux_iopl_args *args)
{
	int error;

	if (args->level < 0 || args->level > 3)
		return (EINVAL);
	if ((error = suser(td)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) |
	    (args->level * (PSL_IOPL / 3));
	return (0);
}

int
linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap)
{
	int error;
	caddr_t sg;
	struct sysarch_args args;
	struct i386_ldt_args *ldt;
	struct l_descriptor ld;
	union descriptor *desc;

	sg = stackgap_init();

	if (uap->ptr == NULL)
		return (EINVAL);

	switch (uap->func) {
	case 0x00: /* read_ldt */
		ldt = stackgap_alloc(&sg, sizeof(*ldt));
		ldt->start = 0;
		ldt->descs = uap->ptr;
		ldt->num = uap->bytecount / sizeof(union descriptor);
		args.op = I386_GET_LDT;
		args.parms = (char*)ldt;
		error = sysarch(td, &args);
		td->td_retval[0] *= sizeof(union descriptor);
		break;
	case 0x01: /* write_ldt */
	case 0x11: /* write_ldt */
		if (uap->bytecount != sizeof(ld))
			return (EINVAL);

		error = copyin(uap->ptr, &ld, sizeof(ld));
		if (error)
			return (error);

		ldt = stackgap_alloc(&sg, sizeof(*ldt));
		desc = stackgap_alloc(&sg, sizeof(*desc));
		ldt->start = ld.entry_number;
		ldt->descs = desc;
		ldt->num = 1;
		/* Split the 20-bit limit and 32-bit base across the
		 * descriptor's low/high fields. */
		desc->sd.sd_lolimit = (ld.limit & 0x0000ffff);
		desc->sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
		desc->sd.sd_lobase = (ld.base_addr & 0x00ffffff);
		desc->sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
		desc->sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
		    (ld.contents << 2);
		desc->sd.sd_dpl = 3;
		desc->sd.sd_p = (ld.seg_not_present ^ 1);
		desc->sd.sd_xx = 0;
		desc->sd.sd_def32 = ld.seg_32bit;
		desc->sd.sd_gran = ld.limit_in_pages;
		args.op = I386_SET_LDT;
		args.parms = (char*)ldt;
		error = sysarch(td, &args);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error == EOPNOTSUPP) {
		printf("linux: modify_ldt needs kernel option USER_LDT\n");
		error = ENOSYS;
	}

	return (error);
}

int
linux_sigaction(struct thread *td, struct linux_sigaction_args *args)
{
	l_osigaction_t osa;
	l_sigaction_t act, oact;
	int error;

#ifdef DEBUG
	if (ldebug(sigaction))
		printf(ARGS(sigaction, "%d, %p, %p"),
		    args->sig, (void *)args->nsa, (void *)args->osa);
#endif

	if (args->nsa != NULL) {
		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
		if (error)
			return (error);
		act.lsa_handler = osa.lsa_handler;
		act.lsa_flags = osa.lsa_flags;
		act.lsa_restorer = osa.lsa_restorer;
		LINUX_SIGEMPTYSET(act.lsa_mask);
		act.lsa_mask.__bits[0] = osa.lsa_mask;
	}

	error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL,
	    args->osa ? &oact : NULL);

	if (args->osa != NULL && !error) {
		osa.lsa_handler = oact.lsa_handler;
		osa.lsa_flags = oact.lsa_flags;
		osa.lsa_restorer = oact.lsa_restorer;
		osa.lsa_mask = oact.lsa_mask.__bits[0];
		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
	}

	return (error);
}

/*
 * Linux has two extra args, restart and oldmask.  We don't use these,
 * but it seems that "restart" is actually a context pointer that
 * enables the signal to happen with a different register set.
 */
int
linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args)
{
	sigset_t sigmask;
	l_sigset_t mask;

#ifdef DEBUG
	if (ldebug(sigsuspend))
		printf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask);
#endif

	LINUX_SIGEMPTYSET(mask);
	mask.__bits[0] = args->mask;
	linux_to_bsd_sigset(&mask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap)
{
	l_sigset_t lmask;
	sigset_t sigmask;
	int error;

#ifdef DEBUG
	if (ldebug(rt_sigsuspend))
		printf(ARGS(rt_sigsuspend, "%p, %d"),
		    (void *)uap->newset, uap->sigsetsize);
#endif

	if (uap->sigsetsize != sizeof(l_sigset_t))
		return (EINVAL);

	error = copyin(uap->newset, &lmask, sizeof(l_sigset_t));
	if (error)
		return (error);

	linux_to_bsd_sigset(&lmask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_pause(struct thread *td, struct linux_pause_args *args)
{
	struct proc *p = td->td_proc;
	sigset_t sigmask;

#ifdef DEBUG
	if (ldebug(pause))
		printf(ARGS(pause, ""));
#endif

	PROC_LOCK(p);
	sigmask = p->p_sigmask;
	PROC_UNLOCK(p);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap)
{
	stack_t ss, oss;
	l_stack_t lss;
	int error;

#ifdef DEBUG
	if (ldebug(sigaltstack))
		printf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss);
#endif

	if (uap->uss != NULL) {
		error = copyin(uap->uss, &lss, sizeof(l_stack_t));
		if (error)
			return (error);

		ss.ss_sp = lss.ss_sp;
		ss.ss_size = lss.ss_size;
		ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags);
	}
	error = kern_sigaltstack(td, (uap->uoss != NULL) ? &oss : NULL,
	    (uap->uss != NULL) ? &ss : NULL);
	if (!error && uap->uoss != NULL) {
		lss.ss_sp = oss.ss_sp;
		lss.ss_size = oss.ss_size;
		lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
		error = copyout(&lss, uap->uoss, sizeof(l_stack_t));
	}

	return (error);
}

int
linux_ftruncate64(struct thread *td, struct linux_ftruncate64_args *args)
{
	struct ftruncate_args sa;

#ifdef DEBUG
	if (ldebug(ftruncate64))
		printf(ARGS(ftruncate64, "%u, %jd"), args->fd,
		    (intmax_t)args->length);
#endif

	sa.fd = args->fd;
	sa.pad = 0;
	sa.length = args->length;
	return ftruncate(td, &sa);
}