/*-
 * Copyright (c) 2000 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <sys/sched.h>

#include <machine/frame.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/sysarch.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <i386/linux/linux.h>
#include <i386/linux/linux_proto.h>
#include <compat/linux/linux_ipc.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_emul.h>

#include <i386/include/pcb.h>   /* needed for pcb definition in linux_set_thread_area */

#include "opt_posix.h"

extern struct sysentvec elf32_freebsd_sysvec;   /* defined in i386/i386/elf_machdep.c */

struct l_descriptor {
    l_uint      entry_number;
    l_ulong     base_addr;
    l_uint      limit;
    l_uint      seg_32bit:1;
    l_uint      contents:2;
    l_uint      read_exec_only:1;
    l_uint      limit_in_pages:1;
    l_uint      seg_not_present:1;
    l_uint      useable:1;
};

struct l_old_select_argv {
    l_int       nfds;
    l_fd_set    *readfds;
    l_fd_set    *writefds;
    l_fd_set    *exceptfds;
    struct l_timeval    *timeout;
};

int
linux_to_bsd_sigaltstack(int lsa)
{
    int bsa = 0;

    if (lsa & LINUX_SS_DISABLE)
        bsa |= SS_DISABLE;
    if (lsa & LINUX_SS_ONSTACK)
        bsa |= SS_ONSTACK;
    return (bsa);
}

int
bsd_to_linux_sigaltstack(int bsa)
{
    int lsa = 0;

    if (bsa & SS_DISABLE)
        lsa |= LINUX_SS_DISABLE;
    if (bsa & SS_ONSTACK)
        lsa |= LINUX_SS_ONSTACK;
    return (lsa);
}
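
/*
 * execve(2) for a Linux binary: translate the Linux path, hand the
 * arguments to kern_execve() and set up Linux emuldata afterwards only
 * if the new image is still a Linux binary.
 */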
int
linux_execve(struct thread *td, struct linux_execve_args *args)
{
    int error;
    char *newpath;
    struct image_args eargs;

    LCONVPATHEXIST(td, args->path, &newpath);

#ifdef DEBUG
    if (ldebug(execve))
        printf(ARGS(execve, "%s"), newpath);
#endif

    error = exec_copyin_args(&eargs, newpath, UIO_SYSSPACE,
        args->argp, args->envp);
    free(newpath, M_TEMP);
    if (error == 0)
        error = kern_execve(td, &eargs, NULL);
    if (error == 0)
        /*
         * A Linux process can exec a FreeBSD one; don't attempt
         * to create emuldata for such a process with
         * linux_proc_init(), as that triggers a KASSERT panic
         * because the process has p->p_emuldata == NULL.
         */
        if (td->td_proc->p_sysent == &elf_linux_sysvec)
            error = linux_proc_init(td, 0, 0);
    return (error);
}

struct l_ipc_kludge {
    struct l_msgbuf *msgp;
    l_long msgtyp;
};

int
linux_ipc(struct thread *td, struct linux_ipc_args *args)
{

    switch (args->what & 0xFFFF) {
    case LINUX_SEMOP: {
        struct linux_semop_args a;

        a.semid = args->arg1;
        a.tsops = args->ptr;
        a.nsops = args->arg2;
        return (linux_semop(td, &a));
    }
    case LINUX_SEMGET: {
        struct linux_semget_args a;

        a.key = args->arg1;
        a.nsems = args->arg2;
        a.semflg = args->arg3;
        return (linux_semget(td, &a));
    }
    case LINUX_SEMCTL: {
        struct linux_semctl_args a;
        int error;

        a.semid = args->arg1;
        a.semnum = args->arg2;
        a.cmd = args->arg3;
        error = copyin(args->ptr, &a.arg, sizeof(a.arg));
        if (error)
            return (error);
        return (linux_semctl(td, &a));
    }
    case LINUX_MSGSND: {
        struct linux_msgsnd_args a;

        a.msqid = args->arg1;
        a.msgp = args->ptr;
        a.msgsz = args->arg2;
        a.msgflg = args->arg3;
        return (linux_msgsnd(td, &a));
    }
    case LINUX_MSGRCV: {
        struct linux_msgrcv_args a;

        a.msqid = args->arg1;
        a.msgsz = args->arg2;
        a.msgflg = args->arg3;
        if ((args->what >> 16) == 0) {
            struct l_ipc_kludge tmp;
            int error;

            if (args->ptr == NULL)
                return (EINVAL);
            error = copyin(args->ptr, &tmp, sizeof(tmp));
            if (error)
                return (error);
            a.msgp = tmp.msgp;
            a.msgtyp = tmp.msgtyp;
        } else {
            a.msgp = args->ptr;
            a.msgtyp = args->arg5;
        }
        return (linux_msgrcv(td, &a));
    }
    case LINUX_MSGGET: {
        struct linux_msgget_args a;

        a.key = args->arg1;
        a.msgflg = args->arg2;
        return (linux_msgget(td, &a));
    }
    case LINUX_MSGCTL: {
        struct linux_msgctl_args a;

        a.msqid = args->arg1;
        a.cmd = args->arg2;
        a.buf = args->ptr;
        return (linux_msgctl(td, &a));
    }
    case LINUX_SHMAT: {
        struct linux_shmat_args a;

        a.shmid = args->arg1;
        a.shmaddr = args->ptr;
        a.shmflg = args->arg2;
        a.raddr = (l_ulong *)args->arg3;
        return (linux_shmat(td, &a));
    }
    case LINUX_SHMDT: {
        struct linux_shmdt_args a;

        a.shmaddr = args->ptr;
        return (linux_shmdt(td, &a));
    }
    case LINUX_SHMGET: {
        struct linux_shmget_args a;

        a.key = args->arg1;
        a.size = args->arg2;
        a.shmflg = args->arg3;
        return (linux_shmget(td, &a));
    }
    case LINUX_SHMCTL: {
        struct linux_shmctl_args a;

        a.shmid = args->arg1;
        a.cmd = args->arg2;
        a.buf = args->ptr;
        return (linux_shmctl(td, &a));
    }
    default:
        break;
    }

    return (EINVAL);
}
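
/*
 * old_select(2): the old Linux/i386 select takes a single pointer to a
 * structure holding the five arguments; unpack it and forward to
 * linux_select().
 */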
int
linux_old_select(struct thread *td, struct linux_old_select_args *args)
{
    struct l_old_select_argv linux_args;
    struct linux_select_args newsel;
    int error;

#ifdef DEBUG
    if (ldebug(old_select))
        printf(ARGS(old_select, "%p"), args->ptr);
#endif

    error = copyin(args->ptr, &linux_args, sizeof(linux_args));
    if (error)
        return (error);

    newsel.nfds = linux_args.nfds;
    newsel.readfds = linux_args.readfds;
    newsel.writefds = linux_args.writefds;
    newsel.exceptfds = linux_args.exceptfds;
    newsel.timeout = linux_args.timeout;
    return (linux_select(td, &newsel));
}

int
linux_fork(struct thread *td, struct linux_fork_args *args)
{
    int error;
    struct proc *p2;
    struct thread *td2;

#ifdef DEBUG
    if (ldebug(fork))
        printf(ARGS(fork, ""));
#endif

    if ((error = fork1(td, RFFDG | RFPROC | RFSTOPPED, 0, &p2)) != 0)
        return (error);

    if (error == 0) {
        td->td_retval[0] = p2->p_pid;
        td->td_retval[1] = 0;
    }

    if (td->td_retval[1] == 1)
        td->td_retval[0] = 0;
    error = linux_proc_init(td, td->td_retval[0], 0);
    if (error)
        return (error);

    td2 = FIRST_THREAD_IN_PROC(p2);

    /*
     * Make this runnable after we are finished with it.
     */
    thread_lock(td2);
    TD_SET_CAN_RUN(td2);
    sched_add(td2, SRQ_BORING);
    thread_unlock(td2);

    return (0);
}
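
/*
 * vfork(2): share the address space with the child (RFMEM) and block
 * the parent with P_PPWAIT until the child execs or exits.
 */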
int
linux_vfork(struct thread *td, struct linux_vfork_args *args)
{
    int error;
    struct proc *p2;
    struct thread *td2;

#ifdef DEBUG
    if (ldebug(vfork))
        printf(ARGS(vfork, ""));
#endif

    /* Exclude RFPPWAIT. */
    if ((error = fork1(td, RFFDG | RFPROC | RFMEM | RFSTOPPED, 0, &p2)) != 0)
        return (error);
    if (error == 0) {
        td->td_retval[0] = p2->p_pid;
        td->td_retval[1] = 0;
    }
    /* Are we the child? */
    if (td->td_retval[1] == 1)
        td->td_retval[0] = 0;
    error = linux_proc_init(td, td->td_retval[0], 0);
    if (error)
        return (error);

    PROC_LOCK(p2);
    p2->p_flag |= P_PPWAIT;
    PROC_UNLOCK(p2);

    td2 = FIRST_THREAD_IN_PROC(p2);

    /*
     * Make this runnable after we are finished with it.
     */
    thread_lock(td2);
    TD_SET_CAN_RUN(td2);
    sched_add(td2, SRQ_BORING);
    thread_unlock(td2);

    /* Wait for the child to exit, i.e. emulate vfork. */
    PROC_LOCK(p2);
    while (p2->p_flag & P_PPWAIT)
        msleep(td->td_proc, &p2->p_mtx, PWAIT, "ppwait", 0);
    PROC_UNLOCK(p2);

    return (0);
}
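
/*
 * clone(2): translate the Linux CLONE_* flags into rfork(2) RF* flags,
 * create the child with fork1() and then adjust emuldata, TID pointers,
 * the child's stack pointer and TLS segment before making it runnable.
 */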
int
linux_clone(struct thread *td, struct linux_clone_args *args)
{
    int error, ff = RFPROC | RFSTOPPED;
    struct proc *p2;
    struct thread *td2;
    int exit_signal;
    struct linux_emuldata *em;

#ifdef DEBUG
    if (ldebug(clone)) {
        printf(ARGS(clone, "flags %x, stack %x, parent tid: %x, child tid: %x"),
            (unsigned int)args->flags, (unsigned int)args->stack,
            (unsigned int)args->parent_tidptr, (unsigned int)args->child_tidptr);
    }
#endif

    exit_signal = args->flags & 0x000000ff;
    if (LINUX_SIG_VALID(exit_signal)) {
        if (exit_signal <= LINUX_SIGTBLSZ)
            exit_signal =
                linux_to_bsd_signal[_SIG_IDX(exit_signal)];
    } else if (exit_signal != 0)
        return (EINVAL);

    if (args->flags & LINUX_CLONE_VM)
        ff |= RFMEM;
    if (args->flags & LINUX_CLONE_SIGHAND)
        ff |= RFSIGSHARE;
    /*
     * XXX: in Linux, sharing of fs info (chroot/cwd/umask) and open
     * files is independent.  In FreeBSD it's in one structure, but in
     * reality it doesn't cause any problems because both of these
     * flags are usually set together.
     */
    if (!(args->flags & (LINUX_CLONE_FILES | LINUX_CLONE_FS)))
        ff |= RFFDG;

    /*
     * Attempt to detect when linux_clone(2) is used for creating
     * kernel threads.  Unfortunately, despite the existence of the
     * CLONE_THREAD flag, the version of the linuxthreads package used
     * in most popular distros as of the beginning of 2005 doesn't make
     * any use of it.  Therefore, this detection relies on the
     * empirical observation that linuxthreads sets a certain
     * combination of flags, so that we can make more or less
     * precise detection and notify the FreeBSD kernel that several
     * processes are in fact part of the same threading group, so
     * that special treatment is necessary for signal delivery
     * between those processes and fd locking.
     */
    if ((args->flags & 0xffffff00) == LINUX_THREADING_FLAGS)
        ff |= RFTHREAD;

    if (args->flags & LINUX_CLONE_PARENT_SETTID)
        if (args->parent_tidptr == NULL)
            return (EINVAL);

    error = fork1(td, ff, 0, &p2);
    if (error)
        return (error);

    if (args->flags & (LINUX_CLONE_PARENT | LINUX_CLONE_THREAD)) {
        sx_xlock(&proctree_lock);
        PROC_LOCK(p2);
        proc_reparent(p2, td->td_proc->p_pptr);
        PROC_UNLOCK(p2);
        sx_xunlock(&proctree_lock);
    }

    /* create the emuldata */
    error = linux_proc_init(td, p2->p_pid, args->flags);
    /* reference it - no need to check this */
    em = em_find(p2, EMUL_DOLOCK);
    KASSERT(em != NULL, ("clone: emuldata not found.\n"));
    /* and adjust it */

    if (args->flags & LINUX_CLONE_THREAD) {
        /*
         * XXX: Linux mangles pgrp and pptr somehow.
         * I think it might be this, but I am not sure.
         */
#ifdef notyet
        PROC_LOCK(p2);
        p2->p_pgrp = td->td_proc->p_pgrp;
        PROC_UNLOCK(p2);
#endif
        exit_signal = 0;
    }

    if (args->flags & LINUX_CLONE_CHILD_SETTID)
        em->child_set_tid = args->child_tidptr;
    else
        em->child_set_tid = NULL;

    if (args->flags & LINUX_CLONE_CHILD_CLEARTID)
        em->child_clear_tid = args->child_tidptr;
    else
        em->child_clear_tid = NULL;

    EMUL_UNLOCK(&emul_lock);

    if (args->flags & LINUX_CLONE_PARENT_SETTID) {
        error = copyout(&p2->p_pid, args->parent_tidptr, sizeof(p2->p_pid));
        if (error)
            printf(LMSG("copyout failed!"));
    }

    PROC_LOCK(p2);
    p2->p_sigparent = exit_signal;
    PROC_UNLOCK(p2);
    td2 = FIRST_THREAD_IN_PROC(p2);
    /*
     * In the case of stack = NULL we are supposed to COW the calling
     * process's stack.  This is what normal fork() does, so we just
     * keep the tf_esp arg intact.
     */
    if (args->stack)
        td2->td_frame->tf_esp = (unsigned int)args->stack;

    if (args->flags & LINUX_CLONE_SETTLS) {
        struct l_user_desc info;
        int idx;
        int a[2];
        struct segment_descriptor sd;

        error = copyin((void *)td->td_frame->tf_esi, &info, sizeof(struct l_user_desc));
        if (error) {
            printf(LMSG("copyin failed!"));
        } else {

            idx = info.entry_number;

            /*
             * It looks like we're getting the idx we returned
             * in the set_thread_area() syscall.
             */
            if (idx != 6 && idx != 3) {
                printf(LMSG("resetting idx!"));
                idx = 3;
            }

            /* This doesn't happen in practice. */
            if (idx == 6) {
                /* We might copy out the entry_number as 3. */
                info.entry_number = 3;
                error = copyout(&info, (void *) td->td_frame->tf_esi, sizeof(struct l_user_desc));
                if (error)
                    printf(LMSG("copyout failed!"));
            }

            a[0] = LINUX_LDT_entry_a(&info);
            a[1] = LINUX_LDT_entry_b(&info);

            memcpy(&sd, &a, sizeof(a));
#ifdef DEBUG
            if (ldebug(clone))
                printf("Segment created in clone with CLONE_SETTLS: lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, type: %i, dpl: %i, p: %i, xx: %i, def32: %i, gran: %i\n", sd.sd_lobase,
                    sd.sd_hibase,
                    sd.sd_lolimit,
                    sd.sd_hilimit,
                    sd.sd_type,
                    sd.sd_dpl,
                    sd.sd_p,
                    sd.sd_xx,
                    sd.sd_def32,
                    sd.sd_gran);
#endif

            /* set %gs */
            td2->td_pcb->pcb_gsd = sd;
            td2->td_pcb->pcb_gs = GSEL(GUGS_SEL, SEL_UPL);
        }
    }

#ifdef DEBUG
    if (ldebug(clone))
        printf(LMSG("clone: successful rfork to %ld, stack %p sig = %d"),
            (long)p2->p_pid, args->stack, exit_signal);
#endif
    if (args->flags & LINUX_CLONE_VFORK) {
        PROC_LOCK(p2);
        p2->p_flag |= P_PPWAIT;
        PROC_UNLOCK(p2);
    }

    /*
     * Make this runnable after we are finished with it.
     */
    thread_lock(td2);
    TD_SET_CAN_RUN(td2);
    sched_add(td2, SRQ_BORING);
    thread_unlock(td2);

    td->td_retval[0] = p2->p_pid;
    td->td_retval[1] = 0;

    if (args->flags & LINUX_CLONE_VFORK) {
        /* Wait for the child to exit, i.e. emulate vfork. */
        PROC_LOCK(p2);
        while (p2->p_flag & P_PPWAIT)
            msleep(td->td_proc, &p2->p_mtx, PWAIT, "ppwait", 0);
        PROC_UNLOCK(p2);
    }

    return (0);
}
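
/*
 * Stack size limits used below when emulating Linux MAP_GROWSDOWN
 * mappings with MAP_STACK.
 */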
#define STACK_SIZE  (2 * 1024 * 1024)
#define GUARD_SIZE  (4 * PAGE_SIZE)

static int linux_mmap_common(struct thread *, struct l_mmap_argv *);

int
linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
{
    struct l_mmap_argv linux_args;

#ifdef DEBUG
    if (ldebug(mmap2))
        printf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"),
            (void *)args->addr, args->len, args->prot,
            args->flags, args->fd, args->pgoff);
#endif

    linux_args.addr = args->addr;
    linux_args.len = args->len;
    linux_args.prot = args->prot;
    linux_args.flags = args->flags;
    linux_args.fd = args->fd;
    linux_args.pgoff = args->pgoff * PAGE_SIZE;

    return (linux_mmap_common(td, &linux_args));
}

int
linux_mmap(struct thread *td, struct linux_mmap_args *args)
{
    int error;
    struct l_mmap_argv linux_args;

    error = copyin(args->ptr, &linux_args, sizeof(linux_args));
    if (error)
        return (error);

#ifdef DEBUG
    if (ldebug(mmap))
        printf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"),
            (void *)linux_args.addr, linux_args.len, linux_args.prot,
            linux_args.flags, linux_args.fd, linux_args.pgoff);
#endif

    return (linux_mmap_common(td, &linux_args));
}
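
/*
 * Common back end for mmap(2) and mmap2(2): translate the Linux flags
 * and protection bits, emulate MAP_GROWSDOWN with MAP_STACK and call
 * the native mmap().
 */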
static int
linux_mmap_common(struct thread *td, struct l_mmap_argv *linux_args)
{
    struct proc *p = td->td_proc;
    struct mmap_args /* {
        caddr_t addr;
        size_t len;
        int prot;
        int flags;
        int fd;
        long pad;
        off_t pos;
    } */ bsd_args;
    int error;
    struct file *fp;

    error = 0;
    bsd_args.flags = 0;
    fp = NULL;

    /*
     * Linux mmap(2):
     * You must specify exactly one of MAP_SHARED and MAP_PRIVATE.
     */
    if (! ((linux_args->flags & LINUX_MAP_SHARED) ^
        (linux_args->flags & LINUX_MAP_PRIVATE)))
        return (EINVAL);

    if (linux_args->flags & LINUX_MAP_SHARED)
        bsd_args.flags |= MAP_SHARED;
    if (linux_args->flags & LINUX_MAP_PRIVATE)
        bsd_args.flags |= MAP_PRIVATE;
    if (linux_args->flags & LINUX_MAP_FIXED)
        bsd_args.flags |= MAP_FIXED;
    if (linux_args->flags & LINUX_MAP_ANON)
        bsd_args.flags |= MAP_ANON;
    else
        bsd_args.flags |= MAP_NOSYNC;
    if (linux_args->flags & LINUX_MAP_GROWSDOWN)
        bsd_args.flags |= MAP_STACK;

    /*
     * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
     * on Linux/i386.  We do this to ensure maximum compatibility.
     * Linux/ia64 does the same in i386 emulation mode.
     */
    bsd_args.prot = linux_args->prot;
    if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
        bsd_args.prot |= PROT_READ | PROT_EXEC;

    /* Linux does not check the file descriptor when MAP_ANONYMOUS is set. */
    bsd_args.fd = (bsd_args.flags & MAP_ANON) ? -1 : linux_args->fd;
    if (bsd_args.fd != -1) {
        /*
         * Linux follows the Solaris mmap(2) description:
         * The file descriptor fildes is opened with
         * read permission, regardless of the
         * protection options specified.
         */

        if ((error = fget(td, bsd_args.fd, &fp)) != 0)
            return (error);
        if (fp->f_type != DTYPE_VNODE) {
            fdrop(fp, td);
            return (EINVAL);
        }

        /* Linux mmap() just fails for O_WRONLY files. */
        if (!(fp->f_flag & FREAD)) {
            fdrop(fp, td);
            return (EACCES);
        }

        fdrop(fp, td);
    }

    if (linux_args->flags & LINUX_MAP_GROWSDOWN) {
        /*
         * The Linux MAP_GROWSDOWN option does not limit auto
         * growth of the region.  Linux mmap with this option
         * takes as addr the initial BOS, and as len, the initial
         * region size.  It can then grow down from addr without
         * limit.  However, linuxthreads has an implicit internal
         * limit to stack size of STACK_SIZE.  It's just not
         * enforced explicitly in Linux.  But, here we impose
         * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
         * region, since we can do this with our mmap.
         *
         * Our mmap with MAP_STACK takes addr as the maximum
         * downsize limit on BOS, and as len the max size of
         * the region.  It then maps the top SGROWSIZ bytes,
         * and auto grows the region down, up to the limit
         * in addr.
         *
         * If we don't use the MAP_STACK option, the effect
         * of this code is to allocate a stack region of a
         * fixed size of (STACK_SIZE - GUARD_SIZE).
         */

        if ((caddr_t)PTRIN(linux_args->addr) + linux_args->len >
            p->p_vmspace->vm_maxsaddr) {
            /*
             * Some Linux apps will attempt to mmap
             * thread stacks near the top of their
             * address space.  If their TOS is greater
             * than vm_maxsaddr, vm_map_growstack()
             * will confuse the thread stack with the
             * process stack and deliver a SEGV if they
             * attempt to grow the thread stack past their
             * current stacksize rlimit.  To avoid this,
             * adjust vm_maxsaddr upwards to reflect
             * the current stacksize rlimit rather
             * than the maximum possible stacksize.
             * It would be better to adjust the
             * mmap'ed region, but some apps do not check
             * mmap's return value.
             */
            PROC_LOCK(p);
            p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
                lim_cur(p, RLIMIT_STACK);
            PROC_UNLOCK(p);
        }

        /*
         * This gives us our maximum stack size and a new BOS.
         * If we're using VM_STACK, then mmap will just map
         * the top SGROWSIZ bytes, and let the stack grow down
         * to the limit at BOS.  If we're not using VM_STACK
         * we map the full stack, since we don't have a way
         * to autogrow it.
         */
        if (linux_args->len > STACK_SIZE - GUARD_SIZE) {
            bsd_args.addr = (caddr_t)PTRIN(linux_args->addr);
            bsd_args.len = linux_args->len;
        } else {
            bsd_args.addr = (caddr_t)PTRIN(linux_args->addr) -
                (STACK_SIZE - GUARD_SIZE - linux_args->len);
            bsd_args.len = STACK_SIZE - GUARD_SIZE;
        }
    } else {
        bsd_args.addr = (caddr_t)PTRIN(linux_args->addr);
        bsd_args.len = linux_args->len;
    }
    bsd_args.pos = linux_args->pgoff;

#ifdef DEBUG
    if (ldebug(mmap))
        printf("-> %s(%p, %d, %d, 0x%08x, %d, 0x%x)\n",
            __func__,
            (void *)bsd_args.addr, bsd_args.len, bsd_args.prot,
            bsd_args.flags, bsd_args.fd, (int)bsd_args.pos);
#endif
    error = mmap(td, &bsd_args);
#ifdef DEBUG
    if (ldebug(mmap))
        printf("-> %s() return: 0x%x (0x%08x)\n",
            __func__, error, (u_int)td->td_retval[0]);
#endif
    return (error);
}
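
/*
 * mprotect(2): apply the same Linux/i386 rule as mmap() above, where
 * any requested protection implies PROT_READ and PROT_EXEC.
 */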
int
linux_mprotect(struct thread *td, struct linux_mprotect_args *uap)
{
    struct mprotect_args bsd_args;

    bsd_args.addr = uap->addr;
    bsd_args.len = uap->len;
    bsd_args.prot = uap->prot;
    if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
        bsd_args.prot |= PROT_READ | PROT_EXEC;
    return (mprotect(td, &bsd_args));
}
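
/*
 * pipe(2): the native pipe() returns the two descriptors in
 * td_retval[0]/td_retval[1] (%eax/%edx on i386); Linux copies them out
 * through the user pointer instead, so preserve the caller's %edx
 * across the call.
 */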
int
linux_pipe(struct thread *td, struct linux_pipe_args *args)
{
    int error;
    int reg_edx;

#ifdef DEBUG
    if (ldebug(pipe))
        printf(ARGS(pipe, "*"));
#endif

    reg_edx = td->td_retval[1];
    error = pipe(td, 0);
    if (error) {
        td->td_retval[1] = reg_edx;
        return (error);
    }

    error = copyout(td->td_retval, args->pipefds, 2*sizeof(int));
    if (error) {
        td->td_retval[1] = reg_edx;
        return (error);
    }

    td->td_retval[1] = reg_edx;
    td->td_retval[0] = 0;
    return (0);
}

int
linux_ioperm(struct thread *td, struct linux_ioperm_args *args)
{
    int error;
    struct i386_ioperm_args iia;

    iia.start = args->start;
    iia.length = args->length;
    iia.enable = args->enable;
    error = i386_set_ioperm(td, &iia);
    return (error);
}

int
linux_iopl(struct thread *td, struct linux_iopl_args *args)
{
    int error;

    if (args->level < 0 || args->level > 3)
        return (EINVAL);
    if ((error = priv_check(td, PRIV_IO)) != 0)
        return (error);
    if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
        return (error);
    td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) |
        (args->level * (PSL_IOPL / 3));
    return (0);
}

int
linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap)
{
    int error;
    struct i386_ldt_args ldt;
    struct l_descriptor ld;
    union descriptor desc;
    int size, written;

    if (uap->ptr == NULL)
        return (EINVAL);

    switch (uap->func) {
    case 0x00: /* read_ldt */
        ldt.start = 0;
        ldt.descs = uap->ptr;
        ldt.num = uap->bytecount / sizeof(union descriptor);
        error = i386_get_ldt(td, &ldt);
        td->td_retval[0] *= sizeof(union descriptor);
        break;
    case 0x02: /* read_default_ldt = 0 */
        size = 5*sizeof(struct l_desc_struct);
        if (size > uap->bytecount)
            size = uap->bytecount;
        for (written = error = 0; written < size && error == 0; written++)
            error = subyte((char *)uap->ptr + written, 0);
        td->td_retval[0] = written;
        break;
    case 0x01: /* write_ldt */
    case 0x11: /* write_ldt */
        if (uap->bytecount != sizeof(ld))
            return (EINVAL);

        error = copyin(uap->ptr, &ld, sizeof(ld));
        if (error)
            return (error);

        ldt.start = ld.entry_number;
        ldt.descs = &desc;
        ldt.num = 1;
        desc.sd.sd_lolimit = (ld.limit & 0x0000ffff);
        desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
        desc.sd.sd_lobase = (ld.base_addr & 0x00ffffff);
        desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
        desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
            (ld.contents << 2);
        desc.sd.sd_dpl = 3;
        desc.sd.sd_p = (ld.seg_not_present ^ 1);
        desc.sd.sd_xx = 0;
        desc.sd.sd_def32 = ld.seg_32bit;
        desc.sd.sd_gran = ld.limit_in_pages;
        error = i386_set_ldt(td, &ldt, &desc);
        break;
    default:
        error = EINVAL;
        break;
    }

    if (error == EOPNOTSUPP) {
        printf("linux: modify_ldt needs kernel option USER_LDT\n");
        error = ENOSYS;
    }

    return (error);
}
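
/*
 * Old-style sigaction(2): convert the l_osigaction_t (single 32-bit
 * mask word) to and from the native l_sigaction_t and let
 * linux_do_sigaction() do the real work.
 */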
int
linux_sigaction(struct thread *td, struct linux_sigaction_args *args)
{
    l_osigaction_t osa;
    l_sigaction_t act, oact;
    int error;

#ifdef DEBUG
    if (ldebug(sigaction))
        printf(ARGS(sigaction, "%d, %p, %p"),
            args->sig, (void *)args->nsa, (void *)args->osa);
#endif

    if (args->nsa != NULL) {
        error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
        if (error)
            return (error);
        act.lsa_handler = osa.lsa_handler;
        act.lsa_flags = osa.lsa_flags;
        act.lsa_restorer = osa.lsa_restorer;
        LINUX_SIGEMPTYSET(act.lsa_mask);
        act.lsa_mask.__bits[0] = osa.lsa_mask;
    }

    error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL,
        args->osa ? &oact : NULL);

    if (args->osa != NULL && !error) {
        osa.lsa_handler = oact.lsa_handler;
        osa.lsa_flags = oact.lsa_flags;
        osa.lsa_restorer = oact.lsa_restorer;
        osa.lsa_mask = oact.lsa_mask.__bits[0];
        error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
    }

    return (error);
}

/*
 * Linux has two extra args, restart and oldmask.  We don't use these,
 * but it seems that "restart" is actually a context pointer that
 * enables the signal to happen with a different register set.
 */
int
linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args)
{
    sigset_t sigmask;
    l_sigset_t mask;

#ifdef DEBUG
    if (ldebug(sigsuspend))
        printf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask);
#endif

    LINUX_SIGEMPTYSET(mask);
    mask.__bits[0] = args->mask;
    linux_to_bsd_sigset(&mask, &sigmask);
    return (kern_sigsuspend(td, sigmask));
}

int
linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap)
{
    l_sigset_t lmask;
    sigset_t sigmask;
    int error;

#ifdef DEBUG
    if (ldebug(rt_sigsuspend))
        printf(ARGS(rt_sigsuspend, "%p, %d"),
            (void *)uap->newset, uap->sigsetsize);
#endif

    if (uap->sigsetsize != sizeof(l_sigset_t))
        return (EINVAL);

    error = copyin(uap->newset, &lmask, sizeof(l_sigset_t));
    if (error)
        return (error);

    linux_to_bsd_sigset(&lmask, &sigmask);
    return (kern_sigsuspend(td, sigmask));
}

int
linux_pause(struct thread *td, struct linux_pause_args *args)
{
    struct proc *p = td->td_proc;
    sigset_t sigmask;

#ifdef DEBUG
    if (ldebug(pause))
        printf(ARGS(pause, ""));
#endif

    PROC_LOCK(p);
    sigmask = td->td_sigmask;
    PROC_UNLOCK(p);
    return (kern_sigsuspend(td, sigmask));
}

int
linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap)
{
    stack_t ss, oss;
    l_stack_t lss;
    int error;

#ifdef DEBUG
    if (ldebug(sigaltstack))
        printf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss);
#endif

    if (uap->uss != NULL) {
        error = copyin(uap->uss, &lss, sizeof(l_stack_t));
        if (error)
            return (error);

        ss.ss_sp = lss.ss_sp;
        ss.ss_size = lss.ss_size;
        ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags);
    }
    error = kern_sigaltstack(td, (uap->uss != NULL) ? &ss : NULL,
        (uap->uoss != NULL) ? &oss : NULL);
    if (!error && uap->uoss != NULL) {
        lss.ss_sp = oss.ss_sp;
        lss.ss_size = oss.ss_size;
        lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
        error = copyout(&lss, uap->uoss, sizeof(l_stack_t));
    }

    return (error);
}

int
linux_ftruncate64(struct thread *td, struct linux_ftruncate64_args *args)
{
    struct ftruncate_args sa;

#ifdef DEBUG
    if (ldebug(ftruncate64))
        printf(ARGS(ftruncate64, "%u, %jd"), args->fd,
            (intmax_t)args->length);
#endif

    sa.fd = args->fd;
    sa.length = args->length;
    return ftruncate(td, &sa);
}
This 1117 * syscall loads one of the selected tls decriptors with a value and 1118 * also loads GDT descriptors 6, 7 and 8 with the content of the 1119 * per-thread descriptors. 1120 * 1121 * Semantics of fbsd version: I think we can ignore that linux has 3 1122 * per-thread descriptors and use just the 1st one. The tls_array[] 1123 * is used only in set/get-thread_area() syscalls and for loading the 1124 * GDT descriptors. In fbsd we use just one GDT descriptor for TLS so 1125 * we will load just one. 1126 * 1127 * XXX: this doesn't work when a user space process tries to use more 1128 * than 1 TLS segment. Comment in the linux sources says wine might do 1129 * this. 1130 */ 1131 1132 /* 1133 * we support just GLIBC TLS now 1134 * we should let 3 proceed as well because we use this segment so 1135 * if code does two subsequent calls it should succeed 1136 */ 1137 if (idx != 6 && idx != -1 && idx != 3) 1138 return (EINVAL); 1139 1140 /* 1141 * we have to copy out the GDT entry we use 1142 * FreeBSD uses GDT entry #3 for storing %gs so load that 1143 * 1144 * XXX: what if a user space program doesn't check this value and tries 1145 * to use 6, 7 or 8? 1146 */ 1147 idx = info.entry_number = 3; 1148 error = copyout(&info, args->desc, sizeof(struct l_user_desc)); 1149 if (error) 1150 return (error); 1151 1152 if (LINUX_LDT_empty(&info)) { 1153 a[0] = 0; 1154 a[1] = 0; 1155 } else { 1156 a[0] = LINUX_LDT_entry_a(&info); 1157 a[1] = LINUX_LDT_entry_b(&info); 1158 } 1159 1160 memcpy(&sd, &a, sizeof(a)); 1161 #ifdef DEBUG 1162 if (ldebug(set_thread_area)) 1163 printf("Segment created in set_thread_area: lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, type: %i, dpl: %i, p: %i, xx: %i, def32: %i, gran: %i\n", sd.sd_lobase, 1164 sd.sd_hibase, 1165 sd.sd_lolimit, 1166 sd.sd_hilimit, 1167 sd.sd_type, 1168 sd.sd_dpl, 1169 sd.sd_p, 1170 sd.sd_xx, 1171 sd.sd_def32, 1172 sd.sd_gran); 1173 #endif 1174 1175 /* this is taken from i386 version of cpu_set_user_tls() */ 1176 critical_enter(); 1177 /* set %gs */ 1178 td->td_pcb->pcb_gsd = sd; 1179 PCPU_GET(fsgs_gdt)[1] = sd; 1180 load_gs(GSEL(GUGS_SEL, SEL_UPL)); 1181 critical_exit(); 1182 1183 return (0); 1184 } 1185 1186 int 1187 linux_get_thread_area(struct thread *td, struct linux_get_thread_area_args *args) 1188 { 1189 1190 struct l_user_desc info; 1191 int error; 1192 int idx; 1193 struct l_desc_struct desc; 1194 struct segment_descriptor sd; 1195 1196 #ifdef DEBUG 1197 if (ldebug(get_thread_area)) 1198 printf(ARGS(get_thread_area, "%p"), args->desc); 1199 #endif 1200 1201 error = copyin(args->desc, &info, sizeof(struct l_user_desc)); 1202 if (error) 1203 return (error); 1204 1205 idx = info.entry_number; 1206 /* XXX: I am not sure if we want 3 to be allowed too. 
int
linux_get_thread_area(struct thread *td, struct linux_get_thread_area_args *args)
{

    struct l_user_desc info;
    int error;
    int idx;
    struct l_desc_struct desc;
    struct segment_descriptor sd;

#ifdef DEBUG
    if (ldebug(get_thread_area))
        printf(ARGS(get_thread_area, "%p"), args->desc);
#endif

    error = copyin(args->desc, &info, sizeof(struct l_user_desc));
    if (error)
        return (error);

    idx = info.entry_number;
    /* XXX: I am not sure if we want 3 to be allowed too. */
    if (idx != 6 && idx != 3)
        return (EINVAL);

    idx = 3;

    memset(&info, 0, sizeof(info));

    sd = PCPU_GET(fsgs_gdt)[1];

    memcpy(&desc, &sd, sizeof(desc));

    info.entry_number = idx;
    info.base_addr = LINUX_GET_BASE(&desc);
    info.limit = LINUX_GET_LIMIT(&desc);
    info.seg_32bit = LINUX_GET_32BIT(&desc);
    info.contents = LINUX_GET_CONTENTS(&desc);
    info.read_exec_only = !LINUX_GET_WRITABLE(&desc);
    info.limit_in_pages = LINUX_GET_LIMIT_PAGES(&desc);
    info.seg_not_present = !LINUX_GET_PRESENT(&desc);
    info.useable = LINUX_GET_USEABLE(&desc);

    error = copyout(&info, args->desc, sizeof(struct l_user_desc));
    if (error)
        return (EFAULT);

    return (0);
}

/* Copied from kern/kern_time.c. */
int
linux_timer_create(struct thread *td, struct linux_timer_create_args *args)
{
    return ktimer_create(td, (struct ktimer_create_args *) args);
}

int
linux_timer_settime(struct thread *td, struct linux_timer_settime_args *args)
{
    return ktimer_settime(td, (struct ktimer_settime_args *) args);
}

int
linux_timer_gettime(struct thread *td, struct linux_timer_gettime_args *args)
{
    return ktimer_gettime(td, (struct ktimer_gettime_args *) args);
}

int
linux_timer_getoverrun(struct thread *td, struct linux_timer_getoverrun_args *args)
{
    return ktimer_getoverrun(td, (struct ktimer_getoverrun_args *) args);
}

int
linux_timer_delete(struct thread *td, struct linux_timer_delete_args *args)
{
    return ktimer_delete(td, (struct ktimer_delete_args *) args);
}

/* XXX: this won't work with a module - convert it. */
int
linux_mq_open(struct thread *td, struct linux_mq_open_args *args)
{
#ifdef P1003_1B_MQUEUE
    return kmq_open(td, (struct kmq_open_args *) args);
#else
    return (ENOSYS);
#endif
}

int
linux_mq_unlink(struct thread *td, struct linux_mq_unlink_args *args)
{
#ifdef P1003_1B_MQUEUE
    return kmq_unlink(td, (struct kmq_unlink_args *) args);
#else
    return (ENOSYS);
#endif
}

int
linux_mq_timedsend(struct thread *td, struct linux_mq_timedsend_args *args)
{
#ifdef P1003_1B_MQUEUE
    return kmq_timedsend(td, (struct kmq_timedsend_args *) args);
#else
    return (ENOSYS);
#endif
}

int
linux_mq_timedreceive(struct thread *td, struct linux_mq_timedreceive_args *args)
{
#ifdef P1003_1B_MQUEUE
    return kmq_timedreceive(td, (struct kmq_timedreceive_args *) args);
#else
    return (ENOSYS);
#endif
}

int
linux_mq_notify(struct thread *td, struct linux_mq_notify_args *args)
{
#ifdef P1003_1B_MQUEUE
    return kmq_notify(td, (struct kmq_notify_args *) args);
#else
    return (ENOSYS);
#endif
}

int
linux_mq_getsetattr(struct thread *td, struct linux_mq_getsetattr_args *args)
{
#ifdef P1003_1B_MQUEUE
    return kmq_setattr(td, (struct kmq_setattr_args *) args);
#else
    return (ENOSYS);
#endif
}