/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_posix.h"

#include <sys/param.h>
#include <sys/imgact_aout.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <machine/frame.h>
#include <machine/pcb.h>	/* needed for pcb definition in linux_set_thread_area */
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/sysarch.h>

#include <vm/pmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#include <x86/reg.h>

#include <i386/linux/linux.h>
#include <i386/linux/linux_proto.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_fork.h>
#include <compat/linux/linux_ipc.h>
#include <compat/linux/linux_misc.h>
#include <compat/linux/linux_mmap.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>

struct l_descriptor {
	l_uint		entry_number;
	l_ulong		base_addr;
	l_uint		limit;
	l_uint		seg_32bit:1;
	l_uint		contents:2;
	l_uint		read_exec_only:1;
	l_uint		limit_in_pages:1;
	l_uint		seg_not_present:1;
	l_uint		useable:1;
};

struct l_old_select_argv {
	l_int		nfds;
	l_fd_set	*readfds;
	l_fd_set	*writefds;
	l_fd_set	*exceptfds;
	struct l_timeval	*timeout;
};

struct l_ipc_kludge {
	struct l_msgbuf *msgp;
	l_long msgtyp;
};

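/*
 * Linux multiplexes the SysV IPC system calls through a single ipc(2)
 * entry point: the low 16 bits of 'what' select the operation, while the
 * high 16 bits carry a calling-convention version.  Each case below just
 * repacks the generic arguments into the argument structure of the
 * corresponding native wrapper.  The version only matters for MSGRCV,
 * where the old convention passes a struct l_ipc_kludge (message pointer
 * plus type) indirectly through 'ptr'.
 */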
int
linux_ipc(struct thread *td, struct linux_ipc_args *args)
{

	switch (args->what & 0xFFFF) {
	case LINUX_SEMOP: {

		return (kern_semop(td, args->arg1, PTRIN(args->ptr),
		    args->arg2, NULL));
	}
	case LINUX_SEMGET: {
		struct linux_semget_args a;

		a.key = args->arg1;
		a.nsems = args->arg2;
		a.semflg = args->arg3;
		return (linux_semget(td, &a));
	}
	case LINUX_SEMCTL: {
		struct linux_semctl_args a;
		int error;

		a.semid = args->arg1;
		a.semnum = args->arg2;
		a.cmd = args->arg3;
		error = copyin(PTRIN(args->ptr), &a.arg, sizeof(a.arg));
		if (error)
			return (error);
		return (linux_semctl(td, &a));
	}
	case LINUX_SEMTIMEDOP: {
		struct linux_semtimedop_args a;

		a.semid = args->arg1;
		a.tsops = PTRIN(args->ptr);
		a.nsops = args->arg2;
		a.timeout = PTRIN(args->arg5);
		return (linux_semtimedop(td, &a));
	}
	case LINUX_MSGSND: {
		struct linux_msgsnd_args a;

		a.msqid = args->arg1;
		a.msgp = PTRIN(args->ptr);
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		return (linux_msgsnd(td, &a));
	}
	case LINUX_MSGRCV: {
		struct linux_msgrcv_args a;

		a.msqid = args->arg1;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		if ((args->what >> 16) == 0) {
			struct l_ipc_kludge tmp;
			int error;

			if (args->ptr == 0)
				return (EINVAL);
			error = copyin(PTRIN(args->ptr), &tmp, sizeof(tmp));
			if (error)
				return (error);
			a.msgp = PTRIN(tmp.msgp);
			a.msgtyp = tmp.msgtyp;
		} else {
			a.msgp = PTRIN(args->ptr);
			a.msgtyp = args->arg5;
		}
		return (linux_msgrcv(td, &a));
	}
	case LINUX_MSGGET: {
		struct linux_msgget_args a;

		a.key = args->arg1;
		a.msgflg = args->arg2;
		return (linux_msgget(td, &a));
	}
	case LINUX_MSGCTL: {
		struct linux_msgctl_args a;

		a.msqid = args->arg1;
		a.cmd = args->arg2;
		a.buf = PTRIN(args->ptr);
		return (linux_msgctl(td, &a));
	}
	case LINUX_SHMAT: {
		struct linux_shmat_args a;
		l_uintptr_t addr;
		int error;

		a.shmid = args->arg1;
		a.shmaddr = PTRIN(args->ptr);
		a.shmflg = args->arg2;
		error = linux_shmat(td, &a);
		if (error != 0)
			return (error);
		addr = td->td_retval[0];
		error = copyout(&addr, PTRIN(args->arg3), sizeof(addr));
		td->td_retval[0] = 0;
		return (error);
	}
	case LINUX_SHMDT: {
		struct linux_shmdt_args a;

		a.shmaddr = PTRIN(args->ptr);
		return (linux_shmdt(td, &a));
	}
	case LINUX_SHMGET: {
		struct linux_shmget_args a;

		a.key = args->arg1;
		a.size = args->arg2;
		a.shmflg = args->arg3;
		return (linux_shmget(td, &a));
	}
	case LINUX_SHMCTL: {
		struct linux_shmctl_args a;

		a.shmid = args->arg1;
		a.cmd = args->arg2;
		a.buf = PTRIN(args->ptr);
		return (linux_shmctl(td, &a));
	}
	default:
		break;
	}

	return (EINVAL);
}

int
linux_old_select(struct thread *td, struct linux_old_select_args *args)
{
	struct l_old_select_argv linux_args;
	struct linux_select_args newsel;
	int error;

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

	newsel.nfds = linux_args.nfds;
	newsel.readfds = linux_args.readfds;
	newsel.writefds = linux_args.writefds;
	newsel.exceptfds = linux_args.exceptfds;
	newsel.timeout = linux_args.timeout;
	return (linux_select(td, &newsel));
}

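/*
 * Load the TLS descriptor supplied with clone(2) into the new thread's
 * %gs.  The descriptor index is sanity checked the same way as in
 * set_thread_area(): only entries 6 and 3 are expected, and anything
 * else is forced back to 3, the GDT slot FreeBSD uses for user %gs.
 */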
int
linux_set_cloned_tls(struct thread *td, void *desc)
{
	struct segment_descriptor sd;
	struct l_user_desc info;
	int idx, error;
	int a[2];

	error = copyin(desc, &info, sizeof(struct l_user_desc));
	if (error) {
		linux_msg(td, "set_cloned_tls copyin failed!");
	} else {
		idx = info.entry_number;

		/*
		 * It looks like we're getting the idx we returned
		 * in the set_thread_area() syscall.
		 */
		if (idx != 6 && idx != 3) {
			linux_msg(td, "set_cloned_tls resetting idx!");
			idx = 3;
		}

		/* This doesn't happen in practice. */
		if (idx == 6) {
			/* we might copy out the entry_number as 3 */
			info.entry_number = 3;
			error = copyout(&info, desc, sizeof(struct l_user_desc));
			if (error)
				linux_msg(td, "set_cloned_tls copyout failed!");
		}

		a[0] = LINUX_LDT_entry_a(&info);
		a[1] = LINUX_LDT_entry_b(&info);

		memcpy(&sd, &a, sizeof(a));
		/* set %gs */
		td->td_pcb->pcb_gsd = sd;
		td->td_pcb->pcb_gs = GSEL(GUGS_SEL, SEL_UPL);
	}

	return (error);
}

int
linux_set_upcall(struct thread *td, register_t stack)
{

	if (stack)
		td->td_frame->tf_esp = stack;

	/*
	 * The newly created Linux thread returns
	 * to the user space by the same path that its parent does.
	 */
	td->td_frame->tf_eax = 0;
	return (0);
}

int
linux_mmap(struct thread *td, struct linux_mmap_args *args)
{
	int error;
	struct l_mmap_argv linux_args;

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

	return (linux_mmap_common(td, linux_args.addr, linux_args.len,
	    linux_args.prot, linux_args.flags, linux_args.fd,
	    (uint32_t)linux_args.pgoff));
}

int
linux_ioperm(struct thread *td, struct linux_ioperm_args *args)
{
	int error;
	struct i386_ioperm_args iia;

	iia.start = args->start;
	iia.length = args->length;
	iia.enable = args->enable;
	error = i386_set_ioperm(td, &iia);
	return (error);
}

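/*
 * iopl(2): raise the I/O privilege level of the calling thread.  PSL_IOPL
 * is the two-bit IOPL field in %eflags (bits 12-13), so PSL_IOPL / 3
 * isolates the low bit of the field and multiplying by the requested
 * level (0-3) forms the new field value.
 */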
int
linux_iopl(struct thread *td, struct linux_iopl_args *args)
{
	int error;

	if (args->level < 0 || args->level > 3)
		return (EINVAL);
	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) |
	    (args->level * (PSL_IOPL / 3));
	return (0);
}

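/*
 * modify_ldt(2).  'func' selects the operation: 0 reads the current LDT,
 * 2 reads the (all-zero) default LDT, and the two write variants (1 and
 * 0x11) install a single entry; both write variants are handled
 * identically here.  For writes, the Linux user descriptor is repacked
 * into a native segment descriptor and installed with i386_set_ldt().
 */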
int
linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap)
{
	int error;
	struct i386_ldt_args ldt;
	struct l_descriptor ld;
	union descriptor desc;
	int size, written;

	switch (uap->func) {
	case 0x00: /* read_ldt */
		ldt.start = 0;
		ldt.descs = uap->ptr;
		ldt.num = uap->bytecount / sizeof(union descriptor);
		error = i386_get_ldt(td, &ldt);
		td->td_retval[0] *= sizeof(union descriptor);
		break;
	case 0x02: /* read_default_ldt = 0 */
		size = 5 * sizeof(struct l_desc_struct);
		if (size > uap->bytecount)
			size = uap->bytecount;
		for (written = error = 0; written < size && error == 0; written++)
			error = subyte((char *)uap->ptr + written, 0);
		td->td_retval[0] = written;
		break;
	case 0x01: /* write_ldt */
	case 0x11: /* write_ldt */
		if (uap->bytecount != sizeof(ld))
			return (EINVAL);

		error = copyin(uap->ptr, &ld, sizeof(ld));
		if (error)
			return (error);

		ldt.start = ld.entry_number;
		ldt.descs = &desc;
		ldt.num = 1;
		desc.sd.sd_lolimit = (ld.limit & 0x0000ffff);
		desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
		desc.sd.sd_lobase = (ld.base_addr & 0x00ffffff);
		desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
		desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
		    (ld.contents << 2);
		desc.sd.sd_dpl = 3;
		desc.sd.sd_p = (ld.seg_not_present ^ 1);
		desc.sd.sd_xx = 0;
		desc.sd.sd_def32 = ld.seg_32bit;
		desc.sd.sd_gran = ld.limit_in_pages;
		error = i386_set_ldt(td, &ldt, &desc);
		break;
	default:
		error = ENOSYS;
		break;
	}

	if (error == EOPNOTSUPP) {
		linux_msg(td, "modify_ldt needs kernel option USER_LDT");
		error = ENOSYS;
	}

	return (error);
}

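/*
 * Old-style sigaction(2).  The Linux l_osigaction_t carries a
 * single-word signal mask, so it is converted to and from the new
 * l_sigaction_t used by linux_do_sigaction().
 */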
int
linux_sigaction(struct thread *td, struct linux_sigaction_args *args)
{
	l_osigaction_t osa;
	l_sigaction_t act, oact;
	int error;

	if (args->nsa != NULL) {
		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
		if (error)
			return (error);
		act.lsa_handler = osa.lsa_handler;
		act.lsa_flags = osa.lsa_flags;
		act.lsa_restorer = osa.lsa_restorer;
		LINUX_SIGEMPTYSET(act.lsa_mask);
		act.lsa_mask.__mask = osa.lsa_mask;
	}

	error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL,
	    args->osa ? &oact : NULL);

	if (args->osa != NULL && !error) {
		osa.lsa_handler = oact.lsa_handler;
		osa.lsa_flags = oact.lsa_flags;
		osa.lsa_restorer = oact.lsa_restorer;
		osa.lsa_mask = oact.lsa_mask.__mask;
		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
	}

	return (error);
}

/*
 * Linux has two extra args, restart and oldmask.  We don't use these,
 * but it seems that "restart" is actually a context pointer that
 * enables the signal to happen with a different register set.
 */
int
linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args)
{
	sigset_t sigmask;
	l_sigset_t mask;

	LINUX_SIGEMPTYSET(mask);
	mask.__mask = args->mask;
	linux_to_bsd_sigset(&mask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_pause(struct thread *td, struct linux_pause_args *args)
{
	struct proc *p = td->td_proc;
	sigset_t sigmask;

	PROC_LOCK(p);
	sigmask = td->td_sigmask;
	PROC_UNLOCK(p);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_set_thread_area(struct thread *td, struct linux_set_thread_area_args *args)
{
	struct l_user_desc info;
	int error;
	int idx;
	int a[2];
	struct segment_descriptor sd;

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

	idx = info.entry_number;
	/*
	 * Semantics of the Linux version: every thread in the system has an
	 * array of three TLS descriptors.  The 1st is GLIBC TLS, the 2nd is
	 * WINE, the 3rd is unknown.  This syscall loads the selected TLS
	 * descriptor with a value and also loads GDT descriptors 6, 7 and 8
	 * with the content of the per-thread descriptors.
	 *
	 * Semantics of the FreeBSD version: I think we can ignore that Linux
	 * has three per-thread descriptors and use just the 1st one.  The
	 * tls_array[] is used only in the set/get_thread_area() syscalls and
	 * for loading the GDT descriptors.  In FreeBSD we use just one GDT
	 * descriptor for TLS, so we will load just one.
	 *
	 * XXX: this doesn't work when a user space process tries to use more
	 * than one TLS segment.  A comment in the Linux sources says wine
	 * might do this.
	 */

	/*
	 * We support just GLIBC TLS for now.  We should let 3 proceed as
	 * well, because we use this segment, so if code does two subsequent
	 * calls it should succeed.
	 */
	if (idx != 6 && idx != -1 && idx != 3)
		return (EINVAL);

	/*
	 * We have to copy out the GDT entry we use.  FreeBSD uses GDT
	 * entry #3 for storing %gs, so load that.
	 *
	 * XXX: what if a user space program doesn't check this value and
	 * tries to use 6, 7 or 8?
	 */
	idx = info.entry_number = 3;
	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (error);

	if (LINUX_LDT_empty(&info)) {
		a[0] = 0;
		a[1] = 0;
	} else {
		a[0] = LINUX_LDT_entry_a(&info);
		a[1] = LINUX_LDT_entry_b(&info);
	}

	memcpy(&sd, &a, sizeof(a));
	/* this is taken from i386 version of cpu_set_user_tls() */
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	PCPU_GET(fsgs_gdt)[1] = sd;
	load_gs(GSEL(GUGS_SEL, SEL_UPL));
	critical_exit();

	return (0);
}

int
linux_get_thread_area(struct thread *td, struct linux_get_thread_area_args *args)
{

	struct l_user_desc info;
	int error;
	int idx;
	struct l_desc_struct desc;
	struct segment_descriptor sd;

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

	idx = info.entry_number;
	/* XXX: I am not sure if we want 3 to be allowed too. */
	if (idx != 6 && idx != 3)
		return (EINVAL);

	idx = 3;

	memset(&info, 0, sizeof(info));

	sd = PCPU_GET(fsgs_gdt)[1];

	memcpy(&desc, &sd, sizeof(desc));

	info.entry_number = idx;
	info.base_addr = LINUX_GET_BASE(&desc);
	info.limit = LINUX_GET_LIMIT(&desc);
	info.seg_32bit = LINUX_GET_32BIT(&desc);
	info.contents = LINUX_GET_CONTENTS(&desc);
	info.read_exec_only = !LINUX_GET_WRITABLE(&desc);
	info.limit_in_pages = LINUX_GET_LIMIT_PAGES(&desc);
	info.seg_not_present = !LINUX_GET_PRESENT(&desc);
	info.useable = LINUX_GET_USEABLE(&desc);

	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

	return (0);
}

/* XXX: this won't work with a module - convert it */
int
linux_mq_open(struct thread *td, struct linux_mq_open_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (sys_kmq_open(td, (struct kmq_open_args *)args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_unlink(struct thread *td, struct linux_mq_unlink_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (sys_kmq_unlink(td, (struct kmq_unlink_args *)args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedsend(struct thread *td, struct linux_mq_timedsend_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (sys_kmq_timedsend(td, (struct kmq_timedsend_args *)args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedreceive(struct thread *td, struct linux_mq_timedreceive_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (sys_kmq_timedreceive(td, (struct kmq_timedreceive_args *)args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_notify(struct thread *td, struct linux_mq_notify_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (sys_kmq_notify(td, (struct kmq_notify_args *)args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_getsetattr(struct thread *td, struct linux_mq_getsetattr_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (sys_kmq_setattr(td, (struct kmq_setattr_args *)args));
#else
	return (ENOSYS);
#endif
}

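/*
 * Translate the FreeBSD register file into the Linux struct pt_regs
 * layout.  There is no separate record of the original syscall %eax
 * here, so orig_eax is simply mirrored from %eax.
 */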
void
bsd_to_linux_regset(const struct reg *b_reg,
    struct linux_pt_regset *l_regset)
{

	l_regset->ebx = b_reg->r_ebx;
	l_regset->ecx = b_reg->r_ecx;
	l_regset->edx = b_reg->r_edx;
	l_regset->esi = b_reg->r_esi;
	l_regset->edi = b_reg->r_edi;
	l_regset->ebp = b_reg->r_ebp;
	l_regset->eax = b_reg->r_eax;
	l_regset->ds = b_reg->r_ds;
	l_regset->es = b_reg->r_es;
	l_regset->fs = b_reg->r_fs;
	l_regset->gs = b_reg->r_gs;
	l_regset->orig_eax = b_reg->r_eax;
	l_regset->eip = b_reg->r_eip;
	l_regset->cs = b_reg->r_cs;
	l_regset->eflags = b_reg->r_eflags;
	l_regset->esp = b_reg->r_esp;
	l_regset->ss = b_reg->r_ss;
}

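/*
 * uselib(2): map an a.out format shared library into the process address
 * space.  The header is checked for the Linux a.out magic (ZMAGIC or
 * QMAGIC), permissions and resource limits are verified much as execve()
 * does, and then text+data are either mapped copy-on-write from the
 * vnode (page-aligned file offset) or read in wholesale, with the BSS
 * mapped as anonymous memory afterwards.
 */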
int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	vm_map_t map;
	vm_map_entry_t entry;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	unsigned long bss_size;
	ssize_t aresid;
	int error;
	bool locked, opened, textset;

	a_out = NULL;
	vp = NULL;
	locked = false;
	textset = false;
	opened = false;

	NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
	    UIO_USERSPACE, args->library);
	error = namei(&ni);
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	NDFREE_PNBUF(&ni);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 * XXX: The code below largely duplicates exec_check_permissions().
	 */
	locked = true;

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		/* EACCES is what exec(2) returns. */
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 * XXX: Not really, it duplicates far more of exec_check_permissions()
	 * than vn_open().
	 */
#ifdef MAC
	error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error)
		goto cleanup;
	opened = true;

	/* Pull the executable header into exec_map. */
	error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
	if (error)
		goto cleanup;

	/* Is it a Linux binary? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks.
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:	/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:	/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * Text/data/bss must not exceed limits.
	 * XXX - this is not complete; it should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur_proc(td->td_proc, RLIMIT_DATA) ||
	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
	    bss_size) != 0) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * Prevent more writers.
	 */
	error = VOP_SET_TEXT(vp);
	if (error != 0)
		goto cleanup;
	textset = true;

	/*
	 * Lock no longer needed.
	 */
	locked = false;
	VOP_UNLOCK(vp);

	/*
	 * Check if file_offset is page aligned.  Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
		    VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
		    td->td_ucred, NOCRED, &aresid, td);
		if (error != 0)
			goto cleanup;
		if (aresid != 0) {
			error = ENOEXEC;
			goto cleanup;
		}
	} else {
		/*
		 * For QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header.
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		map = &td->td_proc->p_vmspace->vm_map;
		error = vm_mmap(map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
		if (error)
			goto cleanup;
		vm_map_lock(map);
		if (!vm_map_lookup_entry(map, vmaddr, &entry)) {
			vm_map_unlock(map);
			error = EDOOFUS;
			goto cleanup;
		}
		entry->eflags |= MAP_ENTRY_VN_EXEC;
		vm_map_unlock(map);
		textset = false;
	}

	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	if (opened) {
		if (locked)
			VOP_UNLOCK(vp);
		locked = false;
		VOP_CLOSE(vp, FREAD, td->td_ucred, td);
	}
	if (textset) {
		if (!locked) {
			locked = true;
			VOP_LOCK(vp, LK_SHARED | LK_RETRY);
		}
		VOP_UNSET_TEXT_CHECKED(vp);
	}
	if (locked)
		VOP_UNLOCK(vp);

	/* Release the temporary mapping. */
	if (a_out)
		kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);

	return (error);
}