/*-
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_compat.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/imgact_aout.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/blist.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/time.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/wait.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/swap_pager.h>

#include <machine/limits.h>

#include <posix4/sched.h>

#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#include <compat/linux/linux_mib.h>
#include <compat/linux/linux_util.h>

#ifdef __alpha__
#define BSD_TO_LINUX_SIGNAL(sig)	(sig)
#else
#define BSD_TO_LINUX_SIGNAL(sig)	\
	(((sig) <= LINUX_SIGTBLSZ) ? \
	    bsd_to_linux_signal[_SIG_IDX(sig)] : sig)
#endif

#ifndef __alpha__
static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, -1
};
#endif /*!__alpha__*/

struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* swap space still available */
	l_ushort	procs;		/* Number of current processes */
	char		_f[22];		/* Pads structure to 64 bytes */
};

#ifndef __alpha__
int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	vm_object_t object;
	int i;
	struct timespec ts;

	/* Uptime is copied out of print_uptime() in kern_shutdown.c */
	getnanouptime(&ts);
	i = 0;
	if (ts.tv_sec >= 86400) {
		ts.tv_sec %= 86400;
		i = 1;
	}
	if (i || ts.tv_sec >= 3600) {
		ts.tv_sec %= 3600;
		i = 1;
	}
	if (i || ts.tv_sec >= 60) {
		ts.tv_sec %= 60;
		i = 1;
	}
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages */
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i];

	sysinfo.totalram = physmem * PAGE_SIZE;
	sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE;

	sysinfo.sharedram = 0;
	for (object = TAILQ_FIRST(&vm_object_list); object != NULL;
	    object = TAILQ_NEXT(object, object_list))
		if (object->shadow_count > 1)
			sysinfo.sharedram += object->resident_page_count;

	sysinfo.sharedram *= PAGE_SIZE;
	sysinfo.bufferram = 0;

	if (swapblist == NULL) {
		sysinfo.totalswap = 0;
		sysinfo.freeswap = 0;
	} else {
		sysinfo.totalswap = swapblist->bl_blocks * 1024;
		sysinfo.freeswap = swapblist->bl_root->u.bmu_avail * PAGE_SIZE;
	}

	sysinfo.procs = 20;	/* Hack */

	return copyout(&sysinfo, (caddr_t)args->info, sizeof(sysinfo));
}
#endif /*!__alpha__*/

#ifndef __alpha__
int
linux_alarm(struct thread *td, struct linux_alarm_args *args)
{
	struct itimerval it, old_it;
	struct timeval tv;
	int s;

#ifdef DEBUG
	if (ldebug(alarm))
		printf(ARGS(alarm, "%u"), args->secs);
#endif

	if (args->secs > 100000000)
		return EINVAL;

	it.it_value.tv_sec = (long)args->secs;
	it.it_value.tv_usec = 0;
	it.it_interval.tv_sec = 0;
	it.it_interval.tv_usec = 0;
	s = splsoftclock();
	old_it = td->td_proc->p_realtimer;
	getmicrouptime(&tv);
	if (timevalisset(&old_it.it_value))
		callout_stop(&td->td_proc->p_itcallout);
	if (it.it_value.tv_sec != 0) {
		callout_reset(&td->td_proc->p_itcallout, tvtohz(&it.it_value),
		    realitexpire, td->td_proc);
		timevaladd(&it.it_value, &tv);
	}
	td->td_proc->p_realtimer = it;
	splx(s);
	if (timevalcmp(&old_it.it_value, &tv, >)) {
		timevalsub(&old_it.it_value, &tv);
		if (old_it.it_value.tv_usec != 0)
			old_it.it_value.tv_sec++;
		td->td_retval[0] = old_it.it_value.tv_sec;
	}
	return 0;
}
#endif /*!__alpha__*/

int
linux_brk(struct thread *td, struct linux_brk_args *args)
{
	struct vmspace *vm = td->td_proc->p_vmspace;
	vm_offset_t new, old;
	struct obreak_args /* {
		char * nsize;
	} */ tmp;

#ifdef DEBUG
	if (ldebug(brk))
		printf(ARGS(brk, "%p"), (void *)args->dsend);
#endif
	old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize);
	new = (vm_offset_t)args->dsend;
	tmp.nsize = (char *) new;
	if (((caddr_t)new > vm->vm_daddr) && !obreak(td, &tmp))
		td->td_retval[0] = (long)new;
	else
		td->td_retval[0] = (long)old;

	return 0;
}

int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	vm_offset_t buffer;
	unsigned long bss_size;
	int error;
	caddr_t sg;
	int locked;

	sg = stackgap_init();
	CHECKALTEXIST(td, &sg, args->library);

#ifdef DEBUG
	if (ldebug(uselib))
		printf(ARGS(uselib, "%s"), args->library);
#endif

	a_out = NULL;
	locked = 0;
	vp = NULL;

	/*
	 * XXX: This code should make use of vn_open(), rather than doing
	 * all this stuff itself.
	 */
	NDINIT(&ni, LOOKUP, FOLLOW|LOCKLEAF, UIO_USERSPACE, args->library, td);
	error = namei(&ni);
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	/*
	 * XXX - This looks like a bogus check. A LOCKLEAF namei should not
	 * succeed without returning a vnode.
	 */
	if (vp == NULL) {
		error = ENOEXEC;	/* ?? */
		goto cleanup;
	}
	NDFREE(&ni, NDF_ONLY_PNBUF);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 */
	locked++;

	/* Writable? */
	if (vp->v_writecount) {
		error = ETXTBSY;
		goto cleanup;
	}

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred, td);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 */
#ifdef MAC
	error = mac_check_vnode_open(td->td_ucred, vp, FREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * Lock no longer needed
	 */
	VOP_UNLOCK(vp, 0, td);
	locked = 0;

	/* Pull in executable header into kernel_map */
	error = vm_mmap(kernel_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp, 0);
	if (error)
		goto cleanup;

	/* Is it a Linux binary ? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:	/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:	/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/* To protect td->td_proc->p_rlimit in the if condition. */
	mtx_assert(&Giant, MA_OWNED);

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete. it should check current usage PLUS
	 * the resources needed by this library.
	 */
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size >
	    td->td_proc->p_rlimit[RLIMIT_DATA].rlim_cur) {
		error = ENOMEM;
		goto cleanup;
	}

	/* prevent more writers */
	vp->v_flag |= VTEXT;

	/*
	 * Check if file_offset is page aligned. Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
#ifdef DEBUG
		printf("uselib: Non page aligned binary %lu\n", file_offset);
#endif
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, FALSE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		/* map file into kernel_map */
		error = vm_mmap(kernel_map, &buffer,
		    round_page(a_out->a_text + a_out->a_data + file_offset),
		    VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp,
		    trunc_page(file_offset));
		if (error)
			goto cleanup;

		/* copy from kernel VM space to user space */
		error = copyout((caddr_t)(uintptr_t)(buffer + file_offset),
		    (caddr_t)vmaddr, a_out->a_text + a_out->a_data);

		/* release temporary kernel space */
		vm_map_remove(kernel_map, buffer, buffer +
		    round_page(a_out->a_text + a_out->a_data + file_offset));

		if (error)
			goto cleanup;
	} else {
#ifdef DEBUG
		printf("uselib: Page aligned binary %lu\n", file_offset);
#endif
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, (caddr_t)vp, file_offset);
		if (error)
			goto cleanup;
	}
#ifdef DEBUG
	printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long*)vmaddr)[0],
	    ((long*)vmaddr)[1]);
#endif
	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	/* Unlock vnode if needed */
	if (locked)
		VOP_UNLOCK(vp, 0, td);

	/* Release the kernel mapping. */
	if (a_out)
		vm_map_remove(kernel_map, (vm_offset_t)a_out,
		    (vm_offset_t)a_out + PAGE_SIZE);

	return error;
}

int
linux_select(struct thread *td, struct linux_select_args *args)
{
	struct select_args bsa;
	struct timeval tv0, tv1, utv, *tvp;
	caddr_t sg;
	int error;

#ifdef DEBUG
	if (ldebug(select))
		printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds,
		    (void *)args->readfds, (void *)args->writefds,
		    (void *)args->exceptfds, (void *)args->timeout);
#endif

	error = 0;
	bsa.nd = args->nfds;
	bsa.in = args->readfds;
	bsa.ou = args->writefds;
	bsa.ex = args->exceptfds;
	bsa.tv = (struct timeval *)args->timeout;

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (args->timeout) {
		if ((error = copyin((caddr_t)args->timeout, &utv,
		    sizeof(utv))))
			goto select_out;
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("incoming timeout (%ld/%ld)"),
			    utv.tv_sec, utv.tv_usec);
#endif

		if (itimerfix(&utv)) {
			/*
			 * The timeval was invalid. Convert it to something
			 * valid that will act as it does under Linux.
			 */
			sg = stackgap_init();
			tvp = stackgap_alloc(&sg, sizeof(utv));
			utv.tv_sec += utv.tv_usec / 1000000;
			utv.tv_usec %= 1000000;
			if (utv.tv_usec < 0) {
				utv.tv_sec -= 1;
				utv.tv_usec += 1000000;
			}
			if (utv.tv_sec < 0)
				timevalclear(&utv);
			if ((error = copyout(&utv, tvp, sizeof(utv))))
				goto select_out;
			bsa.tv = tvp;
		}
		microtime(&tv0);
	}

	error = select(td, &bsa);
#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("real select returns %d"), error);
#endif
	if (error) {
		/*
		 * See fs/select.c in the Linux kernel. Without this,
		 * Maelstrom doesn't work.
		 */
		if (error == ERESTART)
			error = EINTR;
		goto select_out;
	}

	if (args->timeout) {
		if (td->td_retval[0]) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */
			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("outgoing timeout (%ld/%ld)"),
			    utv.tv_sec, utv.tv_usec);
#endif
		if ((error = copyout(&utv, (caddr_t)args->timeout,
		    sizeof(utv))))
			goto select_out;
	}

select_out:
#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("select_out -> %d"), error);
#endif
	return error;
}

int
linux_mremap(struct thread *td, struct linux_mremap_args *args)
{
	struct munmap_args /* {
		void *addr;
		size_t len;
	} */ bsd_args;
	int error = 0;

#ifdef DEBUG
	if (ldebug(mremap))
		printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"),
		    (void *)args->addr,
		    (unsigned long)args->old_len,
		    (unsigned long)args->new_len,
		    (unsigned long)args->flags);
#endif
	args->new_len = round_page(args->new_len);
	args->old_len = round_page(args->old_len);

	if (args->new_len > args->old_len) {
		td->td_retval[0] = 0;
		return ENOMEM;
	}

	if (args->new_len < args->old_len) {
		bsd_args.addr = (caddr_t)(args->addr + args->new_len);
		bsd_args.len = args->old_len - args->new_len;
		error = munmap(td, &bsd_args);
	}

	td->td_retval[0] = error ? 0 : (u_long)args->addr;
	return error;
}

int
linux_msync(struct thread *td, struct linux_msync_args *args)
{
	struct msync_args bsd_args;

	bsd_args.addr = (caddr_t)args->addr;
	bsd_args.len = args->len;
	bsd_args.flags = 0;	/* XXX ignore */

	return msync(td, &bsd_args);
}

#ifndef __alpha__
int
linux_time(struct thread *td, struct linux_time_args *args)
{
	struct timeval tv;
	l_time_t tm;
	int error;

#ifdef DEBUG
	if (ldebug(time))
		printf(ARGS(time, "*"));
#endif

	microtime(&tv);
	tm = tv.tv_sec;
	if (args->tm && (error = copyout(&tm, (caddr_t)args->tm, sizeof(tm))))
		return error;
	td->td_retval[0] = tm;
	return 0;
}
#endif /*!__alpha__*/

struct l_times_argv {
	l_long	tms_utime;
	l_long	tms_stime;
	l_long	tms_cutime;
	l_long	tms_cstime;
};

#ifdef __alpha__
#define CLK_TCK 1024	/* Linux uses 1024 on alpha */
#else
#define CLK_TCK 100	/* Linux uses 100 */
#endif

#define CONVTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))

int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv;
	struct l_times_argv tms;
	struct rusage ru;
	int error;

#ifdef DEBUG
	if (ldebug(times))
		printf(ARGS(times, "*"));
#endif

	mtx_lock_spin(&sched_lock);
	calcru(td->td_proc, &ru.ru_utime, &ru.ru_stime, NULL);
	mtx_unlock_spin(&sched_lock);

	tms.tms_utime = CONVTCK(ru.ru_utime);
	tms.tms_stime = CONVTCK(ru.ru_stime);

	tms.tms_cutime = CONVTCK(td->td_proc->p_stats->p_cru.ru_utime);
	tms.tms_cstime = CONVTCK(td->td_proc->p_stats->p_cru.ru_stime);

	if ((error = copyout(&tms, (caddr_t)args->buf, sizeof(tms))))
		return error;

	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return 0;
}

int
linux_newuname(struct thread *td, struct linux_newuname_args *args)
{
	struct l_new_utsname utsname;
	char osname[LINUX_MAX_UTSNAME];
	char osrelease[LINUX_MAX_UTSNAME];

#ifdef DEBUG
	if (ldebug(newuname))
		printf(ARGS(newuname, "*"));
#endif

	linux_get_osname(td->td_proc, osname);
	linux_get_osrelease(td->td_proc, osrelease);

	bzero(&utsname, sizeof(utsname));
	strncpy(utsname.sysname, osname, LINUX_MAX_UTSNAME-1);
	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME-1);
	strncpy(utsname.release, osrelease, LINUX_MAX_UTSNAME-1);
	strncpy(utsname.version, version, LINUX_MAX_UTSNAME-1);
	strncpy(utsname.machine, machine, LINUX_MAX_UTSNAME-1);
	strncpy(utsname.domainname, domainname, LINUX_MAX_UTSNAME-1);

	return (copyout(&utsname, (caddr_t)args->buf, sizeof(utsname)));
}

#if defined(__i386__)
struct l_utimbuf {
	l_time_t l_actime;
	l_time_t l_modtime;
};

int
linux_utime(struct thread *td, struct linux_utime_args *args)
{
	struct utimes_args /* {
		char	*path;
		struct	timeval *tptr;
	} */ bsdutimes;
	struct timeval tv[2], *tvp;
	struct l_utimbuf lut;
	int error;
	caddr_t sg;

	sg = stackgap_init();
	CHECKALTEXIST(td, &sg, args->fname);

#ifdef DEBUG
	if (ldebug(utime))
		printf(ARGS(utime, "%s, *"), args->fname);
#endif

	if (args->times) {
		if ((error = copyin((caddr_t)args->times, &lut, sizeof lut)))
			return error;
		tv[0].tv_sec = lut.l_actime;
		tv[0].tv_usec = 0;
		tv[1].tv_sec = lut.l_modtime;
		tv[1].tv_usec = 0;
		/* so that utimes can copyin */
		tvp = (struct timeval *)stackgap_alloc(&sg, sizeof(tv));
		if (tvp == NULL)
			return (ENAMETOOLONG);
		if ((error = copyout(tv, tvp, sizeof(tv))))
			return error;
		bsdutimes.tptr = tvp;
	} else
		bsdutimes.tptr = NULL;

	bsdutimes.path = args->fname;
	return utimes(td, &bsdutimes);
}
#endif /* __i386__ */

#define __WCLONE 0x80000000

#ifndef __alpha__
int
linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
{
	struct wait_args /* {
		int pid;
		int *status;
		int options;
		struct	rusage *rusage;
	} */ tmp;
	int error, tmpstat;

#ifdef DEBUG
	if (ldebug(waitpid))
		printf(ARGS(waitpid, "%d, %p, %d"),
		    args->pid, (void *)args->status, args->options);
#endif

	tmp.pid = args->pid;
	tmp.status = args->status;
	tmp.options = (args->options & (WNOHANG | WUNTRACED));
	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
	if (args->options & __WCLONE)
		tmp.options |= WLINUXCLONE;
	tmp.rusage = NULL;

	if ((error = wait4(td, &tmp)) != 0)
		return error;

	if (args->status) {
		if ((error = copyin((caddr_t)args->status, &tmpstat,
		    sizeof(int))) != 0)
			return error;
		tmpstat &= 0xffff;
		if (WIFSIGNALED(tmpstat))
			tmpstat = (tmpstat & 0xffffff80) |
			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
		else if (WIFSTOPPED(tmpstat))
			tmpstat = (tmpstat & 0xffff00ff) |
			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
		return copyout(&tmpstat, (caddr_t)args->status, sizeof(int));
	}

	return 0;
}
#endif /*!__alpha__*/

int
linux_wait4(struct thread *td, struct linux_wait4_args *args)
{
	struct wait_args /* {
		int pid;
		int *status;
		int options;
		struct	rusage *rusage;
	} */ tmp;
	int error, tmpstat;

#ifdef DEBUG
	if (ldebug(wait4))
		printf(ARGS(wait4, "%d, %p, %d, %p"),
		    args->pid, (void *)args->status, args->options,
		    (void *)args->rusage);
#endif

	tmp.pid = args->pid;
	tmp.status = args->status;
	tmp.options = (args->options & (WNOHANG | WUNTRACED));
	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
	if (args->options & __WCLONE)
		tmp.options |= WLINUXCLONE;
	tmp.rusage = (struct rusage *)args->rusage;

	if ((error = wait4(td, &tmp)) != 0)
		return error;

	SIGDELSET(td->td_proc->p_siglist, SIGCHLD);

	if (args->status) {
		if ((error = copyin((caddr_t)args->status, &tmpstat,
		    sizeof(int))) != 0)
			return error;
		tmpstat &= 0xffff;
		if (WIFSIGNALED(tmpstat))
			tmpstat = (tmpstat & 0xffffff80) |
			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
		else if (WIFSTOPPED(tmpstat))
			tmpstat = (tmpstat & 0xffff00ff) |
			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
		return copyout(&tmpstat, (caddr_t)args->status, sizeof(int));
	}

	return 0;
}

int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	caddr_t sg;
	struct mknod_args bsd_mknod;
	struct mkfifo_args bsd_mkfifo;

	sg = stackgap_init();

	CHECKALTCREAT(td, &sg, args->path);

#ifdef DEBUG
	if (ldebug(mknod))
		printf(ARGS(mknod, "%s, %d, %d"),
		    args->path, args->mode, args->dev);
#endif

	if (args->mode & S_IFIFO) {
		bsd_mkfifo.path = args->path;
		bsd_mkfifo.mode = args->mode;
		return mkfifo(td, &bsd_mkfifo);
	} else {
		bsd_mknod.path = args->path;
		bsd_mknod.mode = args->mode;
		bsd_mknod.dev = args->dev;
		return mknod(td, &bsd_mknod);
	}
}

/*
 * UGH! This is just about the dumbest idea I've ever heard!!
 */
int
linux_personality(struct thread *td, struct linux_personality_args *args)
{
#ifdef DEBUG
	if (ldebug(personality))
		printf(ARGS(personality, "%d"), args->per);
#endif
#ifndef __alpha__
	if (args->per != 0)
		return EINVAL;
#endif

	/* Yes Jim, it's still a Linux... */
	td->td_retval[0] = 0;
	return 0;
}

/*
 * Wrappers for get/setitimer for debugging.
 */
int
linux_setitimer(struct thread *td, struct linux_setitimer_args *args)
{
	struct setitimer_args bsa;
	struct itimerval foo;
	int error;

#ifdef DEBUG
	if (ldebug(setitimer))
		printf(ARGS(setitimer, "%p, %p"),
		    (void *)args->itv, (void *)args->oitv);
#endif
	bsa.which = args->which;
	bsa.itv = (struct itimerval *)args->itv;
	bsa.oitv = (struct itimerval *)args->oitv;
	if (args->itv) {
		if ((error = copyin((caddr_t)args->itv, &foo, sizeof(foo))))
			return error;
#ifdef DEBUG
		if (ldebug(setitimer)) {
			printf("setitimer: value: sec: %ld, usec: %ld\n",
			    foo.it_value.tv_sec, foo.it_value.tv_usec);
			printf("setitimer: interval: sec: %ld, usec: %ld\n",
			    foo.it_interval.tv_sec, foo.it_interval.tv_usec);
		}
#endif
	}
	return setitimer(td, &bsa);
}

int
linux_getitimer(struct thread *td, struct linux_getitimer_args *args)
{
	struct getitimer_args bsa;
#ifdef DEBUG
	if (ldebug(getitimer))
		printf(ARGS(getitimer, "%p"), (void *)args->itv);
#endif
	bsa.which = args->which;
	bsa.itv = (struct itimerval *)args->itv;
	return getitimer(td, &bsa);
}

#ifndef __alpha__
int
linux_nice(struct thread *td, struct linux_nice_args *args)
{
	struct setpriority_args bsd_args;

	bsd_args.which = PRIO_PROCESS;
	bsd_args.who = 0;		/* current process */
	bsd_args.prio = args->inc;
	return setpriority(td, &bsd_args);
}
#endif /*!__alpha__*/

int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t linux_gidset[NGROUPS];
	gid_t *bsd_gidset;
	int ngrp, error;
	struct proc *p;

	ngrp = args->gidsetsize;
	if (ngrp >= NGROUPS)
		return (EINVAL);
	error = copyin((caddr_t)args->grouplist, linux_gidset,
	    ngrp * sizeof(l_gid_t));
	if (error)
		return (error);
	newcred = crget();
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = p->p_ucred;

	/*
	 * cr_groups[0] holds egid. Setting the whole set from
	 * the supplied set will cause egid to be changed too.
	 * Keep cr_groups[0] unchanged to prevent that.
	 */

	if ((error = suser_cred(oldcred, PRISON_ROOT)) != 0) {
		PROC_UNLOCK(p);
		crfree(newcred);
		return (error);
	}

	crcopy(newcred, oldcred);
	if (ngrp > 0) {
		newcred->cr_ngroups = ngrp + 1;

		bsd_gidset = newcred->cr_groups;
		ngrp--;
		while (ngrp >= 0) {
			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
			ngrp--;
		}
	}
	else
		newcred->cr_ngroups = 1;

	setsugid(p);
	p->p_ucred = newcred;
	PROC_UNLOCK(p);
	crfree(oldcred);
	return (0);
}

int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
	struct ucred *cred;
	l_gid_t linux_gidset[NGROUPS];
	gid_t *bsd_gidset;
	int bsd_gidsetsz, ngrp, error;

	cred = td->td_ucred;
	bsd_gidset = cred->cr_groups;
	bsd_gidsetsz = cred->cr_ngroups - 1;

	/*
	 * cr_groups[0] holds egid. Returning the whole set
	 * here will cause a duplicate. Exclude cr_groups[0]
	 * to prevent that.
1045 */ 1046 1047 if ((ngrp = args->gidsetsize) == 0) { 1048 td->td_retval[0] = bsd_gidsetsz; 1049 return (0); 1050 } 1051 1052 if (ngrp < bsd_gidsetsz) 1053 return (EINVAL); 1054 1055 ngrp = 0; 1056 while (ngrp < bsd_gidsetsz) { 1057 linux_gidset[ngrp] = bsd_gidset[ngrp + 1]; 1058 ngrp++; 1059 } 1060 1061 if ((error = copyout(linux_gidset, (caddr_t)args->grouplist, 1062 ngrp * sizeof(l_gid_t)))) 1063 return (error); 1064 1065 td->td_retval[0] = ngrp; 1066 return (0); 1067 } 1068 1069 #ifndef __alpha__ 1070 int 1071 linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args) 1072 { 1073 struct __setrlimit_args bsd; 1074 struct l_rlimit rlim; 1075 int error; 1076 caddr_t sg = stackgap_init(); 1077 1078 #ifdef DEBUG 1079 if (ldebug(setrlimit)) 1080 printf(ARGS(setrlimit, "%d, %p"), 1081 args->resource, (void *)args->rlim); 1082 #endif 1083 1084 if (args->resource >= LINUX_RLIM_NLIMITS) 1085 return (EINVAL); 1086 1087 bsd.which = linux_to_bsd_resource[args->resource]; 1088 if (bsd.which == -1) 1089 return (EINVAL); 1090 1091 error = copyin((caddr_t)args->rlim, &rlim, sizeof(rlim)); 1092 if (error) 1093 return (error); 1094 1095 bsd.rlp = stackgap_alloc(&sg, sizeof(struct rlimit)); 1096 bsd.rlp->rlim_cur = (rlim_t)rlim.rlim_cur; 1097 bsd.rlp->rlim_max = (rlim_t)rlim.rlim_max; 1098 return (setrlimit(td, &bsd)); 1099 } 1100 1101 int 1102 linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args) 1103 { 1104 struct __getrlimit_args bsd; 1105 struct l_rlimit rlim; 1106 int error; 1107 caddr_t sg = stackgap_init(); 1108 1109 #ifdef DEBUG 1110 if (ldebug(old_getrlimit)) 1111 printf(ARGS(old_getrlimit, "%d, %p"), 1112 args->resource, (void *)args->rlim); 1113 #endif 1114 1115 if (args->resource >= LINUX_RLIM_NLIMITS) 1116 return (EINVAL); 1117 1118 bsd.which = linux_to_bsd_resource[args->resource]; 1119 if (bsd.which == -1) 1120 return (EINVAL); 1121 1122 bsd.rlp = stackgap_alloc(&sg, sizeof(struct rlimit)); 1123 error = getrlimit(td, &bsd); 1124 if (error) 1125 return (error); 1126 1127 rlim.rlim_cur = (unsigned long)bsd.rlp->rlim_cur; 1128 if (rlim.rlim_cur == ULONG_MAX) 1129 rlim.rlim_cur = LONG_MAX; 1130 rlim.rlim_max = (unsigned long)bsd.rlp->rlim_max; 1131 if (rlim.rlim_max == ULONG_MAX) 1132 rlim.rlim_max = LONG_MAX; 1133 return (copyout(&rlim, (caddr_t)args->rlim, sizeof(rlim))); 1134 } 1135 1136 int 1137 linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args) 1138 { 1139 struct __getrlimit_args bsd; 1140 struct l_rlimit rlim; 1141 int error; 1142 caddr_t sg = stackgap_init(); 1143 1144 #ifdef DEBUG 1145 if (ldebug(getrlimit)) 1146 printf(ARGS(getrlimit, "%d, %p"), 1147 args->resource, (void *)args->rlim); 1148 #endif 1149 1150 if (args->resource >= LINUX_RLIM_NLIMITS) 1151 return (EINVAL); 1152 1153 bsd.which = linux_to_bsd_resource[args->resource]; 1154 if (bsd.which == -1) 1155 return (EINVAL); 1156 1157 bsd.rlp = stackgap_alloc(&sg, sizeof(struct rlimit)); 1158 error = getrlimit(td, &bsd); 1159 if (error) 1160 return (error); 1161 1162 rlim.rlim_cur = (l_ulong)bsd.rlp->rlim_cur; 1163 rlim.rlim_max = (l_ulong)bsd.rlp->rlim_max; 1164 return (copyout(&rlim, (caddr_t)args->rlim, sizeof(rlim))); 1165 } 1166 #endif /*!__alpha__*/ 1167 1168 int 1169 linux_sched_setscheduler(struct thread *td, 1170 struct linux_sched_setscheduler_args *args) 1171 { 1172 struct sched_setscheduler_args bsd; 1173 1174 #ifdef DEBUG 1175 if (ldebug(sched_setscheduler)) 1176 printf(ARGS(sched_setscheduler, "%d, %d, %p"), 1177 args->pid, args->policy, (const void 
		    args->pid, args->policy, (const void *)args->param);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return EINVAL;
	}

	bsd.pid = args->pid;
	bsd.param = (struct sched_param *)args->param;
	return sched_setscheduler(td, &bsd);
}

int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct sched_getscheduler_args bsd;
	int error;

#ifdef DEBUG
	if (ldebug(sched_getscheduler))
		printf(ARGS(sched_getscheduler, "%d"), args->pid);
#endif

	bsd.pid = args->pid;
	error = sched_getscheduler(td, &bsd);

	switch (td->td_retval[0]) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}

	return error;
}

int
linux_sched_get_priority_max(struct thread *td,
    struct linux_sched_get_priority_max_args *args)
{
	struct sched_get_priority_max_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_max))
		printf(ARGS(sched_get_priority_max, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return EINVAL;
	}
	return sched_get_priority_max(td, &bsd);
}

int
linux_sched_get_priority_min(struct thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_min))
		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return EINVAL;
	}
	return sched_get_priority_min(td, &bsd);
}

#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123

int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

#ifdef DEBUG
	if (ldebug(reboot))
		printf(ARGS(reboot, "0x%x"), args->cmd);
#endif
	if (args->cmd == REBOOT_CAD_ON || args->cmd == REBOOT_CAD_OFF)
		return (0);
	bsd_args.opt = (args->cmd == REBOOT_HALT) ? RB_HALT : 0;
	return (reboot(td, &bsd_args));
}

#ifndef __alpha__

/*
 * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
 * td->td_retval[1] when COMPAT_43 or COMPAT_SUNOS is defined. This
 * clobbers registers that are assumed to be preserved. The following
 * lightweight syscalls fix this. See also linux_getgid16() and
 * linux_getuid16() in linux_uid16.c.
 *
 * linux_getpid() - MP SAFE
 * linux_getgid() - MP SAFE
 * linux_getuid() - MP SAFE
 */

int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{

	td->td_retval[0] = td->td_proc->p_pid;
	return (0);
}

int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}

int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}

#endif /*!__alpha__*/

int
linux_getsid(struct thread *td, struct linux_getsid_args *args)
{
	struct getsid_args bsd;

	bsd.pid = args->pid;
	return getsid(td, &bsd);
}