/*-
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/blist.h>
#include <sys/fcntl.h>
#if defined(__i386__) || defined(__alpha__)
#include <sys/imgact_aout.h>
#endif
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/wait.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/swap_pager.h>

#include <posix4/sched.h>

#include "opt_compat.h"

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif

#include <compat/linux/linux_mib.h>
#include <compat/linux/linux_util.h>

#ifdef __i386__
#include <machine/cputypes.h>
#endif

#ifdef __alpha__
#define BSD_TO_LINUX_SIGNAL(sig)	(sig)
#else
#define BSD_TO_LINUX_SIGNAL(sig)	\
	(((sig) <= LINUX_SIGTBLSZ) ? bsd_to_linux_signal[_SIG_IDX(sig)] : sig)
#endif
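
/*
 * Table used to translate Linux RLIMIT_* resource numbers into their
 * FreeBSD counterparts.  An entry of -1 marks a Linux limit that has no
 * FreeBSD equivalent; the *rlimit calls below reject such resources
 * with EINVAL.
 */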

#ifndef __alpha__
static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, -1
};
#endif /*!__alpha__*/

struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
#define LINUX_SYSINFO_LOADS_SCALE 65536
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* Swap space still available */
	l_ushort	procs;		/* Number of current processes */
	l_ulong		totalbig;
	l_ulong		freebig;
	l_uint		mem_unit;
	char		_f[6];		/* Pads structure to 64 bytes */
};

#ifndef __alpha__
int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	vm_object_t object;
	int i, j;
	struct timespec ts;

	/* Uptime is the total number of seconds since boot. */
	getnanouptime(&ts);
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages */
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i] *
		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;

	sysinfo.totalram = physmem * PAGE_SIZE;
	sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE;

	sysinfo.sharedram = 0;
	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(object, &vm_object_list, object_list)
		if (object->shadow_count > 1)
			sysinfo.sharedram += object->resident_page_count;
	mtx_unlock(&vm_object_list_mtx);

	sysinfo.sharedram *= PAGE_SIZE;
	sysinfo.bufferram = 0;

	swap_pager_status(&i, &j);
	sysinfo.totalswap = i * PAGE_SIZE;
	sysinfo.freeswap = (i - j) * PAGE_SIZE;

	sysinfo.procs = nprocs;

	/* The following are only present in newer Linux kernels. */
	sysinfo.totalbig = 0;
	sysinfo.freebig = 0;
	sysinfo.mem_unit = 1;

	return copyout(&sysinfo, args->info, sizeof(sysinfo));
}
#endif /*!__alpha__*/
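
/*
 * Linux alarm(2) returns the number of seconds remaining on any
 * previously scheduled alarm, rounded up to a full second, or zero if
 * none was pending.  The previous interval timer returned by
 * kern_setitimer() provides that value.
 */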

#ifndef __alpha__
int
linux_alarm(struct thread *td, struct linux_alarm_args *args)
{
	struct itimerval it, old_it;
	int error;

#ifdef DEBUG
	if (ldebug(alarm))
		printf(ARGS(alarm, "%u"), args->secs);
#endif

	if (args->secs > 100000000)
		return (EINVAL);

	it.it_value.tv_sec = (long)args->secs;
	it.it_value.tv_usec = 0;
	it.it_interval.tv_sec = 0;
	it.it_interval.tv_usec = 0;
	error = kern_setitimer(td, ITIMER_REAL, &it, &old_it);
	if (error)
		return (error);
	if (timevalisset(&old_it.it_value)) {
		if (old_it.it_value.tv_usec != 0)
			old_it.it_value.tv_sec++;
		td->td_retval[0] = old_it.it_value.tv_sec;
	}
	return (0);
}
#endif /*!__alpha__*/

int
linux_brk(struct thread *td, struct linux_brk_args *args)
{
	struct vmspace *vm = td->td_proc->p_vmspace;
	vm_offset_t new, old;
	struct obreak_args /* {
		char * nsize;
	} */ tmp;

#ifdef DEBUG
	if (ldebug(brk))
		printf(ARGS(brk, "%p"), (void *)(uintptr_t)args->dsend);
#endif
	old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize);
	new = (vm_offset_t)args->dsend;
	tmp.nsize = (char *) new;
	if (((caddr_t)new > vm->vm_daddr) && !obreak(td, &tmp))
		td->td_retval[0] = (long)new;
	else
		td->td_retval[0] = (long)old;

	return 0;
}
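
/*
 * uselib(2) loads an old-style Linux a.out shared library (ZMAGIC or
 * QMAGIC) into the calling process.  The library's text and data are
 * mapped, or copied in when the file offset is not page aligned, at the
 * address recorded in the a.out header, and any BSS is backed by
 * anonymous memory.
 */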

#if defined(__i386__) || defined(__alpha__)

int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	vm_offset_t buffer;
	unsigned long bss_size;
	char *library;
	int error;
	int locked;

	LCONVPATHEXIST(td, args->library, &library);

#ifdef DEBUG
	if (ldebug(uselib))
		printf(ARGS(uselib, "%s"), library);
#endif

	a_out = NULL;
	locked = 0;
	vp = NULL;

	/*
	 * XXX: This code should make use of vn_open(), rather than doing
	 * all this stuff itself.
	 */
	NDINIT(&ni, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, library, td);
	error = namei(&ni);
	LFREEPATH(library);
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	/*
	 * XXX - This looks like a bogus check. A LOCKLEAF namei should not
	 * succeed without returning a vnode.
	 */
	if (vp == NULL) {
		error = ENOEXEC;	/* ?? */
		goto cleanup;
	}
	NDFREE(&ni, NDF_ONLY_PNBUF);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 */
	locked++;

	/* Writable? */
	if (vp->v_writecount) {
		error = ETXTBSY;
		goto cleanup;
	}

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred, td);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 */
#ifdef MAC
	error = mac_check_vnode_open(td->td_ucred, vp, FREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, -1);
	if (error)
		goto cleanup;

	/* Pull in executable header into kernel_map */
	error = vm_mmap(kernel_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp, 0);
	/*
	 * Lock no longer needed
	 */
	locked = 0;
	VOP_UNLOCK(vp, 0, td);

	if (error)
		goto cleanup;

	/* Is it a Linux binary ? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:	/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:	/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete. it should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur(td->td_proc, RLIMIT_DATA)) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	mp_fixme("Unlocked vflags access.");
	/* prevent more writers */
	vp->v_vflag |= VV_TEXT;

	/*
	 * Check if file_offset is page aligned. Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
#ifdef DEBUG
		printf("uselib: Non page aligned binary %lu\n", file_offset);
#endif
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, FALSE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		/* map file into kernel_map */
		error = vm_mmap(kernel_map, &buffer,
		    round_page(a_out->a_text + a_out->a_data + file_offset),
		    VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp,
		    trunc_page(file_offset));
		if (error)
			goto cleanup;

		/* copy from kernel VM space to user space */
		error = copyout(PTRIN(buffer + file_offset),
		    (void *)vmaddr, a_out->a_text + a_out->a_data);

		/* release temporary kernel space */
		vm_map_remove(kernel_map, buffer, buffer +
		    round_page(a_out->a_text + a_out->a_data + file_offset));

		if (error)
			goto cleanup;
	} else {
#ifdef DEBUG
		printf("uselib: Page aligned binary %lu\n", file_offset);
#endif
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, (caddr_t)vp, file_offset);
		if (error)
			goto cleanup;
	}
#ifdef DEBUG
	printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long *)vmaddr)[0],
	    ((long *)vmaddr)[1]);
#endif
	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	/* Unlock vnode if needed */
	if (locked)
		VOP_UNLOCK(vp, 0, td);

	/* Release the kernel mapping. */
	if (a_out)
		vm_map_remove(kernel_map, (vm_offset_t)a_out,
		    (vm_offset_t)a_out + PAGE_SIZE);

	return error;
}

#endif	/* __i386__ || __alpha__ */
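
/*
 * Linux select(2) updates the timeout argument to reflect the amount of
 * time that was not slept.  Emulate this by noting the time before the
 * native select runs and writing back the remaining time afterwards.
 */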

int
linux_select(struct thread *td, struct linux_select_args *args)
{
	l_timeval ltv;
	struct timeval tv0, tv1, utv, *tvp;
	int error;

#ifdef DEBUG
	if (ldebug(select))
		printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds,
		    (void *)args->readfds, (void *)args->writefds,
		    (void *)args->exceptfds, (void *)args->timeout);
#endif

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (args->timeout) {
		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
			goto select_out;
		utv.tv_sec = ltv.tv_sec;
		utv.tv_usec = ltv.tv_usec;
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("incoming timeout (%ld/%ld)"),
			    utv.tv_sec, utv.tv_usec);
#endif

		if (itimerfix(&utv)) {
			/*
			 * The timeval was invalid. Convert it to something
			 * valid that will act as it does under Linux.
			 */
			utv.tv_sec += utv.tv_usec / 1000000;
			utv.tv_usec %= 1000000;
			if (utv.tv_usec < 0) {
				utv.tv_sec -= 1;
				utv.tv_usec += 1000000;
			}
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		}
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_select(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp);

#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("real select returns %d"), error);
#endif
	if (error) {
		/*
		 * See fs/select.c in the Linux kernel.  Without this,
		 * Maelstrom doesn't work.
		 */
		if (error == ERESTART)
			error = EINTR;
		goto select_out;
	}

	if (args->timeout) {
		if (td->td_retval[0]) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */
			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("outgoing timeout (%ld/%ld)"),
			    utv.tv_sec, utv.tv_usec);
#endif
		ltv.tv_sec = utv.tv_sec;
		ltv.tv_usec = utv.tv_usec;
		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
			goto select_out;
	}

select_out:
#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("select_out -> %d"), error);
#endif
	return error;
}
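
/*
 * This mremap() emulation only supports shrinking a mapping in place:
 * requests to grow a region fail with ENOMEM, and the mapping is never
 * relocated.
 */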

int
linux_mremap(struct thread *td, struct linux_mremap_args *args)
{
	struct munmap_args /* {
		void *addr;
		size_t len;
	} */ bsd_args;
	int error = 0;

#ifdef DEBUG
	if (ldebug(mremap))
		printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"),
		    (void *)(uintptr_t)args->addr,
		    (unsigned long)args->old_len,
		    (unsigned long)args->new_len,
		    (unsigned long)args->flags);
#endif
	args->new_len = round_page(args->new_len);
	args->old_len = round_page(args->old_len);

	if (args->new_len > args->old_len) {
		td->td_retval[0] = 0;
		return ENOMEM;
	}

	if (args->new_len < args->old_len) {
		bsd_args.addr =
		    (caddr_t)((uintptr_t)args->addr + args->new_len);
		bsd_args.len = args->old_len - args->new_len;
		error = munmap(td, &bsd_args);
	}

	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
	return error;
}

#define LINUX_MS_ASYNC		0x0001
#define LINUX_MS_INVALIDATE	0x0002
#define LINUX_MS_SYNC		0x0004

int
linux_msync(struct thread *td, struct linux_msync_args *args)
{
	struct msync_args bsd_args;

	bsd_args.addr = (caddr_t)(uintptr_t)args->addr;
	bsd_args.len = (uintptr_t)args->len;
	bsd_args.flags = args->fl & ~LINUX_MS_SYNC;

	return msync(td, &bsd_args);
}

#ifndef __alpha__
int
linux_time(struct thread *td, struct linux_time_args *args)
{
	struct timeval tv;
	l_time_t tm;
	int error;

#ifdef DEBUG
	if (ldebug(time))
		printf(ARGS(time, "*"));
#endif

	microtime(&tv);
	tm = tv.tv_sec;
	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
		return error;
	td->td_retval[0] = tm;
	return 0;
}
#endif /*!__alpha__*/

struct l_times_argv {
	l_long	tms_utime;
	l_long	tms_stime;
	l_long	tms_cutime;
	l_long	tms_cstime;
};

#ifdef __alpha__
#define CLK_TCK 1024	/* Linux uses 1024 on alpha */
#else
#define CLK_TCK 100	/* Linux uses 100 */
#endif

#define CONVTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))

int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv, utime, stime, cutime, cstime;
	struct l_times_argv tms;
	struct proc *p;
	int error;

#ifdef DEBUG
	if (ldebug(times))
		printf(ARGS(times, "*"));
#endif

	p = td->td_proc;
	PROC_LOCK(p);
	calcru(p, &utime, &stime);
	calccru(p, &cutime, &cstime);
	PROC_UNLOCK(p);

	tms.tms_utime = CONVTCK(utime);
	tms.tms_stime = CONVTCK(stime);

	tms.tms_cutime = CONVTCK(cutime);
	tms.tms_cstime = CONVTCK(cstime);

	if ((error = copyout(&tms, args->buf, sizeof(tms))))
		return error;

	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return 0;
}
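
/*
 * Fill in the Linux struct new_utsname from the emulated OS name and
 * release, the hostname, the kernel version string, and a machine name
 * appropriate to the process (for example, "i686" for 32-bit Linux
 * binaries running on amd64).
 */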

int
linux_newuname(struct thread *td, struct linux_newuname_args *args)
{
	struct l_new_utsname utsname;
	char osname[LINUX_MAX_UTSNAME];
	char osrelease[LINUX_MAX_UTSNAME];
	char *p;

#ifdef DEBUG
	if (ldebug(newuname))
		printf(ARGS(newuname, "*"));
#endif

	linux_get_osname(td, osname);
	linux_get_osrelease(td, osrelease);

	bzero(&utsname, sizeof(utsname));
	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
	for (p = utsname.version; *p != '\0'; ++p)
		if (*p == '\n') {
			*p = '\0';
			break;
		}
#ifdef __i386__
	{
		const char *class;

		switch (cpu_class) {
		case CPUCLASS_686:
			class = "i686";
			break;
		case CPUCLASS_586:
			class = "i586";
			break;
		case CPUCLASS_486:
			class = "i486";
			break;
		default:
			class = "i386";
		}
		strlcpy(utsname.machine, class, LINUX_MAX_UTSNAME);
	}
#elif defined(__amd64__)	/* XXX: Linux can change 'personality'. */
#ifdef COMPAT_LINUX32
	strlcpy(utsname.machine, "i686", LINUX_MAX_UTSNAME);
#else
	strlcpy(utsname.machine, "x86_64", LINUX_MAX_UTSNAME);
#endif /* COMPAT_LINUX32 */
#else	/* something other than i386 or amd64 - assume we and Linux agree */
	strlcpy(utsname.machine, machine, LINUX_MAX_UTSNAME);
#endif /* __i386__ */
	strlcpy(utsname.domainname, domainname, LINUX_MAX_UTSNAME);

	return (copyout(&utsname, args->buf, sizeof(utsname)));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
struct l_utimbuf {
	l_time_t l_actime;
	l_time_t l_modtime;
};

int
linux_utime(struct thread *td, struct linux_utime_args *args)
{
	struct timeval tv[2], *tvp;
	struct l_utimbuf lut;
	char *fname;
	int error;

	LCONVPATHEXIST(td, args->fname, &fname);

#ifdef DEBUG
	if (ldebug(utime))
		printf(ARGS(utime, "%s, *"), fname);
#endif

	if (args->times) {
		if ((error = copyin(args->times, &lut, sizeof lut))) {
			LFREEPATH(fname);
			return error;
		}
		tv[0].tv_sec = lut.l_actime;
		tv[0].tv_usec = 0;
		tv[1].tv_sec = lut.l_modtime;
		tv[1].tv_usec = 0;
		tvp = tv;
	} else
		tvp = NULL;

	error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
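
/*
 * Linux lays out a wait status the same way BSD does: the low seven
 * bits hold the terminating signal (with bit 7 as the core dump flag)
 * and bits 8-15 hold either the exit code or the stop signal.  Only the
 * signal numbering differs, so the conversions below simply rewrite the
 * signal field with BSD_TO_LINUX_SIGNAL().
 */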

#define __WCLONE 0x80000000

#ifndef __alpha__
int
linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
{
	int error, options, tmpstat;

#ifdef DEBUG
	if (ldebug(waitpid))
		printf(ARGS(waitpid, "%d, %p, %d"),
		    args->pid, (void *)args->status, args->options);
#endif

	options = (args->options & (WNOHANG | WUNTRACED));
	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
	if (args->options & __WCLONE)
		options |= WLINUXCLONE;

	error = kern_wait(td, args->pid, &tmpstat, options, NULL);
	if (error)
		return error;

	if (args->status) {
		tmpstat &= 0xffff;
		if (WIFSIGNALED(tmpstat))
			tmpstat = (tmpstat & 0xffffff80) |
			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
		else if (WIFSTOPPED(tmpstat))
			tmpstat = (tmpstat & 0xffff00ff) |
			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
		return copyout(&tmpstat, args->status, sizeof(int));
	}

	return 0;
}
#endif /*!__alpha__*/

int
linux_wait4(struct thread *td, struct linux_wait4_args *args)
{
	int error, options, tmpstat;
	struct rusage ru, *rup;
	struct proc *p;

#ifdef DEBUG
	if (ldebug(wait4))
		printf(ARGS(wait4, "%d, %p, %d, %p"),
		    args->pid, (void *)args->status, args->options,
		    (void *)args->rusage);
#endif

	options = (args->options & (WNOHANG | WUNTRACED));
	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
	if (args->options & __WCLONE)
		options |= WLINUXCLONE;

	if (args->rusage != NULL)
		rup = &ru;
	else
		rup = NULL;
	error = kern_wait(td, args->pid, &tmpstat, options, rup);
	if (error)
		return error;

	p = td->td_proc;
	PROC_LOCK(p);
	SIGDELSET(p->p_siglist, SIGCHLD);
	PROC_UNLOCK(p);

	if (args->status) {
		tmpstat &= 0xffff;
		if (WIFSIGNALED(tmpstat))
			tmpstat = (tmpstat & 0xffffff80) |
			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
		else if (WIFSTOPPED(tmpstat))
			tmpstat = (tmpstat & 0xffff00ff) |
			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
		error = copyout(&tmpstat, args->status, sizeof(int));
	}
	if (args->rusage != NULL && error == 0)
		error = copyout(&ru, args->rusage, sizeof(ru));

	return (error);
}

int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	char *path;
	int error;

	LCONVPATHCREAT(td, args->path, &path);

#ifdef DEBUG
	if (ldebug(mknod))
		printf(ARGS(mknod, "%s, %d, %d"), path, args->mode, args->dev);
#endif

	if (args->mode & S_IFIFO)
		error = kern_mkfifo(td, path, UIO_SYSSPACE, args->mode);
	else
		error = kern_mknod(td, path, UIO_SYSSPACE, args->mode,
		    args->dev);
	LFREEPATH(path);
	return (error);
}

/*
 * UGH! This is just about the dumbest idea I've ever heard!!
 */
int
linux_personality(struct thread *td, struct linux_personality_args *args)
{
#ifdef DEBUG
	if (ldebug(personality))
		printf(ARGS(personality, "%lu"), (unsigned long)args->per);
#endif
#ifndef __alpha__
	if (args->per != 0)
		return EINVAL;
#endif

	/* Yes Jim, it's still a Linux... */
	td->td_retval[0] = 0;
	return 0;
}
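
/*
 * B2L_ITIMERVAL copies an itimerval member by member; the Linux and
 * native structures have the same fields, so the one macro serves for
 * conversions in both directions.
 */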

struct l_itimerval {
	l_timeval it_interval;
	l_timeval it_value;
};

#define B2L_ITIMERVAL(bip, lip) \
	(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec; \
	(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec; \
	(bip)->it_value.tv_sec = (lip)->it_value.tv_sec; \
	(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;

int
linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv, oitv;

#ifdef DEBUG
	if (ldebug(setitimer))
		printf(ARGS(setitimer, "%p, %p"),
		    (void *)uap->itv, (void *)uap->oitv);
#endif

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
	}

	error = copyin(uap->itv, &ls, sizeof(ls));
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&aitv, &ls);
#ifdef DEBUG
	if (ldebug(setitimer)) {
		printf("setitimer: value: sec: %ld, usec: %ld\n",
		    aitv.it_value.tv_sec, aitv.it_value.tv_usec);
		printf("setitimer: interval: sec: %ld, usec: %ld\n",
		    aitv.it_interval.tv_sec, aitv.it_interval.tv_usec);
	}
#endif
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	B2L_ITIMERVAL(&ls, &oitv);

	return (copyout(&ls, uap->oitv, sizeof(ls)));
}

int
linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv;

#ifdef DEBUG
	if (ldebug(getitimer))
		printf(ARGS(getitimer, "%p"), (void *)uap->itv);
#endif
	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&ls, &aitv);
	return (copyout(&ls, uap->itv, sizeof(ls)));
}

#ifndef __alpha__
int
linux_nice(struct thread *td, struct linux_nice_args *args)
{
	struct setpriority_args bsd_args;

	bsd_args.which = PRIO_PROCESS;
	bsd_args.who = 0;		/* current process */
	bsd_args.prio = args->inc;
	return setpriority(td, &bsd_args);
}
#endif /*!__alpha__*/
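
/*
 * Linux does not keep the effective GID in the supplementary group set,
 * while FreeBSD stores it in cr_groups[0].  The setgroups() and
 * getgroups() emulations therefore shift the group list by one entry in
 * each direction.
 */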

int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t linux_gidset[NGROUPS];
	gid_t *bsd_gidset;
	int ngrp, error;
	struct proc *p;

	ngrp = args->gidsetsize;
	if (ngrp < 0 || ngrp >= NGROUPS)
		return (EINVAL);
	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
	if (error)
		return (error);
	newcred = crget();
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = p->p_ucred;

	/*
	 * cr_groups[0] holds egid. Setting the whole set from
	 * the supplied set will cause egid to be changed too.
	 * Keep cr_groups[0] unchanged to prevent that.
	 */

	if ((error = suser_cred(oldcred, SUSER_ALLOWJAIL)) != 0) {
		PROC_UNLOCK(p);
		crfree(newcred);
		return (error);
	}

	crcopy(newcred, oldcred);
	if (ngrp > 0) {
		newcred->cr_ngroups = ngrp + 1;

		bsd_gidset = newcred->cr_groups;
		ngrp--;
		while (ngrp >= 0) {
			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
			ngrp--;
		}
	} else
		newcred->cr_ngroups = 1;

	setsugid(p);
	p->p_ucred = newcred;
	PROC_UNLOCK(p);
	crfree(oldcred);
	return (0);
}

int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
	struct ucred *cred;
	l_gid_t linux_gidset[NGROUPS];
	gid_t *bsd_gidset;
	int bsd_gidsetsz, ngrp, error;

	cred = td->td_ucred;
	bsd_gidset = cred->cr_groups;
	bsd_gidsetsz = cred->cr_ngroups - 1;

	/*
	 * cr_groups[0] holds egid. Returning the whole set
	 * here will cause a duplicate. Exclude cr_groups[0]
	 * to prevent that.
	 */

	if ((ngrp = args->gidsetsize) == 0) {
		td->td_retval[0] = bsd_gidsetsz;
		return (0);
	}

	if (ngrp < bsd_gidsetsz)
		return (EINVAL);

	ngrp = 0;
	while (ngrp < bsd_gidsetsz) {
		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
		ngrp++;
	}

	if ((error = copyout(linux_gidset, args->grouplist,
	    ngrp * sizeof(l_gid_t))))
		return (error);

	td->td_retval[0] = ngrp;
	return (0);
}
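
/*
 * The resource-limit calls translate the Linux resource number through
 * linux_to_bsd_resource[] above and reject resources that have no
 * FreeBSD counterpart with EINVAL.
 */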

#ifndef __alpha__
int
linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
{
	struct rlimit bsd_rlim;
	struct l_rlimit rlim;
	u_int which;
	int error;

#ifdef DEBUG
	if (ldebug(setrlimit))
		printf(ARGS(setrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	error = copyin(args->rlim, &rlim, sizeof(rlim));
	if (error)
		return (error);

	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
	return (kern_setrlimit(td, which, &bsd_rlim));
}

int
linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct proc *p = td->td_proc;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(old_getrlimit))
		printf(ARGS(old_getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	PROC_LOCK(p);
	lim_rlimit(p, which, &bsd_rlim);
	PROC_UNLOCK(p);

#ifdef COMPAT_LINUX32
	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == UINT_MAX)
		rlim.rlim_cur = INT_MAX;
	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
	if (rlim.rlim_max == UINT_MAX)
		rlim.rlim_max = INT_MAX;
#else
	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == ULONG_MAX)
		rlim.rlim_cur = LONG_MAX;
	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
	if (rlim.rlim_max == ULONG_MAX)
		rlim.rlim_max = LONG_MAX;
#endif
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}

int
linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct proc *p = td->td_proc;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(getrlimit))
		printf(ARGS(getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	PROC_LOCK(p);
	lim_rlimit(p, which, &bsd_rlim);
	PROC_UNLOCK(p);

	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}
#endif /*!__alpha__*/

int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_setscheduler_args bsd;

#ifdef DEBUG
	if (ldebug(sched_setscheduler))
		printf(ARGS(sched_setscheduler, "%d, %d, %p"),
		    args->pid, args->policy, (const void *)args->param);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return EINVAL;
	}

	bsd.pid = args->pid;
	bsd.param = (struct sched_param *)args->param;
	return sched_setscheduler(td, &bsd);
}

int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct sched_getscheduler_args bsd;
	int error;

#ifdef DEBUG
	if (ldebug(sched_getscheduler))
		printf(ARGS(sched_getscheduler, "%d"), args->pid);
#endif

	bsd.pid = args->pid;
	error = sched_getscheduler(td, &bsd);

	switch (td->td_retval[0]) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}

	return error;
}

int
linux_sched_get_priority_max(struct thread *td,
    struct linux_sched_get_priority_max_args *args)
{
	struct sched_get_priority_max_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_max))
		printf(ARGS(sched_get_priority_max, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return EINVAL;
	}
	return sched_get_priority_max(td, &bsd);
}

int
linux_sched_get_priority_min(struct thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_min))
		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return EINVAL;
	}
	return sched_get_priority_min(td, &bsd);
}
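
/*
 * Magic command values accepted by Linux reboot(2).  The Ctrl-Alt-Del
 * toggles are accepted but ignored; REBOOT_HALT halts the system
 * instead of rebooting it.
 */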
#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123

int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

#ifdef DEBUG
	if (ldebug(reboot))
		printf(ARGS(reboot, "0x%x"), args->cmd);
#endif
	if (args->cmd == REBOOT_CAD_ON || args->cmd == REBOOT_CAD_OFF)
		return (0);
	bsd_args.opt = (args->cmd == REBOOT_HALT) ? RB_HALT : 0;
	return (reboot(td, &bsd_args));
}

#ifndef __alpha__

/*
 * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
 * td->td_retval[1] when COMPAT_43 is defined.  This clobbers registers
 * that are assumed to be preserved.  The following lightweight syscalls
 * fix this.  See also linux_getgid16() and linux_getuid16() in
 * linux_uid16.c.
 *
 * linux_getpid() - MP SAFE
 * linux_getgid() - MP SAFE
 * linux_getuid() - MP SAFE
 */

int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{

	td->td_retval[0] = td->td_proc->p_pid;
	return (0);
}

int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}

int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}

#endif /*!__alpha__*/

int
linux_getsid(struct thread *td, struct linux_getsid_args *args)
{
	struct getsid_args bsd;

	bsd.pid = args->pid;
	return getsid(td, &bsd);
}