/*-
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/blist.h>
#include <sys/fcntl.h>
#if defined(__i386__)
#include <sys/imgact_aout.h>
#endif
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#include <sys/cpuset.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/swap_pager.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif

#include <compat/linux/linux_dtrace.h>
#include <compat/linux/linux_file.h>
#include <compat/linux/linux_mib.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_sysproto.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_misc.h>

/* DTrace init */
LIN_SDT_PROVIDER_DECLARE(LINUX_DTRACE);

/* Linuxulator-global DTrace probes */
LIN_SDT_PROBE_DECLARE(locks, emul_lock, locked);
LIN_SDT_PROBE_DECLARE(locks, emul_lock, unlock);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_rlock, locked);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_rlock, unlock);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_wlock, locked);
LIN_SDT_PROBE_DECLARE(locks, emul_shared_wlock, unlock);

int stclohz;				/* Statistics clock frequency */
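
/*
 * Translation table from Linux resource-limit indices to the native
 * RLIMIT_* identifiers; the two ABIs number some of the resources
 * differently, so the value cannot simply be passed through.
 */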
static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, RLIMIT_AS
};

struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
#define LINUX_SYSINFO_LOADS_SCALE 65536
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* swap space still available */
	l_ushort	procs;		/* Number of current processes */
	l_ushort	pads;
	l_ulong		totalbig;
	l_ulong		freebig;
	l_uint		mem_unit;
	char		_f[20-2*sizeof(l_long)-sizeof(l_int)];	/* padding */
};
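
/*
 * sysinfo(2) emulation: uptime is rounded up to whole seconds, the load
 * averages are rescaled from the kernel's fixed-point format (fscale) to
 * the 1/65536 units Linux expects, and the memory and swap figures are
 * reported in bytes (mem_unit = 1).
 */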
int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	vm_object_t object;
	int i, j;
	struct timespec ts;

	getnanouptime(&ts);
	if (ts.tv_nsec != 0)
		ts.tv_sec++;
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages */
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i] *
		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;

	sysinfo.totalram = physmem * PAGE_SIZE;
	sysinfo.freeram = sysinfo.totalram - vm_cnt.v_wire_count * PAGE_SIZE;

	sysinfo.sharedram = 0;
	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(object, &vm_object_list, object_list)
		if (object->shadow_count > 1)
			sysinfo.sharedram += object->resident_page_count;
	mtx_unlock(&vm_object_list_mtx);

	sysinfo.sharedram *= PAGE_SIZE;
	sysinfo.bufferram = 0;

	swap_pager_status(&i, &j);
	sysinfo.totalswap = i * PAGE_SIZE;
	sysinfo.freeswap = (i - j) * PAGE_SIZE;

	sysinfo.procs = nprocs;

	/* The following are only present in newer Linux kernels. */
	sysinfo.totalbig = 0;
	sysinfo.freebig = 0;
	sysinfo.mem_unit = 1;

	return (copyout(&sysinfo, args->info, sizeof(sysinfo)));
}

int
linux_alarm(struct thread *td, struct linux_alarm_args *args)
{
	struct itimerval it, old_it;
	u_int secs;
	int error;

#ifdef DEBUG
	if (ldebug(alarm))
		printf(ARGS(alarm, "%u"), args->secs);
#endif

	secs = args->secs;

	if (secs > INT_MAX)
		secs = INT_MAX;

	it.it_value.tv_sec = (long) secs;
	it.it_value.tv_usec = 0;
	it.it_interval.tv_sec = 0;
	it.it_interval.tv_usec = 0;
	error = kern_setitimer(td, ITIMER_REAL, &it, &old_it);
	if (error)
		return (error);
	if (timevalisset(&old_it.it_value)) {
		if (old_it.it_value.tv_usec != 0)
			old_it.it_value.tv_sec++;
		td->td_retval[0] = old_it.it_value.tv_sec;
	}
	return (0);
}

int
linux_brk(struct thread *td, struct linux_brk_args *args)
{
	struct vmspace *vm = td->td_proc->p_vmspace;
	vm_offset_t new, old;
	struct obreak_args /* {
		char * nsize;
	} */ tmp;

#ifdef DEBUG
	if (ldebug(brk))
		printf(ARGS(brk, "%p"), (void *)(uintptr_t)args->dsend);
#endif
	old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize);
	new = (vm_offset_t)args->dsend;
	tmp.nsize = (char *)new;
	if (((caddr_t)new > vm->vm_daddr) && !sys_obreak(td, &tmp))
		td->td_retval[0] = (long)new;
	else
		td->td_retval[0] = (long)old;

	return (0);
}

#if defined(__i386__)
/* XXX: what about amd64/linux32? */

int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	unsigned long bss_size;
	char *library;
	ssize_t aresid;
	int error, locked, writecount;

	LCONVPATHEXIST(td, args->library, &library);

#ifdef DEBUG
	if (ldebug(uselib))
		printf(ARGS(uselib, "%s"), library);
#endif

	a_out = NULL;
	locked = 0;
	vp = NULL;

	NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
	    UIO_SYSSPACE, library, td);
	error = namei(&ni);
	LFREEPATH(library);
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	NDFREE(&ni, NDF_ONLY_PNBUF);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 * XXX: The code below largely duplicates exec_check_permissions().
	 */
	locked = 1;

	/* Writable? */
	error = VOP_GET_WRITECOUNT(vp, &writecount);
	if (error != 0)
		goto cleanup;
	if (writecount != 0) {
		error = ETXTBSY;
		goto cleanup;
	}

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		/* EACCES is what exec(2) returns. */
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 * XXX: Not really, it duplicates far more of exec_check_permissions()
	 * than vn_open().
	 */
#ifdef MAC
	error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error)
		goto cleanup;

	/* Pull in executable header into exec_map */
	error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
	if (error)
		goto cleanup;
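
	/*
	 * Note on the a.out magic word checked below: the 0x64 byte in bits
	 * 16-23 is what marks the image as a Linux a.out object, and the
	 * low 16 bits select the variant (ZMAGIC vs. QMAGIC), which in turn
	 * determines the file offset of the text.
	 */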
	/* Is it a Linux binary ? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:	/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:	/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete. it should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur(td->td_proc, RLIMIT_DATA) ||
	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
	    bss_size) != 0) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * Prevent more writers.
	 * XXX: Note that if any of the VM operations fail below we don't
	 * clear this flag.
	 */
	VOP_SET_TEXT(vp);

	/*
	 * Lock no longer needed
	 */
	locked = 0;
	VOP_UNLOCK(vp, 0);

	/*
	 * Check if file_offset page aligned. Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
#ifdef DEBUG
		printf("uselib: Non page aligned binary %lu\n", file_offset);
#endif
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
		    VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
		    td->td_ucred, NOCRED, &aresid, td);
		if (error != 0)
			goto cleanup;
		if (aresid != 0) {
			error = ENOEXEC;
			goto cleanup;
		}
	} else {
#ifdef DEBUG
		printf("uselib: Page aligned binary %lu\n", file_offset);
#endif
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
		if (error)
			goto cleanup;
	}
#ifdef DEBUG
	printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long *)vmaddr)[0],
	    ((long *)vmaddr)[1]);
#endif
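
	/*
	 * The BSS starts right after text+data at the load address; it is
	 * backed by fresh anonymous zero-fill pages rather than the file.
	 */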
	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	/* Unlock vnode if needed */
	if (locked)
		VOP_UNLOCK(vp, 0);

	/* Release the temporary mapping. */
	if (a_out)
		kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);

	return (error);
}

#endif	/* __i386__ */
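
/*
 * Linux select(2) updates the timeout argument in place with the time
 * that was left when the call returns, which the native select does not
 * do.  The wrapper therefore samples the clock around kern_select() and
 * writes the remaining time back to userland itself.
 */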
int
linux_select(struct thread *td, struct linux_select_args *args)
{
	l_timeval ltv;
	struct timeval tv0, tv1, utv, *tvp;
	int error;

#ifdef DEBUG
	if (ldebug(select))
		printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds,
		    (void *)args->readfds, (void *)args->writefds,
		    (void *)args->exceptfds, (void *)args->timeout);
#endif

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (args->timeout) {
		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
			goto select_out;
		utv.tv_sec = ltv.tv_sec;
		utv.tv_usec = ltv.tv_usec;
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("incoming timeout (%jd/%ld)"),
			    (intmax_t)utv.tv_sec, utv.tv_usec);
#endif

		if (itimerfix(&utv)) {
			/*
			 * The timeval was invalid.  Convert it to something
			 * valid that will act as it does under Linux.
			 */
			utv.tv_sec += utv.tv_usec / 1000000;
			utv.tv_usec %= 1000000;
			if (utv.tv_usec < 0) {
				utv.tv_sec -= 1;
				utv.tv_usec += 1000000;
			}
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		}
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_select(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, sizeof(l_int) * 8);

#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("real select returns %d"), error);
#endif
	if (error)
		goto select_out;

	if (args->timeout) {
		if (td->td_retval[0]) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */
			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("outgoing timeout (%jd/%ld)"),
			    (intmax_t)utv.tv_sec, utv.tv_usec);
#endif
		ltv.tv_sec = utv.tv_sec;
		ltv.tv_usec = utv.tv_usec;
		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
			goto select_out;
	}

select_out:
#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("select_out -> %d"), error);
#endif
	return (error);
}

int
linux_mremap(struct thread *td, struct linux_mremap_args *args)
{
	struct munmap_args /* {
		void *addr;
		size_t len;
	} */ bsd_args;
	int error = 0;

#ifdef DEBUG
	if (ldebug(mremap))
		printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"),
		    (void *)(uintptr_t)args->addr,
		    (unsigned long)args->old_len,
		    (unsigned long)args->new_len,
		    (unsigned long)args->flags);
#endif

	if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	/*
	 * Check for the page alignment.
	 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK.
	 */
	if (args->addr & PAGE_MASK) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	args->new_len = round_page(args->new_len);
	args->old_len = round_page(args->old_len);

	if (args->new_len > args->old_len) {
		td->td_retval[0] = 0;
		return (ENOMEM);
	}

	if (args->new_len < args->old_len) {
		bsd_args.addr =
		    (caddr_t)((uintptr_t)args->addr + args->new_len);
		bsd_args.len = args->old_len - args->new_len;
		error = sys_munmap(td, &bsd_args);
	}

	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
	return (error);
}

#define LINUX_MS_ASYNC		0x0001
#define LINUX_MS_INVALIDATE	0x0002
#define LINUX_MS_SYNC		0x0004

int
linux_msync(struct thread *td, struct linux_msync_args *args)
{
	struct msync_args bsd_args;

	bsd_args.addr = (caddr_t)(uintptr_t)args->addr;
	bsd_args.len = (uintptr_t)args->len;
	bsd_args.flags = args->fl & ~LINUX_MS_SYNC;

	return (sys_msync(td, &bsd_args));
}

int
linux_time(struct thread *td, struct linux_time_args *args)
{
	struct timeval tv;
	l_time_t tm;
	int error;

#ifdef DEBUG
	if (ldebug(time))
		printf(ARGS(time, "*"));
#endif

	microtime(&tv);
	tm = tv.tv_sec;
	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
		return (error);
	td->td_retval[0] = tm;
	return (0);
}

struct l_times_argv {
	l_clock_t	tms_utime;
	l_clock_t	tms_stime;
	l_clock_t	tms_cutime;
	l_clock_t	tms_cstime;
};


/*
 * Glibc versions prior to 2.2.1 always use hard-coded CLK_TCK value.
 * Since 2.2.1 Glibc uses value exported from kernel via AT_CLKTCK
 * auxiliary vector entry.
 */
#define CLK_TCK		100

#define CONVOTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
#define CONVNTCK(r)	(r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz))

#define CONVTCK(r)	(linux_kernver(td) >= LINUX_KERNVER_2004000 ?	\
			    CONVNTCK(r) : CONVOTCK(r))
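
/*
 * Example: with the statistics clock running at stclohz = 128, an rusage
 * value of 2.5 seconds ({ .tv_sec = 2, .tv_usec = 500000 }) converts to
 * 2 * 128 + 500000 / (1000000 / 128) = 320 clock ticks.
 */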
int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv, utime, stime, cutime, cstime;
	struct l_times_argv tms;
	struct proc *p;
	int error;

#ifdef DEBUG
	if (ldebug(times))
		printf(ARGS(times, "*"));
#endif

	if (args->buf != NULL) {
		p = td->td_proc;
		PROC_LOCK(p);
		PROC_SLOCK(p);
		calcru(p, &utime, &stime);
		PROC_SUNLOCK(p);
		calccru(p, &cutime, &cstime);
		PROC_UNLOCK(p);

		tms.tms_utime = CONVTCK(utime);
		tms.tms_stime = CONVTCK(stime);

		tms.tms_cutime = CONVTCK(cutime);
		tms.tms_cstime = CONVTCK(cstime);

		if ((error = copyout(&tms, args->buf, sizeof(tms))))
			return (error);
	}

	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return (0);
}

int
linux_newuname(struct thread *td, struct linux_newuname_args *args)
{
	struct l_new_utsname utsname;
	char osname[LINUX_MAX_UTSNAME];
	char osrelease[LINUX_MAX_UTSNAME];
	char *p;

#ifdef DEBUG
	if (ldebug(newuname))
		printf(ARGS(newuname, "*"));
#endif

	linux_get_osname(td, osname);
	linux_get_osrelease(td, osrelease);

	bzero(&utsname, sizeof(utsname));
	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
	getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME);
	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
	for (p = utsname.version; *p != '\0'; ++p)
		if (*p == '\n') {
			*p = '\0';
			break;
		}
	strlcpy(utsname.machine, linux_platform, LINUX_MAX_UTSNAME);

	return (copyout(&utsname, args->buf, sizeof(utsname)));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
struct l_utimbuf {
	l_time_t l_actime;
	l_time_t l_modtime;
};

int
linux_utime(struct thread *td, struct linux_utime_args *args)
{
	struct timeval tv[2], *tvp;
	struct l_utimbuf lut;
	char *fname;
	int error;

	LCONVPATHEXIST(td, args->fname, &fname);

#ifdef DEBUG
	if (ldebug(utime))
		printf(ARGS(utime, "%s, *"), fname);
#endif

	if (args->times) {
		if ((error = copyin(args->times, &lut, sizeof lut))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = lut.l_actime;
		tv[0].tv_usec = 0;
		tv[1].tv_sec = lut.l_modtime;
		tv[1].tv_usec = 0;
		tvp = tv;
	} else
		tvp = NULL;

	error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}

int
linux_utimes(struct thread *td, struct linux_utimes_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error;

	LCONVPATHEXIST(td, args->fname, &fname);

#ifdef DEBUG
	if (ldebug(utimes))
		printf(ARGS(utimes, "%s, *"), fname);
#endif

	if (args->tptr != NULL) {
		if ((error = copyin(args->tptr, ltv, sizeof ltv))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}

int
linux_futimesat(struct thread *td, struct linux_futimesat_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
	LCONVPATHEXIST_AT(td, args->filename, &fname, dfd);

#ifdef DEBUG
	if (ldebug(futimesat))
		printf(ARGS(futimesat, "%s, *"), fname);
#endif

	if (args->utimes != NULL) {
		if ((error = copyin(args->utimes, ltv, sizeof ltv))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}
#endif	/* __i386__ || (__amd64__ && COMPAT_LINUX32) */
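
/*
 * Convert a native wait(2) status word into the Linux layout: the exit
 * code lives in bits 8-15, a terminating signal in the low seven bits,
 * and a stop signal in bits 8-15 with 0x7f in the low byte, so only the
 * signal numbers themselves need to be translated.
 */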
int
linux_common_wait(struct thread *td, int pid, int *status,
    int options, struct rusage *ru)
{
	int error, tmpstat;

	error = kern_wait(td, pid, &tmpstat, options, ru);
	if (error)
		return (error);

	if (status) {
		tmpstat &= 0xffff;
		if (WIFSIGNALED(tmpstat))
			tmpstat = (tmpstat & 0xffffff80) |
			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
		else if (WIFSTOPPED(tmpstat))
			tmpstat = (tmpstat & 0xffff00ff) |
			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
		error = copyout(&tmpstat, status, sizeof(int));
	}

	return (error);
}

int
linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
{
	int options;

#ifdef DEBUG
	if (ldebug(waitpid))
		printf(ARGS(waitpid, "%d, %p, %d"),
		    args->pid, (void *)args->status, args->options);
#endif
	/*
	 * This check is necessary because the option test in kern_wait()
	 * cannot be relied upon here, since we modify the options below.
	 */
	if (args->options & ~(WUNTRACED | WNOHANG | WCONTINUED | __WCLONE))
		return (EINVAL);

	options = (args->options & (WNOHANG | WUNTRACED));
	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
	if (args->options & __WCLONE)
		options |= WLINUXCLONE;

	return (linux_common_wait(td, args->pid, args->status, options, NULL));
}
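
/*
 * Linux mknod(2) can create FIFOs, sockets, device nodes, and plain
 * files.  A socket request is satisfied with a FIFO, a mode of 0 defaults
 * to a regular file (created by an open/close pair), and directories are
 * rejected with EPERM.
 */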
int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	char *path;
	int error;

	LCONVPATHCREAT(td, args->path, &path);

#ifdef DEBUG
	if (ldebug(mknod))
		printf(ARGS(mknod, "%s, %d, %d"), path, args->mode, args->dev);
#endif

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifo(td, path, UIO_SYSSPACE, args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknod(td, path, UIO_SYSSPACE, args->mode,
		    args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_open(td, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}

int
linux_mknodat(struct thread *td, struct linux_mknodat_args *args)
{
	char *path;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
	LCONVPATHCREAT_AT(td, args->filename, &path, dfd);

#ifdef DEBUG
	if (ldebug(mknodat))
		printf(ARGS(mknodat, "%s, %d, %d"), path, args->mode, args->dev);
#endif

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, dfd, path, UIO_SYSSPACE, args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, dfd, path, UIO_SYSSPACE, args->mode,
		    args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, dfd, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}

/*
 * UGH! This is just about the dumbest idea I've ever heard!!
 */
int
linux_personality(struct thread *td, struct linux_personality_args *args)
{
#ifdef DEBUG
	if (ldebug(personality))
		printf(ARGS(personality, "%lu"), (unsigned long)args->per);
#endif
	if (args->per != 0)
		return (EINVAL);

	/* Yes Jim, it's still a Linux... */
	td->td_retval[0] = 0;
	return (0);
}
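
/*
 * Linux itimerval uses l_timeval members but otherwise mirrors the native
 * layout, so B2L_ITIMERVAL below simply copies field by field and, despite
 * its name, is used for conversion in both directions.
 */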
struct l_itimerval {
	l_timeval it_interval;
	l_timeval it_value;
};

#define	B2L_ITIMERVAL(bip, lip)						\
	(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec;		\
	(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec;	\
	(bip)->it_value.tv_sec = (lip)->it_value.tv_sec;		\
	(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;

int
linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv, oitv;

#ifdef DEBUG
	if (ldebug(setitimer))
		printf(ARGS(setitimer, "%p, %p"),
		    (void *)uap->itv, (void *)uap->oitv);
#endif

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
	}

	error = copyin(uap->itv, &ls, sizeof(ls));
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&aitv, &ls);
#ifdef DEBUG
	if (ldebug(setitimer)) {
		printf("setitimer: value: sec: %jd, usec: %ld\n",
		    (intmax_t)aitv.it_value.tv_sec, aitv.it_value.tv_usec);
		printf("setitimer: interval: sec: %jd, usec: %ld\n",
		    (intmax_t)aitv.it_interval.tv_sec, aitv.it_interval.tv_usec);
	}
#endif
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	B2L_ITIMERVAL(&ls, &oitv);

	return (copyout(&ls, uap->oitv, sizeof(ls)));
}

int
linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv;

#ifdef DEBUG
	if (ldebug(getitimer))
		printf(ARGS(getitimer, "%p"), (void *)uap->itv);
#endif
	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&ls, &aitv);
	return (copyout(&ls, uap->itv, sizeof(ls)));
}

int
linux_nice(struct thread *td, struct linux_nice_args *args)
{
	struct setpriority_args bsd_args;

	bsd_args.which = PRIO_PROCESS;
	bsd_args.who = 0;		/* current process */
	bsd_args.prio = args->inc;
	return (sys_setpriority(td, &bsd_args));
}

int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int ngrp, error;
	struct proc *p;

	ngrp = args->gidsetsize;
	if (ngrp < 0 || ngrp >= ngroups_max + 1)
		return (EINVAL);
	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_TEMP, M_WAITOK);
	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
	if (error)
		goto out;
	newcred = crget();
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = crcopysafe(p, newcred);

	/*
	 * cr_groups[0] holds egid. Setting the whole set from
	 * the supplied set will cause egid to be changed too.
	 * Keep cr_groups[0] unchanged to prevent that.
	 */

	if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS, 0)) != 0) {
		PROC_UNLOCK(p);
		crfree(newcred);
		goto out;
	}

	if (ngrp > 0) {
		newcred->cr_ngroups = ngrp + 1;

		bsd_gidset = newcred->cr_groups;
		ngrp--;
		while (ngrp >= 0) {
			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
			ngrp--;
		}
	} else
		newcred->cr_ngroups = 1;

	setsugid(p);
	p->p_ucred = newcred;
	PROC_UNLOCK(p);
	crfree(oldcred);
	error = 0;
out:
	free(linux_gidset, M_TEMP);
	return (error);
}

int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
	struct ucred *cred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int bsd_gidsetsz, ngrp, error;

	cred = td->td_ucred;
	bsd_gidset = cred->cr_groups;
	bsd_gidsetsz = cred->cr_ngroups - 1;

	/*
	 * cr_groups[0] holds egid. Returning the whole set
	 * here will cause a duplicate. Exclude cr_groups[0]
	 * to prevent that.
	 */

	if ((ngrp = args->gidsetsize) == 0) {
		td->td_retval[0] = bsd_gidsetsz;
		return (0);
	}

	if (ngrp < bsd_gidsetsz)
		return (EINVAL);

	ngrp = 0;
	linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
	    M_TEMP, M_WAITOK);
	while (ngrp < bsd_gidsetsz) {
		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
		ngrp++;
	}

	error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
	free(linux_gidset, M_TEMP);
	if (error)
		return (error);

	td->td_retval[0] = ngrp;
	return (0);
}

int
linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
{
	struct rlimit bsd_rlim;
	struct l_rlimit rlim;
	u_int which;
	int error;

#ifdef DEBUG
	if (ldebug(setrlimit))
		printf(ARGS(setrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	error = copyin(args->rlim, &rlim, sizeof(rlim));
	if (error)
		return (error);

	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
	return (kern_setrlimit(td, which, &bsd_rlim));
}
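
/*
 * old_getrlimit is the legacy Linux interface; the clamping below keeps an
 * unlimited value (all bits set) from reading back as -1, which older
 * userlands could misinterpret.
 */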
int
linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct proc *p = td->td_proc;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(old_getrlimit))
		printf(ARGS(old_getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	PROC_LOCK(p);
	lim_rlimit(p, which, &bsd_rlim);
	PROC_UNLOCK(p);

#ifdef COMPAT_LINUX32
	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == UINT_MAX)
		rlim.rlim_cur = INT_MAX;
	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
	if (rlim.rlim_max == UINT_MAX)
		rlim.rlim_max = INT_MAX;
#else
	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == ULONG_MAX)
		rlim.rlim_cur = LONG_MAX;
	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
	if (rlim.rlim_max == ULONG_MAX)
		rlim.rlim_max = LONG_MAX;
#endif
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}

int
linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct proc *p = td->td_proc;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(getrlimit))
		printf(ARGS(getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	PROC_LOCK(p);
	lim_rlimit(p, which, &bsd_rlim);
	PROC_UNLOCK(p);

	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}
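
/*
 * The scheduling wrappers only need to translate the policy constants;
 * the Linux and native sched_param structures are effectively the same
 * single int priority field, so the user pointer is handed through
 * unchanged.
 */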
int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_setscheduler_args bsd;

#ifdef DEBUG
	if (ldebug(sched_setscheduler))
		printf(ARGS(sched_setscheduler, "%d, %d, %p"),
		    args->pid, args->policy, (const void *)args->param);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}

	bsd.pid = args->pid;
	bsd.param = (struct sched_param *)args->param;
	return (sys_sched_setscheduler(td, &bsd));
}

int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct sched_getscheduler_args bsd;
	int error;

#ifdef DEBUG
	if (ldebug(sched_getscheduler))
		printf(ARGS(sched_getscheduler, "%d"), args->pid);
#endif

	bsd.pid = args->pid;
	error = sys_sched_getscheduler(td, &bsd);

	switch (td->td_retval[0]) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}

	return (error);
}

int
linux_sched_get_priority_max(struct thread *td,
    struct linux_sched_get_priority_max_args *args)
{
	struct sched_get_priority_max_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_max))
		printf(ARGS(sched_get_priority_max, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_max(td, &bsd));
}

int
linux_sched_get_priority_min(struct thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_min))
		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_min(td, &bsd));
}

#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123
#define REBOOT_RESTART	0x01234567
#define REBOOT_RESTART2	0xA1B2C3D4
#define REBOOT_POWEROFF	0x4321FEDC
#define REBOOT_MAGIC1	0xfee1dead
#define REBOOT_MAGIC2	0x28121969
#define REBOOT_MAGIC2A	0x05121996
#define REBOOT_MAGIC2B	0x16041998
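
/*
 * The magic numbers above come from the Linux reboot(2) ABI.  The wrapper
 * below validates both magic arguments, maps HALT/POWEROFF/RESTART onto
 * the native reboot(2) howto flags, and treats the Ctrl-Alt-Del toggles
 * as privileged no-ops.
 */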
int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

#ifdef DEBUG
	if (ldebug(reboot))
		printf(ARGS(reboot, "0x%x"), args->cmd);
#endif

	if (args->magic1 != REBOOT_MAGIC1)
		return (EINVAL);

	switch (args->magic2) {
	case REBOOT_MAGIC2:
	case REBOOT_MAGIC2A:
	case REBOOT_MAGIC2B:
		break;
	default:
		return (EINVAL);
	}

	switch (args->cmd) {
	case REBOOT_CAD_ON:
	case REBOOT_CAD_OFF:
		return (priv_check(td, PRIV_REBOOT));
	case REBOOT_HALT:
		bsd_args.opt = RB_HALT;
		break;
	case REBOOT_RESTART:
	case REBOOT_RESTART2:
		bsd_args.opt = 0;
		break;
	case REBOOT_POWEROFF:
		bsd_args.opt = RB_POWEROFF;
		break;
	default:
		return (EINVAL);
	}
	return (sys_reboot(td, &bsd_args));
}


/*
 * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
 * td->td_retval[1] when COMPAT_43 is defined.  This clobbers registers that
 * are assumed to be preserved.  The following lightweight syscalls fix
 * this.  See also linux_getgid16() and linux_getuid16() in linux_uid16.c.
 *
 * linux_getpid() - MP SAFE
 * linux_getgid() - MP SAFE
 * linux_getuid() - MP SAFE
 */

int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(getpid))
		printf(ARGS(getpid, ""));
#endif

	if (linux_use26(td)) {
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		KASSERT(em != NULL, ("getpid: emuldata not found.\n"));
		td->td_retval[0] = em->shared->group_pid;
	} else {
		td->td_retval[0] = td->td_proc->p_pid;
	}

	return (0);
}

int
linux_gettid(struct thread *td, struct linux_gettid_args *args)
{

#ifdef DEBUG
	if (ldebug(gettid))
		printf(ARGS(gettid, ""));
#endif

	td->td_retval[0] = td->td_proc->p_pid;
	return (0);
}


int
linux_getppid(struct thread *td, struct linux_getppid_args *args)
{
	struct linux_emuldata *em;
	struct proc *p, *pp;

#ifdef DEBUG
	if (ldebug(getppid))
		printf(ARGS(getppid, ""));
#endif

	if (!linux_use26(td)) {
		PROC_LOCK(td->td_proc);
		td->td_retval[0] = td->td_proc->p_pptr->p_pid;
		PROC_UNLOCK(td->td_proc);
		return (0);
	}

	em = em_find(td->td_proc, EMUL_DONTLOCK);

	KASSERT(em != NULL, ("getppid: process emuldata not found.\n"));

	/* find the group leader */
	p = pfind(em->shared->group_pid);

	if (p == NULL) {
#ifdef DEBUG
		printf(LMSG("parent process not found.\n"));
#endif
		return (0);
	}

	pp = p->p_pptr;		/* switch to parent */
	PROC_LOCK(pp);
	PROC_UNLOCK(p);

	/* if it is also a Linux process */
	if (pp->p_sysent == &elf_linux_sysvec) {
		em = em_find(pp, EMUL_DONTLOCK);
		KASSERT(em != NULL, ("getppid: parent emuldata not found.\n"));

		td->td_retval[0] = em->shared->group_pid;
	} else
		td->td_retval[0] = pp->p_pid;

	PROC_UNLOCK(pp);

	return (0);
}

int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

#ifdef DEBUG
	if (ldebug(getgid))
		printf(ARGS(getgid, ""));
#endif

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}

int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

#ifdef DEBUG
	if (ldebug(getuid))
		printf(ARGS(getuid, ""));
#endif

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}


int
linux_getsid(struct thread *td, struct linux_getsid_args *args)
{
	struct getsid_args bsd;

#ifdef DEBUG
	if (ldebug(getsid))
		printf(ARGS(getsid, "%i"), args->pid);
#endif

	bsd.pid = args->pid;
	return (sys_getsid(td, &bsd));
}

int
linux_nosys(struct thread *td, struct nosys_args *ignore)
{

	return (ENOSYS);
}

int
linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
{
	struct getpriority_args bsd_args;
	int error;

#ifdef DEBUG
	if (ldebug(getpriority))
		printf(ARGS(getpriority, "%i, %i"), args->which, args->who);
#endif

	bsd_args.which = args->which;
	bsd_args.who = args->who;
	error = sys_getpriority(td, &bsd_args);
	td->td_retval[0] = 20 - td->td_retval[0];
	return (error);
}

int
linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
{
	int name[2];

#ifdef DEBUG
	if (ldebug(sethostname))
		printf(ARGS(sethostname, "*, %i"), args->len);
#endif

	name[0] = CTL_KERN;
	name[1] = KERN_HOSTNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
	    args->len, 0, 0));
}

int
linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
{
	int name[2];

#ifdef DEBUG
	if (ldebug(setdomainname))
		printf(ARGS(setdomainname, "*, %i"), args->len);
#endif

	name[0] = CTL_KERN;
	name[1] = KERN_NISDOMAINNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
	    args->len, 0, 0));
}
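
/*
 * exit_group() ends the whole thread group.  When 2.6 emulation is in use
 * the remaining threads are killed with SIGKILL, and the shared exit
 * status is recorded first so that the group reports a consistent status.
 */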
int
linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(exit_group))
		printf(ARGS(exit_group, "%i"), args->error_code);
#endif

	em = em_find(td->td_proc, EMUL_DONTLOCK);
	if (em->shared->refs > 1) {
		EMUL_SHARED_WLOCK(&emul_shared_lock);
		em->shared->flags |= EMUL_SHARED_HASXSTAT;
		em->shared->xstat = W_EXITCODE(args->error_code, 0);
		EMUL_SHARED_WUNLOCK(&emul_shared_lock);
		if (linux_use26(td))
			linux_kill_threads(td, SIGKILL);
	}

	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set. We ignore that (temporarily?)
	 * as it doesn't occur often.
	 */
	exit1(td, W_EXITCODE(args->error_code, 0));

	return (0);
}

#define _LINUX_CAPABILITY_VERSION  0x19980330

struct l_user_cap_header {
	l_int	version;
	l_int	pid;
};

struct l_user_cap_data {
	l_int	effective;
	l_int	permitted;
	l_int	inheritable;
};
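
/*
 * Linux capabilities are not implemented here: capget() always reports
 * empty capability sets and capset() refuses any request to grant one.
 */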
int
linux_capget(struct thread *td, struct linux_capget_args *args)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd;
	int error;

	if (args->hdrp == NULL)
		return (EFAULT);

	error = copyin(args->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	if (luch.version != _LINUX_CAPABILITY_VERSION) {
		luch.version = _LINUX_CAPABILITY_VERSION;
		error = copyout(&luch, args->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	if (args->datap) {
		/*
		 * The current implementation doesn't support setting
		 * a capability (it's essentially a stub) so indicate
		 * that no capabilities are currently set or available
		 * to request.
		 */
		bzero(&lucd, sizeof(lucd));
		error = copyout(&lucd, args->datap, sizeof(lucd));
	}

	return (error);
}

int
linux_capset(struct thread *td, struct linux_capset_args *args)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd;
	int error;

	if (args->hdrp == NULL || args->datap == NULL)
		return (EFAULT);

	error = copyin(args->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	if (luch.version != _LINUX_CAPABILITY_VERSION) {
		luch.version = _LINUX_CAPABILITY_VERSION;
		error = copyout(&luch, args->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	error = copyin(args->datap, &lucd, sizeof(lucd));
	if (error != 0)
		return (error);

	/* We currently don't support setting any capabilities. */
	if (lucd.effective || lucd.permitted || lucd.inheritable) {
		linux_msg(td,
		    "capset effective=0x%x, permitted=0x%x, "
		    "inheritable=0x%x is not implemented",
		    (int)lucd.effective, (int)lucd.permitted,
		    (int)lucd.inheritable);
		return (EPERM);
	}

	return (0);
}

int
linux_prctl(struct thread *td, struct linux_prctl_args *args)
{
	int error = 0, max_size;
	struct proc *p = td->td_proc;
	char comm[LINUX_MAX_COMM_LEN];
	struct linux_emuldata *em;
	int pdeath_signal;

#ifdef DEBUG
	if (ldebug(prctl))
		printf(ARGS(prctl, "%d, %d, %d, %d, %d"), args->option,
		    args->arg2, args->arg3, args->arg4, args->arg5);
#endif

	switch (args->option) {
	case LINUX_PR_SET_PDEATHSIG:
		if (!LINUX_SIG_VALID(args->arg2))
			return (EINVAL);
		em = em_find(p, EMUL_DOLOCK);
		KASSERT(em != NULL, ("prctl: emuldata not found.\n"));
		em->pdeath_signal = args->arg2;
		EMUL_UNLOCK(&emul_lock);
		break;
	case LINUX_PR_GET_PDEATHSIG:
		em = em_find(p, EMUL_DOLOCK);
		KASSERT(em != NULL, ("prctl: emuldata not found.\n"));
		pdeath_signal = em->pdeath_signal;
		EMUL_UNLOCK(&emul_lock);
		error = copyout(&pdeath_signal,
		    (void *)(register_t)args->arg2,
		    sizeof(pdeath_signal));
		break;
	case LINUX_PR_GET_KEEPCAPS:
		/*
		 * Indicate that we always clear the effective and
		 * permitted capability sets when the user id becomes
		 * non-zero (actually the capability sets are simply
		 * always zero in the current implementation).
		 */
		td->td_retval[0] = 0;
		break;
	case LINUX_PR_SET_KEEPCAPS:
		/*
		 * Ignore requests to keep the effective and permitted
		 * capability sets when the user id becomes non-zero.
		 */
		break;
	case LINUX_PR_SET_NAME:
		/*
		 * To be on the safe side we need to make sure to not
		 * overflow the size a linux program expects. We already
		 * do this here in the copyin, so that we don't need to
		 * check on copyout.
		 */
		max_size = MIN(sizeof(comm), sizeof(p->p_comm));
		error = copyinstr((void *)(register_t)args->arg2, comm,
		    max_size, NULL);

		/* Linux silently truncates the name if it is too long. */
		if (error == ENAMETOOLONG) {
			/*
			 * XXX: copyinstr() isn't documented to populate the
			 * array completely, so do a copyin() to be on the
			 * safe side. This should be changed in case
			 * copyinstr() is changed to guarantee this.
			 */
			error = copyin((void *)(register_t)args->arg2, comm,
			    max_size - 1);
			comm[max_size - 1] = '\0';
		}
		if (error)
			return (error);

		PROC_LOCK(p);
		strlcpy(p->p_comm, comm, sizeof(p->p_comm));
		PROC_UNLOCK(p);
		break;
	case LINUX_PR_GET_NAME:
		PROC_LOCK(p);
		strlcpy(comm, p->p_comm, sizeof(comm));
		PROC_UNLOCK(p);
		error = copyout(comm, (void *)(register_t)args->arg2,
		    strlen(comm) + 1);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
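
/*
 * The affinity wrappers always operate on a full native cpuset_t: the user
 * buffer must be at least that large, and on success sched_getaffinity
 * returns the number of bytes written, matching the raw Linux syscall.
 */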
/*
 * Get affinity of a process.
 */
int
linux_sched_getaffinity(struct thread *td,
    struct linux_sched_getaffinity_args *args)
{
	int error;
	struct cpuset_getaffinity_args cga;

#ifdef DEBUG
	if (ldebug(sched_getaffinity))
		printf(ARGS(sched_getaffinity, "%d, %d, *"), args->pid,
		    args->len);
#endif
	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	cga.level = CPU_LEVEL_WHICH;
	cga.which = CPU_WHICH_PID;
	cga.id = args->pid;
	cga.cpusetsize = sizeof(cpuset_t);
	cga.mask = (cpuset_t *) args->user_mask_ptr;

	if ((error = sys_cpuset_getaffinity(td, &cga)) == 0)
		td->td_retval[0] = sizeof(cpuset_t);

	return (error);
}

/*
 * Set affinity of a process.
 */
int
linux_sched_setaffinity(struct thread *td,
    struct linux_sched_setaffinity_args *args)
{
	struct cpuset_setaffinity_args csa;

#ifdef DEBUG
	if (ldebug(sched_setaffinity))
		printf(ARGS(sched_setaffinity, "%d, %d, *"), args->pid,
		    args->len);
#endif
	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	csa.level = CPU_LEVEL_WHICH;
	csa.which = CPU_WHICH_PID;
	csa.id = args->pid;
	csa.cpusetsize = sizeof(cpuset_t);
	csa.mask = (cpuset_t *) args->user_mask_ptr;

	return (sys_cpuset_setaffinity(td, &csa));
}