/*-
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/blist.h>
#include <sys/fcntl.h>
#if defined(__i386__)
#include <sys/imgact_aout.h>
#endif
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#include <sys/cpuset.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/swap_pager.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif

#include <compat/linux/linux_dtrace.h>
#include <compat/linux/linux_file.h>
#include <compat/linux/linux_mib.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_timer.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_sysproto.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_misc.h>

/**
 * Special DTrace provider for the linuxulator.
 *
 * In this file we define the provider for the entire linuxulator. All
 * modules (= files of the linuxulator) use it.
 *
 * We define a different name depending on the emulated bitsize, see
 * ../../<ARCH>/linux{,32}/linux.h, e.g.:
 *	native bitsize		= linuxulator
 *	amd64, 32bit emulation	= linuxulator32
 */
LIN_SDT_PROVIDER_DEFINE(LINUX_DTRACE);

int stclohz;				/* Statistics clock frequency */

static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, RLIMIT_AS
};

struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
#define LINUX_SYSINFO_LOADS_SCALE 65536
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* Swap space still available */
	l_ushort	procs;		/* Number of current processes */
	l_ushort	pads;
	l_ulong		totalbig;
	l_ulong		freebig;
	l_uint		mem_unit;
	char		_f[20-2*sizeof(l_long)-sizeof(l_int)];	/* padding */
};

struct l_pselect6arg {
	l_uintptr_t	ss;
	l_size_t	ss_len;
};

static int	linux_utimensat_nsec_valid(l_long);


int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	vm_object_t object;
	int i, j;
	struct timespec ts;

	getnanouptime(&ts);
	if (ts.tv_nsec != 0)
		ts.tv_sec++;
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages */
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i] *
		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;

	sysinfo.totalram = physmem * PAGE_SIZE;
	sysinfo.freeram = sysinfo.totalram - vm_cnt.v_wire_count * PAGE_SIZE;

	sysinfo.sharedram = 0;
	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(object, &vm_object_list, object_list)
		if (object->shadow_count > 1)
			sysinfo.sharedram += object->resident_page_count;
	mtx_unlock(&vm_object_list_mtx);

	sysinfo.sharedram *= PAGE_SIZE;
	sysinfo.bufferram = 0;

	swap_pager_status(&i, &j);
	sysinfo.totalswap = i * PAGE_SIZE;
	sysinfo.freeswap = (i - j) * PAGE_SIZE;

	sysinfo.procs = nprocs;

	/* The following are only present in newer Linux kernels. */
	sysinfo.totalbig = 0;
	sysinfo.freebig = 0;
	sysinfo.mem_unit = 1;

	return (copyout(&sysinfo, args->info, sizeof(sysinfo)));
}

int
linux_alarm(struct thread *td, struct linux_alarm_args *args)
{
	struct itimerval it, old_it;
	u_int secs;

#ifdef DEBUG
	if (ldebug(alarm))
		printf(ARGS(alarm, "%u"), args->secs);
#endif

	secs = args->secs;

	if (secs > INT_MAX)
		secs = INT_MAX;

	it.it_value.tv_sec = (long) secs;
	it.it_value.tv_usec = 0;
	it.it_interval.tv_sec = 0;
	it.it_interval.tv_usec = 0;
	/*
	 * According to POSIX and the Linux implementation, the alarm()
	 * system call is always successful. Ignore errors and return 0,
	 * as Linux does.
	 */
213 */ 214 kern_setitimer(td, ITIMER_REAL, &it, &old_it); 215 if (timevalisset(&old_it.it_value)) { 216 if (old_it.it_value.tv_usec != 0) 217 old_it.it_value.tv_sec++; 218 td->td_retval[0] = old_it.it_value.tv_sec; 219 } 220 return (0); 221 } 222 223 int 224 linux_brk(struct thread *td, struct linux_brk_args *args) 225 { 226 struct vmspace *vm = td->td_proc->p_vmspace; 227 vm_offset_t new, old; 228 struct obreak_args /* { 229 char * nsize; 230 } */ tmp; 231 232 #ifdef DEBUG 233 if (ldebug(brk)) 234 printf(ARGS(brk, "%p"), (void *)(uintptr_t)args->dsend); 235 #endif 236 old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize); 237 new = (vm_offset_t)args->dsend; 238 tmp.nsize = (char *)new; 239 if (((caddr_t)new > vm->vm_daddr) && !sys_obreak(td, &tmp)) 240 td->td_retval[0] = (long)new; 241 else 242 td->td_retval[0] = (long)old; 243 244 return (0); 245 } 246 247 #if defined(__i386__) 248 /* XXX: what about amd64/linux32? */ 249 250 int 251 linux_uselib(struct thread *td, struct linux_uselib_args *args) 252 { 253 struct nameidata ni; 254 struct vnode *vp; 255 struct exec *a_out; 256 struct vattr attr; 257 vm_offset_t vmaddr; 258 unsigned long file_offset; 259 unsigned long bss_size; 260 char *library; 261 ssize_t aresid; 262 int error, locked, writecount; 263 264 LCONVPATHEXIST(td, args->library, &library); 265 266 #ifdef DEBUG 267 if (ldebug(uselib)) 268 printf(ARGS(uselib, "%s"), library); 269 #endif 270 271 a_out = NULL; 272 locked = 0; 273 vp = NULL; 274 275 NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1, 276 UIO_SYSSPACE, library, td); 277 error = namei(&ni); 278 LFREEPATH(library); 279 if (error) 280 goto cleanup; 281 282 vp = ni.ni_vp; 283 NDFREE(&ni, NDF_ONLY_PNBUF); 284 285 /* 286 * From here on down, we have a locked vnode that must be unlocked. 287 * XXX: The code below largely duplicates exec_check_permissions(). 288 */ 289 locked = 1; 290 291 /* Writable? */ 292 error = VOP_GET_WRITECOUNT(vp, &writecount); 293 if (error != 0) 294 goto cleanup; 295 if (writecount != 0) { 296 error = ETXTBSY; 297 goto cleanup; 298 } 299 300 /* Executable? */ 301 error = VOP_GETATTR(vp, &attr, td->td_ucred); 302 if (error) 303 goto cleanup; 304 305 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) || 306 ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) { 307 /* EACCESS is what exec(2) returns. */ 308 error = ENOEXEC; 309 goto cleanup; 310 } 311 312 /* Sensible size? */ 313 if (attr.va_size == 0) { 314 error = ENOEXEC; 315 goto cleanup; 316 } 317 318 /* Can we access it? */ 319 error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td); 320 if (error) 321 goto cleanup; 322 323 /* 324 * XXX: This should use vn_open() so that it is properly authorized, 325 * and to reduce code redundancy all over the place here. 326 * XXX: Not really, it duplicates far more of exec_check_permissions() 327 * than vn_open(). 328 */ 329 #ifdef MAC 330 error = mac_vnode_check_open(td->td_ucred, vp, VREAD); 331 if (error) 332 goto cleanup; 333 #endif 334 error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL); 335 if (error) 336 goto cleanup; 337 338 /* Pull in executable header into exec_map */ 339 error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE, 340 VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0); 341 if (error) 342 goto cleanup; 343 344 /* Is it a Linux binary ? */ 345 if (((a_out->a_magic >> 16) & 0xff) != 0x64) { 346 error = ENOEXEC; 347 goto cleanup; 348 } 349 350 /* 351 * While we are here, we should REALLY do some more checks 352 */ 353 354 /* Set file/virtual offset based on a.out variant. 
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:			/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:			/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete. it should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur_proc(td->td_proc, RLIMIT_DATA) ||
	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
	    bss_size) != 0) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * Prevent more writers.
	 * XXX: Note that if any of the VM operations fail below we don't
	 * clear this flag.
	 */
	VOP_SET_TEXT(vp);

	/*
	 * Lock no longer needed
	 */
	locked = 0;
	VOP_UNLOCK(vp, 0);

	/*
	 * Check if file_offset is page aligned. Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
#ifdef DEBUG
		printf("uselib: Non page aligned binary %lu\n", file_offset);
#endif
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
		    VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
		    td->td_ucred, NOCRED, &aresid, td);
		if (error != 0)
			goto cleanup;
		if (aresid != 0) {
			error = ENOEXEC;
			goto cleanup;
		}
	} else {
#ifdef DEBUG
		printf("uselib: Page aligned binary %lu\n", file_offset);
#endif
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
		if (error)
			goto cleanup;
	}
#ifdef DEBUG
	printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long *)vmaddr)[0],
	    ((long *)vmaddr)[1]);
#endif
	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	/* Unlock vnode if needed */
	if (locked)
		VOP_UNLOCK(vp, 0);

	/* Release the temporary mapping. */
	if (a_out)
		kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);

	return (error);
}

#endif	/* __i386__ */

int
linux_select(struct thread *td, struct linux_select_args *args)
{
	l_timeval ltv;
	struct timeval tv0, tv1, utv, *tvp;
	int error;

#ifdef DEBUG
	if (ldebug(select))
		printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds,
		    (void *)args->readfds, (void *)args->writefds,
		    (void *)args->exceptfds, (void *)args->timeout);
#endif

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (args->timeout) {
		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
			goto select_out;
		utv.tv_sec = ltv.tv_sec;
		utv.tv_usec = ltv.tv_usec;
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("incoming timeout (%jd/%ld)"),
			    (intmax_t)utv.tv_sec, utv.tv_usec);
#endif

		if (itimerfix(&utv)) {
			/*
			 * The timeval was invalid.  Convert it to something
			 * valid that will act as it does under Linux.
			 */
			utv.tv_sec += utv.tv_usec / 1000000;
			utv.tv_usec %= 1000000;
			if (utv.tv_usec < 0) {
				utv.tv_sec -= 1;
				utv.tv_usec += 1000000;
			}
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		}
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_select(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, LINUX_NFDBITS);

#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("real select returns %d"), error);
#endif
	if (error)
		goto select_out;

	if (args->timeout) {
		if (td->td_retval[0]) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */
			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("outgoing timeout (%jd/%ld)"),
			    (intmax_t)utv.tv_sec, utv.tv_usec);
#endif
		ltv.tv_sec = utv.tv_sec;
		ltv.tv_usec = utv.tv_usec;
		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
			goto select_out;
	}

select_out:
#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("select_out -> %d"), error);
#endif
	return (error);
}

int
linux_mremap(struct thread *td, struct linux_mremap_args *args)
{
	struct munmap_args /* {
		void *addr;
		size_t len;
	} */ bsd_args;
	int error = 0;

#ifdef DEBUG
	if (ldebug(mremap))
		printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"),
		    (void *)(uintptr_t)args->addr,
		    (unsigned long)args->old_len,
		    (unsigned long)args->new_len,
		    (unsigned long)args->flags);
#endif

	if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	/*
	 * Check for the page alignment.
	 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK.
	 */
609 */ 610 if (args->addr & PAGE_MASK) { 611 td->td_retval[0] = 0; 612 return (EINVAL); 613 } 614 615 args->new_len = round_page(args->new_len); 616 args->old_len = round_page(args->old_len); 617 618 if (args->new_len > args->old_len) { 619 td->td_retval[0] = 0; 620 return (ENOMEM); 621 } 622 623 if (args->new_len < args->old_len) { 624 bsd_args.addr = 625 (caddr_t)((uintptr_t)args->addr + args->new_len); 626 bsd_args.len = args->old_len - args->new_len; 627 error = sys_munmap(td, &bsd_args); 628 } 629 630 td->td_retval[0] = error ? 0 : (uintptr_t)args->addr; 631 return (error); 632 } 633 634 #define LINUX_MS_ASYNC 0x0001 635 #define LINUX_MS_INVALIDATE 0x0002 636 #define LINUX_MS_SYNC 0x0004 637 638 int 639 linux_msync(struct thread *td, struct linux_msync_args *args) 640 { 641 struct msync_args bsd_args; 642 643 bsd_args.addr = (caddr_t)(uintptr_t)args->addr; 644 bsd_args.len = (uintptr_t)args->len; 645 bsd_args.flags = args->fl & ~LINUX_MS_SYNC; 646 647 return (sys_msync(td, &bsd_args)); 648 } 649 650 int 651 linux_time(struct thread *td, struct linux_time_args *args) 652 { 653 struct timeval tv; 654 l_time_t tm; 655 int error; 656 657 #ifdef DEBUG 658 if (ldebug(time)) 659 printf(ARGS(time, "*")); 660 #endif 661 662 microtime(&tv); 663 tm = tv.tv_sec; 664 if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm)))) 665 return (error); 666 td->td_retval[0] = tm; 667 return (0); 668 } 669 670 struct l_times_argv { 671 l_clock_t tms_utime; 672 l_clock_t tms_stime; 673 l_clock_t tms_cutime; 674 l_clock_t tms_cstime; 675 }; 676 677 678 /* 679 * Glibc versions prior to 2.2.1 always use hard-coded CLK_TCK value. 680 * Since 2.2.1 Glibc uses value exported from kernel via AT_CLKTCK 681 * auxiliary vector entry. 682 */ 683 #define CLK_TCK 100 684 685 #define CONVOTCK(r) (r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK)) 686 #define CONVNTCK(r) (r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz)) 687 688 #define CONVTCK(r) (linux_kernver(td) >= LINUX_KERNVER_2004000 ? 

int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv, utime, stime, cutime, cstime;
	struct l_times_argv tms;
	struct proc *p;
	int error;

#ifdef DEBUG
	if (ldebug(times))
		printf(ARGS(times, "*"));
#endif

	if (args->buf != NULL) {
		p = td->td_proc;
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &utime, &stime);
		PROC_STATUNLOCK(p);
		calccru(p, &cutime, &cstime);
		PROC_UNLOCK(p);

		tms.tms_utime = CONVTCK(utime);
		tms.tms_stime = CONVTCK(stime);

		tms.tms_cutime = CONVTCK(cutime);
		tms.tms_cstime = CONVTCK(cstime);

		if ((error = copyout(&tms, args->buf, sizeof(tms))))
			return (error);
	}

	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return (0);
}

int
linux_newuname(struct thread *td, struct linux_newuname_args *args)
{
	struct l_new_utsname utsname;
	char osname[LINUX_MAX_UTSNAME];
	char osrelease[LINUX_MAX_UTSNAME];
	char *p;

#ifdef DEBUG
	if (ldebug(newuname))
		printf(ARGS(newuname, "*"));
#endif

	linux_get_osname(td, osname);
	linux_get_osrelease(td, osrelease);

	bzero(&utsname, sizeof(utsname));
	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
	getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME);
	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
	for (p = utsname.version; *p != '\0'; ++p)
		if (*p == '\n') {
			*p = '\0';
			break;
		}
	strlcpy(utsname.machine, linux_kplatform, LINUX_MAX_UTSNAME);

	return (copyout(&utsname, args->buf, sizeof(utsname)));
}

struct l_utimbuf {
	l_time_t l_actime;
	l_time_t l_modtime;
};

int
linux_utime(struct thread *td, struct linux_utime_args *args)
{
	struct timeval tv[2], *tvp;
	struct l_utimbuf lut;
	char *fname;
	int error;

	LCONVPATHEXIST(td, args->fname, &fname);

#ifdef DEBUG
	if (ldebug(utime))
		printf(ARGS(utime, "%s, *"), fname);
#endif

	if (args->times) {
		if ((error = copyin(args->times, &lut, sizeof lut))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = lut.l_actime;
		tv[0].tv_usec = 0;
		tv[1].tv_sec = lut.l_modtime;
		tv[1].tv_usec = 0;
		tvp = tv;
	} else
		tvp = NULL;

	error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE, tvp,
	    UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}

int
linux_utimes(struct thread *td, struct linux_utimes_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error;

	LCONVPATHEXIST(td, args->fname, &fname);

#ifdef DEBUG
	if (ldebug(utimes))
		printf(ARGS(utimes, "%s, *"), fname);
#endif

	if (args->tptr != NULL) {
		if ((error = copyin(args->tptr, ltv, sizeof ltv))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE,
	    tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}

static int
linux_utimensat_nsec_valid(l_long nsec)
{

	if (nsec == LINUX_UTIME_OMIT || nsec == LINUX_UTIME_NOW)
		return (0);
	if (nsec >= 0 && nsec <= 999999999)
		return (0);
	return (1);
}

int
linux_utimensat(struct thread *td, struct linux_utimensat_args *args)
{
	struct l_timespec l_times[2];
	struct timespec times[2], *timesp = NULL;
	char *path = NULL;
	int error, dfd, flags = 0;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;

#ifdef DEBUG
	if (ldebug(utimensat))
		printf(ARGS(utimensat, "%d, *"), dfd);
#endif

	if (args->flags & ~LINUX_AT_SYMLINK_NOFOLLOW)
		return (EINVAL);

	if (args->times != NULL) {
		error = copyin(args->times, l_times, sizeof(l_times));
		if (error != 0)
			return (error);

		if (linux_utimensat_nsec_valid(l_times[0].tv_nsec) != 0 ||
		    linux_utimensat_nsec_valid(l_times[1].tv_nsec) != 0)
			return (EINVAL);

		times[0].tv_sec = l_times[0].tv_sec;
		switch (l_times[0].tv_nsec)
		{
		case LINUX_UTIME_OMIT:
			times[0].tv_nsec = UTIME_OMIT;
			break;
		case LINUX_UTIME_NOW:
			times[0].tv_nsec = UTIME_NOW;
			break;
		default:
			times[0].tv_nsec = l_times[0].tv_nsec;
		}

		times[1].tv_sec = l_times[1].tv_sec;
		switch (l_times[1].tv_nsec)
		{
		case LINUX_UTIME_OMIT:
			times[1].tv_nsec = UTIME_OMIT;
			break;
		case LINUX_UTIME_NOW:
			times[1].tv_nsec = UTIME_NOW;
			break;
		default:
			times[1].tv_nsec = l_times[1].tv_nsec;
			break;
		}
		timesp = times;

		/*
		 * This breaks POSIX, but is what the Linux kernel does
		 * _on purpose_ (documented in the man page for utimensat(2)),
		 * so we must follow that behaviour.
		 *
		 * Do this check here, while times[] is known to be
		 * initialized, rather than after the block.
		 */
		if (times[0].tv_nsec == UTIME_OMIT &&
		    times[1].tv_nsec == UTIME_OMIT)
			return (0);
	}

	if (args->pathname != NULL)
		LCONVPATHEXIST_AT(td, args->pathname, &path, dfd);
	else if (args->flags != 0)
		return (EINVAL);

	if (args->flags & LINUX_AT_SYMLINK_NOFOLLOW)
		flags |= AT_SYMLINK_NOFOLLOW;

	if (path == NULL)
		error = kern_futimens(td, dfd, timesp, UIO_SYSSPACE);
	else {
		error = kern_utimensat(td, dfd, path, UIO_SYSSPACE, timesp,
		    UIO_SYSSPACE, flags);
		LFREEPATH(path);
	}

	return (error);
}

int
linux_futimesat(struct thread *td, struct linux_futimesat_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
	LCONVPATHEXIST_AT(td, args->filename, &fname, dfd);

#ifdef DEBUG
	if (ldebug(futimesat))
		printf(ARGS(futimesat, "%s, *"), fname);
#endif

	if (args->utimes != NULL) {
		if ((error = copyin(args->utimes, ltv, sizeof ltv))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}

int
linux_common_wait(struct thread *td, int pid, int *status,
    int options, struct rusage *ru)
{
	int error, tmpstat;

	error = kern_wait(td, pid, &tmpstat, options, ru);
	if (error)
		return (error);

	if (status) {
		tmpstat &= 0xffff;
		if (WIFSIGNALED(tmpstat))
			tmpstat = (tmpstat & 0xffffff80) |
			    bsd_to_linux_signal(WTERMSIG(tmpstat));
		else if (WIFSTOPPED(tmpstat))
			tmpstat = (tmpstat & 0xffff00ff) |
			    (bsd_to_linux_signal(WSTOPSIG(tmpstat)) << 8);
		else if (WIFCONTINUED(tmpstat))
			tmpstat = 0xffff;
		error = copyout(&tmpstat, status, sizeof(int));
	}

	return (error);
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
{
	struct linux_wait4_args wait4_args;

#ifdef DEBUG
	if (ldebug(waitpid))
		printf(ARGS(waitpid, "%d, %p, %d"),
		    args->pid, (void *)args->status, args->options);
#endif

	wait4_args.pid = args->pid;
	wait4_args.status = args->status;
	wait4_args.options = args->options;
	wait4_args.rusage = NULL;

	return (linux_wait4(td, &wait4_args));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_wait4(struct thread *td, struct linux_wait4_args *args)
{
	int error, options;
	struct rusage ru, *rup;

#ifdef DEBUG
	if (ldebug(wait4))
		printf(ARGS(wait4, "%d, %p, %d, %p"),
		    args->pid, (void *)args->status, args->options,
		    (void *)args->rusage);
#endif
	if (args->options & ~(LINUX_WUNTRACED | LINUX_WNOHANG |
	    LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL))
		return (EINVAL);

	options = WEXITED;
	linux_to_bsd_waitopts(args->options, &options);

	if (args->rusage != NULL)
		rup = &ru;
	else
		rup = NULL;
	error = linux_common_wait(td, args->pid, args->status, options, rup);
	if (error != 0)
		return (error);
	if (args->rusage != NULL)
		error = linux_copyout_rusage(&ru, args->rusage);
	return (error);
}

int
linux_waitid(struct thread *td, struct linux_waitid_args *args)
{
	int status, options, sig;
	struct __wrusage wru;
	siginfo_t siginfo;
	l_siginfo_t lsi;
	idtype_t idtype;
	struct proc *p;
	int error;

	options = 0;
	linux_to_bsd_waitopts(args->options, &options);

	if (options & ~(WNOHANG | WNOWAIT | WEXITED | WUNTRACED | WCONTINUED))
		return (EINVAL);
	if (!(options & (WEXITED | WUNTRACED | WCONTINUED)))
		return (EINVAL);

	switch (args->idtype) {
	case LINUX_P_ALL:
		idtype = P_ALL;
		break;
	case LINUX_P_PID:
		if (args->id <= 0)
			return (EINVAL);
		idtype = P_PID;
		break;
	case LINUX_P_PGID:
		if (args->id <= 0)
			return (EINVAL);
		idtype = P_PGID;
		break;
	default:
		return (EINVAL);
	}

	error = kern_wait6(td, idtype, args->id, &status, options,
	    &wru, &siginfo);
	if (error != 0)
		return (error);
	if (args->rusage != NULL) {
		error = linux_copyout_rusage(&wru.wru_children,
		    args->rusage);
		if (error != 0)
			return (error);
	}
	if (args->info != NULL) {
		p = td->td_proc;
		if (td->td_retval[0] == 0)
			bzero(&lsi, sizeof(lsi));
		else {
			sig = bsd_to_linux_signal(siginfo.si_signo);
			siginfo_to_lsiginfo(&siginfo, &lsi, sig);
		}
		error = copyout(&lsi, args->info, sizeof(lsi));
	}
	td->td_retval[0] = 0;

	return (error);
}

int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	char *path;
	int error;

	LCONVPATHCREAT(td, args->path, &path);

#ifdef DEBUG
	if (ldebug(mknod))
		printf(ARGS(mknod, "%s, %d, %ju"), path, args->mode,
		    (uintmax_t)args->dev);
#endif

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, AT_FDCWD, path, UIO_SYSSPACE,
		    args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, AT_FDCWD, path, UIO_SYSSPACE,
		    args->mode, args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, AT_FDCWD, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}

int
linux_mknodat(struct thread *td, struct linux_mknodat_args *args)
{
	char *path;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
	LCONVPATHCREAT_AT(td, args->filename, &path, dfd);

#ifdef DEBUG
	if (ldebug(mknodat))
		printf(ARGS(mknodat, "%s, %d, %d"), path, args->mode, args->dev);
#endif

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, dfd, path, UIO_SYSSPACE, args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, dfd, path, UIO_SYSSPACE, args->mode,
		    args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, dfd, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}

/*
 * UGH! This is just about the dumbest idea I've ever heard!!
 */
int
linux_personality(struct thread *td, struct linux_personality_args *args)
{
#ifdef DEBUG
	if (ldebug(personality))
		printf(ARGS(personality, "%lu"), (unsigned long)args->per);
#endif
	if (args->per != 0)
		return (EINVAL);

	/* Yes Jim, it's still a Linux... */
	td->td_retval[0] = 0;
	return (0);
}

struct l_itimerval {
	l_timeval it_interval;
	l_timeval it_value;
};

#define	B2L_ITIMERVAL(bip, lip)						\
	(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec;		\
	(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec;	\
	(bip)->it_value.tv_sec = (lip)->it_value.tv_sec;		\
	(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;

int
linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv, oitv;

#ifdef DEBUG
	if (ldebug(setitimer))
		printf(ARGS(setitimer, "%p, %p"),
		    (void *)uap->itv, (void *)uap->oitv);
#endif

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
	}

	error = copyin(uap->itv, &ls, sizeof(ls));
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&aitv, &ls);
#ifdef DEBUG
	if (ldebug(setitimer)) {
		printf("setitimer: value: sec: %jd, usec: %ld\n",
		    (intmax_t)aitv.it_value.tv_sec, aitv.it_value.tv_usec);
		printf("setitimer: interval: sec: %jd, usec: %ld\n",
		    (intmax_t)aitv.it_interval.tv_sec, aitv.it_interval.tv_usec);
	}
#endif
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	B2L_ITIMERVAL(&ls, &oitv);

	return (copyout(&ls, uap->oitv, sizeof(ls)));
}

int
linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv;

#ifdef DEBUG
	if (ldebug(getitimer))
		printf(ARGS(getitimer, "%p"), (void *)uap->itv);
#endif
	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&ls, &aitv);
	return (copyout(&ls, uap->itv, sizeof(ls)));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_nice(struct thread *td, struct linux_nice_args *args)
{
	struct setpriority_args bsd_args;

	bsd_args.which = PRIO_PROCESS;
	bsd_args.who = 0;		/* current process */
	bsd_args.prio = args->inc;
	return (sys_setpriority(td, &bsd_args));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int ngrp, error;
	struct proc *p;

	ngrp = args->gidsetsize;
	if (ngrp < 0 || ngrp >= ngroups_max + 1)
		return (EINVAL);
	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
	if (error)
		goto out;
	newcred = crget();
	crextend(newcred, ngrp + 1);
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = p->p_ucred;
	crcopy(newcred, oldcred);

	/*
	 * cr_groups[0] holds egid. Setting the whole set from
	 * the supplied set will cause egid to be changed too.
	 * Keep cr_groups[0] unchanged to prevent that.
	 */
1319 */ 1320 1321 if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS, 0)) != 0) { 1322 PROC_UNLOCK(p); 1323 crfree(newcred); 1324 goto out; 1325 } 1326 1327 if (ngrp > 0) { 1328 newcred->cr_ngroups = ngrp + 1; 1329 1330 bsd_gidset = newcred->cr_groups; 1331 ngrp--; 1332 while (ngrp >= 0) { 1333 bsd_gidset[ngrp + 1] = linux_gidset[ngrp]; 1334 ngrp--; 1335 } 1336 } else 1337 newcred->cr_ngroups = 1; 1338 1339 setsugid(p); 1340 proc_set_cred(p, newcred); 1341 PROC_UNLOCK(p); 1342 crfree(oldcred); 1343 error = 0; 1344 out: 1345 free(linux_gidset, M_LINUX); 1346 return (error); 1347 } 1348 1349 int 1350 linux_getgroups(struct thread *td, struct linux_getgroups_args *args) 1351 { 1352 struct ucred *cred; 1353 l_gid_t *linux_gidset; 1354 gid_t *bsd_gidset; 1355 int bsd_gidsetsz, ngrp, error; 1356 1357 cred = td->td_ucred; 1358 bsd_gidset = cred->cr_groups; 1359 bsd_gidsetsz = cred->cr_ngroups - 1; 1360 1361 /* 1362 * cr_groups[0] holds egid. Returning the whole set 1363 * here will cause a duplicate. Exclude cr_groups[0] 1364 * to prevent that. 1365 */ 1366 1367 if ((ngrp = args->gidsetsize) == 0) { 1368 td->td_retval[0] = bsd_gidsetsz; 1369 return (0); 1370 } 1371 1372 if (ngrp < bsd_gidsetsz) 1373 return (EINVAL); 1374 1375 ngrp = 0; 1376 linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset), 1377 M_LINUX, M_WAITOK); 1378 while (ngrp < bsd_gidsetsz) { 1379 linux_gidset[ngrp] = bsd_gidset[ngrp + 1]; 1380 ngrp++; 1381 } 1382 1383 error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t)); 1384 free(linux_gidset, M_LINUX); 1385 if (error) 1386 return (error); 1387 1388 td->td_retval[0] = ngrp; 1389 return (0); 1390 } 1391 1392 int 1393 linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args) 1394 { 1395 struct rlimit bsd_rlim; 1396 struct l_rlimit rlim; 1397 u_int which; 1398 int error; 1399 1400 #ifdef DEBUG 1401 if (ldebug(setrlimit)) 1402 printf(ARGS(setrlimit, "%d, %p"), 1403 args->resource, (void *)args->rlim); 1404 #endif 1405 1406 if (args->resource >= LINUX_RLIM_NLIMITS) 1407 return (EINVAL); 1408 1409 which = linux_to_bsd_resource[args->resource]; 1410 if (which == -1) 1411 return (EINVAL); 1412 1413 error = copyin(args->rlim, &rlim, sizeof(rlim)); 1414 if (error) 1415 return (error); 1416 1417 bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur; 1418 bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max; 1419 return (kern_setrlimit(td, which, &bsd_rlim)); 1420 } 1421 1422 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 1423 int 1424 linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args) 1425 { 1426 struct l_rlimit rlim; 1427 struct rlimit bsd_rlim; 1428 u_int which; 1429 1430 #ifdef DEBUG 1431 if (ldebug(old_getrlimit)) 1432 printf(ARGS(old_getrlimit, "%d, %p"), 1433 args->resource, (void *)args->rlim); 1434 #endif 1435 1436 if (args->resource >= LINUX_RLIM_NLIMITS) 1437 return (EINVAL); 1438 1439 which = linux_to_bsd_resource[args->resource]; 1440 if (which == -1) 1441 return (EINVAL); 1442 1443 lim_rlimit(td, which, &bsd_rlim); 1444 1445 #ifdef COMPAT_LINUX32 1446 rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur; 1447 if (rlim.rlim_cur == UINT_MAX) 1448 rlim.rlim_cur = INT_MAX; 1449 rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max; 1450 if (rlim.rlim_max == UINT_MAX) 1451 rlim.rlim_max = INT_MAX; 1452 #else 1453 rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur; 1454 if (rlim.rlim_cur == ULONG_MAX) 1455 rlim.rlim_cur = LONG_MAX; 1456 rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max; 1457 if (rlim.rlim_max == ULONG_MAX) 1458 
#endif
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(getrlimit))
		printf(ARGS(getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	lim_rlimit(td, which, &bsd_rlim);

	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}

int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

#ifdef DEBUG
	if (ldebug(sched_setscheduler))
		printf(ARGS(sched_setscheduler, "%d, %d, %p"),
		    args->pid, args->policy, (const void *)args->param);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}

	error = copyin(args->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_setscheduler(td, tdt, policy, &sched_param);
	PROC_UNLOCK(tdt->td_proc);
	return (error);
}

int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct thread *tdt;
	int error, policy;

#ifdef DEBUG
	if (ldebug(sched_getscheduler))
		printf(ARGS(sched_getscheduler, "%d"), args->pid);
#endif

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_getscheduler(td, tdt, &policy);
	PROC_UNLOCK(tdt->td_proc);

	switch (policy) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}
	return (error);
}

int
linux_sched_get_priority_max(struct thread *td,
    struct linux_sched_get_priority_max_args *args)
{
	struct sched_get_priority_max_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_max))
		printf(ARGS(sched_get_priority_max, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_max(td, &bsd));
}

int
linux_sched_get_priority_min(struct thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_min))
		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_min(td, &bsd));
}

#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123
#define REBOOT_RESTART	0x01234567
#define REBOOT_RESTART2	0xA1B2C3D4
#define REBOOT_POWEROFF	0x4321FEDC
#define REBOOT_MAGIC1	0xfee1dead
#define REBOOT_MAGIC2	0x28121969
#define REBOOT_MAGIC2A	0x05121996
#define REBOOT_MAGIC2B	0x16041998

int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

#ifdef DEBUG
	if (ldebug(reboot))
		printf(ARGS(reboot, "0x%x"), args->cmd);
#endif

	if (args->magic1 != REBOOT_MAGIC1)
		return (EINVAL);

	switch (args->magic2) {
	case REBOOT_MAGIC2:
	case REBOOT_MAGIC2A:
	case REBOOT_MAGIC2B:
		break;
	default:
		return (EINVAL);
	}

	switch (args->cmd) {
	case REBOOT_CAD_ON:
	case REBOOT_CAD_OFF:
		return (priv_check(td, PRIV_REBOOT));
	case REBOOT_HALT:
		bsd_args.opt = RB_HALT;
		break;
	case REBOOT_RESTART:
	case REBOOT_RESTART2:
		bsd_args.opt = 0;
		break;
	case REBOOT_POWEROFF:
		bsd_args.opt = RB_POWEROFF;
		break;
	default:
		return (EINVAL);
	}
	return (sys_reboot(td, &bsd_args));
}


/*
 * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
 * td->td_retval[1] when COMPAT_43 is defined. This clobbers registers that
 * are assumed to be preserved. The following lightweight syscalls fix this.
 * See also linux_getgid16() and linux_getuid16() in linux_uid16.c
 *
 * linux_getpid() - MP SAFE
 * linux_getgid() - MP SAFE
 * linux_getuid() - MP SAFE
 */

int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{

#ifdef DEBUG
	if (ldebug(getpid))
		printf(ARGS(getpid, ""));
#endif
	td->td_retval[0] = td->td_proc->p_pid;

	return (0);
}

int
linux_gettid(struct thread *td, struct linux_gettid_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(gettid))
		printf(ARGS(gettid, ""));
#endif

	em = em_find(td);
	KASSERT(em != NULL, ("gettid: emuldata not found.\n"));

	td->td_retval[0] = em->em_tid;

	return (0);
}


int
linux_getppid(struct thread *td, struct linux_getppid_args *args)
{

#ifdef DEBUG
	if (ldebug(getppid))
		printf(ARGS(getppid, ""));
#endif

	PROC_LOCK(td->td_proc);
	td->td_retval[0] = td->td_proc->p_pptr->p_pid;
	PROC_UNLOCK(td->td_proc);
	return (0);
}

int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

#ifdef DEBUG
	if (ldebug(getgid))
		printf(ARGS(getgid, ""));
#endif

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}

int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

#ifdef DEBUG
	if (ldebug(getuid))
		printf(ARGS(getuid, ""));
#endif

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}


int
linux_getsid(struct thread *td, struct linux_getsid_args *args)
{
	struct getsid_args bsd;

#ifdef DEBUG
	if (ldebug(getsid))
		printf(ARGS(getsid, "%i"), args->pid);
#endif

	bsd.pid = args->pid;
	return (sys_getsid(td, &bsd));
}

int
linux_nosys(struct thread *td, struct nosys_args *ignore)
{

	return (ENOSYS);
}

int
linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
{
	struct getpriority_args bsd_args;
	int error;

#ifdef DEBUG
	if (ldebug(getpriority))
		printf(ARGS(getpriority, "%i, %i"), args->which, args->who);
#endif

	bsd_args.which = args->which;
	bsd_args.who = args->who;
	error = sys_getpriority(td, &bsd_args);
	td->td_retval[0] = 20 - td->td_retval[0];
	return (error);
}

int
linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
{
	int name[2];

#ifdef DEBUG
	if (ldebug(sethostname))
		printf(ARGS(sethostname, "*, %i"), args->len);
#endif

	name[0] = CTL_KERN;
	name[1] = KERN_HOSTNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
	    args->len, 0, 0));
}

int
linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
{
	int name[2];

#ifdef DEBUG
	if (ldebug(setdomainname))
		printf(ARGS(setdomainname, "*, %i"), args->len);
#endif

	name[0] = CTL_KERN;
	name[1] = KERN_NISDOMAINNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
	    args->len, 0, 0));
}

int
linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
{

#ifdef DEBUG
	if (ldebug(exit_group))
		printf(ARGS(exit_group, "%i"), args->error_code);
#endif

	LINUX_CTR2(exit_group, "thread(%d) (%d)", td->td_tid,
	    args->error_code);

	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set. We ignore that (temporarily?)
	 * as it doesn't occur often.
	 */
	exit1(td, args->error_code, 0);
		/* NOTREACHED */
}

#define _LINUX_CAPABILITY_VERSION  0x19980330

struct l_user_cap_header {
	l_int	version;
	l_int	pid;
};

struct l_user_cap_data {
	l_int	effective;
	l_int	permitted;
	l_int	inheritable;
};

int
linux_capget(struct thread *td, struct linux_capget_args *args)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd;
	int error;

	if (args->hdrp == NULL)
		return (EFAULT);

	error = copyin(args->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	if (luch.version != _LINUX_CAPABILITY_VERSION) {
		luch.version = _LINUX_CAPABILITY_VERSION;
		error = copyout(&luch, args->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	if (args->datap) {
		/*
		 * The current implementation doesn't support setting
		 * a capability (it's essentially a stub) so indicate
		 * that no capabilities are currently set or available
		 * to request.
		 */
		bzero(&lucd, sizeof(lucd));
		error = copyout(&lucd, args->datap, sizeof(lucd));
	}

	return (error);
}

int
linux_capset(struct thread *td, struct linux_capset_args *args)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd;
	int error;

	if (args->hdrp == NULL || args->datap == NULL)
		return (EFAULT);

	error = copyin(args->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	if (luch.version != _LINUX_CAPABILITY_VERSION) {
		luch.version = _LINUX_CAPABILITY_VERSION;
		error = copyout(&luch, args->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	error = copyin(args->datap, &lucd, sizeof(lucd));
	if (error != 0)
		return (error);

	/* We currently don't support setting any capabilities. */
	if (lucd.effective || lucd.permitted || lucd.inheritable) {
		linux_msg(td,
		    "capset effective=0x%x, permitted=0x%x, "
		    "inheritable=0x%x is not implemented",
		    (int)lucd.effective, (int)lucd.permitted,
		    (int)lucd.inheritable);
		return (EPERM);
	}

	return (0);
}

int
linux_prctl(struct thread *td, struct linux_prctl_args *args)
{
	int error = 0, max_size;
	struct proc *p = td->td_proc;
	char comm[LINUX_MAX_COMM_LEN];
	struct linux_emuldata *em;
	int pdeath_signal;

#ifdef DEBUG
	if (ldebug(prctl))
		printf(ARGS(prctl, "%d, %ju, %ju, %ju, %ju"), args->option,
		    (uintmax_t)args->arg2, (uintmax_t)args->arg3,
		    (uintmax_t)args->arg4, (uintmax_t)args->arg5);
#endif

	switch (args->option) {
	case LINUX_PR_SET_PDEATHSIG:
		if (!LINUX_SIG_VALID(args->arg2))
			return (EINVAL);
		em = em_find(td);
		KASSERT(em != NULL, ("prctl: emuldata not found.\n"));
		em->pdeath_signal = args->arg2;
		break;
	case LINUX_PR_GET_PDEATHSIG:
		em = em_find(td);
		KASSERT(em != NULL, ("prctl: emuldata not found.\n"));
		pdeath_signal = em->pdeath_signal;
		error = copyout(&pdeath_signal,
		    (void *)(register_t)args->arg2,
		    sizeof(pdeath_signal));
		break;
	case LINUX_PR_GET_KEEPCAPS:
		/*
		 * Indicate that we always clear the effective and
		 * permitted capability sets when the user id becomes
		 * non-zero (actually the capability sets are simply
		 * always zero in the current implementation).
		 */
		td->td_retval[0] = 0;
		break;
	case LINUX_PR_SET_KEEPCAPS:
		/*
		 * Ignore requests to keep the effective and permitted
		 * capability sets when the user id becomes non-zero.
		 */
		break;
	case LINUX_PR_SET_NAME:
		/*
		 * To be on the safe side we need to make sure not to
		 * overflow the size a Linux program expects. We already
		 * do this here in the copyin, so that we don't need to
		 * check on copyout.
		 */
		max_size = MIN(sizeof(comm), sizeof(p->p_comm));
		error = copyinstr((void *)(register_t)args->arg2, comm,
		    max_size, NULL);

		/* Linux silently truncates the name if it is too long. */
		if (error == ENAMETOOLONG) {
			/*
			 * XXX: copyinstr() isn't documented to populate the
			 * array completely, so do a copyin() to be on the
			 * safe side. This should be changed in case
			 * copyinstr() is changed to guarantee this.
			 */
2009 */ 2010 error = copyin((void *)(register_t)args->arg2, comm, 2011 max_size - 1); 2012 comm[max_size - 1] = '\0'; 2013 } 2014 if (error) 2015 return (error); 2016 2017 PROC_LOCK(p); 2018 strlcpy(p->p_comm, comm, sizeof(p->p_comm)); 2019 PROC_UNLOCK(p); 2020 break; 2021 case LINUX_PR_GET_NAME: 2022 PROC_LOCK(p); 2023 strlcpy(comm, p->p_comm, sizeof(comm)); 2024 PROC_UNLOCK(p); 2025 error = copyout(comm, (void *)(register_t)args->arg2, 2026 strlen(comm) + 1); 2027 break; 2028 default: 2029 error = EINVAL; 2030 break; 2031 } 2032 2033 return (error); 2034 } 2035 2036 int 2037 linux_sched_setparam(struct thread *td, 2038 struct linux_sched_setparam_args *uap) 2039 { 2040 struct sched_param sched_param; 2041 struct thread *tdt; 2042 int error; 2043 2044 #ifdef DEBUG 2045 if (ldebug(sched_setparam)) 2046 printf(ARGS(sched_setparam, "%d, *"), uap->pid); 2047 #endif 2048 2049 error = copyin(uap->param, &sched_param, sizeof(sched_param)); 2050 if (error) 2051 return (error); 2052 2053 tdt = linux_tdfind(td, uap->pid, -1); 2054 if (tdt == NULL) 2055 return (ESRCH); 2056 2057 error = kern_sched_setparam(td, tdt, &sched_param); 2058 PROC_UNLOCK(tdt->td_proc); 2059 return (error); 2060 } 2061 2062 int 2063 linux_sched_getparam(struct thread *td, 2064 struct linux_sched_getparam_args *uap) 2065 { 2066 struct sched_param sched_param; 2067 struct thread *tdt; 2068 int error; 2069 2070 #ifdef DEBUG 2071 if (ldebug(sched_getparam)) 2072 printf(ARGS(sched_getparam, "%d, *"), uap->pid); 2073 #endif 2074 2075 tdt = linux_tdfind(td, uap->pid, -1); 2076 if (tdt == NULL) 2077 return (ESRCH); 2078 2079 error = kern_sched_getparam(td, tdt, &sched_param); 2080 PROC_UNLOCK(tdt->td_proc); 2081 if (error == 0) 2082 error = copyout(&sched_param, uap->param, 2083 sizeof(sched_param)); 2084 return (error); 2085 } 2086 2087 /* 2088 * Get affinity of a process. 2089 */ 2090 int 2091 linux_sched_getaffinity(struct thread *td, 2092 struct linux_sched_getaffinity_args *args) 2093 { 2094 int error; 2095 struct thread *tdt; 2096 struct cpuset_getaffinity_args cga; 2097 2098 #ifdef DEBUG 2099 if (ldebug(sched_getaffinity)) 2100 printf(ARGS(sched_getaffinity, "%d, %d, *"), args->pid, 2101 args->len); 2102 #endif 2103 if (args->len < sizeof(cpuset_t)) 2104 return (EINVAL); 2105 2106 tdt = linux_tdfind(td, args->pid, -1); 2107 if (tdt == NULL) 2108 return (ESRCH); 2109 2110 PROC_UNLOCK(tdt->td_proc); 2111 cga.level = CPU_LEVEL_WHICH; 2112 cga.which = CPU_WHICH_TID; 2113 cga.id = tdt->td_tid; 2114 cga.cpusetsize = sizeof(cpuset_t); 2115 cga.mask = (cpuset_t *) args->user_mask_ptr; 2116 2117 if ((error = sys_cpuset_getaffinity(td, &cga)) == 0) 2118 td->td_retval[0] = sizeof(cpuset_t); 2119 2120 return (error); 2121 } 2122 2123 /* 2124 * Set affinity of a process. 
2125 */ 2126 int 2127 linux_sched_setaffinity(struct thread *td, 2128 struct linux_sched_setaffinity_args *args) 2129 { 2130 struct cpuset_setaffinity_args csa; 2131 struct thread *tdt; 2132 2133 #ifdef DEBUG 2134 if (ldebug(sched_setaffinity)) 2135 printf(ARGS(sched_setaffinity, "%d, %d, *"), args->pid, 2136 args->len); 2137 #endif 2138 if (args->len < sizeof(cpuset_t)) 2139 return (EINVAL); 2140 2141 tdt = linux_tdfind(td, args->pid, -1); 2142 if (tdt == NULL) 2143 return (ESRCH); 2144 2145 PROC_UNLOCK(tdt->td_proc); 2146 csa.level = CPU_LEVEL_WHICH; 2147 csa.which = CPU_WHICH_TID; 2148 csa.id = tdt->td_tid; 2149 csa.cpusetsize = sizeof(cpuset_t); 2150 csa.mask = (cpuset_t *) args->user_mask_ptr; 2151 2152 return (sys_cpuset_setaffinity(td, &csa)); 2153 } 2154 2155 struct linux_rlimit64 { 2156 uint64_t rlim_cur; 2157 uint64_t rlim_max; 2158 }; 2159 2160 int 2161 linux_prlimit64(struct thread *td, struct linux_prlimit64_args *args) 2162 { 2163 struct rlimit rlim, nrlim; 2164 struct linux_rlimit64 lrlim; 2165 struct proc *p; 2166 u_int which; 2167 int flags; 2168 int error; 2169 2170 #ifdef DEBUG 2171 if (ldebug(prlimit64)) 2172 printf(ARGS(prlimit64, "%d, %d, %p, %p"), args->pid, 2173 args->resource, (void *)args->new, (void *)args->old); 2174 #endif 2175 2176 if (args->resource >= LINUX_RLIM_NLIMITS) 2177 return (EINVAL); 2178 2179 which = linux_to_bsd_resource[args->resource]; 2180 if (which == -1) 2181 return (EINVAL); 2182 2183 if (args->new != NULL) { 2184 /* 2185 * Note. Unlike FreeBSD where rlim is signed 64-bit Linux 2186 * rlim is unsigned 64-bit. FreeBSD treats negative limits 2187 * as INFINITY so we do not need a conversion even. 2188 */ 2189 error = copyin(args->new, &nrlim, sizeof(nrlim)); 2190 if (error != 0) 2191 return (error); 2192 } 2193 2194 flags = PGET_HOLD | PGET_NOTWEXIT; 2195 if (args->new != NULL) 2196 flags |= PGET_CANDEBUG; 2197 else 2198 flags |= PGET_CANSEE; 2199 error = pget(args->pid, flags, &p); 2200 if (error != 0) 2201 return (error); 2202 2203 if (args->old != NULL) { 2204 PROC_LOCK(p); 2205 lim_rlimit_proc(p, which, &rlim); 2206 PROC_UNLOCK(p); 2207 if (rlim.rlim_cur == RLIM_INFINITY) 2208 lrlim.rlim_cur = LINUX_RLIM_INFINITY; 2209 else 2210 lrlim.rlim_cur = rlim.rlim_cur; 2211 if (rlim.rlim_max == RLIM_INFINITY) 2212 lrlim.rlim_max = LINUX_RLIM_INFINITY; 2213 else 2214 lrlim.rlim_max = rlim.rlim_max; 2215 error = copyout(&lrlim, args->old, sizeof(lrlim)); 2216 if (error != 0) 2217 goto out; 2218 } 2219 2220 if (args->new != NULL) 2221 error = kern_proc_setrlimit(td, p, which, &nrlim); 2222 2223 out: 2224 PRELE(p); 2225 return (error); 2226 } 2227 2228 int 2229 linux_pselect6(struct thread *td, struct linux_pselect6_args *args) 2230 { 2231 struct timeval utv, tv0, tv1, *tvp; 2232 struct l_pselect6arg lpse6; 2233 struct l_timespec lts; 2234 struct timespec uts; 2235 l_sigset_t l_ss; 2236 sigset_t *ssp; 2237 sigset_t ss; 2238 int error; 2239 2240 ssp = NULL; 2241 if (args->sig != NULL) { 2242 error = copyin(args->sig, &lpse6, sizeof(lpse6)); 2243 if (error != 0) 2244 return (error); 2245 if (lpse6.ss_len != sizeof(l_ss)) 2246 return (EINVAL); 2247 if (lpse6.ss != 0) { 2248 error = copyin(PTRIN(lpse6.ss), &l_ss, 2249 sizeof(l_ss)); 2250 if (error != 0) 2251 return (error); 2252 linux_to_bsd_sigset(&l_ss, &ss); 2253 ssp = &ss; 2254 } 2255 } 2256 2257 /* 2258 * Currently glibc changes nanosecond number to microsecond. 2259 * This mean losing precision but for now it is hardly seen. 
	if (args->tsp != NULL) {
		error = copyin(args->tsp, &lts, sizeof(lts));
		if (error != 0)
			return (error);
		error = linux_to_native_timespec(&uts, &lts);
		if (error != 0)
			return (error);

		TIMESPEC_TO_TIMEVAL(&utv, &uts);
		if (itimerfix(&utv))
			return (EINVAL);

		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_pselect(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, ssp, LINUX_NFDBITS);

	if (error == 0 && args->tsp != NULL) {
		if (td->td_retval[0] != 0) {
			/*
			 * Compute how much of the timeout was left by taking
			 * the difference between the current time and the
			 * time before we started the call, and subtracting
			 * that from the user-supplied value.
			 */

			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);

		TIMEVAL_TO_TIMESPEC(&utv, &uts);

		native_to_linux_timespec(&lts, &uts);
		error = copyout(&lts, args->tsp, sizeof(lts));
	}

	return (error);
}

int
linux_ppoll(struct thread *td, struct linux_ppoll_args *args)
{
	struct timespec ts0, ts1;
	struct l_timespec lts;
	struct timespec uts, *tsp;
	l_sigset_t l_ss;
	sigset_t *ssp;
	sigset_t ss;
	int error;

	if (args->sset != NULL) {
		if (args->ssize != sizeof(l_ss))
			return (EINVAL);
		error = copyin(args->sset, &l_ss, sizeof(l_ss));
		if (error)
			return (error);
		linux_to_bsd_sigset(&l_ss, &ss);
		ssp = &ss;
	} else
		ssp = NULL;
	if (args->tsp != NULL) {
		error = copyin(args->tsp, &lts, sizeof(lts));
		if (error)
			return (error);
		error = linux_to_native_timespec(&uts, &lts);
		if (error != 0)
			return (error);

		nanotime(&ts0);
		tsp = &uts;
	} else
		tsp = NULL;

	error = kern_poll(td, args->fds, args->nfds, tsp, ssp);

	if (error == 0 && args->tsp != NULL) {
		if (td->td_retval[0]) {
			nanotime(&ts1);
			timespecsub(&ts1, &ts0);
			timespecsub(&uts, &ts1);
			if (uts.tv_sec < 0)
				timespecclear(&uts);
		} else
			timespecclear(&uts);

		native_to_linux_timespec(&lts, &uts);
		error = copyout(&lts, args->tsp, sizeof(lts));
	}

	return (error);
}

#if defined(DEBUG) || defined(KTR)
/* XXX: can be removed once all the ldebug(...) and KTR usage is removed. */

#ifdef COMPAT_LINUX32
#define	L_MAXSYSCALL	LINUX32_SYS_MAXSYSCALL
#else
#define	L_MAXSYSCALL	LINUX_SYS_MAXSYSCALL
#endif

u_char linux_debug_map[howmany(L_MAXSYSCALL, sizeof(u_char))];

static int
linux_debug(int syscall, int toggle, int global)
{

	if (global) {
		char c = toggle ? 0 : 0xff;

		memset(linux_debug_map, c, sizeof(linux_debug_map));
		return (0);
	}
	if (syscall < 0 || syscall >= L_MAXSYSCALL)
		return (EINVAL);
	if (toggle)
		clrbit(linux_debug_map, syscall);
	else
		setbit(linux_debug_map, syscall);
	return (0);
}
#undef L_MAXSYSCALL

/*
 * Usage: sysctl linux.debug=<syscall_nr>.<0/1>
 *
 * E.g.: sysctl linux.debug=21.0
 *
 * As a special case, syscall "all" will apply to all syscalls globally.
 */
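/*
 * The string is split at the first '.': the part before it selects the
 * syscall (a number, or "all" for every syscall), the part after it is
 * the toggle.  A toggle of 1 clears the syscall's bit in linux_debug_map,
 * 0 sets it; see linux_debug() above.
 */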
#define	LINUX_MAX_DEBUGSTR	16
int
linux_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	char value[LINUX_MAX_DEBUGSTR], *p;
	int error, sysc, toggle;
	int global = 0;

	value[0] = '\0';
	error = sysctl_handle_string(oidp, value, LINUX_MAX_DEBUGSTR, req);
	if (error || req->newptr == NULL)
		return (error);
	for (p = value; *p != '\0' && *p != '.'; p++);
	if (*p == '\0')
		return (EINVAL);
	*p++ = '\0';
	sysc = strtol(value, NULL, 0);
	toggle = strtol(p, NULL, 0);
	if (strcmp(value, "all") == 0)
		global = 1;
	error = linux_debug(sysc, toggle, global);
	return (error);
}

#endif /* DEBUG || KTR */

int
linux_sched_rr_get_interval(struct thread *td,
    struct linux_sched_rr_get_interval_args *uap)
{
	struct timespec ts;
	struct l_timespec lts;
	struct thread *tdt;
	int error;

	/*
	 * According to the man page, EINVAL should be returned when an
	 * invalid pid is specified.
	 */
	if (uap->pid < 0)
		return (EINVAL);

	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_rr_get_interval_td(td, tdt, &ts);
	PROC_UNLOCK(tdt->td_proc);
	if (error != 0)
		return (error);
	native_to_linux_timespec(&lts, &ts);
	return (copyout(&lts, uap->interval, sizeof(lts)));
}

/*
 * When the Linux thread is the initial thread in the thread group,
 * its thread id is equal to the process id.  Glibc depends on this
 * magic (assert in pthread_getattr_np.c).
 */
struct thread *
linux_tdfind(struct thread *td, lwpid_t tid, pid_t pid)
{
	struct linux_emuldata *em;
	struct thread *tdt;
	struct proc *p;

	tdt = NULL;
	if (tid == 0 || tid == td->td_tid) {
		tdt = td;
		PROC_LOCK(tdt->td_proc);
	} else if (tid > PID_MAX)
		tdt = tdfind(tid, pid);
	else {
		/*
		 * Initial thread, where the tid is equal to the pid.
		 */
		p = pfind(tid);
		if (p != NULL) {
			if (SV_PROC_ABI(p) != SV_ABI_LINUX) {
				/*
				 * p is not a Linuxulator process.
				 */
				PROC_UNLOCK(p);
				return (NULL);
			}
			FOREACH_THREAD_IN_PROC(p, tdt) {
				em = em_find(tdt);
				if (tid == em->em_tid)
					return (tdt);
			}
			PROC_UNLOCK(p);
		}
		return (NULL);
	}

	return (tdt);
}

void
linux_to_bsd_waitopts(int options, int *bsdopts)
{

	if (options & LINUX_WNOHANG)
		*bsdopts |= WNOHANG;
	if (options & LINUX_WUNTRACED)
		*bsdopts |= WUNTRACED;
	if (options & LINUX_WEXITED)
		*bsdopts |= WEXITED;
	if (options & LINUX_WCONTINUED)
		*bsdopts |= WCONTINUED;
	if (options & LINUX_WNOWAIT)
		*bsdopts |= WNOWAIT;

	if (options & __WCLONE)
		*bsdopts |= WLINUXCLONE;
}