1 /*- 2 * Copyright (c) 2002 Doug Rabson 3 * Copyright (c) 1994-1995 Søren Schmidt 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer 11 * in this position and unchanged. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. The name of the author may not be used to endorse or promote products 16 * derived from this software without specific prior written permission 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_compat.h" 34 35 #include <sys/param.h> 36 #include <sys/blist.h> 37 #include <sys/fcntl.h> 38 #if defined(__i386__) 39 #include <sys/imgact_aout.h> 40 #endif 41 #include <sys/jail.h> 42 #include <sys/kernel.h> 43 #include <sys/limits.h> 44 #include <sys/lock.h> 45 #include <sys/malloc.h> 46 #include <sys/mman.h> 47 #include <sys/mount.h> 48 #include <sys/mutex.h> 49 #include <sys/namei.h> 50 #include <sys/priv.h> 51 #include <sys/proc.h> 52 #include <sys/reboot.h> 53 #include <sys/racct.h> 54 #include <sys/random.h> 55 #include <sys/resourcevar.h> 56 #include <sys/sched.h> 57 #include <sys/sdt.h> 58 #include <sys/signalvar.h> 59 #include <sys/stat.h> 60 #include <sys/syscallsubr.h> 61 #include <sys/sysctl.h> 62 #include <sys/sysproto.h> 63 #include <sys/systm.h> 64 #include <sys/time.h> 65 #include <sys/vmmeter.h> 66 #include <sys/vnode.h> 67 #include <sys/wait.h> 68 #include <sys/cpuset.h> 69 #include <sys/uio.h> 70 71 #include <security/mac/mac_framework.h> 72 73 #include <vm/vm.h> 74 #include <vm/pmap.h> 75 #include <vm/vm_kern.h> 76 #include <vm/vm_map.h> 77 #include <vm/vm_extern.h> 78 #include <vm/vm_object.h> 79 #include <vm/swap_pager.h> 80 81 #ifdef COMPAT_LINUX32 82 #include <machine/../linux32/linux.h> 83 #include <machine/../linux32/linux32_proto.h> 84 #else 85 #include <machine/../linux/linux.h> 86 #include <machine/../linux/linux_proto.h> 87 #endif 88 89 #include <compat/linux/linux_dtrace.h> 90 #include <compat/linux/linux_file.h> 91 #include <compat/linux/linux_mib.h> 92 #include <compat/linux/linux_signal.h> 93 #include <compat/linux/linux_timer.h> 94 #include <compat/linux/linux_util.h> 95 #include <compat/linux/linux_sysproto.h> 96 #include <compat/linux/linux_emul.h> 97 #include <compat/linux/linux_misc.h> 98 99 /** 100 * Special DTrace provider for the linuxulator. 101 * 102 * In this file we define the provider for the entire linuxulator. 
All 103 * modules (= files of the linuxulator) use it. 104 * 105 * We define a different name depending on the emulated bitsize, see 106 * ../../<ARCH>/linux{,32}/linux.h, e.g.: 107 * native bitsize = linuxulator 108 * amd64, 32bit emulation = linuxulator32 109 */ 110 LIN_SDT_PROVIDER_DEFINE(LINUX_DTRACE); 111 112 int stclohz; /* Statistics clock frequency */ 113 114 static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = { 115 RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK, 116 RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE, 117 RLIMIT_MEMLOCK, RLIMIT_AS 118 }; 119 120 struct l_sysinfo { 121 l_long uptime; /* Seconds since boot */ 122 l_ulong loads[3]; /* 1, 5, and 15 minute load averages */ 123 #define LINUX_SYSINFO_LOADS_SCALE 65536 124 l_ulong totalram; /* Total usable main memory size */ 125 l_ulong freeram; /* Available memory size */ 126 l_ulong sharedram; /* Amount of shared memory */ 127 l_ulong bufferram; /* Memory used by buffers */ 128 l_ulong totalswap; /* Total swap space size */ 129 l_ulong freeswap; /* swap space still available */ 130 l_ushort procs; /* Number of current processes */ 131 l_ushort pads; 132 l_ulong totalbig; 133 l_ulong freebig; 134 l_uint mem_unit; 135 char _f[20-2*sizeof(l_long)-sizeof(l_int)]; /* padding */ 136 }; 137 138 struct l_pselect6arg { 139 l_uintptr_t ss; 140 l_size_t ss_len; 141 }; 142 143 static int linux_utimensat_nsec_valid(l_long); 144 145 146 int 147 linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args) 148 { 149 struct l_sysinfo sysinfo; 150 vm_object_t object; 151 int i, j; 152 struct timespec ts; 153 154 bzero(&sysinfo, sizeof(sysinfo)); 155 getnanouptime(&ts); 156 if (ts.tv_nsec != 0) 157 ts.tv_sec++; 158 sysinfo.uptime = ts.tv_sec; 159 160 /* Use the information from the mib to get our load averages */ 161 for (i = 0; i < 3; i++) 162 sysinfo.loads[i] = averunnable.ldavg[i] * 163 LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale; 164 165 sysinfo.totalram = physmem * PAGE_SIZE; 166 sysinfo.freeram = sysinfo.totalram - vm_cnt.v_wire_count * PAGE_SIZE; 167 168 sysinfo.sharedram = 0; 169 mtx_lock(&vm_object_list_mtx); 170 TAILQ_FOREACH(object, &vm_object_list, object_list) 171 if (object->shadow_count > 1) 172 sysinfo.sharedram += object->resident_page_count; 173 mtx_unlock(&vm_object_list_mtx); 174 175 sysinfo.sharedram *= PAGE_SIZE; 176 sysinfo.bufferram = 0; 177 178 swap_pager_status(&i, &j); 179 sysinfo.totalswap = i * PAGE_SIZE; 180 sysinfo.freeswap = (i - j) * PAGE_SIZE; 181 182 sysinfo.procs = nprocs; 183 184 /* The following are only present in newer Linux kernels. */ 185 sysinfo.totalbig = 0; 186 sysinfo.freebig = 0; 187 sysinfo.mem_unit = 1; 188 189 return (copyout(&sysinfo, args->info, sizeof(sysinfo))); 190 } 191 192 int 193 linux_alarm(struct thread *td, struct linux_alarm_args *args) 194 { 195 struct itimerval it, old_it; 196 u_int secs; 197 int error; 198 199 #ifdef DEBUG 200 if (ldebug(alarm)) 201 printf(ARGS(alarm, "%u"), args->secs); 202 #endif 203 secs = args->secs; 204 /* 205 * Linux alarm() is always successful. Limit secs to INT32_MAX / 2 206 * to match kern_setitimer()'s limit to avoid error from it. 207 * 208 * XXX. Linux limit secs to INT_MAX on 32 and does not limit on 64-bit 209 * platforms. 
210 */ 211 if (secs > INT32_MAX / 2) 212 secs = INT32_MAX / 2; 213 214 it.it_value.tv_sec = secs; 215 it.it_value.tv_usec = 0; 216 timevalclear(&it.it_interval); 217 error = kern_setitimer(td, ITIMER_REAL, &it, &old_it); 218 KASSERT(error == 0, ("kern_setitimer returns %d", error)); 219 220 if ((old_it.it_value.tv_sec == 0 && old_it.it_value.tv_usec > 0) || 221 old_it.it_value.tv_usec >= 500000) 222 old_it.it_value.tv_sec++; 223 td->td_retval[0] = old_it.it_value.tv_sec; 224 return (0); 225 } 226 227 int 228 linux_brk(struct thread *td, struct linux_brk_args *args) 229 { 230 struct vmspace *vm = td->td_proc->p_vmspace; 231 vm_offset_t new, old; 232 struct obreak_args /* { 233 char * nsize; 234 } */ tmp; 235 236 #ifdef DEBUG 237 if (ldebug(brk)) 238 printf(ARGS(brk, "%p"), (void *)(uintptr_t)args->dsend); 239 #endif 240 old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize); 241 new = (vm_offset_t)args->dsend; 242 tmp.nsize = (char *)new; 243 if (((caddr_t)new > vm->vm_daddr) && !sys_obreak(td, &tmp)) 244 td->td_retval[0] = (long)new; 245 else 246 td->td_retval[0] = (long)old; 247 248 return (0); 249 } 250 251 #if defined(__i386__) 252 /* XXX: what about amd64/linux32? */ 253 254 int 255 linux_uselib(struct thread *td, struct linux_uselib_args *args) 256 { 257 struct nameidata ni; 258 struct vnode *vp; 259 struct exec *a_out; 260 struct vattr attr; 261 vm_offset_t vmaddr; 262 unsigned long file_offset; 263 unsigned long bss_size; 264 char *library; 265 ssize_t aresid; 266 int error, locked, writecount; 267 268 LCONVPATHEXIST(td, args->library, &library); 269 270 #ifdef DEBUG 271 if (ldebug(uselib)) 272 printf(ARGS(uselib, "%s"), library); 273 #endif 274 275 a_out = NULL; 276 locked = 0; 277 vp = NULL; 278 279 NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1, 280 UIO_SYSSPACE, library, td); 281 error = namei(&ni); 282 LFREEPATH(library); 283 if (error) 284 goto cleanup; 285 286 vp = ni.ni_vp; 287 NDFREE(&ni, NDF_ONLY_PNBUF); 288 289 /* 290 * From here on down, we have a locked vnode that must be unlocked. 291 * XXX: The code below largely duplicates exec_check_permissions(). 292 */ 293 locked = 1; 294 295 /* Writable? */ 296 error = VOP_GET_WRITECOUNT(vp, &writecount); 297 if (error != 0) 298 goto cleanup; 299 if (writecount != 0) { 300 error = ETXTBSY; 301 goto cleanup; 302 } 303 304 /* Executable? */ 305 error = VOP_GETATTR(vp, &attr, td->td_ucred); 306 if (error) 307 goto cleanup; 308 309 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) || 310 ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) { 311 /* EACCESS is what exec(2) returns. */ 312 error = ENOEXEC; 313 goto cleanup; 314 } 315 316 /* Sensible size? */ 317 if (attr.va_size == 0) { 318 error = ENOEXEC; 319 goto cleanup; 320 } 321 322 /* Can we access it? */ 323 error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td); 324 if (error) 325 goto cleanup; 326 327 /* 328 * XXX: This should use vn_open() so that it is properly authorized, 329 * and to reduce code redundancy all over the place here. 330 * XXX: Not really, it duplicates far more of exec_check_permissions() 331 * than vn_open(). 332 */ 333 #ifdef MAC 334 error = mac_vnode_check_open(td->td_ucred, vp, VREAD); 335 if (error) 336 goto cleanup; 337 #endif 338 error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL); 339 if (error) 340 goto cleanup; 341 342 /* Pull in executable header into exec_map */ 343 error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE, 344 VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0); 345 if (error) 346 goto cleanup; 347 348 /* Is it a Linux binary ? 
 */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:	/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:	/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete. It should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur_proc(td->td_proc, RLIMIT_DATA) ||
	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
	    bss_size) != 0) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * Prevent more writers.
	 * XXX: Note that if any of the VM operations fail below we don't
	 * clear this flag.
	 */
	VOP_SET_TEXT(vp);

	/*
	 * Lock no longer needed
	 */
	locked = 0;
	VOP_UNLOCK(vp, 0);

	/*
	 * Check if file_offset is page aligned. Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
#ifdef DEBUG
		printf("uselib: Non page aligned binary %lu\n", file_offset);
#endif
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
		    VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
		    td->td_ucred, NOCRED, &aresid, td);
		if (error != 0)
			goto cleanup;
		if (aresid != 0) {
			error = ENOEXEC;
			goto cleanup;
		}
	} else {
#ifdef DEBUG
		printf("uselib: Page aligned binary %lu\n", file_offset);
#endif
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
457 */ 458 error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr, 459 a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL, 460 MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset); 461 if (error) 462 goto cleanup; 463 } 464 #ifdef DEBUG 465 printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long *)vmaddr)[0], 466 ((long *)vmaddr)[1]); 467 #endif 468 if (bss_size != 0) { 469 /* Calculate BSS start address */ 470 vmaddr = trunc_page(a_out->a_entry) + a_out->a_text + 471 a_out->a_data; 472 473 /* allocate some 'anon' space */ 474 error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0, 475 &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL, 476 VM_PROT_ALL, 0); 477 if (error) 478 goto cleanup; 479 } 480 481 cleanup: 482 /* Unlock vnode if needed */ 483 if (locked) 484 VOP_UNLOCK(vp, 0); 485 486 /* Release the temporary mapping. */ 487 if (a_out) 488 kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE); 489 490 return (error); 491 } 492 493 #endif /* __i386__ */ 494 495 int 496 linux_select(struct thread *td, struct linux_select_args *args) 497 { 498 l_timeval ltv; 499 struct timeval tv0, tv1, utv, *tvp; 500 int error; 501 502 #ifdef DEBUG 503 if (ldebug(select)) 504 printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds, 505 (void *)args->readfds, (void *)args->writefds, 506 (void *)args->exceptfds, (void *)args->timeout); 507 #endif 508 509 /* 510 * Store current time for computation of the amount of 511 * time left. 512 */ 513 if (args->timeout) { 514 if ((error = copyin(args->timeout, <v, sizeof(ltv)))) 515 goto select_out; 516 utv.tv_sec = ltv.tv_sec; 517 utv.tv_usec = ltv.tv_usec; 518 #ifdef DEBUG 519 if (ldebug(select)) 520 printf(LMSG("incoming timeout (%jd/%ld)"), 521 (intmax_t)utv.tv_sec, utv.tv_usec); 522 #endif 523 524 if (itimerfix(&utv)) { 525 /* 526 * The timeval was invalid. Convert it to something 527 * valid that will act as it does under Linux. 528 */ 529 utv.tv_sec += utv.tv_usec / 1000000; 530 utv.tv_usec %= 1000000; 531 if (utv.tv_usec < 0) { 532 utv.tv_sec -= 1; 533 utv.tv_usec += 1000000; 534 } 535 if (utv.tv_sec < 0) 536 timevalclear(&utv); 537 } 538 microtime(&tv0); 539 tvp = &utv; 540 } else 541 tvp = NULL; 542 543 error = kern_select(td, args->nfds, args->readfds, args->writefds, 544 args->exceptfds, tvp, LINUX_NFDBITS); 545 546 #ifdef DEBUG 547 if (ldebug(select)) 548 printf(LMSG("real select returns %d"), error); 549 #endif 550 if (error) 551 goto select_out; 552 553 if (args->timeout) { 554 if (td->td_retval[0]) { 555 /* 556 * Compute how much time was left of the timeout, 557 * by subtracting the current time and the time 558 * before we started the call, and subtracting 559 * that result from the user-supplied value. 
560 */ 561 microtime(&tv1); 562 timevalsub(&tv1, &tv0); 563 timevalsub(&utv, &tv1); 564 if (utv.tv_sec < 0) 565 timevalclear(&utv); 566 } else 567 timevalclear(&utv); 568 #ifdef DEBUG 569 if (ldebug(select)) 570 printf(LMSG("outgoing timeout (%jd/%ld)"), 571 (intmax_t)utv.tv_sec, utv.tv_usec); 572 #endif 573 ltv.tv_sec = utv.tv_sec; 574 ltv.tv_usec = utv.tv_usec; 575 if ((error = copyout(<v, args->timeout, sizeof(ltv)))) 576 goto select_out; 577 } 578 579 select_out: 580 #ifdef DEBUG 581 if (ldebug(select)) 582 printf(LMSG("select_out -> %d"), error); 583 #endif 584 return (error); 585 } 586 587 int 588 linux_mremap(struct thread *td, struct linux_mremap_args *args) 589 { 590 uintptr_t addr; 591 size_t len; 592 int error = 0; 593 594 #ifdef DEBUG 595 if (ldebug(mremap)) 596 printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"), 597 (void *)(uintptr_t)args->addr, 598 (unsigned long)args->old_len, 599 (unsigned long)args->new_len, 600 (unsigned long)args->flags); 601 #endif 602 603 if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) { 604 td->td_retval[0] = 0; 605 return (EINVAL); 606 } 607 608 /* 609 * Check for the page alignment. 610 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK. 611 */ 612 if (args->addr & PAGE_MASK) { 613 td->td_retval[0] = 0; 614 return (EINVAL); 615 } 616 617 args->new_len = round_page(args->new_len); 618 args->old_len = round_page(args->old_len); 619 620 if (args->new_len > args->old_len) { 621 td->td_retval[0] = 0; 622 return (ENOMEM); 623 } 624 625 if (args->new_len < args->old_len) { 626 addr = args->addr + args->new_len; 627 len = args->old_len - args->new_len; 628 error = kern_munmap(td, addr, len); 629 } 630 631 td->td_retval[0] = error ? 0 : (uintptr_t)args->addr; 632 return (error); 633 } 634 635 #define LINUX_MS_ASYNC 0x0001 636 #define LINUX_MS_INVALIDATE 0x0002 637 #define LINUX_MS_SYNC 0x0004 638 639 int 640 linux_msync(struct thread *td, struct linux_msync_args *args) 641 { 642 643 return (kern_msync(td, args->addr, args->len, 644 args->fl & ~LINUX_MS_SYNC)); 645 } 646 647 int 648 linux_time(struct thread *td, struct linux_time_args *args) 649 { 650 struct timeval tv; 651 l_time_t tm; 652 int error; 653 654 #ifdef DEBUG 655 if (ldebug(time)) 656 printf(ARGS(time, "*")); 657 #endif 658 659 microtime(&tv); 660 tm = tv.tv_sec; 661 if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm)))) 662 return (error); 663 td->td_retval[0] = tm; 664 return (0); 665 } 666 667 struct l_times_argv { 668 l_clock_t tms_utime; 669 l_clock_t tms_stime; 670 l_clock_t tms_cutime; 671 l_clock_t tms_cstime; 672 }; 673 674 675 /* 676 * Glibc versions prior to 2.2.1 always use hard-coded CLK_TCK value. 677 * Since 2.2.1 Glibc uses value exported from kernel via AT_CLKTCK 678 * auxiliary vector entry. 679 */ 680 #define CLK_TCK 100 681 682 #define CONVOTCK(r) (r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK)) 683 #define CONVNTCK(r) (r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz)) 684 685 #define CONVTCK(r) (linux_kernver(td) >= LINUX_KERNVER_2004000 ? 
\ 686 CONVNTCK(r) : CONVOTCK(r)) 687 688 int 689 linux_times(struct thread *td, struct linux_times_args *args) 690 { 691 struct timeval tv, utime, stime, cutime, cstime; 692 struct l_times_argv tms; 693 struct proc *p; 694 int error; 695 696 #ifdef DEBUG 697 if (ldebug(times)) 698 printf(ARGS(times, "*")); 699 #endif 700 701 if (args->buf != NULL) { 702 p = td->td_proc; 703 PROC_LOCK(p); 704 PROC_STATLOCK(p); 705 calcru(p, &utime, &stime); 706 PROC_STATUNLOCK(p); 707 calccru(p, &cutime, &cstime); 708 PROC_UNLOCK(p); 709 710 tms.tms_utime = CONVTCK(utime); 711 tms.tms_stime = CONVTCK(stime); 712 713 tms.tms_cutime = CONVTCK(cutime); 714 tms.tms_cstime = CONVTCK(cstime); 715 716 if ((error = copyout(&tms, args->buf, sizeof(tms)))) 717 return (error); 718 } 719 720 microuptime(&tv); 721 td->td_retval[0] = (int)CONVTCK(tv); 722 return (0); 723 } 724 725 int 726 linux_newuname(struct thread *td, struct linux_newuname_args *args) 727 { 728 struct l_new_utsname utsname; 729 char osname[LINUX_MAX_UTSNAME]; 730 char osrelease[LINUX_MAX_UTSNAME]; 731 char *p; 732 733 #ifdef DEBUG 734 if (ldebug(newuname)) 735 printf(ARGS(newuname, "*")); 736 #endif 737 738 linux_get_osname(td, osname); 739 linux_get_osrelease(td, osrelease); 740 741 bzero(&utsname, sizeof(utsname)); 742 strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME); 743 getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME); 744 getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME); 745 strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME); 746 strlcpy(utsname.version, version, LINUX_MAX_UTSNAME); 747 for (p = utsname.version; *p != '\0'; ++p) 748 if (*p == '\n') { 749 *p = '\0'; 750 break; 751 } 752 strlcpy(utsname.machine, linux_kplatform, LINUX_MAX_UTSNAME); 753 754 return (copyout(&utsname, args->buf, sizeof(utsname))); 755 } 756 757 struct l_utimbuf { 758 l_time_t l_actime; 759 l_time_t l_modtime; 760 }; 761 762 int 763 linux_utime(struct thread *td, struct linux_utime_args *args) 764 { 765 struct timeval tv[2], *tvp; 766 struct l_utimbuf lut; 767 char *fname; 768 int error; 769 770 LCONVPATHEXIST(td, args->fname, &fname); 771 772 #ifdef DEBUG 773 if (ldebug(utime)) 774 printf(ARGS(utime, "%s, *"), fname); 775 #endif 776 777 if (args->times) { 778 if ((error = copyin(args->times, &lut, sizeof lut))) { 779 LFREEPATH(fname); 780 return (error); 781 } 782 tv[0].tv_sec = lut.l_actime; 783 tv[0].tv_usec = 0; 784 tv[1].tv_sec = lut.l_modtime; 785 tv[1].tv_usec = 0; 786 tvp = tv; 787 } else 788 tvp = NULL; 789 790 error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE, tvp, 791 UIO_SYSSPACE); 792 LFREEPATH(fname); 793 return (error); 794 } 795 796 int 797 linux_utimes(struct thread *td, struct linux_utimes_args *args) 798 { 799 l_timeval ltv[2]; 800 struct timeval tv[2], *tvp = NULL; 801 char *fname; 802 int error; 803 804 LCONVPATHEXIST(td, args->fname, &fname); 805 806 #ifdef DEBUG 807 if (ldebug(utimes)) 808 printf(ARGS(utimes, "%s, *"), fname); 809 #endif 810 811 if (args->tptr != NULL) { 812 if ((error = copyin(args->tptr, ltv, sizeof ltv))) { 813 LFREEPATH(fname); 814 return (error); 815 } 816 tv[0].tv_sec = ltv[0].tv_sec; 817 tv[0].tv_usec = ltv[0].tv_usec; 818 tv[1].tv_sec = ltv[1].tv_sec; 819 tv[1].tv_usec = ltv[1].tv_usec; 820 tvp = tv; 821 } 822 823 error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE, 824 tvp, UIO_SYSSPACE); 825 LFREEPATH(fname); 826 return (error); 827 } 828 829 static int 830 linux_utimensat_nsec_valid(l_long nsec) 831 { 832 833 if (nsec == LINUX_UTIME_OMIT || nsec == 
LINUX_UTIME_NOW) 834 return (0); 835 if (nsec >= 0 && nsec <= 999999999) 836 return (0); 837 return (1); 838 } 839 840 int 841 linux_utimensat(struct thread *td, struct linux_utimensat_args *args) 842 { 843 struct l_timespec l_times[2]; 844 struct timespec times[2], *timesp = NULL; 845 char *path = NULL; 846 int error, dfd, flags = 0; 847 848 dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd; 849 850 #ifdef DEBUG 851 if (ldebug(utimensat)) 852 printf(ARGS(utimensat, "%d, *"), dfd); 853 #endif 854 855 if (args->flags & ~LINUX_AT_SYMLINK_NOFOLLOW) 856 return (EINVAL); 857 858 if (args->times != NULL) { 859 error = copyin(args->times, l_times, sizeof(l_times)); 860 if (error != 0) 861 return (error); 862 863 if (linux_utimensat_nsec_valid(l_times[0].tv_nsec) != 0 || 864 linux_utimensat_nsec_valid(l_times[1].tv_nsec) != 0) 865 return (EINVAL); 866 867 times[0].tv_sec = l_times[0].tv_sec; 868 switch (l_times[0].tv_nsec) 869 { 870 case LINUX_UTIME_OMIT: 871 times[0].tv_nsec = UTIME_OMIT; 872 break; 873 case LINUX_UTIME_NOW: 874 times[0].tv_nsec = UTIME_NOW; 875 break; 876 default: 877 times[0].tv_nsec = l_times[0].tv_nsec; 878 } 879 880 times[1].tv_sec = l_times[1].tv_sec; 881 switch (l_times[1].tv_nsec) 882 { 883 case LINUX_UTIME_OMIT: 884 times[1].tv_nsec = UTIME_OMIT; 885 break; 886 case LINUX_UTIME_NOW: 887 times[1].tv_nsec = UTIME_NOW; 888 break; 889 default: 890 times[1].tv_nsec = l_times[1].tv_nsec; 891 break; 892 } 893 timesp = times; 894 895 /* This breaks POSIX, but is what the Linux kernel does 896 * _on purpose_ (documented in the man page for utimensat(2)), 897 * so we must follow that behaviour. */ 898 if (times[0].tv_nsec == UTIME_OMIT && 899 times[1].tv_nsec == UTIME_OMIT) 900 return (0); 901 } 902 903 if (args->pathname != NULL) 904 LCONVPATHEXIST_AT(td, args->pathname, &path, dfd); 905 else if (args->flags != 0) 906 return (EINVAL); 907 908 if (args->flags & LINUX_AT_SYMLINK_NOFOLLOW) 909 flags |= AT_SYMLINK_NOFOLLOW; 910 911 if (path == NULL) 912 error = kern_futimens(td, dfd, timesp, UIO_SYSSPACE); 913 else { 914 error = kern_utimensat(td, dfd, path, UIO_SYSSPACE, timesp, 915 UIO_SYSSPACE, flags); 916 LFREEPATH(path); 917 } 918 919 return (error); 920 } 921 922 int 923 linux_futimesat(struct thread *td, struct linux_futimesat_args *args) 924 { 925 l_timeval ltv[2]; 926 struct timeval tv[2], *tvp = NULL; 927 char *fname; 928 int error, dfd; 929 930 dfd = (args->dfd == LINUX_AT_FDCWD) ? 
AT_FDCWD : args->dfd; 931 LCONVPATHEXIST_AT(td, args->filename, &fname, dfd); 932 933 #ifdef DEBUG 934 if (ldebug(futimesat)) 935 printf(ARGS(futimesat, "%s, *"), fname); 936 #endif 937 938 if (args->utimes != NULL) { 939 if ((error = copyin(args->utimes, ltv, sizeof ltv))) { 940 LFREEPATH(fname); 941 return (error); 942 } 943 tv[0].tv_sec = ltv[0].tv_sec; 944 tv[0].tv_usec = ltv[0].tv_usec; 945 tv[1].tv_sec = ltv[1].tv_sec; 946 tv[1].tv_usec = ltv[1].tv_usec; 947 tvp = tv; 948 } 949 950 error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE); 951 LFREEPATH(fname); 952 return (error); 953 } 954 955 int 956 linux_common_wait(struct thread *td, int pid, int *status, 957 int options, struct rusage *ru) 958 { 959 int error, tmpstat; 960 961 error = kern_wait(td, pid, &tmpstat, options, ru); 962 if (error) 963 return (error); 964 965 if (status) { 966 tmpstat &= 0xffff; 967 if (WIFSIGNALED(tmpstat)) 968 tmpstat = (tmpstat & 0xffffff80) | 969 bsd_to_linux_signal(WTERMSIG(tmpstat)); 970 else if (WIFSTOPPED(tmpstat)) 971 tmpstat = (tmpstat & 0xffff00ff) | 972 (bsd_to_linux_signal(WSTOPSIG(tmpstat)) << 8); 973 else if (WIFCONTINUED(tmpstat)) 974 tmpstat = 0xffff; 975 error = copyout(&tmpstat, status, sizeof(int)); 976 } 977 978 return (error); 979 } 980 981 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 982 int 983 linux_waitpid(struct thread *td, struct linux_waitpid_args *args) 984 { 985 struct linux_wait4_args wait4_args; 986 987 #ifdef DEBUG 988 if (ldebug(waitpid)) 989 printf(ARGS(waitpid, "%d, %p, %d"), 990 args->pid, (void *)args->status, args->options); 991 #endif 992 993 wait4_args.pid = args->pid; 994 wait4_args.status = args->status; 995 wait4_args.options = args->options; 996 wait4_args.rusage = NULL; 997 998 return (linux_wait4(td, &wait4_args)); 999 } 1000 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 1001 1002 int 1003 linux_wait4(struct thread *td, struct linux_wait4_args *args) 1004 { 1005 int error, options; 1006 struct rusage ru, *rup; 1007 1008 #ifdef DEBUG 1009 if (ldebug(wait4)) 1010 printf(ARGS(wait4, "%d, %p, %d, %p"), 1011 args->pid, (void *)args->status, args->options, 1012 (void *)args->rusage); 1013 #endif 1014 if (args->options & ~(LINUX_WUNTRACED | LINUX_WNOHANG | 1015 LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL)) 1016 return (EINVAL); 1017 1018 options = WEXITED; 1019 linux_to_bsd_waitopts(args->options, &options); 1020 1021 if (args->rusage != NULL) 1022 rup = &ru; 1023 else 1024 rup = NULL; 1025 error = linux_common_wait(td, args->pid, args->status, options, rup); 1026 if (error != 0) 1027 return (error); 1028 if (args->rusage != NULL) 1029 error = linux_copyout_rusage(&ru, args->rusage); 1030 return (error); 1031 } 1032 1033 int 1034 linux_waitid(struct thread *td, struct linux_waitid_args *args) 1035 { 1036 int status, options, sig; 1037 struct __wrusage wru; 1038 siginfo_t siginfo; 1039 l_siginfo_t lsi; 1040 idtype_t idtype; 1041 struct proc *p; 1042 int error; 1043 1044 options = 0; 1045 linux_to_bsd_waitopts(args->options, &options); 1046 1047 if (options & ~(WNOHANG | WNOWAIT | WEXITED | WUNTRACED | WCONTINUED)) 1048 return (EINVAL); 1049 if (!(options & (WEXITED | WUNTRACED | WCONTINUED))) 1050 return (EINVAL); 1051 1052 switch (args->idtype) { 1053 case LINUX_P_ALL: 1054 idtype = P_ALL; 1055 break; 1056 case LINUX_P_PID: 1057 if (args->id <= 0) 1058 return (EINVAL); 1059 idtype = P_PID; 1060 break; 1061 case LINUX_P_PGID: 1062 if (args->id <= 0) 1063 return (EINVAL); 1064 idtype = P_PGID; 1065 break; 
1066 default: 1067 return (EINVAL); 1068 } 1069 1070 error = kern_wait6(td, idtype, args->id, &status, options, 1071 &wru, &siginfo); 1072 if (error != 0) 1073 return (error); 1074 if (args->rusage != NULL) { 1075 error = linux_copyout_rusage(&wru.wru_children, 1076 args->rusage); 1077 if (error != 0) 1078 return (error); 1079 } 1080 if (args->info != NULL) { 1081 p = td->td_proc; 1082 if (td->td_retval[0] == 0) 1083 bzero(&lsi, sizeof(lsi)); 1084 else { 1085 sig = bsd_to_linux_signal(siginfo.si_signo); 1086 siginfo_to_lsiginfo(&siginfo, &lsi, sig); 1087 } 1088 error = copyout(&lsi, args->info, sizeof(lsi)); 1089 } 1090 td->td_retval[0] = 0; 1091 1092 return (error); 1093 } 1094 1095 int 1096 linux_mknod(struct thread *td, struct linux_mknod_args *args) 1097 { 1098 char *path; 1099 int error; 1100 1101 LCONVPATHCREAT(td, args->path, &path); 1102 1103 #ifdef DEBUG 1104 if (ldebug(mknod)) 1105 printf(ARGS(mknod, "%s, %d, %ju"), path, args->mode, 1106 (uintmax_t)args->dev); 1107 #endif 1108 1109 switch (args->mode & S_IFMT) { 1110 case S_IFIFO: 1111 case S_IFSOCK: 1112 error = kern_mkfifoat(td, AT_FDCWD, path, UIO_SYSSPACE, 1113 args->mode); 1114 break; 1115 1116 case S_IFCHR: 1117 case S_IFBLK: 1118 error = kern_mknodat(td, AT_FDCWD, path, UIO_SYSSPACE, 1119 args->mode, args->dev); 1120 break; 1121 1122 case S_IFDIR: 1123 error = EPERM; 1124 break; 1125 1126 case 0: 1127 args->mode |= S_IFREG; 1128 /* FALLTHROUGH */ 1129 case S_IFREG: 1130 error = kern_openat(td, AT_FDCWD, path, UIO_SYSSPACE, 1131 O_WRONLY | O_CREAT | O_TRUNC, args->mode); 1132 if (error == 0) 1133 kern_close(td, td->td_retval[0]); 1134 break; 1135 1136 default: 1137 error = EINVAL; 1138 break; 1139 } 1140 LFREEPATH(path); 1141 return (error); 1142 } 1143 1144 int 1145 linux_mknodat(struct thread *td, struct linux_mknodat_args *args) 1146 { 1147 char *path; 1148 int error, dfd; 1149 1150 dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd; 1151 LCONVPATHCREAT_AT(td, args->filename, &path, dfd); 1152 1153 #ifdef DEBUG 1154 if (ldebug(mknodat)) 1155 printf(ARGS(mknodat, "%s, %d, %d"), path, args->mode, args->dev); 1156 #endif 1157 1158 switch (args->mode & S_IFMT) { 1159 case S_IFIFO: 1160 case S_IFSOCK: 1161 error = kern_mkfifoat(td, dfd, path, UIO_SYSSPACE, args->mode); 1162 break; 1163 1164 case S_IFCHR: 1165 case S_IFBLK: 1166 error = kern_mknodat(td, dfd, path, UIO_SYSSPACE, args->mode, 1167 args->dev); 1168 break; 1169 1170 case S_IFDIR: 1171 error = EPERM; 1172 break; 1173 1174 case 0: 1175 args->mode |= S_IFREG; 1176 /* FALLTHROUGH */ 1177 case S_IFREG: 1178 error = kern_openat(td, dfd, path, UIO_SYSSPACE, 1179 O_WRONLY | O_CREAT | O_TRUNC, args->mode); 1180 if (error == 0) 1181 kern_close(td, td->td_retval[0]); 1182 break; 1183 1184 default: 1185 error = EINVAL; 1186 break; 1187 } 1188 LFREEPATH(path); 1189 return (error); 1190 } 1191 1192 /* 1193 * UGH! This is just about the dumbest idea I've ever heard!! 
1194 */ 1195 int 1196 linux_personality(struct thread *td, struct linux_personality_args *args) 1197 { 1198 struct linux_pemuldata *pem; 1199 struct proc *p = td->td_proc; 1200 uint32_t old; 1201 1202 #ifdef DEBUG 1203 if (ldebug(personality)) 1204 printf(ARGS(personality, "%u"), args->per); 1205 #endif 1206 1207 PROC_LOCK(p); 1208 pem = pem_find(p); 1209 old = pem->persona; 1210 if (args->per != 0xffffffff) 1211 pem->persona = args->per; 1212 PROC_UNLOCK(p); 1213 1214 td->td_retval[0] = old; 1215 return (0); 1216 } 1217 1218 struct l_itimerval { 1219 l_timeval it_interval; 1220 l_timeval it_value; 1221 }; 1222 1223 #define B2L_ITIMERVAL(bip, lip) \ 1224 (bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec; \ 1225 (bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec; \ 1226 (bip)->it_value.tv_sec = (lip)->it_value.tv_sec; \ 1227 (bip)->it_value.tv_usec = (lip)->it_value.tv_usec; 1228 1229 int 1230 linux_setitimer(struct thread *td, struct linux_setitimer_args *uap) 1231 { 1232 int error; 1233 struct l_itimerval ls; 1234 struct itimerval aitv, oitv; 1235 1236 #ifdef DEBUG 1237 if (ldebug(setitimer)) 1238 printf(ARGS(setitimer, "%p, %p"), 1239 (void *)uap->itv, (void *)uap->oitv); 1240 #endif 1241 1242 if (uap->itv == NULL) { 1243 uap->itv = uap->oitv; 1244 return (linux_getitimer(td, (struct linux_getitimer_args *)uap)); 1245 } 1246 1247 error = copyin(uap->itv, &ls, sizeof(ls)); 1248 if (error != 0) 1249 return (error); 1250 B2L_ITIMERVAL(&aitv, &ls); 1251 #ifdef DEBUG 1252 if (ldebug(setitimer)) { 1253 printf("setitimer: value: sec: %jd, usec: %ld\n", 1254 (intmax_t)aitv.it_value.tv_sec, aitv.it_value.tv_usec); 1255 printf("setitimer: interval: sec: %jd, usec: %ld\n", 1256 (intmax_t)aitv.it_interval.tv_sec, aitv.it_interval.tv_usec); 1257 } 1258 #endif 1259 error = kern_setitimer(td, uap->which, &aitv, &oitv); 1260 if (error != 0 || uap->oitv == NULL) 1261 return (error); 1262 B2L_ITIMERVAL(&ls, &oitv); 1263 1264 return (copyout(&ls, uap->oitv, sizeof(ls))); 1265 } 1266 1267 int 1268 linux_getitimer(struct thread *td, struct linux_getitimer_args *uap) 1269 { 1270 int error; 1271 struct l_itimerval ls; 1272 struct itimerval aitv; 1273 1274 #ifdef DEBUG 1275 if (ldebug(getitimer)) 1276 printf(ARGS(getitimer, "%p"), (void *)uap->itv); 1277 #endif 1278 error = kern_getitimer(td, uap->which, &aitv); 1279 if (error != 0) 1280 return (error); 1281 B2L_ITIMERVAL(&ls, &aitv); 1282 return (copyout(&ls, uap->itv, sizeof(ls))); 1283 } 1284 1285 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 1286 int 1287 linux_nice(struct thread *td, struct linux_nice_args *args) 1288 { 1289 struct setpriority_args bsd_args; 1290 1291 bsd_args.which = PRIO_PROCESS; 1292 bsd_args.who = 0; /* current process */ 1293 bsd_args.prio = args->inc; 1294 return (sys_setpriority(td, &bsd_args)); 1295 } 1296 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 1297 1298 int 1299 linux_setgroups(struct thread *td, struct linux_setgroups_args *args) 1300 { 1301 struct ucred *newcred, *oldcred; 1302 l_gid_t *linux_gidset; 1303 gid_t *bsd_gidset; 1304 int ngrp, error; 1305 struct proc *p; 1306 1307 ngrp = args->gidsetsize; 1308 if (ngrp < 0 || ngrp >= ngroups_max + 1) 1309 return (EINVAL); 1310 linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK); 1311 error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t)); 1312 if (error) 1313 goto out; 1314 newcred = crget(); 1315 crextend(newcred, ngrp + 1); 1316 p = td->td_proc; 1317 PROC_LOCK(p); 1318 oldcred = p->p_ucred; 1319 
crcopy(newcred, oldcred); 1320 1321 /* 1322 * cr_groups[0] holds egid. Setting the whole set from 1323 * the supplied set will cause egid to be changed too. 1324 * Keep cr_groups[0] unchanged to prevent that. 1325 */ 1326 1327 if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS, 0)) != 0) { 1328 PROC_UNLOCK(p); 1329 crfree(newcred); 1330 goto out; 1331 } 1332 1333 if (ngrp > 0) { 1334 newcred->cr_ngroups = ngrp + 1; 1335 1336 bsd_gidset = newcred->cr_groups; 1337 ngrp--; 1338 while (ngrp >= 0) { 1339 bsd_gidset[ngrp + 1] = linux_gidset[ngrp]; 1340 ngrp--; 1341 } 1342 } else 1343 newcred->cr_ngroups = 1; 1344 1345 setsugid(p); 1346 proc_set_cred(p, newcred); 1347 PROC_UNLOCK(p); 1348 crfree(oldcred); 1349 error = 0; 1350 out: 1351 free(linux_gidset, M_LINUX); 1352 return (error); 1353 } 1354 1355 int 1356 linux_getgroups(struct thread *td, struct linux_getgroups_args *args) 1357 { 1358 struct ucred *cred; 1359 l_gid_t *linux_gidset; 1360 gid_t *bsd_gidset; 1361 int bsd_gidsetsz, ngrp, error; 1362 1363 cred = td->td_ucred; 1364 bsd_gidset = cred->cr_groups; 1365 bsd_gidsetsz = cred->cr_ngroups - 1; 1366 1367 /* 1368 * cr_groups[0] holds egid. Returning the whole set 1369 * here will cause a duplicate. Exclude cr_groups[0] 1370 * to prevent that. 1371 */ 1372 1373 if ((ngrp = args->gidsetsize) == 0) { 1374 td->td_retval[0] = bsd_gidsetsz; 1375 return (0); 1376 } 1377 1378 if (ngrp < bsd_gidsetsz) 1379 return (EINVAL); 1380 1381 ngrp = 0; 1382 linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset), 1383 M_LINUX, M_WAITOK); 1384 while (ngrp < bsd_gidsetsz) { 1385 linux_gidset[ngrp] = bsd_gidset[ngrp + 1]; 1386 ngrp++; 1387 } 1388 1389 error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t)); 1390 free(linux_gidset, M_LINUX); 1391 if (error) 1392 return (error); 1393 1394 td->td_retval[0] = ngrp; 1395 return (0); 1396 } 1397 1398 int 1399 linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args) 1400 { 1401 struct rlimit bsd_rlim; 1402 struct l_rlimit rlim; 1403 u_int which; 1404 int error; 1405 1406 #ifdef DEBUG 1407 if (ldebug(setrlimit)) 1408 printf(ARGS(setrlimit, "%d, %p"), 1409 args->resource, (void *)args->rlim); 1410 #endif 1411 1412 if (args->resource >= LINUX_RLIM_NLIMITS) 1413 return (EINVAL); 1414 1415 which = linux_to_bsd_resource[args->resource]; 1416 if (which == -1) 1417 return (EINVAL); 1418 1419 error = copyin(args->rlim, &rlim, sizeof(rlim)); 1420 if (error) 1421 return (error); 1422 1423 bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur; 1424 bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max; 1425 return (kern_setrlimit(td, which, &bsd_rlim)); 1426 } 1427 1428 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 1429 int 1430 linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args) 1431 { 1432 struct l_rlimit rlim; 1433 struct rlimit bsd_rlim; 1434 u_int which; 1435 1436 #ifdef DEBUG 1437 if (ldebug(old_getrlimit)) 1438 printf(ARGS(old_getrlimit, "%d, %p"), 1439 args->resource, (void *)args->rlim); 1440 #endif 1441 1442 if (args->resource >= LINUX_RLIM_NLIMITS) 1443 return (EINVAL); 1444 1445 which = linux_to_bsd_resource[args->resource]; 1446 if (which == -1) 1447 return (EINVAL); 1448 1449 lim_rlimit(td, which, &bsd_rlim); 1450 1451 #ifdef COMPAT_LINUX32 1452 rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur; 1453 if (rlim.rlim_cur == UINT_MAX) 1454 rlim.rlim_cur = INT_MAX; 1455 rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max; 1456 if (rlim.rlim_max == UINT_MAX) 1457 rlim.rlim_max = INT_MAX; 1458 #else 1459 
rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur; 1460 if (rlim.rlim_cur == ULONG_MAX) 1461 rlim.rlim_cur = LONG_MAX; 1462 rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max; 1463 if (rlim.rlim_max == ULONG_MAX) 1464 rlim.rlim_max = LONG_MAX; 1465 #endif 1466 return (copyout(&rlim, args->rlim, sizeof(rlim))); 1467 } 1468 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 1469 1470 int 1471 linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args) 1472 { 1473 struct l_rlimit rlim; 1474 struct rlimit bsd_rlim; 1475 u_int which; 1476 1477 #ifdef DEBUG 1478 if (ldebug(getrlimit)) 1479 printf(ARGS(getrlimit, "%d, %p"), 1480 args->resource, (void *)args->rlim); 1481 #endif 1482 1483 if (args->resource >= LINUX_RLIM_NLIMITS) 1484 return (EINVAL); 1485 1486 which = linux_to_bsd_resource[args->resource]; 1487 if (which == -1) 1488 return (EINVAL); 1489 1490 lim_rlimit(td, which, &bsd_rlim); 1491 1492 rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur; 1493 rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max; 1494 return (copyout(&rlim, args->rlim, sizeof(rlim))); 1495 } 1496 1497 int 1498 linux_sched_setscheduler(struct thread *td, 1499 struct linux_sched_setscheduler_args *args) 1500 { 1501 struct sched_param sched_param; 1502 struct thread *tdt; 1503 int error, policy; 1504 1505 #ifdef DEBUG 1506 if (ldebug(sched_setscheduler)) 1507 printf(ARGS(sched_setscheduler, "%d, %d, %p"), 1508 args->pid, args->policy, (const void *)args->param); 1509 #endif 1510 1511 switch (args->policy) { 1512 case LINUX_SCHED_OTHER: 1513 policy = SCHED_OTHER; 1514 break; 1515 case LINUX_SCHED_FIFO: 1516 policy = SCHED_FIFO; 1517 break; 1518 case LINUX_SCHED_RR: 1519 policy = SCHED_RR; 1520 break; 1521 default: 1522 return (EINVAL); 1523 } 1524 1525 error = copyin(args->param, &sched_param, sizeof(sched_param)); 1526 if (error) 1527 return (error); 1528 1529 tdt = linux_tdfind(td, args->pid, -1); 1530 if (tdt == NULL) 1531 return (ESRCH); 1532 1533 error = kern_sched_setscheduler(td, tdt, policy, &sched_param); 1534 PROC_UNLOCK(tdt->td_proc); 1535 return (error); 1536 } 1537 1538 int 1539 linux_sched_getscheduler(struct thread *td, 1540 struct linux_sched_getscheduler_args *args) 1541 { 1542 struct thread *tdt; 1543 int error, policy; 1544 1545 #ifdef DEBUG 1546 if (ldebug(sched_getscheduler)) 1547 printf(ARGS(sched_getscheduler, "%d"), args->pid); 1548 #endif 1549 1550 tdt = linux_tdfind(td, args->pid, -1); 1551 if (tdt == NULL) 1552 return (ESRCH); 1553 1554 error = kern_sched_getscheduler(td, tdt, &policy); 1555 PROC_UNLOCK(tdt->td_proc); 1556 1557 switch (policy) { 1558 case SCHED_OTHER: 1559 td->td_retval[0] = LINUX_SCHED_OTHER; 1560 break; 1561 case SCHED_FIFO: 1562 td->td_retval[0] = LINUX_SCHED_FIFO; 1563 break; 1564 case SCHED_RR: 1565 td->td_retval[0] = LINUX_SCHED_RR; 1566 break; 1567 } 1568 return (error); 1569 } 1570 1571 int 1572 linux_sched_get_priority_max(struct thread *td, 1573 struct linux_sched_get_priority_max_args *args) 1574 { 1575 struct sched_get_priority_max_args bsd; 1576 1577 #ifdef DEBUG 1578 if (ldebug(sched_get_priority_max)) 1579 printf(ARGS(sched_get_priority_max, "%d"), args->policy); 1580 #endif 1581 1582 switch (args->policy) { 1583 case LINUX_SCHED_OTHER: 1584 bsd.policy = SCHED_OTHER; 1585 break; 1586 case LINUX_SCHED_FIFO: 1587 bsd.policy = SCHED_FIFO; 1588 break; 1589 case LINUX_SCHED_RR: 1590 bsd.policy = SCHED_RR; 1591 break; 1592 default: 1593 return (EINVAL); 1594 } 1595 return (sys_sched_get_priority_max(td, &bsd)); 1596 } 1597 1598 int 1599 linux_sched_get_priority_min(struct 
thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

#ifdef DEBUG
	if (ldebug(sched_get_priority_min))
		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
#endif

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_min(td, &bsd));
}

#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123
#define REBOOT_RESTART	0x01234567
#define REBOOT_RESTART2	0xA1B2C3D4
#define REBOOT_POWEROFF	0x4321FEDC
#define REBOOT_MAGIC1	0xfee1dead
#define REBOOT_MAGIC2	0x28121969
#define REBOOT_MAGIC2A	0x05121996
#define REBOOT_MAGIC2B	0x16041998

int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

#ifdef DEBUG
	if (ldebug(reboot))
		printf(ARGS(reboot, "0x%x"), args->cmd);
#endif

	if (args->magic1 != REBOOT_MAGIC1)
		return (EINVAL);

	switch (args->magic2) {
	case REBOOT_MAGIC2:
	case REBOOT_MAGIC2A:
	case REBOOT_MAGIC2B:
		break;
	default:
		return (EINVAL);
	}

	switch (args->cmd) {
	case REBOOT_CAD_ON:
	case REBOOT_CAD_OFF:
		return (priv_check(td, PRIV_REBOOT));
	case REBOOT_HALT:
		bsd_args.opt = RB_HALT;
		break;
	case REBOOT_RESTART:
	case REBOOT_RESTART2:
		bsd_args.opt = 0;
		break;
	case REBOOT_POWEROFF:
		bsd_args.opt = RB_POWEROFF;
		break;
	default:
		return (EINVAL);
	}
	return (sys_reboot(td, &bsd_args));
}

/*
 * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
 * td->td_retval[1] when COMPAT_43 is defined. This clobbers registers that
 * are assumed to be preserved. The following lightweight syscalls fix
 * this.
See also linux_getgid16() and linux_getuid16() in linux_uid16.c 1684 * 1685 * linux_getpid() - MP SAFE 1686 * linux_getgid() - MP SAFE 1687 * linux_getuid() - MP SAFE 1688 */ 1689 1690 int 1691 linux_getpid(struct thread *td, struct linux_getpid_args *args) 1692 { 1693 1694 #ifdef DEBUG 1695 if (ldebug(getpid)) 1696 printf(ARGS(getpid, "")); 1697 #endif 1698 td->td_retval[0] = td->td_proc->p_pid; 1699 1700 return (0); 1701 } 1702 1703 int 1704 linux_gettid(struct thread *td, struct linux_gettid_args *args) 1705 { 1706 struct linux_emuldata *em; 1707 1708 #ifdef DEBUG 1709 if (ldebug(gettid)) 1710 printf(ARGS(gettid, "")); 1711 #endif 1712 1713 em = em_find(td); 1714 KASSERT(em != NULL, ("gettid: emuldata not found.\n")); 1715 1716 td->td_retval[0] = em->em_tid; 1717 1718 return (0); 1719 } 1720 1721 1722 int 1723 linux_getppid(struct thread *td, struct linux_getppid_args *args) 1724 { 1725 1726 #ifdef DEBUG 1727 if (ldebug(getppid)) 1728 printf(ARGS(getppid, "")); 1729 #endif 1730 1731 td->td_retval[0] = kern_getppid(td); 1732 return (0); 1733 } 1734 1735 int 1736 linux_getgid(struct thread *td, struct linux_getgid_args *args) 1737 { 1738 1739 #ifdef DEBUG 1740 if (ldebug(getgid)) 1741 printf(ARGS(getgid, "")); 1742 #endif 1743 1744 td->td_retval[0] = td->td_ucred->cr_rgid; 1745 return (0); 1746 } 1747 1748 int 1749 linux_getuid(struct thread *td, struct linux_getuid_args *args) 1750 { 1751 1752 #ifdef DEBUG 1753 if (ldebug(getuid)) 1754 printf(ARGS(getuid, "")); 1755 #endif 1756 1757 td->td_retval[0] = td->td_ucred->cr_ruid; 1758 return (0); 1759 } 1760 1761 1762 int 1763 linux_getsid(struct thread *td, struct linux_getsid_args *args) 1764 { 1765 struct getsid_args bsd; 1766 1767 #ifdef DEBUG 1768 if (ldebug(getsid)) 1769 printf(ARGS(getsid, "%i"), args->pid); 1770 #endif 1771 1772 bsd.pid = args->pid; 1773 return (sys_getsid(td, &bsd)); 1774 } 1775 1776 int 1777 linux_nosys(struct thread *td, struct nosys_args *ignore) 1778 { 1779 1780 return (ENOSYS); 1781 } 1782 1783 int 1784 linux_getpriority(struct thread *td, struct linux_getpriority_args *args) 1785 { 1786 struct getpriority_args bsd_args; 1787 int error; 1788 1789 #ifdef DEBUG 1790 if (ldebug(getpriority)) 1791 printf(ARGS(getpriority, "%i, %i"), args->which, args->who); 1792 #endif 1793 1794 bsd_args.which = args->which; 1795 bsd_args.who = args->who; 1796 error = sys_getpriority(td, &bsd_args); 1797 td->td_retval[0] = 20 - td->td_retval[0]; 1798 return (error); 1799 } 1800 1801 int 1802 linux_sethostname(struct thread *td, struct linux_sethostname_args *args) 1803 { 1804 int name[2]; 1805 1806 #ifdef DEBUG 1807 if (ldebug(sethostname)) 1808 printf(ARGS(sethostname, "*, %i"), args->len); 1809 #endif 1810 1811 name[0] = CTL_KERN; 1812 name[1] = KERN_HOSTNAME; 1813 return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname, 1814 args->len, 0, 0)); 1815 } 1816 1817 int 1818 linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args) 1819 { 1820 int name[2]; 1821 1822 #ifdef DEBUG 1823 if (ldebug(setdomainname)) 1824 printf(ARGS(setdomainname, "*, %i"), args->len); 1825 #endif 1826 1827 name[0] = CTL_KERN; 1828 name[1] = KERN_NISDOMAINNAME; 1829 return (userland_sysctl(td, name, 2, 0, 0, 0, args->name, 1830 args->len, 0, 0)); 1831 } 1832 1833 int 1834 linux_exit_group(struct thread *td, struct linux_exit_group_args *args) 1835 { 1836 1837 #ifdef DEBUG 1838 if (ldebug(exit_group)) 1839 printf(ARGS(exit_group, "%i"), args->error_code); 1840 #endif 1841 1842 LINUX_CTR2(exit_group, "thread(%d) (%d)", td->td_tid, 1843 
	    args->error_code);

	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set. We ignore that (temporarily?)
	 * as it doesn't occur often.
	 */
	exit1(td, args->error_code, 0);
	/* NOTREACHED */
}

#define _LINUX_CAPABILITY_VERSION	0x19980330

struct l_user_cap_header {
	l_int	version;
	l_int	pid;
};

struct l_user_cap_data {
	l_int	effective;
	l_int	permitted;
	l_int	inheritable;
};

int
linux_capget(struct thread *td, struct linux_capget_args *args)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd;
	int error;

	if (args->hdrp == NULL)
		return (EFAULT);

	error = copyin(args->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	if (luch.version != _LINUX_CAPABILITY_VERSION) {
		luch.version = _LINUX_CAPABILITY_VERSION;
		error = copyout(&luch, args->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	if (args->datap) {
		/*
		 * The current implementation doesn't support setting
		 * a capability (it's essentially a stub) so indicate
		 * that no capabilities are currently set or available
		 * to request.
		 */
		bzero(&lucd, sizeof(lucd));
		error = copyout(&lucd, args->datap, sizeof(lucd));
	}

	return (error);
}

int
linux_capset(struct thread *td, struct linux_capset_args *args)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd;
	int error;

	if (args->hdrp == NULL || args->datap == NULL)
		return (EFAULT);

	error = copyin(args->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	if (luch.version != _LINUX_CAPABILITY_VERSION) {
		luch.version = _LINUX_CAPABILITY_VERSION;
		error = copyout(&luch, args->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	error = copyin(args->datap, &lucd, sizeof(lucd));
	if (error != 0)
		return (error);

	/* We currently don't support setting any capabilities.
*/ 1936 if (lucd.effective || lucd.permitted || lucd.inheritable) { 1937 linux_msg(td, 1938 "capset effective=0x%x, permitted=0x%x, " 1939 "inheritable=0x%x is not implemented", 1940 (int)lucd.effective, (int)lucd.permitted, 1941 (int)lucd.inheritable); 1942 return (EPERM); 1943 } 1944 1945 return (0); 1946 } 1947 1948 int 1949 linux_prctl(struct thread *td, struct linux_prctl_args *args) 1950 { 1951 int error = 0, max_size; 1952 struct proc *p = td->td_proc; 1953 char comm[LINUX_MAX_COMM_LEN]; 1954 struct linux_emuldata *em; 1955 int pdeath_signal; 1956 1957 #ifdef DEBUG 1958 if (ldebug(prctl)) 1959 printf(ARGS(prctl, "%d, %ju, %ju, %ju, %ju"), args->option, 1960 (uintmax_t)args->arg2, (uintmax_t)args->arg3, 1961 (uintmax_t)args->arg4, (uintmax_t)args->arg5); 1962 #endif 1963 1964 switch (args->option) { 1965 case LINUX_PR_SET_PDEATHSIG: 1966 if (!LINUX_SIG_VALID(args->arg2)) 1967 return (EINVAL); 1968 em = em_find(td); 1969 KASSERT(em != NULL, ("prctl: emuldata not found.\n")); 1970 em->pdeath_signal = args->arg2; 1971 break; 1972 case LINUX_PR_GET_PDEATHSIG: 1973 em = em_find(td); 1974 KASSERT(em != NULL, ("prctl: emuldata not found.\n")); 1975 pdeath_signal = em->pdeath_signal; 1976 error = copyout(&pdeath_signal, 1977 (void *)(register_t)args->arg2, 1978 sizeof(pdeath_signal)); 1979 break; 1980 case LINUX_PR_GET_KEEPCAPS: 1981 /* 1982 * Indicate that we always clear the effective and 1983 * permitted capability sets when the user id becomes 1984 * non-zero (actually the capability sets are simply 1985 * always zero in the current implementation). 1986 */ 1987 td->td_retval[0] = 0; 1988 break; 1989 case LINUX_PR_SET_KEEPCAPS: 1990 /* 1991 * Ignore requests to keep the effective and permitted 1992 * capability sets when the user id becomes non-zero. 1993 */ 1994 break; 1995 case LINUX_PR_SET_NAME: 1996 /* 1997 * To be on the safe side we need to make sure to not 1998 * overflow the size a linux program expects. We already 1999 * do this here in the copyin, so that we don't need to 2000 * check on copyout. 2001 */ 2002 max_size = MIN(sizeof(comm), sizeof(p->p_comm)); 2003 error = copyinstr((void *)(register_t)args->arg2, comm, 2004 max_size, NULL); 2005 2006 /* Linux silently truncates the name if it is too long. */ 2007 if (error == ENAMETOOLONG) { 2008 /* 2009 * XXX: copyinstr() isn't documented to populate the 2010 * array completely, so do a copyin() to be on the 2011 * safe side. This should be changed in case 2012 * copyinstr() is changed to guarantee this. 
2013 */ 2014 error = copyin((void *)(register_t)args->arg2, comm, 2015 max_size - 1); 2016 comm[max_size - 1] = '\0'; 2017 } 2018 if (error) 2019 return (error); 2020 2021 PROC_LOCK(p); 2022 strlcpy(p->p_comm, comm, sizeof(p->p_comm)); 2023 PROC_UNLOCK(p); 2024 break; 2025 case LINUX_PR_GET_NAME: 2026 PROC_LOCK(p); 2027 strlcpy(comm, p->p_comm, sizeof(comm)); 2028 PROC_UNLOCK(p); 2029 error = copyout(comm, (void *)(register_t)args->arg2, 2030 strlen(comm) + 1); 2031 break; 2032 default: 2033 error = EINVAL; 2034 break; 2035 } 2036 2037 return (error); 2038 } 2039 2040 int 2041 linux_sched_setparam(struct thread *td, 2042 struct linux_sched_setparam_args *uap) 2043 { 2044 struct sched_param sched_param; 2045 struct thread *tdt; 2046 int error; 2047 2048 #ifdef DEBUG 2049 if (ldebug(sched_setparam)) 2050 printf(ARGS(sched_setparam, "%d, *"), uap->pid); 2051 #endif 2052 2053 error = copyin(uap->param, &sched_param, sizeof(sched_param)); 2054 if (error) 2055 return (error); 2056 2057 tdt = linux_tdfind(td, uap->pid, -1); 2058 if (tdt == NULL) 2059 return (ESRCH); 2060 2061 error = kern_sched_setparam(td, tdt, &sched_param); 2062 PROC_UNLOCK(tdt->td_proc); 2063 return (error); 2064 } 2065 2066 int 2067 linux_sched_getparam(struct thread *td, 2068 struct linux_sched_getparam_args *uap) 2069 { 2070 struct sched_param sched_param; 2071 struct thread *tdt; 2072 int error; 2073 2074 #ifdef DEBUG 2075 if (ldebug(sched_getparam)) 2076 printf(ARGS(sched_getparam, "%d, *"), uap->pid); 2077 #endif 2078 2079 tdt = linux_tdfind(td, uap->pid, -1); 2080 if (tdt == NULL) 2081 return (ESRCH); 2082 2083 error = kern_sched_getparam(td, tdt, &sched_param); 2084 PROC_UNLOCK(tdt->td_proc); 2085 if (error == 0) 2086 error = copyout(&sched_param, uap->param, 2087 sizeof(sched_param)); 2088 return (error); 2089 } 2090 2091 /* 2092 * Get affinity of a process. 2093 */ 2094 int 2095 linux_sched_getaffinity(struct thread *td, 2096 struct linux_sched_getaffinity_args *args) 2097 { 2098 int error; 2099 struct thread *tdt; 2100 2101 #ifdef DEBUG 2102 if (ldebug(sched_getaffinity)) 2103 printf(ARGS(sched_getaffinity, "%d, %d, *"), args->pid, 2104 args->len); 2105 #endif 2106 if (args->len < sizeof(cpuset_t)) 2107 return (EINVAL); 2108 2109 tdt = linux_tdfind(td, args->pid, -1); 2110 if (tdt == NULL) 2111 return (ESRCH); 2112 2113 PROC_UNLOCK(tdt->td_proc); 2114 2115 error = kern_cpuset_getaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID, 2116 tdt->td_tid, sizeof(cpuset_t), (cpuset_t *)args->user_mask_ptr); 2117 if (error == 0) 2118 td->td_retval[0] = sizeof(cpuset_t); 2119 2120 return (error); 2121 } 2122 2123 /* 2124 * Set affinity of a process. 
2125 */ 2126 int 2127 linux_sched_setaffinity(struct thread *td, 2128 struct linux_sched_setaffinity_args *args) 2129 { 2130 struct thread *tdt; 2131 2132 #ifdef DEBUG 2133 if (ldebug(sched_setaffinity)) 2134 printf(ARGS(sched_setaffinity, "%d, %d, *"), args->pid, 2135 args->len); 2136 #endif 2137 if (args->len < sizeof(cpuset_t)) 2138 return (EINVAL); 2139 2140 tdt = linux_tdfind(td, args->pid, -1); 2141 if (tdt == NULL) 2142 return (ESRCH); 2143 2144 PROC_UNLOCK(tdt->td_proc); 2145 2146 return (kern_cpuset_setaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID, 2147 tdt->td_tid, sizeof(cpuset_t), (cpuset_t *) args->user_mask_ptr)); 2148 } 2149 2150 struct linux_rlimit64 { 2151 uint64_t rlim_cur; 2152 uint64_t rlim_max; 2153 }; 2154 2155 int 2156 linux_prlimit64(struct thread *td, struct linux_prlimit64_args *args) 2157 { 2158 struct rlimit rlim, nrlim; 2159 struct linux_rlimit64 lrlim; 2160 struct proc *p; 2161 u_int which; 2162 int flags; 2163 int error; 2164 2165 #ifdef DEBUG 2166 if (ldebug(prlimit64)) 2167 printf(ARGS(prlimit64, "%d, %d, %p, %p"), args->pid, 2168 args->resource, (void *)args->new, (void *)args->old); 2169 #endif 2170 2171 if (args->resource >= LINUX_RLIM_NLIMITS) 2172 return (EINVAL); 2173 2174 which = linux_to_bsd_resource[args->resource]; 2175 if (which == -1) 2176 return (EINVAL); 2177 2178 if (args->new != NULL) { 2179 /* 2180 * Note. Unlike FreeBSD where rlim is signed 64-bit Linux 2181 * rlim is unsigned 64-bit. FreeBSD treats negative limits 2182 * as INFINITY so we do not need a conversion even. 2183 */ 2184 error = copyin(args->new, &nrlim, sizeof(nrlim)); 2185 if (error != 0) 2186 return (error); 2187 } 2188 2189 flags = PGET_HOLD | PGET_NOTWEXIT; 2190 if (args->new != NULL) 2191 flags |= PGET_CANDEBUG; 2192 else 2193 flags |= PGET_CANSEE; 2194 error = pget(args->pid, flags, &p); 2195 if (error != 0) 2196 return (error); 2197 2198 if (args->old != NULL) { 2199 PROC_LOCK(p); 2200 lim_rlimit_proc(p, which, &rlim); 2201 PROC_UNLOCK(p); 2202 if (rlim.rlim_cur == RLIM_INFINITY) 2203 lrlim.rlim_cur = LINUX_RLIM_INFINITY; 2204 else 2205 lrlim.rlim_cur = rlim.rlim_cur; 2206 if (rlim.rlim_max == RLIM_INFINITY) 2207 lrlim.rlim_max = LINUX_RLIM_INFINITY; 2208 else 2209 lrlim.rlim_max = rlim.rlim_max; 2210 error = copyout(&lrlim, args->old, sizeof(lrlim)); 2211 if (error != 0) 2212 goto out; 2213 } 2214 2215 if (args->new != NULL) 2216 error = kern_proc_setrlimit(td, p, which, &nrlim); 2217 2218 out: 2219 PRELE(p); 2220 return (error); 2221 } 2222 2223 int 2224 linux_pselect6(struct thread *td, struct linux_pselect6_args *args) 2225 { 2226 struct timeval utv, tv0, tv1, *tvp; 2227 struct l_pselect6arg lpse6; 2228 struct l_timespec lts; 2229 struct timespec uts; 2230 l_sigset_t l_ss; 2231 sigset_t *ssp; 2232 sigset_t ss; 2233 int error; 2234 2235 ssp = NULL; 2236 if (args->sig != NULL) { 2237 error = copyin(args->sig, &lpse6, sizeof(lpse6)); 2238 if (error != 0) 2239 return (error); 2240 if (lpse6.ss_len != sizeof(l_ss)) 2241 return (EINVAL); 2242 if (lpse6.ss != 0) { 2243 error = copyin(PTRIN(lpse6.ss), &l_ss, 2244 sizeof(l_ss)); 2245 if (error != 0) 2246 return (error); 2247 linux_to_bsd_sigset(&l_ss, &ss); 2248 ssp = &ss; 2249 } 2250 } 2251 2252 /* 2253 * Currently glibc changes nanosecond number to microsecond. 2254 * This mean losing precision but for now it is hardly seen. 
	if (args->tsp != NULL) {
		error = copyin(args->tsp, &lts, sizeof(lts));
		if (error != 0)
			return (error);
		error = linux_to_native_timespec(&uts, &lts);
		if (error != 0)
			return (error);

		TIMESPEC_TO_TIMEVAL(&utv, &uts);
		if (itimerfix(&utv))
			return (EINVAL);

		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_pselect(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, ssp, LINUX_NFDBITS);

	if (error == 0 && args->tsp != NULL) {
		if (td->td_retval[0] != 0) {
			/*
			 * Compute how much of the timeout is left by taking
			 * the difference between the current time and the
			 * time before we started the call, and subtracting
			 * that elapsed time from the user-supplied value.
			 */

			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);

		TIMEVAL_TO_TIMESPEC(&utv, &uts);

		error = native_to_linux_timespec(&lts, &uts);
		if (error == 0)
			error = copyout(&lts, args->tsp, sizeof(lts));
	}

	return (error);
}

int
linux_ppoll(struct thread *td, struct linux_ppoll_args *args)
{
	struct timespec ts0, ts1;
	struct l_timespec lts;
	struct timespec uts, *tsp;
	l_sigset_t l_ss;
	sigset_t *ssp;
	sigset_t ss;
	int error;

	if (args->sset != NULL) {
		if (args->ssize != sizeof(l_ss))
			return (EINVAL);
		error = copyin(args->sset, &l_ss, sizeof(l_ss));
		if (error)
			return (error);
		linux_to_bsd_sigset(&l_ss, &ss);
		ssp = &ss;
	} else
		ssp = NULL;
	if (args->tsp != NULL) {
		error = copyin(args->tsp, &lts, sizeof(lts));
		if (error)
			return (error);
		error = linux_to_native_timespec(&uts, &lts);
		if (error != 0)
			return (error);

		nanotime(&ts0);
		tsp = &uts;
	} else
		tsp = NULL;

	error = kern_poll(td, args->fds, args->nfds, tsp, ssp);

	if (error == 0 && args->tsp != NULL) {
		if (td->td_retval[0]) {
			nanotime(&ts1);
			timespecsub(&ts1, &ts0);
			timespecsub(&uts, &ts1);
			if (uts.tv_sec < 0)
				timespecclear(&uts);
		} else
			timespecclear(&uts);

		error = native_to_linux_timespec(&lts, &uts);
		if (error == 0)
			error = copyout(&lts, args->tsp, sizeof(lts));
	}

	return (error);
}

#if defined(DEBUG) || defined(KTR)
/* XXX: can be removed when all the ldebug(...) and KTR uses are removed. */

#ifdef COMPAT_LINUX32
#define	L_MAXSYSCALL	LINUX32_SYS_MAXSYSCALL
#else
#define	L_MAXSYSCALL	LINUX_SYS_MAXSYSCALL
#endif

u_char linux_debug_map[howmany(L_MAXSYSCALL, sizeof(u_char))];

static int
linux_debug(int syscall, int toggle, int global)
{

	if (global) {
		char c = toggle ? 0 : 0xff;

		memset(linux_debug_map, c, sizeof(linux_debug_map));
		return (0);
	}
	if (syscall < 0 || syscall >= L_MAXSYSCALL)
		return (EINVAL);
	if (toggle)
		clrbit(linux_debug_map, syscall);
	else
		setbit(linux_debug_map, syscall);
	return (0);
}
#undef L_MAXSYSCALL

/*
 * Usage: sysctl linux.debug=<syscall_nr>.<0/1>
 *
 * E.g.: sysctl linux.debug=21.0
 *
 * As a special case, syscall "all" will apply to all syscalls globally.
 */
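/*
 * For example, assuming ldebug() treats a cleared bit in linux_debug_map
 * as "debugging enabled" for that syscall (see linux_debug() above):
 *
 *   sysctl linux.debug=21.1    enable debug output for syscall 21
 *   sysctl linux.debug=all.0   disable debug output for all syscalls
 */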
#define	LINUX_MAX_DEBUGSTR	16
int
linux_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	char value[LINUX_MAX_DEBUGSTR], *p;
	int error, sysc, toggle;
	int global = 0;

	value[0] = '\0';
	error = sysctl_handle_string(oidp, value, LINUX_MAX_DEBUGSTR, req);
	if (error || req->newptr == NULL)
		return (error);
	for (p = value; *p != '\0' && *p != '.'; p++);
	if (*p == '\0')
		return (EINVAL);
	*p++ = '\0';
	sysc = strtol(value, NULL, 0);
	toggle = strtol(p, NULL, 0);
	if (strcmp(value, "all") == 0)
		global = 1;
	error = linux_debug(sysc, toggle, global);
	return (error);
}

#endif /* DEBUG || KTR */

int
linux_sched_rr_get_interval(struct thread *td,
    struct linux_sched_rr_get_interval_args *uap)
{
	struct timespec ts;
	struct l_timespec lts;
	struct thread *tdt;
	int error;

	/*
	 * According to the man page, EINVAL should be returned when an
	 * invalid pid is specified.
	 */
	if (uap->pid < 0)
		return (EINVAL);

	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_rr_get_interval_td(td, tdt, &ts);
	PROC_UNLOCK(tdt->td_proc);
	if (error != 0)
		return (error);
	error = native_to_linux_timespec(&lts, &ts);
	if (error != 0)
		return (error);
	return (copyout(&lts, uap->interval, sizeof(lts)));
}

/*
 * When a Linux thread is the initial thread in its thread group,
 * the thread id is equal to the process id.  Glibc depends on this
 * magic (there is an assert in pthread_getattr_np.c).
 */
struct thread *
linux_tdfind(struct thread *td, lwpid_t tid, pid_t pid)
{
	struct linux_emuldata *em;
	struct thread *tdt;
	struct proc *p;

	tdt = NULL;
	if (tid == 0 || tid == td->td_tid) {
		tdt = td;
		PROC_LOCK(tdt->td_proc);
	} else if (tid > PID_MAX)
		tdt = tdfind(tid, pid);
	else {
		/*
		 * Initial thread, whose tid is equal to the pid.
		 */
		p = pfind(tid);
		if (p != NULL) {
			if (SV_PROC_ABI(p) != SV_ABI_LINUX) {
				/*
				 * p is not a Linuxulator process.
				 */
				PROC_UNLOCK(p);
				return (NULL);
			}
			FOREACH_THREAD_IN_PROC(p, tdt) {
				em = em_find(tdt);
				if (tid == em->em_tid)
					return (tdt);
			}
			PROC_UNLOCK(p);
		}
		return (NULL);
	}

	return (tdt);
}

/*
 * Translate Linux wait(2) option flags into their FreeBSD equivalents.
 */
void
linux_to_bsd_waitopts(int options, int *bsdopts)
{

	if (options & LINUX_WNOHANG)
		*bsdopts |= WNOHANG;
	if (options & LINUX_WUNTRACED)
		*bsdopts |= WUNTRACED;
	if (options & LINUX_WEXITED)
		*bsdopts |= WEXITED;
	if (options & LINUX_WCONTINUED)
		*bsdopts |= WCONTINUED;
	if (options & LINUX_WNOWAIT)
		*bsdopts |= WNOWAIT;

	if (options & __WCLONE)
		*bsdopts |= WLINUXCLONE;
}

/*
 * Fill the user-supplied buffer with random bytes from the kernel
 * random device.
 */
int
linux_getrandom(struct thread *td, struct linux_getrandom_args *args)
{
	struct uio uio;
	struct iovec iov;
	int error;

	if (args->flags & ~(LINUX_GRND_NONBLOCK|LINUX_GRND_RANDOM))
		return (EINVAL);
	if (args->count > INT_MAX)
		args->count = INT_MAX;

	iov.iov_base = args->buf;
	iov.iov_len = args->count;

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = iov.iov_len;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = read_random_uio(&uio, args->flags & LINUX_GRND_NONBLOCK);
	if (error == 0)
		td->td_retval[0] = args->count - uio.uio_resid;
	return (error);
}

int
linux_mincore(struct thread *td, struct linux_mincore_args *args)
{

	/* The address needs to be page-aligned. */
	if (args->start & PAGE_MASK)
		return (EINVAL);
	return (kern_mincore(td, args->start, args->len, args->vec));
}