/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/blist.h>
#include <sys/fcntl.h>
#if defined(__i386__)
#include <sys/imgact_aout.h>
#endif
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/reboot.h>
#include <sys/racct.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#include <sys/cpuset.h>
#include <sys/uio.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif

#include <compat/linux/linux_dtrace.h>
#include <compat/linux/linux_file.h>
#include <compat/linux/linux_mib.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_timer.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_sysproto.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_misc.h>

/**
 * Special DTrace provider for the linuxulator.
 *
 * In this file we define the provider for the entire linuxulator. All
 * modules (= files of the linuxulator) use it.
 *
 * We define a different name depending on the emulated bitsize, see
 * ../../<ARCH>/linux{,32}/linux.h, e.g.:
 *	native bitsize		= linuxulator
 *	amd64, 32bit emulation	= linuxulator32
 */
LIN_SDT_PROVIDER_DEFINE(LINUX_DTRACE);

int stclohz;				/* Statistics clock frequency */

static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, RLIMIT_AS
};

struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
#define LINUX_SYSINFO_LOADS_SCALE 65536
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* Swap space still available */
	l_ushort	procs;		/* Number of current processes */
	l_ushort	pads;
	l_ulong		totalbig;
	l_ulong		freebig;
	l_uint		mem_unit;
	char		_f[20-2*sizeof(l_long)-sizeof(l_int)];	/* padding */
};

struct l_pselect6arg {
	l_uintptr_t	ss;
	l_size_t	ss_len;
};

static int	linux_utimensat_nsec_valid(l_long);

int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	int i, j;
	struct timespec ts;

	bzero(&sysinfo, sizeof(sysinfo));
	getnanouptime(&ts);
	if (ts.tv_nsec != 0)
		ts.tv_sec++;
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages. */
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i] *
		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;

	sysinfo.totalram = physmem * PAGE_SIZE;
	sysinfo.freeram = sysinfo.totalram - vm_wire_count() * PAGE_SIZE;

	sysinfo.sharedram = 0;
	sysinfo.bufferram = 0;

	swap_pager_status(&i, &j);
	sysinfo.totalswap = i * PAGE_SIZE;
	sysinfo.freeswap = (i - j) * PAGE_SIZE;

	sysinfo.procs = nprocs;

	/* The following are only present in newer Linux kernels. */
	sysinfo.totalbig = 0;
	sysinfo.freebig = 0;
	sysinfo.mem_unit = 1;

	return (copyout(&sysinfo, args->info, sizeof(sysinfo)));
}
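
/*
 * Worked example (illustrative, not from the original source): the
 * kernel keeps load averages as fixed-point values scaled by
 * averunnable.fscale (2048 by default), while Linux's sysinfo(2)
 * scales by LINUX_SYSINFO_LOADS_SCALE (65536).  A one-minute load of
 * 1.50 is therefore stored as ldavg[0] == 3072 and reported to Linux
 * userland as 3072 * 65536 / 2048 == 98304, i.e. 1.50 in Linux's
 * fixed-point representation.
 */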

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_alarm(struct thread *td, struct linux_alarm_args *args)
{
	struct itimerval it, old_it;
	u_int secs;
	int error;

	secs = args->secs;
	/*
	 * Linux alarm() is always successful.  Limit secs to INT32_MAX / 2
	 * to match kern_setitimer()'s limit and avoid an error from it.
	 *
	 * XXX. Linux limits secs to INT_MAX on 32-bit platforms and does
	 * not limit it at all on 64-bit platforms.
	 */
	if (secs > INT32_MAX / 2)
		secs = INT32_MAX / 2;

	it.it_value.tv_sec = secs;
	it.it_value.tv_usec = 0;
	timevalclear(&it.it_interval);
	error = kern_setitimer(td, ITIMER_REAL, &it, &old_it);
	KASSERT(error == 0, ("kern_setitimer returns %d", error));

	if ((old_it.it_value.tv_sec == 0 && old_it.it_value.tv_usec > 0) ||
	    old_it.it_value.tv_usec >= 500000)
		old_it.it_value.tv_sec++;
	td->td_retval[0] = old_it.it_value.tv_sec;
	return (0);
}
#endif

int
linux_brk(struct thread *td, struct linux_brk_args *args)
{
	struct vmspace *vm = td->td_proc->p_vmspace;
	uintptr_t new, old;

	old = (uintptr_t)vm->vm_daddr + ctob(vm->vm_dsize);
	new = (uintptr_t)args->dsend;
	if ((caddr_t)new > vm->vm_daddr && !kern_break(td, &new))
		td->td_retval[0] = (register_t)new;
	else
		td->td_retval[0] = (register_t)old;

	return (0);
}
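
/*
 * Illustrative note (an assumption about typical userland behaviour,
 * not part of the original source): the raw Linux brk(2) syscall
 * returns the new program break on success and the current break on
 * failure, so a libc typically discovers the current break with
 * brk(0) and checks a grow request by comparing the returned value
 * against the requested one, e.g.:
 *
 *	old = brk(0);
 *	if (brk(old + 4096) < old + 4096)
 *		... allocation failed, break unchanged ...
 *
 * The implementation above preserves that contract by returning the
 * old break whenever kern_break() fails.
 */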

#if defined(__i386__)
/* XXX: what about amd64/linux32? */

int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	vm_map_t map;
	vm_map_entry_t entry;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	unsigned long bss_size;
	char *library;
	ssize_t aresid;
	int error;
	bool locked, opened, textset;

	LCONVPATHEXIST(td, args->library, &library);

	a_out = NULL;
	vp = NULL;
	locked = false;
	textset = false;
	opened = false;

	NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
	    UIO_SYSSPACE, library, td);
	error = namei(&ni);
	LFREEPATH(library);
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	NDFREE(&ni, NDF_ONLY_PNBUF);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 * XXX: The code below largely duplicates exec_check_permissions().
	 */
	locked = true;

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		/* EACCES is what exec(2) returns. */
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 * XXX: Not really, it duplicates far more of exec_check_permissions()
	 * than vn_open().
	 */
#ifdef MAC
	error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error)
		goto cleanup;
	opened = true;

	/* Pull the executable header into exec_map. */
	error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
	if (error)
		goto cleanup;

	/* Is it a Linux binary? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks.
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:	/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:	/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in the header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete. it should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur_proc(td->td_proc, RLIMIT_DATA) ||
	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
	    bss_size) != 0) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * Prevent more writers.
	 */
	error = VOP_SET_TEXT(vp);
	if (error != 0)
		goto cleanup;
	textset = true;

	/*
	 * Lock no longer needed.
	 */
	locked = false;
	VOP_UNLOCK(vp);

	/*
	 * Check if file_offset is page aligned.  Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
		    VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
		    td->td_ucred, NOCRED, &aresid, td);
		if (error != 0)
			goto cleanup;
		if (aresid != 0) {
			error = ENOEXEC;
			goto cleanup;
		}
	} else {
		/*
		 * For QMAGIC, a_entry is 20 bytes beyond the load address,
		 * to skip the executable header.
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		map = &td->td_proc->p_vmspace->vm_map;
		error = vm_mmap(map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
		if (error)
			goto cleanup;
		vm_map_lock(map);
		if (!vm_map_lookup_entry(map, vmaddr, &entry)) {
			vm_map_unlock(map);
			error = EDOOFUS;
			goto cleanup;
		}
		entry->eflags |= MAP_ENTRY_VN_EXEC;
		vm_map_unlock(map);
		textset = false;
	}
429 */ 430 map = &td->td_proc->p_vmspace->vm_map; 431 error = vm_mmap(map, &vmaddr, 432 a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL, 433 MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset); 434 if (error) 435 goto cleanup; 436 vm_map_lock(map); 437 if (!vm_map_lookup_entry(map, vmaddr, &entry)) { 438 vm_map_unlock(map); 439 error = EDOOFUS; 440 goto cleanup; 441 } 442 entry->eflags |= MAP_ENTRY_VN_EXEC; 443 vm_map_unlock(map); 444 textset = false; 445 } 446 447 if (bss_size != 0) { 448 /* Calculate BSS start address */ 449 vmaddr = trunc_page(a_out->a_entry) + a_out->a_text + 450 a_out->a_data; 451 452 /* allocate some 'anon' space */ 453 error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0, 454 &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL, 455 VM_PROT_ALL, 0); 456 if (error) 457 goto cleanup; 458 } 459 460 cleanup: 461 if (opened) { 462 if (locked) 463 VOP_UNLOCK(vp); 464 locked = false; 465 VOP_CLOSE(vp, FREAD, td->td_ucred, td); 466 } 467 if (textset) { 468 if (!locked) { 469 locked = true; 470 VOP_LOCK(vp, LK_SHARED | LK_RETRY); 471 } 472 VOP_UNSET_TEXT_CHECKED(vp); 473 } 474 if (locked) 475 VOP_UNLOCK(vp); 476 477 /* Release the temporary mapping. */ 478 if (a_out) 479 kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE); 480 481 return (error); 482 } 483 484 #endif /* __i386__ */ 485 486 #ifdef LINUX_LEGACY_SYSCALLS 487 int 488 linux_select(struct thread *td, struct linux_select_args *args) 489 { 490 l_timeval ltv; 491 struct timeval tv0, tv1, utv, *tvp; 492 int error; 493 494 /* 495 * Store current time for computation of the amount of 496 * time left. 497 */ 498 if (args->timeout) { 499 if ((error = copyin(args->timeout, <v, sizeof(ltv)))) 500 goto select_out; 501 utv.tv_sec = ltv.tv_sec; 502 utv.tv_usec = ltv.tv_usec; 503 504 if (itimerfix(&utv)) { 505 /* 506 * The timeval was invalid. Convert it to something 507 * valid that will act as it does under Linux. 508 */ 509 utv.tv_sec += utv.tv_usec / 1000000; 510 utv.tv_usec %= 1000000; 511 if (utv.tv_usec < 0) { 512 utv.tv_sec -= 1; 513 utv.tv_usec += 1000000; 514 } 515 if (utv.tv_sec < 0) 516 timevalclear(&utv); 517 } 518 microtime(&tv0); 519 tvp = &utv; 520 } else 521 tvp = NULL; 522 523 error = kern_select(td, args->nfds, args->readfds, args->writefds, 524 args->exceptfds, tvp, LINUX_NFDBITS); 525 if (error) 526 goto select_out; 527 528 if (args->timeout) { 529 if (td->td_retval[0]) { 530 /* 531 * Compute how much time was left of the timeout, 532 * by subtracting the current time and the time 533 * before we started the call, and subtracting 534 * that result from the user-supplied value. 535 */ 536 microtime(&tv1); 537 timevalsub(&tv1, &tv0); 538 timevalsub(&utv, &tv1); 539 if (utv.tv_sec < 0) 540 timevalclear(&utv); 541 } else 542 timevalclear(&utv); 543 ltv.tv_sec = utv.tv_sec; 544 ltv.tv_usec = utv.tv_usec; 545 if ((error = copyout(<v, args->timeout, sizeof(ltv)))) 546 goto select_out; 547 } 548 549 select_out: 550 return (error); 551 } 552 #endif 553 554 int 555 linux_mremap(struct thread *td, struct linux_mremap_args *args) 556 { 557 uintptr_t addr; 558 size_t len; 559 int error = 0; 560 561 if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) { 562 td->td_retval[0] = 0; 563 return (EINVAL); 564 } 565 566 /* 567 * Check for the page alignment. 568 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK. 
569 */ 570 if (args->addr & PAGE_MASK) { 571 td->td_retval[0] = 0; 572 return (EINVAL); 573 } 574 575 args->new_len = round_page(args->new_len); 576 args->old_len = round_page(args->old_len); 577 578 if (args->new_len > args->old_len) { 579 td->td_retval[0] = 0; 580 return (ENOMEM); 581 } 582 583 if (args->new_len < args->old_len) { 584 addr = args->addr + args->new_len; 585 len = args->old_len - args->new_len; 586 error = kern_munmap(td, addr, len); 587 } 588 589 td->td_retval[0] = error ? 0 : (uintptr_t)args->addr; 590 return (error); 591 } 592 593 #define LINUX_MS_ASYNC 0x0001 594 #define LINUX_MS_INVALIDATE 0x0002 595 #define LINUX_MS_SYNC 0x0004 596 597 int 598 linux_msync(struct thread *td, struct linux_msync_args *args) 599 { 600 601 return (kern_msync(td, args->addr, args->len, 602 args->fl & ~LINUX_MS_SYNC)); 603 } 604 605 #ifdef LINUX_LEGACY_SYSCALLS 606 int 607 linux_time(struct thread *td, struct linux_time_args *args) 608 { 609 struct timeval tv; 610 l_time_t tm; 611 int error; 612 613 microtime(&tv); 614 tm = tv.tv_sec; 615 if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm)))) 616 return (error); 617 td->td_retval[0] = tm; 618 return (0); 619 } 620 #endif 621 622 struct l_times_argv { 623 l_clock_t tms_utime; 624 l_clock_t tms_stime; 625 l_clock_t tms_cutime; 626 l_clock_t tms_cstime; 627 }; 628 629 630 /* 631 * Glibc versions prior to 2.2.1 always use hard-coded CLK_TCK value. 632 * Since 2.2.1 Glibc uses value exported from kernel via AT_CLKTCK 633 * auxiliary vector entry. 634 */ 635 #define CLK_TCK 100 636 637 #define CONVOTCK(r) (r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK)) 638 #define CONVNTCK(r) (r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz)) 639 640 #define CONVTCK(r) (linux_kernver(td) >= LINUX_KERNVER_2004000 ? 

int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv, utime, stime, cutime, cstime;
	struct l_times_argv tms;
	struct proc *p;
	int error;

	if (args->buf != NULL) {
		p = td->td_proc;
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &utime, &stime);
		PROC_STATUNLOCK(p);
		calccru(p, &cutime, &cstime);
		PROC_UNLOCK(p);

		tms.tms_utime = CONVTCK(utime);
		tms.tms_stime = CONVTCK(stime);

		tms.tms_cutime = CONVTCK(cutime);
		tms.tms_cstime = CONVTCK(cstime);

		if ((error = copyout(&tms, args->buf, sizeof(tms))))
			return (error);
	}

	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return (0);
}

int
linux_newuname(struct thread *td, struct linux_newuname_args *args)
{
	struct l_new_utsname utsname;
	char osname[LINUX_MAX_UTSNAME];
	char osrelease[LINUX_MAX_UTSNAME];
	char *p;

	linux_get_osname(td, osname);
	linux_get_osrelease(td, osrelease);

	bzero(&utsname, sizeof(utsname));
	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
	getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME);
	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
	for (p = utsname.version; *p != '\0'; ++p)
		if (*p == '\n') {
			*p = '\0';
			break;
		}
	strlcpy(utsname.machine, linux_kplatform, LINUX_MAX_UTSNAME);

	return (copyout(&utsname, args->buf, sizeof(utsname)));
}

struct l_utimbuf {
	l_time_t l_actime;
	l_time_t l_modtime;
};

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_utime(struct thread *td, struct linux_utime_args *args)
{
	struct timeval tv[2], *tvp;
	struct l_utimbuf lut;
	char *fname;
	int error;

	LCONVPATHEXIST(td, args->fname, &fname);

	if (args->times) {
		if ((error = copyin(args->times, &lut, sizeof lut))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = lut.l_actime;
		tv[0].tv_usec = 0;
		tv[1].tv_sec = lut.l_modtime;
		tv[1].tv_usec = 0;
		tvp = tv;
	} else
		tvp = NULL;

	error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE, tvp,
	    UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}
#endif

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_utimes(struct thread *td, struct linux_utimes_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error;

	LCONVPATHEXIST(td, args->fname, &fname);

	if (args->tptr != NULL) {
		if ((error = copyin(args->tptr, ltv, sizeof ltv))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE,
	    tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}
#endif

static int
linux_utimensat_nsec_valid(l_long nsec)
{

	if (nsec == LINUX_UTIME_OMIT || nsec == LINUX_UTIME_NOW)
		return (0);
	if (nsec >= 0 && nsec <= 999999999)
		return (0);
	return (1);
}

int
linux_utimensat(struct thread *td, struct linux_utimensat_args *args)
{
	struct l_timespec l_times[2];
	struct timespec times[2], *timesp = NULL;
	char *path = NULL;
	int error, dfd, flags = 0;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;

	if (args->flags & ~LINUX_AT_SYMLINK_NOFOLLOW)
		return (EINVAL);

	if (args->times != NULL) {
		error = copyin(args->times, l_times, sizeof(l_times));
		if (error != 0)
			return (error);

		if (linux_utimensat_nsec_valid(l_times[0].tv_nsec) != 0 ||
		    linux_utimensat_nsec_valid(l_times[1].tv_nsec) != 0)
			return (EINVAL);

		times[0].tv_sec = l_times[0].tv_sec;
		switch (l_times[0].tv_nsec) {
		case LINUX_UTIME_OMIT:
			times[0].tv_nsec = UTIME_OMIT;
			break;
		case LINUX_UTIME_NOW:
			times[0].tv_nsec = UTIME_NOW;
			break;
		default:
			times[0].tv_nsec = l_times[0].tv_nsec;
		}

		times[1].tv_sec = l_times[1].tv_sec;
		switch (l_times[1].tv_nsec) {
		case LINUX_UTIME_OMIT:
			times[1].tv_nsec = UTIME_OMIT;
			break;
		case LINUX_UTIME_NOW:
			times[1].tv_nsec = UTIME_NOW;
			break;
		default:
			times[1].tv_nsec = l_times[1].tv_nsec;
			break;
		}
		timesp = times;

		/*
		 * This breaks POSIX, but is what the Linux kernel does
		 * _on purpose_ (documented in the man page for utimensat(2)),
		 * so we must follow that behaviour.
		 */
		if (times[0].tv_nsec == UTIME_OMIT &&
		    times[1].tv_nsec == UTIME_OMIT)
			return (0);
	}

	if (args->pathname != NULL)
		LCONVPATHEXIST_AT(td, args->pathname, &path, dfd);
	else if (args->flags != 0)
		return (EINVAL);

	if (args->flags & LINUX_AT_SYMLINK_NOFOLLOW)
		flags |= AT_SYMLINK_NOFOLLOW;

	if (path == NULL)
		error = kern_futimens(td, dfd, timesp, UIO_SYSSPACE);
	else {
		error = kern_utimensat(td, dfd, path, UIO_SYSSPACE, timesp,
		    UIO_SYSSPACE, flags);
		LFREEPATH(path);
	}

	return (error);
}

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_futimesat(struct thread *td, struct linux_futimesat_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	char *fname;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
	LCONVPATHEXIST_AT(td, args->filename, &fname, dfd);

	if (args->utimes != NULL) {
		if ((error = copyin(args->utimes, ltv, sizeof ltv))) {
			LFREEPATH(fname);
			return (error);
		}
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
	LFREEPATH(fname);
	return (error);
}
#endif

static int
linux_common_wait(struct thread *td, int pid, int *statusp,
    int options, struct __wrusage *wrup)
{
	siginfo_t siginfo;
	idtype_t idtype;
	id_t id;
	int error, status, tmpstat;

	if (pid == WAIT_ANY) {
		idtype = P_ALL;
		id = 0;
	} else if (pid < 0) {
		idtype = P_PGID;
		id = (id_t)-pid;
	} else {
		idtype = P_PID;
		id = (id_t)pid;
	}

	/*
	 * For backward compatibility we implicitly add flags WEXITED
	 * and WTRAPPED here.
	 */
	options |= WEXITED | WTRAPPED;
	error = kern_wait6(td, idtype, id, &status, options, wrup, &siginfo);
	if (error)
		return (error);

	if (statusp) {
		tmpstat = status & 0xffff;
		if (WIFSIGNALED(tmpstat)) {
			tmpstat = (tmpstat & 0xffffff80) |
			    bsd_to_linux_signal(WTERMSIG(tmpstat));
		} else if (WIFSTOPPED(tmpstat)) {
			tmpstat = (tmpstat & 0xffff00ff) |
			    (bsd_to_linux_signal(WSTOPSIG(tmpstat)) << 8);
#if defined(__amd64__) && !defined(COMPAT_LINUX32)
			if (WSTOPSIG(status) == SIGTRAP) {
				tmpstat = linux_ptrace_status(td,
				    siginfo.si_pid, tmpstat);
			}
#endif
		} else if (WIFCONTINUED(tmpstat)) {
			tmpstat = 0xffff;
		}
		error = copyout(&tmpstat, statusp, sizeof(int));
	}

	return (error);
}
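
/*
 * Worked example (illustrative, not from the original source): for a
 * child stopped by SIGSTOP, kern_wait6() reports a status whose low
 * byte is 0177 and whose bits 8-15 hold the stop signal.  FreeBSD's
 * SIGSTOP (17) differs from Linux's (19), so the code above rewrites
 * 0x117f into 0x137f before copying it out; the termination signal of
 * a signalled child is translated in the low seven bits the same way,
 * preserving the core-dump flag in bit 7.
 */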
909 */ 910 options |= WEXITED | WTRAPPED; 911 error = kern_wait6(td, idtype, id, &status, options, wrup, &siginfo); 912 if (error) 913 return (error); 914 915 if (statusp) { 916 tmpstat = status & 0xffff; 917 if (WIFSIGNALED(tmpstat)) { 918 tmpstat = (tmpstat & 0xffffff80) | 919 bsd_to_linux_signal(WTERMSIG(tmpstat)); 920 } else if (WIFSTOPPED(tmpstat)) { 921 tmpstat = (tmpstat & 0xffff00ff) | 922 (bsd_to_linux_signal(WSTOPSIG(tmpstat)) << 8); 923 #if defined(__amd64__) && !defined(COMPAT_LINUX32) 924 if (WSTOPSIG(status) == SIGTRAP) { 925 tmpstat = linux_ptrace_status(td, 926 siginfo.si_pid, tmpstat); 927 } 928 #endif 929 } else if (WIFCONTINUED(tmpstat)) { 930 tmpstat = 0xffff; 931 } 932 error = copyout(&tmpstat, statusp, sizeof(int)); 933 } 934 935 return (error); 936 } 937 938 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 939 int 940 linux_waitpid(struct thread *td, struct linux_waitpid_args *args) 941 { 942 struct linux_wait4_args wait4_args; 943 944 wait4_args.pid = args->pid; 945 wait4_args.status = args->status; 946 wait4_args.options = args->options; 947 wait4_args.rusage = NULL; 948 949 return (linux_wait4(td, &wait4_args)); 950 } 951 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 952 953 int 954 linux_wait4(struct thread *td, struct linux_wait4_args *args) 955 { 956 int error, options; 957 struct __wrusage wru, *wrup; 958 959 if (args->options & ~(LINUX_WUNTRACED | LINUX_WNOHANG | 960 LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL)) 961 return (EINVAL); 962 963 options = WEXITED; 964 linux_to_bsd_waitopts(args->options, &options); 965 966 if (args->rusage != NULL) 967 wrup = &wru; 968 else 969 wrup = NULL; 970 error = linux_common_wait(td, args->pid, args->status, options, wrup); 971 if (error != 0) 972 return (error); 973 if (args->rusage != NULL) 974 error = linux_copyout_rusage(&wru.wru_self, args->rusage); 975 return (error); 976 } 977 978 int 979 linux_waitid(struct thread *td, struct linux_waitid_args *args) 980 { 981 int status, options, sig; 982 struct __wrusage wru; 983 siginfo_t siginfo; 984 l_siginfo_t lsi; 985 idtype_t idtype; 986 struct proc *p; 987 int error; 988 989 options = 0; 990 linux_to_bsd_waitopts(args->options, &options); 991 992 if (options & ~(WNOHANG | WNOWAIT | WEXITED | WUNTRACED | WCONTINUED)) 993 return (EINVAL); 994 if (!(options & (WEXITED | WUNTRACED | WCONTINUED))) 995 return (EINVAL); 996 997 switch (args->idtype) { 998 case LINUX_P_ALL: 999 idtype = P_ALL; 1000 break; 1001 case LINUX_P_PID: 1002 if (args->id <= 0) 1003 return (EINVAL); 1004 idtype = P_PID; 1005 break; 1006 case LINUX_P_PGID: 1007 if (args->id <= 0) 1008 return (EINVAL); 1009 idtype = P_PGID; 1010 break; 1011 default: 1012 return (EINVAL); 1013 } 1014 1015 error = kern_wait6(td, idtype, args->id, &status, options, 1016 &wru, &siginfo); 1017 if (error != 0) 1018 return (error); 1019 if (args->rusage != NULL) { 1020 error = linux_copyout_rusage(&wru.wru_children, 1021 args->rusage); 1022 if (error != 0) 1023 return (error); 1024 } 1025 if (args->info != NULL) { 1026 p = td->td_proc; 1027 bzero(&lsi, sizeof(lsi)); 1028 if (td->td_retval[0] != 0) { 1029 sig = bsd_to_linux_signal(siginfo.si_signo); 1030 siginfo_to_lsiginfo(&siginfo, &lsi, sig); 1031 } 1032 error = copyout(&lsi, args->info, sizeof(lsi)); 1033 } 1034 td->td_retval[0] = 0; 1035 1036 return (error); 1037 } 1038 1039 #ifdef LINUX_LEGACY_SYSCALLS 1040 int 1041 linux_mknod(struct thread *td, struct linux_mknod_args *args) 1042 { 1043 char *path; 1044 int error; 1045 1046 

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	char *path;
	int error;

	LCONVPATHCREAT(td, args->path, &path);

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, AT_FDCWD, path, UIO_SYSSPACE,
		    args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, AT_FDCWD, path, UIO_SYSSPACE,
		    args->mode, args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, AT_FDCWD, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}
#endif

int
linux_mknodat(struct thread *td, struct linux_mknodat_args *args)
{
	char *path;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
	LCONVPATHCREAT_AT(td, args->filename, &path, dfd);

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, dfd, path, UIO_SYSSPACE, args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, dfd, path, UIO_SYSSPACE, args->mode,
		    args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, dfd, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}
1129 */ 1130 int 1131 linux_personality(struct thread *td, struct linux_personality_args *args) 1132 { 1133 struct linux_pemuldata *pem; 1134 struct proc *p = td->td_proc; 1135 uint32_t old; 1136 1137 PROC_LOCK(p); 1138 pem = pem_find(p); 1139 old = pem->persona; 1140 if (args->per != 0xffffffff) 1141 pem->persona = args->per; 1142 PROC_UNLOCK(p); 1143 1144 td->td_retval[0] = old; 1145 return (0); 1146 } 1147 1148 struct l_itimerval { 1149 l_timeval it_interval; 1150 l_timeval it_value; 1151 }; 1152 1153 #define B2L_ITIMERVAL(bip, lip) \ 1154 (bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec; \ 1155 (bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec; \ 1156 (bip)->it_value.tv_sec = (lip)->it_value.tv_sec; \ 1157 (bip)->it_value.tv_usec = (lip)->it_value.tv_usec; 1158 1159 int 1160 linux_setitimer(struct thread *td, struct linux_setitimer_args *uap) 1161 { 1162 int error; 1163 struct l_itimerval ls; 1164 struct itimerval aitv, oitv; 1165 1166 if (uap->itv == NULL) { 1167 uap->itv = uap->oitv; 1168 return (linux_getitimer(td, (struct linux_getitimer_args *)uap)); 1169 } 1170 1171 error = copyin(uap->itv, &ls, sizeof(ls)); 1172 if (error != 0) 1173 return (error); 1174 B2L_ITIMERVAL(&aitv, &ls); 1175 error = kern_setitimer(td, uap->which, &aitv, &oitv); 1176 if (error != 0 || uap->oitv == NULL) 1177 return (error); 1178 B2L_ITIMERVAL(&ls, &oitv); 1179 1180 return (copyout(&ls, uap->oitv, sizeof(ls))); 1181 } 1182 1183 int 1184 linux_getitimer(struct thread *td, struct linux_getitimer_args *uap) 1185 { 1186 int error; 1187 struct l_itimerval ls; 1188 struct itimerval aitv; 1189 1190 error = kern_getitimer(td, uap->which, &aitv); 1191 if (error != 0) 1192 return (error); 1193 B2L_ITIMERVAL(&ls, &aitv); 1194 return (copyout(&ls, uap->itv, sizeof(ls))); 1195 } 1196 1197 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 1198 int 1199 linux_nice(struct thread *td, struct linux_nice_args *args) 1200 { 1201 1202 return (kern_setpriority(td, PRIO_PROCESS, 0, args->inc)); 1203 } 1204 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 1205 1206 int 1207 linux_setgroups(struct thread *td, struct linux_setgroups_args *args) 1208 { 1209 struct ucred *newcred, *oldcred; 1210 l_gid_t *linux_gidset; 1211 gid_t *bsd_gidset; 1212 int ngrp, error; 1213 struct proc *p; 1214 1215 ngrp = args->gidsetsize; 1216 if (ngrp < 0 || ngrp >= ngroups_max + 1) 1217 return (EINVAL); 1218 linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK); 1219 error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t)); 1220 if (error) 1221 goto out; 1222 newcred = crget(); 1223 crextend(newcred, ngrp + 1); 1224 p = td->td_proc; 1225 PROC_LOCK(p); 1226 oldcred = p->p_ucred; 1227 crcopy(newcred, oldcred); 1228 1229 /* 1230 * cr_groups[0] holds egid. Setting the whole set from 1231 * the supplied set will cause egid to be changed too. 1232 * Keep cr_groups[0] unchanged to prevent that. 
1233 */ 1234 1235 if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS)) != 0) { 1236 PROC_UNLOCK(p); 1237 crfree(newcred); 1238 goto out; 1239 } 1240 1241 if (ngrp > 0) { 1242 newcred->cr_ngroups = ngrp + 1; 1243 1244 bsd_gidset = newcred->cr_groups; 1245 ngrp--; 1246 while (ngrp >= 0) { 1247 bsd_gidset[ngrp + 1] = linux_gidset[ngrp]; 1248 ngrp--; 1249 } 1250 } else 1251 newcred->cr_ngroups = 1; 1252 1253 setsugid(p); 1254 proc_set_cred(p, newcred); 1255 PROC_UNLOCK(p); 1256 crfree(oldcred); 1257 error = 0; 1258 out: 1259 free(linux_gidset, M_LINUX); 1260 return (error); 1261 } 1262 1263 int 1264 linux_getgroups(struct thread *td, struct linux_getgroups_args *args) 1265 { 1266 struct ucred *cred; 1267 l_gid_t *linux_gidset; 1268 gid_t *bsd_gidset; 1269 int bsd_gidsetsz, ngrp, error; 1270 1271 cred = td->td_ucred; 1272 bsd_gidset = cred->cr_groups; 1273 bsd_gidsetsz = cred->cr_ngroups - 1; 1274 1275 /* 1276 * cr_groups[0] holds egid. Returning the whole set 1277 * here will cause a duplicate. Exclude cr_groups[0] 1278 * to prevent that. 1279 */ 1280 1281 if ((ngrp = args->gidsetsize) == 0) { 1282 td->td_retval[0] = bsd_gidsetsz; 1283 return (0); 1284 } 1285 1286 if (ngrp < bsd_gidsetsz) 1287 return (EINVAL); 1288 1289 ngrp = 0; 1290 linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset), 1291 M_LINUX, M_WAITOK); 1292 while (ngrp < bsd_gidsetsz) { 1293 linux_gidset[ngrp] = bsd_gidset[ngrp + 1]; 1294 ngrp++; 1295 } 1296 1297 error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t)); 1298 free(linux_gidset, M_LINUX); 1299 if (error) 1300 return (error); 1301 1302 td->td_retval[0] = ngrp; 1303 return (0); 1304 } 1305 1306 int 1307 linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args) 1308 { 1309 struct rlimit bsd_rlim; 1310 struct l_rlimit rlim; 1311 u_int which; 1312 int error; 1313 1314 if (args->resource >= LINUX_RLIM_NLIMITS) 1315 return (EINVAL); 1316 1317 which = linux_to_bsd_resource[args->resource]; 1318 if (which == -1) 1319 return (EINVAL); 1320 1321 error = copyin(args->rlim, &rlim, sizeof(rlim)); 1322 if (error) 1323 return (error); 1324 1325 bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur; 1326 bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max; 1327 return (kern_setrlimit(td, which, &bsd_rlim)); 1328 } 1329 1330 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) 1331 int 1332 linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args) 1333 { 1334 struct l_rlimit rlim; 1335 struct rlimit bsd_rlim; 1336 u_int which; 1337 1338 if (args->resource >= LINUX_RLIM_NLIMITS) 1339 return (EINVAL); 1340 1341 which = linux_to_bsd_resource[args->resource]; 1342 if (which == -1) 1343 return (EINVAL); 1344 1345 lim_rlimit(td, which, &bsd_rlim); 1346 1347 #ifdef COMPAT_LINUX32 1348 rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur; 1349 if (rlim.rlim_cur == UINT_MAX) 1350 rlim.rlim_cur = INT_MAX; 1351 rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max; 1352 if (rlim.rlim_max == UINT_MAX) 1353 rlim.rlim_max = INT_MAX; 1354 #else 1355 rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur; 1356 if (rlim.rlim_cur == ULONG_MAX) 1357 rlim.rlim_cur = LONG_MAX; 1358 rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max; 1359 if (rlim.rlim_max == ULONG_MAX) 1360 rlim.rlim_max = LONG_MAX; 1361 #endif 1362 return (copyout(&rlim, args->rlim, sizeof(rlim))); 1363 } 1364 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ 1365 1366 int 1367 linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args) 1368 { 1369 struct l_rlimit rlim; 1370 struct 

int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}

	error = copyin(args->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	if (linux_map_sched_prio) {
		switch (policy) {
		case SCHED_OTHER:
			if (sched_param.sched_priority != 0)
				return (EINVAL);

			sched_param.sched_priority =
			    PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE;
			break;
		case SCHED_FIFO:
		case SCHED_RR:
			if (sched_param.sched_priority < 1 ||
			    sched_param.sched_priority >= LINUX_MAX_RT_PRIO)
				return (EINVAL);

			/*
			 * Map [1, LINUX_MAX_RT_PRIO - 1] to
			 * [0, RTP_PRIO_MAX - RTP_PRIO_MIN] (rounding down).
			 */
			sched_param.sched_priority =
			    (sched_param.sched_priority - 1) *
			    (RTP_PRIO_MAX - RTP_PRIO_MIN + 1) /
			    (LINUX_MAX_RT_PRIO - 1);
			break;
		}
	}

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_setscheduler(td, tdt, policy, &sched_param);
	PROC_UNLOCK(tdt->td_proc);
	return (error);
}
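
/*
 * Worked example (illustrative; assumes LINUX_MAX_RT_PRIO == 100 and
 * an RTP_PRIO_MIN..RTP_PRIO_MAX range of 0..31): a Linux real-time
 * priority of 1 maps to (1 - 1) * 32 / 99 == 0 and a priority of 99
 * maps to (99 - 1) * 32 / 99 == 31, so the Linux range [1, 99] is
 * compressed onto the FreeBSD range [0, 31] with truncation.
 */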

int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct thread *tdt;
	int error, policy;

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_getscheduler(td, tdt, &policy);
	PROC_UNLOCK(tdt->td_proc);

	switch (policy) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}
	return (error);
}

int
linux_sched_get_priority_max(struct thread *td,
    struct linux_sched_get_priority_max_args *args)
{
	struct sched_get_priority_max_args bsd;

	if (linux_map_sched_prio) {
		switch (args->policy) {
		case LINUX_SCHED_OTHER:
			td->td_retval[0] = 0;
			return (0);
		case LINUX_SCHED_FIFO:
		case LINUX_SCHED_RR:
			td->td_retval[0] = LINUX_MAX_RT_PRIO - 1;
			return (0);
		default:
			return (EINVAL);
		}
	}

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_max(td, &bsd));
}

int
linux_sched_get_priority_min(struct thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

	if (linux_map_sched_prio) {
		switch (args->policy) {
		case LINUX_SCHED_OTHER:
			td->td_retval[0] = 0;
			return (0);
		case LINUX_SCHED_FIFO:
		case LINUX_SCHED_RR:
			td->td_retval[0] = 1;
			return (0);
		default:
			return (EINVAL);
		}
	}

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_min(td, &bsd));
}

#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123
#define REBOOT_RESTART	0x01234567
#define REBOOT_RESTART2	0xA1B2C3D4
#define REBOOT_POWEROFF	0x4321FEDC
#define REBOOT_MAGIC1	0xfee1dead
#define REBOOT_MAGIC2	0x28121969
#define REBOOT_MAGIC2A	0x05121996
#define REBOOT_MAGIC2B	0x16041998

int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

	if (args->magic1 != REBOOT_MAGIC1)
		return (EINVAL);

	switch (args->magic2) {
	case REBOOT_MAGIC2:
	case REBOOT_MAGIC2A:
	case REBOOT_MAGIC2B:
		break;
	default:
		return (EINVAL);
	}

	switch (args->cmd) {
	case REBOOT_CAD_ON:
	case REBOOT_CAD_OFF:
		return (priv_check(td, PRIV_REBOOT));
	case REBOOT_HALT:
		bsd_args.opt = RB_HALT;
		break;
	case REBOOT_RESTART:
	case REBOOT_RESTART2:
		bsd_args.opt = 0;
		break;
	case REBOOT_POWEROFF:
		bsd_args.opt = RB_POWEROFF;
		break;
	default:
		return (EINVAL);
	}
	return (sys_reboot(td, &bsd_args));
}

int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{

	td->td_retval[0] = td->td_proc->p_pid;

	return (0);
}

int
linux_gettid(struct thread *td, struct linux_gettid_args *args)
{
	struct linux_emuldata *em;

	em = em_find(td);
	KASSERT(em != NULL, ("gettid: emuldata not found.\n"));

	td->td_retval[0] = em->em_tid;

	return (0);
}

int
linux_getppid(struct thread *td, struct linux_getppid_args *args)
{

	td->td_retval[0] = kern_getppid(td);
	return (0);
}

int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}

int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}

int
linux_getsid(struct thread *td, struct linux_getsid_args *args)
{

	return (kern_getsid(td, args->pid));
}

int
linux_nosys(struct thread *td, struct nosys_args *ignore)
{

	return (ENOSYS);
}

int
linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
{
	int error;

	error = kern_getpriority(td, args->which, args->who);
	td->td_retval[0] = 20 - td->td_retval[0];
	return (error);
}

int
linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
{
	int name[2];

	name[0] = CTL_KERN;
	name[1] = KERN_HOSTNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
	    args->len, 0, 0));
}

int
linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
{
	int name[2];

	name[0] = CTL_KERN;
	name[1] = KERN_NISDOMAINNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
	    args->len, 0, 0));
}

int
linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
{

	LINUX_CTR2(exit_group, "thread(%d) (%d)", td->td_tid,
	    args->error_code);

	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set.  We ignore that (temporarily?)
	 * as it doesn't occur often.
	 */
	exit1(td, args->error_code, 0);
	/* NOTREACHED */
}

#define _LINUX_CAPABILITY_VERSION_1	0x19980330
#define _LINUX_CAPABILITY_VERSION_2	0x20071026
#define _LINUX_CAPABILITY_VERSION_3	0x20080522

struct l_user_cap_header {
	l_int	version;
	l_int	pid;
};

struct l_user_cap_data {
	l_int	effective;
	l_int	permitted;
	l_int	inheritable;
};

int
linux_capget(struct thread *td, struct linux_capget_args *uap)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd[2];
	int error, u32s;

	if (uap->hdrp == NULL)
		return (EFAULT);

	error = copyin(uap->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	switch (luch.version) {
	case _LINUX_CAPABILITY_VERSION_1:
		u32s = 1;
		break;
	case _LINUX_CAPABILITY_VERSION_2:
	case _LINUX_CAPABILITY_VERSION_3:
		u32s = 2;
		break;
	default:
		luch.version = _LINUX_CAPABILITY_VERSION_1;
		error = copyout(&luch, uap->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	if (uap->datap) {
		/*
		 * The current implementation doesn't support setting
		 * a capability (it's essentially a stub) so indicate
		 * that no capabilities are currently set or available
		 * to request.
		 */
		memset(&lucd, 0, u32s * sizeof(lucd[0]));
		error = copyout(&lucd, uap->datap, u32s * sizeof(lucd[0]));
	}

	return (error);
}
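
/*
 * Illustrative usage (an assumption about typical callers, not part
 * of the original source): a Linux program probes the interface with
 * a header of { version = _LINUX_CAPABILITY_VERSION_3, pid = 0 } and
 * two l_user_cap_data slots; this stub copies back all-zero
 * effective/permitted/inheritable masks, which userland reads as "no
 * capabilities held".  Probing with an unknown version instead gets
 * the header rewritten to version 1 and an EINVAL return.
 */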

int
linux_capset(struct thread *td, struct linux_capset_args *uap)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd[2];
	int error, i, u32s;

	if (uap->hdrp == NULL || uap->datap == NULL)
		return (EFAULT);

	error = copyin(uap->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	switch (luch.version) {
	case _LINUX_CAPABILITY_VERSION_1:
		u32s = 1;
		break;
	case _LINUX_CAPABILITY_VERSION_2:
	case _LINUX_CAPABILITY_VERSION_3:
		u32s = 2;
		break;
	default:
		luch.version = _LINUX_CAPABILITY_VERSION_1;
		error = copyout(&luch, uap->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	error = copyin(uap->datap, &lucd, u32s * sizeof(lucd[0]));
	if (error != 0)
		return (error);

	/* We currently don't support setting any capabilities. */
	for (i = 0; i < u32s; i++) {
		if (lucd[i].effective || lucd[i].permitted ||
		    lucd[i].inheritable) {
			linux_msg(td,
			    "capset[%d] effective=0x%x, permitted=0x%x, "
			    "inheritable=0x%x is not implemented", i,
			    (int)lucd[i].effective, (int)lucd[i].permitted,
			    (int)lucd[i].inheritable);
			return (EPERM);
		}
	}

	return (0);
}
1880 */ 1881 error = copyin((void *)(register_t)args->arg2, comm, 1882 max_size - 1); 1883 comm[max_size - 1] = '\0'; 1884 } 1885 if (error) 1886 return (error); 1887 1888 PROC_LOCK(p); 1889 strlcpy(p->p_comm, comm, sizeof(p->p_comm)); 1890 PROC_UNLOCK(p); 1891 break; 1892 case LINUX_PR_GET_NAME: 1893 PROC_LOCK(p); 1894 strlcpy(comm, p->p_comm, sizeof(comm)); 1895 PROC_UNLOCK(p); 1896 error = copyout(comm, (void *)(register_t)args->arg2, 1897 strlen(comm) + 1); 1898 break; 1899 default: 1900 error = EINVAL; 1901 break; 1902 } 1903 1904 return (error); 1905 } 1906 1907 int 1908 linux_sched_setparam(struct thread *td, 1909 struct linux_sched_setparam_args *uap) 1910 { 1911 struct sched_param sched_param; 1912 struct thread *tdt; 1913 int error, policy; 1914 1915 error = copyin(uap->param, &sched_param, sizeof(sched_param)); 1916 if (error) 1917 return (error); 1918 1919 tdt = linux_tdfind(td, uap->pid, -1); 1920 if (tdt == NULL) 1921 return (ESRCH); 1922 1923 if (linux_map_sched_prio) { 1924 error = kern_sched_getscheduler(td, tdt, &policy); 1925 if (error) 1926 goto out; 1927 1928 switch (policy) { 1929 case SCHED_OTHER: 1930 if (sched_param.sched_priority != 0) { 1931 error = EINVAL; 1932 goto out; 1933 } 1934 sched_param.sched_priority = 1935 PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE; 1936 break; 1937 case SCHED_FIFO: 1938 case SCHED_RR: 1939 if (sched_param.sched_priority < 1 || 1940 sched_param.sched_priority >= LINUX_MAX_RT_PRIO) { 1941 error = EINVAL; 1942 goto out; 1943 } 1944 /* 1945 * Map [1, LINUX_MAX_RT_PRIO - 1] to 1946 * [0, RTP_PRIO_MAX - RTP_PRIO_MIN] (rounding down). 1947 */ 1948 sched_param.sched_priority = 1949 (sched_param.sched_priority - 1) * 1950 (RTP_PRIO_MAX - RTP_PRIO_MIN + 1) / 1951 (LINUX_MAX_RT_PRIO - 1); 1952 break; 1953 } 1954 } 1955 1956 error = kern_sched_setparam(td, tdt, &sched_param); 1957 out: PROC_UNLOCK(tdt->td_proc); 1958 return (error); 1959 } 1960 1961 int 1962 linux_sched_getparam(struct thread *td, 1963 struct linux_sched_getparam_args *uap) 1964 { 1965 struct sched_param sched_param; 1966 struct thread *tdt; 1967 int error, policy; 1968 1969 tdt = linux_tdfind(td, uap->pid, -1); 1970 if (tdt == NULL) 1971 return (ESRCH); 1972 1973 error = kern_sched_getparam(td, tdt, &sched_param); 1974 if (error) { 1975 PROC_UNLOCK(tdt->td_proc); 1976 return (error); 1977 } 1978 1979 if (linux_map_sched_prio) { 1980 error = kern_sched_getscheduler(td, tdt, &policy); 1981 PROC_UNLOCK(tdt->td_proc); 1982 if (error) 1983 return (error); 1984 1985 switch (policy) { 1986 case SCHED_OTHER: 1987 sched_param.sched_priority = 0; 1988 break; 1989 case SCHED_FIFO: 1990 case SCHED_RR: 1991 /* 1992 * Map [0, RTP_PRIO_MAX - RTP_PRIO_MIN] to 1993 * [1, LINUX_MAX_RT_PRIO - 1] (rounding up). 1994 */ 1995 sched_param.sched_priority = 1996 (sched_param.sched_priority * 1997 (LINUX_MAX_RT_PRIO - 1) + 1998 (RTP_PRIO_MAX - RTP_PRIO_MIN - 1)) / 1999 (RTP_PRIO_MAX - RTP_PRIO_MIN) + 1; 2000 break; 2001 } 2002 } else 2003 PROC_UNLOCK(tdt->td_proc); 2004 2005 error = copyout(&sched_param, uap->param, sizeof(sched_param)); 2006 return (error); 2007 } 2008 2009 /* 2010 * Get affinity of a process. 
2011 */ 2012 int 2013 linux_sched_getaffinity(struct thread *td, 2014 struct linux_sched_getaffinity_args *args) 2015 { 2016 int error; 2017 struct thread *tdt; 2018 2019 if (args->len < sizeof(cpuset_t)) 2020 return (EINVAL); 2021 2022 tdt = linux_tdfind(td, args->pid, -1); 2023 if (tdt == NULL) 2024 return (ESRCH); 2025 2026 PROC_UNLOCK(tdt->td_proc); 2027 2028 error = kern_cpuset_getaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID, 2029 tdt->td_tid, sizeof(cpuset_t), (cpuset_t *)args->user_mask_ptr); 2030 if (error == 0) 2031 td->td_retval[0] = sizeof(cpuset_t); 2032 2033 return (error); 2034 } 2035 2036 /* 2037 * Set affinity of a process. 2038 */ 2039 int 2040 linux_sched_setaffinity(struct thread *td, 2041 struct linux_sched_setaffinity_args *args) 2042 { 2043 struct thread *tdt; 2044 2045 if (args->len < sizeof(cpuset_t)) 2046 return (EINVAL); 2047 2048 tdt = linux_tdfind(td, args->pid, -1); 2049 if (tdt == NULL) 2050 return (ESRCH); 2051 2052 PROC_UNLOCK(tdt->td_proc); 2053 2054 return (kern_cpuset_setaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID, 2055 tdt->td_tid, sizeof(cpuset_t), (cpuset_t *) args->user_mask_ptr)); 2056 } 2057 2058 struct linux_rlimit64 { 2059 uint64_t rlim_cur; 2060 uint64_t rlim_max; 2061 }; 2062 2063 int 2064 linux_prlimit64(struct thread *td, struct linux_prlimit64_args *args) 2065 { 2066 struct rlimit rlim, nrlim; 2067 struct linux_rlimit64 lrlim; 2068 struct proc *p; 2069 u_int which; 2070 int flags; 2071 int error; 2072 2073 if (args->resource >= LINUX_RLIM_NLIMITS) 2074 return (EINVAL); 2075 2076 which = linux_to_bsd_resource[args->resource]; 2077 if (which == -1) 2078 return (EINVAL); 2079 2080 if (args->new != NULL) { 2081 /* 2082 * Note. Unlike FreeBSD where rlim is signed 64-bit Linux 2083 * rlim is unsigned 64-bit. FreeBSD treats negative limits 2084 * as INFINITY so we do not need a conversion even. 
2085 */ 2086 error = copyin(args->new, &nrlim, sizeof(nrlim)); 2087 if (error != 0) 2088 return (error); 2089 } 2090 2091 flags = PGET_HOLD | PGET_NOTWEXIT; 2092 if (args->new != NULL) 2093 flags |= PGET_CANDEBUG; 2094 else 2095 flags |= PGET_CANSEE; 2096 if (args->pid == 0) { 2097 p = td->td_proc; 2098 PHOLD(p); 2099 } else { 2100 error = pget(args->pid, flags, &p); 2101 if (error != 0) 2102 return (error); 2103 } 2104 if (args->old != NULL) { 2105 PROC_LOCK(p); 2106 lim_rlimit_proc(p, which, &rlim); 2107 PROC_UNLOCK(p); 2108 if (rlim.rlim_cur == RLIM_INFINITY) 2109 lrlim.rlim_cur = LINUX_RLIM_INFINITY; 2110 else 2111 lrlim.rlim_cur = rlim.rlim_cur; 2112 if (rlim.rlim_max == RLIM_INFINITY) 2113 lrlim.rlim_max = LINUX_RLIM_INFINITY; 2114 else 2115 lrlim.rlim_max = rlim.rlim_max; 2116 error = copyout(&lrlim, args->old, sizeof(lrlim)); 2117 if (error != 0) 2118 goto out; 2119 } 2120 2121 if (args->new != NULL) 2122 error = kern_proc_setrlimit(td, p, which, &nrlim); 2123 2124 out: 2125 PRELE(p); 2126 return (error); 2127 } 2128 2129 int 2130 linux_pselect6(struct thread *td, struct linux_pselect6_args *args) 2131 { 2132 struct timeval utv, tv0, tv1, *tvp; 2133 struct l_pselect6arg lpse6; 2134 struct l_timespec lts; 2135 struct timespec uts; 2136 l_sigset_t l_ss; 2137 sigset_t *ssp; 2138 sigset_t ss; 2139 int error; 2140 2141 ssp = NULL; 2142 if (args->sig != NULL) { 2143 error = copyin(args->sig, &lpse6, sizeof(lpse6)); 2144 if (error != 0) 2145 return (error); 2146 if (lpse6.ss_len != sizeof(l_ss)) 2147 return (EINVAL); 2148 if (lpse6.ss != 0) { 2149 error = copyin(PTRIN(lpse6.ss), &l_ss, 2150 sizeof(l_ss)); 2151 if (error != 0) 2152 return (error); 2153 linux_to_bsd_sigset(&l_ss, &ss); 2154 ssp = &ss; 2155 } 2156 } 2157 2158 /* 2159 * Currently glibc changes nanosecond number to microsecond. 2160 * This mean losing precision but for now it is hardly seen. 2161 */ 2162 if (args->tsp != NULL) { 2163 error = copyin(args->tsp, <s, sizeof(lts)); 2164 if (error != 0) 2165 return (error); 2166 error = linux_to_native_timespec(&uts, <s); 2167 if (error != 0) 2168 return (error); 2169 2170 TIMESPEC_TO_TIMEVAL(&utv, &uts); 2171 if (itimerfix(&utv)) 2172 return (EINVAL); 2173 2174 microtime(&tv0); 2175 tvp = &utv; 2176 } else 2177 tvp = NULL; 2178 2179 error = kern_pselect(td, args->nfds, args->readfds, args->writefds, 2180 args->exceptfds, tvp, ssp, LINUX_NFDBITS); 2181 2182 if (error == 0 && args->tsp != NULL) { 2183 if (td->td_retval[0] != 0) { 2184 /* 2185 * Compute how much time was left of the timeout, 2186 * by subtracting the current time and the time 2187 * before we started the call, and subtracting 2188 * that result from the user-supplied value. 
2189 */ 2190 2191 microtime(&tv1); 2192 timevalsub(&tv1, &tv0); 2193 timevalsub(&utv, &tv1); 2194 if (utv.tv_sec < 0) 2195 timevalclear(&utv); 2196 } else 2197 timevalclear(&utv); 2198 2199 TIMEVAL_TO_TIMESPEC(&utv, &uts); 2200 2201 error = native_to_linux_timespec(<s, &uts); 2202 if (error == 0) 2203 error = copyout(<s, args->tsp, sizeof(lts)); 2204 } 2205 2206 return (error); 2207 } 2208 2209 int 2210 linux_ppoll(struct thread *td, struct linux_ppoll_args *args) 2211 { 2212 struct timespec ts0, ts1; 2213 struct l_timespec lts; 2214 struct timespec uts, *tsp; 2215 l_sigset_t l_ss; 2216 sigset_t *ssp; 2217 sigset_t ss; 2218 int error; 2219 2220 if (args->sset != NULL) { 2221 if (args->ssize != sizeof(l_ss)) 2222 return (EINVAL); 2223 error = copyin(args->sset, &l_ss, sizeof(l_ss)); 2224 if (error) 2225 return (error); 2226 linux_to_bsd_sigset(&l_ss, &ss); 2227 ssp = &ss; 2228 } else 2229 ssp = NULL; 2230 if (args->tsp != NULL) { 2231 error = copyin(args->tsp, <s, sizeof(lts)); 2232 if (error) 2233 return (error); 2234 error = linux_to_native_timespec(&uts, <s); 2235 if (error != 0) 2236 return (error); 2237 2238 nanotime(&ts0); 2239 tsp = &uts; 2240 } else 2241 tsp = NULL; 2242 2243 error = kern_poll(td, args->fds, args->nfds, tsp, ssp); 2244 2245 if (error == 0 && args->tsp != NULL) { 2246 if (td->td_retval[0]) { 2247 nanotime(&ts1); 2248 timespecsub(&ts1, &ts0, &ts1); 2249 timespecsub(&uts, &ts1, &uts); 2250 if (uts.tv_sec < 0) 2251 timespecclear(&uts); 2252 } else 2253 timespecclear(&uts); 2254 2255 error = native_to_linux_timespec(<s, &uts); 2256 if (error == 0) 2257 error = copyout(<s, args->tsp, sizeof(lts)); 2258 } 2259 2260 return (error); 2261 } 2262 2263 int 2264 linux_sched_rr_get_interval(struct thread *td, 2265 struct linux_sched_rr_get_interval_args *uap) 2266 { 2267 struct timespec ts; 2268 struct l_timespec lts; 2269 struct thread *tdt; 2270 int error; 2271 2272 /* 2273 * According to man in case the invalid pid specified 2274 * EINVAL should be returned. 2275 */ 2276 if (uap->pid < 0) 2277 return (EINVAL); 2278 2279 tdt = linux_tdfind(td, uap->pid, -1); 2280 if (tdt == NULL) 2281 return (ESRCH); 2282 2283 error = kern_sched_rr_get_interval_td(td, tdt, &ts); 2284 PROC_UNLOCK(tdt->td_proc); 2285 if (error != 0) 2286 return (error); 2287 error = native_to_linux_timespec(<s, &ts); 2288 if (error != 0) 2289 return (error); 2290 return (copyout(<s, uap->interval, sizeof(lts))); 2291 } 2292 2293 /* 2294 * In case when the Linux thread is the initial thread in 2295 * the thread group thread id is equal to the process id. 2296 * Glibc depends on this magic (assert in pthread_getattr_np.c). 2297 */ 2298 struct thread * 2299 linux_tdfind(struct thread *td, lwpid_t tid, pid_t pid) 2300 { 2301 struct linux_emuldata *em; 2302 struct thread *tdt; 2303 struct proc *p; 2304 2305 tdt = NULL; 2306 if (tid == 0 || tid == td->td_tid) { 2307 tdt = td; 2308 PROC_LOCK(tdt->td_proc); 2309 } else if (tid > PID_MAX) 2310 tdt = tdfind(tid, pid); 2311 else { 2312 /* 2313 * Initial thread where the tid equal to the pid. 2314 */ 2315 p = pfind(tid); 2316 if (p != NULL) { 2317 if (SV_PROC_ABI(p) != SV_ABI_LINUX) { 2318 /* 2319 * p is not a Linuxulator process. 
2320 */ 2321 PROC_UNLOCK(p); 2322 return (NULL); 2323 } 2324 FOREACH_THREAD_IN_PROC(p, tdt) { 2325 em = em_find(tdt); 2326 if (tid == em->em_tid) 2327 return (tdt); 2328 } 2329 PROC_UNLOCK(p); 2330 } 2331 return (NULL); 2332 } 2333 2334 return (tdt); 2335 } 2336 2337 void 2338 linux_to_bsd_waitopts(int options, int *bsdopts) 2339 { 2340 2341 if (options & LINUX_WNOHANG) 2342 *bsdopts |= WNOHANG; 2343 if (options & LINUX_WUNTRACED) 2344 *bsdopts |= WUNTRACED; 2345 if (options & LINUX_WEXITED) 2346 *bsdopts |= WEXITED; 2347 if (options & LINUX_WCONTINUED) 2348 *bsdopts |= WCONTINUED; 2349 if (options & LINUX_WNOWAIT) 2350 *bsdopts |= WNOWAIT; 2351 2352 if (options & __WCLONE) 2353 *bsdopts |= WLINUXCLONE; 2354 } 2355 2356 int 2357 linux_getrandom(struct thread *td, struct linux_getrandom_args *args) 2358 { 2359 struct uio uio; 2360 struct iovec iov; 2361 int error; 2362 2363 if (args->flags & ~(LINUX_GRND_NONBLOCK|LINUX_GRND_RANDOM)) 2364 return (EINVAL); 2365 if (args->count > INT_MAX) 2366 args->count = INT_MAX; 2367 2368 iov.iov_base = args->buf; 2369 iov.iov_len = args->count; 2370 2371 uio.uio_iov = &iov; 2372 uio.uio_iovcnt = 1; 2373 uio.uio_resid = iov.iov_len; 2374 uio.uio_segflg = UIO_USERSPACE; 2375 uio.uio_rw = UIO_READ; 2376 uio.uio_td = td; 2377 2378 error = read_random_uio(&uio, args->flags & LINUX_GRND_NONBLOCK); 2379 if (error == 0) 2380 td->td_retval[0] = args->count - uio.uio_resid; 2381 return (error); 2382 } 2383 2384 int 2385 linux_mincore(struct thread *td, struct linux_mincore_args *args) 2386 { 2387 2388 /* Needs to be page-aligned */ 2389 if (args->start & PAGE_MASK) 2390 return (EINVAL); 2391 return (kern_mincore(td, args->start, args->len, args->vec)); 2392 } 2393 2394 #define SYSLOG_TAG "<6>" 2395 2396 int 2397 linux_syslog(struct thread *td, struct linux_syslog_args *args) 2398 { 2399 char buf[128], *src, *dst; 2400 u_int seq; 2401 int buflen, error; 2402 2403 if (args->type != LINUX_SYSLOG_ACTION_READ_ALL) { 2404 linux_msg(td, "syslog unsupported type 0x%x", args->type); 2405 return (EINVAL); 2406 } 2407 2408 if (args->len < 6) { 2409 td->td_retval[0] = 0; 2410 return (0); 2411 } 2412 2413 error = priv_check(td, PRIV_MSGBUF); 2414 if (error) 2415 return (error); 2416 2417 mtx_lock(&msgbuf_lock); 2418 msgbuf_peekbytes(msgbufp, NULL, 0, &seq); 2419 mtx_unlock(&msgbuf_lock); 2420 2421 dst = args->buf; 2422 error = copyout(&SYSLOG_TAG, dst, sizeof(SYSLOG_TAG)); 2423 /* The -1 is to skip the trailing '\0'. 

#define SYSLOG_TAG	"<6>"

int
linux_syslog(struct thread *td, struct linux_syslog_args *args)
{
	char buf[128], *src, *dst;
	u_int seq;
	int buflen, error;

	if (args->type != LINUX_SYSLOG_ACTION_READ_ALL) {
		linux_msg(td, "syslog unsupported type 0x%x", args->type);
		return (EINVAL);
	}

	if (args->len < 6) {
		td->td_retval[0] = 0;
		return (0);
	}

	error = priv_check(td, PRIV_MSGBUF);
	if (error)
		return (error);

	mtx_lock(&msgbuf_lock);
	msgbuf_peekbytes(msgbufp, NULL, 0, &seq);
	mtx_unlock(&msgbuf_lock);

	dst = args->buf;
	error = copyout(&SYSLOG_TAG, dst, sizeof(SYSLOG_TAG));
	/* The -1 is to skip the trailing '\0'. */
	dst += sizeof(SYSLOG_TAG) - 1;

	while (error == 0) {
		mtx_lock(&msgbuf_lock);
		buflen = msgbuf_peekbytes(msgbufp, buf, sizeof(buf), &seq);
		mtx_unlock(&msgbuf_lock);

		if (buflen == 0)
			break;

		for (src = buf; src < buf + buflen && error == 0; src++) {
			if (*src == '\0')
				continue;

			if (dst >= args->buf + args->len)
				goto out;

			error = copyout(src, dst, 1);
			dst++;

			if (*src == '\n' && *(src + 1) != '<' &&
			    dst + sizeof(SYSLOG_TAG) < args->buf + args->len) {
				error = copyout(&SYSLOG_TAG,
				    dst, sizeof(SYSLOG_TAG));
				dst += sizeof(SYSLOG_TAG) - 1;
			}
		}
	}
out:
	td->td_retval[0] = dst - args->buf;
	return (error);
}

int
linux_getcpu(struct thread *td, struct linux_getcpu_args *args)
{
	int cpu, error, node;

	cpu = td->td_oncpu;	/* Make sure it doesn't change during copyout(9). */
	error = 0;
	node = cpuid_to_pcpu[cpu]->pc_domain;

	if (args->cpu != NULL)
		error = copyout(&cpu, args->cpu, sizeof(l_int));
	if (args->node != NULL)
		error = copyout(&node, args->node, sizeof(l_int));
	return (error);
}